gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
#!/usr/bin/env python3
# encoding: utf-8
"""
blast.py
Python interface to the RCSB REST API for BLASTing sequences. The interface uses the BioCache class to reduce repeatedly
hitting the RCSB servers.
Created by Shane O'Connor 2016.
"""
import datetime
import os
import urllib.request, urllib.error, urllib.parse
import string
from klab import colortext
from klab.bio.cache import BioCache
upper_case_letters = set(list(string.ascii_uppercase))
class BLAST(object):
    '''Python interface to the RCSB REST API for BLASTing protein sequences.

    Using a class makes it easier to set the BLAST parameters once. Results are
    cached through a BioCache instance (when available) to reduce repeatedly
    hitting the RCSB servers.
    '''

    # Timestamp format used to date cached query results.
    date_format = '%Y-%m-%dT%H:%M:%S'

    def __init__(self, bio_cache = None, cache_dir = None, matrix = 'BLOSUM62', silent = False, cut_off = 0.001, sequence_identity_cut_off = 70, stale_period_in_hours = 7 * 24, min_sequence_length = 20, force_lookup = False):
        '''If data is staler than stale_period_in_hours then we query it anew from the source e.g. BLAST results.'''
        self.bio_cache = bio_cache
        self.cache_dir = cache_dir
        if not(bio_cache) and (cache_dir and os.path.exists(cache_dir)):
            # Build a cache on demand when only a cache directory was supplied.
            self.bio_cache = BioCache(cache_dir = cache_dir, max_capacity = 1000, silent = True)
        self.silent = silent
        self.matrix = matrix
        self.cut_off = cut_off
        self.sequence_identity_cut_off = sequence_identity_cut_off
        self.stale_period_in_hours = stale_period_in_hours
        self.min_sequence_length = min_sequence_length
        self.force_lookup = force_lookup

    #########################
    #  Utility functions
    #########################

    def log(self, msg, silent, pfunk = None):
        '''Print msg via pfunk (or plain print) unless silenced.

        A silent of None falls back to the instance-wide setting.'''
        if silent is None:
            silent = self.silent
        if not silent:
            if pfunk:
                pfunk(msg)
            else:
                print(msg)

    #########################
    #  BLAST functions
    #########################

    def by_pdb(self, pdb_id, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
        '''Returns a sorted list of all PDB IDs which contain protein sequences similar to the protein sequences of pdb_id.

        Only protein chains are considered in the matching so e.g. some results may have DNA or RNA chains or ligands
        while some may not.

        NOTE(review): take_top_percentile is accepted and threaded through but
        never applied to the hit list — confirm against the intended behavior.
        '''
        self.log('BLASTing {0}'.format(pdb_id), silent, colortext.pcyan)

        # Fall back to instance-wide defaults for unspecified parameters.
        matrix = matrix or self.matrix
        cut_off = cut_off or self.cut_off
        sequence_identity_cut_off = sequence_identity_cut_off or self.sequence_identity_cut_off

        # Parse the PDB file to enumerate its chains.
        p = self.bio_cache.get_pdb_object(pdb_id)
        chain_ids = sorted(p.seqres_sequences.keys())
        assert(chain_ids)

        # BLAST every chain and intersect the per-chain hits so only structures
        # matching all protein chains survive. (Bug fix: the original crashed
        # with set(None) when the *first* chain was not a protein chain.)
        hits = None
        for chain_id in chain_ids:
            chain_hits = self.blast_by_pdb_chain(pdb_id, chain_id, cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off, take_top_percentile = take_top_percentile, silent = silent)
            if chain_hits is not None:
                # None means the chain was not a protein chain whereas an empty
                # list means a protein chain with no hits.
                chain_hits = set(chain_hits)
                hits = chain_hits if hits is None else hits.intersection(chain_hits)
        return sorted(hits or [])

    def blast_by_pdb_chain(self, pdb_id, chain_id, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
        '''BLAST a single chain of a PDB structure.

        Returns a list of matching PDB IDs, or None when the chain is not a
        protein chain (or is shorter than min_sequence_length).
        '''
        # Sanity-check the identifiers.
        pdb_id, chain_id = pdb_id.strip(), chain_id.strip()
        if len(pdb_id) != 4:
            raise Exception('A PDB ID of four characters was expected. "{0}" was passed.'.format(pdb_id))
        if not (1 <= len(chain_id) <= 4):
            # Bug fix: the original test (5 <= len(chain_id) <= 0) could never
            # be true, so invalid chain IDs slipped through.
            raise Exception('A chain ID of between 1-4 characters was expected. "{0}" was passed.'.format(chain_id))
        self.log('BLASTing {0}:{1}'.format(pdb_id, chain_id), silent)

        # Construct query
        query_data = dict(
            structureId = pdb_id,
            chainId = chain_id,
        )
        xml_query = self._construct_query(query_data, cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off)

        # Return cached results if they exist and are fresh enough.
        if self.bio_cache:
            data = self.bio_cache.load_pdb_chain_blast(pdb_id, chain_id, query_data['eCutOff'], query_data['matrix'], query_data['sequenceIdentityCutoff'])
            if data:
                assert('query_date' in data)
                query_date = datetime.datetime.strptime(data['query_date'], BLAST.date_format)
                age_in_hours = ((datetime.datetime.now() - query_date).total_seconds()) / 3600.0
                assert(age_in_hours > -24.01)  # tolerate clock skew / DST shifts
                if not self.force_lookup and age_in_hours < self.stale_period_in_hours:
                    return data['hits']

        # POST the request and parse the PDB hits
        result = self._post(xml_query)
        hits = [l.strip().split(':')[0] for l in result.split('\n') if l.strip()]
        if pdb_id not in hits:
            if not hits:
                # Distinguish "no hits" from "not a protein chain at all".
                try:
                    p = self.bio_cache.get_pdb_object(pdb_id)
                    chain_type = p.chain_types[chain_id]
                    sequence_length = len(p.seqres_sequences[chain_id])
                    if not(chain_type == 'Protein' or chain_type == 'Protein skeleton'):
                        colortext.warning('Chain {1} of {0} is a {2} chain.'.format(pdb_id, chain_id, chain_type))
                        hits = None # None suggests that the chain was not a protein chain whereas an empty list suggest a protein chain with no hits
                    elif sequence_length < self.min_sequence_length:
                        colortext.warning('Chain {1} of {0} only contains {2} residues. The minimum sequence length is set to {3} residues so we will ignore this chain in matching.'.format(pdb_id, chain_id, sequence_length, self.min_sequence_length))
                        hits = None # None suggests that the chain was not a protein chain whereas an empty list suggest a protein chain with no hits
                except Exception:
                    raise colortext.Exception('Failed to determine the chain type for chain {1} of {0}.'.format(pdb_id, chain_id))
            else:
                raise Exception('A BLAST of {0} chain {1} failed to find any hits for {0}. Is the chain a polypeptide chain?'.format(pdb_id, chain_id))
        query_data['hits'] = hits

        # Cache the results
        if self.bio_cache:
            self.bio_cache.save_pdb_chain_blast(pdb_id, chain_id, query_data['eCutOff'], query_data['matrix'], query_data['sequenceIdentityCutoff'], query_data)
        return query_data['hits']

    def by_sequence(self, sequence, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
        '''BLAST a raw protein sequence; returns the list of matching PDB IDs.'''
        # We allow all upper-case characters just in case these are valid.
        # Alternatively, we could check against basics.py::residue_type_1to3_map.keys() - 'X'.
        if set(sequence).intersection(upper_case_letters) != set(sequence):
            raise Exception('The sequence {0} contained unexpected characters: {1}.'.format(colortext.myellow(sequence), colortext.morange(','.join(sorted(set(sequence).difference(upper_case_letters))))))
        self.log('BLASTing sequence {0}'.format(sequence), silent)

        # Construct query
        query_data = dict(sequence = sequence)
        xml_query = self._construct_query(query_data, cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off)

        # Return cached results if they exist and are fresh enough. (Fix:
        # honor self.force_lookup here for consistency with blast_by_pdb_chain,
        # which previously was the only code path that respected it.)
        if self.bio_cache:
            data = self.bio_cache.load_sequence_blast(sequence, query_data['eCutOff'], query_data['matrix'], query_data['sequenceIdentityCutoff'])
            if data:
                assert('query_date' in data)
                query_date = datetime.datetime.strptime(data['query_date'], BLAST.date_format)
                age_in_hours = ((datetime.datetime.now() - query_date).total_seconds()) / 3600.0
                assert(age_in_hours > -24.01)  # tolerate clock skew / DST shifts
                if not self.force_lookup and age_in_hours < self.stale_period_in_hours:
                    return data['hits']

        # POST the request and parse the PDB hits
        result = self._post(xml_query)
        hits = [l.strip().split(':')[0] for l in result.split('\n') if l.strip()]
        query_data['hits'] = hits

        # Cache the results
        if self.bio_cache:
            self.bio_cache.save_sequence_blast(sequence, query_data['eCutOff'], query_data['matrix'], query_data['sequenceIdentityCutoff'], query_data)
        return query_data['hits']

    #########################
    #  Private functions
    #########################

    def _construct_query(self, query_data, cut_off = None, matrix = None, sequence_identity_cut_off = None):
        '''Fill in query_data defaults and return the RCSB XML query string.

        query_data is mutated in place: matrix, eCutOff, sequenceIdentityCutoff
        and query_date are added when missing (the first three double as cache
        keys for the callers).
        '''
        if 'matrix' not in query_data:
            query_data['matrix'] = matrix or self.matrix
        if 'eCutOff' not in query_data:
            query_data['eCutOff'] = cut_off or self.cut_off
        if 'sequenceIdentityCutoff' not in query_data:
            query_data['sequenceIdentityCutoff'] = sequence_identity_cut_off or self.sequence_identity_cut_off
        query_data['query_date'] = datetime.datetime.strftime(datetime.datetime.now(), BLAST.date_format)

        # The description and id/sequence elements differ between the two
        # query flavors (structure+chain vs raw sequence).
        description = ''
        extra_lines = []
        if 'structureId' in query_data and 'chainId' in query_data:
            description = 'Sequence Search (Structure:Chain = {structureId}:{chainId}, Expectation Value = {eCutOff}, Search Tool = BLAST)'
            extra_lines += ['\t<structureId>{structureId}</structureId>'.format(**query_data), '\t<chainId>{chainId}</chainId>'.format(**query_data)]
        elif 'sequence' in query_data:
            description = 'Sequence Search (Sequence = {sequence}, Expectation Value = {eCutOff}, Search Tool = BLAST)'
            extra_lines += ['\t<sequence>{sequence}</sequence>'.format(**query_data)]
        xml_query = '\n'.join([
            '<orgPdbQuery>',
            '\t<queryType>org.pdb.query.simple.SequenceQuery</queryType>',
            '\t<description>' + description + '</description>',
        ] + extra_lines + [
            '\t<eCutOff>{eCutOff}</eCutOff>',
            '\t<searchTool>blast</searchTool>',
            '\t<sequenceIdentityCutoff>{sequenceIdentityCutoff}</sequenceIdentityCutoff>',
            '</orgPdbQuery>']).format(**query_data)
        return xml_query

    def _post(self, xml_query):
        '''POST the XML query to the RCSB search service and return the decoded
        response body.'''
        # Bug fix: urllib.request requires a bytes payload and returns bytes
        # from read(); the original passed and returned str (py2 leftover).
        req = urllib.request.Request(url = 'http://www.rcsb.org/pdb/rest/search', data = xml_query.encode('utf-8'))
        f = urllib.request.urlopen(req)
        try:
            return f.read().decode('utf-8').strip()
        finally:
            f.close()
|
|
import gtk
import gtk.gdk as gdk
import gobject
class List(object):
    # Minimal virtual-list data model: reports a fixed row count and produces
    # placeholder cell values on demand.  Serves as the simplest data backend
    # for DrawGrid; subclasses override get_row/get_item with real data.
    def __init__(self, count):
        # count: number of rows this list pretends to contain.
        self.count = count
    def __len__(self):
        return self.count
    def get_row(self, row):
        # Base implementation holds no real row data.
        return None
    def get_item(self, row, col):
        # NOTE(review): debug trace — prints on every cell fetch, which is
        # very noisy during redraws; presumably left in deliberately for the demo.
        print "get_data(%d, %d)" % (row, col)
        return "(%d, %d)" % (row, col)
import sqlite3 as sqlite
class SQLiteQueryList(List):
def __init__(self, db, tablename, fields='*', where=None, fetch_count=256):
self.L = []
SQLquery = "SELECT %s FROM %s" % (fields, tablename)
if where:
SQLquery += " WHERE " + where
count = db.execute("SELECT COUNT(*) FROM %s" % tablename).fetchone()[0]
count = 100
self.cursor = db.execute(SQLquery)
self.fetch_count = fetch_count
List.__init__(self, count)
def get_row(self, row):
try:
return self.L[row]
except IndexError:
d = row - len(self.L) + 1
d = (d + self.fetch_count - 1)/self.fetch_count
print "fetching %d rows" % (d*self.fetch_count)
self.L.extend(self.cursor.fetchmany(d*self.fetch_count))
try:
return self.L[row]
except:
print len(self.L), row
raise
def get_item(self, row, col):
return self.get_row(row)[col]
class DrawGrid(gtk.DrawingArea):
def __init__(self, columns, data):
gtk.DrawingArea.__init__(self)
self.__draw_hlines = False
self.__draw_vlines = False
self.columns = columns
self.data = data
self.h = 24
self.top = 0
self.height = len(data)*self.h
self.width = None
self.top_pix = 0
self.left_pix = 0
self.do_layout()
self.gc = None
def set_enable_grid_lines(self, val):
pass
def get_enable_grid_lines(self):
return self.__enable
def do_layout(self):
self.width = sum(item[0] for item in columns)
def set_width(self, column, newwidth):
item = self.columns[column]
if item[0] != newwidth:
print "here"
self.columns[column] = (newwidth, item[1])
self.queue_draw()
def set_h_scroll(self, x):
if self.left_pix != x:
self.left_pix = x
self.queue_draw()
def set_v_scroll(self, y):
self.top_pix = y
self.queue_draw()
def draw_area(self, row_start, row_end):
y = - (self.top_pix % self.h)
for i in xrange(row_start, row_end+1):
x = -self.left_pix
for j, (w, renderer) in enumerate(self.columns):
r = gtk.gdk.Rectangle(x, y, w, self.h)
background_area = r
cell_area = r
expose_area = cell_area
renderer.set_property("text", self.data.get_item(i, j))
renderer.render(
self.window,
self,
background_area,
cell_area,
expose_area,
gtk.CELL_RENDERER_PRELIT
)
x += w
y += self.h
def draw_grid(self):
wnd = self.window
y = - (self.top_pix % self.h)
w = self.allocation.width
h = self.allocation.height
while y <= w:
wnd.draw_line(self.gc, 0, y, w, y)
y += self.h
x = -self.left_pix
for width, renderer in self.columns:
wnd.draw_line(self.gc, x, 0, x, h)
x += width
def do_realize(self):
gtk.DrawingArea.do_realize(self)
gc = self.get_style().bg_gc[gtk.STATE_NORMAL]
self.gc = gtk.gdk.GC(self.window)
self.gc.copy(gc)
self.gc.set_rgb_fg_color(gtk.gdk.Color(0xffff, 0xffff, 0xffff))
def do_expose_event(self, event):
t = self.top_pix / self.h
b = (self.top_pix + self.allocation.height) / self.h
print t, b, b-t+1
self.draw_grid()
self.draw_area(t, b)
gobject.type_register(DrawGrid)
class Header(gtk.Container):
def __init__(self, grid_view = None):
gtk.Container.__init__(self)
self.grid_view = grid_view
self.columns = []
self.height = 45
self.highlight = [None, 0, None, 0]
self.dragging = False
self.drag_dist = 10
self.last_x = None
self.gc = None
self._changed = False
self.total_width = 0
self.x_scroll = 0
self.drag_cursor = gtk.gdk.Cursor(gtk.gdk.SB_H_DOUBLE_ARROW)
def _do_layout(self):
y = 0
x = - self.x_scroll
h = self.height
for w, button in self.columns:
R = gdk.Rectangle(x, y, w, h)
button.size_allocate(R)
x += w
self.total_width = x
def set_scroll(self, x):
if x != self.x_scroll:
self.x_scroll = x
self._do_layout()
def set_column_width(self, index, width):
self.columns[index] = (width, self.columns[index][1])
def get_right_edge(self, column_index):
return self.get_left_edge(column_index) + self.columns[column_index][0]
def get_left_edge(self, column_index):
"returns x positon of left column"
if column_index < 0:
return 0
return sum(width for width, _ in self.columns[:column_index])
def add_column(self, title, width):
b = gtk.Button(title)
b.size_request()
b.set_parent(self)
b.index = len(self.columns)
self.columns.append((width, b))
self._changed = True
b.add_events(gtk.gdk.POINTER_MOTION_MASK)
b.connect("event", self.event)
def __column_edge(self, widget, event):
if event.x <= self.drag_dist:
return -1
elif widget.allocation.width - event.x <= self.drag_dist:
return +1
else:
return 0
def set_highlighted(self, button, edge):
if self.highlight[0] != button:
if self.highlight[1] != 0:
self.highlight[0].window.set_cursor(None)
self.highlight[0] = button
if edge:
button.window.set_cursor(self.drag_cursor)
else:
button.window.set_cursor(None)
elif edge != self.highlight[1]:
self.highlight[1] = edge
if edge:
button.window.set_cursor(self.drag_cursor)
else:
button.window.set_cursor(None)
pass
def event(self, button, event):
if self.dragging:
if event.type == gtk.gdk.MOTION_NOTIFY:
button = self.highlight[0]
x = int(event.x)
button.set_size_request(x, self.height)
self._do_layout()
try:
self.on_dragging(self.highlight[0], self.highlight[2] + x)
finally:
return True
elif event.type == gtk.gdk.BUTTON_RELEASE:
try:
self.on_end_drag(self.highlight[0], True)
finally:
self.dragging = False
return True
elif event.type == gtk.gdk.ENTER_NOTIFY or event.type == gtk.gdk.LEAVE_NOTIFY:
return True
elif event.type == gdk.KEY_PRESS:
try:
if event.keyval == gtk.keysyms.Escape:
self.dragging = False
self.on_end_drag(self.highlight[0], False)
finally:
return True
else:
if event.type == gtk.gdk.MOTION_NOTIFY:
e = self.__column_edge(button, event)
self.set_highlighted(button, e)
elif event.type == gtk.gdk.BUTTON_PRESS and event.button == 1:
if self.highlight[1] == +1:
self.highlight[2] = self.get_left_edge(button.index)
self.highlight[3] = self.columns[button.index][0]
self.dragging = True
try:
self.on_start_drag(self.highlight[0], self.highlight[2] + int(event.x))
finally:
return True
def on_start_drag(self, button, x):
self.last_x == None
self.draw_line(x)
pass
def on_dragging(self, button, x):
try:
self.draw_line(x)
self.set_column_width(button.index, x - button.allocation.x)
self._do_layout()
except:
import traceback
traceback.print_exc()
def on_end_drag(self, button, accepted):
if accepted:
try:
self.grid_view.set_width(button.index, button.allocation.width)
except:
import traceback
traceback.print_exc()
else:
self.set_column_width(button.index, self.highlight[3])
self._do_layout()
self.draw_line(-100)
self.last_x = None
pass
def draw_line(self, x):
if not self.grid_view:
return
window = self.grid_view.window
if self.gc is None:
self.gc = gtk.gdk.GC(window)
self.gc.copy(self.grid_view.gc)
self.gc.set_function(gtk.gdk.XOR)
self.gc.line_width = 3
y = 1000
print self.last_x, x
if self.last_x is not None:
window.draw_line(self.gc, self.last_x, 0, self.last_x, y)
window.draw_line(self.gc, x, 0, x, y)
self.last_x = x
def do_realize(self):
# The do_realize method is responsible for creating GDK (windowing system)
# resources. In this example we will create a new gdk.Window which we
# then draw on
# First set an internal flag telling that we're realized
self.set_flags(gtk.REALIZED)
# Create a new gdk.Window which we can draw on.
# Also say that we want to receive exposure events by setting
# the event_mask
self.window = gdk.Window(
self.get_parent_window(),
width=self.allocation.width,
height=self.allocation.height,
window_type=gdk.WINDOW_CHILD,
wclass=gdk.INPUT_OUTPUT,
event_mask=self.get_events() | gdk.EXPOSURE_MASK | gdk.BUTTON_PRESS_MASK)
# Associate the gdk.Window with ourselves, Gtk+ needs a reference
# between the widget and the gdk window
self.window.set_user_data(self)
# Attach the style to the gdk.Window, a style contains colors and
# GC contextes used for drawing
self.style.attach(self.window)
# The default color of the background should be what
# the style (theme engine) tells us.
self.style.set_background(self.window, gtk.STATE_NORMAL)
self.window.move_resize(*self.allocation)
def do_size_allocate(self, allocation):
print "####allocation", allocation
self.allocation = allocation
if self.flags() & gtk.REALIZED:
self.window.move_resize(*allocation)
self._do_layout()
def do_size_request(self, requisition):
if self._changed:
print "####request", requisition.width, requisition.height
requisition.width = self.get_right_edge(-1)
requisition.height = self.height
self._changed = False
def do_forall(self, internal, callback, data):
#print internal, callback, data
for w, button in self.columns:
callback(button, data)
gobject.type_register(Header)
class HeaderView(gtk.VBox):
    # Composite widget: the column-header row stacked above the grid body,
    # wired into a ScrolledWindow through the set-scroll-adjustments signal.
    __gsignals__ = dict(set_scroll_adjustments=
        (gobject.SIGNAL_RUN_LAST, None,
        (gtk.Adjustment, gtk.Adjustment)))
    def __init__(self, header, gridview):
        gtk.VBox.__init__(self)
        self.header = header
        self.gridview = gridview
        # Adjustments arrive later via do_set_scroll_adjustments.
        self.hadjustment = None
        self.vadjustment = None
        vbox = self
        vbox.pack_start(header, expand=False)
        vbox.pack_start(gridview, expand=True)
    def h_value_changed(self, adjustment):
        # Horizontal scrolling moves both the header and the grid body.
        print "h=", adjustment.value
        s = int(adjustment.value)
        self.header.set_scroll(s)
        self.gridview.set_h_scroll(s)
        pass
    def v_value_changed(self, adjustment):
        # Vertical scrolling affects only the grid body; the header stays put.
        print "v=", adjustment.value
        s = int(adjustment.value)
        self.gridview.set_v_scroll(s)
        pass
    def do_size_allocate(self, allocation):
        gtk.VBox.do_size_allocate(self, allocation)
        # Keep the scrollbar page sizes in sync with the visible area.
        self.hadjustment.page_size = allocation.width
        self.vadjustment.page_size = self.gridview.allocation.height
    def do_set_scroll_adjustments(self, hadjustment, vadjustment):
        # Called by the enclosing ScrolledWindow to hand us its adjustments;
        # the ranges come from the grid's virtual width/height.
        self.hadjustment = hadjustment
        self.vadjustment = vadjustment
        hadjustment.set_all(0, 0, self.gridview.width, 1, 10, self.allocation.width)
        vadjustment.set_all(0, 0, self.gridview.height, 1, 10, self.allocation.height)
        hadjustment.connect("value-changed", self.h_value_changed)
        vadjustment.connect("value-changed", self.v_value_changed)
HeaderView.set_set_scroll_adjustments_signal('set-scroll-adjustments')
if __name__ == '__main__':
    # Demo: browse the `files` table of the local SQLite database `test`
    # in the custom header + grid widgets defined above.
    H = Header()
    c = sqlite.connect('test')
    r = c.execute("SELECT * FROM files")
    columns = []
    # One fixed-width text column per field in the result set.  NOTE(review):
    # DrawGrid.do_layout reads this module-level `columns` list.
    for i, item in enumerate(r.description):
        renderer = gtk.CellRendererText()
        w = 150
        columns.append((w, renderer))
        H.add_column(item[0], w)
    #H.do_layout()
    r.close()
    # l = List(1000000)
    l = SQLiteQueryList(c, 'files', fetch_count=64)
    v = DrawGrid(columns, l)
    H.grid_view = v
    HV = HeaderView(H, v)
    sw = gtk.ScrolledWindow()
    sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
    sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    sw.set_size_request(840, 400)
    sw.add(HV)
    def print_cell_renderer(*args):
        # Debug helper: dump each renderer's size and the header allocation.
        for w, r in columns:
            print r.get_size(v)
        print H.allocation
    b1 = gtk.Button("")
    b1.connect("clicked", print_cell_renderer)
    d = gtk.Dialog()
    d.vbox.pack_start(sw, expand=True, fill=True)
    d.vbox.pack_start(b1, expand=False, fill=False)
    d.show_all()
    # run
    d.run()
    print "rows fetched: %d of %d" % (len(l.L), l.count)
# vim: ts=4 sw=4
|
|
import random
from collections import namedtuple
import pytest
from markupsafe import Markup
from jinja2 import Environment
from jinja2 import StrictUndefined
from jinja2 import TemplateRuntimeError
from jinja2 import UndefinedError
from jinja2.exceptions import TemplateAssertionError
class Magic:
    """Test helper: wraps a single value and renders it via ``str()``."""

    def __init__(self, value):
        # Store the raw value; conversion happens lazily in __str__.
        self.value = value

    def __str__(self):
        return str(self.value)
class Magic2:
    """Test helper: pairs two values and renders them as ``(a,b)``."""

    def __init__(self, value1, value2):
        # Keep both values as attributes for the templates to inspect.
        self.value1, self.value2 = value1, value2

    def __str__(self):
        return "({0},{1})".format(self.value1, self.value2)
class TestFilter:
def test_filter_calling(self, env):
rv = env.call_filter("sum", [1, 2, 3])
assert rv == 6
def test_capitalize(self, env):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == "Foo bar"
def test_center(self, env):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == " foo "
def test_default(self, env):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given="yes") == "no|False|no|yes"
@pytest.mark.parametrize(
"args,expect",
(
("", "[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]"),
("true", "[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]"),
('by="value"', "[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]"),
("reverse=true", "[('c', 2), ('b', 1), ('AB', 3), ('aa', 0)]"),
),
)
def test_dictsort(self, env, args, expect):
t = env.from_string(f"{{{{ foo|dictsort({args}) }}}}")
out = t.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == expect
def test_batch(self, env):
tmpl = env.from_string("{{ foo|batch(3)|list }}|{{ foo|batch(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == (
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]"
)
def test_slice(self, env):
tmpl = env.from_string("{{ foo|slice(3)|list }}|{{ foo|slice(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == (
"[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]"
)
def test_escape(self, env):
tmpl = env.from_string("""{{ '<">&'|escape }}""")
out = tmpl.render()
assert out == "<">&"
@pytest.mark.parametrize(
("chars", "expect"), [(None, "..stays.."), (".", " ..stays"), (" .", "stays")]
)
def test_trim(self, env, chars, expect):
tmpl = env.from_string("{{ foo|trim(chars) }}")
out = tmpl.render(foo=" ..stays..", chars=chars)
assert out == expect
def test_striptags(self, env):
tmpl = env.from_string("""{{ foo|striptags }}""")
out = tmpl.render(
foo=' <p>just a small \n <a href="#">'
"example</a> link</p>\n<p>to a webpage</p> "
"<!-- <p>and some commented stuff</p> -->"
)
assert out == "just a small example link to a webpage"
def test_filesizeformat(self, env):
tmpl = env.from_string(
"{{ 100|filesizeformat }}|"
"{{ 1000|filesizeformat }}|"
"{{ 1000000|filesizeformat }}|"
"{{ 1000000000|filesizeformat }}|"
"{{ 1000000000000|filesizeformat }}|"
"{{ 100|filesizeformat(true) }}|"
"{{ 1000|filesizeformat(true) }}|"
"{{ 1000000|filesizeformat(true) }}|"
"{{ 1000000000|filesizeformat(true) }}|"
"{{ 1000000000000|filesizeformat(true) }}"
)
out = tmpl.render()
assert out == (
"100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|"
"1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB"
)
def test_filesizeformat_issue59(self, env):
tmpl = env.from_string(
"{{ 300|filesizeformat }}|"
"{{ 3000|filesizeformat }}|"
"{{ 3000000|filesizeformat }}|"
"{{ 3000000000|filesizeformat }}|"
"{{ 3000000000000|filesizeformat }}|"
"{{ 300|filesizeformat(true) }}|"
"{{ 3000|filesizeformat(true) }}|"
"{{ 3000000|filesizeformat(true) }}"
)
out = tmpl.render()
assert out == (
"300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|2.9 KiB|2.9 MiB"
)
def test_first(self, env):
tmpl = env.from_string("{{ foo|first }}")
out = tmpl.render(foo=list(range(10)))
assert out == "0"
@pytest.mark.parametrize(
("value", "expect"), (("42", "42.0"), ("abc", "0.0"), ("32.32", "32.32"))
)
def test_float(self, env, value, expect):
t = env.from_string("{{ value|float }}")
assert t.render(value=value) == expect
def test_float_default(self, env):
t = env.from_string("{{ value|float(default=1.0) }}")
assert t.render(value="abc") == "1.0"
def test_format(self, env):
tmpl = env.from_string("{{ '%s|%s'|format('a', 'b') }}")
out = tmpl.render()
assert out == "a|b"
@staticmethod
def _test_indent_multiline_template(env, markup=False):
text = "\n".join(["", "foo bar", '"baz"', ""])
if markup:
text = Markup(text)
t = env.from_string("{{ foo|indent(2, false, false) }}")
assert t.render(foo=text) == '\n foo bar\n "baz"\n'
t = env.from_string("{{ foo|indent(2, false, true) }}")
assert t.render(foo=text) == '\n foo bar\n "baz"\n '
t = env.from_string("{{ foo|indent(2, true, false) }}")
assert t.render(foo=text) == ' \n foo bar\n "baz"\n'
t = env.from_string("{{ foo|indent(2, true, true) }}")
assert t.render(foo=text) == ' \n foo bar\n "baz"\n '
def test_indent(self, env):
self._test_indent_multiline_template(env)
t = env.from_string('{{ "jinja"|indent }}')
assert t.render() == "jinja"
t = env.from_string('{{ "jinja"|indent(first=true) }}')
assert t.render() == " jinja"
t = env.from_string('{{ "jinja"|indent(blank=true) }}')
assert t.render() == "jinja"
def test_indent_markup_input(self, env):
"""
Tests cases where the filter input is a Markup type
"""
self._test_indent_multiline_template(env, markup=True)
def test_indent_width_string(self, env):
t = env.from_string("{{ 'jinja\nflask'|indent(width='>>> ', first=True) }}")
assert t.render() == ">>> jinja\n>>> flask"
@pytest.mark.parametrize(
("value", "expect"),
(
("42", "42"),
("abc", "0"),
("32.32", "32"),
("12345678901234567890", "12345678901234567890"),
),
)
def test_int(self, env, value, expect):
t = env.from_string("{{ value|int }}")
assert t.render(value=value) == expect
@pytest.mark.parametrize(
("value", "base", "expect"),
(("0x4d32", 16, "19762"), ("011", 8, "9"), ("0x33Z", 16, "0")),
)
def test_int_base(self, env, value, base, expect):
t = env.from_string("{{ value|int(base=base) }}")
assert t.render(value=value, base=base) == expect
def test_int_default(self, env):
t = env.from_string("{{ value|int(default=1) }}")
assert t.render(value="abc") == "1"
def test_int_special_method(self, env):
class IntIsh:
def __int__(self):
return 42
t = env.from_string("{{ value|int }}")
assert t.render(value=IntIsh()) == "42"
def test_join(self, env):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == "1|2|3"
env2 = Environment(autoescape=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render() == "<foo><span>foo</span>"
def test_join_attribute(self, env):
User = namedtuple("User", "username")
tmpl = env.from_string("""{{ users|join(', ', 'username') }}""")
assert tmpl.render(users=map(User, ["foo", "bar"])) == "foo, bar"
def test_last(self, env):
tmpl = env.from_string("""{{ foo|last }}""")
out = tmpl.render(foo=list(range(10)))
assert out == "9"
def test_length(self, env):
tmpl = env.from_string("""{{ "hello world"|length }}""")
out = tmpl.render()
assert out == "11"
def test_lower(self, env):
tmpl = env.from_string("""{{ "FOO"|lower }}""")
out = tmpl.render()
assert out == "foo"
def test_items(self, env):
d = {i: c for i, c in enumerate("abc")}
tmpl = env.from_string("""{{ d|items|list }}""")
out = tmpl.render(d=d)
assert out == "[(0, 'a'), (1, 'b'), (2, 'c')]"
def test_items_undefined(self, env):
tmpl = env.from_string("""{{ d|items|list }}""")
out = tmpl.render()
assert out == "[]"
def test_pprint(self, env):
from pprint import pformat
tmpl = env.from_string("""{{ data|pprint }}""")
data = list(range(1000))
assert tmpl.render(data=data) == pformat(data)
def test_random(self, env, request):
# restore the random state when the test ends
state = random.getstate()
request.addfinalizer(lambda: random.setstate(state))
# generate the random values from a known seed
random.seed("jinja")
expected = [random.choice("1234567890") for _ in range(10)]
# check that the random sequence is generated again by a template
# ensures that filter result is not constant folded
random.seed("jinja")
t = env.from_string('{{ "1234567890"|random }}')
for value in expected:
assert t.render() == value
def test_reverse(self, env):
tmpl = env.from_string(
"{{ 'foobar'|reverse|join }}|{{ [1, 2, 3]|reverse|list }}"
)
assert tmpl.render() == "raboof|[3, 2, 1]"
def test_string(self, env):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string("""{{ obj|string }}""")
assert tmpl.render(obj=x) == str(x)
def test_title(self, env):
tmpl = env.from_string("""{{ "foo bar"|title }}""")
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string("""{{ "foo's bar"|title }}""")
assert tmpl.render() == "Foo's Bar"
tmpl = env.from_string("""{{ "foo bar"|title }}""")
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string("""{{ "f bar f"|title }}""")
assert tmpl.render() == "F Bar F"
tmpl = env.from_string("""{{ "foo-bar"|title }}""")
assert tmpl.render() == "Foo-Bar"
tmpl = env.from_string("""{{ "foo\tbar"|title }}""")
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string("""{{ "FOO\tBAR"|title }}""")
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string("""{{ "foo (bar)"|title }}""")
assert tmpl.render() == "Foo (Bar)"
tmpl = env.from_string("""{{ "foo {bar}"|title }}""")
assert tmpl.render() == "Foo {Bar}"
tmpl = env.from_string("""{{ "foo [bar]"|title }}""")
assert tmpl.render() == "Foo [Bar]"
tmpl = env.from_string("""{{ "foo <bar>"|title }}""")
assert tmpl.render() == "Foo <Bar>"
class Foo:
def __str__(self):
return "foo-bar"
tmpl = env.from_string("""{{ data|title }}""")
out = tmpl.render(data=Foo())
assert out == "Foo-Bar"
def test_truncate(self, env):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
"{{ smalldata|truncate(15) }}"
)
out = tmpl.render(data="foobar baz bar" * 1000, smalldata="foobar baz bar")
assert out == "foobar baz b>>>|foobar baz>>>|foobar baz bar"
def test_truncate_very_short(self, env):
tmpl = env.from_string(
'{{ "foo bar baz"|truncate(9) }}|{{ "foo bar baz"|truncate(9, true) }}'
)
out = tmpl.render()
assert out == "foo bar baz|foo bar baz"
    # truncate(7, true): killwords=True cuts mid-word before appending "...".
    def test_truncate_end_length(self, env):
        tmpl = env.from_string('{{ "Joel is a slug"|truncate(7, true) }}')
        out = tmpl.render()
        assert out == "Joel..."
    # |upper folds the whole string to upper case.
    def test_upper(self, env):
        tmpl = env.from_string('{{ "foo"|upper }}')
        assert tmpl.render() == "FOO"
    # |urlize linkifies bare domains, full URLs, mailto: URIs and plain
    # e-mail addresses; http(s) anchors carry rel="noopener" by default.
    def test_urlize(self, env):
        tmpl = env.from_string('{{ "foo example.org bar"|urlize }}')
        assert tmpl.render() == (
            'foo <a href="https://example.org" rel="noopener">' "example.org</a> bar"
        )
        tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
        assert tmpl.render() == (
            'foo <a href="http://www.example.com/" rel="noopener">'
            "http://www.example.com/</a> bar"
        )
        tmpl = env.from_string('{{ "foo mailto:email@example.com bar"|urlize }}')
        assert tmpl.render() == (
            'foo <a href="mailto:email@example.com">email@example.com</a> bar'
        )
        tmpl = env.from_string('{{ "foo email@example.com bar"|urlize }}')
        assert tmpl.render() == (
            'foo <a href="mailto:email@example.com">email@example.com</a> bar'
        )
    # Setting the "urlize.rel" policy to None drops the rel attribute.
    def test_urlize_rel_policy(self):
        env = Environment()
        env.policies["urlize.rel"] = None
        tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
        assert tmpl.render() == (
            'foo <a href="http://www.example.com/">http://www.example.com/</a> bar'
        )
    # urlize(target=...) adds a target attribute to generated anchors.
    def test_urlize_target_parameter(self, env):
        tmpl = env.from_string(
            '{{ "foo http://www.example.com/ bar"|urlize(target="_blank") }}'
        )
        assert (
            tmpl.render()
            == 'foo <a href="http://www.example.com/" rel="noopener" target="_blank">'
            "http://www.example.com/</a> bar"
        )
    # urlize(extra_schemes=[...]) linkifies additional URI schemes.
    def test_urlize_extra_schemes_parameter(self, env):
        tmpl = env.from_string(
            '{{ "foo tel:+1-514-555-1234 ftp://localhost bar"|'
            'urlize(extra_schemes=["tel:", "ftp:"]) }}'
        )
        assert tmpl.render() == (
            'foo <a href="tel:+1-514-555-1234" rel="noopener">'
            'tel:+1-514-555-1234</a> <a href="ftp://localhost" rel="noopener">'
            "ftp://localhost</a> bar"
        )
    # |wordcount counts whitespace-separated words; applying it to an
    # undefined value raises under StrictUndefined.
    def test_wordcount(self, env):
        tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
        assert tmpl.render() == "3"
        strict_env = Environment(undefined=StrictUndefined)
        t = strict_env.from_string("{{ s|wordcount }}")
        with pytest.raises(UndefinedError):
            t.render()
def test_block(self, env):
tmpl = env.from_string("{% filter lower|escape %}<HEHE>{% endfilter %}")
assert tmpl.render() == "<hehe>"
def test_chaining(self, env):
tmpl = env.from_string("""{{ ['<foo>', '<bar>']|first|upper|escape }}""")
assert tmpl.render() == "<FOO>"
    # |sum totals a plain numeric sequence.
    def test_sum(self, env):
        tmpl = env.from_string("""{{ [1, 2, 3, 4, 5, 6]|sum }}""")
        assert tmpl.render() == "21"
    # sum('attr') follows an attribute/key on each item before adding.
    def test_sum_attributes(self, env):
        tmpl = env.from_string("""{{ values|sum('value') }}""")
        assert tmpl.render(values=[{"value": 23}, {"value": 1}, {"value": 18}]) == "42"
    # Dotted attribute paths descend into nested mappings.
    def test_sum_attributes_nested(self, env):
        tmpl = env.from_string("""{{ values|sum('real.value') }}""")
        assert (
            tmpl.render(
                values=[
                    {"real": {"value": 23}},
                    {"real": {"value": 1}},
                    {"real": {"value": 18}},
                ]
            )
            == "42"
        )
    # An integer-looking attribute indexes into tuples (here: dict values).
    def test_sum_attributes_tuple(self, env):
        tmpl = env.from_string("""{{ values.items()|sum('1') }}""")
        assert tmpl.render(values={"foo": 23, "bar": 1, "baz": 18}) == "42"
    # |abs returns the magnitude of a number.
    def test_abs(self, env):
        tmpl = env.from_string("""{{ -1|abs }}|{{ 1|abs }}""")
        assert tmpl.render() == "1|1", tmpl.render()
    # |round defaults to "common" rounding; 'floor'/'ceil' force a direction,
    # and the result is always a float.
    def test_round_positive(self, env):
        tmpl = env.from_string(
            "{{ 2.7|round }}|{{ 2.1|round }}|"
            "{{ 2.1234|round(3, 'floor') }}|"
            "{{ 2.1|round(0, 'ceil') }}"
        )
        assert tmpl.render() == "3.0|2.0|2.123|3.0", tmpl.render()
    # A negative precision rounds to tens, hundreds, ...
    def test_round_negative(self, env):
        tmpl = env.from_string(
            "{{ 21.3|round(-1)}}|"
            "{{ 21.3|round(-1, 'ceil')}}|"
            "{{ 21.3|round(-1, 'floor')}}"
        )
        assert tmpl.render() == "20.0|30.0|20.0", tmpl.render()
def test_xmlattr(self, env):
tmpl = env.from_string(
"{{ {'foo': 42, 'bar': 23, 'fish': none, "
"'spam': missing, 'blub:blub': '<?>'}|xmlattr }}"
)
out = tmpl.render().split()
assert len(out) == 3
assert 'foo="42"' in out
assert 'bar="23"' in out
assert 'blub:blub="<?>"' in out
    # |sort ascends by default; a truthy first argument reverses.
    def test_sort1(self, env):
        tmpl = env.from_string("{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}")
        assert tmpl.render() == "[1, 2, 3]|[3, 2, 1]"
    # String sorting is case-insensitive by default (stable for ties).
    def test_sort2(self, env):
        tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
        assert tmpl.render() == "AbcD"
    def test_sort3(self, env):
        tmpl = env.from_string("""{{ ['foo', 'Bar', 'blah']|sort }}""")
        assert tmpl.render() == "['Bar', 'blah', 'foo']"
    # sort(attribute=...) sorts by an item attribute.
    def test_sort4(self, env):
        tmpl = env.from_string("""{{ items|sort(attribute='value')|join }}""")
        assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == "1234"
    # Numeric path segments index into sequences (value[0] here).
    def test_sort5(self, env):
        tmpl = env.from_string("""{{ items|sort(attribute='value.0')|join }}""")
        assert tmpl.render(items=map(Magic, [[3], [2], [4], [1]])) == "[1][2][3][4]"
    # Comma-separated attributes give a compound (primary, secondary) key.
    def test_sort6(self, env):
        tmpl = env.from_string("""{{ items|sort(attribute='value1,value2')|join }}""")
        assert (
            tmpl.render(
                items=map(
                    lambda x: Magic2(x[0], x[1]), [(3, 1), (2, 2), (2, 1), (2, 5)]
                )
            )
            == "(2,1)(2,2)(2,5)(3,1)"
        )
    def test_sort7(self, env):
        tmpl = env.from_string("""{{ items|sort(attribute='value2,value1')|join }}""")
        assert (
            tmpl.render(
                items=map(
                    lambda x: Magic2(x[0], x[1]), [(3, 1), (2, 2), (2, 1), (2, 5)]
                )
            )
            == "(2,1)(3,1)(2,2)(2,5)"
        )
    # Compound keys and index segments can be combined.
    def test_sort8(self, env):
        tmpl = env.from_string(
            """{{ items|sort(attribute='value1.0,value2.0')|join }}"""
        )
        assert (
            tmpl.render(
                items=map(
                    lambda x: Magic2(x[0], x[1]),
                    [([3], [1]), ([2], [2]), ([2], [1]), ([2], [5])],
                )
            )
            == "([2],[1])([2],[2])([2],[5])([3],[1])"
        )
    # |unique keeps the first of each (case-insensitively) equal value.
    def test_unique(self, env):
        t = env.from_string('{{ "".join(["b", "A", "a", "b"]|unique) }}')
        assert t.render() == "bA"
    def test_unique_case_sensitive(self, env):
        t = env.from_string('{{ "".join(["b", "A", "a", "b"]|unique(true)) }}')
        assert t.render() == "bAa"
    # unique(attribute=...) deduplicates on an item attribute.
    def test_unique_attribute(self, env):
        t = env.from_string("{{ items|unique(attribute='value')|join }}")
        assert t.render(items=map(Magic, [3, 2, 4, 1, 2])) == "3241"
    # |min / |max are case-insensitive by default and render an empty
    # sequence as the (undefined -> empty) string.
    @pytest.mark.parametrize(
        "source,expect",
        (
            ('{{ ["a", "B"]|min }}', "a"),
            ('{{ ["a", "B"]|min(case_sensitive=true) }}', "B"),
            ("{{ []|min }}", ""),
            ('{{ ["a", "B"]|max }}', "B"),
            ('{{ ["a", "B"]|max(case_sensitive=true) }}', "a"),
            ("{{ []|max }}", ""),
        ),
    )
    def test_min_max(self, env, source, expect):
        t = env.from_string(source)
        assert t.render() == expect
    # min/max can also compare by an item attribute.
    @pytest.mark.parametrize(("name", "expect"), [("min", "1"), ("max", "9")])
    def test_min_max_attribute(self, env, name, expect):
        t = env.from_string("{{ items|" + name + '(attribute="value") }}')
        assert t.render(items=map(Magic, [5, 1, 9])) == expect
    # |groupby buckets items by a key and yields (grouper, list) pairs.
    def test_groupby(self, env):
        tmpl = env.from_string(
            """
        {%- for grouper, list in [{'foo': 1, 'bar': 2},
                                  {'foo': 2, 'bar': 3},
                                  {'foo': 1, 'bar': 1},
                                  {'foo': 3, 'bar': 4}]|groupby('foo') -%}
            {{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
        {%- endfor %}"""
        )
        assert tmpl.render().split("|") == ["1: 1, 2: 1, 1", "2: 2, 3", "3: 3, 4", ""]
    # An integer key groups tuples by index.
    def test_groupby_tuple_index(self, env):
        tmpl = env.from_string(
            """
        {%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
            {{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
        {%- endfor %}"""
        )
        assert tmpl.render() == "a:1:2|b:1|"
    # Dotted keys descend through nested attributes (date.year).
    def test_groupby_multidot(self, env):
        Date = namedtuple("Date", "day,month,year")
        Article = namedtuple("Article", "title,date")
        articles = [
            Article("aha", Date(1, 1, 1970)),
            Article("interesting", Date(2, 1, 1970)),
            Article("really?", Date(3, 1, 1970)),
            Article("totally not", Date(1, 1, 1971)),
        ]
        tmpl = env.from_string(
            """
        {%- for year, list in articles|groupby('date.year') -%}
            {{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
        {%- endfor %}"""
        )
        assert tmpl.render(articles=articles).split("|") == [
            "1970[aha][interesting][really?]",
            "1971[totally not]",
            "",
        ]
    # groupby(default=...) substitutes the default for missing keys.
    def test_groupby_default(self, env):
        tmpl = env.from_string(
            "{% for city, items in users|groupby('city', default='NY') %}"
            "{{ city }}: {{ items|map(attribute='name')|join(', ') }}\n"
            "{% endfor %}"
        )
        out = tmpl.render(
            users=[
                {"name": "emma", "city": "NY"},
                {"name": "smith", "city": "WA"},
                {"name": "john"},
            ]
        )
        assert out == "NY: emma, john\nWA: smith\n"
    # Grouping is case-insensitive unless case_sensitive=true.
    @pytest.mark.parametrize(
        ("case_sensitive", "expect"),
        [
            (False, "a: 1, 3\nb: 2\n"),
            (True, "A: 3\na: 1\nb: 2\n"),
        ],
    )
    def test_groupby_case(self, env, case_sensitive, expect):
        tmpl = env.from_string(
            "{% for k, vs in data|groupby('k', case_sensitive=cs) %}"
            "{{ k }}: {{ vs|join(', ', attribute='v') }}\n"
            "{% endfor %}"
        )
        out = tmpl.render(
            data=[{"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "A", "v": 3}],
            cs=case_sensitive,
        )
        assert out == expect
    # {% filter %} with a chain: upper first, then replace on the result.
    def test_filtertag(self, env):
        tmpl = env.from_string(
            "{% filter upper|replace('FOO', 'foo') %}foobar{% endfilter %}"
        )
        assert tmpl.render() == "fooBAR"
def test_replace(self, env):
env = Environment()
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string="<foo>") == "<f4242>"
env = Environment(autoescape=True)
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string="<foo>") == "<f4242>"
tmpl = env.from_string('{{ string|replace("<", 42) }}')
assert tmpl.render(string="<foo>") == "42foo>"
tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
assert tmpl.render(string=Markup("foo")) == "f>x<>x<"
def test_forceescape(self, env):
tmpl = env.from_string("{{ x|forceescape }}")
assert tmpl.render(x=Markup("<div />")) == "<div />"
def test_safe(self, env):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
assert tmpl.render() == "<div>foo</div>"
tmpl = env.from_string('{{ "<div>foo</div>" }}')
assert tmpl.render() == "<div>foo</div>"
    # |urlencode percent-encodes strings and form-encodes mappings/pairs;
    # '/' is left alone in plain strings but encoded in form values.
    @pytest.mark.parametrize(
        ("value", "expect"),
        [
            ("Hello, world!", "Hello%2C%20world%21"),
            ("Hello, world\u203d", "Hello%2C%20world%E2%80%BD"),
            ({"f": 1}, "f=1"),
            ([("f", 1), ("z", 2)], "f=1&amp;z=2"),
            ({"\u203d": 1}, "%E2%80%BD=1"),
            ({0: 1}, "0=1"),
            ([("a b/c", "a b/c")], "a+b%2Fc=a+b%2Fc"),
            ("a b/c", "a%20b/c"),
        ],
    )
    def test_urlencode(self, value, expect):
        e = Environment(autoescape=True)
        t = e.from_string("{{ value|urlencode }}")
        assert t.render(value=value) == expect
    # |map("name") applies the named filter to each element.
    def test_simple_map(self, env):
        env = Environment()
        tmpl = env.from_string('{{ ["1", "2", "3"]|map("int")|sum }}')
        assert tmpl.render() == "6"
    def test_map_sum(self, env):
        tmpl = env.from_string('{{ [[1,2], [3], [4,5,6]]|map("sum")|list }}')
        assert tmpl.render() == "[3, 3, 15]"
    # map(attribute=...) extracts an attribute from each element.
    def test_attribute_map(self, env):
        User = namedtuple("User", "name")
        env = Environment()
        users = [
            User("john"),
            User("jane"),
            User("mike"),
        ]
        tmpl = env.from_string('{{ users|map(attribute="name")|join("|") }}')
        assert tmpl.render(users=users) == "john|jane|mike"
    # Mapping over an undefined/none value yields an empty sequence.
    def test_empty_map(self, env):
        env = Environment()
        tmpl = env.from_string('{{ none|map("upper")|list }}')
        assert tmpl.render() == "[]"
    # map(..., default=...) substitutes the default only for *missing*
    # attributes; an explicit None value is kept.
    def test_map_default(self, env):
        Fullname = namedtuple("Fullname", "firstname,lastname")
        Firstname = namedtuple("Firstname", "firstname")
        env = Environment()
        tmpl = env.from_string(
            '{{ users|map(attribute="lastname", default="smith")|join(", ") }}'
        )
        test_list = env.from_string(
            '{{ users|map(attribute="lastname", default=["smith","x"])|join(", ") }}'
        )
        test_str = env.from_string(
            '{{ users|map(attribute="lastname", default="")|join(", ") }}'
        )
        users = [
            Fullname("john", "lennon"),
            Fullname("jane", "edwards"),
            Fullname("jon", None),
            Firstname("mike"),
        ]
        assert tmpl.render(users=users) == "lennon, edwards, None, smith"
        assert test_list.render(users=users) == "lennon, edwards, None, ['smith', 'x']"
        assert test_str.render(users=users) == "lennon, edwards, None, "
    # |select keeps elements passing the named test (or truthy ones).
    def test_simple_select(self, env):
        env = Environment()
        tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|select("odd")|join("|") }}')
        assert tmpl.render() == "1|3|5"
    def test_bool_select(self, env):
        env = Environment()
        tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|select|join("|") }}')
        assert tmpl.render() == "1|2|3|4|5"
    # |reject drops elements passing the named test (or truthy ones).
    def test_simple_reject(self, env):
        env = Environment()
        tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|reject("odd")|join("|") }}')
        assert tmpl.render() == "2|4"
    def test_bool_reject(self, env):
        env = Environment()
        tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|reject|join("|") }}')
        assert tmpl.render() == "None|False|0"
    # selectattr/rejectattr filter on an attribute's truthiness ...
    def test_simple_select_attr(self, env):
        User = namedtuple("User", "name,is_active")
        env = Environment()
        users = [
            User("john", True),
            User("jane", True),
            User("mike", False),
        ]
        tmpl = env.from_string(
            '{{ users|selectattr("is_active")|map(attribute="name")|join("|") }}'
        )
        assert tmpl.render(users=users) == "john|jane"
    def test_simple_reject_attr(self, env):
        User = namedtuple("User", "name,is_active")
        env = Environment()
        users = [
            User("john", True),
            User("jane", True),
            User("mike", False),
        ]
        tmpl = env.from_string(
            '{{ users|rejectattr("is_active")|map(attribute="name")|join("|") }}'
        )
        assert tmpl.render(users=users) == "mike"
    # ... or on a named test applied to the attribute.
    def test_func_select_attr(self, env):
        User = namedtuple("User", "id,name")
        env = Environment()
        users = [
            User(1, "john"),
            User(2, "jane"),
            User(3, "mike"),
        ]
        tmpl = env.from_string(
            '{{ users|selectattr("id", "odd")|map(attribute="name")|join("|") }}'
        )
        assert tmpl.render(users=users) == "john|mike"
    def test_func_reject_attr(self, env):
        User = namedtuple("User", "id,name")
        env = Environment()
        users = [
            User(1, "john"),
            User(2, "jane"),
            User(3, "mike"),
        ]
        tmpl = env.from_string(
            '{{ users|rejectattr("id", "odd")|map(attribute="name")|join("|") }}'
        )
        assert tmpl.render(users=users) == "jane"
    # |tojson escapes HTML-sensitive characters as \uXXXX sequences and can
    # be redirected to a custom dump function via policies.
    def test_json_dump(self):
        env = Environment(autoescape=True)
        t = env.from_string("{{ x|tojson }}")
        assert t.render(x={"foo": "bar"}) == '{"foo": "bar"}'
        assert t.render(x="\"ba&r'") == r'"\"ba\u0026r\u0027"'
        assert t.render(x="<bar>") == r'"\u003cbar\u003e"'
        def my_dumps(value, **options):
            # The policy kwargs are forwarded to the dump function.
            assert options == {"foo": "bar"}
            return "42"
        env.policies["json.dumps_function"] = my_dumps
        env.policies["json.dumps_kwargs"] = {"foo": "bar"}
        assert t.render(x=23) == "42"
    # |wordwrap re-wraps to the given width, preserving existing newlines.
    def test_wordwrap(self, env):
        env.newline_sequence = "\n"
        t = env.from_string("{{ s|wordwrap(20) }}")
        result = t.render(s="Hello!\nThis is Jinja saying something.")
        assert result == "Hello!\nThis is Jinja saying\nsomething."
    # An unknown filter fails at compile time when the expression is
    # unconditional ...
    def test_filter_undefined(self, env):
        with pytest.raises(TemplateAssertionError, match="No filter named 'f'"):
            env.from_string("{{ var|f }}")
    # ... but only at runtime when the branch using it may be skipped.
    def test_filter_undefined_in_if(self, env):
        t = env.from_string("{%- if x is defined -%}{{ x|f }}{%- else -%}x{% endif %}")
        assert t.render() == "x"
        with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
            t.render(x=42)
    def test_filter_undefined_in_elif(self, env):
        t = env.from_string(
            "{%- if x is defined -%}{{ x }}{%- elif y is defined -%}"
            "{{ y|f }}{%- else -%}foo{%- endif -%}"
        )
        assert t.render() == "foo"
        with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
            t.render(y=42)
    def test_filter_undefined_in_else(self, env):
        t = env.from_string(
            "{%- if x is not defined -%}foo{%- else -%}{{ x|f }}{%- endif -%}"
        )
        assert t.render() == "foo"
        with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
            t.render(x=42)
    def test_filter_undefined_in_nested_if(self, env):
        t = env.from_string(
            "{%- if x is not defined -%}foo{%- else -%}{%- if y "
            "is defined -%}{{ y|f }}{%- endif -%}{{ x }}{%- endif -%}"
        )
        assert t.render() == "foo"
        assert t.render(x=42) == "42"
        with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
            t.render(x=24, y=42)
    def test_filter_undefined_in_condexpr(self, env):
        t1 = env.from_string("{{ x|f if x is defined else 'foo' }}")
        t2 = env.from_string("{{ 'foo' if x is not defined else x|f }}")
        assert t1.render() == t2.render() == "foo"
        with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
            t1.render(x=42)
            t2.render(x=42)
|
|
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
if sys.platform == 'win32':
import wmi
from oslo_log import log as logging
from nova import exception
from nova.i18n import _, _LE
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
class LiveMigrationUtils(object):
    """Drive Hyper-V live migration through the WMI virtualization v2 API.

    Flow (see live_migrate_vm): validate the local migration configuration,
    optionally stage a "planned VM" on the target so physical (iSCSI) disk
    resources can be remapped, then start the VirtualSystemAndStorage
    migration.
    """
    def __init__(self):
        # Local-host helpers; remote-host equivalents are built on demand
        # from the destination host name.
        self._vmutils = vmutilsv2.VMUtilsV2()
        self._volutils = volumeutilsv2.VolumeUtilsV2()
    def _get_conn_v2(self, host='localhost'):
        """Return a WMI connection to *host*'s virtualization v2 namespace.

        Translates the common COM failures into vmutils.HyperVException
        with a friendlier message.
        """
        try:
            return wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
        except wmi.x_wmi as ex:
            LOG.exception(_LE('Get version 2 connection error'))
            if ex.com_error.hresult == -2147217394:
                # 0x8004100E: the v2 namespace is missing on the target,
                # i.e. the host's Hyper-V cannot do this style of migration.
                msg = (_('Live migration is not supported on target host "%s"')
                       % host)
            elif ex.com_error.hresult == -2147023174:
                # 0x800706BA: the RPC server is unavailable.
                msg = (_('Target live migration host "%s" is unreachable')
                       % host)
            else:
                # NOTE(review): ex.message exists on Python 2 only; str(ex)
                # would be required on Python 3.
                msg = _('Live migration failed: %s') % ex.message
            raise vmutils.HyperVException(msg)
    def check_live_migration_config(self):
        """Raise if live migration is disabled or has no listener networks."""
        conn_v2 = self._get_conn_v2()
        migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]
        vsmssds = migration_svc.associators(
            wmi_association_class='Msvm_ElementSettingData',
            wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')
        vsmssd = vsmssds[0]
        if not vsmssd.EnableVirtualSystemMigration:
            raise vmutils.HyperVException(
                _('Live migration is not enabled on this host'))
        if not migration_svc.MigrationServiceListenerIPAddressList:
            raise vmutils.HyperVException(
                _('Live migration networks are not configured on this host'))
    def _get_vm(self, conn_v2, vm_name):
        """Return the single Msvm_ComputerSystem named *vm_name*.

        Raises exception.NotFound when absent, HyperVException on duplicates.
        """
        vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
        n = len(vms)
        if not n:
            raise exception.NotFound(_('VM not found: %s') % vm_name)
        elif n > 1:
            raise vmutils.HyperVException(_('Duplicate VM name found: %s')
                                          % vm_name)
        return vms[0]
    def _destroy_planned_vm(self, conn_v2_remote, planned_vm):
        """Delete a leftover planned VM on the remote host."""
        LOG.debug("Destroying existing remote planned VM: %s",
                  planned_vm.ElementName)
        vs_man_svc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
        (job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_())
        self._vmutils.check_ret_val(ret_val, job_path)
    def _check_existing_planned_vm(self, conn_v2_remote, vm):
        # Make sure that there's not yet a remote planned VM on the target
        # host for this VM
        planned_vms = conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)
        if planned_vms:
            self._destroy_planned_vm(conn_v2_remote, planned_vms[0])
    def _create_remote_planned_vm(self, conn_v2_local, conn_v2_remote,
                                  vm, rmt_ip_addr_list, dest_host):
        """Stage a planned VM for *vm* on *dest_host* and return it."""
        # Staged
        # MigrationType 32770 is the "Staged" migration variant.
        vsmsd = conn_v2_local.query("select * from "
                                    "Msvm_VirtualSystemMigrationSettingData "
                                    "where MigrationType = 32770")[0]
        vsmsd.DestinationIPAddressList = rmt_ip_addr_list
        migration_setting_data = vsmsd.GetText_(1)
        LOG.debug("Creating remote planned VM for VM: %s",
                  vm.ElementName)
        migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
        (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
            ComputerSystem=vm.path_(),
            DestinationHost=dest_host,
            MigrationSettingData=migration_setting_data)
        self._vmutils.check_ret_val(ret_val, job_path)
        return conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)[0]
    def _get_physical_disk_paths(self, vm_name):
        """Map IDE/SCSI controller RASD paths to their mounted disk paths."""
        ide_ctrl_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
        if ide_ctrl_path:
            ide_paths = self._vmutils.get_controller_volume_paths(
                ide_ctrl_path)
        else:
            ide_paths = {}
        scsi_ctrl_path = self._vmutils.get_vm_scsi_controller(vm_name)
        scsi_paths = self._vmutils.get_controller_volume_paths(scsi_ctrl_path)
        # NOTE(review): concatenating dict views with "+" only works on
        # Python 2; Python 3 would need e.g. dict(ide_paths, **scsi_paths).
        return dict(ide_paths.items() + scsi_paths.items())
    def _get_remote_disk_data(self, vmutils_remote, disk_paths, dest_host):
        """Resolve each local iSCSI disk path to its path on *dest_host*.

        Entries whose iSCSI target cannot be determined are skipped with a
        debug message (best effort).
        """
        volutils_remote = volumeutilsv2.VolumeUtilsV2(dest_host)
        disk_paths_remote = {}
        for (rasd_rel_path, disk_path) in disk_paths.items():
            target = self._volutils.get_target_from_disk_path(disk_path)
            if target:
                (target_iqn, target_lun) = target
                dev_num = volutils_remote.get_device_number_for_target(
                    target_iqn, target_lun)
                disk_path_remote = (
                    vmutils_remote.get_mounted_disk_by_drive_number(dev_num))
                disk_paths_remote[rasd_rel_path] = disk_path_remote
            else:
                LOG.debug("Could not retrieve iSCSI target "
                          "from disk path: %s", disk_path)
        return disk_paths_remote
    def _update_planned_vm_disk_resources(self, vmutils_remote, conn_v2_remote,
                                          planned_vm, vm_name,
                                          disk_paths_remote):
        """Point the planned VM's physical disk RASDs at the remote disks."""
        vm_settings = planned_vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')[0]
        updated_resource_setting_data = []
        sasds = vm_settings.associators(
            wmi_association_class='Msvm_VirtualSystemSettingDataComponent')
        for sasd in sasds:
            # ResourceType 17 + this subtype identifies a passthrough
            # (physical) disk drive.
            if (sasd.ResourceType == 17 and sasd.ResourceSubType ==
                "Microsoft:Hyper-V:Physical Disk Drive" and
                sasd.HostResource):
                # Replace the local disk target with the correct remote one
                old_disk_path = sasd.HostResource[0]
                new_disk_path = disk_paths_remote.pop(sasd.path().RelPath)
                LOG.debug("Replacing host resource "
                          "%(old_disk_path)s with "
                          "%(new_disk_path)s on planned VM %(vm_name)s",
                          {'old_disk_path': old_disk_path,
                           'new_disk_path': new_disk_path,
                           'vm_name': vm_name})
                sasd.HostResource = [new_disk_path]
                updated_resource_setting_data.append(sasd.GetText_(1))
        LOG.debug("Updating remote planned VM disk paths for VM: %s",
                  vm_name)
        vsmsvc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
        (res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings(
            ResourceSettings=updated_resource_setting_data)
        vmutils_remote.check_ret_val(ret_val, job_path)
    def _get_vhd_setting_data(self, vm):
        """Collect the serialized VHD storage allocation settings of *vm*."""
        vm_settings = vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')[0]
        new_resource_setting_data = []
        sasds = vm_settings.associators(
            wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
            wmi_result_class='Msvm_StorageAllocationSettingData')
        for sasd in sasds:
            # ResourceType 31 + this subtype identifies a virtual hard disk.
            if (sasd.ResourceType == 31 and sasd.ResourceSubType ==
                "Microsoft:Hyper-V:Virtual Hard Disk"):
                new_resource_setting_data.append(sasd.GetText_(1))
        return new_resource_setting_data
    def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
                         new_resource_setting_data, dest_host):
        """Start the actual migration (VM plus storage) to *dest_host*."""
        # VirtualSystemAndStorage
        # MigrationType 32771 migrates the VM together with its storage.
        vsmsd = conn_v2_local.query("select * from "
                                    "Msvm_VirtualSystemMigrationSettingData "
                                    "where MigrationType = 32771")[0]
        vsmsd.DestinationIPAddressList = rmt_ip_addr_list
        if planned_vm:
            vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name
        migration_setting_data = vsmsd.GetText_(1)
        migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
        LOG.debug("Starting live migration for VM: %s", vm.ElementName)
        (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
            ComputerSystem=vm.path_(),
            DestinationHost=dest_host,
            MigrationSettingData=migration_setting_data,
            NewResourceSettingData=new_resource_setting_data)
        self._vmutils.check_ret_val(ret_val, job_path)
    def _get_remote_ip_address_list(self, conn_v2_remote, dest_host):
        """Return the migration listener IP addresses of the remote host."""
        LOG.debug("Getting live migration networks for remote host: %s",
                  dest_host)
        migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
        return migr_svc_rmt.MigrationServiceListenerIPAddressList
    def live_migrate_vm(self, vm_name, dest_host):
        """Live-migrate *vm_name* to *dest_host*, remapping iSCSI disks if any."""
        self.check_live_migration_config()
        conn_v2_local = self._get_conn_v2()
        conn_v2_remote = self._get_conn_v2(dest_host)
        vm = self._get_vm(conn_v2_local, vm_name)
        self._check_existing_planned_vm(conn_v2_remote, vm)
        rmt_ip_addr_list = self._get_remote_ip_address_list(conn_v2_remote,
                                                            dest_host)
        planned_vm = None
        disk_paths = self._get_physical_disk_paths(vm_name)
        if disk_paths:
            # Physical (iSCSI) disks require a pre-staged planned VM on the
            # target so their host resources can be remapped first.
            vmutils_remote = vmutilsv2.VMUtilsV2(dest_host)
            disk_paths_remote = self._get_remote_disk_data(vmutils_remote,
                                                           disk_paths,
                                                           dest_host)
            planned_vm = self._create_remote_planned_vm(conn_v2_local,
                                                        conn_v2_remote,
                                                        vm, rmt_ip_addr_list,
                                                        dest_host)
            self._update_planned_vm_disk_resources(vmutils_remote,
                                                   conn_v2_remote, planned_vm,
                                                   vm_name, disk_paths_remote)
        new_resource_setting_data = self._get_vhd_setting_data(vm)
        self._live_migrate_vm(conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
                              new_resource_setting_data, dest_host)
|
|
#!/usr/bin/env python3
import sys
# Python 2/3 string-compatibility shims: ``Unicode``/``Bytes`` alias the
# text and binary string types of the running interpreter so the rest of
# the module can test against one pair of names.
PY3 = sys.version_info[0] >= 3
if PY3:
    Unicode, Bytes = str, bytes
else:
    Unicode, Bytes = unicode, str


def to_bytes(v, encoding="utf-8", **kwargs):
    """Coerce *v* to the binary string type, encoding text with *encoding*."""
    if isinstance(v, Unicode):
        return v.encode(encoding)
    if isinstance(v, Bytes):
        return v
    # Non-string values are stringified first, then encoded.
    return to_bytes(str(v), encoding=encoding)


def to_unicode(v, encoding="utf-8", **kwargs):
    """Coerce *v* to the text string type, decoding bytes with *encoding*."""
    if isinstance(v, Unicode):
        return v
    if isinstance(v, Bytes):
        return v.decode(encoding)
    return to_unicode(str(v), encoding=encoding)


# The "native" string type is text on Python 3 and bytes on Python 2.
to_str = to_unicode if PY3 else to_bytes


def is_bytes(s):
    """Return True if *s* is a binary string."""
    return isinstance(s, Bytes)


def is_unicode(s):
    """Return True if *s* is a text string."""
    return isinstance(s, Unicode)


def is_string(s):
    """Return True if *s* is either a binary or a text string."""
    return isinstance(s, (Bytes, Unicode))
# Patch End
##############################################################################
##############################################################################
# Python 2.6 Patch
# Python 2.6 shim: prefer the modern ``argparse`` module; otherwise adapt
# ``optparse`` so callers can keep using the ``argparse`` method names.
try:
    from argparse import ArgumentParser
except ImportError:
    from optparse import OptionParser

    class OptionParserGroupProxy(object):
        """Expose an ``optparse`` option group under the ``argparse`` API."""

        def __init__(self, group):
            self.__group = group

        def __repr__(self):
            return str(self.__group)

        def __getattr__(self, name):
            # Only reached for names the proxy itself lacks; delegate them
            # to the wrapped optparse group.
            return getattr(self.__group, name)

        def add_argument(self, *args, **kwargs):
            # argparse spelling -> optparse implementation.
            return self.__group.add_option(*args, **kwargs)

    class ArgumentParser(OptionParser):
        """Minimal ``argparse``-flavoured facade over ``optparse``."""

        def add_argument_group(self, *args, **kwargs):
            return OptionParserGroupProxy(self.add_option_group(*args, **kwargs))

        def add_argument(self, *args, **kwargs):
            return self.add_option(*args, **kwargs)
##############################################################################
# @Author: xgfone
# @Email: xgfone@126.com
class Configuration(object):
class Group(object):
def __init__(self, group_name):
self.__name = group_name
def __repr__(self):
attrs = []
for key, value in vars(self).items():
if key != "_Group__name":
attrs.append("{0}={1}".format(key, value))
return "{0}({1})".format(self.__class__.__name__, ", ".join(attrs))
def __contains__(self, name):
return hasattr(self, name)
def __getattr__(self, name):
e = "The group '{0}' has no the option '{1}'"
raise AttributeError(e.format(self.__name, name))
def __setitem__(self, name, value):
setattr(self, name, value)
def __getitem__(self, name):
try:
return getattr(self, name)
except AttributeError:
e = "The group '{0}' has no the option '{1}'"
raise KeyError(e.format(self.__name, name))
def items(self):
d = vars(self)
d.pop("_Group__name")
return d.items()
    # Fixed attribute set: no per-instance __dict__.
    __slots__ = ["_default_group_name", "_default_group", "_allow_empty",
                 "_encoding", "_parsed", "_caches", "_opts", "_bool_true",
                 "_bool_false", "_py2", "_description", "_version"]
    def __init__(self, description=None, allow_empty=False, encoding="utf-8",
                 default_group="DEFAULT", version=None):
        """A simple INI-style configuration parser.

        :param description: CLI help text passed to the argument parser.
        :param allow_empty: when False, a registered option without a value
            or default makes parsing fail.
        :param encoding: encoding used to decode byte strings.
        :param default_group: name of the implicit top-level group; its
            options are promoted onto the instance itself after parsing.
        :param version: string printed for ``--version``.

        For options that may be missing, read them with ``getattr()`` (with
        a default) rather than plain attribute access, which raises.
        """
        self._parsed = False                 # flips to True on parse()/parse_files()
        self._description = description
        self._default_group_name = default_group
        self._default_group = Configuration.Group(self._default_group_name)
        self._allow_empty = allow_empty
        self._encoding = encoding
        self._version = version if version else "Unknown"
        # group name -> Group instance (default group pre-created).
        self._caches = {self._default_group_name: self._default_group}
        # group name -> {option name -> (parser, default, help, short)}.
        self._opts = {}
        self._bool_true = ["t", "1", "on", "true"]
        self._bool_false = ["f", "0", "off", "false"]
        try:
            # str.decode only exists on Python 2; use that to detect py2.
            "".decode()
        except AttributeError:
            self._py2 = False
        else:
            self._py2 = True
def __getattr__(self, name):
if not self._parsed:
raise Exception("Not parsed")
try:
return self._caches[name]
except KeyError:
pass
msg = "'{0}' object has no attribute '{1}'"
raise AttributeError(msg.format(self.__class__.__name__, name))
def __getitem__(self, name):
if not self._parsed:
raise Exception("Not parsed")
_name = self._uniformize(name)
try:
return self._caches[_name]
except KeyError:
pass
msg = "'{0}' has no key '{1}'"
raise KeyError(msg.format(self.__class__.__name__, name))
def __repr__(self):
attrs = []
for key, value in self._caches.items():
attrs.append("{0}={1}".format(key, value))
return "{0}({1})".format(self.__class__.__name__, ", ".join(attrs))
def _set_group_opt(self, group_name, opt_name, opt_value, force=False):
gname = group_name if group_name else self._default_group_name
group = self._caches[gname]
if hasattr(group, opt_name) and not force:
e = "The group '{0}' has had the option of '{1}'"
raise ValueError(e.format(gname, opt_name))
setattr(self._caches[gname], opt_name, opt_value)
def _register(self, name, parser, default=None, group=None, help=None, short=None):
if self._parsed:
raise Exception("Have been parsed")
name = self._uniformize(name)
group = self._uniformize(group if group else self._default_group_name)
self._opts.setdefault(group, {})
if name in self._opts[group]:
raise KeyError("The option {0} has been regisetered".format(name))
self._opts[group][name] = (parser, default, help, short)
self._caches.setdefault(group, Configuration.Group(group))
def _parse_int(self, value):
return int(value)
def _parse_float(self, value):
return float(value)
def _parse_bool(self, value):
if isinstance(value, bool):
return value
elif not is_string(value):
return bool(value)
value = value.lower()
if value in self._bool_true:
return True
elif value in self._bool_false:
return False
raise ValueError("invalid bool value '{0}'".format(value))
def _parse_string(self, value):
if self._py2:
if isinstance(value, str):
return value.decode(self._encoding)
else:
if not isinstance(value, str):
return value.decode(self._encoding)
return value
def _parse_ints(self, value):
return self._parse_list(self._parse_int, value)
def _parse_strings(self, value):
return self._parse_list(self._parse_string, value)
def _parse_list(self, parser, value):
if isinstance(value, (list, tuple)):
vs = value
else:
vs = (v.strip() for v in value.split(",") if v.strip())
return tuple((parser(v) for v in vs))
def _uniformize(self, name):
return name.replace("-", "_")
def _unniformize(self, name):
return name.replace("_", "-")
def parsed(self):
"""Return True if it has been parsed, or False."""
return self._parsed
def parse_files(self, filenames=""):
"""Parse the INI configuration files.
The argument is either a string standing for the path of the
configuration file, or a list of them.
"""
if self._parsed:
raise Exception("Have been parsed")
self._parsed = True
if filenames:
if not isinstance(filenames, (list, tuple)):
filenames = self._parse_string(filenames).strip(", ").split(",")
for filename in filenames:
self._parse_file(filename)
self._check_and_fix()
    def _check_and_fix(self):
        # Backfill defaults for registered options that no file or CLI
        # argument supplied; each opt tuple is (parser, default, help, short).
        for gname, opts in self._opts.items():
            group = self._caches[gname]
            for name, opt in opts.items():
                if name in group:
                    continue
                elif opt[1] is not None or opt[0] == self._parse_bool:
                    # Bools are always backfilled (their implicit default is
                    # None/falsy); other types only when a default was given.
                    self._set_group_opt(gname, name, opt[1])
                    continue
                if not self._allow_empty:
                    msg = "The option '{0}' in the group '{1}' has no value."
                    raise ValueError(msg.format(name, gname))
        # Set the options in the default group into self.
        group = self._caches.pop(self._default_group_name)
        for key, value in group.items():
            if key in self._caches:
                # A default-group option name must not shadow a group name.
                msg = "'{0}' had has the value '{1}'"
                raise ValueError(msg.format(self.__class__.__name__, key))
            self._caches[key] = value
    def _parse_file(self, filename):
        """Parse one INI file, storing values only for registered options.

        Supports ``[group]`` sections, ``#``/``;``/``=``-prefixed comment
        lines and backslash continuation lines (joined with newlines).
        """
        filename = str(filename)
        with open(filename) as f:
            lines = f.readlines()
        gname = self._default_group_name
        index, max_index = 0, len(lines)
        while index < max_index:
            line = self._parse_string(lines[index]).strip()
            index += 1
            # Comment
            if not line or line[0] in ("#", "=", ";"):
                continue
            # Group Section
            if line[0] == "[":
                if line[-1] != "]":
                    m = ("the format of the group is wrong, "
                         "which must start with [ and end with ]")
                    raise ValueError(m)
                _gname = line[1:-1]
                if not _gname:
                    raise ValueError("the group name is empty")
                if _gname not in self._caches:
                    # NOTE(review): skipping only the header of an unknown
                    # group leaves ``gname`` at the previous section, so the
                    # unknown group's options are looked up against that
                    # previous group — confirm this is intended.
                    continue
                gname = _gname
                continue
            # Group Option Values
            items = line.split("=", 1)
            if len(items) != 2:
                raise ValueError("the format is wrong, must contain '=': " + line)
            name, value = self._uniformize(items[0].strip()), items[1].strip()
            # Handle the continuation line
            if value[-1:] == "\\":
                values = [value.rstrip("\\").strip()]
                while index < max_index:
                    value = lines[index].strip()
                    values.append(value.rstrip("\\").strip())
                    index += 1
                    if value[-1:] != "\\":
                        break
                value = "\n".join(values)
            # Unregistered options are silently ignored.
            # NOTE(review): ``self._opts[gname]`` raises KeyError when the
            # current group has no registered options at all — verify files
            # with options under an unregistered default group.
            opt = self._opts[gname].get(name, None)
            if opt:
                self._set_group_opt(gname, name, opt[0](value))
    def register_bool(self, name, short=None, default=None, group=None, help=None):
        """Register a bool option; its raw value is parsed with _parse_bool."""
        self._register(name, self._parse_bool, short=short, default=default,
                       group=group, help=help)
    def register_int(self, name, short=None, default=None, group=None, help=None):
        """Register an int option; its raw value is parsed with int()."""
        self._register(name, self._parse_int, short=short, default=default,
                       group=group, help=help)
    def register_float(self, name, short=None, default=None, group=None, help=None):
        """Register a float option; its raw value is parsed with float()."""
        self._register(name, self._parse_float, short=short, default=default,
                       group=group, help=help)
    def register_str(self, name, short=None, default=None, group=None, help=None):
        """Register a string option; its raw value is normalized to text."""
        self._register(name, self._parse_string, short=short, default=default,
                       group=group, help=help)
    def register_int_list(self, name, short=None, default=None, group=None, help=None):
        """Register an option parsed as a tuple of ints (comma-separated)."""
        self._register(name, self._parse_ints, short=short, default=default,
                       group=group, help=help)
    def register_str_list(self, name, short=None, default=None, group=None, help=None):
        """Register an option parsed as a tuple of strings (comma-separated)."""
        self._register(name, self._parse_strings, short=short, default=default,
                       group=group, help=help)
###########################################################################
# Parse CLI
    def parse(self, *args, **kwargs):
        """Alias of parse_cli(); forwards all arguments unchanged."""
        return self.parse_cli(*args, **kwargs)
def parse_cli(self, args=None, config_file_name="config-file"):
"""Parse the cli options."""
if self._parsed:
raise Exception("Have been parsed")
self._parsed = True
if args is None:
args = sys.argv[1:]
if not args:
self._check_and_fix()
return None
gopts, args = self._parser_cli(args, description=self._description,
config_file_name=config_file_name)
if getattr(args, "version", False):
print(self._version)
sys.exit(0)
if config_file_name:
config_file = getattr(args, self._uniformize(config_file_name), "")
for filename in config_file.split(","):
filename = filename.strip()
if filename:
self._parse_file(filename)
for cli_opt, (gname, name) in gopts.items():
opt = self._opts[gname][name]
value = getattr(args, cli_opt, None)
if value is not None:
value = opt[0](value)
if value != opt[1]:
self._set_group_opt(gname, name, value, force=True)
self._check_and_fix()
return args
    def _parser_cli(self, args, description=None, config_file_name=None):
        # Build an argparse parser from the registered options and parse args.
        # Returns (group_opts, namespace) where group_opts maps a uniformized
        # CLI destination name back to its (group name, option name) pair.
        cli = ArgumentParser(description=description)
        if config_file_name:
            cli.add_argument("--" + config_file_name, default="",
                             help="The config file path.")
        cli.add_argument("--version", action="store_true",
                         help="Print the version and exit.")

        group_opts = {}
        for gname, opts in self._opts.items():
            if gname == self._default_group_name:
                # Default-group options live on the top-level parser.
                group = cli
            else:
                group = cli.add_argument_group(gname)

            for name, (parser, default, help, short) in opts.items():
                action = None
                if parser == self._parse_bool:
                    # Bool options become flags; the action flips the default.
                    action = "store_false" if default else "store_true"
                    default = False if default is None else default

                if gname == self._default_group_name:
                    # NOTE(review): _unniformize presumably converts the
                    # internal name to its CLI spelling (dashes), while
                    # _uniformize produces the attribute key — confirm against
                    # their definitions elsewhere in this file.
                    opt_name = self._unniformize(name)
                    opt_key = self._uniformize(name)
                else:
                    # Grouped options are prefixed with the group name.
                    opt_name = self._unniformize("{0}-{1}".format(gname, name))
                    opt_key = self._uniformize(opt_name)
                group_opts[opt_key] = (gname, name)

                short = "-" + short if short and short[0] != "-" else short
                names = [short, "--" + opt_name] if short else ["--" + opt_name]
                group.add_argument(*names, action=action, default=default, help=help)
        return group_opts, cli.parse_args(args=args)
if __name__ == "__main__":
    # Small self-demo: register a few options and parse a fixed argv.
    conf = Configuration()
    conf.register_bool("bool", short="b")
    conf.register_int("int")
    conf.register_str("attr", default="abc", help="opt test")
    conf.register_int("attr", default=None, group="group", help="group test")
    conf.parse_cli(["--group-attr", "456", "--bool", "--int", "123"])
    print("int = {0}, type={1}".format(conf.int, type(conf.int)))
    print("bool = {0}, type={1}".format(conf.bool, type(conf.bool)))
    print("conf.attr = {0}".format(conf.attr))
    print('conf["attr"] = {0}'.format(conf["attr"]))
    print("conf.group.attr = {0}".format(conf.group.attr))
    # Fix: removed stray `print(str)`, which printed the builtin str class
    # (leftover debug output).
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements HappyProcess class that manages happy processes.
#
from __future__ import absolute_import
import os
import psutil
import sys
import time
import math
from happy.Utils import *
from happy.HappyHost import HappyHost
import happy.HappyLinkDelete
class HappyProcess(HappyHost):
    """Manages Happy-spawned process trees via psutil (with 1.x/2.x compat)."""

    def __init__(self, node_id=None):
        HappyHost.__init__(self)

    @staticmethod
    def _process_status(p):
        """Return the status of a psutil.Process across psutil versions.

        psutil >= 2.0 exposes status() as a method; 1.x exposed a plain
        attribute (mirrors the existing create_time compatibility shim).
        """
        try:
            return p.status()
        except TypeError:
            return p.status

    def GetProcessByPID(self, pid, create_time):
        """A helper method for finding the process by PID and creation time. Returns a
        psutils.Process object if there is a process matching the PID, creation
        time tuple, or None if no such process exist.
        """
        if pid is None or create_time is None:
            return None
        try:
            p = psutil.Process(pid)
        except psutil.NoSuchProcess:
            # Fix: the original let NoSuchProcess escape, contradicting the
            # documented contract of returning None.
            return None
        try:
            p_create_time = p.create_time()
        except Exception:
            # psutil 1.x exposed create_time as a plain attribute.
            p_create_time = p.create_time
        # Only accept the process if the creation time matches, guarding
        # against PID reuse.
        return create_time == p_create_time and p or None

    def processExists(self, tag, node_id=None):
        """Return True if the tagged process for node_id is alive (not zombie/dead)."""
        if node_id is None:
            node_id = self.node_id
        pid = self.getNodeProcessPID(tag, node_id)
        create_time = self.getNodeProcessCreateTime(tag, node_id)
        if pid is None:
            return False
        try:
            p = self.GetProcessByPID(pid, create_time)
            # Fix: p.status is a method on psutil >= 2.0; comparing the bound
            # method against the status list made the zombie/dead check a no-op.
            return p is not None and p.is_running() and \
                self._process_status(p) not in [psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD]
        except Exception:
            return False

    def BlockOnProcessPID(self, pid, create_time, timeout=None):
        """Wait for the (pid, create_time) process to exit; kill its tree on timeout."""
        if pid is None:
            return
        p = None
        try:
            p = self.GetProcessByPID(pid, create_time)
            if p is not None and p.is_running() and \
                    self._process_status(p) not in [psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD]:
                val = p.wait(timeout)
                if val is None:
                    self.logger.debug("Process is terminated ")
                else:
                    self.logger.debug("Process is terminated, possibly by os ")
        except psutil.TimeoutExpired:
            self.logger.info("TimeoutExpired happens")
            if p is not None:
                self.logger.info("kill process")
                self.TerminateProcessTree(pid, create_time)
        except Exception:
            self.logger.debug("Process is terminated for unknown reasons")
        return

    def BlockOnProcess(self, tag, node_id=None, timeout=None):
        """Resolve the tagged process for node_id and block on it."""
        if node_id is None:
            node_id = self.node_id
        pid = self.getNodeProcessPID(tag, node_id)
        create_time = self.getNodeProcessCreateTime(tag, node_id)
        if pid is None:
            return
        self.BlockOnProcessPID(pid, create_time, timeout)

    def GetProcessTreeAsList(self, pid, create_time):
        """Return [process] + all of its descendants, or [] if it is gone."""
        try:
            p = self.GetProcessByPID(pid, create_time)
            if p is None:
                return []
            # python psutil 2.x and later expose Process.children() method; the
            # equivalent functionality in psutil 1.2.1 was called get_children()
            try:
                childs = p.children(recursive=True)
            except AttributeError:
                childs = p.get_children(recursive=True)
            # At the time of this writing, get_children returns a list of the
            # children in breadth-first order. All leaves
            # are at the end of the list.
            return [p] + childs
        except Exception:
            return []

    def __wait_procs(self, procs, timeout):
        """Wait up to timeout seconds for procs to die; return those still alive."""
        before = time.time()
        after = before
        alive = procs
        # (old versions of psutil have a bug and return too soon)
        while alive and (after - before) < timeout:
            next_timeout = math.ceil(timeout - (after - before))
            gone, alive = psutil.wait_procs(alive, timeout=next_timeout)
            after = time.time()
            if after < before:
                after = before
        return alive

    def __signal_procs(self, procs, signal):
        """Send the named signal to every process in procs via sudo kill."""
        for c in procs:
            try:
                # We sudo, in case we don't own the process
                cmd = "kill -" + signal + " " + str(c.pid)
                cmd = self.runAsRoot(cmd)
                ret = self.CallAtHost(cmd)
                if (ret != 0):
                    emsg = "Failed to send %s to process with PID %s." % (signal, str(c.pid))
                    self.logger.debug("[%s] HappyProcessStop: %s" % (self.node_id, emsg))
            except Exception:
                emsg = "Failed to send %s to process with PID %s." % (signal, str(c.pid))
                self.logger.debug("[%s] HappyProcessStop: %s" % (self.node_id, emsg))

    def TerminateProcessTree(self, pid, create_time):
        """Gracefully terminate the whole process tree rooted at (pid, create_time).

        Sends SIGUSR1 to the root and all descendants (see TerminateProcesses
        for the escalation policy).  The create_time double-check guards
        against PID reuse between state storage and termination.
        """
        procs = self.GetProcessTreeAsList(pid, create_time)
        self.TerminateProcesses(procs)

    def TerminateProcesses(self, procs):
        """Signal escalation: SIGUSR1, then SIGTERM, then SIGKILL, 30s apart."""
        # first send SIGUSR1
        self.__signal_procs(procs, "SIGUSR1")
        alive = self.__wait_procs(procs, 30)
        if alive:
            # if process ignored SIGUSR1, try sending terminate
            self.__signal_procs(alive, "SIGTERM")
            alive = self.__wait_procs(alive, 30)
        if alive:
            # if process is still around, just kill it
            self.__signal_procs(alive, "SIGKILL")

    def GetProcessByName(self, name):
        """Return all running psutil.Process objects whose name equals name."""
        processlist = []
        # At python.psutil 2.0.0, psutil.get_pid_list() has been replaced
        # by psutil.pids(). Try to access the new one first,
        # If that throws, try the old one.
        try:
            pids = psutil.pids()
        except AttributeError:
            pids = psutil.get_pid_list()
        for pid in pids:
            try:
                p = psutil.Process(pid)
                # Fix: p.name is a method on psutil >= 2.0; the original
                # compared the bound method against a string (always False).
                try:
                    pname = p.name()
                except TypeError:
                    pname = p.name
                if pname == name:
                    processlist.append(p)
            except Exception:
                pass
        return processlist
|
|
# -*- coding: utf-8 -*-
""" Simple Generic Location Tracking System
@copyright: 2011-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from datetime import datetime, timedelta
from gluon import current
try:
from gluon.dal.objects import Table, Rows, Row
except ImportError:
# old web2py
from gluon.dal import Table, Rows, Row
from gluon.html import *
from s3rest import S3Method
__all__ = ("S3Tracker",
"S3CheckInMethod",
"S3CheckOutMethod",
)
UID = "uuid" # field name for UIDs
TRACK_ID = "track_id" # field name for track ID
LOCATION_ID = "location_id" # field name for base location
LOCATION = "gis_location" # location tablename
PRESENCE = "sit_presence" # presence tablename
# =============================================================================
class S3Trackable(object):
    """
        Trackable types instance(s)
    """

    def __init__(self, table=None, tablename=None, record=None, query=None,
                 record_id=None, record_ids=None, rtable=None):
        """
            Constructor:

            @param table: a Table object
            @param tablename: a Str tablename
            @param record: a Row object
            @param query: a Query object
            @param record_id: a record ID (if object is a Table)
            @param record_ids: a list of record IDs (if object is a Table)
                               - these should be in ascending order
            @param rtable: the resource table (for the recursive calls)
        """

        db = current.db
        s3db = current.s3db

        self.records = []

        self.table = s3db.sit_trackable
        self.rtable = rtable

        # (removed large blocks of commented-out legacy dispatch code; the
        # live branches below cover Table/tablename, Row and Query inputs)
        if table or tablename:
            if table:
                tablename = table._tablename
            else:
                table = s3db[tablename]
            fields = self.__get_fields(table)
            if not fields:
                raise SyntaxError("Not a trackable type: %s" % tablename)
            if record_ids:
                query = (table._id.belongs(record_ids))
                limitby = (0, len(record_ids))
                orderby = table._id
            elif record_id:
                query = (table._id == record_id)
                limitby = (0, 1)
                orderby = None
            else:
                query = (table._id > 0)
                limitby = None
                orderby = table._id
            fields = [table[f] for f in fields]
            rows = db(query).select(limitby=limitby, orderby=orderby, *fields)

        elif record:
            fields = self.__get_fields(record)
            if not fields:
                raise SyntaxError("Required fields not present in the row")
            rows = Rows(records=[record], compact=False)

        elif query:
            tablename = db._adapter.get_table(query)
            self.rtable = s3db[tablename]
            fields = self.__get_fields(self.rtable)
            if not fields:
                raise SyntaxError("Not a trackable type: %s" % tablename)
            fields = [self.rtable[f] for f in fields]
            rows = db(query).select(*fields)

        else:
            raise SyntaxError("Invalid parameters")

        # Resolve super-entity rows to their instance records
        records = []
        for r in rows:
            if self.__super_entity(r):
                table = s3db[r.instance_type]
                fields = self.__get_fields(table, super_entity=False)
                if not fields:
                    raise SyntaxError("No trackable type: %s" % table._tablename)
                fields = [table[f] for f in fields]
                query = table[UID] == r[UID]
                row = db(query).select(limitby=(0, 1), *fields).first()
                if row:
                    records.append(row)
            else:
                records.append(r)

        self.records = Rows(records=records, compact=False)

    # -------------------------------------------------------------------------
    @staticmethod
    def __super_entity(trackable):
        """
            Check whether a trackable is a super-entity

            @param trackable: the trackable object
        """

        if hasattr(trackable, "fields"):
            keys = trackable.fields
        else:
            keys = trackable

        return "instance_type" in keys

    # -------------------------------------------------------------------------
    def __get_fields(self, trackable, super_entity=True):
        """
            Check a trackable for presence of required fields

            @param trackable: the trackable object
            @param super_entity: resolve super-entities by (instance_type, UID)
        """

        fields = []

        if hasattr(trackable, "fields"):
            keys = trackable.fields
        else:
            keys = trackable

        try:
            if super_entity and \
               self.__super_entity(trackable) and UID in keys:
                return ("instance_type", UID)
            if LOCATION_ID in keys:
                fields.append(LOCATION_ID)
            if TRACK_ID in keys:
                fields.append(TRACK_ID)
                return fields
            elif hasattr(trackable, "update_record") or \
                 isinstance(trackable, Table) or \
                 isinstance(trackable, Row):
                return fields
        except:
            pass
        return None

    # -------------------------------------------------------------------------
    def get_location(self,
                     timestmp=None,
                     _fields=None,
                     _filter=None,
                     as_rows=False,
                     exclude=None):
        """
            Get the current location of the instance(s) (at the given time)

            @param timestmp: last datetime for presence (defaults to current time)
            @param _fields: fields to retrieve from the location records (None for ALL)
            @param _filter: filter for the locations
            @param as_rows: return the result as Rows object
            @param exclude: interlocks to break at (avoids circular check-ins)
            @return: a location record, or a list of location records (if multiple)

            @ToDo: Also show Timestamp of when seen there
        """

        if exclude is None:
            # Fix: the original used a mutable default argument (exclude=[])
            exclude = []

        db = current.db
        s3db = current.s3db
        ptable = s3db[PRESENCE]
        ltable = s3db[LOCATION]

        if timestmp is None:
            timestmp = datetime.utcnow()

        locations = []
        for r in self.records:
            location = None
            if TRACK_ID in r:
                query = ((ptable.deleted == False) & \
                         (ptable[TRACK_ID] == r[TRACK_ID]) & \
                         (ptable.timestmp <= timestmp))
                presence = db(query).select(orderby=~ptable.timestmp,
                                            limitby=(0, 1)).first()
                if presence:
                    if presence.interlock:
                        # Checked-in to another trackable: recurse into it,
                        # breaking at already-visited track IDs
                        exclude = [r[TRACK_ID]] + exclude
                        tablename, record_id = presence.interlock.split(",", 1)
                        trackable = S3Trackable(tablename=tablename, record_id=record_id)
                        record = trackable.records.first()
                        if TRACK_ID not in record or \
                           record[TRACK_ID] not in exclude:
                            location = trackable.get_location(timestmp=timestmp,
                                                              exclude=exclude,
                                                              _fields=_fields,
                                                              as_rows=True).first()
                    elif presence.location_id:
                        query = (ltable.id == presence.location_id)
                        if _filter is not None:
                            query = query & _filter
                        if _fields is None:
                            location = db(query).select(ltable.ALL,
                                                        limitby=(0, 1)).first()
                        else:
                            location = db(query).select(limitby=(0, 1),
                                                        *_fields).first()

            if not location:
                # Fall back to the base location
                if len(self.records) > 1:
                    trackable = S3Trackable(record=r, rtable=self.rtable)
                else:
                    trackable = self
                location = trackable.get_base_location(_fields=_fields)

            if location:
                locations.append(location)
            else:
                # Ensure we return an entry so that indexes match
                locations.append(Row({"lat": None, "lon": None}))

        if as_rows:
            return Rows(records=locations, compact=False)

        if not locations:
            return None
        else:
            return locations

    # -------------------------------------------------------------------------
    def set_location(self, location, timestmp=None):
        """
            Set the current location of instance(s) (at the given time)

            @param location: the location (as Row or record ID)
            @param timestmp: the datetime of the presence (defaults to current time)
            @return: nothing
        """

        ptable = current.s3db[PRESENCE]

        if timestmp is None:
            timestmp = datetime.utcnow()

        if isinstance(location, S3Trackable):
            location = location.get_base_location()
        if isinstance(location, Rows):
            location = location.first()
        if isinstance(location, Row):
            if "location_id" in location:
                location = location.location_id
            else:
                location = location.id

        if not location:
            return
        else:
            data = dict(location_id=location, timestmp=timestmp)

        for r in self.records:
            if TRACK_ID not in r:
                # No track ID => set base location
                if len(self.records) > 1:
                    trackable = S3Trackable(record=r)
                else:
                    trackable = self
                trackable.set_base_location(location)
            elif r[TRACK_ID]:
                data.update({TRACK_ID:r[TRACK_ID]})
                ptable.insert(**data)
                self.__update_timestamp(r[TRACK_ID], timestmp)

        return location

    # -------------------------------------------------------------------------
    def check_in(self, table, record, timestmp=None):
        """
            Bind the presence of the instance(s) to another instance

            @param table: table name of the other resource
            @param record: record in the other resource (as Row or record ID)
            @param timestmp: datetime of the check-in
            @return: nothing
        """

        db = current.db
        s3db = current.s3db
        ptable = s3db[PRESENCE]

        if isinstance(table, str):
            table = s3db[table]
        fields = self.__get_fields(table)
        if not fields:
            raise SyntaxError("No location data in %s" % table._tablename)

        interlock = None
        if isinstance(record, Rows):
            record = record.first()
        if not isinstance(record, Row):
            record = table[record]
        if self.__super_entity(record):
            # Resolve super-entity record to the instance record ID
            table = s3db[record.instance_type]
            fields = self.__get_fields(table, super_entity=False)
            if not fields:
                raise SyntaxError("No trackable type: %s" % table._tablename)
            query = table[UID] == record[UID]
            record = db(query).select(limitby=(0, 1)).first()
        if record and table._id.name in record:
            record = record[table._id.name]
            if record:
                interlock = "%s,%s" % (table, record)
        else:
            raise SyntaxError("No record specified for %s" % table._tablename)

        if interlock:
            if timestmp is None:
                timestmp = datetime.utcnow()
            data = dict(location_id=None,
                        timestmp=timestmp,
                        interlock=interlock)
            q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
            for r in self.records:
                if TRACK_ID not in r:
                    # Cannot check-in a non-trackable
                    continue
                query = q & (ptable[TRACK_ID] == r[TRACK_ID])
                presence = db(query).select(orderby=~ptable.timestmp,
                                            limitby=(0, 1)).first()
                if presence and presence.interlock == interlock:
                    # already checked-in to the same instance
                    continue
                data.update({TRACK_ID:r[TRACK_ID]})
                ptable.insert(**data)
                self.__update_timestamp(r[TRACK_ID], timestmp)

    # -------------------------------------------------------------------------
    def check_out(self, table=None, record=None, timestmp=None):
        """
            Make the last log entry before timestmp independent from
            the referenced entity (if any)

            @param table: the table of the entity to check out from (optional)
            @param record: the record of that entity (optional)
            @param timestmp: the date/time of the check-out, defaults
                             to current time
        """

        db = current.db
        s3db = current.s3db
        ptable = s3db[PRESENCE]

        if timestmp is None:
            timestmp = datetime.utcnow()

        interlock = None
        if table is not None:
            if isinstance(table, str):
                table = s3db[table]
            if isinstance(record, Rows):
                record = record.first()
            if self.__super_entity(table):
                if not isinstance(record, Row):
                    record = table[record]
                table = s3db[record.instance_type]
                fields = self.__get_fields(table, super_entity=False)
                if not fields:
                    raise SyntaxError("No trackable type: %s" % table._tablename)
                query = table[UID] == record[UID]
                record = db(query).select(limitby=(0, 1)).first()
            if isinstance(record, Row) and table._id.name in record:
                record = record[table._id.name]
            if record:
                interlock = "%s,%s" % (table, record)
            else:
                return

        q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))

        for r in self.records:
            if TRACK_ID not in r:
                # Cannot check-out a non-trackable
                continue
            query = q & (ptable[TRACK_ID] == r[TRACK_ID])
            presence = db(query).select(orderby=~ptable.timestmp,
                                        limitby=(0, 1)).first()
            if presence and presence.interlock:
                if interlock and presence.interlock != interlock:
                    # Checked-in to a different entity
                    continue
                elif not interlock and table and \
                     not presence.interlock.startswith("%s" % table):
                    continue
                tablename, record_id = presence.interlock.split(",", 1)
                trackable = S3Trackable(tablename=tablename, record_id=record_id)
                location = trackable.get_location(_fields=["id"],
                                                  timestmp=timestmp,
                                                  as_rows=True).first()
                if timestmp - presence.timestmp < timedelta(seconds=1):
                    # Ensure the check-out sorts after the check-in
                    timestmp = timestmp + timedelta(seconds=1)
                data = dict(location_id=location.id,
                            timestmp=timestmp,
                            interlock=None)
                data.update({TRACK_ID:r[TRACK_ID]})
                ptable.insert(**data)
                self.__update_timestamp(r[TRACK_ID], timestmp)

    # -------------------------------------------------------------------------
    def remove_location(self, location=None):
        """
            Remove a location from the presence log of the instance(s)

            @todo: implement
        """
        raise NotImplementedError

    # -------------------------------------------------------------------------
    def get_base_location(self,
                          _fields=None,
                          _filter=None,
                          as_rows=False):
        """
            Get the base location of the instance(s)

            @param _fields: fields to retrieve from the location records (None for ALL)
            @param _filter: filter for the locations
            @param as_rows: return the result as Rows object
            @return: the base location(s) of the current instance
        """

        db = current.db
        s3db = current.s3db
        ltable = s3db[LOCATION]
        rtable = self.rtable

        locations = []
        for r in self.records:
            location = None
            query = None
            if LOCATION_ID in r:
                query = (ltable.id == r[LOCATION_ID])
                if rtable:
                    query = query & (rtable[LOCATION_ID] == ltable.id)
                    if TRACK_ID in r:
                        query = query & (rtable[TRACK_ID] == r[TRACK_ID])
            elif TRACK_ID in r:
                # Resolve the instance record via the super-entity
                q = (self.table[TRACK_ID] == r[TRACK_ID])
                trackable = db(q).select(limitby=(0, 1)).first()
                table = s3db[trackable.instance_type]
                if LOCATION_ID in table.fields:
                    query = ((table[TRACK_ID] == r[TRACK_ID]) &
                             (table[LOCATION_ID] == ltable.id))
            if query:
                if _filter is not None:
                    query = query & _filter
                if not _fields:
                    location = db(query).select(ltable.ALL,
                                                limitby=(0, 1)).first()
                else:
                    location = db(query).select(limitby=(0, 1),
                                                *_fields).first()
            if location:
                locations.append(location)
            else:
                # Ensure we return an entry so that indexes match
                locations.append(Row({"lat": None, "lon": None}))

        if as_rows:
            return Rows(records=locations, compact=False)

        if not locations:
            return None
        elif len(locations) == 1:
            return locations[0]
        else:
            return locations

    # -------------------------------------------------------------------------
    def set_base_location(self, location=None):
        """
            Set the base location of the instance(s)

            @param location: the location for the base location as Row or record ID
            @return: nothing

            @note: instance tables without a location_id field will be ignored
        """

        if isinstance(location, S3Trackable):
            location = location.get_base_location()
        if isinstance(location, Rows):
            location = location.first()
        if isinstance(location, Row):
            # Fix: the original called location.get("id", None) without
            # assigning the result, so Row arguments were always rejected
            # by the digit check below
            location = location.get("id", None)

        if not location or not str(location).isdigit():
            # Location not found
            return
        else:
            data = {LOCATION_ID:location}

        # Update records without track ID
        for r in self.records:
            if TRACK_ID in r:
                continue
            elif LOCATION_ID in r:
                if hasattr(r, "update_record"):
                    r.update_record(**data)
                else:
                    raise SyntaxError("Cannot relate record to a table.")

        db = current.db
        s3db = current.s3db

        # Update records with track ID
        # => this can happen table-wise = less queries
        track_ids = [r[TRACK_ID] for r in self.records if TRACK_ID in r]
        rows = db(self.table[TRACK_ID].belongs(track_ids)).select()

        tables = []
        append = tables.append
        types = set()
        seen = types.add
        for r in rows:
            instance_type = r.instance_type
            if instance_type not in types:
                seen(instance_type)
                table = s3db[instance_type]
                if instance_type not in tables and LOCATION_ID in table.fields:
                    append(table)
                else:
                    # No location ID in this type => ignore gracefully
                    continue

        # Location specified => update all base locations
        for table in tables:
            db(table[TRACK_ID].belongs(track_ids)).update(**data)

        # Refresh records
        for r in self.records:
            if LOCATION_ID in r:
                r[LOCATION_ID] = location

        return location

    # -------------------------------------------------------------------------
    def __update_timestamp(self, track_id, timestamp):
        """
            Update the timestamp of a trackable

            @param track_id: the trackable ID (super-entity key)
            @param timestamp: the timestamp
        """

        if timestamp is None:
            timestamp = datetime.utcnow()
        if track_id:
            trackable = self.table[track_id]
            if trackable:
                trackable.update_record(track_timestmp=timestamp)
# =============================================================================
class S3Tracker(object):
    """
        S3 Tracking system, can be instantiated once as global 's3tracker' object
    """

    def __init__(self):
        """
            Constructor (no state: this object is a stateless factory)
        """

    # -------------------------------------------------------------------------
    def __call__(self, table=None, record_id=None, record_ids=None,
                 tablename=None, record=None, query=None):
        """
            Get a tracking interface for a record or set of records

            @param table: a Table object
            @param record_id: a record ID (together with Table or tablename)
            @param record_ids: a list/tuple of record IDs (together with Table or tablename)
            @param tablename: a Str object
            @param record: a Row object
            @param query: a Query object

            @return: a S3Trackable instance for the specified record(s)
        """

        params = dict(table=table,
                      tablename=tablename,
                      record_id=record_id,
                      record_ids=record_ids,
                      record=record,
                      query=query,
                      )
        return S3Trackable(**params)

    # -------------------------------------------------------------------------
    def get_all(self, entity,
                location=None,
                bbox=None,
                timestmp=None):
        """
            Get all instances of the given entity at the given location and time
        """
        raise NotImplementedError

    # -------------------------------------------------------------------------
    def get_checked_in(self, table, record,
                       instance_type=None,
                       timestmp=None):
        """
            Get all trackables of the given type that are checked-in
            to the given instance at the given time
        """
        raise NotImplementedError
# =============================================================================
class S3CheckInMethod(S3Method):
    """
        Custom Method to allow a trackable resource to check-in
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def apply_method(r, **attr):
        """
            Apply method.

            @param r: the S3Request
            @param attr: controller options for this request
        """

        if r.representation == "html":

            T = current.T
            s3db = current.s3db
            response = current.response

            table = r.table
            tracker = S3Trackable(table, record_id=r.id)

            title = T("Check-In")

            get_vars = r.get_vars

            # Are we being passed a location_id?
            location_id = get_vars.get("location_id", None)
            if not location_id:
                # Are we being passed a lat and lon?
                lat = get_vars.get("lat", None)
                if lat is not None:
                    lon = get_vars.get("lon", None)
                    if lon is not None:
                        # Create a new point location from the coordinates
                        form_vars = Storage(lat = float(lat),
                                            lon = float(lon),
                                            )
                        form = Storage(vars=form_vars)
                        s3db.gis_location_onvalidation(form)
                        location_id = s3db.gis_location.insert(**form_vars)

            form = None
            if not location_id:
                # Give the user a form to check-in

                # Test the formstyle
                formstyle = current.deployment_settings.get_ui_formstyle()
                row = formstyle("test", "test", "test", "test")
                if isinstance(row, tuple):
                    # Formstyle with separate row for label (e.g. default Eden formstyle)
                    tuple_rows = True
                else:
                    # Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
                    tuple_rows = False

                form_rows = []
                comment = ""

                _id = "location_id"
                label = LABEL("%s:" % T("Location"))

                from s3.s3widgets import S3LocationSelectorWidget2
                field = table.location_id
                #value = tracker.get_location(_fields=["id"],
                #                             as_rows=True).first().id
                value = None # We always want to create a new Location, not update the existing one
                widget = S3LocationSelectorWidget2()(field, value)

                row = formstyle("%s__row" % _id, label, widget, comment)
                if tuple_rows:
                    form_rows.append(row[0])
                    form_rows.append(row[1])
                else:
                    form_rows.append(row)

                _id = "submit"
                label = ""
                widget = INPUT(_type="submit", _value=T("Check-In"))
                row = formstyle("%s__row" % _id, label, widget, comment)
                if tuple_rows:
                    form_rows.append(row[0])
                    form_rows.append(row[1])
                else:
                    form_rows.append(row)

                if tuple_rows:
                    # Assume TRs
                    form = FORM(TABLE(*form_rows))
                else:
                    form = FORM(*form_rows)

                if form.accepts(current.request.vars, current.session):
                    location_id = form.vars.get("location_id", None)

            if location_id:
                # We're not Checking-in in S3Track terms (that's about interlocking with another object)
                #tracker.check_in()
                #timestmp = form.vars.get("timestmp", None)
                #if timestmp:
                #    # @ToDo: Convert from string
                #    pass
                #tracker.set_location(location_id, timestmp=timestmp)
                tracker.set_location(location_id)
                response.confirmation = T("Checked-In successfully!")

            response.view = "check-in.html"
            output = dict(form = form,
                          title = title,
                          )
            return output

        # @ToDo: JSON representation for check-in from mobile devices
        else:
            raise HTTP(501, current.ERROR.BAD_METHOD)
# =============================================================================
class S3CheckOutMethod(S3Method):
    """
        Custom Method to allow a trackable resource to check-out

        Renders a minimal submit-only form; on submit, the trackable's
        current position is reset to its base location (r.record.location_id).
    """
    # -------------------------------------------------------------------------
    @staticmethod
    def apply_method(r, **attr):
        """
            Apply method.

            @param r: the S3Request
            @param attr: controller options for this request

            @return: dict with "form" and "title" for the check-in.html view
            @raise HTTP: 501 for any representation other than "html"
        """
        if r.representation == "html":
            T = current.T
            s3db = current.s3db
            response = current.response
            tracker = S3Trackable(r.table, record_id=r.id)
            title = T("Check-Out")
            # Give the user a form to check-out
            # Test the formstyle
            # Probe the configured formstyle with dummy values to discover
            # whether it renders label/widget as separate rows (tuple) or one.
            formstyle = current.deployment_settings.get_ui_formstyle()
            row = formstyle("test", "test", "test", "test")
            if isinstance(row, tuple):
                # Formstyle with separate row for label (e.g. default Eden formstyle)
                tuple_rows = True
            else:
                # Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
                tuple_rows = False
            form_rows = []
            comment = ""
            # The form consists of a single submit button
            _id = "submit"
            label = ""
            widget = INPUT(_type="submit", _value=T("Check-Out"))
            row = formstyle("%s__row" % _id, label, widget, comment)
            if tuple_rows:
                form_rows.append(row[0])
                form_rows.append(row[1])
            else:
                form_rows.append(row)
            if tuple_rows:
                # Assume TRs
                form = FORM(TABLE(*form_rows))
            else:
                form = FORM(*form_rows)
            if form.accepts(current.request.vars, current.session):
                # Check-Out
                # We're not Checking-out in S3Track terms (that's about removing an interlock with another object)
                # What we're doing is saying that we're now back at our base location
                #tracker.check_out()
                #timestmp = form_vars.get("timestmp", None)
                #if timestmp:
                #    # @ToDo: Convert from string
                #    pass
                #tracker.set_location(r.record.location_id, timestmp=timestmp)
                tracker.set_location(r.record.location_id)
                response.confirmation = T("Checked-Out successfully!")
            # The check-in view is reused for rendering the check-out form
            response.view = "check-in.html"
            output = dict(form = form,
                          title = title,
                          )
            return output
        # @ToDo: JSON representation for check-out from mobile devices
        else:
            raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
|
|
import sys
import pgmagick
# Names of EXIF / maker-note attributes queried by Image.get_exif_info()
# through GraphicsMagick's "EXIF:<tag>" attribute lookup.  The prefixed
# groups are vendor maker-note tags (DNG, CRS = Camera Raw Settings,
# OL = Olympus, PX = Pentax, EX = Casio Exilim, NK = Nikon, MLT = Minolta,
# SG = Sigma, CN = Canon, FJ = Fujifilm, KC = Konica) — presumably matching
# the upstream tag tables; TODO confirm against the GraphicsMagick sources.
# NOTE(review): some entries look misspelled ("SubSecTimeDegitized",
# "GPSMessureMode") — verify against the tag table before "fixing", since
# the lookup is by exact string.
_EXIF_TAGS = [
    "ImageWidth",
    "ImageLength",
    "BitsPerSample",
    "Compression",
    "PhotometricInterpretation",
    "ImageDescription",
    "Make",
    "Model",
    "StripOffsets",
    "Orientation",
    "SamplesPerPixel",
    "RowsPerStrip",
    "StripByteCounts",
    "XResolution",
    "YResolution",
    "PlanarConfiguration",
    "ResolutionUnit",
    "TransferFunction",
    "CreatorTool",
    "ModifyDate",
    "Artist",
    "WhitePoint",
    "PrimaryChromaticities",
    "JPEGInterchangeFormat",
    "JPEGInterchangeFormatLength",
    "YCbCrCoefficients",
    "YCbCrSubSampling",
    "YCbCrPositioning",
    "ReferenceBlackWhite",
    "Copyright",
    "ExposureTime",
    "FNumber",
    "ExifIFDPointer",
    "ExposureProgram",
    "SpectralSensitivity",
    "GPSInfoIFDPointer",
    "ISOSpeedRatings",
    "OECF",
    "ExifVersion",
    "DateTimeOriginal",
    "MetadataDate",
    "ComponentsConfiguration",
    "CompressedBitsPerPixel",
    "ShutterSpeedValue",
    "ApertureValue",
    "BrightnessValue",
    "ExposureBiasValue",
    "MaxApertureValue",
    "SubjectDistance",
    "MeteringMode",
    "LightSource",
    "Flash",
    "FocalLength",
    "SubjectArea",
    "MakerNote",
    "UserComment",
    "SubSecTime",
    "SubSecTimeOriginal",
    "SubSecTimeDegitized",
    "FlashpixVersion",
    "ColorSpace",
    "PixelXDimension",
    "PixelYDimension",
    "RelatedSoundFile",
    "InteroperabilityIFDPointer",
    "FlashEnergy",
    "SpatialFrequencyResponse",
    "FocalPlaneXResolution",
    "FocalPlaneYResolution",
    "FocalPlaneResolutionUnit",
    "SubjectLocation",
    "ExposureIndex",
    "SensingMethod",
    "FileSource",
    "SceneType",
    "CFAPattern",
    "CustomRendered",
    "ExposureMode",
    "WhiteBalance",
    "DigitalZoomRatio",
    "FocalLengthIn35mmFilm",
    "SceneCaptureType",
    "GainControl",
    "Contrast",
    "Saturation",
    "Sharpness",
    "DeviceSettingDescription",
    "SubjectDistanceRange",
    "0xa420",
    "0xa500",
    "DNGVersion",
    "DNGBackwardVersion",
    "DNGUniqueCameraModel",
    "DNGLocalizedCameraModel",
    "DNGCFAPlaneColor",
    "DNGCFALayout",
    "DNGLinearizationTable",
    "DNGBlackLevelRepeatDim",
    "DNGBlackLevel",
    "DNGBlackLevelDeltaH",
    "DNGBlackLevelDeltaV",
    "DNGWhiteLevel",
    "DNGDefaultscale",
    "DNGDefaultCropOrigin",
    "DNGDefaultCropSize",
    "DNGColorMatrix1",
    "DNGColorMatrix2",
    "DNGCameraCalibration1",
    "DNGCameraCalibration2",
    "DNGReductionMatrix1",
    "DNGReductionMatrix2",
    "DNGAnalogBalance",
    "DNGAsShotNeutral",
    "DNGAsShotWhiteXY",
    "DNGBaselineExposure",
    "DNGBaselineNoise",
    "DNGBaselineSharpness",
    "DNGBayerGreenSplit",
    "DNGLinearResponseLimit",
    "DNGCameraSerialNumber",
    "DNGLensInfo",
    "DNGChromaBlurRadius",
    "DNGAntiAliasStrength",
    "DNGShadowScale",
    "DNGPrivateData",
    "DNGMakerNoteSafety",
    "DNGCalibrationIlluminant1",
    "DNGCalibrationIlluminant2",
    "DNGBestQualityScale",
    "DNGRawDataUniqueID",
    "DNGOriginalRawFileName",
    "DNGOriginalRawFileData",
    "DNGActiveArea",
    "DNGMaskedArea",
    "DNGAsShotICCProfile",
    "DNGAsShotPreProfileMatrix",
    "DNGCurrentICCProfile",
    "DNGCurrentPreProfileMatrix",
    "GPSVersionID",
    "GPSLatitudeRef",
    "GPSLatitude",
    "GPSLongitudeRef",
    "GPSLongitude",
    "GPSAltitudeRef",
    "GPSAltitude",
    "GPSTimeStamp",
    "GPSSatellites",
    "GPSStatus",
    "GPSMessureMode",
    "GPSDOP",
    "GPSSpeedRef",
    "GPSSpeed",
    "GPSTrackRef",
    "GPSTrack",
    "GPSImgDirectionRef",
    "GPSImgDirection",
    "GPSMapDatum",
    "GPSDestLatitudeRef",
    "GPSDestLatitude",
    "GPSDestLongitudeRef",
    "GPSDestLongitude",
    "GPSDestBearingRef",
    "GPSDestBearing",
    "GPSDestDistanceRef",
    "GPSDestDistance",
    "GPSProcessingMethod",
    "GPSAreaInformation",
    "GPSDateStamp",
    "GPSDifferential",
    "InteroperabilityIndex",
    "CRSRawFileName",
    "CRSVersion",
    "CRSWhiteBalance",
    "CRSTemperature",
    "CRSTint",
    "CRSShadowTint",
    "CRSExposure",
    "CRSShadows",
    "CRSBrightness",
    "CRSContrast",
    "CRSSaturation",
    "CRSRedSaturation",
    "CRSGreenSaturation",
    "CRSBlueSaturation",
    "CRSSharpness",
    "CRSLuminanceSmoothing",
    "CRSRedHue",
    "CRSGreenHue",
    "CRSBlueHue",
    "CRSColorNoiseReduction",
    "CRSChromaticAberrationR",
    "CRSChromaticAberrationB",
    "CRSVignetteAmount",
    "CRSLens",
    "CRSSerialNumber",
    "CRSAutoBrightness",
    "CRSAutoShadows",
    "CRSAutoContrast",
    "CRSAutoExposure",
    "OLSpecialMode",
    "OLJpegQuality",
    "OLMacro",
    "OLDigitalZoom",
    "OLSoftwareRelease",
    "OLpictInfo",
    "OLCameraID",
    "OLDataDump",
    "OLFlashMode",
    "OLExposureBias",
    "OLFocusMode",
    "OLFocusDistance",
    "OLZoom",
    "OLMacroFocus",
    "OLSharpness",
    "OLColourMatrix",
    "OLBlackLevel",
    "OLWhiteBalance",
    "OLRedBias",
    "OLBlueBias",
    "OLSerialNumber",
    "OLFlashBias",
    "OLContrast",
    "OLSharpnessFactor",
    "OLColourControl",
    "OLValidBits",
    "OLCoringFilter",
    "OLImageWidth",
    "OLImageHeight",
    "OLCompressionRatio",
    "PXExposureTime",
    "PXFNumber",
    "PXISOSpeed",
    "PXExposureBias",
    "PXWhiteBalance",
    "PXLensID",
    "PXImageTone",
    "EXThumbInfo",
    "EXThumbSize",
    "EXThumbOffset",
    "EXQualityMode",
    "EXImageSize",
    "EXISOSensitivity",
    "EXWhiteBalance",
    "EXFocalLength",
    "EXSaturation",
    "EXContrast",
    "EXSharpness",
    "EXPIM",
    "EXThumbnail",
    "EXWBBias",
    "EXFlash",
    "EXObjectDistance",
    "EXFlashDistance",
    "EXRecordMode",
    "EXSelfTimer",
    "EXQuality",
    "EXFocusMode",
    "EXTimeZone",
    "EXBestshotMode",
    "EXCCDSensitivity",
    "EXColorMode",
    "EXColorEnhance",
    "EXFilter",
    "PXOCaptureMode",
    "PXOQualityLevel",
    "PXOFocusMode",
    "PXOFlashMode",
    "PXOWhiteBalance",
    "PXODigitalZoom",
    "PXOSharpness",
    "PXOContrast",
    "PXOSaturation",
    "PXOISOSpeed",
    "PXOColorMode",
    "PXOTimeZone",
    "PXODaylightSavings",
    "NKISOSetting",
    "NKColorMode",
    "NKQuality",
    "NKWhitebalance",
    "NKSharpness",
    "NKFocusMode",
    "NKFlashSetting",
    "NKFlashMode",
    "NKWhiteBalanceOffset",
    "NKISOselection",
    "NKThumbnailIFDOffset",
    "NKImageAdjustment",
    "NKContrastSetting",
    "NKAdapter",
    "NKLensSetting",
    "NKLensInfo",
    "NKManualFocusDistance",
    "NKDigitalZoom",
    "NKAFFocusPoint",
    "NKShutterMode",
    "NKColorSpace",
    "NKColorOffset",
    "NKNoiseReduction",
    "NKLendID",
    "NKShotCount",
    "NKFinishSetting",
    "NKDigitalImgProg",
    "NKEQuality",
    "NKEColorMode",
    "NKEImageAdjustment",
    "NKECCDSensitivity",
    "NKEWhiteBalance",
    "NKEFocus",
    "NKEDigitalZoom",
    "NKEConverter",
    "MLTMakerNoteVersion",
    "MLTCameraSettingsOld",
    "MLTExposureMode",
    "MLTFlashMode",
    "MLTWhiteBalance",
    "MLTImageSize",
    "MLTImageQuality",
    "MLTDriveMode",
    "MLTMeteringMode",
    "MLTFilmSpeed",
    "MLTShutterSpeed",
    "MLTAperture",
    "MLTMacroMode",
    "MLTDigitalZoom",
    "MLTExposureCompensation",
    "MLTBracketStep",
    "MLTunknown16",
    "MLTIntervalLength",
    "MLTIntervalNumber",
    "MLTFocalLength",
    "MLTFocusDistance",
    "MLTFlashFired",
    "MLTDate",
    "MLTTime",
    "MLTMaxAperture",
    "MLTFileNumberMemory",
    "MLTLastFileNumber",
    "MLTWhiteBalanceRed",
    "MLTWhiteBalanceGreen",
    "MLTWhiteBalanceBlue",
    "MLTSaturation",
    "MLTContrast",
    "MLTSharpness",
    "MLTSubjectProgram",
    "MLTFlashCompensation",
    "MLTISOSetting",
    "MLTCameraModel",
    "MLTIntervalMode",
    "MLTFolderName",
    "MLTColorMode",
    "MLTColorFilter",
    "MLTBWFilter",
    "MLTInternalFlash",
    "MLTBrightnessValue",
    "MLTSpotFocusPointX",
    "MLTSpotFocusPointY",
    "MLTWideFocusZone",
    "MLTFocusMode",
    "MLTFocusArea",
    "MLTDECPosition",
    "MLTComppressImageSize",
    "MLTThumbnail",
    "MLTThumbnailOffset",
    "MLTThumbnailLength",
    "MLTLensID",
    "MLTPIMInformation",
    "MLTCameraSettings",
    "SGSerialID",
    "SGDriveMode",
    "SGImageSize",
    "SGAFMode",
    "SGFocusMode",
    "SGWhiteBalance",
    "SGExposureMode",
    "SGMeteringMode",
    "SGFocalLength",
    "SGColorSpace",
    "SGExposure",
    "SGContrast",
    "SGShadow",
    "SGHighlight",
    "SGSaturation",
    "SGSharpness",
    "SGX3FillLight",
    "SGColorCoordination",
    "SGCustomSettingMode",
    "SGJpegQuality",
    "SGFirmware",
    "SGSoftware",
    "SGAutoBlacket",
    "CNMacroMode",
    "CNSelfTimer",
    "CNFlash",
    "CNDriveMode",
    "CNFocusMode",
    "CNImageSize",
    "CNImageSelect",
    "CNDigitalZoom",
    "CNContrast",
    "CNSaturation",
    "CNSharpness",
    "CNISOSensitive",
    "CNMeteringMode",
    "CNFocusType",
    "CNAFPoint",
    "CNExposurePorgram",
    "CNLensID",
    "CNLensMaximum",
    "CNLensMinimum",
    "CNLensUnit",
    "CNFlashDetailed",
    "CNFocusSetting",
    "CNImageStabilization",
    "CNImageEffect",
    "CNHueBias",
    "CNWhitebalance",
    "CNImageNumber",
    "CNAFPointUsed",
    "CNFlashBias",
    "CNAperture",
    "CNExposure",
    "CNNDFilter",
    "CNImageType",
    "CNFirmware",
    "CNUser",
    "CNSerial",
    "CNNoiseReduction",
    "CNButtunFunction",
    "CNMirrorLockUp",
    "CNShutterStep",
    "CNAFSupliment",
    "CNApexPriority",
    "CNAEFunction",
    "CNShutterSynchro",
    "CNAFStopButton",
    "CNFlashMemLimit",
    "CNMenuPosition",
    "CNSETFunction",
    "CNSensorCleaning",
    "CNColorTemp",
    "CNColorSpace",
    "FJVersion",
    "FJQuality",
    "FJSharpness",
    "FJWhiteBalance",
    "FJColor",
    "FJFlashMode",
    "FJFlashStrength",
    "FJMacro",
    "FJFocusMode",
    "FJSlowSync",
    "FJPictureMode",
    "FJContBlacket",
    "FJBlurWarning",
    "FJFocusWarning",
    "FJAEWarning",
    "KCMode",
    ]
def _convert_colorobj(input_obj):
    """Normalize *input_obj* into a pgmagick.Color.

    Accepts an (r, g, b) sequence, a color-name string, or an existing
    pgmagick.Color (returned unchanged).
    """
    if isinstance(input_obj, (list, tuple)):
        red, green, blue = (int(c) for c in input_obj[:3])
        result = pgmagick.Color(red, green, blue)
    elif isinstance(input_obj, str):
        result = pgmagick.Color(input_obj)
    else:
        result = input_obj
    assert isinstance(result, pgmagick.Color)
    return result
def _convert_coordinatelist(input_obj):
    """convert from 'list' or 'tuple' object to pgmagick.CoordinateList.

    :type input_obj: list or tuple of (x, y) pairs
    """
    coordinates = pgmagick.CoordinateList()
    for pair in input_obj:
        # Only the first two elements of each pair are used.
        coordinates.append(pgmagick.Coordinate(pair[0], pair[1]))
    return coordinates
def _convert_paintmethod(input_obj):
    """Map a paint-method name (or enum) to the pgmagick enum value.

    :param input_obj: 'point', 'replace', 'floodfill', 'filltoborder',
                      'reset', or an existing pgmagick.PaintMethod
    """
    if isinstance(input_obj, pgmagick.PaintMethod):
        return input_obj
    methods = pgmagick.PaintMethod()
    # 'filltoborder' needs special casing: .title() would produce
    # 'Filltoborder', not the camel-cased attribute name.
    if input_obj.lower() == 'filltoborder':
        return methods.FillToBorderMethod
    return getattr(methods, "%sMethod" % input_obj.title())
def _convert_vpathlist(input_obj):
    """convert from 'list' or 'tuple' object to pgmagick.VPathList.

    :type input_obj: list or tuple of (x, y) pairs
    """
    vpath_list = pgmagick.VPathList()
    for pair in input_obj:
        # FIXME: every element is turned into a PathMovetoAbs; other
        # path primitives are not supported yet.
        point = pgmagick.Coordinate(pair[0], pair[1])
        vpath_list.append(pgmagick.PathMovetoAbs(point))
    return vpath_list
class Image(object):
    """Pythonic convenience wrapper around a pgmagick.Image.

    The underlying GraphicsMagick image lives in ``self.img``; methods
    translate friendly Python arguments (tuples, strings) into pgmagick
    API types.  Methods whose body is just ``pass`` are unimplemented
    placeholders.
    """
    @property
    def height(self):
        # Image height in pixels (pgmagick calls this "rows").
        return self.img.rows()
    @property
    def width(self):
        # Image width in pixels (pgmagick calls this "columns").
        return self.img.columns()
    def __init__(self, filename=None, color=None, *args, **kargs):
        """Open *filename*, or create a canvas if a (width, height) pair.

        :param filename: path string, or (width, height) for a new canvas
        :param color: canvas color as (r, g, b), a color-name string, or a
                      'gradient:...'/'plasma:...' pseudo-format string
        """
        self.img = None
        if sys.version_info >= (3, ) and isinstance(filename, (str)):
            self.img = pgmagick.Image(str(filename))
        elif sys.version_info < (3, ) and isinstance(filename, (unicode, str)):
            self.img = pgmagick.Image(str(filename))
        elif isinstance(filename, (list, tuple)):
            # A sequence means "create a blank canvas of this size".
            size = filename
            geometry = pgmagick.Geometry(int(size[0]), int(size[1]))
            if isinstance(color, (list, tuple)):
                r, g, b = int(color[0]), int(color[1]), int(color[2])
                color = pgmagick.Color(r, g, b)
                self.img = pgmagick.Image(geometry, color)
            elif isinstance(color, str):
                if color.find('gradient') == 0 or color.find('plasma') == 0:
                    # Pseudo-formats must be rendered via read(), not Color().
                    self.img = pgmagick.Image(geometry, pgmagick.Color())
                    self.img.read(color)
                else:
                    color = pgmagick.Color(color)
                    self.img = pgmagick.Image(geometry, color)
            else:
                self.img = pgmagick.Image(geometry, pgmagick.Color())
        else:
            self.img = pgmagick.Image()
    def write(self, filename):
        """Write the image to *filename* (format inferred from extension)."""
        self.img.write(str(filename))
    # API of Manipulate An Image
    def adaptive_threshold(self, width, height, offset=0):
        # TODO: not implemented
        pass
    def add_noise(self, noise_type):
        # TODO: not implemented
        pass
    def add_noise_channel(self, channel, noise_type):
        # TODO: not implemented
        pass
    def affine_transform(self, affine):
        # TODO: not implemented
        pass
    def annotate(self, string, position=(0, 0), gravity='center', angle=0):
        """Draw *string* onto the image at *position*/*gravity*/*angle*."""
        position = pgmagick.Geometry(int(position[0]), int(position[1]))
        gravity = getattr(pgmagick.GravityType, "%sGravity" % gravity.title())
        self.img.annotate(string, position, gravity, angle)
    def blur(self, radius=0.0, sigma=1.0):
        # TODO: not implemented
        pass
    def blur_channel(self, channel, radius=0.0, sigma=1.0):
        """Blur a single channel; *channel* may be a name string or enum."""
        if isinstance(channel, str):
            channel = getattr(pgmagick.ChannelType,
                              "%sChannel" % channel.title())
        self.img.blurChannel(channel, radius, sigma)
    def border(self, geometry=None):
        # TODO: not implemented
        pass
    def channel(self, channel):
        # TODO: not implemented
        pass
    def channel_depth(self, channel, depth=None):
        # TODO: not implemented
        pass
    def charcoal(self, radius=0.0, sigma=1.0):
        # TODO: not implemented
        pass
    def chop(self, geometry):
        # TODO: not implemented
        pass
    def colorize(self, opacity_rgb, color):
        # TODO: not implemented
        pass
    def color_matrix(self, order, color_matrix):
        # TODO: not implemented
        pass
    def comment(self, comment):
        # TODO: not implemented
        pass
    def compare(self, reference):
        # TODO: not implemented
        pass
    def composite(self, composite_img, offset,
                  compose=pgmagick.CompositeOperator.InCompositeOp):
        """Composite *composite_img* at *offset* (coords, Geometry or
        gravity) using *compose* (operator enum or name string)."""
        if isinstance(offset, (list, tuple)):
            x = int(offset[0])
            y = int(offset[1])
            offset = pgmagick.Geometry(x, y)
        elif isinstance(offset, pgmagick.Geometry):
            pass
        elif isinstance(offset, str):  # is gravity (string)
            offset = getattr(pgmagick.GravityType,
                             "%sGravity" % offset.title())
        else:  # is gravity (pgmagick.GravityType)
            pass
        if isinstance(compose, pgmagick.CompositeOperator):
            pass
        elif compose.lower() in ('copyblue', 'copygreen', 'copyopacity',
                                 'copyred', 'copycyan', 'copymagenta',
                                 'copyyellow', 'copyblack'):
            # Copy* operators camel-case differently, e.g. CopyBlueCompositeOp.
            color = compose.lower().split('copy')[1].title()
            compose = getattr(pgmagick.CompositeOperator,
                              "Copy%sCompositeOp" % color)
        else:  # other string
            compose = getattr(pgmagick.CompositeOperator,
                              "%sCompositeOp" % compose.title())
        self.img.composite(composite_img, offset, compose)
    def contrast(self, sharpen):
        # TODO: not implemented
        pass
    def convolve(self, order, kernel):
        # TODO: not implemented
        pass
    def crop(self, *geometry):
        """Crop to (width, height, x, y) or to a pgmagick.Geometry."""
        if len(geometry) == 4:
            width, height = geometry[0], geometry[1]
            x, y = geometry[2], geometry[3]
            # NOTE(review): argument order passed to Geometry looks swapped
            # relative to the usual (width, height, xoff, yoff) — verify
            # against the pgmagick.Geometry constructor signature.
            g = pgmagick.Geometry(x, y, width, height)
        elif len(geometry) == 1 and isinstance(geometry[0], pgmagick.Geometry):
            g = geometry[0]
        else:
            raise Exception("not support object", geometry)
        self.img.crop(g)
    def cycle_colormap(self, amount):
        # TODO: not implemented
        pass
    def draw(self, draw_obj):
        """Render a Draw helper, a DrawableList, a list of drawables, or a
        single drawable onto the image."""
        if isinstance(draw_obj, (list, tuple)):
            draw = pgmagick.DrawableList()
            for d in draw_obj:
                draw.append(d)
        elif isinstance(draw_obj, Draw):
            draw = draw_obj.drawer
        else:
            draw = draw_obj
        self.img.draw(draw)
    def edge(self, radius=0.0):
        # TODO: not implemented
        pass
    def emboss(self, radius=0.0, sigma=1.0):
        # TODO: not implemented
        pass
    def floodfill_color(self, point, fill_color):
        # TODO: not implemented
        pass
    def floodfill_opacity(self, point, opacity, method):
        # TODO: not implemented
        pass
    def floodfill_texture(self, point, texture):
        # TODO: not implemented
        pass
    def font(self, font=None):
        """Set the font when *font* is given, otherwise return it."""
        if font:
            self.img.font(font)
        else:
            return self.img.font()
    def scale(self, size, filter_type=None):
        """Scale the image.

        :param size: a float ratio, a (width, height) pair, or a
                     geometry string like '320x240'
        :param filter_type: optional resampling filter name, e.g. 'lanczos'
        """
        if isinstance(size, float):
            scaled_height = self.img.rows() * size
            scaled_width = self.img.columns() * size
            size = "%dx%d" % (int(scaled_width), int(scaled_height))
        elif isinstance(size, (list, tuple)):
            scaled_width, scaled_height = int(size[0]), int(size[1])
            size = "%dx%d" % (int(scaled_width), int(scaled_height))
        if filter_type:
            filter_type = getattr(pgmagick.FilterTypes,
                                  "%sFilter" % filter_type.title())
            pgmagick.Image.filterType(self.img, filter_type)
        geometry = pgmagick.Geometry(size)
        self.img.scale(geometry)
    # API of Set/Get Image
    def font_pointsize(self, point_size=None):
        """Set the font point size when given, otherwise return it."""
        if point_size:
            point_size = float(point_size)
            self.img.fontPointsize(point_size)
        else:
            return self.img.fontPointsize()
    # extra
    def get_exif_info(self):
        """return exif-tag dict

        Queries every name in _EXIF_TAGS via the "EXIF:<tag>" attribute
        and keeps only those with a real (non-'unknown') value.
        """
        _dict = {}
        for tag in _EXIF_TAGS:
            ret = self.img.attribute("EXIF:%s" % tag)
            if ret and ret != 'unknown':
                _dict[tag] = ret
        return _dict
class Draw(object):
    """Builder for a pgmagick.DrawableList.

    Each method appends one drawable primitive or attribute to
    ``self.drawer``; pass the finished object to ``Image.draw()``.
    """
    def __init__(self):
        self.drawer = pgmagick.DrawableList()
    def affine(self, sx, sy, rx, ry, tx, ty):
        """Append an affine transformation (scale/rotate/translate)."""
        affine = pgmagick.DrawableAffine(float(sx), float(sy),
                                         float(rx), float(ry),
                                         float(tx), float(ty))
        self.drawer.append(affine)
    def arc(self, start_x, start_y, end_x, end_y, start_degrees, end_degrees):
        """Append an arc within the bounding box (start, end) points."""
        arc = pgmagick.DrawableArc(float(start_x), float(start_y),
                                   float(end_x), float(end_y),
                                   float(start_degrees), float(end_degrees))
        self.drawer.append(arc)
    def bezier(self, points):
        """Draw a Bezier-curve.

        :param points: ex.) ((5, 5), (6, 6), (7, 7))
        :type points: list
        """
        coordinates = pgmagick.CoordinateList()
        for point in points:
            x, y = float(point[0]), float(point[1])
            coordinates.append(pgmagick.Coordinate(x, y))
        self.drawer.append(pgmagick.DrawableBezier(coordinates))
    def circle(self, origin_x, origin_y, perim_x, perim_y):
        """Append a circle given its origin and a perimeter point."""
        circle = pgmagick.DrawableCircle(float(origin_x), float(origin_y),
                                         float(perim_x), float(perim_y))
        self.drawer.append(circle)
    def color(self, x, y, paint_method):
        """
        :param paint_method: 'point' or 'replace' or 'floodfill' or
                             'filltoborder' or 'reset'
        :type paint_method: str or pgmagick.PaintMethod
        """
        paint_method = _convert_paintmethod(paint_method)
        color = pgmagick.DrawableColor(x, y, paint_method)
        self.drawer.append(color)
    def composite(self, x, y, width, height, image,
                  op=pgmagick.CompositeOperator.OverCompositeOp):
        """Append a composite of *image* at (x, y); width/height of 0
        means "use the image's own size"."""
        # FIXME: unable to composite pgmagick.Image object.
        if width == 0 or height == 0:
            composite = pgmagick.DrawableCompositeImage(x, y, image)
        else:
            composite = pgmagick.DrawableCompositeImage(x, y, width, height,
                                                        image, op)
        self.drawer.append(composite)
    def ellipse(self, org_x, org_y, radius_x, radius_y, arc_start, arc_end):
        """
        :param org_x: origination x axis
        :param org_y: origination y axis
        :param radius_x: radius x axis
        :param radius_y: radius y axis
        :param arc_start: arc start angle
        :param arc_end: arc end angle
        """
        ellipse = pgmagick.DrawableEllipse(float(org_x), float(org_y),
                                           float(radius_x), float(radius_y),
                                           float(arc_start), float(arc_end))
        self.drawer.append(ellipse)
    def fill_color(self, color):
        """Set the fill color; accepts (r, g, b), a name, or a Color."""
        color = _convert_colorobj(color)
        fill_color = pgmagick.DrawableFillColor(color)
        self.drawer.append(fill_color)
    def fill_rule(self, rule='evenodd'):
        """Set the fill rule: 'evenodd', 'nonzero', or a FillRule enum."""
        if rule.lower() == 'evenodd':
            fill_rule = pgmagick.FillRule.EvenOddRule
        elif rule.lower() == 'nonzero':
            fill_rule = pgmagick.FillRule.NonZeroRule
        else:
            fill_rule = rule
        fill_rule = pgmagick.DrawableFillRule(fill_rule)
        self.drawer.append(fill_rule)
    def fill_opacity(self, opacity):
        """
        :param opacity: 0.0 ~ 1.0
        """
        opacity = pgmagick.DrawableFillOpacity(float(opacity))
        self.drawer.append(opacity)
    def font(self, family, style='normal', weight=400, stretch='normal'):
        """Select the font.

        :param family: font family name
        :param style: e.g. 'normal', 'italic', 'oblique'
        :param weight: numeric weight, or the string 'bold' (mapped to 800)
        :param stretch: e.g. 'normal', 'condensed', 'semicondensed',
                        'expanded'
        """
        style = getattr(pgmagick.StyleType, "%sStyle" % style.title())
        stretch = stretch.lower()
        if 'condensed' in stretch:
            tmp = stretch.split('condensed')[0]
            stretch = "%sCondensedStretch" % tmp.title()
        elif 'expanded' in stretch:
            tmp = stretch.split('expanded')[0]
            stretch = "%sExpandedStretch" % tmp.title()
        else:
            stretch = "%sStretch" % stretch.title()
        stretch = getattr(pgmagick.StretchType, "%s" % stretch)
        # BUG FIX: was ``weight is 'bold'`` — identity comparison against a
        # string literal is implementation-dependent; use equality.
        if weight == 'bold':
            weight = 800
        font = pgmagick.DrawableFont(family, style, weight, stretch)
        self.drawer.append(font)
    def gravity(self, gravity_type):
        """Set text/image gravity; accepts a name string or a GravityType."""
        if isinstance(gravity_type, str):
            gravity = getattr(pgmagick.GravityType,
                              "%sGravity" % gravity_type.title())
        else:
            gravity = gravity_type
        gravity_type = pgmagick.DrawableGravity(gravity)
        self.drawer.append(gravity_type)
    def line(self, start_x, start_y, end_x, end_y):
        """Append a straight line between the two points."""
        line = pgmagick.DrawableLine(start_x, start_y, end_x, end_y)
        self.drawer.append(line)
    def matte(self, x, y, paint_method):
        """
        :param paint_method: 'point' or 'replace' or 'floodfill' or
                             'filltoborder' or 'reset'
        :type paint_method: str or pgmagick.PaintMethod
        """
        paint_method = _convert_paintmethod(paint_method)
        self.drawer.append(pgmagick.DrawableMatte(x, y, paint_method))
    def miterlimit(self, miterlimit):
        """Set the stroke miter limit."""
        # FIXME
        self.drawer.append(pgmagick.DrawableMiterLimit(miterlimit))
    def path(self, vpath):
        """Append a path; accepts a VPathList or a list of (x, y) pairs."""
        # FIXME
        if isinstance(vpath, pgmagick.VPathList):
            vpath = vpath
        elif isinstance(vpath, (list, tuple)):
            vpath = _convert_vpathlist(vpath)
        self.drawer.append(pgmagick.DrawablePath(vpath))
    def point(self, x, y):
        """Append a single point."""
        self.drawer.append(pgmagick.DrawablePoint(x, y))
    def pointsize(self, pointsize):
        """Set the font point size for subsequent text."""
        pointsize = pgmagick.DrawablePointSize(pointsize)
        self.drawer.append(pointsize)
    def polygon(self, coordinates):
        """Append a closed polygon through *coordinates* ((x, y) pairs)."""
        if isinstance(coordinates, pgmagick.CoordinateList):
            obj = coordinates
        elif isinstance(coordinates, (list, tuple)):
            obj = _convert_coordinatelist(coordinates)
        else:
            # BUG FIX: previously fell through with ``obj`` unbound
            # (NameError); pass unknown types through unchanged, consistent
            # with path().
            obj = coordinates
        self.drawer.append(pgmagick.DrawablePolygon(obj))
    def polyline(self, coordinates):
        """Append an open polyline through *coordinates* ((x, y) pairs)."""
        if isinstance(coordinates, pgmagick.CoordinateList):
            obj = coordinates
        elif isinstance(coordinates, (list, tuple)):
            obj = _convert_coordinatelist(coordinates)
        else:
            # BUG FIX: same unbound-name fix as polygon().
            obj = coordinates
        self.drawer.append(pgmagick.DrawablePolyline(obj))
    def rectangle(self, upperleft_x, upperleft_y, lowerright_x, lowerright_y):
        """Append an axis-aligned rectangle given opposite corners."""
        r = pgmagick.DrawableRectangle(upperleft_x, upperleft_y,
                                       lowerright_x, lowerright_y)
        self.drawer.append(r)
    def rotation(self, angle):
        """Rotate the coordinate system by *angle* degrees."""
        self.drawer.append(pgmagick.DrawableRotation(angle))
    def round_rectangle(self, center_x, center_y,
                        width, height, corner_width, corner_height):
        """Append a rounded rectangle."""
        rr = pgmagick.DrawableRoundRectangle(center_x, center_y,
                                             width, height,
                                             corner_width, corner_height)
        self.drawer.append(rr)
    def scaling(self, x, y):
        """Scaling Draw Object

        :param x: 0.0 ~ 1.0
        :param y: 0.0 ~ 1.0
        """
        self.drawer.append(pgmagick.DrawableScaling(float(x), float(y)))
    def skewx(self, angle):
        """Skew the coordinate system along the x axis."""
        self.drawer.append(pgmagick.DrawableSkewX(float(angle)))
    def skewy(self, angle):
        """Skew the coordinate system along the y axis."""
        self.drawer.append(pgmagick.DrawableSkewY(float(angle)))
    def stroke_antialias(self, flag=True):
        """stroke antialias

        :param flag: True or False. (default is True)
        :type flag: bool
        """
        antialias = pgmagick.DrawableStrokeAntialias(flag)
        self.drawer.append(antialias)
    def stroke_color(self, color):
        """Set the stroke color; accepts (r, g, b), a name, or a Color."""
        color = _convert_colorobj(color)
        color = pgmagick.DrawableStrokeColor(color)
        self.drawer.append(color)
    def stroke_linecap(self, linecap):
        """set to stroke linecap.

        :param linecap: 'undefined', 'butt', 'round', 'square'
        :type linecap: str
        """
        linecap = getattr(pgmagick.LineCap, "%sCap" % linecap.title())
        linecap = pgmagick.DrawableStrokeLineCap(linecap)
        self.drawer.append(linecap)
    def stroke_linejoin(self, linejoin):
        """set to stroke linejoin.

        :param linejoin: 'undefined', 'miter', 'round', 'bevel'
        :type linejoin: str
        """
        linejoin = getattr(pgmagick.LineJoin, "%sJoin" % linejoin.title())
        linejoin = pgmagick.DrawableStrokeLineJoin(linejoin)
        self.drawer.append(linejoin)
    def stroke_opacity(self, opacity):
        """Set stroke opacity (0.0 ~ 1.0)."""
        self.drawer.append(pgmagick.DrawableStrokeOpacity(float(opacity)))
    def stroke_width(self, width):
        """Set the stroke width in pixels."""
        width = float(width)
        width = pgmagick.DrawableStrokeWidth(width)
        self.drawer.append(width)
    def text(self, x, y, string, encoding=None):
        """Append text at (x, y)."""
        # TODO: unable encoding
        text = pgmagick.DrawableText(x, y, string)
        self.drawer.append(text)
    def text_antialias(self, flag=True):
        """text antialias

        :param flag: True or False. (default is True)
        :type flag: bool
        """
        antialias = pgmagick.DrawableTextAntialias(flag)
        self.drawer.append(antialias)
    def text_decoration(self, decoration):
        """text decoration

        :param decoration: 'no', 'underline', 'overline', 'linethrough'
        :type decoration: str
        """
        if decoration.lower() == 'linethrough':
            # .title() would give 'Linethrough', not the camel-cased name.
            d = pgmagick.DecorationType.LineThroughDecoration
        else:
            decoration_type_string = "%sDecoration" % decoration.title()
            d = getattr(pgmagick.DecorationType, "%s" % decoration_type_string)
        decoration = pgmagick.DrawableTextDecoration(d)
        self.drawer.append(decoration)
    def text_undercolor(self, color):
        """Set the text background (under) color."""
        color = _convert_colorobj(color)
        undercolor = pgmagick.DrawableTextUnderColor(color)
        self.drawer.append(undercolor)
    def translate(self, x, y):
        """Translate the coordinate system by (x, y)."""
        self.drawer.append(pgmagick.DrawableTranslation(float(x), float(y)))
|
|
# Copyright 2011 NTT
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import contextlib
import datetime
import os
import mock
import mox
from oslo.config import cfg
from nova import context
from nova import db
from nova import exception
from nova.network import driver
from nova.network import linux_net
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import timeutils
from nova import test
from nova import utils
# Module logger/config and a fake hostname used by the tests below.
# The imported opts are declared in nova.objects.network and read by the
# linux_net code paths under test.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('share_dhcp_address', 'nova.objects.network')
CONF.import_opt('network_device_mtu', 'nova.objects.network')
HOST = "testhost"
# Fake instance records keyed by instance UUID; served by the stubbed
# db.instance_get() in LinuxNetworkTestCase.setUp() and joined by
# get_associated() below.
instances = {'00000000-0000-0000-0000-0000000000000000':
             {'id': 0,
              'uuid': '00000000-0000-0000-0000-0000000000000000',
              'host': 'fake_instance00',
              'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0),
              'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0),
              'hostname': 'fake_instance00'},
             '00000000-0000-0000-0000-0000000000000001':
             {'id': 1,
              'uuid': '00000000-0000-0000-0000-0000000000000001',
              'host': 'fake_instance01',
              'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0),
              'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0),
              'hostname': 'fake_instance01'}}
# Six sequential fake address records (10.0.0.1 .. 10.0.0.6).
addresses = [{"address": "10.0.0.%d" % n} for n in range(1, 7)]
# Fake network records for the two test networks.
# BUG FIX: the original literals listed 'dhcp_server' twice in each dict
# ('0.0.0.0' first, then the gateway address); Python keeps only the last
# value, so the dead '0.0.0.0' entries have been removed to avoid
# confusion — the effective values are unchanged.
networks = [{'id': 0,
             'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
             'label': 'test0',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.0.0/24',
             'cidr_v6': '2001:db8::/64',
             'gateway_v6': '2001:db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa0',
             'bridge_interface': 'fake_fa0',
             'gateway': '192.168.0.1',
             'broadcast': '192.168.0.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'dhcp_start': '192.168.100.1',
             'vlan': None,
             'host': None,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.0.2',
             'mtu': None,
             'dhcp_server': '192.168.0.1',
             'enable_dhcp': True,
             'share_address': False},
            {'id': 1,
             'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
             'label': 'test1',
             'injected': False,
             'multi_host': True,
             'cidr': '192.168.1.0/24',
             'cidr_v6': '2001:db9::/64',
             'gateway_v6': '2001:db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa1',
             'bridge_interface': 'fake_fa1',
             'gateway': '192.168.1.1',
             'broadcast': '192.168.1.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'dhcp_start': '192.168.100.1',
             'vlan': None,
             'host': None,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.1.2',
             'mtu': None,
             'dhcp_server': '192.168.1.1',
             'enable_dhcp': True,
             'share_address': False}]
# Fake fixed-IP records; 'virtual_interface_id' indexes into vifs and
# 'instance_uuid' keys into instances.  Consumed by get_associated(),
# the stub for db.network_get_associated_fixed_ips().
fixed_ips = [{'id': 0,
              'network_id': 0,
              'address': '192.168.0.100',
              'instance_id': 0,
              'allocated': True,
              'leased': True,
              'virtual_interface_id': 0,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
              'floating_ips': []},
             {'id': 1,
              'network_id': 1,
              'address': '192.168.1.100',
              'instance_id': 0,
              'allocated': True,
              'leased': True,
              'virtual_interface_id': 1,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
              'floating_ips': []},
             {'id': 2,
              'network_id': 1,
              'address': '192.168.0.101',
              'instance_id': 1,
              'allocated': True,
              'leased': True,
              'virtual_interface_id': 2,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
              'floating_ips': []},
             {'id': 3,
              'network_id': 0,
              'address': '192.168.1.101',
              'instance_id': 1,
              'allocated': True,
              'leased': True,
              'virtual_interface_id': 3,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
              'floating_ips': []},
             {'id': 4,
              'network_id': 0,
              'address': '192.168.0.102',
              'instance_id': 0,
              'allocated': True,
              'leased': False,
              'virtual_interface_id': 4,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
              'floating_ips': []},
             {'id': 5,
              'network_id': 1,
              'address': '192.168.1.102',
              'instance_id': 1,
              'allocated': True,
              'leased': False,
              'virtual_interface_id': 5,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
              'floating_ips': []},
             {'id': 6,
              'network_id': 1,
              'address': '192.168.1.103',
              'instance_id': 1,
              'allocated': False,
              'leased': True,
              'virtual_interface_id': 6,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
              'floating_ips': []}]
# Fake virtual-interface records.  Index i into these tuples is vif id i;
# the values give each vif's owning network and instance.
_VIF_NETWORK_IDS = (0, 1, 1, 0, 0, 1, 1)
_VIF_INSTANCE_IDS = (0, 0, 1, 1, 0, 1, 1)
vifs = [{'id': _i,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:0%d' % _i,
         'uuid': '00000000-0000-0000-0000-000000000000000%d' % _i,
         'network_id': _VIF_NETWORK_IDS[_i],
         'instance_uuid': '00000000-0000-0000-0000-000000000000000%d'
                          % _VIF_INSTANCE_IDS[_i]}
        for _i in range(7)]
def get_associated(context, network_id, host=None, address=None):
    """Stub for db.network_get_associated_fixed_ips.

    Walks the fake fixed_ips table and returns the "cleaned" rows for
    fixed IPs on *network_id* that are associated with an instance and a
    virtual interface, optionally filtered by instance host and/or IP
    address.  Mirrors the shape of the real DB API return value.
    """
    matches = []
    for fixed_ip in fixed_ips:
        # Skip rows for other networks or with no instance/VIF association.
        if fixed_ip['network_id'] != network_id:
            continue
        if fixed_ip['instance_uuid'] is None:
            continue
        if fixed_ip['virtual_interface_id'] is None:
            continue
        instance = instances[fixed_ip['instance_uuid']]
        if host and host != instance['host']:
            continue
        if address and address != fixed_ip['address']:
            continue
        vif = vifs[fixed_ip['virtual_interface_id']]
        matches.append({
            'address': fixed_ip['address'],
            'instance_uuid': fixed_ip['instance_uuid'],
            'network_id': fixed_ip['network_id'],
            'vif_id': fixed_ip['virtual_interface_id'],
            'vif_address': vif['address'],
            'instance_hostname': instance['hostname'],
            'instance_updated': instance['updated_at'],
            'instance_created': instance['created_at'],
            'allocated': fixed_ip['allocated'],
            'leased': fixed_ip['leased'],
        })
    return matches
class LinuxNetworkTestCase(test.NoDBTestCase):
    """Tests for the linux_net network driver.

    Exercises dnsmasq config generation (hosts/opts/leases), SNAT and
    metadata iptables rules, bridge/OVS interface plugging, gateway
    initialization, and iptables manager apply/defer behavior.  DB access
    is stubbed out to use the module-level fixed_ips/vifs/instances
    fixtures via get_associated.
    """
    def setUp(self):
        super(LinuxNetworkTestCase, self).setUp()
        self.driver = driver.load_network_driver()
        self.driver.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=True)
        # Route all DB lookups the driver makes to the in-memory fixtures.
        def get_vifs(_context, instance_uuid, use_slave):
            return [vif for vif in vifs if vif['instance_uuid'] ==
                    instance_uuid]
        def get_instance(_context, instance_id):
            return instances[instance_id]
        self.stubs.Set(db, 'virtual_interface_get_by_instance', get_vifs)
        self.stubs.Set(db, 'instance_get', get_instance)
        self.stubs.Set(db, 'network_get_associated_fixed_ips', get_associated)
    # Helper: stub the nat table's add_rule and verify the SNAT rule text.
    def _test_add_snat_rule(self, expected, is_external):
        def verify_add_rule(chain, rule):
            self.assertEqual(chain, 'snat')
            self.assertEqual(rule, expected)
            self.called = True
        self.stubs.Set(linux_net.iptables_manager.ipv4['nat'],
                       'add_rule', verify_add_rule)
        self.called = False
        linux_net.add_snat_rule('10.0.0.0/24', is_external)
        if expected:
            self.assertTrue(self.called)
    def test_add_snat_rule_no_ext(self):
        self.flags(routing_source_ip='10.10.10.1')
        expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
                    '-j SNAT --to-source 10.10.10.1 -o eth0')
        self._test_add_snat_rule(expected, False)
    def test_add_snat_rule_ext(self):
        self.flags(routing_source_ip='10.10.10.1')
        expected = ()
        self._test_add_snat_rule(expected, True)
    def test_add_snat_rule_snat_range_no_ext(self):
        self.flags(routing_source_ip='10.10.10.1',
                   force_snat_range=['10.10.10.0/24'])
        expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
                    '-j SNAT --to-source 10.10.10.1 -o eth0')
        self._test_add_snat_rule(expected, False)
    def test_add_snat_rule_snat_range_ext(self):
        self.flags(routing_source_ip='10.10.10.1',
                   force_snat_range=['10.10.10.0/24'])
        expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 '
                    '-j SNAT --to-source 10.10.10.1')
        self._test_add_snat_rule(expected, True)
    def test_update_dhcp_for_nw00(self):
        self.flags(use_single_default_gateway=True)
        self.mox.StubOutWithMock(self.driver, 'write_to_file')
        self.mox.StubOutWithMock(fileutils, 'ensure_tree')
        self.mox.StubOutWithMock(os, 'chmod')
        self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
        os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.driver.update_dhcp(self.context, "eth0", networks[0])
    def test_update_dhcp_for_nw01(self):
        self.flags(use_single_default_gateway=True)
        self.mox.StubOutWithMock(self.driver, 'write_to_file')
        self.mox.StubOutWithMock(fileutils, 'ensure_tree')
        self.mox.StubOutWithMock(os, 'chmod')
        self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
        os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.driver.update_dhcp(self.context, "eth0", networks[0])
    def test_get_dhcp_hosts_for_nw00(self):
        self.flags(use_single_default_gateway=True)
        expected = (
                "DE:AD:BE:EF:00:00,fake_instance00.novalocal,"
                "192.168.0.100,net:NW-0\n"
                "DE:AD:BE:EF:00:03,fake_instance01.novalocal,"
                "192.168.1.101,net:NW-3\n"
                "DE:AD:BE:EF:00:04,fake_instance00.novalocal,"
                "192.168.0.102,net:NW-4"
        )
        actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[0])
        self.assertEqual(actual_hosts, expected)
    def test_get_dhcp_hosts_for_nw01(self):
        self.flags(use_single_default_gateway=True)
        self.flags(host='fake_instance01')
        expected = (
                "DE:AD:BE:EF:00:02,fake_instance01.novalocal,"
                "192.168.0.101,net:NW-2\n"
                "DE:AD:BE:EF:00:05,fake_instance01.novalocal,"
                "192.168.1.102,net:NW-5"
        )
        actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1])
        self.assertEqual(actual_hosts, expected)
    def test_get_dns_hosts_for_nw00(self):
        expected = (
                "192.168.0.100\tfake_instance00.novalocal\n"
                "192.168.1.101\tfake_instance01.novalocal\n"
                "192.168.0.102\tfake_instance00.novalocal"
        )
        actual_hosts = self.driver.get_dns_hosts(self.context, networks[0])
        self.assertEqual(actual_hosts, expected)
    def test_get_dns_hosts_for_nw01(self):
        expected = (
                "192.168.1.100\tfake_instance00.novalocal\n"
                "192.168.0.101\tfake_instance01.novalocal\n"
                "192.168.1.102\tfake_instance01.novalocal"
        )
        actual_hosts = self.driver.get_dns_hosts(self.context, networks[1])
        self.assertEqual(actual_hosts, expected)
    def test_get_dhcp_opts_for_nw00(self):
        self.flags(use_single_default_gateway=True)
        expected_opts = 'NW-0,3,192.168.0.1\nNW-3,3\nNW-4,3'
        actual_opts = self.driver.get_dhcp_opts(self.context, networks[0])
        self.assertEqual(actual_opts, expected_opts)
    def test_get_dhcp_opts_for_nw00_no_single_default_gateway(self):
        self.flags(use_single_default_gateway=False)
        expected_opts = '3,192.168.0.1'
        actual_opts = self.driver.get_dhcp_opts(self.context, networks[0])
        self.assertEqual(actual_opts, expected_opts)
    def test_get_dhcp_opts_for_nw01(self):
        self.flags(use_single_default_gateway=True, host='fake_instance01')
        expected_opts = "NW-2,3,192.168.1.1\nNW-5,3"
        actual_opts = self.driver.get_dhcp_opts(self.context, networks[1])
        self.assertEqual(actual_opts, expected_opts)
    def test_get_dhcp_leases_for_nw00(self):
        timestamp = timeutils.utcnow()
        seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
        leases = self.driver.get_dhcp_leases(self.context, networks[0])
        leases = leases.split('\n')
        # Each lease line: <expiry> <mac> <ip> <hostname> <client-id>
        for lease in leases:
            lease = lease.split(' ')
            data = get_associated(self.context, 0, address=lease[2])[0]
            self.assertTrue(data['allocated'])
            self.assertTrue(data['leased'])
            self.assertTrue(lease[0] > seconds_since_epoch)
            self.assertEqual(data['vif_address'], lease[1])
            self.assertEqual(data['address'], lease[2])
            self.assertEqual(data['instance_hostname'], lease[3])
            self.assertEqual('*', lease[4])
    def test_get_dhcp_leases_for_nw01(self):
        self.flags(host='fake_instance01')
        timestamp = timeutils.utcnow()
        seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
        leases = self.driver.get_dhcp_leases(self.context, networks[1])
        leases = leases.split('\n')
        for lease in leases:
            lease = lease.split(' ')
            data = get_associated(self.context, 1, address=lease[2])[0]
            self.assertTrue(data['leased'])
            self.assertTrue(lease[0] > seconds_since_epoch)
            self.assertEqual(data['vif_address'], lease[1])
            self.assertEqual(data['address'], lease[2])
            self.assertEqual(data['instance_hostname'], lease[3])
            self.assertEqual('*', lease[4])
    def test_dhcp_opts_not_default_gateway_network(self):
        expected = "NW-0,3"
        fixedip = objects.FixedIPList.get_by_network(self.context,
                                                    {'id': 0})[0]
        actual = self.driver._host_dhcp_opts(fixedip)
        self.assertEqual(actual, expected)
    def test_host_dhcp_without_default_gateway_network(self):
        expected = ','.join(['DE:AD:BE:EF:00:00',
                             'fake_instance00.novalocal',
                             '192.168.0.100'])
        fixedip = objects.FixedIPList.get_by_network(self.context,
                                                    {'id': 0})[0]
        actual = self.driver._host_dhcp(fixedip)
        self.assertEqual(actual, expected)
    def test_host_dns_without_default_gateway_network(self):
        expected = "192.168.0.100\tfake_instance00.novalocal"
        fixedip = objects.FixedIPList.get_by_network(self.context,
                                                    {'id': 0})[0]
        actual = self.driver._host_dns(fixedip)
        self.assertEqual(actual, expected)
    def test_linux_bridge_driver_plug(self):
        """Makes sure plug doesn't drop FORWARD by default.
        Ensures bug 890195 doesn't reappear.
        """
        def fake_execute(*args, **kwargs):
            return "", ""
        self.stubs.Set(utils, 'execute', fake_execute)
        def verify_add_rule(chain, rule):
            self.assertEqual(chain, 'FORWARD')
            self.assertIn('ACCEPT', rule)
        self.stubs.Set(linux_net.iptables_manager.ipv4['filter'],
                       'add_rule', verify_add_rule)
        driver = linux_net.LinuxBridgeInterfaceDriver()
        driver.plug({"bridge": "br100", "bridge_interface": "eth0",
                     "share_address": False}, "fakemac")
    def test_linux_ovs_driver_plug_exception(self):
        self.flags(fake_network=False)
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError('error')
        def fake_device_exists(*args, **kwargs):
            return False
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(linux_net, 'device_exists', fake_device_exists)
        driver = linux_net.LinuxOVSInterfaceDriver()
        self.assertRaises(exception.AgentError,
                          driver.plug, {'uuid': 'fake_network_uuid'},
                          'fake_mac')
    def test_vlan_override(self):
        """Makes sure vlan_interface flag overrides network bridge_interface.
        Allows heterogeneous networks a la bug 833426
        """
        driver = linux_net.LinuxBridgeInterfaceDriver()
        info = {}
        @staticmethod
        def test_ensure(vlan, bridge, interface, network, mac_address, mtu):
            info['passed_interface'] = interface
        self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
                       'ensure_vlan_bridge', test_ensure)
        network = {
                "bridge": "br100",
                "bridge_interface": "base_interface",
                "share_address": False,
                "vlan": "fake"
        }
        self.flags(vlan_interface="")
        driver.plug(network, "fakemac")
        self.assertEqual(info['passed_interface'], "base_interface")
        self.flags(vlan_interface="override_interface")
        driver.plug(network, "fakemac")
        self.assertEqual(info['passed_interface'], "override_interface")
        driver.plug(network, "fakemac")
    def test_flat_override(self):
        """Makes sure flat_interface flag overrides network bridge_interface.
        Allows heterogeneous networks a la bug 833426
        """
        driver = linux_net.LinuxBridgeInterfaceDriver()
        info = {}
        @staticmethod
        def test_ensure(bridge, interface, network, gateway):
            info['passed_interface'] = interface
        self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
                       'ensure_bridge', test_ensure)
        network = {
                "bridge": "br100",
                "bridge_interface": "base_interface",
                "share_address": False,
        }
        driver.plug(network, "fakemac")
        self.assertEqual(info['passed_interface'], "base_interface")
        self.flags(flat_interface="override_interface")
        driver.plug(network, "fakemac")
        self.assertEqual(info['passed_interface'], "override_interface")
    # Helper: run restart_dhcp with execution stubbed and compare the exact
    # dnsmasq command line; runs once with and once without dhcp_domain set.
    def _test_dnsmasq_execute(self, extra_expected=None):
        network_ref = {'id': 'fake',
                       'label': 'fake',
                       'gateway': '10.0.0.1',
                       'multi_host': False,
                       'cidr': '10.0.0.0/24',
                       'netmask': '255.255.255.0',
                       'dns1': '8.8.4.4',
                       'dhcp_start': '1.0.0.2',
                       'dhcp_server': '10.0.0.1',
                       'share_address': False}
        def fake_execute(*args, **kwargs):
            executes.append(args)
            return "", ""
        def fake_add_dhcp_mangle_rule(*args, **kwargs):
            executes.append(args)
        self.stubs.Set(linux_net, '_execute', fake_execute)
        self.stubs.Set(linux_net, '_add_dhcp_mangle_rule',
                       fake_add_dhcp_mangle_rule)
        self.stubs.Set(os, 'chmod', lambda *a, **kw: None)
        self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None)
        self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None)
        dev = 'br100'
        default_domain = CONF.dhcp_domain
        for domain in ('', default_domain):
            executes = []
            self.flags(dhcp_domain=domain)
            linux_net.restart_dhcp(self.context, dev, network_ref)
            expected = ['env',
            'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile),
            'NETWORK_ID=fake',
            'dnsmasq',
            '--strict-order',
            '--bind-interfaces',
            '--conf-file=%s' % CONF.dnsmasq_config_file,
            '--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'),
            '--dhcp-optsfile=%s' % linux_net._dhcp_file(dev, 'opts'),
            '--listen-address=%s' % network_ref['dhcp_server'],
            '--except-interface=lo',
            "--dhcp-range=set:%s,%s,static,%s,%ss" % (network_ref['label'],
                                                  network_ref['dhcp_start'],
                                                  network_ref['netmask'],
                                                  CONF.dhcp_lease_time),
            '--dhcp-lease-max=256',
            '--dhcp-hostsfile=%s' % linux_net._dhcp_file(dev, 'conf'),
            '--dhcp-script=%s' % CONF.dhcpbridge,
            '--leasefile-ro']
            if CONF.dhcp_domain:
                expected.append('--domain=%s' % CONF.dhcp_domain)
            if extra_expected:
                expected += extra_expected
            self.assertEqual([(dev,), tuple(expected)], executes)
    def test_dnsmasq_execute(self):
        self._test_dnsmasq_execute()
    def test_dnsmasq_execute_dns_servers(self):
        self.flags(dns_server=['1.1.1.1', '2.2.2.2'])
        expected = [
            '--no-hosts',
            '--no-resolv',
            '--server=1.1.1.1',
            '--server=2.2.2.2',
        ]
        self._test_dnsmasq_execute(expected)
    def test_dnsmasq_execute_use_network_dns_servers(self):
        self.flags(use_network_dns_servers=True)
        expected = [
            '--no-hosts',
            '--no-resolv',
            '--server=8.8.4.4',
        ]
        self._test_dnsmasq_execute(expected)
    def test_isolated_host(self):
        self.flags(fake_network=False,
                   share_dhcp_address=True)
        # NOTE(vish): use a fresh copy of the manager for each test
        self.stubs.Set(linux_net, 'iptables_manager',
                       linux_net.IptablesManager())
        self.stubs.Set(linux_net, 'binary_name', 'test')
        executes = []
        def fake_execute(*args, **kwargs):
            executes.append(args)
            return "", ""
        self.stubs.Set(utils, 'execute', fake_execute)
        driver = linux_net.LinuxBridgeInterfaceDriver()
        @staticmethod
        def fake_ensure(bridge, interface, network, gateway):
            return bridge
        self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
                       'ensure_bridge', fake_ensure)
        iface = 'eth0'
        dhcp = '192.168.1.1'
        network = {'dhcp_server': dhcp,
                   'share_address': False,
                   'bridge': 'br100',
                   'bridge_interface': iface}
        driver.plug(network, 'fakemac')
        # plug() should install ebtables rules isolating the DHCP address
        # and blocking DHCP traffic forwarding, then apply iptables.
        expected = [
            ('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
             iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i',
             iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
             iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o',
             iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i',
             iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
             '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-i',
             iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
             '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o',
             iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
             '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-o',
             iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
             '-j', 'DROP'),
            ('iptables-save', '-c'),
            ('iptables-restore', '-c'),
            ('ip6tables-save', '-c'),
            ('ip6tables-restore', '-c'),
        ]
        self.assertEqual(executes, expected)
        executes = []
        @staticmethod
        def fake_remove(bridge, gateway):
            return
        self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
                       'remove_bridge', fake_remove)
        driver.unplug(network)
        # unplug() should only delete the rules, not re-insert them.
        expected = [
            ('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
             iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
             iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i',
             iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
             '-j', 'DROP'),
            ('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o',
             iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
             '-j', 'DROP'),
        ]
        self.assertEqual(executes, expected)
    # Helper: fake 'ip'/'sysctl' output and compare the exact command
    # sequence initialize_gateway_device issues for a given existing state.
    def _test_initialize_gateway(self, existing, expected, routes=''):
        self.flags(fake_network=False)
        executes = []
        def fake_execute(*args, **kwargs):
            executes.append(args)
            if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show':
                return existing, ""
            if args[0] == 'ip' and args[1] == 'route' and args[2] == 'show':
                return routes, ""
            if args[0] == 'sysctl':
                return '1\n', ''
        self.stubs.Set(utils, 'execute', fake_execute)
        network = {'dhcp_server': '192.168.1.1',
                   'cidr': '192.168.1.0/24',
                   'broadcast': '192.168.1.255',
                   'cidr_v6': '2001:db8::/64'}
        self.driver.initialize_gateway_device('eth0', network)
        self.assertEqual(executes, expected)
    def test_initialize_gateway_moves_wrong_ip(self):
        existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                    " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
                    "    link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
                    "    inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
                    "    inet6 dead::beef:dead:beef:dead/64 scope link\n"
                    "       valid_lft forever preferred_lft forever\n")
        expected = [
            ('sysctl', '-n', 'net.ipv4.ip_forward'),
            ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
            ('ip', 'route', 'show', 'dev', 'eth0'),
            ('ip', 'addr', 'del', '192.168.0.1/24',
             'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.1.1/24',
             'brd', '192.168.1.255', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.0.1/24',
             'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
            ('ip', '-f', 'inet6', 'addr', 'change',
             '2001:db8::/64', 'dev', 'eth0'),
        ]
        self._test_initialize_gateway(existing, expected)
    def test_initialize_gateway_resets_route(self):
        routes = ("default via 192.168.0.1 dev eth0\n"
                  "192.168.100.0/24 via 192.168.0.254 dev eth0 proto static\n")
        existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                    " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
                    "    link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
                    "    inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
                    "    inet6 dead::beef:dead:beef:dead/64 scope link\n"
                    "       valid_lft forever preferred_lft forever\n")
        expected = [
            ('sysctl', '-n', 'net.ipv4.ip_forward'),
            ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
            ('ip', 'route', 'show', 'dev', 'eth0'),
            ('ip', 'route', 'del', 'default', 'dev', 'eth0'),
            ('ip', 'route', 'del', '192.168.100.0/24', 'dev', 'eth0'),
            ('ip', 'addr', 'del', '192.168.0.1/24',
             'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.1.1/24',
             'brd', '192.168.1.255', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.0.1/24',
             'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
            ('ip', 'route', 'add', 'default', 'via', '192.168.0.1',
             'dev', 'eth0'),
            ('ip', 'route', 'add', '192.168.100.0/24', 'via', '192.168.0.254',
             'dev', 'eth0', 'proto', 'static'),
            ('ip', '-f', 'inet6', 'addr', 'change',
             '2001:db8::/64', 'dev', 'eth0'),
        ]
        self._test_initialize_gateway(existing, expected, routes)
    def test_initialize_gateway_no_move_right_ip(self):
        existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                    " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
                    "    link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
                    "    inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n"
                    "    inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
                    "    inet6 dead::beef:dead:beef:dead/64 scope link\n"
                    "       valid_lft forever preferred_lft forever\n")
        expected = [
            ('sysctl', '-n', 'net.ipv4.ip_forward'),
            ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
            ('ip', '-f', 'inet6', 'addr', 'change',
             '2001:db8::/64', 'dev', 'eth0'),
        ]
        self._test_initialize_gateway(existing, expected)
    def test_initialize_gateway_add_if_blank(self):
        existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                    " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
                    "    link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
                    "    inet6 dead::beef:dead:beef:dead/64 scope link\n"
                    "       valid_lft forever preferred_lft forever\n")
        expected = [
            ('sysctl', '-n', 'net.ipv4.ip_forward'),
            ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
            ('ip', 'route', 'show', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.1.1/24',
             'brd', '192.168.1.255', 'dev', 'eth0'),
            ('ip', '-f', 'inet6', 'addr', 'change',
             '2001:db8::/64', 'dev', 'eth0'),
        ]
        self._test_initialize_gateway(existing, expected)
    def test_ensure_floating_no_duplicate_forwards(self):
        ln = linux_net
        self.stubs.Set(ln.iptables_manager, 'apply', lambda: None)
        self.stubs.Set(ln, 'ensure_ebtables_rules', lambda *a, **kw: None)
        net = {'bridge': 'br100', 'cidr': '10.0.0.0/24'}
        ln.ensure_floating_forward('10.10.10.10', '10.0.0.1', 'eth0', net)
        ln.ensure_floating_forward('10.10.10.11', '10.0.0.10', 'eth0', net)
        two_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
        ln.ensure_floating_forward('10.10.10.10', '10.0.0.3', 'eth0', net)
        dup_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
        self.assertEqual(two_forward_rules, dup_forward_rules)
    def test_apply_ran(self):
        manager = linux_net.IptablesManager()
        manager.iptables_apply_deferred = False
        self.mox.StubOutWithMock(manager, '_apply')
        manager._apply()
        self.mox.ReplayAll()
        empty_ret = manager.apply()
        self.assertIsNone(empty_ret)
    def test_apply_not_run(self):
        manager = linux_net.IptablesManager()
        manager.iptables_apply_deferred = True
        self.mox.StubOutWithMock(manager, '_apply')
        self.mox.ReplayAll()
        manager.apply()
    def test_deferred_unset_apply_ran(self):
        manager = linux_net.IptablesManager()
        manager.iptables_apply_deferred = True
        self.mox.StubOutWithMock(manager, '_apply')
        manager._apply()
        self.mox.ReplayAll()
        manager.defer_apply_off()
        self.assertFalse(manager.iptables_apply_deferred)
    # Helper: verify the INPUT rule text produced by metadata_accept().
    def _test_add_metadata_accept_rule(self, expected):
        def verify_add_rule(chain, rule):
            self.assertEqual(chain, 'INPUT')
            self.assertEqual(expected, rule)
        self.stubs.Set(linux_net.iptables_manager.ipv4['filter'],
                       'add_rule', verify_add_rule)
        linux_net.metadata_accept()
    def test_metadata_accept(self):
        self.flags(metadata_port='8775')
        self.flags(metadata_host='10.10.10.1')
        expected = ('-s 0.0.0.0/0 -p tcp -m tcp --dport 8775 '
                    '-d 10.10.10.1 -j ACCEPT')
        self._test_add_metadata_accept_rule(expected)
    def test_metadata_accept_localhost(self):
        self.flags(metadata_port='8775')
        self.flags(metadata_host='127.0.0.1')
        expected = ('-s 0.0.0.0/0 -p tcp -m tcp --dport 8775 '
                    '-m addrtype --dst-type LOCAL -j ACCEPT')
        self._test_add_metadata_accept_rule(expected)
    # Helper: verify the PREROUTING rule text produced by metadata_forward().
    def _test_add_metadata_forward_rule(self, expected):
        def verify_add_rule(chain, rule):
            self.assertEqual(chain, 'PREROUTING')
            self.assertEqual(expected, rule)
        self.stubs.Set(linux_net.iptables_manager.ipv4['nat'],
                       'add_rule', verify_add_rule)
        linux_net.metadata_forward()
    def test_metadata_forward(self):
        self.flags(metadata_port='8775')
        self.flags(metadata_host='10.10.10.1')
        expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp '
                    '--dport 80 -j DNAT --to-destination 10.10.10.1:8775')
        self._test_add_metadata_forward_rule(expected)
    def test_metadata_forward_localhost(self):
        self.flags(metadata_port='8775')
        self.flags(metadata_host='127.0.0.1')
        expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp '
                    '--dport 80 -j REDIRECT --to-ports 8775')
        self._test_add_metadata_forward_rule(expected)
    def test_ensure_bridge_brings_up_interface(self):
        calls = {
            'device_exists': [mock.call('bridge')],
            '_execute': [
                mock.call('brctl', 'addif', 'bridge', 'eth0',
                          run_as_root=True, check_exit_code=False),
                mock.call('ip', 'link', 'set', 'eth0', 'up',
                          run_as_root=True, check_exit_code=False),
                mock.call('ip', 'route', 'show', 'dev', 'eth0'),
                mock.call('ip', 'addr', 'show', 'dev', 'eth0', 'scope',
                          'global'),
                ]
            }
        with contextlib.nested(
            mock.patch.object(linux_net, 'device_exists', return_value=True),
            mock.patch.object(linux_net, '_execute', return_value=('', ''))
        ) as (device_exists, _execute):
            driver = linux_net.LinuxBridgeInterfaceDriver()
            driver.ensure_bridge('bridge', 'eth0')
            device_exists.assert_has_calls(calls['device_exists'])
            _execute.assert_has_calls(calls['_execute'])
    def test_ensure_bridge_brclt_addif_exception(self):
        def fake_execute(*cmd, **kwargs):
            if ('brctl', 'addif', 'bridge', 'eth0') == cmd:
                return ('', 'some error happens')
            else:
                return ('', '')
        with contextlib.nested(
            mock.patch.object(linux_net, 'device_exists', return_value=True),
            mock.patch.object(linux_net, '_execute', fake_execute)
        ) as (device_exists, _):
            driver = linux_net.LinuxBridgeInterfaceDriver()
            self.assertRaises(exception.NovaException,
                              driver.ensure_bridge, 'bridge', 'eth0')
            device_exists.assert_called_once_with('bridge')
    def test_set_device_mtu_configured(self):
        self.flags(network_device_mtu=10000)
        calls = [
                mock.call('ip', 'link', 'set', 'fake-dev', 'mtu',
                          10000, run_as_root=True,
                          check_exit_code=[0, 2, 254])
                ]
        with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
            linux_net._set_device_mtu('fake-dev')
            ex.assert_has_calls(calls)
    def test_set_device_mtu_default(self):
        calls = []
        with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
            linux_net._set_device_mtu('fake-dev')
            ex.assert_has_calls(calls)
    # Helper: create an OVS VIF port with execution mocked and verify the
    # exact ovs-vsctl (and optional MTU) command sequence.
    def _ovs_vif_port(self, calls):
        with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
            linux_net.create_ovs_vif_port('fake-bridge', 'fake-dev',
                                          'fake-iface-id', 'fake-mac',
                                          'fake-instance-uuid')
            ex.assert_has_calls(calls)
    def test_ovs_vif_port(self):
        calls = [
                mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists',
                          'del-port', 'fake-dev', '--', 'add-port',
                          'fake-bridge', 'fake-dev',
                          '--', 'set', 'Interface', 'fake-dev',
                          'external-ids:iface-id=fake-iface-id',
                          'external-ids:iface-status=active',
                          'external-ids:attached-mac=fake-mac',
                          'external-ids:vm-uuid=fake-instance-uuid',
                          run_as_root=True)
                ]
        self._ovs_vif_port(calls)
    def test_ovs_vif_port_with_mtu(self):
        self.flags(network_device_mtu=10000)
        calls = [
                mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists',
                          'del-port', 'fake-dev', '--', 'add-port',
                          'fake-bridge', 'fake-dev',
                          '--', 'set', 'Interface', 'fake-dev',
                          'external-ids:iface-id=fake-iface-id',
                          'external-ids:iface-status=active',
                          'external-ids:attached-mac=fake-mac',
                          'external-ids:vm-uuid=fake-instance-uuid',
                          run_as_root=True),
                mock.call('ip', 'link', 'set', 'fake-dev', 'mtu',
                          10000, run_as_root=True,
                          check_exit_code=[0, 2, 254])
                ]
        self._ovs_vif_port(calls)
    # Helper: create a veth pair with execution mocked and verify the exact
    # 'ip link' command sequence.
    def _create_veth_pair(self, calls):
        with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
            linux_net._create_veth_pair('fake-dev1', 'fake-dev2')
            ex.assert_has_calls(calls)
    def test_create_veth_pair(self):
        calls = [
            mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth',
                      'peer', 'name', 'fake-dev2', run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev1', 'up',
                      run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on',
                      run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev2', 'up',
                      run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on',
                      run_as_root=True)
            ]
        self._create_veth_pair(calls)
    def test_create_veth_pair_with_mtu(self):
        self.flags(network_device_mtu=10000)
        calls = [
            mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth',
                      'peer', 'name', 'fake-dev2', run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev1', 'up',
                      run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on',
                      run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev1', 'mtu',
                      10000, run_as_root=True,
                      check_exit_code=[0, 2, 254]),
            mock.call('ip', 'link', 'set', 'fake-dev2', 'up',
                      run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on',
                      run_as_root=True),
            mock.call('ip', 'link', 'set', 'fake-dev2', 'mtu',
                      10000, run_as_root=True,
                      check_exit_code=[0, 2, 254])
            ]
        self._create_veth_pair(calls)
|
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre (copied from CPython)"""
# XXX: show string offset and offending character for all errors
import sys
from .sre_constants import *
# Characters that have a special meaning outside a character class.
SPECIAL_CHARS = ".\\[{()*+?^$|"
# Characters that may introduce a repeat construct.
REPEAT_CHARS = "*+?{"
# Character classes used by the escape parsers below.
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
# Simple escapes that map directly to a single literal character code.
ESCAPES = {
    r"\a": (LITERAL, ord("\a")),
    r"\b": (LITERAL, ord("\b")),
    r"\f": (LITERAL, ord("\f")),
    r"\n": (LITERAL, ord("\n")),
    r"\r": (LITERAL, ord("\r")),
    r"\t": (LITERAL, ord("\t")),
    r"\v": (LITERAL, ord("\v")),
    r"\\": (LITERAL, ord("\\"))
}
# Escapes that stand for a position assertion (AT) or a character
# category (IN); note \b means AT_BOUNDARY here but a literal backspace
# inside a character class (see ESCAPES above).
CATEGORIES = {
    r"\A": (AT, AT_BEGINNING_STRING), # start of string
    r"\b": (AT, AT_BOUNDARY),
    r"\B": (AT, AT_NON_BOUNDARY),
    r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
    r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
    r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
    r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
    r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
    r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
    r"\Z": (AT, AT_END_STRING), # end of string
}
# Mapping of inline flag letters (e.g. "(?i)") to SRE_FLAG_* bits.
FLAGS = {
    # standard flags
    "i": SRE_FLAG_IGNORECASE,
    "L": SRE_FLAG_LOCALE,
    "m": SRE_FLAG_MULTILINE,
    "s": SRE_FLAG_DOTALL,
    "x": SRE_FLAG_VERBOSE,
    # extensions
    "t": SRE_FLAG_TEMPLATE,
    "u": SRE_FLAG_UNICODE,
}
class Pattern:
    """Master pattern object.

    Tracks the global state of a parse: the combined flag bits, the
    numbering of capture groups, the mapping of group names to numbers,
    and which groups are currently open (and therefore not yet valid
    targets for a back-reference).
    """

    def __init__(self):
        self.flags = 0          # combined SRE_FLAG_* bits
        self.open = []          # group ids opened but not yet closed
        self.groups = 1         # next group id (group 0 is the whole match)
        self.groupdict = {}     # group name -> group id
        self.lookbehind = 0

    def opengroup(self, name=None):
        """Allocate the next group id, register *name* if given, and mark
        the group as open.  Raises error on a duplicate group name."""
        new_gid, self.groups = self.groups, self.groups + 1
        if name is not None:
            previous = self.groupdict.get(name, None)
            if previous is not None:
                raise error("redefinition of group name %s as group %d; "
                            "was group %d" % (repr(name), new_gid, previous))
            self.groupdict[name] = new_gid
        self.open.append(new_gid)
        return new_gid

    def closegroup(self, gid):
        """Mark group *gid* as closed (complete)."""
        self.open.remove(gid)

    def checkgroup(self, gid):
        """True if *gid* is an existing, already-closed group — i.e. a
        valid target for a back-reference."""
        return gid < self.groups and gid not in self.open
class SubPattern:
    # a subpattern, in intermediate form
    # self.data is a list of (opcode, argument) pairs; the class provides
    # a list-like interface over it plus dump() (py2 debug printing) and
    # getwidth() (min/max match-width analysis).
    def __init__(self, pattern, data=None):
        # pattern: the owning Pattern object; data: initial op list.
        self.pattern = pattern
        if data is None:
            data = []
        self.data = data
        self.width = None  # memoized (min, max) width; see getwidth()
    def dump(self, level=0):
        # Pretty-print the op tree, indenting nested subpatterns by level.
        # NOTE: uses Python 2 print statements (this is py2-only code).
        seqtypes = (tuple, list)
        for op, av in self.data:
            print level*" " + op,
            if op == IN:
                # member sublanguage
                print
                for op, a in av:
                    print (level+1)*" " + op, a
            elif op == BRANCH:
                print
                for i, a in enumerate(av[1]):
                    if i:
                        print level*" " + "or"
                    a.dump(level+1)
            elif op == GROUPREF_EXISTS:
                condgroup, item_yes, item_no = av
                print condgroup
                item_yes.dump(level+1)
                if item_no:
                    print level*" " + "else"
                    item_no.dump(level+1)
            elif isinstance(av, seqtypes):
                nl = 0
                for a in av:
                    if isinstance(a, SubPattern):
                        if not nl:
                            print
                        a.dump(level+1)
                        nl = 1
                    else:
                        print a,
                        nl = 0
                if not nl:
                    print
            else:
                print av
    def __repr__(self):
        return repr(self.data)
    def __len__(self):
        return len(self.data)
    def __delitem__(self, index):
        del self.data[index]
    def __getitem__(self, index):
        # Slicing returns a new SubPattern sharing the same owner Pattern.
        if isinstance(index, slice):
            return SubPattern(self.pattern, self.data[index])
        return self.data[index]
    def __setitem__(self, index, code):
        self.data[index] = code
    def insert(self, index, code):
        self.data.insert(index, code)
    def append(self, code):
        self.data.append(code)
    def getwidth(self):
        # determine the width (min, max) for this subpattern
        # Result is memoized in self.width and clamped to MAXREPEAT bounds.
        if self.width:
            return self.width
        lo = hi = 0
        UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
        REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
        for op, av in self.data:
            if op is BRANCH:
                # width of a branch: min/max over all alternatives
                i = MAXREPEAT - 1
                j = 0
                for av in av[1]:
                    l, h = av.getwidth()
                    i = min(i, l)
                    j = max(j, h)
                lo = lo + i
                hi = hi + j
            elif op is CALL:
                i, j = av.getwidth()
                lo = lo + i
                hi = hi + j
            elif op is SUBPATTERN:
                i, j = av[1].getwidth()
                lo = lo + i
                hi = hi + j
            elif op in REPEATCODES:
                # repeat: inner width scaled by the min/max repeat counts
                i, j = av[2].getwidth()
                lo = lo + i * av[0]
                hi = hi + j * av[1]
            elif op in UNITCODES:
                # single-character ops contribute exactly one to both bounds
                lo = lo + 1
                hi = hi + 1
            elif op == SUCCESS:
                break
        self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
        return self.width
class Tokenizer:
    """Splits a pattern string into tokens.

    A token is a single character, or a backslash plus the character that
    follows it.  The upcoming token is always available as self.next
    (None at end of input); self.index is the offset just past it.
    """

    def __init__(self, string):
        self.string = string
        self.index = 0
        self.__advance()

    def __advance(self):
        # Load the next token into self.next and move self.index past it.
        if self.index >= len(self.string):
            self.next = None
            return
        token = self.string[self.index]
        if token == "\\":
            try:
                token += self.string[self.index + 1]
            except IndexError:
                raise error("bogus escape (end of line)")
        self.index += len(token)
        self.next = token

    def match(self, char, skip=1):
        """Return 1 if the next token equals *char* (consuming it unless
        skip is false), else 0."""
        if char != self.next:
            return 0
        if skip:
            self.__advance()
        return 1

    def get(self):
        """Consume and return the next token (None at end of input)."""
        token = self.next
        self.__advance()
        return token

    def tell(self):
        """Return an opaque position marker usable with seek()."""
        return self.index, self.next

    def seek(self, index):
        """Restore a position previously returned by tell()."""
        self.index, self.next = index
def isident(char):
    """Return True if *char* may start an identifier-style group name."""
    return char == "_" or "a" <= char <= "z" or "A" <= char <= "Z"
def isdigit(char):
    """Return True if *char* is an ASCII decimal digit."""
    return not (char < "0" or char > "9")
def isname(name):
    """Check that a group name is identifier-shaped: a letter or
    underscore followed by letters, digits or underscores."""
    letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
    if name[0] not in letters:
        return False
    for ch in name[1:]:
        if ch not in letters and ch not in "0123456789":
            return False
    return True
def _class_escape(source, escape):
    # Handle an escape code inside a character class ([...]).  *escape*
    # is the two-character token from the Tokenizer; further characters
    # (hex/octal digits) are pulled from *source* as needed.  Returns an
    # (opcode, argument) tuple or raises error for a bogus escape.
    code = ESCAPES.get(escape)
    if code:
        return code
    code = CATEGORIES.get(escape)
    # only IN-type categories (e.g. \d, \w) are legal inside a class
    if code and code[0] == IN:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape (exactly two digits)
            while source.next in HEXDIGITS and len(escape) < 4:
                escape = escape + source.get()
            escape = escape[2:]
            if len(escape) != 2:
                raise error("bogus escape: %s" % repr("\\" + escape))
            return LITERAL, int(escape, 16) & 0xff
        elif c in OCTDIGITS:
            # octal escape (up to three digits)
            while source.next in OCTDIGITS and len(escape) < 4:
                escape = escape + source.get()
            escape = escape[1:]
            return LITERAL, int(escape, 8) & 0xff
        elif c in DIGITS:
            # \8 and \9 are not octal and have no meaning in a class
            raise error("bogus escape: %s" % repr(escape))
        if len(escape) == 2:
            # any other escaped character is a literal
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
    # Handle an escape code in an expression (outside a character
    # class).  Unlike _class_escape, a leading digit here may be a
    # decimal group reference rather than an octal escape.
    code = CATEGORIES.get(escape)
    if code:
        return code
    code = ESCAPES.get(escape)
    if code:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape
            while source.next in HEXDIGITS and len(escape) < 4:
                escape = escape + source.get()
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "0":
            # octal escape
            while source.next in OCTDIGITS and len(escape) < 4:
                escape = escape + source.get()
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            # octal escape *or* decimal group reference (sigh)
            if source.next in DIGITS:
                escape = escape + source.get()
                if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
                    source.next in OCTDIGITS):
                    # got three octal digits; this is an octal escape
                    escape = escape + source.get()
                    return LITERAL, int(escape[1:], 8) & 0xff
            # not an octal escape, so this is a group reference
            group = int(escape[1:])
            if group < state.groups:
                if not state.checkgroup(group):
                    raise error("cannot refer to open group")
                if state.lookbehind:
                    import warnings
                    warnings.warn('group references in lookbehind '
                                  'assertions are not supported',
                                  RuntimeWarning)
                return GROUPREF, group
            raise ValueError
        if len(escape) == 2:
            # any other escaped character is a literal
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
    # Parse an alternation: a|b|c.  With nested=0 (top level) the
    # alternation must consume the whole input; otherwise it stops at
    # an unconsumed ")".
    items = []
    itemsappend = items.append
    sourcematch = source.match
    while 1:
        itemsappend(_parse(source, state))
        if sourcematch("|"):
            continue
        if not nested:
            break
        if not source.next or sourcematch(")", 0):
            break
        else:
            raise error("pattern not properly closed")
    if len(items) == 1:
        return items[0]
    subpattern = SubPattern(state)
    subpatternappend = subpattern.append
    # check if all items share a common prefix
    while 1:
        prefix = None
        for item in items:
            if not item:
                break
            if prefix is None:
                prefix = item[0]
            elif item[0] != prefix:
                break
        else:
            # all subitems start with a common "prefix".
            # move it out of the branch
            for item in items:
                del item[0]
            subpatternappend(prefix)
            continue  # check next one
        break
    # check if the branch can be replaced by a character set
    for item in items:
        if len(item) != 1 or item[0][0] != LITERAL:
            break
    else:
        # we can store this as a character set instead of a
        # branch (the compiler may optimize this even more)
        set = []
        setappend = set.append
        for item in items:
            setappend(item[0])
        subpatternappend((IN, set))
        return subpattern
    subpattern.append((BRANCH, (None, items)))
    return subpattern
def _parse_sub_cond(source, state, condgroup):
    # Parse the body of a conditional backreference group
    # (?(group)yes|no): at most two alternatives are allowed.
    item_yes = _parse(source, state)
    if source.match("|"):
        item_no = _parse(source, state)
        if source.match("|"):
            raise error("conditional backref with more than two branches")
    else:
        item_no = None
    if source.next and not source.match(")", 0):
        raise error("pattern not properly closed")
    subpattern = SubPattern(state)
    subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
    return subpattern
# Constant character/opcode sets used by _parse(); precomputed at module
# level so the hot parsing loop can bind them to locals once.
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
    # Parse a simple pattern: everything up to (but not consuming) a
    # top-level "|" or ")", or the end of input.  Returns a SubPattern.
    subpattern = SubPattern(state)

    # precompute constants into local variables (speed: LOAD_FAST in
    # the main loop below)
    subpatternappend = subpattern.append
    sourceget = source.get
    sourcematch = source.match
    _len = len
    PATTERNENDERS = _PATTERNENDERS
    ASSERTCHARS = _ASSERTCHARS
    LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
    REPEATCODES = _REPEATCODES

    while 1:
        if source.next in PATTERNENDERS:
            break  # end of subpattern
        this = sourceget()
        if this is None:
            break  # end of pattern

        if state.flags & SRE_FLAG_VERBOSE:
            # skip whitespace and comments
            if this in WHITESPACE:
                continue
            if this == "#":
                while 1:
                    this = sourceget()
                    if this in (None, "\n"):
                        break
                continue

        if this and this[0] not in SPECIAL_CHARS:
            # ordinary character: a literal
            subpatternappend((LITERAL, ord(this)))

        elif this == "[":
            # character set
            set = []
            setappend = set.append
            ## if sourcematch(":"):
            ##     pass # handle character classes
            if sourcematch("^"):
                setappend((NEGATE, None))
            # check remaining characters
            start = set[:]
            while 1:
                this = sourceget()
                # "]" as the very first member is a literal, hence the
                # "set != start" check
                if this == "]" and set != start:
                    break
                elif this and this[0] == "\\":
                    code1 = _class_escape(source, this)
                elif this:
                    code1 = LITERAL, ord(this)
                else:
                    raise error("unexpected end of regular expression")
                if sourcematch("-"):
                    # potential range
                    this = sourceget()
                    if this == "]":
                        # trailing "-" is a literal, not a range
                        if code1[0] is IN:
                            code1 = code1[1][0]
                        setappend(code1)
                        setappend((LITERAL, ord("-")))
                        break
                    elif this:
                        if this[0] == "\\":
                            code2 = _class_escape(source, this)
                        else:
                            code2 = LITERAL, ord(this)
                        if code1[0] != LITERAL or code2[0] != LITERAL:
                            raise error("bad character range")
                        lo = code1[1]
                        hi = code2[1]
                        if hi < lo:
                            raise error("bad character range")
                        setappend((RANGE, (lo, hi)))
                    else:
                        raise error("unexpected end of regular expression")
                else:
                    if code1[0] is IN:
                        code1 = code1[1][0]
                    setappend(code1)

            # XXX: <fl> should move set optimization to compiler!
            if _len(set)==1 and set[0][0] is LITERAL:
                subpatternappend(set[0])  # optimization
            elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
                subpatternappend((NOT_LITERAL, set[1][1]))  # optimization
            else:
                # XXX: <fl> should add charmap optimization here
                subpatternappend((IN, set))

        elif this and this[0] in REPEAT_CHARS:
            # repeat previous item
            if this == "?":
                min, max = 0, 1
            elif this == "*":
                min, max = 0, MAXREPEAT
            elif this == "+":
                min, max = 1, MAXREPEAT
            elif this == "{":
                if source.next == "}":
                    # "{}" is a literal brace, not a repeat
                    subpatternappend((LITERAL, ord(this)))
                    continue
                here = source.tell()
                min, max = 0, MAXREPEAT
                lo = hi = ""
                while source.next in DIGITS:
                    lo = lo + source.get()
                if sourcematch(","):
                    while source.next in DIGITS:
                        hi = hi + sourceget()
                else:
                    hi = lo
                if not sourcematch("}"):
                    # malformed {m,n}: treat "{" as a literal and rewind
                    subpatternappend((LITERAL, ord(this)))
                    source.seek(here)
                    continue
                if lo:
                    min = int(lo)
                    if min >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if hi:
                    max = int(hi)
                    if max >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if max < min:
                    raise error("bad repeat interval")
            else:
                raise error("not supported")
            # figure out which item to repeat
            if subpattern:
                item = subpattern[-1:]
            else:
                item = None
            if not item or (_len(item) == 1 and item[0][0] == AT):
                raise error("nothing to repeat")
            if item[0][0] in REPEATCODES:
                raise error("multiple repeat")
            if sourcematch("?"):
                subpattern[-1] = (MIN_REPEAT, (min, max, item))
            else:
                subpattern[-1] = (MAX_REPEAT, (min, max, item))

        elif this == ".":
            subpatternappend((ANY, None))

        elif this == "(":
            group = 1
            name = None
            condgroup = None
            if sourcematch("?"):
                group = 0
                # options
                if sourcematch("P"):
                    # python extensions
                    if sourcematch("<"):
                        # named group: skip forward to end of name
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ">":
                                break
                            name = name + char
                        group = 1
                        if not name:
                            raise error("missing group name")
                        if not isname(name):
                            raise error("bad character in group name %r" %
                                        name)
                    elif sourcematch("="):
                        # named backreference
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ")":
                                break
                            name = name + char
                        if not name:
                            raise error("missing group name")
                        if not isname(name):
                            raise error("bad character in backref group name "
                                        "%r" % name)
                        gid = state.groupdict.get(name)
                        if gid is None:
                            msg = "unknown group name: {0!r}".format(name)
                            raise error(msg)
                        if state.lookbehind:
                            import warnings
                            warnings.warn('group references in lookbehind '
                                          'assertions are not supported',
                                          RuntimeWarning)
                        subpatternappend((GROUPREF, gid))
                        continue
                    else:
                        char = sourceget()
                        if char is None:
                            raise error("unexpected end of pattern")
                        raise error("unknown specifier: ?P%s" % char)
                elif sourcematch(":"):
                    # non-capturing group
                    group = 2
                elif sourcematch("#"):
                    # comment
                    while 1:
                        if source.next is None or source.next == ")":
                            break
                        sourceget()
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    continue
                elif source.next in ASSERTCHARS:
                    # lookahead assertions
                    char = sourceget()
                    dir = 1
                    if char == "<":
                        if source.next not in LOOKBEHINDASSERTCHARS:
                            raise error("syntax error")
                        dir = -1  # lookbehind
                        char = sourceget()
                        state.lookbehind += 1
                    p = _parse_sub(source, state)
                    if dir < 0:
                        state.lookbehind -= 1
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    if char == "=":
                        subpatternappend((ASSERT, (dir, p)))
                    else:
                        subpatternappend((ASSERT_NOT, (dir, p)))
                    continue
                elif sourcematch("("):
                    # conditional backreference group
                    condname = ""
                    while 1:
                        char = sourceget()
                        if char is None:
                            raise error("unterminated name")
                        if char == ")":
                            break
                        condname = condname + char
                    group = 2
                    if not condname:
                        raise error("missing group name")
                    if isname(condname):
                        condgroup = state.groupdict.get(condname)
                        if condgroup is None:
                            msg = "unknown group name: {0!r}".format(condname)
                            raise error(msg)
                    else:
                        try:
                            condgroup = int(condname)
                        except ValueError:
                            raise error("bad character in group name")
                    if state.lookbehind:
                        import warnings
                        warnings.warn('group references in lookbehind '
                                      'assertions are not supported',
                                      RuntimeWarning)
                else:
                    # flags
                    if not source.next in FLAGS:
                        raise error("unexpected end of pattern")
                    while source.next in FLAGS:
                        state.flags = state.flags | FLAGS[sourceget()]
            if group:
                # parse group contents
                if group == 2:
                    # anonymous group
                    group = None
                else:
                    group = state.opengroup(name)
                if condgroup:
                    p = _parse_sub_cond(source, state, condgroup)
                else:
                    p = _parse_sub(source, state)
                if not sourcematch(")"):
                    raise error("unbalanced parenthesis")
                if group is not None:
                    state.closegroup(group)
                subpatternappend((SUBPATTERN, (group, p)))
            else:
                # flags-only group like (?i): nothing to parse, just
                # require the closing parenthesis
                while 1:
                    char = sourceget()
                    if char is None:
                        raise error("unexpected end of pattern")
                    if char == ")":
                        break
                    raise error("unknown extension")

        elif this == "^":
            subpatternappend((AT, AT_BEGINNING))

        elif this == "$":
            subpattern.append((AT, AT_END))

        elif this and this[0] == "\\":
            code = _escape(source, this, state)
            subpatternappend(code)

        else:
            raise error("parser error")

    return subpattern
def parse(str, flags=0, pattern=None):
    # Parse an 're' pattern into a SubPattern of (opcode, argument)
    # tuples.  *pattern* is an optional pre-built Pattern state object
    # (used when compiling with scanner support).
    source = Tokenizer(str)

    if pattern is None:
        pattern = Pattern()
    pattern.flags = flags
    pattern.str = str

    p = _parse_sub(source, pattern, 0)

    tail = source.get()
    if tail == ")":
        raise error("unbalanced parenthesis")
    elif tail:
        raise error("bogus characters at end of regular expression")

    if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
        # the VERBOSE flag was switched on inside the pattern.  to be
        # on the safe side, we'll parse the whole thing again...
        return parse(str, p.pattern.flags)

    if flags & SRE_FLAG_DEBUG:
        p.dump()

    return p
def parse_template(source, pattern):
    # Parse an 're' replacement string into a (groups, literals) pair:
    # *literals* is a list of literal chunks (None where a group
    # reference goes) and *groups* is a list of (index, group-number)
    # pairs pointing into that list.
    s = Tokenizer(source)
    sget = s.get
    p = []
    a = p.append
    def literal(literal, p=p, pappend=a):
        # coalesce adjacent literal characters into one entry
        if p and p[-1][0] is LITERAL:
            p[-1] = LITERAL, p[-1][1] + literal
        else:
            pappend((LITERAL, literal))
    sep = source[:0]
    # choose the character constructor matching the template's type
    # (str vs unicode under Python 2)
    if type(sep) is type(""):
        makechar = chr
    else:
        makechar = unichr
    while 1:
        this = sget()
        if this is None:
            break  # end of replacement string
        if this and this[0] == "\\":
            # group reference or escape
            c = this[1:2]
            if c == "g":
                # \g<name> or \g<number>
                name = ""
                if s.match("<"):
                    while 1:
                        char = sget()
                        if char is None:
                            raise error("unterminated group name")
                        if char == ">":
                            break
                        name = name + char
                if not name:
                    raise error("missing group name")
                try:
                    index = int(name)
                    if index < 0:
                        raise error("negative group number")
                except ValueError:
                    if not isname(name):
                        raise error("bad character in group name")
                    try:
                        index = pattern.groupindex[name]
                    except KeyError:
                        msg = "unknown group name: {0!r}".format(name)
                        raise IndexError(msg)
                a((MARK, index))
            elif c == "0":
                # \0 plus up to two octal digits: octal escape
                if s.next in OCTDIGITS:
                    this = this + sget()
                    if s.next in OCTDIGITS:
                        this = this + sget()
                literal(makechar(int(this[1:], 8) & 0xff))
            elif c in DIGITS:
                # decimal group reference, or three octal digits
                isoctal = False
                if s.next in DIGITS:
                    this = this + sget()
                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
                        s.next in OCTDIGITS):
                        this = this + sget()
                        isoctal = True
                        literal(makechar(int(this[1:], 8) & 0xff))
                if not isoctal:
                    a((MARK, int(this[1:])))
            else:
                try:
                    this = makechar(ESCAPES[this][1])
                except KeyError:
                    pass
                literal(this)
        else:
            literal(this)
    # convert template to groups and literals lists
    i = 0
    groups = []
    groupsappend = groups.append
    literals = [None] * len(p)
    for c, s in p:
        if c is MARK:
            groupsappend((i, s))
            # literal[i] is already None
        else:
            literals[i] = s
        i = i + 1
    return groups, literals
def expand_template(template, match):
    # Apply a template parsed by parse_template() to a match object:
    # fill the group slots with the matched text and join everything.
    g = match.group
    sep = match.string[:0]
    groups, literals = template
    literals = literals[:]
    try:
        for index, group in groups:
            literals[index] = s = g(group)
            if s is None:
                raise error("unmatched group")
    except IndexError:
        raise error("invalid group reference")
    return sep.join(literals)
|
|
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 roger
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dequis
# Copyright (c) 2015 Dario Giovannetti
# Copyright (c) 2015 Alexander Lozovskoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import contextlib
import xcffib
import xcffib.xproto
from . import command
from . import hook
from . import window
from . import utils
from .log_utils import logger
class _Group(command.CommandObject):
    """
    A group is a container for a bunch of windows, analogous to workspaces
    in other window managers. Each client window managed by the window
    manager belongs to exactly one group.
    """
    def __init__(self, name, layout=None):
        self.name = name
        self.customLayout = layout  # will be set on _configure
        self.windows = set()
        self.qtile = None
        self.layouts = []
        self.floating_layout = None
        # self.focusHistory lists the group's windows in the order they
        # received focus, from the oldest (first item) to the currently
        # focused window (last item); NB the list does *not* contain any
        # windows that never received focus; refer to self.windows for the
        # complete set
        self.focusHistory = []
        self.screen = None
        self.currentLayout = None

    def _configure(self, layouts, floating_layout, qtile):
        # Late initialisation once the qtile instance exists: reset all
        # state and clone the configured layouts for this group.
        self.screen = None
        self.currentLayout = 0
        self.focusHistory = []
        self.windows = set()
        self.qtile = qtile
        self.layouts = [i.clone(self) for i in layouts]
        self.floating_layout = floating_layout.clone(self)
        if self.customLayout is not None:
            self.layout = self.customLayout
            self.customLayout = None

    @property
    def currentWindow(self):
        # The most recently focused window, or None if no window in this
        # group has ever been focused.
        try:
            return self.focusHistory[-1]
        except IndexError:
            # no window has focus
            return None

    @currentWindow.setter
    def currentWindow(self, win):
        # Move *win* to the end of the focus history (most recent).
        try:
            self.focusHistory.remove(win)
        except ValueError:
            # win has never received focus before
            pass
        self.focusHistory.append(win)

    def _remove_from_focus_history(self, win):
        try:
            index = self.focusHistory.index(win)
        except ValueError:
            # win has never received focus
            return False
        else:
            del self.focusHistory[index]
            # return True if win was the last item (i.e. it was currentWindow)
            return index == len(self.focusHistory)

    @property
    def layout(self):
        return self.layouts[self.currentLayout]

    @layout.setter
    def layout(self, layout):
        """
        "layout" is a string with matching the name of a Layout object.
        """
        for index, obj in enumerate(self.layouts):
            if obj.name == layout:
                self.currentLayout = index
                hook.fire(
                    "layout_change",
                    self.layouts[self.currentLayout],
                    self
                )
                self.layoutAll()
                return
        raise ValueError("No such layout: %s" % layout)

    def toLayoutIndex(self, index):
        # Switch to the layout at *index*: hide the old one, fire the
        # layout_change hook, re-layout, then show the new one.
        assert 0 <= index < len(self.layouts), "layout index out of bounds"
        self.layout.hide()
        self.currentLayout = index
        hook.fire("layout_change", self.layouts[self.currentLayout], self)
        self.layoutAll()
        screen = self.screen.get_rect()
        self.layout.show(screen)

    def nextLayout(self):
        self.toLayoutIndex((self.currentLayout + 1) % (len(self.layouts)))

    def prevLayout(self):
        self.toLayoutIndex((self.currentLayout - 1) % (len(self.layouts)))

    def layoutAll(self, warp=False):
        """
        Layout the floating layer, then the current layout.
        If we have have a currentWindow give it focus, optionally
        moving warp to it.
        """
        if self.screen and len(self.windows):
            # suppress EnterNotify events while windows are being moved
            # around, so focus does not follow the resizing
            with self.disableMask(xcffib.xproto.EventMask.EnterWindow):
                normal = [x for x in self.windows if not x.floating]
                floating = [
                    x for x in self.windows
                    if x.floating and not x.minimized
                ]
                screen = self.screen.get_rect()
                if normal:
                    try:
                        self.layout.layout(normal, screen)
                    except:
                        # a broken user layout must not take down the WM;
                        # log and carry on
                        logger.exception("Exception in layout %s",
                                         self.layout.name)
                if floating:
                    self.floating_layout.layout(floating, screen)
                if self.currentWindow and \
                        self.screen == self.qtile.currentScreen:
                    self.currentWindow.focus(warp)

    def _setScreen(self, screen):
        """
        Set this group's screen to new_screen
        """
        if screen == self.screen:
            return
        self.screen = screen
        if self.screen:
            # move all floating guys offset to new screen
            self.floating_layout.to_screen(self.screen)
            self.layoutAll()
            rect = self.screen.get_rect()
            self.floating_layout.show(rect)
            self.layout.show(rect)
        else:
            self.hide()

    def hide(self):
        # Detach from the screen and hide every window, masking events
        # so the hides do not generate spurious focus changes.
        self.screen = None
        with self.disableMask(xcffib.xproto.EventMask.EnterWindow |
                              xcffib.xproto.EventMask.FocusChange |
                              xcffib.xproto.EventMask.LeaveWindow):
            for i in self.windows:
                i.hide()
            self.layout.hide()

    @contextlib.contextmanager
    def disableMask(self, mask):
        # Temporarily disable the given X event mask on every window in
        # the group for the duration of the with-block.
        for i in self.windows:
            i._disableMask(mask)
        yield
        for i in self.windows:
            i._resetMask()

    def focus(self, win, warp=True):
        """
        if win is in the group, blur any windows and call
        ``focus`` on the layout (in case it wants to track
        anything), fire focus_change hook and invoke layoutAll.

        warp - warp pointer to win. This should basically always be True,
        unless the focus event is coming from something like EnterNotify,
        where the user is actively using the mouse or on full screen
        layouts where only one window is "maximized" at a time, and it
        doesn't make sense for the mouse to automatically move.
        """
        if self.qtile._drag:
            # don't change focus while dragging windows
            return
        if win:
            if win not in self.windows:
                return
            self.currentWindow = win
            if win.floating:
                for l in self.layouts:
                    l.blur()
                self.floating_layout.focus(win)
            else:
                self.floating_layout.blur()
                for l in self.layouts:
                    l.focus(win)
        hook.fire("focus_change")
        # !!! note that warp isn't hooked up now
        self.layoutAll(warp)

    def info(self):
        # Serializable snapshot of this group's state (for the command
        # interface).
        return dict(
            name=self.name,
            focus=self.currentWindow.name if self.currentWindow else None,
            windows=[i.name for i in self.windows],
            focusHistory=[i.name for i in self.focusHistory],
            layout=self.layout.name,
            layouts=[l.name for l in self.layouts],
            floating_info=self.floating_layout.info(),
            screen=self.screen.index if self.screen else None
        )

    def add(self, win, focus=True):
        # Adopt *win* into this group, deciding up front whether it
        # should float (fullscreen hint or floating-rule match).
        hook.fire("group_window_add")
        self.windows.add(win)
        win.group = self
        try:
            if 'fullscreen' in win.window.get_net_wm_state() and \
                    self.qtile.config.auto_fullscreen:
                win._float_state = window.FULLSCREEN
            elif self.floating_layout.match(win):
                # !!! tell it to float, can't set floating
                # because it's too early
                # so just set the flag underneath
                win._float_state = window.FLOATING
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            pass  # doesn't matter
        if win.floating:
            self.floating_layout.add(win)
        else:
            for i in self.layouts:
                i.add(win)
        if focus:
            self.focus(win, True)

    def remove(self, win):
        # Remove *win* from the group and, if it had focus, pick the
        # next window to focus.
        self.windows.remove(win)
        hadfocus = self._remove_from_focus_history(win)
        win.group = None
        nextfocus = None
        if win.floating:
            nextfocus = self.floating_layout.remove(win)
            if not hadfocus:
                # For example a notification
                return
            nextfocus = nextfocus or \
                self.currentWindow or \
                self.layout.focus_first() or \
                self.floating_layout.focus_first()
        else:
            for i in self.layouts:
                if i is self.layout:
                    nextfocus = i.remove(win)
                else:
                    i.remove(win)
            if not hadfocus:
                return
            nextfocus = nextfocus or \
                self.floating_layout.focus_first() or \
                self.currentWindow or \
                self.layout.focus_first()
        self.focus(nextfocus, True)
        # else: TODO: change focus

    def mark_floating(self, win, floating):
        # Move *win* between the tiling layouts and the floating layer.
        if floating:
            if win in self.floating_layout.clients:
                # already floating
                pass
            else:
                for i in self.layouts:
                    i.remove(win)
                    if win is self.currentWindow:
                        i.blur()
                self.floating_layout.add(win)
                if win is self.currentWindow:
                    self.floating_layout.focus(win)
        else:
            self.floating_layout.remove(win)
            self.floating_layout.blur()
            for i in self.layouts:
                i.add(win)
                if win is self.currentWindow:
                    i.focus(win)
        self.layoutAll()

    def _items(self, name):
        # command.CommandObject navigation: enumerate selectable children.
        if name == "layout":
            return (True, list(range(len(self.layouts))))
        elif name == "window":
            return (True, [i.window.wid for i in self.windows])
        elif name == "screen":
            return (True, None)

    def _select(self, name, sel):
        # command.CommandObject navigation: resolve one child by selector.
        if name == "layout":
            if sel is None:
                return self.layout
            else:
                return utils.lget(self.layouts, sel)
        elif name == "window":
            if sel is None:
                return self.currentWindow
            else:
                for i in self.windows:
                    if i.window.wid == sel:
                        return i
        elif name == "screen":
            return self.screen

    def cmd_setlayout(self, layout):
        self.layout = layout

    def cmd_info(self):
        """
        Returns a dictionary of info for this group.
        """
        return self.info()

    def cmd_toscreen(self, screen=None):
        """
        Pull a group to a specified screen.

        - screen: Screen offset. If not specified,
          we assume the current screen.

        Pull group to the current screen:
            toscreen()

        Pull group to screen 0:
            toscreen(0)
        """
        if screen is None:
            screen = self.qtile.currentScreen
        else:
            screen = self.qtile.screens[screen]
        screen.setGroup(self)

    def _dirGroup(self, direction, skip_empty=False, skip_managed=False):
        """
        Find a group walking the groups list in the specified
        direction.

        skip_empty skips the empty groups
        skip_managed skips the groups that have a screen
        """
        def match(group):
            if group is self:
                return True
            if skip_empty and not group.windows:
                return False
            if skip_managed and group.screen:
                return False
            return True
        groups = [group for group in self.qtile.groups if match(group)]
        index = (groups.index(self) + direction) % len(groups)
        return groups[index]

    def prevGroup(self, skip_empty=False, skip_managed=False):
        return self._dirGroup(-1, skip_empty, skip_managed)

    def nextGroup(self, skip_empty=False, skip_managed=False):
        return self._dirGroup(1, skip_empty, skip_managed)

    def cmd_unminimize_all(self):
        """
        Unminimise all windows in this group.
        """
        for w in self.windows:
            w.minimized = False
        self.layoutAll()

    def cmd_next_window(self):
        # Focus the next window, cycling through the appropriate layer
        # (floating vs tiled) first.
        # NOTE(review): self.currentWindow may be None even when windows
        # is non-empty (no window ever focused) — the .floating access
        # below would then raise AttributeError; confirm.
        if not self.windows:
            return
        if self.currentWindow.floating:
            nxt = self.floating_layout.focus_next(self.currentWindow) or \
                self.layout.focus_first() or \
                self.floating_layout.focus_first()
        else:
            nxt = self.layout.focus_next(self.currentWindow) or \
                self.floating_layout.focus_first() or \
                self.layout.focus_first()
        self.focus(nxt, True)

    def cmd_prev_window(self):
        # Mirror of cmd_next_window, walking backwards.
        # NOTE(review): same potential None deref as cmd_next_window.
        if not self.windows:
            return
        if self.currentWindow.floating:
            nxt = self.floating_layout.focus_previous(self.currentWindow) or \
                self.layout.focus_last() or \
                self.floating_layout.focus_last()
        else:
            nxt = self.layout.focus_previous(self.currentWindow) or \
                self.floating_layout.focus_last() or \
                self.layout.focus_last()
        self.focus(nxt, True)

    def cmd_switch_groups(self, name):
        """
        Switch position of current group with name
        """
        self.qtile.cmd_switch_groups(self.name, name)

    def __repr__(self):
        return "<group.Group (%r)>" % self.name
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
from lxml import etree
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import unit
from nova import utils
from nova.virt import images
# Module configuration: option definitions registered with oslo.config.
libvirt_opts = [
    cfg.BoolOpt('libvirt_snapshot_compression',
                default=False,
                help='Compress snapshot images when possible. This '
                     'currently applies exclusively to qcow2 images'),
]

CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
# instances_path is defined by the compute manager; imported so this
# module can reference it via CONF.
CONF.import_opt('instances_path', 'nova.compute.manager')

LOG = logging.getLogger(__name__)
def execute(*args, **kwargs):
    # Thin indirection over utils.execute so tests can stub this
    # module's command execution in one place.
    return utils.execute(*args, **kwargs)
def get_iscsi_initiator():
    """Get iscsi initiator name for this machine."""
    # NOTE(vish) openiscsi stores initiator name in a file that
    # needs root permission to read.
    try:
        contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
    except exception.FileNotFound:
        return None

    for line in contents.split('\n'):
        if not line.startswith('InitiatorName='):
            continue
        return line[line.index('=') + 1:].strip()
def get_fc_hbas():
    """Get the Fibre Channel HBA information.

    Runs ``systool -c fc_host -v`` and parses the output into a list of
    dicts, one per HBA port; keys are the systool attribute names with
    spaces removed.  Returns [] when systool is not installed.
    """
    out = None
    try:
        out, err = execute('systool', '-c', 'fc_host', '-v',
                           run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # This handles the case where rootwrap is used
        # and systool is not installed
        # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND:
        if exc.exit_code == 96:
            LOG.warn(_("systool is not installed"))
            return []
        # NOTE(review): other exit codes fall through with out=None and
        # get reported as "Cannot find any Fibre Channel HBAs" below,
        # masking the real failure — confirm whether re-raising is wanted.
    except OSError as exc:
        # This handles the case where rootwrap is NOT used
        # and systool is not installed
        if exc.errno == errno.ENOENT:
            LOG.warn(_("systool is not installed"))
            return []

    if out is None:
        raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))

    lines = out.split('\n')
    # ignore the first 2 lines
    lines = lines[2:]
    hbas = []
    hba = {}
    lastline = None
    for line in lines:
        line = line.strip()
        # 2 newlines denotes a new hba port
        if line == '' and lastline == '':
            if len(hba) > 0:
                hbas.append(hba)
                hba = {}
        else:
            val = line.split('=')
            if len(val) == 2:
                key = val[0].strip().replace(" ", "")
                value = val[1].strip()
                hba[key] = value.replace('"', '')
        lastline = line
    return hbas
def get_fc_hbas_info():
    """Get Fibre Channel WWNs and device paths from the system, if any."""
    # Note modern linux kernels contain the FC HBA's in /sys
    # and are obtainable via the systool app
    return [{'port_name': hba['port_name'].replace('0x', ''),
             'node_name': hba['node_name'].replace('0x', ''),
             'host_device': hba['ClassDevice'],
             'device_path': hba['ClassDevicepath']}
            for hba in get_fc_hbas()]
def get_fc_wwpns():
    """Get Fibre Channel WWPNs from the system, if any."""
    # Note modern linux kernels contain the FC HBA's in /sys
    # and are obtainable via the systool app
    wwpns = []
    for adapter in get_fc_hbas() or []:
        if adapter['port_state'] != 'Online':
            continue
        wwpns.append(adapter['port_name'].replace('0x', ''))
    return wwpns
def get_fc_wwnns():
    """Get Fibre Channel WWNNs from the system, if any."""
    # Note modern linux kernels contain the FC HBA's in /sys
    # and are obtainable via the systool app
    wwnns = []
    for adapter in get_fc_hbas() or []:
        if adapter['port_state'] != 'Online':
            continue
        wwnns.append(adapter['node_name'].replace('0x', ''))
    return wwnns
def create_image(disk_format, path, size):
    """Create a disk image

    :param disk_format: Disk image format (as known by qemu-img)
    :param path: Desired location of the disk image
    :param size: Desired size of disk image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    """
    execute('qemu-img', 'create', '-f', disk_format, path, size)
def create_cow_image(backing_file, path, size=None):
    """Create COW image

    Creates a COW image with the given backing file

    :param backing_file: Existing image on which to base the COW image
    :param path: Desired location of the COW image
    :param size: Desired virtual size of the image (optional, passed to
                 qemu-img as the ``size`` option)
    """
    base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
    cow_opts = []
    if backing_file:
        cow_opts += ['backing_file=%s' % backing_file]
        base_details = images.qemu_img_info(backing_file)
    else:
        base_details = None
    # This doesn't seem to get inherited so force it to...
    # http://paste.ubuntu.com/1213295/
    # TODO(harlowja) probably file a bug against qemu-img/qemu
    if base_details and base_details.cluster_size is not None:
        cow_opts += ['cluster_size=%s' % base_details.cluster_size]
    # For now don't inherit this due the following discussion...
    # See: http://www.gossamer-threads.com/lists/openstack/dev/10592
    # if 'preallocation' in base_details:
    #     cow_opts += ['preallocation=%s' % base_details['preallocation']]
    if base_details and base_details.encryption:
        cow_opts += ['encryption=%s' % base_details.encryption]
    if size is not None:
        cow_opts += ['size=%s' % size]
    if cow_opts:
        # Format as a comma separated list
        csv_opts = ",".join(cow_opts)
        cow_opts = ['-o', csv_opts]
    cmd = base_cmd + cow_opts + [path]
    execute(*cmd)
def create_lvm_image(vg, lv, size, sparse=False):
    """Create LVM image.

    Creates a LVM image with given size.

    :param vg: existing volume group which should hold this image
    :param lv: name for this image (logical volume)
    :size: size of image in bytes
    :sparse: create sparse logical volume
    """
    vg_info = get_volume_group_info(vg)
    free_space = vg_info['free']

    def check_size(vg, lv, size):
        # fail early with a descriptive error instead of letting
        # lvcreate fail on insufficient space
        if size > free_space:
            raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
                                 ' Only %(free_space)db available,'
                                 ' but %(size)db required'
                                 ' by volume %(lv)s.') %
                               {'vg': vg,
                                'free_space': free_space,
                                'size': size,
                                'lv': lv})

    if sparse:
        # only the initial physical allocation is size-checked; a sparse
        # volume may later grow beyond the currently free space
        preallocated_space = 64 * unit.Mi
        check_size(vg, lv, preallocated_space)
        if free_space < size:
            LOG.warning(_('Volume group %(vg)s will not be able'
                          ' to hold sparse volume %(lv)s.'
                          ' Virtual volume size is %(size)db,'
                          ' but free space on volume group is'
                          ' only %(free_space)db.'),
                        {'vg': vg,
                         'free_space': free_space,
                         'size': size,
                         'lv': lv})

        cmd = ('lvcreate', '-L', '%db' % preallocated_space,
               '--virtualsize', '%db' % size, '-n', lv, vg)
    else:
        check_size(vg, lv, size)
        cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
    execute(*cmd, run_as_root=True, attempts=3)
def import_rbd_image(*args):
    """Import an image into a ceph rbd volume.

    :param args: arguments forwarded verbatim to `rbd import`
                 (typically the source path and destination volume name)
    """
    execute('rbd', 'import', *args)
def list_rbd_volumes(pool):
    """Return the names of all rbd volumes in the given ceph pool.

    :param pool: ceph pool name
    """
    out, _ = utils.execute('rbd', '-p', pool, 'ls')
    volume_names = []
    for raw_line in out.splitlines():
        volume_names.append(raw_line.strip())
    return volume_names
def remove_rbd_volumes(pool, *names):
    """Remove one or more rbd volume."""
    for volume_name in names:
        removal_cmd = ('rbd', '-p', pool, 'rm', volume_name)
        try:
            execute(*removal_cmd, attempts=3, run_as_root=True)
        except processutils.ProcessExecutionError:
            # Best effort: log and carry on with the remaining volumes.
            LOG.warn(_("rbd remove %(name)s in pool %(pool)s failed"),
                     {'name': volume_name, 'pool': pool})
def get_volume_group_info(vg):
    """Return free/used/total space info for a volume group in bytes

    :param vg: volume group name
    :returns: A dict containing:
             :total: How big the filesystem is (in bytes)
             :free: How much space is free (in bytes)
             :used: How much space is used (in bytes)
    :raises: RuntimeError when *vg* is not a LVM volume group
    """
    out, _ = execute('vgs', '--noheadings', '--nosuffix',
                     '--separator', '|',
                     '--units', 'b', '-o', 'vg_size,vg_free', vg,
                     run_as_root=True)
    fields = out.split('|')
    if len(fields) != 2:
        raise RuntimeError(_("vg %s must be LVM volume group") % vg)
    total = int(fields[0])
    free = int(fields[1])
    return {'total': total,
            'free': free,
            'used': total - free}
def list_logical_volumes(vg):
    """List logical volumes paths for given volume group.

    :param vg: volume group name
    """
    out, _err = execute('lvs', '--noheadings', '-o', 'lv_name', vg,
                        run_as_root=True)
    lv_names = []
    for raw_line in out.splitlines():
        lv_names.append(raw_line.strip())
    return lv_names
def logical_volume_info(path):
    """Get logical volume info.

    :param path: logical volume path
    :returns: dict mapping lvs/vgs field names to their values for *path*
    :raises: RuntimeError when *path* is not a LVM logical volume
    """
    out, _err = execute('lvs', '-o', 'vg_all,lv_all',
                        '--separator', '|', path, run_as_root=True)
    rows = [line.split('|') for line in out.splitlines()]
    # Exactly one header row and one value row is expected.
    if len(rows) != 2:
        raise RuntimeError(_("Path %s must be LVM logical volume") % path)
    header_row, value_row = rows
    return dict(zip(header_row, value_row))
def logical_volume_size(path):
    """Get logical volume size in bytes.

    :param path: logical volume path
    """
    # TODO(p-draigbrady) Possibly replace with the more general
    # use of blockdev --getsize64 in future
    size_output, _err = execute('lvs', '-o', 'lv_size', '--noheadings',
                                '--units', 'b', '--nosuffix', path,
                                run_as_root=True)
    return int(size_output)
def clear_logical_volume(path):
    """Obfuscate the logical volume.

    Overwrites the entire volume with zeros so previously stored data
    cannot be read by the next owner of the extents.

    :param path: logical volume path
    """
    # TODO(p-draigbrady): We currently overwrite with zeros
    # but we may want to make this configurable in future
    # for more or less security conscious setups.
    vol_size = logical_volume_size(path)
    bs = unit.Mi
    # First (large-block) pass uses O_DIRECT; later passes fdatasync instead.
    direct_flags = ('oflag=direct',)
    sync_flags = ()
    remaining_bytes = vol_size
    # The loop caters for versions of dd that
    # don't support the iflag=count_bytes option.
    # NOTE: relies on Python 2 integer division so the block counts below
    # are whole numbers.
    while remaining_bytes:
        zero_blocks = remaining_bytes / bs
        seek_blocks = (vol_size - remaining_bytes) / bs
        zero_cmd = ('dd', 'bs=%s' % bs,
                    'if=/dev/zero', 'of=%s' % path,
                    'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
        zero_cmd += direct_flags
        zero_cmd += sync_flags
        if zero_blocks:
            utils.execute(*zero_cmd, run_as_root=True)
        # The tail the current block size couldn't cover is finished with
        # progressively smaller blocks (Mi -> Ki -> 1 byte).
        remaining_bytes %= bs
        bs /= 1024  # Limit to 3 iterations
        # Use O_DIRECT with initial block size and fdatasync otherwise
        direct_flags = ()
        sync_flags = ('conv=fdatasync',)
def remove_logical_volumes(*paths):
    """Remove one or more logical volume."""
    if not paths:
        return
    # Scrub each volume before handing the extents back to the group.
    for lv_path in paths:
        clear_logical_volume(lv_path)
    lvremove_cmd = ('lvremove', '-f') + paths
    execute(*lvremove_cmd, attempts=3, run_as_root=True)
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
    """Pick the libvirt primary backend driver name

    If the hypervisor supports multiple backend drivers, then the name
    attribute selects the primary backend driver name, while the optional
    type attribute provides the sub-type. For example, xen supports a name
    of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2",
    while qemu only supports a name of "qemu", but multiple types including
    "raw", "bochs", "qcow2", and "qed".

    :param hypervisor_version: numeric hypervisor version from libvirt
    :param is_block_dev: True when the disk is a raw block device
    :returns: driver_name or None
    """
    virt_type = CONF.libvirt_type
    if virt_type == "xen":
        if is_block_dev:
            return "phy"
        # 4000000 == 4.0.0
        return "tap" if hypervisor_version == 4000000 else "tap2"
    if virt_type in ('kvm', 'qemu'):
        return "qemu"
    # UML doesn't want a driver_name set
    return None
def get_disk_size(path):
    """Get the (virtual) size of a disk image

    :param path: Path to the disk image
    :returns: Size (in bytes) of the given disk image as it would be seen
              by a virtual machine.
    """
    return int(images.qemu_img_info(path).virtual_size)
def get_disk_backing_file(path, basename=True):
    """Get the backing file of a disk image

    :param path: Path to the disk image
    :param basename: when True, strip the directory part of the result
    :returns: a path to the image's backing store
    """
    backing = images.qemu_img_info(path).backing_file
    if not backing:
        # No backing store (or empty value); return it untouched.
        return backing
    return os.path.basename(backing) if basename else backing
def copy_image(src, dest, host=None):
    """Copy a disk image to an existing directory

    :param src: Source image
    :param dest: Destination path
    :param host: Remote host; when given the copy goes over the network,
                 preferring rsync and falling back to scp
    """
    if not host:
        # We shell out to cp because that will intelligently copy
        # sparse files. I.E. holes will not be written to DEST,
        # rather recreated efficiently. In addition, since
        # coreutils 8.11, holes can be read efficiently too.
        execute('cp', src, dest)
    else:
        dest = "%s:%s" % (host, dest)
        # Try rsync first as that can compress and create sparse dest files.
        # Note however that rsync currently doesn't read sparse files
        # efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
        # At least network traffic is mitigated with compression.
        try:
            # Do a relatively light weight test first, so that we
            # can fall back to scp, without having run out of space
            # on the destination for example.
            execute('rsync', '--sparse', '--compress', '--dry-run', src, dest)
        except processutils.ProcessExecutionError:
            # rsync unavailable or refused; scp is the lowest common
            # denominator fallback.
            execute('scp', src, dest)
        else:
            execute('rsync', '--sparse', '--compress', src, dest)
def write_to_file(path, contents, umask=None):
    """Write the given contents to a file

    :param path: Destination file
    :param contents: Desired contents of the file
    :param umask: Umask to set when creating this file (will be reset)
    """
    previous_umask = None
    if umask:
        previous_umask = os.umask(umask)
    try:
        with open(path, 'w') as f:
            f.write(contents)
    finally:
        # Restore the process umask even if the write failed.
        if previous_umask is not None:
            os.umask(previous_umask)
def chown(path, owner):
    """Change ownership of file or directory

    :param path: File or directory whose ownership to change
    :param owner: Desired new owner (given as uid or username)
    """
    chown_cmd = ('chown', owner, path)
    execute(*chown_cmd, run_as_root=True)
def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
    """Extract a snapshot from a disk image.

    Note that nobody should write to the disk image during this operation.

    :param disk_path: Path to disk image
    :param source_fmt: Format of the source image
    :param out_path: Desired path of extracted snapshot
    :param dest_fmt: Desired format of the extracted snapshot
    """
    # NOTE(markmc): ISO is just raw to qemu-img
    target_fmt = 'raw' if dest_fmt == 'iso' else dest_fmt
    convert_cmd = ['qemu-img', 'convert', '-f', source_fmt, '-O', target_fmt]
    # Conditionally enable compression of snapshots.
    if CONF.libvirt_snapshot_compression and target_fmt == "qcow2":
        convert_cmd.append('-c')
    convert_cmd.extend([disk_path, out_path])
    execute(*convert_cmd)
def load_file(path):
    """Read contents of file

    :param path: File to read
    :returns: the complete file contents as a string
    """
    with open(path, 'r') as source:
        data = source.read()
    return data
def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    # Python 2 `file` builtin; equivalent to open(*args, **kwargs).
    return file(*args, **kwargs)
def file_delete(path):
    """Delete (unlink) file

    :param path: path of the file to remove

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    return os.unlink(path)
def find_disk(virt_dom):
    """Find root device path for instance

    May be file or device

    :param virt_dom: libvirt domain object for the instance
    :returns: root disk path — a file path, a block device path, or an
              'rbd:<name>' specifier
    :raises: RuntimeError if no disk source is present in the domain XML
    """
    xml_desc = virt_dom.XMLDesc(0)
    domain = etree.fromstring(xml_desc)
    if CONF.libvirt_type == 'lxc':
        # LXC guests expose their rootfs as a filesystem mount rather than a
        # disk device; derive the disk location from the rootfs directory.
        source = domain.find('devices/filesystem/source')
        disk_path = source.get('dir')
        disk_path = disk_path[0:disk_path.rfind('rootfs')]
        disk_path = os.path.join(disk_path, 'disk')
    else:
        source = domain.find('devices/disk/source')
        # File-backed disks carry 'file'; block-device disks carry 'dev'.
        disk_path = source.get('file') or source.get('dev')
        if not disk_path and CONF.libvirt_images_type == 'rbd':
            # Network (rbd) disks carry the volume name in 'name'.
            disk_path = source.get('name')
            if disk_path:
                disk_path = 'rbd:' + disk_path
    if not disk_path:
        raise RuntimeError(_("Can't retrieve root device path "
                             "from instance libvirt configuration"))
    return disk_path
def get_disk_type(path):
    """Retrieve disk type (raw, qcow2, lvm) for given file."""
    # Device-mapper and rbd paths are recognized by prefix; everything else
    # is probed with qemu-img.
    for prefix, disk_type in (('/dev', 'lvm'), ('rbd:', 'rbd')):
        if path.startswith(prefix):
            return disk_type
    return images.qemu_img_info(path).file_format
def get_fs_info(path):
    """Get free/used/total space info for a filesystem

    :param path: Any dirent on the filesystem
    :returns: A dict containing:
             :free: How much space is free (in bytes)
             :used: How much space is used (in bytes)
             :total: How big the filesystem is (in bytes)
    """
    stats = os.statvfs(path)
    frag_size = stats.f_frsize
    total_bytes = frag_size * stats.f_blocks
    # 'free' is what an unprivileged caller can use (f_bavail), while
    # 'used' is measured against all blocks including the root reserve.
    free_bytes = frag_size * stats.f_bavail
    used_bytes = frag_size * (stats.f_blocks - stats.f_bfree)
    return {'total': total_bytes,
            'free': free_bytes,
            'used': used_bytes}
def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
    """Grab image.

    :param context: request context for the image service call
    :param target: path the fetched image is written to
    :param image_id: identifier of the image to fetch
    :param user_id: id of the requesting user
    :param project_id: id of the requesting project
    :param max_size: maximum allowed image size (0 presumably means
                     unlimited — confirm against images.fetch_to_raw)
    """
    images.fetch_to_raw(context, image_id, target, user_id, project_id,
                        max_size=max_size)
def get_instance_path(instance, forceold=False, relative=False):
    """Determine the correct path for instance storage.

    This method determines the directory name for instance storage, while
    handling the fact that we changed the naming style to something more
    unique in the grizzly release.

    :param instance: the instance we want a path for
    :param forceold: force the use of the pre-grizzly format
    :param relative: if True, just the relative path is returned
    :returns: a path to store information about that instance
    """
    legacy_path = os.path.join(CONF.instances_path, instance['name'])
    # Keep using the name-based layout when it already exists on disk.
    if forceold or os.path.exists(legacy_path):
        return instance['name'] if relative else legacy_path
    if relative:
        return instance['uuid']
    return os.path.join(CONF.instances_path, instance['uuid'])
|
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import threading
import jsonrpclib
from oslo.config import cfg
from neutron.common import constants as n_const
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2.drivers.arista import config # noqa
from neutron.plugins.ml2.drivers.arista import db
from neutron.plugins.ml2.drivers.arista import exceptions as arista_exc
LOG = logging.getLogger(__name__)
# Logged whenever a JSON RPC call to the Arista EOS endpoint fails; the
# SyncService forces a full resync on the next cycle in that case.
EOS_UNREACHABLE_MSG = _('Unable to reach EOS')
# Fallback VLAN id used when a network carries no segmentation id.
DEFAULT_VLAN = 1
class AristaRPCWrapper(object):
    """Wraps Arista JSON RPC.

    All communications between Neutron and EOS are over JSON RPC.
    EOS - operating system used on Arista hardware
    Command API - JSON RPC API provided by Arista EOS
    """
    def __init__(self):
        # jsonrpclib proxies method calls (runCmds below) straight to the
        # EOS Command API endpoint built by _eapi_host_url().
        self._server = jsonrpclib.Server(self._eapi_host_url())
        self.keystone_conf = cfg.CONF.keystone_authtoken
        self.region = cfg.CONF.ml2_arista.region_name
        self.sync_interval = cfg.CONF.ml2_arista.sync_interval
        # The cli_commands dict stores the mapping between the CLI command key
        # and the actual CLI command.
        self.cli_commands = {}
        self.initialize_cli_commands()
    def _get_exit_mode_cmds(self, modes):
        """Returns a list of 'exit' commands for the modes.

        :param modes: a list of CLI modes to exit out of.
        """
        return ['exit'] * len(modes)
    def initialize_cli_commands(self):
        # Assume no optional CLI commands until check_cli_commands() verifies
        # which ones this EOS release supports.
        self.cli_commands['timestamp'] = []
    def check_cli_commands(self):
        """Checks whether the CLI commands are valid.

        This method tries to execute the commands on EOS and if it succeeds
        the command is stored.
        """
        cmd = ['show openstack config region %s timestamp' % self.region]
        try:
            self._run_eos_cmds(cmd)
            self.cli_commands['timestamp'] = cmd
        except arista_exc.AristaRpcError:
            # Older EOS releases lack the timestamp command; leave the
            # mapping empty so get_region_updated_time() returns None.
            self.cli_commands['timestamp'] = []
            msg = _("'timestamp' command '%s' is not available on EOS") % cmd
            LOG.warn(msg)
    def _keystone_url(self):
        # Identity v2.0 admin endpoint assembled from the keystone_authtoken
        # config section; registered with EOS in register_with_eos().
        keystone_auth_url = ('%s://%s:%s/v2.0/' %
                             (self.keystone_conf.auth_protocol,
                              self.keystone_conf.auth_host,
                              self.keystone_conf.auth_port))
        return keystone_auth_url
    def get_tenants(self):
        """Returns dict of all tenants known by EOS.

        :returns: dictionary containing the networks per tenant
                  and VMs allocated per tenant
        """
        cmds = ['show openstack config region %s' % self.region]
        command_output = self._run_eos_cmds(cmds)
        tenants = command_output[0]['tenants']
        return tenants
    def plug_port_into_network(self, vm_id, host_id, port_id,
                               net_id, tenant_id, port_name, device_owner):
        """Generic routine to plug a port of a VM instance into a network.

        :param vm_id: globally unique identifier for VM instance
        :param host_id: ID of the host where the VM is placed
        :param port_id: globally unique port ID that connects VM to network
        :param net_id: globally unique neutron network identifier
        :param tenant_id: globally unique neutron tenant identifier
        :param port_name: Name of the port - for display purposes
        :param device_owner: Device owner - e.g. compute or network:dhcp
        """
        # Dispatch by ownership; ports owned by anything other than DHCP or
        # compute are intentionally ignored.
        if device_owner == n_const.DEVICE_OWNER_DHCP:
            self.plug_dhcp_port_into_network(vm_id,
                                             host_id,
                                             port_id,
                                             net_id,
                                             tenant_id,
                                             port_name)
        elif device_owner.startswith('compute'):
            self.plug_host_into_network(vm_id,
                                        host_id,
                                        port_id,
                                        net_id,
                                        tenant_id,
                                        port_name)
    def plug_host_into_network(self, vm_id, host, port_id,
                               network_id, tenant_id, port_name):
        """Creates VLAN between TOR and compute host.

        :param vm_id: globally unique identifier for VM instance
        :param host: ID of the host where the VM is placed
        :param port_id: globally unique port ID that connects VM to network
        :param network_id: globally unique neutron network identifier
        :param tenant_id: globally unique neutron tenant identifier
        :param port_name: Name of the port - for display purposes
        """
        cmds = ['tenant %s' % tenant_id,
                'vm id %s hostid %s' % (vm_id, host)]
        if port_name:
            cmds.append('port id %s name "%s" network-id %s' %
                        (port_id, port_name, network_id))
        else:
            cmds.append('port id %s network-id %s' %
                        (port_id, network_id))
        # Leave 'vm' mode and then 'tenant' mode.
        cmds.append('exit')
        cmds.append('exit')
        self._run_openstack_cmds(cmds)
    def plug_dhcp_port_into_network(self, dhcp_id, host, port_id,
                                    network_id, tenant_id, port_name):
        """Creates VLAN between TOR and dhcp host.

        :param dhcp_id: globally unique identifier for dhcp
        :param host: ID of the host where the dhcp is hosted
        :param port_id: globally unique port ID that connects dhcp to network
        :param network_id: globally unique neutron network identifier
        :param tenant_id: globally unique neutron tenant identifier
        :param port_name: Name of the port - for display purposes
        """
        cmds = ['tenant %s' % tenant_id,
                'network id %s' % network_id]
        if port_name:
            cmds.append('dhcp id %s hostid %s port-id %s name "%s"' %
                        (dhcp_id, host, port_id, port_name))
        else:
            cmds.append('dhcp id %s hostid %s port-id %s' %
                        (dhcp_id, host, port_id))
        cmds.append('exit')
        self._run_openstack_cmds(cmds)
    def unplug_host_from_network(self, vm_id, host, port_id,
                                 network_id, tenant_id):
        """Removes previously configured VLAN between TOR and a host.

        :param vm_id: globally unique identifier for VM instance
        :param host: ID of the host where the VM is placed
        :param port_id: globally unique port ID that connects VM to network
        :param network_id: globally unique neutron network identifier
        :param tenant_id: globally unique neutron tenant identifier
        """
        cmds = ['tenant %s' % tenant_id,
                'vm id %s hostid %s' % (vm_id, host),
                'no port id %s' % port_id,
                'exit',
                'exit']
        self._run_openstack_cmds(cmds)
    def unplug_dhcp_port_from_network(self, dhcp_id, host, port_id,
                                      network_id, tenant_id):
        """Removes previously configured VLAN between TOR and a dhcp host.

        :param dhcp_id: globally unique identifier for dhcp
        :param host: ID of the host where the dhcp is hosted
        :param port_id: globally unique port ID that connects dhcp to network
        :param network_id: globally unique neutron network identifier
        :param tenant_id: globally unique neutron tenant identifier
        """
        cmds = ['tenant %s' % tenant_id,
                'network id %s' % network_id,
                'no dhcp id %s port-id %s' % (dhcp_id, port_id),
                'exit']
        self._run_openstack_cmds(cmds)
    def create_network(self, tenant_id, network):
        """Creates a single network on Arista hardware

        :param tenant_id: globally unique neutron tenant identifier
        :param network: dict containing network_id, network_name and
                        segmentation_id
        """
        self.create_network_bulk(tenant_id, [network])
    def create_network_bulk(self, tenant_id, network_list):
        """Creates a network on Arista Hardware

        :param tenant_id: globally unique neutron tenant identifier
        :param network_list: list of dicts containing network_id, network_name
                             and segmentation_id
        """
        cmds = ['tenant %s' % tenant_id]
        # Create a reference to function to avoid name lookups in the loop
        append_cmd = cmds.append
        for network in network_list:
            try:
                append_cmd('network id %s name "%s"' %
                           (network['network_id'], network['network_name']))
            except KeyError:
                # No network_name supplied; create the network without one.
                append_cmd('network id %s' % network['network_id'])
            # Enter segment mode without exiting out of network mode
            if not network['segmentation_id']:
                network['segmentation_id'] = DEFAULT_VLAN
            append_cmd('segment 1 type vlan id %d' %
                       network['segmentation_id'])
        cmds.extend(self._get_exit_mode_cmds(['segment', 'network', 'tenant']))
        self._run_openstack_cmds(cmds)
    def create_network_segments(self, tenant_id, network_id,
                                network_name, segments):
        """Creates a network on Arista Hardware

        Note: This method is not used at the moment. create_network()
        is used instead. This will be used once the support for
        multiple segments is added in Neutron.

        :param tenant_id: globally unique neutron tenant identifier
        :param network_id: globally unique neutron network identifier
        :param network_name: Network name - for display purposes
        :param segments: List of segments in a given network
        """
        if segments:
            cmds = ['tenant %s' % tenant_id,
                    'network id %s name "%s"' % (network_id, network_name)]
            seg_num = 1
            for seg in segments:
                cmds.append('segment %d type %s id %d' % (seg_num,
                            seg['network_type'], seg['segmentation_id']))
                seg_num += 1
            cmds.append('exit')  # exit for segment mode
            cmds.append('exit')  # exit for network mode
            cmds.append('exit')  # exit for tenant mode
            self._run_openstack_cmds(cmds)
    def delete_network(self, tenant_id, network_id):
        """Deletes a specified network for a given tenant

        :param tenant_id: globally unique neutron tenant identifier
        :param network_id: globally unique neutron network identifier
        """
        self.delete_network_bulk(tenant_id, [network_id])
    def delete_network_bulk(self, tenant_id, network_id_list):
        """Deletes the network ids specified for a tenant

        :param tenant_id: globally unique neutron tenant identifier
        :param network_id_list: list of globally unique neutron network
                                identifiers
        """
        cmds = ['tenant %s' % tenant_id]
        for network_id in network_id_list:
            cmds.append('no network id %s' % network_id)
        cmds.extend(self._get_exit_mode_cmds(['network', 'tenant']))
        self._run_openstack_cmds(cmds)
    def delete_vm(self, tenant_id, vm_id):
        """Deletes a VM from EOS for a given tenant

        :param tenant_id : globally unique neutron tenant identifier
        :param vm_id : id of a VM that needs to be deleted.
        """
        self.delete_vm_bulk(tenant_id, [vm_id])
    def delete_vm_bulk(self, tenant_id, vm_id_list):
        """Deletes VMs from EOS for a given tenant

        :param tenant_id : globally unique neutron tenant identifier
        :param vm_id_list : ids of VMs that need to be deleted.
        """
        cmds = ['tenant %s' % tenant_id]
        for vm_id in vm_id_list:
            cmds.append('no vm id %s' % vm_id)
        cmds.extend(self._get_exit_mode_cmds(['vm', 'tenant']))
        self._run_openstack_cmds(cmds)
    def create_vm_port_bulk(self, tenant_id, vm_port_list, vms):
        """Sends a bulk request to create ports.

        :param tenant_id: globally unique neutron tenant identifier
        :param vm_port_list: list of ports that need to be created.
        :param vms: list of vms to which the ports will be attached to.
        """
        cmds = ['tenant %s' % tenant_id]
        # Create a reference to function to avoid name lookups in the loop
        append_cmd = cmds.append
        for port in vm_port_list:
            try:
                vm = vms[port['device_id']]
            except KeyError:
                # Port references a VM we do not know about; skip it rather
                # than aborting the whole bulk request.
                msg = _("VM id %(vmid)s not found for port %(portid)s") % {
                    'vmid': port['device_id'],
                    'portid': port['id']}
                LOG.warn(msg)
                continue
            port_name = '' if 'name' not in port else 'name "%s"' % (
                port['name']
            )
            if port['device_owner'] == n_const.DEVICE_OWNER_DHCP:
                append_cmd('network id %s' % port['network_id'])
                append_cmd('dhcp id %s hostid %s port-id %s %s' %
                           (vm['vmId'], vm['host'], port['id'], port_name))
            elif port['device_owner'].startswith('compute'):
                append_cmd('vm id %s hostid %s' % (vm['vmId'], vm['host']))
                append_cmd('port id %s %s network-id %s' %
                           (port['id'], port_name, port['network_id']))
            else:
                msg = _("Unknown device owner: %s") % port['device_owner']
                LOG.warn(msg)
                continue
            append_cmd('exit')
        self._run_openstack_cmds(cmds)
    def delete_tenant(self, tenant_id):
        """Deletes a given tenant and all its networks and VMs from EOS.

        :param tenant_id: globally unique neutron tenant identifier
        """
        self.delete_tenant_bulk([tenant_id])
    def delete_tenant_bulk(self, tenant_list):
        """Sends a bulk request to delete the tenants.

        :param tenant_list: list of globally unique neutron tenant ids which
                            need to be deleted.
        """
        cmds = []
        for tenant in tenant_list:
            cmds.append('no tenant %s' % tenant)
        cmds.append('exit')
        self._run_openstack_cmds(cmds)
    def delete_this_region(self):
        """Deletes the region data from EOS."""
        cmds = ['enable',
                'configure',
                'cvx',
                'service openstack',
                'no region %s' % self.region,
                'exit',
                'exit',
                'exit']
        # Raw EOS commands: _run_openstack_cmds would try to enter the very
        # region being deleted.
        self._run_eos_cmds(cmds)
    def register_with_eos(self):
        """This is the registration request with EOS.

        This is the initial handshake between Neutron and EOS.
        Critical end-point information is registered with EOS.
        """
        cmds = ['auth url %s user %s password %s tenant %s' % (
                self._keystone_url(),
                self.keystone_conf.admin_user,
                self.keystone_conf.admin_password,
                self.keystone_conf.admin_tenant_name)]
        # Parallel command list with the password masked, used only for
        # logging so credentials never reach the log files.
        log_cmds = ['auth url %s user %s password %s tenant %s' % (
                    self._keystone_url(),
                    self.keystone_conf.admin_user,
                    '******',
                    self.keystone_conf.admin_tenant_name)]
        sync_interval_cmd = 'sync interval %d' % self.sync_interval
        cmds.append(sync_interval_cmd)
        log_cmds.append(sync_interval_cmd)
        self._run_openstack_cmds(cmds, commands_to_log=log_cmds)
    def clear_region_updated_time(self):
        # TODO(shashank): Remove this once the call is removed from the ML2
        # driver.
        pass
    def get_region_updated_time(self):
        """Return the timestamp of the last update.

        This method returns the time at which any entities in the region
        were updated.
        """
        timestamp_cmd = self.cli_commands['timestamp']
        if timestamp_cmd:
            return self._run_eos_cmds(commands=timestamp_cmd)[0]
        # The timestamp command is unsupported on this EOS release.
        return None
    def _run_eos_cmds(self, commands, commands_to_log=None):
        """Execute/sends a CAPI (Command API) command to EOS.

        In this method, list of commands is appended with prefix and
        postfix commands - to make it understandable by EOS.

        :param commands : List of command to be executed on EOS.
        :param commands_to_log : This should be set to the command that is
                                 logged. If it is None, then the commands
                                 param is logged.
        """
        log_cmds = commands
        if commands_to_log:
            log_cmds = commands_to_log
        LOG.info(_('Executing command on Arista EOS: %s'), log_cmds)
        try:
            # this returns array of return values for every command in
            # full_command list
            ret = self._server.runCmds(version=1, cmds=commands)
        except Exception as error:
            host = cfg.CONF.ml2_arista.eapi_host
            error_msg_str = unicode(error)
            if commands_to_log:
                # The command might contain sensitive information. If the
                # command to log is different from the actual command, use
                # that in the error message.
                for cmd, log_cmd in itertools.izip(commands, log_cmds):
                    error_msg_str = error_msg_str.replace(cmd, log_cmd)
            msg = (_('Error %(err)s while trying to execute '
                     'commands %(cmd)s on EOS %(host)s') %
                   {'err': error_msg_str,
                    'cmd': commands_to_log,
                    'host': host})
            # Logging exception here can reveal passwords as the exception
            # contains the CLI command which contains the credentials.
            LOG.error(msg)
            raise arista_exc.AristaRpcError(msg=msg)
        return ret
    def _build_command(self, cmds):
        """Build full EOS's openstack CLI command.

        Helper method to add commands to enter and exit from openstack
        CLI modes.

        :param cmds: The openstack CLI commands that need to be executed
                     in the openstack config mode.
        """
        full_command = [
            'enable',
            'configure',
            'cvx',
            'service openstack',
            'region %s' % self.region,
        ]
        full_command.extend(cmds)
        full_command.extend(self._get_exit_mode_cmds(['region',
                                                      'openstack',
                                                      'cvx']))
        return full_command
    def _run_openstack_cmds(self, commands, commands_to_log=None):
        """Execute/sends a CAPI (Command API) command to EOS.

        In this method, list of commands is appended with prefix and
        postfix commands - to make it understandable by EOS.

        :param commands : List of command to be executed on EOS.
        :param commands_to_log : This should be set to the command that is
                                 logged. If it is None, then the commands
                                 param is logged.
        """
        full_command = self._build_command(commands)
        if commands_to_log:
            # Wrap the loggable variant with the same enter/exit commands so
            # it lines up 1:1 with what is actually executed.
            full_log_command = self._build_command(commands_to_log)
        else:
            full_log_command = None
        self._run_eos_cmds(full_command, full_log_command)
    def _eapi_host_url(self):
        self._validate_config()
        user = cfg.CONF.ml2_arista.eapi_username
        pwd = cfg.CONF.ml2_arista.eapi_password
        host = cfg.CONF.ml2_arista.eapi_host
        # Credentials are embedded in the URL, as consumed by jsonrpclib.
        eapi_server_url = ('https://%s:%s@%s/command-api' %
                           (user, pwd, host))
        return eapi_server_url
    def _validate_config(self):
        # NOTE(review): eapi_password is not validated here — an empty
        # password appears to be permitted; confirm before tightening.
        if cfg.CONF.ml2_arista.get('eapi_host') == '':
            msg = _('Required option eapi_host is not set')
            LOG.error(msg)
            raise arista_exc.AristaConfigError(msg=msg)
        if cfg.CONF.ml2_arista.get('eapi_username') == '':
            msg = _('Required option eapi_username is not set')
            LOG.error(msg)
            raise arista_exc.AristaConfigError(msg=msg)
class SyncService(object):
    """Synchronization of information between Neutron and EOS

    Periodically (through configuration option), this service
    ensures that Networks and VMs configured on EOS/Arista HW
    are always in sync with Neutron DB.
    """
    def __init__(self, rpc_wrapper, neutron_db):
        self._rpc = rpc_wrapper
        self._ndb = neutron_db
        # Force a full sync on startup; cleared once a sync cycle gets far
        # enough without RPC failures.
        self._force_sync = True
        # Timestamp of the last region update as reported by EOS; None until
        # the first successful sync completes.
        self._region_updated_time = None
    def force_sync(self):
        """Sets the force_sync flag."""
        self._force_sync = True
    def do_synchronize(self):
        """Periodically check whether EOS is in sync with ML2 driver.

        If ML2 database is not in sync with EOS, then compute the diff and
        send it down to EOS.
        """
        if not self._sync_required():
            return
        # Send 'sync start' marker.
        if not self._sync_start():
            return
        # Perform the actual synchronization.
        self.synchronize()
        # Send 'sync end' marker.
        if not self._sync_end():
            return
        self._set_region_updated_time()
    def synchronize(self):
        """Sends data to EOS which differs from neutron DB."""
        LOG.info(_('Syncing Neutron <-> EOS'))
        try:
            # Always register with EOS to ensure that it has correct
            # credentials.
            self._rpc.register_with_eos()
            eos_tenants = self._rpc.get_tenants()
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
            return
        db_tenants = db.get_tenants()
        # Delete tenants that are in EOS, but not in the database
        tenants_to_delete = frozenset(eos_tenants.keys()).difference(
            db_tenants.keys())
        if tenants_to_delete:
            try:
                self._rpc.delete_tenant_bulk(tenants_to_delete)
            except arista_exc.AristaRpcError:
                LOG.warning(EOS_UNREACHABLE_MSG)
                self._force_sync = True
                return
        # None of the commands have failed till now. But if subsequent
        # operations fail, then force_sync is set to true
        self._force_sync = False
        vms_to_update = {}
        for tenant in db_tenants:
            db_nets = db.get_networks(tenant)
            db_vms = db.get_vms(tenant)
            eos_nets = self._get_eos_networks(eos_tenants, tenant)
            eos_vms = self._get_eos_vms(eos_tenants, tenant)
            db_nets_key_set = frozenset(db_nets.keys())
            db_vms_key_set = frozenset(db_vms.keys())
            eos_nets_key_set = frozenset(eos_nets.keys())
            eos_vms_key_set = frozenset(eos_vms.keys())
            # Find the networks that are present on EOS, but not in Neutron DB
            nets_to_delete = eos_nets_key_set.difference(db_nets_key_set)
            # Find the VMs that are present on EOS, but not in Neutron DB
            vms_to_delete = eos_vms_key_set.difference(db_vms_key_set)
            # Find the Networks that are present in Neutron DB, but not on EOS
            nets_to_update = db_nets_key_set.difference(eos_nets_key_set)
            # Find the VMs that are present in Neutron DB, but not on EOS;
            # they are created in a second pass after the network updates.
            vms_to_update[tenant] = db_vms_key_set.difference(eos_vms_key_set)
            try:
                if vms_to_delete:
                    self._rpc.delete_vm_bulk(tenant, vms_to_delete)
                if nets_to_delete:
                    self._rpc.delete_network_bulk(tenant, nets_to_delete)
                if nets_to_update:
                    # Create a dict of networks keyed by id.
                    neutron_nets = dict(
                        (network['id'], network) for network in
                        self._ndb.get_all_networks_for_tenant(tenant)
                    )
                    networks = [{
                        'network_id': net_id,
                        'segmentation_id':
                            db_nets[net_id]['segmentationTypeId'],
                        'network_name':
                            neutron_nets.get(net_id, {'name': ''})['name'],
                        }
                        for net_id in nets_to_update
                    ]
                    self._rpc.create_network_bulk(tenant, networks)
            except arista_exc.AristaRpcError:
                # Continue with the remaining tenants; a full resync is
                # forced on the next cycle.
                LOG.warning(EOS_UNREACHABLE_MSG)
                self._force_sync = True
        # Now update the VMs
        for tenant in vms_to_update:
            if not vms_to_update[tenant]:
                continue
            try:
                # Filter the ports to only the vms that we are interested
                # in.
                vm_ports = [
                    port for port in self._ndb.get_all_ports_for_tenant(
                        tenant) if port['device_id'] in vms_to_update[tenant]
                ]
                if vm_ports:
                    db_vms = db.get_vms(tenant)
                    self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms)
            except arista_exc.AristaRpcError:
                LOG.warning(EOS_UNREACHABLE_MSG)
                self._force_sync = True
    def _sync_start(self):
        """Let EOS know that a sync is being initiated."""
        try:
            self._rpc._run_openstack_cmds(['sync start'])
            return True
        except arista_exc.AristaRpcError:
            self._force_sync = True
            return False
    def _sync_end(self):
        """Let EOS know that sync is complete."""
        try:
            self._rpc._run_openstack_cmds(['sync end'])
            return True
        except arista_exc.AristaRpcError:
            self._force_sync = True
            return False
    def _region_in_sync(self):
        """Checks if the region is in sync with EOS.

        Checks whether the timestamp stored in EOS is the same as the
        timestamp stored locally.
        """
        eos_region_updated_times = self._rpc.get_region_updated_time()
        return (self._region_updated_time and
                (self._region_updated_time['regionTimestamp'] ==
                 eos_region_updated_times['regionTimestamp']))
    def _sync_required(self):
        """Check whether the sync is required."""
        try:
            # Get the time at which entities in the region were updated.
            # If the times match, then ML2 is in sync with EOS. Otherwise
            # perform a complete sync.
            if not self._force_sync and self._region_in_sync():
                LOG.info(_('OpenStack and EOS are in sync!'))
                self._sync_end()
                return False
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            # Force an update incase of an error.
            self._force_sync = True
        return True
    def _set_region_updated_time(self):
        """Get the region updated time from EOS and store it locally."""
        try:
            self._region_updated_time = self._rpc.get_region_updated_time()
        except arista_exc.AristaRpcError:
            # Force an update incase of an error.
            self._force_sync = True
    def _get_eos_networks(self, eos_tenants, tenant):
        # Returns {} when EOS has no record of the tenant.
        networks = {}
        if eos_tenants and tenant in eos_tenants:
            networks = eos_tenants[tenant]['tenantNetworks']
        return networks
    def _get_eos_vms(self, eos_tenants, tenant):
        # Returns {} when EOS has no record of the tenant.
        vms = {}
        if eos_tenants and tenant in eos_tenants:
            vms = eos_tenants[tenant]['tenantVmInstances']
        return vms
class AristaDriver(driver_api.MechanismDriver):
    """Ml2 Mechanism driver for Arista networking hardware.

    Remembers all networks and VMs that are provisioned on Arista Hardware.
    Does not send network provisioning request if the network has already been
    provisioned before for the given port.
    """
    def __init__(self, rpc=None):
        # rpc is injectable to ease testing; defaults to the real EOS wrapper
        self.rpc = rpc or AristaRPCWrapper()
        self.db_nets = db.AristaProvisionedNets()
        self.db_vms = db.AristaProvisionedVms()
        self.db_tenants = db.AristaProvisionedTenants()
        self.ndb = db.NeutronNets()
        confg = cfg.CONF.ml2_arista
        self.segmentation_type = db.VLAN_SEGMENTATION
        self.timer = None  # periodic sync timer, see _synchronization_thread
        self.eos = SyncService(self.rpc, self.ndb)
        self.sync_timeout = confg['sync_interval']
        # serializes all EOS/DB interactions with the periodic sync thread
        self.eos_sync_lock = threading.Lock()
    def initialize(self):
        """One-time ML2 driver initialization: register with EOS, clean the
        local DB and start the periodic synchronization thread.
        """
        self.rpc.register_with_eos()
        self._cleanup_db()
        self.rpc.check_cli_commands()
        # Registering with EOS updates self.rpc.region_updated_time. Clear it
        # to force an initial sync
        self.rpc.clear_region_updated_time()
        self._synchronization_thread()
    def create_network_precommit(self, context):
        """Remember the tenant, and network information."""
        network = context.current
        segments = context.network_segments
        if segments[0][driver_api.NETWORK_TYPE] != p_const.TYPE_VLAN:
            # If network type is not VLAN, do nothing
            return
        network_id = network['id']
        tenant_id = network['tenant_id']
        if not tenant_id:
            # fall back to the request's tenant (e.g. HA router resources
            # may carry no tenant id of their own)
            tenant_id = context._plugin_context.tenant_id
        segmentation_id = segments[0]['segmentation_id']
        with self.eos_sync_lock:
            db.remember_tenant(tenant_id)
            db.remember_network(tenant_id,
                                network_id,
                                segmentation_id)
    def create_network_postcommit(self, context):
        """Provision the network on the Arista Hardware."""
        network = context.current
        network_id = network['id']
        network_name = network['name']
        tenant_id = network['tenant_id']
        if not tenant_id:
            tenant_id = context._plugin_context.tenant_id
        segments = context.network_segments
        vlan_id = segments[0]['segmentation_id']
        with self.eos_sync_lock:
            # only push to EOS what precommit recorded in our DB
            if db.is_network_provisioned(tenant_id, network_id):
                try:
                    network_dict = {
                        'network_id': network_id,
                        'segmentation_id': vlan_id,
                        'network_name': network_name,
                    }
                    self.rpc.create_network(tenant_id, network_dict)
                except arista_exc.AristaRpcError:
                    LOG.info(EOS_UNREACHABLE_MSG)
                    raise ml2_exc.MechanismDriverError()
            else:
                msg = _('Network %s is not created as it is not found in'
                        'Arista DB') % network_id
                LOG.info(msg)
    def update_network_precommit(self, context):
        """At the moment we only support network name change.

        Any other change in network is not supported at this time.
        We do not store the network names, therefore, no DB store
        action is performed here.
        """
        new_network = context.current
        orig_network = context.original
        if new_network['name'] != orig_network['name']:
            msg = _('Network name changed to %s') % new_network['name']
            LOG.info(msg)
    def update_network_postcommit(self, context):
        """At the moment we only support network name change.

        If network name is changed, a new network create request is
        sent to the Arista Hardware.
        """
        new_network = context.current
        orig_network = context.original
        if(new_network['name'] != orig_network['name']):
            network_id = new_network['id']
            network_name = new_network['name']
            tenant_id = new_network['tenant_id']
            if not tenant_id:
                tenant_id = context._plugin_context.tenant_id
            vlan_id = new_network['provider:segmentation_id']
            with self.eos_sync_lock:
                if db.is_network_provisioned(tenant_id, network_id):
                    try:
                        network_dict = {
                            'network_id': network_id,
                            'segmentation_id': vlan_id,
                            'network_name': network_name,
                        }
                        # EOS create is idempotent: re-creating with the new
                        # name effectively renames the network
                        self.rpc.create_network(tenant_id, network_dict)
                    except arista_exc.AristaRpcError:
                        LOG.info(EOS_UNREACHABLE_MSG)
                        raise ml2_exc.MechanismDriverError()
                else:
                    msg = _('Network %s is not updated as it is not found in'
                            'Arista DB') % network_id
                    LOG.info(msg)
    def delete_network_precommit(self, context):
        """Delete the network information from the DB."""
        network = context.current
        network_id = network['id']
        tenant_id = network['tenant_id']
        with self.eos_sync_lock:
            if db.is_network_provisioned(tenant_id, network_id):
                db.forget_network(tenant_id, network_id)
            # if necessary, delete tenant as well.
            self.delete_tenant(tenant_id)
    def delete_network_postcommit(self, context):
        """Send network delete request to Arista HW."""
        network = context.current
        segments = context.network_segments
        if segments[0][driver_api.NETWORK_TYPE] != p_const.TYPE_VLAN:
            # If network type is not VLAN, do nothing
            return
        network_id = network['id']
        tenant_id = network['tenant_id']
        if not tenant_id:
            tenant_id = context._plugin_context.tenant_id
        with self.eos_sync_lock:
            # Succeed deleting network in case EOS is not accessible.
            # EOS state will be updated by sync thread once EOS gets
            # alive.
            try:
                self.rpc.delete_network(tenant_id, network_id)
            except arista_exc.AristaRpcError:
                LOG.info(EOS_UNREACHABLE_MSG)
                raise ml2_exc.MechanismDriverError()
    def create_port_precommit(self, context):
        """Remember the information about a VM and its ports.

        A VM information, along with the physical host information
        is saved.
        """
        port = context.current
        device_id = port['device_id']
        device_owner = port['device_owner']
        host = context.host
        # device_id and device_owner are set on VM boot
        is_vm_boot = device_id and device_owner
        if host and is_vm_boot:
            port_id = port['id']
            network_id = port['network_id']
            tenant_id = port['tenant_id']
            if not tenant_id:
                tenant_id = context._plugin_context.tenant_id
            with self.eos_sync_lock:
                if not db.is_network_provisioned(tenant_id, network_id):
                    # Ignore this request if network is not provisioned
                    return
                db.remember_tenant(tenant_id)
                db.remember_vm(device_id, host, port_id,
                               network_id, tenant_id)
    def create_port_postcommit(self, context):
        """Plug a physical host into a network.

        Send provisioning request to Arista Hardware to plug a host
        into appropriate network.
        """
        port = context.current
        device_id = port['device_id']
        device_owner = port['device_owner']
        host = context.host
        # device_id and device_owner are set on VM boot
        is_vm_boot = device_id and device_owner
        if host and is_vm_boot:
            port_id = port['id']
            port_name = port['name']
            network_id = port['network_id']
            tenant_id = port['tenant_id']
            if not tenant_id:
                tenant_id = context._plugin_context.tenant_id
            with self.eos_sync_lock:
                hostname = self._host_name(host)
                vm_provisioned = db.is_vm_provisioned(device_id,
                                                      host,
                                                      port_id,
                                                      network_id,
                                                      tenant_id)
                net_provisioned = db.is_network_provisioned(tenant_id,
                                                            network_id)
                if vm_provisioned and net_provisioned:
                    try:
                        self.rpc.plug_port_into_network(device_id,
                                                        hostname,
                                                        port_id,
                                                        network_id,
                                                        tenant_id,
                                                        port_name,
                                                        device_owner)
                    except arista_exc.AristaRpcError:
                        LOG.info(EOS_UNREACHABLE_MSG)
                        raise ml2_exc.MechanismDriverError()
                else:
                    msg = _('VM %s is not created as it is not found in '
                            'Arista DB') % device_id
                    LOG.info(msg)
    def update_port_precommit(self, context):
        """Update the name of a given port.

        At the moment we only support port name change.
        Any other change to port is not supported at this time.
        We do not store the port names, therefore, no DB store
        action is performed here.
        """
        new_port = context.current
        orig_port = context.original
        if new_port['name'] != orig_port['name']:
            msg = _('Port name changed to %s') % new_port['name']
            LOG.info(msg)
        device_id = new_port['device_id']
        device_owner = new_port['device_owner']
        host = context.host
        # device_id and device_owner are set on VM boot
        is_vm_boot = device_id and device_owner
        # a changed binding host means the VM migrated: track the new host
        if host and host != orig_port['binding:host_id'] and is_vm_boot:
            port_id = new_port['id']
            network_id = new_port['network_id']
            tenant_id = new_port['tenant_id']
            if not tenant_id:
                tenant_id = context._plugin_context.tenant_id
            with self.eos_sync_lock:
                db.update_vm_host(device_id, host, port_id,
                                  network_id, tenant_id)
    def update_port_postcommit(self, context):
        """Update the name of a given port in EOS.

        At the moment we only support port name change.
        Any other change to port is not supported at this time.
        """
        port = context.current
        orig_port = context.original
        device_id = port['device_id']
        device_owner = port['device_owner']
        host = context.host
        is_vm_boot = device_id and device_owner
        if host and is_vm_boot:
            port_id = port['id']
            port_name = port['name']
            network_id = port['network_id']
            tenant_id = port['tenant_id']
            if not tenant_id:
                tenant_id = context._plugin_context.tenant_id
            with self.eos_sync_lock:
                hostname = self._host_name(host)
                segmentation_id = db.get_segmentation_id(tenant_id,
                                                         network_id)
                vm_provisioned = db.is_vm_provisioned(device_id,
                                                      host,
                                                      port_id,
                                                      network_id,
                                                      tenant_id)
                net_provisioned = db.is_network_provisioned(tenant_id,
                                                            network_id,
                                                            segmentation_id)
                if vm_provisioned and net_provisioned:
                    try:
                        orig_host = orig_port['binding:host_id']
                        if host != orig_host:
                            # The port moved to a different host. So delete the
                            # old port on the old host before creating a new
                            # port on the new host.
                            self._delete_port(port, orig_host, tenant_id)
                        self.rpc.plug_port_into_network(device_id,
                                                        hostname,
                                                        port_id,
                                                        network_id,
                                                        tenant_id,
                                                        port_name,
                                                        device_owner)
                    except arista_exc.AristaRpcError:
                        LOG.info(EOS_UNREACHABLE_MSG)
                        raise ml2_exc.MechanismDriverError()
                else:
                    msg = _('VM %s is not updated as it is not found in '
                            'Arista DB') % device_id
                    LOG.info(msg)
    def delete_port_precommit(self, context):
        """Delete information about a VM and host from the DB."""
        port = context.current
        host_id = context.host
        device_id = port['device_id']
        tenant_id = port['tenant_id']
        if not tenant_id:
            tenant_id = context._plugin_context.tenant_id
        network_id = port['network_id']
        port_id = port['id']
        with self.eos_sync_lock:
            if db.is_vm_provisioned(device_id, host_id, port_id,
                                    network_id, tenant_id):
                db.forget_vm(device_id, host_id, port_id,
                             network_id, tenant_id)
            # if necessary, delete tenant as well.
            self.delete_tenant(tenant_id)
    def delete_port_postcommit(self, context):
        """unPlug a physical host from a network.

        Send provisioning request to Arista Hardware to unplug a host
        from appropriate network.
        """
        port = context.current
        host = context.host
        tenant_id = port['tenant_id']
        if not tenant_id:
            tenant_id = context._plugin_context.tenant_id
        with self.eos_sync_lock:
            self._delete_port(port, host, tenant_id)
    def _delete_port(self, port, host, tenant_id):
        """Deletes the port from EOS.

        param port: Port which is to be deleted
        param host: The host on which the port existed
        param tenant_id: The tenant to which the port belongs to. Some times
                         the tenant id in the port dict is not present (as in
                         the case of HA router).
        """
        device_id = port['device_id']
        port_id = port['id']
        network_id = port['network_id']
        device_owner = port['device_owner']
        try:
            if not db.is_network_provisioned(tenant_id, network_id):
                # If we do not have network associated with this, ignore it
                return
            hostname = self._host_name(host)
            # DHCP ports are unplugged through a dedicated RPC
            if device_owner == n_const.DEVICE_OWNER_DHCP:
                self.rpc.unplug_dhcp_port_from_network(device_id,
                                                       hostname,
                                                       port_id,
                                                       network_id,
                                                       tenant_id)
            else:
                self.rpc.unplug_host_from_network(device_id,
                                                  hostname,
                                                  port_id,
                                                  network_id,
                                                  tenant_id)
            # if necessary, delete tenant as well.
            self.delete_tenant(tenant_id)
        except arista_exc.AristaRpcError:
            LOG.info(EOS_UNREACHABLE_MSG)
            raise ml2_exc.MechanismDriverError()
    def delete_tenant(self, tenant_id):
        """delete a tenant from DB.

        A tenant is deleted only if there is no network or VM configured
        configured for this tenant.
        """
        objects_for_tenant = (db.num_nets_provisioned(tenant_id) +
                              db.num_vms_provisioned(tenant_id))
        if not objects_for_tenant:
            db.forget_tenant(tenant_id)
    def _host_name(self, hostname):
        # strip the domain part unless FQDNs are configured for this region
        fqdns_used = cfg.CONF.ml2_arista['use_fqdn']
        return hostname if fqdns_used else hostname.split('.')[0]
    def _synchronization_thread(self):
        # run one sync pass, then re-arm the timer for the next pass
        with self.eos_sync_lock:
            self.eos.do_synchronize()
        self.timer = threading.Timer(self.sync_timeout,
                                     self._synchronization_thread)
        self.timer.start()
    def stop_synchronization_thread(self):
        # cancel the pending timer, if any, so no further sync pass fires
        if self.timer:
            self.timer.cancel()
            self.timer = None
    def _cleanup_db(self):
        """Clean up any unnecessary entries in our DB."""
        # forget networks that no longer exist in Neutron
        db_tenants = db.get_tenants()
        for tenant in db_tenants:
            neutron_nets = self.ndb.get_all_networks_for_tenant(tenant)
            neutron_nets_id = []
            for net in neutron_nets:
                neutron_nets_id.append(net['id'])
            db_nets = db.get_networks(tenant)
            for net_id in db_nets.keys():
                if net_id not in neutron_nets_id:
                    db.forget_network(tenant, net_id)
|
|
"""Mountain-car problem simulation library for the reinforcement learning
miniproject.
"""
import pylab as plb
import numpy as np
class MountainCar():
    """Simulation of the classic mountain-car task.

    For the miniproject, the default physical parameters (mass of the car,
    landscape shape, etc.) are not meant to be changed.

    Usage:

    >>> mc = MountainCar()

    Apply a rightward (positive in x) force:

    >>> mc.apply_force(+1)  # only the sign matters, not the magnitude

    Run an "agent time step" of 1 s with a 0.01 s integration time step:

    >>> mc.simulate_timesteps(n = 100, dt = 0.01)

    Inspect the state variables of the agent, and the reward:

    >>> print mc.x, mc.x_d, mc.R

    Reset the position/speed of the car:

    >>> mc.reset()
    """

    def __init__(self, g=10.0, d=100.0, H=10., m=10.0,
                 force_amplitude=3.0, reward_amplitude=1.,
                 reward_threshold=0.0):
        self.g = g  # gravitational constant
        self.d = d  # x-coordinate of the two minima
        self.H = H  # height of the saddle point
        self.m = m  # mass of the car
        # amplitude of the force applied by the engine
        self.force_amplitude = force_amplitude
        # value of the reward and the x threshold for obtaining it
        self.reward_amplitude = reward_amplitude
        self.reward_threshold = reward_threshold
        # initialize the state variables (x, x_d, R, t, F)
        self.reset()

    def reset(self):
        """Put the car back at a random position with a random speed."""
        # position drawn uniformly from [-130, -50]
        self.x = 80 * np.random.rand() - 130.0
        # horizontal speed drawn uniformly from [-5, 5]
        self.x_d = 10.0 * np.random.rand() - 5.0
        self.R = 0.0  # reward
        self.t = 0.0  # elapsed simulation time
        self.F = 0.0  # currently applied engine force

    def apply_force(self, direction):
        """Apply an engine force to the car.

        Rightward if direction > 0, leftward if direction < 0, no force if
        direction == 0; only the sign of *direction* is used.
        """
        self.F = np.sign(direction) * self.force_amplitude

    def _h(self, x):
        """Height of the landscape at horizontal position x."""
        return (x - self.d) ** 2 * (x + self.d) ** 2 / (
            (self.d ** 4 / self.H) + x ** 2)

    def _h_prime(self, x):
        """First derivative of the landscape function h at x."""
        c = self.d ** 4 / self.H
        return 2 * x * (x ** 2 - self.d ** 2) * (
            2 * c + self.d ** 2 + x ** 2) / (c + x ** 2) ** 2

    def _h_second(self, x):
        """Second derivative of the landscape function h at x."""
        c = self.d ** 4 / self.H
        return 2 * (
            - 2 * c ** 2 * (self.d ** 2 - 3 * x ** 2)
            + c * (-self.d ** 4 + 6 * self.d ** 2 * x ** 2 + 3 * x ** 4)
            + 3 * self.d ** 4 * x ** 2
            + x ** 6
        ) / (c + x ** 2) ** 3

    def _energy(self, x, x_d):
        """Total mechanical energy of the car in state (x, x_d).

        The speed along the slope includes the vertical component, hence the
        (1 + h'(x)^2) factor — v and x_dot are not the same.
        """
        return self.m * (
            self.g * self._h(x) + 0.5 * (1 + self._h_prime(x) ** 2) * x_d ** 2)

    def simulate_timesteps(self, n=1, dt=0.1):
        """Advance the car dynamics by n integration steps of length dt."""
        for _ in range(n):
            self._simulate_single_timestep(dt)
        self.t += n * dt
        # check for rewards after having moved
        self.R = self._get_reward()

    def _simulate_single_timestep(self, dt):
        """Integrate the car dynamics over a single timestep."""
        # horizontal acceleration due to engine force, gravity and curvature
        slope_angle = np.arctan(self._h_prime(self.x))
        x_dd = np.cos(slope_angle) * (self.F / self.m - np.sin(slope_angle) * (
            self.g + self._h_second(self.x) * self.x_d ** 2))
        # second-order update for position, first-order for velocity
        self.x += self.x_d * dt + 0.5 * x_dd * dt ** 2
        self.x_d += x_dd * dt

    def _get_reward(self):
        """Return the reward for the current state.

        A reward, once obtained, is sticky; it is granted the first time the
        car reaches the reward threshold.
        """
        if self.R > 0.0:
            # keep an already-granted reward
            return self.R
        if self.x >= self.reward_threshold:
            return self.reward_amplitude
        return 0.0

    def get_positions(self):
        """Return the current position and horizontal speed (x, x_d)."""
        return self.x, self.x_d
class MountainCarViewer():
    """Display the state of a MountainCar instance.

    Usage:

    >>> mc = MountainCar()
    >>> mv = MountainCarViewer(mc)

    Turn matplotlib's "interactive mode" on and create figure

    >>> plb.ion()
    >>> mv.create_figure(n_steps = 200, max_time = 200)

    This forces matplotlib to draw the fig. before the end of execution

    >>> plb.draw()

    Simulate the MountainCar, visualizing the state

    >>> for n in range(200):
    >>>     mc.simulate_timesteps(100,0.01)
    >>>     mv.update_figure()
    >>>     plb.draw()
    """
    def __init__(self, mountain_car):
        assert isinstance(mountain_car, MountainCar), \
            'Argument to MoutainCarViewer() must be a MountainCar instance'
        self.mountain_car = mountain_car
    def create_figure(self, n_steps, max_time, f=None):
        """Create a figure showing the progression of the car.

        Call update_figure subsequently to update this figure.

        Parameters:
        -----------
        n_steps -- number of times update_figure will be called.
        max_time -- the time the trial will last (to scale the plots).
        f -- (optional) figure in which to create the plots.
        """
        if f is None:
            self.f = plb.figure()
        else:
            self.f = f
        # create the arrays that store the history of the car state
        self.times = np.zeros(n_steps + 1)
        self.positions = np.zeros((n_steps + 1, 2))  # columns: x, x_dot
        self.forces = np.zeros(n_steps + 1)
        self.energies = np.zeros(n_steps + 1)
        # Fill the initial values
        self.i = 0  # index of the next slot to fill in the arrays
        self._get_values()
        # create the energy landscape plot
        self.ax_position = plb.subplot(2, 1, 1)
        self._plot_energy_landscape(self.ax_position)
        self.h_position = self._plot_positions()
        # create the force plot
        self.ax_forces = plb.subplot(2, 2, 3)
        self.h_forces = self._plot_forces()
        plb.axis(xmin=0, xmax=max_time,
                 ymin=-1.1 * self.mountain_car.force_amplitude,
                 ymax=1.1 * self.mountain_car.force_amplitude)
        # create the energy plot
        self.ax_energies = plb.subplot(2, 2, 4)
        self.h_energies = self._plot_energy()
        plb.axis(xmin=0, xmax=max_time,
                 ymin=0.0, ymax=1000.)
    def update_figure(self):
        """Update the figure.

        Assumes the figure has already been created with create_figure.
        """
        # increment
        self.i += 1
        assert self.i < len(self.forces), \
            "update_figure was called too many times."
        # get the new values from the car
        self._get_values()
        # update the plots
        self._plot_positions(self.h_position)
        self._plot_forces(self.h_forces)
        self._plot_energy(self.h_energies)
    def _get_values(self):
        """Record the car's current state variables at index self.i."""
        self.times[self.i] = self.mountain_car.t
        self.positions[self.i, 0] = self.mountain_car.x
        self.positions[self.i, 1] = self.mountain_car.x_d
        self.forces[self.i] = self.mountain_car.F
        self.energies[self.i] = self.mountain_car._energy(
            self.mountain_car.x, self.mountain_car.x_d)
    def _plot_energy_landscape(self, ax=None):
        """plot the energy landscape for the mountain car in 2D.

        Returns the axes instance created. Use plot_energy_landscape to let
        the module decide whether you have the right modules for 3D plotting.
        """
        # create coordinates for a grid in the x-x_dot space
        X = np.linspace(-160, 160, 61)
        XD = np.linspace(-20, 20, 51)
        X, XD = np.meshgrid(X, XD)
        # calculate the energy in each point of the grid
        E = self.mountain_car._energy(X, XD)
        # display the energy as an image
        if ax is None:
            f = plb.figure()
            ax = plb.axes()
        C = ax.contourf(X, XD, E, 100)
        ax.set_xlabel('$x$')
        ax.set_ylabel('$\dot x$')
        cbar = plb.colorbar(C)
        cbar.set_label('$E$')
        return ax
    def _plot_positions(self, handles=None):
        """plot the position and trajectory of the car in state space.

        Creates the plot objects when handles is None, otherwise updates
        them in place. NOTE(review): creation returns a tuple while update
        returns the list passed in — callers currently ignore the update
        return value.
        """
        # choose the color of the point according to the force direction
        color = ['r', 'w', 'g'][1 + int(np.sign(self.mountain_car.F))]
        if handles is None:
            # create the plots
            handles = []  # keep the plot objects in this list
            handles.append(plb.plot(
                np.atleast_1d(self.positions[:self.i + 1, 0]),
                np.atleast_1d(self.positions[:self.i + 1, 1]),
                ',k'
            )[0])
            handles.append(plb.plot(
                np.atleast_1d(self.positions[self.i, 0]),
                np.atleast_1d(self.positions[self.i, 1]),
                'o' + color,
                markeredgecolor='none',
                markersize=9,
            )[0])
            return tuple(handles)
        else:
            # update the plots
            handles[0].set_xdata(np.atleast_1d(self.positions[:self.i + 1, 0]))
            handles[0].set_ydata(np.atleast_1d(self.positions[:self.i + 1, 1]))
            handles[1].set_xdata(np.atleast_1d(self.positions[self.i, 0]))
            handles[1].set_ydata(np.atleast_1d(self.positions[self.i, 1]))
            handles[1].set_color(color)
            return handles
    def _plot_forces(self, handle=None):
        """plot the force applied by the car vs time.

        Creates the plot when handle is None, otherwise updates it in place.
        """
        # create the plots
        if handle is None:
            handle = plb.plot(
                np.atleast_1d(self.times[:self.i + 1]),
                np.atleast_1d(self.forces[:self.i + 1]),
                ',k',
            )[0]
            plb.xlabel('$t$')
            plb.ylabel('$F$')
            return handle
        else:
            # update the plot
            handle.set_xdata(np.atleast_1d(self.times[:self.i + 1]))
            handle.set_ydata(np.atleast_1d(self.forces[:self.i + 1]))
            return handle
    def _plot_energy(self, handle=None):
        """plot the energy of the car vs time.

        Creates the plot when handle is None, otherwise updates it in place.
        """
        # create the plots
        if handle is None:
            handle = plb.plot(
                np.atleast_1d(self.times[:self.i + 1]),
                np.atleast_1d(self.energies[:self.i + 1]),
                'k',
                linewidth=0.5
            )[0]
            plb.xlabel('$t$')
            plb.ylabel('$E$')
            return handle
        else:
            # update the plot
            handle.set_xdata(np.atleast_1d(self.times[:self.i + 1]))
            handle.set_ydata(np.atleast_1d(self.energies[:self.i + 1]))
            return handle
|
|
# -*- coding: utf-8 -*-
import os
from abc import abstractmethod
import numpy as np
from joblib import delayed, Parallel
from .errors import TileExtractionException
from .image import Image, TileBuilder, DefaultTileBuilder, SkipBordersTileTopology, FixedSizeTileTopology
from .information import WorkflowInformation
from .locator import BinaryLocator, SemanticLocator
from .logging import Loggable, SilentLogger
from .merger import SemanticMerger
from .timing import WorkflowTiming
from .util import batch_split, shape_array
__author__ = "Romain Mormont <romainmormont@hotmail.com>"
__version = "0.1"
def _segment_locate(tiles, images, segmenter, locator, timing):
    """Segment a batch of tile images and locate polygons in the masks.

    Parameters
    ----------
    tiles: list (subtype: Tile, size: N)
        The tiles being processed (used for their offsets)
    images: ndarray (dims: [N, height, width[, ]])
        Numpy array of the tile images, in the same order as `tiles`
    segmenter: Segmenter
        Segments the images into masks
    locator: Locator
        Converts a mask into polygons
    timing: WorkflowTiming
        Records the execution times of the segment and locate steps

    Returns
    -------
    polygons: iterable (subtype: shapely.geometry.Polygon)
        Iterable containing the polygons found by the locate step
    """
    with timing.cm(SLDCWorkflow.TIMING_DETECT_SEGMENT):
        masks = segmenter.segment_batch(images)
    with timing.cm(SLDCWorkflow.TIMING_DETECT_LOCATE):
        located = [
            locator.locate(mask, offset=tile.offset)
            for mask, tile in zip(masks, tiles)
        ]
    return located
def _batch_segment_locate(tile_ids, tile_topology, segmenter, locator, logger=SilentLogger(), timing_root=None,
                          batch_size=1):
    """Helper function for parallel execution. A tile that cannot be fetched is
    reported with an empty polygon list rather than aborting the batch.

    Parameters
    ----------
    tile_ids: iterable (subtype: int, size: N)
        The identifiers of the tiles to be processed
    tile_topology: TileTopology
        The tile topology from which were extracted the identifiers to process
    segmenter: Segmenter
        The segmenter object
    locator: Locator
        The locator object
    logger: Logger (optional, default: SilentLogger)
        Logger used to report tiles that could not be fetched
    timing_root: str (optional, default: None)
        Root key of the WorkflowTiming created for this batch
    batch_size: int
        Batch size for segmentation

    Returns
    -------
    timing: WorkflowTiming
        The timing of execution for processing of the tile.
    tiles_polygons: iterable (subtype: (int, shapely.geometry.Polygon), size: N)
        A list containing the tile identifiers and the polygons found inside them
    """
    # NOTE(review): the default SilentLogger() is instantiated once at import
    # time and shared across calls — presumed stateless; confirm.
    timing = WorkflowTiming(root=timing_root)
    tiles_polygons = list()
    for start in range(0, len(tile_ids), batch_size):
        end = min(len(tile_ids), start + batch_size)
        batch_tile_ids = tile_ids[start:end]
        # extract tiles; tiles that fail to load are dropped from the batch
        images = list()
        kept_tiles = list()
        for tile_id in batch_tile_ids:
            tile = tile_topology.tile(tile_id)
            try:
                with timing.cm(SLDCWorkflow.TIMING_DETECT_LOAD):
                    images.append(tile.np_image)
                kept_tiles.append(tile)
            except TileExtractionException as e:
                logger.w("Workflow: a tile (id:{}) couldn't be fetched computations '{}'".format(tile_id, str(e)))
                # record the failed tile with no polygons
                tiles_polygons.append((tile_id, []))
        located = _segment_locate(kept_tiles, np.array(images), segmenter, locator, timing)
        tiles_polygons.extend(zip(map(lambda t: t.identifier, kept_tiles), located))
    return timing, tiles_polygons
def _dc_with_timing(dispatcher_classifier, image, polygons, timing_root=None):
    """Dispatch and classify a batch of polygons, returning timing information.

    Parameters
    ----------
    dispatcher_classifier: DispatcherClassifier
        The dispatcher/classifier to apply on the polygons
    image: Image
        The image from which the polygons were extracted
    polygons: iterable (subtype: shapely.geometry.Polygon, size: N)
        The polygons to dispatch and classify
    timing_root: str (optional, default: None)
        Root key under which the execution times are recorded

    Returns
    -------
    pred: iterable (subtype: int, size: N)
        The classes predicted for the passed polygons
    proba: iterable (subtype: float, size: N)
        The probability estimates for the predicted classes
    dispatch: iterable (subtype: hashable, size: N)
        The label of the dispatching rules which dispatched the polygons
    timing: WorkflowTiming
        The timing object containing the execution times of the dispatching and classification steps
    """
    return dispatcher_classifier.dispatch_classify_batch(image, polygons, timing_root=timing_root)
def _parallel_segment_locate(pool, segmenter, locator, logger, tile_topology, timing):
    """Execute the segment locate phase

    Parameters
    ----------
    pool: Parallel
        A pool of processes
    segmenter: SemanticSegmenter
        A segmenter
    locator: Locator
        A locator
    logger: Logger
        A logger
    tile_topology: TileTopology
        A tile topology
    timing: WorkflowTiming
        A workflow timing object for computing time; per-worker timings are
        merged into it before returning

    Returns
    -------
    tiles: iterable (size: n, subtype: int)
        Iterable containing the tiles ids
    tile_polygons: iterable (size: n, subtype: iterable of Polygon objects))
        The iterable at index i contains the polygons and pixel values found in the tile having index tiles[i]
    """
    # partition the tiles into batches for submitting them to processes
    batches = tile_topology.partition_identifiers(pool.n_jobs)
    # execute one _batch_segment_locate per worker
    results = pool(delayed(_batch_segment_locate)(
        tile_ids,
        tile_topology,
        segmenter,
        locator,
        logger,
        ".".join([SLDCWorkflow.TIMING_ROOT, SLDCWorkflow.TIMING_DETECT])
    ) for tile_ids in batches)
    # each worker returns (timing, [(tile_id, polygons), ...]); flatten them
    sub_timings, tiles_polygons = list(zip(*results))
    tiles = np.array([tid for result in tiles_polygons for tid, _ in result])
    tile_polygons = shape_array([polygons for result in tiles_polygons for _, polygons in result])
    # merge sub timings
    for sub_timing in sub_timings:
        timing.merge(sub_timing)
    return tiles, tile_polygons
class Workflow(Loggable):
    """Abstract base class to be implemented by workflows"""
    # NOTE(review): `process` is decorated with @abstractmethod but the class
    # does not use abc.ABCMeta, so instantiation is not actually prevented —
    # confirm whether this is intentional.

    # border-tile handling strategies (see _tile_topology)
    BORDER_TILES_SKIP = "skip"
    BORDER_TILES_EXTEND = "extend"
    BORDER_TILES_KEEP = "keep"
    def __init__(self, tile_builder, tile_max_width=1024, tile_max_height=1024, tile_overlap=7, n_jobs=1,
                 seg_batch_size=1, dist_tolerance=1, border_tiles=BORDER_TILES_KEEP, logger=SilentLogger()):
        """
        Parameters
        ----------
        tile_builder: TileBuilder
            An object for building specific tiles
        tile_max_width: int (optional, default: 1024)
            The maximum width of the tiles when iterating over the image
        tile_max_height: int (optional, default: 1024)
            The maximum height of the tiles when iterating over the image
        tile_overlap: int (optional, default: 7)
            The number of pixels of overlap between tiles when iterating over the image
        n_jobs: int (optional, default: 1)
            The number of jobs available for executing the workflow.
        seg_batch_size: int (optional, default: 1)
            Batch size for segmentation
        dist_tolerance: int (optional, default: 1)
            Maximal distance between two polygons so that they are considered from the same object
        border_tiles: str (optional, default: BORDER_TILES_KEEP)
            Border-tile policy: one of BORDER_TILES_SKIP, BORDER_TILES_EXTEND
            or BORDER_TILES_KEEP. BORDER_TILES_KEEP is invalid when
            seg_batch_size > 1.
        logger: Logger (optional, default: SilentLogger)
            A logger object
        """
        super(Workflow, self).__init__(logger=logger)
        # batched segmentation needs same-sized tiles, which "keep" does not
        # guarantee for border tiles
        if seg_batch_size > 1 and border_tiles == self.BORDER_TILES_KEEP:
            raise ValueError("When segmentation tile batch size is greater than 1 (here: {}), another border tiles "
                             "management should be picked.".format(seg_batch_size))
        self._tile_max_width = tile_max_width
        self._tile_max_height = tile_max_height
        self._tile_overlap = tile_overlap
        self._tile_builder = tile_builder
        self._n_jobs = n_jobs
        self._seg_batch_size = seg_batch_size
        self._pool = None  # cache across execution
        self._dist_tolerance = dist_tolerance
        self._border_tiles = border_tiles
    @property
    def border_tiles(self):
        return self._border_tiles
    @property
    def dist_tolerance(self):
        return self._dist_tolerance
    @property
    def batch_segment_enabled(self):
        # batch segmentation is active whenever more than one tile per batch
        return self.seg_batch_size > 1
    @property
    def seg_batch_size(self):
        return self._seg_batch_size
    @property
    def tile_max_width(self):
        return self._tile_max_width
    @property
    def tile_max_height(self):
        return self._tile_max_height
    @property
    def tile_overlap(self):
        return self._tile_overlap
    @property
    def tile_builder(self):
        return self._tile_builder
    @property
    def n_jobs(self):
        return self._n_jobs
    @n_jobs.setter
    def n_jobs(self, value):
        # changing the job count invalidates the cached worker pool
        if value != self._n_jobs:
            self._pool = None
            self._n_jobs = value
    def _set_pool(self):
        """Create a pool with self._n_jobs jobs in the self._pool variable.

        If the pool already exists, this method does nothing.
        """
        if self._pool is None:
            self._pool = Parallel(n_jobs=self._n_jobs)
    @property
    def pool(self):
        # lazily created and cached (see _set_pool)
        self._set_pool()
        return self._pool
    def __getstate__(self):
        self._pool = None  # so that the workflow is serializable
        return self.__dict__
    def _tile_topology(self, image):
        """Create a tile topology using the tile parameters for the given image

        Parameters
        ----------
        image: Image
            The image to generate a topology for

        Returns
        -------
        topology: TileTopology
            The topology, wrapped according to the border-tile policy
        """
        topology = image.tile_topology(
            self.tile_builder,
            max_width=self.tile_max_width,
            max_height=self.tile_max_height,
            overlap=self.tile_overlap
        )
        if self.border_tiles == self.BORDER_TILES_SKIP:
            topology = SkipBordersTileTopology(topology)
        elif self.border_tiles == self.BORDER_TILES_EXTEND:
            topology = FixedSizeTileTopology(topology)
        return topology
    @abstractmethod
    def process(self, image):
        """Process the given image using the workflow

        Parameters
        ----------
        image: Image
            The image to process

        Returns
        -------
        workflow_information: WorkflowInformation
            The workflow information object containing all the information about detected objects, execution times...

        Notes
        -----
        This method doesn't modify the image passed as parameter.
        This method doesn't modify the object's attributes (except for the pool).
        """
        pass
class SLDCWorkflow(Workflow):
    """A class that coordinates various components of the SLDC workflow in order to detect objects and return
    their information.

    The pipeline has three phases: (1) tile-wise segment/locate, (2) merging of
    per-tile polygons across tile boundaries, (3) dispatch/classify of the
    merged polygons.
    """
    # Keys under which each phase's execution time is recorded in the WorkflowTiming.
    TIMING_ROOT = "workflow.sldc"
    TIMING_DETECT = "detect"
    TIMING_DETECT_LOAD = "load"
    TIMING_DETECT_SEGMENT = "segment"
    TIMING_DETECT_LOCATE = "locate"
    TIMING_MERGE = "merge"
    TIMING_DC = "dispatch_classify"

    def __init__(self, segmenter, dispatcher_classifier, tile_builder, parallel_dispatch_classify=False, **kwargs):
        """Constructor for SLDCWorkflow objects

        Parameters
        ----------
        segmenter: Segmenter
            The segmenter implementing segmentation procedure to apply on tiles.
        tile_builder: TileBuilder
            A tile builder
        dispatcher_classifier: DispatcherClassifier
            The dispatcher classifier object for dispatching polygons and classify them.
        parallel_dispatch_classify: boolean (optional, default: False)
            True for executing dispatching and classification in parallel, False for sequential.
        **kwargs:
            Forwarded to the Workflow base constructor (tiling parameters, pool, logger...).
        """
        super(SLDCWorkflow, self).__init__(tile_builder, **kwargs)
        self._segmenter = segmenter
        # Locates polygons in the binary masks produced by the segmenter.
        self._locator = BinaryLocator()
        # Merges polygons touching across tile borders (dist_tolerance comes from the base class).
        self._merger = SemanticMerger(self.dist_tolerance)
        self._dispatch_classifier = dispatcher_classifier
        self._parallel_dispatch_classify = parallel_dispatch_classify

    def process(self, image):
        """Process function"""
        timing = WorkflowTiming(root=SLDCWorkflow.TIMING_ROOT)
        tile_topology = self._tile_topology(image)
        # phase 1: segment + locate on each tile
        self.logger.info("SLDCWorkflow : start segment/locate.")
        with timing.cm(SLDCWorkflow.TIMING_DETECT):
            tiles, tile_polygons = self._segment_locate(tile_topology, timing)
        self.logger.info(
            "SLDCWorkflow : end segment/locate." + os.linesep +
            "SLDCWorkflow : {} tile(s) processed in {} s.".format(len(tiles), timing.total(SLDCWorkflow.TIMING_DETECT)) + os.linesep +
            "SLDCWorkflow : {} polygon(s) found on those tiles.".format(sum([len(polygons) for polygons in tile_polygons]))
        )
        # phase 2: merge polygons that cross tile boundaries
        self.logger.info("SLDCWorkflow : start merging")
        with timing.cm(SLDCWorkflow.TIMING_MERGE):
            polygons = self._merger.merge(tiles, tile_polygons, tile_topology)
        self.logger.info(
            "SLDCWorkflow : end merging." + os.linesep +
            "SLDCWorkflow : {} polygon(s) found.".format(len(polygons)) + os.linesep +
            "SLDCWorkflow : executed in {} s.".format(timing.total(SLDCWorkflow.TIMING_MERGE))
        )
        # phase 3: dispatch each polygon to a classifier and predict its class
        self.logger.info("SLDCWorkflow : start dispatch/classify.")
        with timing.cm(SLDCWorkflow.TIMING_DC):
            pred, proba, dispatch_indexes = self._dispatch_classify(image, polygons, timing)
        self.logger.info(
            "SLDCWorkflow : end dispatch/classify.\n" +
            "SLDCWorkflow : executed in {} s.".format(timing.total(SLDCWorkflow.TIMING_DC))
        )
        return WorkflowInformation(polygons, pred, timing, dispatches=(dispatch_indexes, "dispatch"), probas=(proba, "proba"))

    def _segment_locate(self, tile_topology, timing):
        """Execute the segment locate phase

        Parameters
        ----------
        tile_topology: TileTopology
            A tile topology
        timing: WorkflowTiming
            A workflow timing object for computing time

        Returns
        -------
        tiles: iterable (size: n, subtype: int)
            Iterable containing the tiles ids
        tile_polygons: iterable (size: n, subtype: iterable of Polygon objects))
            The iterable at index i contains the polygons and pixel values found in the tile having index tiles[i]
        """
        # partition the tiles into batches for submitting them to processes
        tiles, tile_polygons = _parallel_segment_locate(
            self.pool,
            segmenter=self._segmenter,
            locator=self._locator,
            logger=self.logger,
            tile_topology=tile_topology,
            timing=timing
        )
        # keep only the polygon of each (polygon, value) pair produced by the locator
        return tiles, list(map(lambda l: [t[0] for t in l], tile_polygons))

    def _dispatch_classify(self, image, polygons, timing):
        """Execute dispatching and classification on several processes

        Parameters
        ----------
        image: Image
            The image to process
        polygons: iterable (subtype: shapely.geometry.Polygon, size: N)
            The polygons to process
        timing: WorkflowTiming
            The workflow timing object to which must be appended the execution times

        Returns
        -------
        predictions: iterable (subtype: int|None, size: N)
            A list of integer codes indicating the predicted classes.
            If none of the dispatching rules matched the polygon, the prediction associated with it is the one produced
            by the fail callback for the given polygon. Especially, if no fail callback was registered, None is
            returned.
        probabilities: iterable (subtype: float, range: [0,1], size: N)
            The probabilities associated with each predicted classes (0.0 for polygons that were not dispatched)
        dispatches: iterable (size: N)
            An iterable containing the identifiers of the rule that matched the polygons. If dispatching labels were
            provided at construction, those are used to identify the rules. Otherwise, the integer indexes of the rules
            in the list provided at construction are used. Polygons that weren't matched by any rule are returned -1 as
            dispatch index.
        """
        if len(polygons) == 0:
            return np.array([]), np.array([]), np.array([])
        timing_root = ".".join([SLDCWorkflow.TIMING_ROOT, SLDCWorkflow.TIMING_DC])
        # fall back to a sequential (single-job) pool unless parallel dispatch/classify was requested
        pool = self.pool if self._parallel_dispatch_classify else Parallel(n_jobs=1)
        n_jobs = self.n_jobs if self._parallel_dispatch_classify else 1
        batches = batch_split(n_jobs, polygons)
        results = pool(delayed(_dc_with_timing)(self._dispatch_classifier, image, batch, timing_root) for batch in batches)
        predictions, probabilities, dispatch, timings = zip(*results)
        # flatten the per-batch result lists back into flat per-polygon lists
        predictions = [pred for preds in predictions for pred in preds]
        probabilities = [proba for probs in probabilities for proba in probs]
        dispatch = [disp for disps in dispatch for disp in disps]
        # merge the per-batch timings into the caller's timing object
        for curr_timing in timings:
            timing.merge(curr_timing)
        return predictions, probabilities, dispatch
class SSLWorkflow(Workflow):
    """SSL stands for Semantic-Segment-Locate. Detection is performed by a semantic segmentation algorithm.

    Unlike the SLDC workflow there is no dispatch/classify phase: the semantic
    segmenter directly assigns a label to every located polygon.
    """
    # Keys under which each phase's execution time is recorded in the WorkflowTiming.
    TIMING_ROOT = "workflow.ssl"
    TIMING_DETECT = "detect"
    TIMING_DETECT_LOAD = "load"
    TIMING_DETECT_SEGMENT = "segment"
    TIMING_DETECT_LOCATE = "locate"
    TIMING_MERGE = "merge"

    def __init__(self, segmenter, tile_builder, background_class=-1, **kwargs):
        """Constructor

        Parameters
        ----------
        segmenter: SemanticSegmenter
            The semantic segmenter
        tile_builder: TileBuilder
            A tile builder
        background_class: int (default: -1)
            The background class not to locate (-1)
        **kwargs:
            Forwarded to the Workflow base constructor (tiling parameters, pool, logger...).
        """
        super(SSLWorkflow, self).__init__(tile_builder, **kwargs)
        self._segmenter = segmenter
        self._locator = SemanticLocator(background=background_class)
        self._merger = SemanticMerger(tolerance=self.dist_tolerance)

    def process(self, image):
        """Process function"""
        # FIX: log messages previously said "SLDCWorkflow" (copy-pasted from the
        # sibling class), which made logs from the two workflows indistinguishable.
        timing = WorkflowTiming(root=SSLWorkflow.TIMING_ROOT)
        tile_topology = self._tile_topology(image)
        # phase 1: semantic segmentation + locate on each tile
        self.logger.info("SSLWorkflow : start segment/locate.")
        with timing.cm(SSLWorkflow.TIMING_DETECT):
            tiles, tile_polygons, tile_labels = self._segment_locate(tile_topology, timing)
        self.logger.info(
            "SSLWorkflow : end segment/locate." + os.linesep +
            "SSLWorkflow : {} tile(s) processed in {} s.".format(len(tiles), timing.total(SSLWorkflow.TIMING_DETECT)) + os.linesep +
            "SSLWorkflow : {} polygon(s) found on those tiles.".format(sum([len(polygons) for polygons in tile_polygons]))
        )
        # phase 2: merge polygons crossing tile boundaries, propagating their labels
        self.logger.info("SSLWorkflow : start merging")
        with timing.cm(SSLWorkflow.TIMING_MERGE):
            polygons, labels = self._merger.merge(tiles, tile_polygons, tile_topology, labels=tile_labels)
        self.logger.info(
            "SSLWorkflow : end merging." + os.linesep +
            "SSLWorkflow : {} polygon(s) found.".format(len(polygons)) + os.linesep +
            "SSLWorkflow : executed in {} s.".format(timing.total(SSLWorkflow.TIMING_MERGE))
        )
        return WorkflowInformation(polygons, labels, timing)

    def _segment_locate(self, tile_topology, timing):
        """Execute the segment locate phase

        Parameters
        ----------
        tile_topology: TileTopology
            A tile topology
        timing: WorkflowTiming
            A workflow timing object for computing time

        Returns
        -------
        tiles: iterable (size: n, subtype: int)
            Iterable containing the tiles ids
        tile_polygons: iterable (size: n, subtype: iterable of Polygon objects))
            The iterable at index i contains the polygons found in the tile having index tiles[i]
        tile_labels: iterable (size: n, subtype: iterable of int)
            The iterable at index i contains the labels matching tile_polygons[i]
        """
        tiles, tile_polygons_labels = _parallel_segment_locate(
            self.pool,
            segmenter=self._segmenter,
            locator=self._locator,
            logger=self.logger,
            tile_topology=tile_topology,
            timing=timing
        )
        # split each tile's (polygon, label) pairs into two parallel structures
        tile_polygons = [[pair[0] for pair in pairs] for pairs in tile_polygons_labels]
        tile_labels = [[pair[1] for pair in pairs] for pairs in tile_polygons_labels]
        return tiles, tile_polygons, tile_labels
|
|
from __future__ import division
"""
Author: Emmett Butler
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["BalancedConsumer"]
import itertools
import logging
import math
import socket
import time
from uuid import uuid4
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeException, NodeExistsError
from kazoo.recipe.watchers import ChildrenWatch
from .common import OffsetType
from .exceptions import (KafkaException, PartitionOwnedError,
ConsumerStoppedException)
from .simpleconsumer import SimpleConsumer
log = logging.getLogger(__name__)
class BalancedConsumer(object):
    # FIX: inherit from `object` -- the original `class BalancedConsumer():` created
    # an old-style class on Python 2, where descriptors such as @property are not
    # fully supported.
    """
    A self-balancing consumer for Kafka that uses ZooKeeper to communicate
    with other balancing consumers.

    Maintains a single instance of SimpleConsumer, periodically using the
    consumer rebalancing algorithm to reassign partitions to this
    SimpleConsumer.
    """
    def __init__(self,
                 topic,
                 cluster,
                 consumer_group,
                 fetch_message_max_bytes=1024 * 1024,
                 num_consumer_fetchers=1,
                 auto_commit_enable=False,
                 auto_commit_interval_ms=60 * 1000,
                 queued_max_messages=2000,
                 fetch_min_bytes=1,
                 fetch_wait_max_ms=100,
                 offsets_channel_backoff_ms=1000,
                 offsets_commit_max_retries=5,
                 auto_offset_reset=OffsetType.LATEST,
                 consumer_timeout_ms=-1,
                 rebalance_max_retries=5,
                 rebalance_backoff_ms=2 * 1000,
                 zookeeper_connection_timeout_ms=6 * 1000,
                 zookeeper_connect='127.0.0.1:2181',
                 zookeeper=None,
                 auto_start=True,
                 reset_offset_on_start=False):
        """Create a BalancedConsumer instance

        :param topic: The topic this consumer should consume
        :type topic: :class:`pykafka.topic.Topic`
        :param cluster: The cluster to which this consumer should connect
        :type cluster: :class:`pykafka.cluster.Cluster`
        :param consumer_group: The name of the consumer group this consumer
            should join.
        :type consumer_group: str
        :param fetch_message_max_bytes: The number of bytes of messages to
            attempt to fetch with each fetch request
        :type fetch_message_max_bytes: int
        :param num_consumer_fetchers: The number of workers used to make
            FetchRequests
        :type num_consumer_fetchers: int
        :param auto_commit_enable: If true, periodically commit to kafka the
            offset of messages already fetched by this consumer. This also
            requires that `consumer_group` is not `None`.
        :type auto_commit_enable: bool
        :param auto_commit_interval_ms: The frequency (in milliseconds) at which
            the consumer's offsets are committed to kafka. This setting is
            ignored if `auto_commit_enable` is `False`.
        :type auto_commit_interval_ms: int
        :param queued_max_messages: The maximum number of messages buffered for
            consumption in the internal
            :class:`pykafka.simpleconsumer.SimpleConsumer`
        :type queued_max_messages: int
        :param fetch_min_bytes: The minimum amount of data (in bytes) that the
            server should return for a fetch request. If insufficient data is
            available, the request will block until sufficient data is available.
        :type fetch_min_bytes: int
        :param fetch_wait_max_ms: The maximum amount of time (in milliseconds)
            that the server will block before answering a fetch request if
            there isn't sufficient data to immediately satisfy `fetch_min_bytes`.
        :type fetch_wait_max_ms: int
        :param offsets_channel_backoff_ms: Backoff time to retry failed offset
            commits and fetches.
        :type offsets_channel_backoff_ms: int
        :param offsets_commit_max_retries: The number of times the offset commit
            worker should retry before raising an error.
        :type offsets_commit_max_retries: int
        :param auto_offset_reset: What to do if an offset is out of range. This
            setting indicates how to reset the consumer's internal offset
            counter when an `OffsetOutOfRangeError` is encountered.
        :type auto_offset_reset: :class:`pykafka.common.OffsetType`
        :param consumer_timeout_ms: Amount of time (in milliseconds) the
            consumer may spend without messages available for consumption
            before returning None.
        :type consumer_timeout_ms: int
        :param rebalance_max_retries: The number of times the rebalance should
            retry before raising an error.
        :type rebalance_max_retries: int
        :param rebalance_backoff_ms: Backoff time (in milliseconds) between
            retries during rebalance.
        :type rebalance_backoff_ms: int
        :param zookeeper_connection_timeout_ms: The maximum time (in
            milliseconds) that the consumer waits while establishing a
            connection to zookeeper.
        :type zookeeper_connection_timeout_ms: int
        :param zookeeper_connect: Comma-separated (ip1:port1,ip2:port2) strings
            indicating the zookeeper nodes to which to connect.
        :type zookeeper_connect: str
        :param zookeeper: A KazooClient connected to a Zookeeper instance.
            If provided, `zookeeper_connect` is ignored.
        :type zookeeper: :class:`kazoo.client.KazooClient`
        :param auto_start: Whether the consumer should begin communicating
            with zookeeper after __init__ is complete. If false, communication
            can be started with `start()`.
        :type auto_start: bool
        :param reset_offset_on_start: Whether the consumer should reset its
            internal offset counter to `self._auto_offset_reset` and commit that
            offset immediately upon starting up
        :type reset_offset_on_start: bool
        """
        self._cluster = cluster
        self._consumer_group = consumer_group
        self._topic = topic

        self._auto_commit_enable = auto_commit_enable
        self._auto_commit_interval_ms = auto_commit_interval_ms
        self._fetch_message_max_bytes = fetch_message_max_bytes
        self._fetch_min_bytes = fetch_min_bytes
        self._rebalance_max_retries = rebalance_max_retries
        self._num_consumer_fetchers = num_consumer_fetchers
        self._queued_max_messages = queued_max_messages
        self._fetch_wait_max_ms = fetch_wait_max_ms
        self._rebalance_backoff_ms = rebalance_backoff_ms
        self._consumer_timeout_ms = consumer_timeout_ms
        self._offsets_channel_backoff_ms = offsets_channel_backoff_ms
        self._offsets_commit_max_retries = offsets_commit_max_retries
        self._auto_offset_reset = auto_offset_reset
        self._zookeeper_connect = zookeeper_connect
        self._zookeeper_connection_timeout_ms = zookeeper_connection_timeout_ms
        self._reset_offset_on_start = reset_offset_on_start

        self._rebalancing_lock = cluster.handler.Lock()
        self._consumer = None
        # FIX: initialize the running flag so the checker thread and callers
        # never hit an AttributeError before/without `start()` being called.
        self._running = False
        self._consumer_id = "{hostname}:{uuid}".format(
            hostname=socket.gethostname(),
            uuid=uuid4()
        )
        self._partitions = set()
        self._setting_watches = True

        self._topic_path = '/consumers/{group}/owners/{topic}'.format(
            group=self._consumer_group,
            topic=self._topic.name)
        self._consumer_id_path = '/consumers/{group}/ids'.format(
            group=self._consumer_group)

        self._zookeeper = None
        if zookeeper is not None:
            self._zookeeper = zookeeper
        if auto_start is True:
            self.start()

    def __repr__(self):
        return "<{module}.{name} at {id_} (consumer_group={group})>".format(
            module=self.__class__.__module__,
            name=self.__class__.__name__,
            id_=hex(id(self)),
            group=self._consumer_group
        )

    def _setup_checker_worker(self):
        """Start the zookeeper partition checker thread"""
        def checker():
            while True:
                time.sleep(120)
                if not self._running:
                    break
                self._check_held_partitions()
            log.debug("Checker thread exiting")
        log.debug("Starting checker thread")
        return self._cluster.handler.spawn(checker)

    @property
    def partitions(self):
        return self._consumer.partitions if self._consumer else None

    @property
    def held_offsets(self):
        """Return a map from partition id to held offset for each partition"""
        if not self._consumer:
            return None
        # `.values()` works on both Python 2 and 3 (`itervalues` is Python-2-only)
        return dict((p.partition.id, p.last_offset_consumed)
                    for p in self._consumer._partitions_by_id.values())

    def start(self):
        """Open connections and join a cluster."""
        if self._zookeeper is None:
            self._setup_zookeeper(self._zookeeper_connect,
                                  self._zookeeper_connection_timeout_ms)
        self._zookeeper.ensure_path(self._topic_path)
        self._add_self()
        self._set_watches()
        self._rebalance()
        self._running = True
        self._setup_checker_worker()

    def stop(self):
        """Close the zookeeper connection and stop consuming.

        This method should be called as part of a graceful shutdown process.
        """
        # Clear the running flag first so the checker thread does not trigger
        # another partition check against a stopped zookeeper client.
        self._running = False
        self._zookeeper.stop()
        self._consumer.stop()

    def _setup_zookeeper(self, zookeeper_connect, timeout):
        """Open a connection to a ZooKeeper host.

        :param zookeeper_connect: The 'ip:port' address of the zookeeper node to
            which to connect.
        :type zookeeper_connect: str
        :param timeout: Connection timeout (in milliseconds)
        :type timeout: int
        """
        # KazooClient expects its timeout in seconds
        self._zookeeper = KazooClient(zookeeper_connect, timeout=timeout / 1000)
        self._zookeeper.start()

    def _setup_internal_consumer(self, start=True):
        """Instantiate an internal SimpleConsumer.

        If there is already a SimpleConsumer instance held by this object,
        disable its workers and mark it for garbage collection before
        creating a new one.
        """
        reset_offset_on_start = self._reset_offset_on_start
        if self._consumer is not None:
            self._consumer.stop()
            # only use this setting for the first call to
            # _setup_internal_consumer. subsequent calls should not
            # reset the offsets, since they can happen at any time
            reset_offset_on_start = False
        self._consumer = SimpleConsumer(
            self._topic,
            self._cluster,
            consumer_group=self._consumer_group,
            partitions=list(self._partitions),
            auto_commit_enable=self._auto_commit_enable,
            auto_commit_interval_ms=self._auto_commit_interval_ms,
            fetch_message_max_bytes=self._fetch_message_max_bytes,
            fetch_min_bytes=self._fetch_min_bytes,
            num_consumer_fetchers=self._num_consumer_fetchers,
            queued_max_messages=self._queued_max_messages,
            fetch_wait_max_ms=self._fetch_wait_max_ms,
            consumer_timeout_ms=self._consumer_timeout_ms,
            offsets_channel_backoff_ms=self._offsets_channel_backoff_ms,
            offsets_commit_max_retries=self._offsets_commit_max_retries,
            auto_offset_reset=self._auto_offset_reset,
            reset_offset_on_start=reset_offset_on_start,
            auto_start=start
        )

    def _decide_partitions(self, participants):
        """Decide which partitions belong to this consumer.

        Uses the consumer rebalancing algorithm described here
        http://kafka.apache.org/documentation.html

        It is very important that the participants array is sorted,
        since this algorithm runs on each consumer and indexes into the same
        array. The same array index operation must return the same
        result on each consumer.

        :param participants: Sorted list of ids of all other consumers in this
            consumer group.
        :type participants: Iterable of str
        """
        # Freeze and sort partitions so we always have the same results
        p_to_str = lambda p: '-'.join([p.topic.name, str(p.leader.id), str(p.id)])
        all_parts = sorted(self._topic.partitions.values(), key=p_to_str)

        # get start point, # of partitions, and remainder
        participants.sort()  # just make sure it's sorted.
        idx = participants.index(self._consumer_id)
        # FIX: use integer floor division. `math.floor` returns a float under
        # this module's `from __future__ import division`, and float indices
        # make `itertools.islice` below raise at runtime.
        parts_per_consumer = len(all_parts) // len(participants)
        remainder_ppc = len(all_parts) % len(participants)

        start = parts_per_consumer * idx + min(idx, remainder_ppc)
        num_parts = parts_per_consumer + (0 if (idx + 1 > remainder_ppc) else 1)

        # assign partitions from i*N to (i+1)*N - 1 to consumer Ci
        new_partitions = itertools.islice(all_parts, start, start + num_parts)
        new_partitions = set(new_partitions)
        log.info('Balancing %i participants for %i partitions.\nOwning %i partitions.',
                 len(participants), len(all_parts), len(new_partitions))
        log.debug('My partitions: %s', [p_to_str(p) for p in new_partitions])
        return new_partitions

    def _get_participants(self):
        """Use zookeeper to get the other consumers of this topic.

        :return: A sorted list of the ids of the other consumers of this
            consumer's topic
        """
        try:
            consumer_ids = self._zookeeper.get_children(self._consumer_id_path)
        except NoNodeException:
            log.debug("Consumer group doesn't exist. "
                      "No participants to find")
            return []

        participants = []
        for id_ in consumer_ids:
            try:
                topic, stat = self._zookeeper.get("%s/%s" % (self._consumer_id_path, id_))
                if topic == self._topic.name:
                    participants.append(id_)
            except NoNodeException:
                pass  # disappeared between ``get_children`` and ``get``
        participants.sort()
        return participants

    def _set_watches(self):
        """Set watches in zookeeper that will trigger rebalances.

        Rebalances should be triggered whenever a broker, topic, or consumer
        znode is changed in zookeeper. This ensures that the balance of the
        consumer group remains up-to-date with the current state of the
        cluster.
        """
        self._setting_watches = True
        # Set all our watches and then rebalance
        broker_path = '/brokers/ids'
        try:
            self._broker_watcher = ChildrenWatch(
                self._zookeeper, broker_path,
                self._brokers_changed
            )
        except NoNodeException:
            raise Exception(
                'The broker_path "%s" does not exist in your '
                'ZooKeeper cluster -- is your Kafka cluster running?'
                % broker_path)

        self._topics_watcher = ChildrenWatch(
            self._zookeeper,
            '/brokers/topics',
            self._topics_changed
        )

        self._consumer_watcher = ChildrenWatch(
            self._zookeeper, self._consumer_id_path,
            self._consumers_changed
        )
        self._setting_watches = False

    def _add_self(self):
        """Register this consumer in zookeeper.

        This method ensures that the number of participants is at most the
        number of partitions.
        """
        participants = self._get_participants()
        if len(self._topic.partitions) <= len(participants):
            raise KafkaException("Cannot add consumer: more consumers than partitions")

        path = '{path}/{id_}'.format(
            path=self._consumer_id_path,
            id_=self._consumer_id
        )
        self._zookeeper.create(
            path, self._topic.name, ephemeral=True, makepath=True)

    def _rebalance(self):
        """Claim partitions for this consumer.

        This method is called whenever a zookeeper watch is triggered.
        """
        if self._consumer is not None:
            self.commit_offsets()
        with self._rebalancing_lock:
            log.info('Rebalancing consumer %s for topic %s.',
                     self._consumer_id, self._topic.name)

            for i in range(self._rebalance_max_retries):
                try:
                    # If retrying, be sure to make sure the
                    # partition allocation is correct.
                    participants = self._get_participants()
                    partitions = self._decide_partitions(participants)

                    old_partitions = self._partitions - partitions
                    self._remove_partitions(old_partitions)

                    new_partitions = partitions - self._partitions
                    self._add_partitions(new_partitions)

                    # Only re-create internal consumer if something changed.
                    if old_partitions or new_partitions:
                        self._setup_internal_consumer()

                    log.info('Rebalancing Complete.')
                    # FIX: stop retrying once the rebalance succeeded; without
                    # this break the loop needlessly reran the whole rebalance
                    # `rebalance_max_retries` times, sleeping between runs.
                    break
                except PartitionOwnedError as ex:
                    if i == self._rebalance_max_retries - 1:
                        log.warning('Failed to acquire partition %s after %d retries.',
                                    ex.partition, i)
                        raise
                    log.info('Unable to acquire partition %s. Retrying', ex.partition)
                    time.sleep(i * (self._rebalance_backoff_ms / 1000))

    def _path_from_partition(self, p):
        """Given a partition, return its path in zookeeper.

        :type p: :class:`pykafka.partition.Partition`
        """
        return "%s/%s-%s" % (self._topic_path, p.leader.id, p.id)

    def _remove_partitions(self, partitions):
        """Remove partitions from the zookeeper registry for this consumer.

        Also remove these partitions from the consumer's internal
        partition registry.

        :param partitions: The partitions to remove.
        :type partitions: Iterable of :class:`pykafka.partition.Partition`
        """
        for p in partitions:
            assert p in self._partitions
            self._zookeeper.delete(self._path_from_partition(p))
        self._partitions -= partitions

    def _add_partitions(self, partitions):
        """Add partitions to the zookeeper registry for this consumer.

        Also add these partitions to the consumer's internal partition registry.

        :param partitions: The partitions to add.
        :type partitions: Iterable of :class:`pykafka.partition.Partition`
        """
        for p in partitions:
            try:
                self._zookeeper.create(
                    self._path_from_partition(p),
                    value=self._consumer_id,
                    ephemeral=True
                )
                self._partitions.add(p)
            except NodeExistsError:
                # another consumer claimed this partition first; the caller
                # (_rebalance) retries with a fresh allocation
                raise PartitionOwnedError(p)

    def _check_held_partitions(self):
        """Double-check held partitions against zookeeper

        Ensure that the partitions held by this consumer are the ones that
        zookeeper thinks it's holding. If not, rebalance.
        """
        log.info("Checking held partitions against ZooKeeper")
        # build a set of partition ids zookeeper says we own
        zk_partition_ids = set()
        all_partitions = self._zookeeper.get_children(self._topic_path)
        for partition_slug in all_partitions:
            owner_id, stat = self._zookeeper.get(
                '{path}/{slug}'.format(
                    path=self._topic_path, slug=partition_slug))
            if owner_id == self._consumer_id:
                zk_partition_ids.add(int(partition_slug.split('-')[1]))
        # build a set of partition ids we think we own
        internal_partition_ids = set([p.id for p in self._partitions])
        # compare the two sets, rebalance if necessary
        if internal_partition_ids != zk_partition_ids:
            log.warning("Internal partition registry doesn't match ZooKeeper!")
            log.debug("Internal partition ids: %s\nZooKeeper partition ids: %s",
                      internal_partition_ids, zk_partition_ids)
            self._rebalance()

    def _brokers_changed(self, brokers):
        if self._setting_watches:
            return
        log.debug("Rebalance triggered by broker change")
        self._rebalance()

    def _consumers_changed(self, consumers):
        if self._setting_watches:
            return
        log.debug("Rebalance triggered by consumer change")
        self._rebalance()

    def _topics_changed(self, topics):
        if self._setting_watches:
            return
        log.debug("Rebalance triggered by topic change")
        self._rebalance()

    def reset_offsets(self, partition_offsets=None):
        """Reset offsets for the specified partitions

        Issue an OffsetRequest for each partition and set the appropriate
        returned offset in the OwnedPartition

        :param partition_offsets: (`partition`, `offset`) pairs to reset
            where `partition` is the partition for which to reset the offset
            and `offset` is the new offset the partition should have
        :type partition_offsets: Iterable of
            (:class:`pykafka.partition.Partition`, int)
        """
        if not self._consumer:
            raise ConsumerStoppedException("Internal consumer is stopped")
        self._consumer.reset_offsets(partition_offsets=partition_offsets)

    def consume(self, block=True):
        """Get one message from the consumer

        :param block: Whether to block while waiting for a message
        :type block: bool
        """
        def consumer_timed_out():
            """Indicates whether the consumer has received messages recently"""
            if self._consumer_timeout_ms == -1:
                return False
            disp = (time.time() - self._last_message_time) * 1000.0
            return disp > self._consumer_timeout_ms
        message = None
        self._last_message_time = time.time()
        while message is None and not consumer_timed_out():
            try:
                message = self._consumer.consume(block=block)
            except ConsumerStoppedException:
                # NOTE(review): with consumer_timeout_ms == -1 this can busy-loop
                # while the internal consumer is being replaced during a
                # rebalance -- presumably intentional; confirm before changing.
                continue
            if message:
                self._last_message_time = time.time()
            if not block:
                return message
        return message

    def commit_offsets(self):
        """Commit offsets for this consumer's partitions

        Uses the offset commit/fetch API
        """
        return self._consumer.commit_offsets()
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import base64
import datetime
import json
import logging
from collections import defaultdict, namedtuple
from contextlib import closing
import cloudstorage
from dateutil.relativedelta import relativedelta
from mapreduce import mapreduce_pipeline, context
from pipeline import pipeline
from pipeline.common import List
from mcfw.properties import azzert
from mcfw.rpc import returns, arguments
from rogerthat.bizz.job.send_unread_messages import CleanupGoogleCloudStorageFiles
from rogerthat.consts import MIGRATION_QUEUE, DEBUG, PIPELINE_BUCKET
from rogerthat.models import Message
from rogerthat.models.properties.messaging import MessageMemberStatusTO
from rogerthat.settings import get_server_settings
from rogerthat.utils import is_flag_set, guid, send_mail
from rogerthat.utils.app import get_app_user_tuple_by_email
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Per-recipient message counters collected by the mapreduce job; all fields are ints.
Stats = namedtuple('Stats', 'total received read acked')
# Aggregation bucket size; callers are expected to set at least one of days/months non-zero.
Interval = namedtuple('Interval', 'days months')
@returns(tuple)
@arguments(year=int, week=int, week_count=int)
def get_week_range(year, week, week_count=1):
    """Return (min_date, max_date) spanning `week_count` weeks.

    The range starts on the Monday of week number `week` counted from
    January 1st of `year`; the end is one second before the next range.
    """
    anchor = datetime.date(year, 1, 1) + relativedelta(weeks=week)
    monday = anchor - relativedelta(days=anchor.weekday())
    end = monday + relativedelta(weeks=week_count) - relativedelta(seconds=1)
    return monday, end
@returns(tuple)
@arguments(year=int, month=int, month_count=int)
def get_month_range(year, month, month_count=1):
    """Return (min_date, max_date) covering `month_count` months starting at `year`-`month`.

    The end of the range is one second before the first moment of the
    following month.
    """
    start = datetime.date(year, month, 1)
    end = start + relativedelta(months=month_count) - relativedelta(seconds=1)
    return start, end
@returns(unicode)
@arguments(min_time=long, max_time=long, interval=Interval, skip_messages_sent_by_js_mfr=bool)
def start_job(min_time, max_time, interval=None, skip_messages_sent_by_js_mfr=False):
    """Kick off the MessageStatsPipeline on the migration queue.

    Returns the absolute URL of the pipeline's status page.
    """
    if interval is None:
        interval = Interval(days=1, months=0)
    key = 'user_stats_%s_%s' % (min_time, max_time) + guid()
    pipe = MessageStatsPipeline(key, min_time, max_time, interval, skip_messages_sent_by_js_mfr)
    task = pipe.start(idempotence_key=key, return_task=True)
    task.add(queue_name=MIGRATION_QUEUE)
    redirect_url = "%s/status?root=%s" % (pipe.base_path, pipe.pipeline_id)
    logging.info("UserMessageStats pipeline url: %s", redirect_url)
    return get_server_settings().baseUrl + redirect_url
@returns(unicode)
@arguments(score=long, max_score=long)
def percent(score, max_score):
    """Format score/max_score as a rounded percentage string, e.g. u'42%'.

    A falsy `max_score` yields u'0%' (avoids division by zero).
    """
    if max_score:
        value = int(round(100.0 * score / max_score))
    else:
        value = 0
    return u'%s%%' % value
@returns(int)
@arguments(timestamp=long, min_time=long, interval=Interval)
def get_interval_index(timestamp, min_time, interval):
    """Return the 0-based index of the interval bucket containing `timestamp`.

    `timestamp` and `min_time` are epoch seconds (UTC); `interval` defines the
    bucket size. When both `months` and `days` are set, `months` wins.

    Raises (via azzert) when `timestamp` precedes `min_time` or when the
    interval is empty.
    """
    azzert(timestamp >= min_time)
    azzert(interval.months or interval.days)
    date = datetime.datetime.utcfromtimestamp(timestamp)
    min_date = datetime.datetime.utcfromtimestamp(min_time)
    if interval.months:
        months_diff = 12 * (date.year - min_date.year) + date.month - min_date.month
        # Explicit floor division: plain `/` floor-divides ints on Python 2 but
        # would silently produce a float under `from __future__ import division`
        # or Python 3, breaking callers that use the result as a dict key/index.
        return months_diff // interval.months
    if interval.days:
        days_diff = (date - min_date).days
        return days_diff // interval.days
@returns(unicode)
@arguments(i=int, min_date=(datetime.datetime, datetime.date), max_date=(datetime.datetime, datetime.date),
           interval=Interval)
def get_period_str(i, min_date, max_date, interval):
    """Render the i-th interval as 'dd/mm/yyyy' or 'dd/mm/yyyy - dd/mm/yyyy'.

    The period end is capped at `max_date` and lies one second before the
    start of the following period.
    """
    step = relativedelta(days=interval.days, months=interval.months)
    period_start = min_date + relativedelta(days=interval.days * i, months=interval.months * i)
    period_end = min(max_date, period_start + step - relativedelta(seconds=1))
    start_str = period_start.strftime('%d/%m/%Y')
    end_str = period_end.strftime('%d/%m/%Y')
    if start_str == end_str:
        return start_str
    return u'%s - %s' % (start_str, end_str)
def mapper(message):
    """Map step: for every recipient of `message` (the sender is skipped),
    yield (member_email, (interval_index, stats_list)).

    Runs inside the mapreduce framework; job parameters are read from the
    mapper context set up by MessageStatsPipeline.
    """
    params = context.get().mapreduce_spec.mapper.params
    # optionally skip messages produced by the JS message flow runner
    if params['skip_messages_sent_by_js_mfr'] and is_flag_set(Message.FLAG_SENT_BY_JS_MFR, message.flags):
        return
    min_time, max_time, interval = params['min_time'], params['max_time'], Interval(*params['interval'])
    for i, member in enumerate(message.members):
        if member == message.sender:
            continue
        # assumes member statuses are parallel to message.members -- TODO confirm
        ms = message.get_member_statuses()[i]
        # a status only counts when its timestamp falls inside the job window
        received = is_flag_set(MessageMemberStatusTO.STATUS_RECEIVED, ms.status) \
            and min_time <= ms.received_timestamp <= max_time
        read = received and is_flag_set(MessageMemberStatusTO.STATUS_READ, ms.status)
        acked = is_flag_set(MessageMemberStatusTO.STATUS_ACKED, ms.status) and min_time <= ms.acked_timestamp <= max_time
        stats = Stats(total=1, received=int(received), read=int(read), acked=int(acked))
        index = get_interval_index(message.creationTimestamp, min_time, interval)
        if DEBUG:
            logging.debug('MAPPER: %s, %s, %s', member.email(), index, stats)
        # index as str and stats as plain list so the pair survives serialization
        yield member.email(), (str(index), list(stats))
def _combine(lists):
'''Eg. [ [4,3,2,1], [4,3,2,1], [4,3,2,1] ] --> [12,9,6,3]'''
return map(sum, zip(*lists))
def _combine_interval_stats(new_values, prev_stats_dict):
    """Fold `new_values` -- (index, stats) pairs, possibly serialized as their
    string repr -- into `prev_stats_dict` ({index: stats}) and return it.

    The dict passed in is mutated in place.
    """
    for v in new_values:
        # NOTE(review): `eval` deserializes values that arrive as strings from
        # the mapreduce framework. The data is produced internally by `mapper`,
        # but `ast.literal_eval` would be the safer choice for this shape of
        # data -- confirm nothing non-literal is serialized before changing.
        # (`basestring` ties this module to Python 2.)
        index, new_stats = eval(v) if isinstance(v, basestring) else v
        prev_stats = prev_stats_dict.get(index)
        if prev_stats:
            # same interval seen before: sum the counters element-wise
            prev_stats_dict[index] = _combine([prev_stats, new_stats])
        else:
            prev_stats_dict[index] = new_stats
    return prev_stats_dict
def combiner(key, new_values, previously_combined_values):
    '''Combine step of the mapreduce job.

    key: the app_user email
    new_values: newly collected list with tuples: [(index, Stats)]
    previously_combined_values: previously combined list with tuples: [(index, Stats)]

    Yields (index, stats) pairs with the per-interval counters summed.
    '''
    if DEBUG:
        logging.debug('COMBINER %s new_values: %s', key, new_values)
        logging.debug('COMBINER %s previously_combined_values: %s', key, previously_combined_values)
    combined = _combine_interval_stats(new_values, dict(previously_combined_values))
    if DEBUG:
        logging.debug('COMBINER %s combined: %s', key, combined)
    for pair in combined.iteritems():
        yield pair
def reducer(key, values):
    '''
    key: the identifier of the team group
    values: collected list with tuples: [(index, Stats)]
    '''
    stats_by_index = _combine_interval_stats(values, dict())  # Eg. {"0": [2,1], "3": [9,8]}
    app_user, app_id = get_app_user_tuple_by_email(key)
    payload = dict(email=app_user.email(),
                   app_id=app_id,
                   stats=stats_by_index)
    json_line = json.dumps(payload)
    if DEBUG:
        logging.debug('REDUCER %s: %s', key, json_line)
    # One JSON document per line in the output file.
    yield '%s\n' % json_line
class MessageStatsPipeline(pipeline.Pipeline):
    """Top-level pipeline: runs the map/combine/reduce over Message entities
    in [min_time, max_time], then post-processes and cleans up the output."""

    def run(self, key, min_time, max_time, interval, skip_messages_sent_by_js_mfr):
        # Mapper/combiner/reducer are referenced by dotted path; the
        # min/max/interval params are repeated for both mapper and reducer.
        params = dict(mapper_spec='rogerthat.bizz.job.user_stats.mapper',
                      mapper_params=dict(bucket_name=PIPELINE_BUCKET,
                                         entity_kind='rogerthat.models.Message',
                                         filters=[('creationTimestamp', '>=', min_time),
                                                  ('creationTimestamp', '<=', max_time)],
                                         min_time=min_time,
                                         max_time=max_time,
                                         interval=interval,
                                         skip_messages_sent_by_js_mfr=skip_messages_sent_by_js_mfr),
                      combiner_spec='rogerthat.bizz.job.user_stats.combiner',
                      reducer_spec='rogerthat.bizz.job.user_stats.reducer',
                      reducer_params=dict(output_writer=dict(bucket_name=PIPELINE_BUCKET),
                                          min_time=min_time,
                                          max_time=max_time,
                                          interval=interval),
                      input_reader_spec='mapreduce.input_readers.DatastoreInputReader',
                      output_writer_spec='mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter',
                      shards=2 if DEBUG else 10)
        output = yield mapreduce_pipeline.MapreducePipeline(key, **params)
        process_output_pipeline = yield ProcessOutputPipeline(output, min_time, max_time, interval,
                                                              skip_messages_sent_by_js_mfr)
        # Only delete the intermediate GCS files once processing finished.
        with pipeline.After(process_output_pipeline):
            yield CleanupGoogleCloudStorageFiles(output)

    def finalized(self):
        # Called by the pipeline framework when the whole run ends.
        if self.was_aborted:
            logging.error("%s was aborted", self, _suppress=False)
            return
        logging.info("%s was finished", self)
class ProcessOutputPipeline(pipeline.Pipeline):
    """Aggregates the per-file mapreduce output into per-app statistics and
    mails a text summary with an xls attachment."""

    def run(self, output, min_time, max_time, interval, skip_messages_sent_by_js_mfr):
        # Fan out: one child pipeline per GCS output file.
        results = list()
        for filename in output:
            results.append((yield ProcessFilePipeline(filename, min_time, max_time, interval)))
        yield List(*results)

    def finalized(self):
        # Runs after all child pipelines completed: do the final combine of
        # the per-file results and send the report mail.
        if DEBUG:
            logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value)
        _, min_time, max_time, interval, skip_messages_sent_by_js_mfr = self.args
        interval = Interval(*interval)
        result_length = get_interval_index(max_time, min_time, interval) + 1
        factory = lambda: [Stats(0, 0, 0, 0) for _ in xrange(result_length)]
        # Doing a final combine
        final_stats_per_app = defaultdict(factory)
        for stats_per_app_to_be_added in self.outputs.default.value:
            for app_id, stats_list in stats_per_app_to_be_added.iteritems():
                # Generate the results per app
                for i, stats in enumerate(stats_list):
                    final_stats_per_app[app_id][i] = _combine([final_stats_per_app[app_id][i], stats])
        if DEBUG:
            logging.debug('ProcessOutputPipeline: final_stats_per_app = %s', final_stats_per_app)
        self.send_mail(final_stats_per_app, min_time, max_time, interval, skip_messages_sent_by_js_mfr)

    def send_mail(self, final_stats_per_app, min_time, max_time, interval, skip_messages_sent_by_js_mfr):
        """Compose the plain-text period summary plus xls attachment and mail
        it to the configured support workers."""
        min_date = datetime.datetime.utcfromtimestamp(min_time)
        max_date = datetime.datetime.utcfromtimestamp(max_time)
        min_date_str = min_date.strftime('%d %b %Y')
        max_date_str = max_date.strftime('%d %b %Y')
        with closing(StringIO()) as s:
            s.write('User stats from %s until %s with %s interval'
                    % (min_date_str, max_date_str,
                       ('%s month(s)' % interval.months) if interval.months else ('%s day(s)' % interval.days)))
            if skip_messages_sent_by_js_mfr:
                s.write(' (messages sent by JS_MFR are skipped)')
            s.write('.\nSee attached document for details per app.\n\nSummary:\n')
            # TOTAL STATS PER PERIOD
            result_length = get_interval_index(max_time, min_time, interval) + 1
            total_stats_list = [Stats(0, 0, 0, 0) for _ in xrange(result_length)]
            for stats_list in final_stats_per_app.itervalues():
                for i, stats in enumerate(total_stats_list):
                    total_stats_list[i] = _combine([total_stats_list[i], stats_list[i]])
            if DEBUG:
                logging.warn('ProcessOutputPipeline: total_stats_list = %s', total_stats_list)
            # One summary paragraph per interval period.
            for i, stats in enumerate(total_stats_list):
                stats = Stats(*stats)
                s.write('''
%s
total: %s
received: %s/%s (%s of total)
read: %s/%s (%s of received, %s of total)
acked: %s/%s (%s of read, %s of received, %s of total)
''' % (get_period_str(i, min_date, max_date, interval), stats.total,
       stats.received, stats.total, percent(stats.received, stats.total),
       stats.read, stats.received, percent(stats.read, stats.received), percent(stats.read, stats.total),
       stats.acked, stats.read, percent(stats.acked, stats.read), percent(stats.acked, stats.received), percent(stats.acked, stats.total)))
            body = s.getvalue()
            server_settings = get_server_settings()
            mail_receivers = server_settings.supportWorkers
            subject = u'Rogerthat user stats: %s - %s' % (min_date_str, max_date_str)
            attachments = [self.create_xls_attachment(final_stats_per_app, total_stats_list, min_date, max_date, interval)]
            send_mail(server_settings.dashboardEmail, mail_receivers, subject, body, attachments=attachments)

    def create_xls_attachment(self, final_stats_per_app, total_stats_list, min_date, max_date, interval):
        """Build an xls workbook with one sheet per app (plus a 'Total' sheet)
        and return it as a (filename, base64-content) attachment tuple."""
        import xlwt
        book = xlwt.Workbook(encoding='utf-8')
        for app_id, stats_list in [('Total', total_stats_list)] + sorted(final_stats_per_app.iteritems()):
            sheet = book.add_sheet(app_id)
            # Header row.
            for col, label in enumerate(('period', 'total', 'received', 'read', 'acked',
                                         'received/total', 'read/received', 'read/total',
                                         'acked/read', 'acked/received', 'acked/total')):
                sheet.write(0, col, label)
            # One data row per interval period.
            for i, stats in enumerate(stats_list):
                stats = Stats(*stats)
                for col, text in enumerate((get_period_str(i, min_date, max_date, interval),
                                            stats.total,
                                            stats.received,
                                            stats.read,
                                            stats.acked,
                                            percent(stats.received, stats.total),
                                            percent(stats.read, stats.received),
                                            percent(stats.read, stats.total),
                                            percent(stats.acked, stats.read),
                                            percent(stats.acked, stats.received),
                                            percent(stats.acked, stats.total))):
                    sheet.write(i + 1, col, text)
        with closing(StringIO()) as output:
            book.save(output)
            output.seek(0)
            return ('%s - %s.xls' % (min_date.strftime('%d %b %Y'), max_date.strftime('%d %b %Y')),
                    base64.b64encode(output.getvalue()))
class ProcessFilePipeline(pipeline.Pipeline):
    """Reads one reducer output file from cloud storage and aggregates its
    per-user JSON lines into per-app statistics."""

    def run(self, filename, min_time, max_time, interval):
        interval = Interval(*interval)
        result_length = get_interval_index(max_time, min_time, interval) + 1
        factory = lambda: [Stats(0, 0, 0, 0) for _ in xrange(result_length)]
        # amount of unique users that (have received/read/acked) a message
        stats_per_app = defaultdict(factory)  # {app_id: [ [2,1], [0,0], [0,0], [9,8], [0,0] ]}
        with cloudstorage.open(filename, "r") as f:
            for json_line in f:
                d = json.loads(json_line)
                # Flatten the stats dict into a list
                # Example with result_length=5: {"0": [2,1], "3": [9,8]} --> [[2,1], [0,0], [0,0], [9,8], [0,0]]
                stats_list = factory()
                for index_str, stats in d['stats'].iteritems():
                    stats_list[int(index_str)] = Stats(*stats)
                # Generate the results per app
                # map(bool, stats) collapses the per-user counts to 0/1 so
                # each user contributes at most 1 per period (unique users).
                for i, stats in enumerate(stats_list):
                    stats_per_app[d['app_id']][i] = _combine([stats_per_app[d['app_id']][i], map(bool, stats)])
        logging.debug('ProcessFilePipeline: %s', stats_per_app)
        return stats_per_app
|
|
"""
Read SAS7BDAT files
Based on code written by Jared Hobbs:
https://bitbucket.org/jaredhobbs/sas7bdat
See also:
https://github.com/BioStatMatt/sas7bdat
Partial documentation of the file format:
https://cran.r-project.org/package=sas7bdat/vignettes/sas7bdat.pdf
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
from collections import abc
from datetime import datetime, timedelta
import struct
from typing import IO, Any, Union, cast
import numpy as np
from pandas.errors import EmptyDataError, OutOfBoundsDatetime
import pandas as pd
from pandas.io.common import get_handle
from pandas.io.sas._sas import Parser
import pandas.io.sas.sas_constants as const
from pandas.io.sas.sasreader import ReaderBase
def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:
"""
Convert to Timestamp if possible, otherwise to datetime.datetime.
SAS float64 lacks precision for more than ms resolution so the fit
to datetime.datetime is ok.
Parameters
----------
sas_datetimes : {Series, Sequence[float]}
Dates or datetimes in SAS
unit : {str}
"d" if the floats represent dates, "s" for datetimes
Returns
-------
Series
Series of datetime64 dtype or datetime.datetime.
"""
try:
return pd.to_datetime(sas_datetimes, unit=unit, origin="1960-01-01")
except OutOfBoundsDatetime:
if unit == "s":
return sas_datetimes.apply(
lambda sas_float: datetime(1960, 1, 1) + timedelta(seconds=sas_float)
)
elif unit == "d":
return sas_datetimes.apply(
lambda sas_float: datetime(1960, 1, 1) + timedelta(days=sas_float)
)
else:
raise ValueError("unit must be 'd' or 's'")
class _SubheaderPointer:
offset: int
length: int
compression: int
ptype: int
def __init__(self, offset: int, length: int, compression: int, ptype: int):
self.offset = offset
self.length = length
self.compression = compression
self.ptype = ptype
class _Column:
col_id: int
name: Union[str, bytes]
label: Union[str, bytes]
format: Union[str, bytes] # TODO: i think allowing bytes is from py2 days
ctype: bytes
length: int
def __init__(
self,
col_id: int,
name: Union[str, bytes],
label: Union[str, bytes],
format: Union[str, bytes],
ctype: bytes,
length: int,
):
self.col_id = col_id
self.name = name
self.label = label
self.format = format
self.ctype = ctype
self.length = length
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
class SAS7BDATReader(ReaderBase, abc.Iterator):
    """
    Read SAS files in SAS7BDAT format.

    Parameters
    ----------
    path_or_buf : path name or buffer
        Name of SAS file or file-like object pointing to SAS file
        contents.
    index : column identifier, defaults to None
        Column to use as index.
    convert_dates : boolean, defaults to True
        Attempt to convert dates to Pandas datetime values. Note that
        some rarely used SAS date formats may be unsupported.
    blank_missing : boolean, defaults to True
        Convert empty strings to missing values (SAS uses blanks to
        indicate missing character variables).
    chunksize : int, defaults to None
        Return SAS7BDATReader object for iterations, returns chunks
        with given number of lines.
    encoding : string, defaults to None
        String encoding.
    convert_text : bool, defaults to True
        If False, text variables are left as raw bytes.
    convert_header_text : bool, defaults to True
        If False, header text, including column names, are left as raw
        bytes.
    """

    def __init__(
        self,
        path_or_buf,
        index=None,
        convert_dates=True,
        blank_missing=True,
        chunksize=None,
        encoding=None,
        convert_text=True,
        convert_header_text=True,
    ):
        self.index = index
        self.convert_dates = convert_dates
        self.blank_missing = blank_missing
        self.chunksize = chunksize
        self.encoding = encoding
        self.convert_text = convert_text
        self.convert_header_text = convert_header_text
        # Fallback encoding used whenever no explicit encoding was requested.
        self.default_encoding = "latin-1"
        self.compression = b""
        self.column_names_strings = []
        self.column_names = []
        self.column_formats = []
        self.columns = []
        self._current_page_data_subheader_pointers = []
        self._cached_page = None
        self._column_data_lengths = []
        self._column_data_offsets = []
        self._column_types = []
        self._current_row_in_file_index = 0
        self._current_row_on_page_index = 0
        # NOTE(review): duplicate of the assignment two lines above.
        self._current_row_in_file_index = 0
        self.handles = get_handle(path_or_buf, "rb", is_text=False)
        self._path_or_buf = cast(IO[Any], self.handles.handle)
        try:
            self._get_properties()
            self._parse_metadata()
        except Exception:
            # Don't leak the open handle if header/metadata parsing fails.
            self.close()
            raise

    def column_data_lengths(self):
        """Return a numpy int64 array of the column data lengths"""
        return np.asarray(self._column_data_lengths, dtype=np.int64)

    def column_data_offsets(self):
        """Return a numpy int64 array of the column offsets"""
        return np.asarray(self._column_data_offsets, dtype=np.int64)

    def column_types(self):
        """
        Returns a numpy character array of the column types:
        s (string) or d (double)
        """
        return np.asarray(self._column_types, dtype=np.dtype("S1"))

    def close(self):
        """Close the underlying file handle(s)."""
        self.handles.close()

    def _get_properties(self):
        """Parse the fixed-size file header: magic number, alignment,
        endianness, encoding, dataset name, timestamps and page geometry."""
        # Check magic number
        self._path_or_buf.seek(0)
        self._cached_page = self._path_or_buf.read(288)
        if self._cached_page[0 : len(const.magic)] != const.magic:
            raise ValueError("magic number mismatch (not a SAS file?)")
        # Get alignment information
        align1, align2 = 0, 0
        buf = self._read_bytes(const.align_1_offset, const.align_1_length)
        if buf == const.u64_byte_checker_value:
            # 64-bit layout: wider integers and shifted page offsets.
            align2 = const.align_2_value
            self.U64 = True
            self._int_length = 8
            self._page_bit_offset = const.page_bit_offset_x64
            self._subheader_pointer_length = const.subheader_pointer_length_x64
        else:
            self.U64 = False
            self._page_bit_offset = const.page_bit_offset_x86
            self._subheader_pointer_length = const.subheader_pointer_length_x86
            self._int_length = 4
        buf = self._read_bytes(const.align_2_offset, const.align_2_length)
        if buf == const.align_1_checker_value:
            align1 = const.align_2_value
        total_align = align1 + align2
        # Get endianness information
        buf = self._read_bytes(const.endianness_offset, const.endianness_length)
        if buf == b"\x01":
            self.byte_order = "<"
        else:
            self.byte_order = ">"
        # Get encoding information
        buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
        if buf in const.encoding_names:
            self.file_encoding = const.encoding_names[buf]
        else:
            self.file_encoding = f"unknown (code={buf})"
        # Get platform information
        buf = self._read_bytes(const.platform_offset, const.platform_length)
        if buf == b"1":
            self.platform = "unix"
        elif buf == b"2":
            self.platform = "windows"
        else:
            self.platform = "unknown"
        buf = self._read_bytes(const.dataset_offset, const.dataset_length)
        self.name = buf.rstrip(b"\x00 ")
        if self.convert_header_text:
            self.name = self.name.decode(self.encoding or self.default_encoding)
        buf = self._read_bytes(const.file_type_offset, const.file_type_length)
        self.file_type = buf.rstrip(b"\x00 ")
        if self.convert_header_text:
            self.file_type = self.file_type.decode(
                self.encoding or self.default_encoding
            )
        # Timestamp is epoch 01/01/1960
        epoch = datetime(1960, 1, 1)
        x = self._read_float(
            const.date_created_offset + align1, const.date_created_length
        )
        self.date_created = epoch + pd.to_timedelta(x, unit="s")
        x = self._read_float(
            const.date_modified_offset + align1, const.date_modified_length
        )
        self.date_modified = epoch + pd.to_timedelta(x, unit="s")
        self.header_length = self._read_int(
            const.header_size_offset + align1, const.header_size_length
        )
        # Read the rest of the header into cached_page.
        buf = self._path_or_buf.read(self.header_length - 288)
        self._cached_page += buf
        if len(self._cached_page) != self.header_length:
            raise ValueError("The SAS7BDAT file appears to be truncated.")
        self._page_length = self._read_int(
            const.page_size_offset + align1, const.page_size_length
        )
        self._page_count = self._read_int(
            const.page_count_offset + align1, const.page_count_length
        )
        buf = self._read_bytes(
            const.sas_release_offset + total_align, const.sas_release_length
        )
        self.sas_release = buf.rstrip(b"\x00 ")
        if self.convert_header_text:
            self.sas_release = self.sas_release.decode(
                self.encoding or self.default_encoding
            )
        buf = self._read_bytes(
            const.sas_server_type_offset + total_align, const.sas_server_type_length
        )
        self.server_type = buf.rstrip(b"\x00 ")
        if self.convert_header_text:
            self.server_type = self.server_type.decode(
                self.encoding or self.default_encoding
            )
        buf = self._read_bytes(
            const.os_version_number_offset + total_align, const.os_version_number_length
        )
        self.os_version = buf.rstrip(b"\x00 ")
        if self.convert_header_text:
            self.os_version = self.os_version.decode(
                self.encoding or self.default_encoding
            )
        buf = self._read_bytes(const.os_name_offset + total_align, const.os_name_length)
        buf = buf.rstrip(b"\x00 ")
        if len(buf) > 0:
            self.os_name = buf.decode(self.encoding or self.default_encoding)
        else:
            # Empty os_name field: fall back to the os_maker field.
            buf = self._read_bytes(
                const.os_maker_offset + total_align, const.os_maker_length
            )
            self.os_name = buf.rstrip(b"\x00 ")
            if self.convert_header_text:
                self.os_name = self.os_name.decode(
                    self.encoding or self.default_encoding
                )

    def __next__(self):
        """Iterator protocol: return the next chunk (chunksize rows, or 1)."""
        da = self.read(nrows=self.chunksize or 1)
        if da is None:
            self.close()
            raise StopIteration
        return da

    # Read a single float of the given width (4 or 8).
    def _read_float(self, offset, width):
        if width not in (4, 8):
            self.close()
            raise ValueError("invalid float width")
        buf = self._read_bytes(offset, width)
        fd = "f" if width == 4 else "d"
        return struct.unpack(self.byte_order + fd, buf)[0]

    # Read a single signed integer of the given width (1, 2, 4 or 8).
    def _read_int(self, offset: int, width: int) -> int:
        if width not in (1, 2, 4, 8):
            self.close()
            raise ValueError("invalid int width")
        buf = self._read_bytes(offset, width)
        it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
        iv = struct.unpack(self.byte_order + it, buf)[0]
        return iv

    def _read_bytes(self, offset: int, length: int):
        """Return `length` bytes at `offset`, served from the cached page
        when one is loaded, otherwise straight from the file."""
        if self._cached_page is None:
            self._path_or_buf.seek(offset)
            buf = self._path_or_buf.read(length)
            if len(buf) < length:
                self.close()
                msg = f"Unable to read {length:d} bytes from file position {offset:d}."
                raise ValueError(msg)
            return buf
        else:
            if offset + length > len(self._cached_page):
                self.close()
                raise ValueError("The cached page is too small.")
            return self._cached_page[offset : offset + length]

    def _parse_metadata(self):
        """Read pages until all metadata (columns, formats, ...) is seen."""
        done = False
        while not done:
            self._cached_page = self._path_or_buf.read(self._page_length)
            if len(self._cached_page) <= 0:
                break
            if len(self._cached_page) != self._page_length:
                raise ValueError("Failed to read a meta data page from the SAS file.")
            done = self._process_page_meta()

    def _process_page_meta(self):
        """Process one page; return True once a data/mix page (or a page with
        data subheaders) is reached, i.e. metadata parsing can stop."""
        self._read_page_header()
        pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types
        if self._current_page_type in pt:
            self._process_page_metadata()
        is_data_page = self._current_page_type & const.page_data_type
        is_mix_page = self._current_page_type in const.page_mix_types
        return (
            is_data_page
            or is_mix_page
            or self._current_page_data_subheader_pointers != []
        )

    def _read_page_header(self):
        """Decode type, block count and subheader count of the cached page."""
        bit_offset = self._page_bit_offset
        tx = const.page_type_offset + bit_offset
        self._current_page_type = self._read_int(tx, const.page_type_length)
        tx = const.block_count_offset + bit_offset
        self._current_page_block_count = self._read_int(tx, const.block_count_length)
        tx = const.subheader_count_offset + bit_offset
        self._current_page_subheaders_count = self._read_int(
            tx, const.subheader_count_length
        )

    def _process_page_metadata(self):
        """Dispatch every non-empty, non-truncated subheader on this page."""
        bit_offset = self._page_bit_offset
        for i in range(self._current_page_subheaders_count):
            pointer = self._process_subheader_pointers(
                const.subheader_pointers_offset + bit_offset, i
            )
            if pointer.length == 0:
                continue
            if pointer.compression == const.truncated_subheader_id:
                continue
            subheader_signature = self._read_subheader_signature(pointer.offset)
            subheader_index = self._get_subheader_index(
                subheader_signature, pointer.compression, pointer.ptype
            )
            self._process_subheader(subheader_index, pointer)

    def _get_subheader_index(self, signature, compression, ptype):
        """Map a subheader signature to a SASIndex value; in compressed files
        data subheaders carry no known signature and are detected by flags."""
        index = const.subheader_signature_to_index.get(signature)
        if index is None:
            f1 = (compression == const.compressed_subheader_id) or (compression == 0)
            f2 = ptype == const.compressed_subheader_type
            if (self.compression != b"") and f1 and f2:
                index = const.SASIndex.data_subheader_index
            else:
                self.close()
                raise ValueError("Unknown subheader signature")
        return index

    def _process_subheader_pointers(self, offset: int, subheader_pointer_index: int):
        """Decode the i-th subheader pointer record starting at `offset`."""
        subheader_pointer_length = self._subheader_pointer_length
        total_offset = offset + subheader_pointer_length * subheader_pointer_index
        subheader_offset = self._read_int(total_offset, self._int_length)
        total_offset += self._int_length
        subheader_length = self._read_int(total_offset, self._int_length)
        total_offset += self._int_length
        subheader_compression = self._read_int(total_offset, 1)
        total_offset += 1
        subheader_type = self._read_int(total_offset, 1)
        x = _SubheaderPointer(
            subheader_offset, subheader_length, subheader_compression, subheader_type
        )
        return x

    def _read_subheader_signature(self, offset):
        """Return the (int-sized) signature bytes at the subheader start."""
        subheader_signature = self._read_bytes(offset, self._int_length)
        return subheader_signature

    def _process_subheader(self, subheader_index, pointer):
        """Route the subheader at `pointer` to its dedicated processor;
        data subheaders are only recorded for later row parsing."""
        offset = pointer.offset
        length = pointer.length
        if subheader_index == const.SASIndex.row_size_index:
            processor = self._process_rowsize_subheader
        elif subheader_index == const.SASIndex.column_size_index:
            processor = self._process_columnsize_subheader
        elif subheader_index == const.SASIndex.column_text_index:
            processor = self._process_columntext_subheader
        elif subheader_index == const.SASIndex.column_name_index:
            processor = self._process_columnname_subheader
        elif subheader_index == const.SASIndex.column_attributes_index:
            processor = self._process_columnattributes_subheader
        elif subheader_index == const.SASIndex.format_and_label_index:
            processor = self._process_format_subheader
        elif subheader_index == const.SASIndex.column_list_index:
            processor = self._process_columnlist_subheader
        elif subheader_index == const.SASIndex.subheader_counts_index:
            processor = self._process_subheader_counts
        elif subheader_index == const.SASIndex.data_subheader_index:
            self._current_page_data_subheader_pointers.append(pointer)
            return
        else:
            raise ValueError("unknown subheader index")
        processor(offset, length)

    def _process_rowsize_subheader(self, offset, length):
        """Extract row geometry (row length/count, column count parts) and
        the _lcs/_lcp text lengths used by the column-text subheader."""
        int_len = self._int_length
        lcs_offset = offset
        lcp_offset = offset
        # The lcs/lcp fields live at different fixed offsets on 64-bit files.
        if self.U64:
            lcs_offset += 682
            lcp_offset += 706
        else:
            lcs_offset += 354
            lcp_offset += 378
        self.row_length = self._read_int(
            offset + const.row_length_offset_multiplier * int_len, int_len
        )
        self.row_count = self._read_int(
            offset + const.row_count_offset_multiplier * int_len, int_len
        )
        self.col_count_p1 = self._read_int(
            offset + const.col_count_p1_multiplier * int_len, int_len
        )
        self.col_count_p2 = self._read_int(
            offset + const.col_count_p2_multiplier * int_len, int_len
        )
        mx = const.row_count_on_mix_page_offset_multiplier * int_len
        self._mix_page_row_count = self._read_int(offset + mx, int_len)
        self._lcs = self._read_int(lcs_offset, 2)
        self._lcp = self._read_int(lcp_offset, 2)

    def _process_columnsize_subheader(self, offset, length):
        """Read the total column count and sanity-check it against the two
        partial counts from the row-size subheader."""
        int_len = self._int_length
        offset += int_len
        self.column_count = self._read_int(offset, int_len)
        if self.col_count_p1 + self.col_count_p2 != self.column_count:
            print(
                f"Warning: column count mismatch ({self.col_count_p1} + "
                f"{self.col_count_p2} != {self.column_count})\n"
            )

    # Unknown purpose
    def _process_subheader_counts(self, offset, length):
        pass

    def _process_columntext_subheader(self, offset, length):
        """Collect a column-text block; the first block also reveals the
        compression literal and the creator proc name."""
        offset += self._int_length
        text_block_size = self._read_int(offset, const.text_block_size_length)
        buf = self._read_bytes(offset, text_block_size)
        cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
        cname = cname_raw
        if self.convert_header_text:
            cname = cname.decode(self.encoding or self.default_encoding)
        self.column_names_strings.append(cname)
        if len(self.column_names_strings) == 1:
            # First text block: detect compression and creator proc.
            compression_literal = b""
            for cl in const.compression_literals:
                if cl in cname_raw:
                    compression_literal = cl
            self.compression = compression_literal
            offset -= self._int_length
            offset1 = offset + 16
            if self.U64:
                offset1 += 4
            buf = self._read_bytes(offset1, self._lcp)
            compression_literal = buf.rstrip(b"\x00")
            if compression_literal == b"":
                self._lcs = 0
                offset1 = offset + 32
                if self.U64:
                    offset1 += 4
                buf = self._read_bytes(offset1, self._lcp)
                self.creator_proc = buf[0 : self._lcp]
            elif compression_literal == const.rle_compression:
                offset1 = offset + 40
                if self.U64:
                    offset1 += 4
                buf = self._read_bytes(offset1, self._lcp)
                self.creator_proc = buf[0 : self._lcp]
            elif self._lcs > 0:
                self._lcp = 0
                offset1 = offset + 16
                if self.U64:
                    offset1 += 4
                buf = self._read_bytes(offset1, self._lcs)
                # NOTE(review): slices with _lcp, which was just set to 0, so
                # this yields b"" -- possibly should slice with _lcs; confirm.
                self.creator_proc = buf[0 : self._lcp]
        if self.convert_header_text:
            if hasattr(self, "creator_proc"):
                self.creator_proc = self.creator_proc.decode(
                    self.encoding or self.default_encoding
                )

    def _process_columnname_subheader(self, offset, length):
        """Resolve column names: each pointer references a slice of one of
        the previously collected column-text blocks."""
        int_len = self._int_length
        offset += int_len
        column_name_pointers_count = (length - 2 * int_len - 12) // 8
        for i in range(column_name_pointers_count):
            text_subheader = (
                offset
                + const.column_name_pointer_length * (i + 1)
                + const.column_name_text_subheader_offset
            )
            col_name_offset = (
                offset
                + const.column_name_pointer_length * (i + 1)
                + const.column_name_offset_offset
            )
            col_name_length = (
                offset
                + const.column_name_pointer_length * (i + 1)
                + const.column_name_length_offset
            )
            idx = self._read_int(
                text_subheader, const.column_name_text_subheader_length
            )
            col_offset = self._read_int(
                col_name_offset, const.column_name_offset_length
            )
            col_len = self._read_int(col_name_length, const.column_name_length_length)
            name_str = self.column_names_strings[idx]
            self.column_names.append(name_str[col_offset : col_offset + col_len])

    def _process_columnattributes_subheader(self, offset, length):
        """Read per-column data offset, length and type (1 -> double d,
        otherwise string s)."""
        int_len = self._int_length
        column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8)
        for i in range(column_attributes_vectors_count):
            col_data_offset = (
                offset + int_len + const.column_data_offset_offset + i * (int_len + 8)
            )
            col_data_len = (
                offset
                + 2 * int_len
                + const.column_data_length_offset
                + i * (int_len + 8)
            )
            col_types = (
                offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)
            )
            x = self._read_int(col_data_offset, int_len)
            self._column_data_offsets.append(x)
            x = self._read_int(col_data_len, const.column_data_length_length)
            self._column_data_lengths.append(x)
            x = self._read_int(col_types, const.column_type_length)
            self._column_types.append(b"d" if x == 1 else b"s")

    def _process_columnlist_subheader(self, offset, length):
        # unknown purpose
        pass

    def _process_format_subheader(self, offset, length):
        """Resolve the format and label of the next column and append the
        completed _Column record."""
        int_len = self._int_length
        text_subheader_format = (
            offset + const.column_format_text_subheader_index_offset + 3 * int_len
        )
        col_format_offset = offset + const.column_format_offset_offset + 3 * int_len
        col_format_len = offset + const.column_format_length_offset + 3 * int_len
        text_subheader_label = (
            offset + const.column_label_text_subheader_index_offset + 3 * int_len
        )
        col_label_offset = offset + const.column_label_offset_offset + 3 * int_len
        col_label_len = offset + const.column_label_length_offset + 3 * int_len
        x = self._read_int(
            text_subheader_format, const.column_format_text_subheader_index_length
        )
        # Clamp indices into the collected text blocks.
        format_idx = min(x, len(self.column_names_strings) - 1)
        format_start = self._read_int(
            col_format_offset, const.column_format_offset_length
        )
        format_len = self._read_int(col_format_len, const.column_format_length_length)
        label_idx = self._read_int(
            text_subheader_label, const.column_label_text_subheader_index_length
        )
        label_idx = min(label_idx, len(self.column_names_strings) - 1)
        label_start = self._read_int(col_label_offset, const.column_label_offset_length)
        label_len = self._read_int(col_label_len, const.column_label_length_length)
        label_names = self.column_names_strings[label_idx]
        column_label = label_names[label_start : label_start + label_len]
        format_names = self.column_names_strings[format_idx]
        column_format = format_names[format_start : format_start + format_len]
        current_column_number = len(self.columns)
        col = _Column(
            current_column_number,
            self.column_names[current_column_number],
            column_label,
            column_format,
            self._column_types[current_column_number],
            self._column_data_lengths[current_column_number],
        )
        self.column_formats.append(column_format)
        self.columns.append(col)

    def read(self, nrows=None):
        """Read up to `nrows` rows (defaults to chunksize, else all remaining)
        and return them as a DataFrame, or None when the file is exhausted."""
        if (nrows is None) and (self.chunksize is not None):
            nrows = self.chunksize
        elif nrows is None:
            nrows = self.row_count
        if len(self._column_types) == 0:
            self.close()
            raise EmptyDataError("No columns to parse from file")
        if self._current_row_in_file_index >= self.row_count:
            return None
        # Never read past the end of the file.
        m = self.row_count - self._current_row_in_file_index
        if nrows > m:
            nrows = m
        # Pre-allocate the chunk buffers filled by the Cython Parser.
        nd = self._column_types.count(b"d")
        ns = self._column_types.count(b"s")
        self._string_chunk = np.empty((ns, nrows), dtype=object)
        self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8)
        self._current_row_in_chunk_index = 0
        p = Parser(self)
        p.read(nrows)
        rslt = self._chunk_to_dataframe()
        if self.index is not None:
            rslt = rslt.set_index(self.index)
        return rslt

    def _read_next_page(self):
        """Load the next page into the cache; return True at EOF. Pages that
        carry neither data nor mix content are skipped recursively."""
        self._current_page_data_subheader_pointers = []
        self._cached_page = self._path_or_buf.read(self._page_length)
        if len(self._cached_page) <= 0:
            return True
        elif len(self._cached_page) != self._page_length:
            self.close()
            msg = (
                "failed to read complete page from file (read "
                f"{len(self._cached_page):d} of {self._page_length:d} bytes)"
            )
            raise ValueError(msg)
        self._read_page_header()
        page_type = self._current_page_type
        if page_type == const.page_meta_type:
            self._process_page_metadata()
        is_data_page = page_type & const.page_data_type
        pt = [const.page_meta_type] + const.page_mix_types
        if not is_data_page and self._current_page_type not in pt:
            return self._read_next_page()
        return False

    def _chunk_to_dataframe(self):
        """Convert the parsed chunk buffers into a DataFrame, applying
        date/text conversions column by column."""
        n = self._current_row_in_chunk_index
        m = self._current_row_in_file_index
        ix = range(m - n, m)
        rslt = pd.DataFrame(index=ix)
        js, jb = 0, 0
        for j in range(self.column_count):
            name = self.column_names[j]
            if self._column_types[j] == b"d":
                # Reinterpret the packed bytes as doubles in file byte order.
                rslt[name] = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d")
                rslt[name] = np.asarray(rslt[name], dtype=np.float64)
                if self.convert_dates:
                    if self.column_formats[j] in const.sas_date_formats:
                        rslt[name] = _convert_datetimes(rslt[name], "d")
                    elif self.column_formats[j] in const.sas_datetime_formats:
                        rslt[name] = _convert_datetimes(rslt[name], "s")
                jb += 1
            elif self._column_types[j] == b"s":
                rslt[name] = self._string_chunk[js, :]
                if self.convert_text and (self.encoding is not None):
                    rslt[name] = rslt[name].str.decode(
                        self.encoding or self.default_encoding
                    )
                if self.blank_missing:
                    # SAS uses blanks for missing character values.
                    ii = rslt[name].str.len() == 0
                    rslt.loc[ii, name] = np.nan
                js += 1
            else:
                self.close()
                raise ValueError(f"unknown column type {self._column_types[j]}")
        return rslt
|
|
from gym.spaces import Box
from functools import partial
import logging
import numpy as np
import ray
import ray.experimental.tf_utils
from ray.util.debug import log_once
from ray.rllib.agents.ddpg.ddpg_tf_model import DDPGTFModel
from ray.rllib.agents.ddpg.ddpg_torch_model import DDPGTorchModel
from ray.rllib.agents.ddpg.noop_model import NoopModel, TorchNoopModel
from ray.rllib.agents.dqn.dqn_tf_policy import postprocess_nstep_and_prio, \
PRIO_WEIGHTS
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_action_dist import Deterministic
from ray.rllib.models.torch.torch_action_dist import TorchDeterministic
from ray.rllib.utils.annotations import override
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import get_variable, try_import_tf
from ray.rllib.utils.tf_ops import huber_loss, make_tf_callable
# tf1/tf/tfv as returned by RLlib's try_import_tf framework helper.
tf1, tf, tfv = try_import_tf()
# Module-level logger.
logger = logging.getLogger(__name__)
def build_ddpg_models(policy, observation_space, action_space, config):
    """Build the DDPG model and its target model on `policy`.

    Both models are constructed from identical keyword arguments; only the
    model name differs, so the shared kwargs are built once to keep the two
    calls consistent.

    Returns policy.model (the target model lives on policy.target_model).
    """
    if policy.config["use_state_preprocessor"]:
        default_model = None  # catalog decides
        num_outputs = 256  # arbitrary
        config["model"]["no_final_linear"] = True
    else:
        default_model = TorchNoopModel if config["framework"] == "torch" \
            else NoopModel
        # np.prod: np.product was a deprecated alias, removed in NumPy 2.0.
        num_outputs = int(np.prod(observation_space.shape))

    # Keyword arguments shared by the model and the target model.
    common_kwargs = dict(
        obs_space=observation_space,
        action_space=action_space,
        num_outputs=num_outputs,
        model_config=config["model"],
        framework=config["framework"],
        model_interface=(DDPGTorchModel
                         if config["framework"] == "torch" else DDPGTFModel),
        default_model=default_model,
        actor_hidden_activation=config["actor_hidden_activation"],
        actor_hiddens=config["actor_hiddens"],
        critic_hidden_activation=config["critic_hidden_activation"],
        critic_hiddens=config["critic_hiddens"],
        twin_q=config["twin_q"],
        add_layer_norm=(policy.config["exploration_config"].get("type") ==
                        "ParameterNoise"),
    )

    policy.model = ModelCatalog.get_model_v2(
        name="ddpg_model", **common_kwargs)

    policy.target_model = ModelCatalog.get_model_v2(
        name="target_ddpg_model", **common_kwargs)

    return policy.model
def get_distribution_inputs_and_class(policy,
                                      model,
                                      obs_batch,
                                      *,
                                      explore=True,
                                      is_training=False,
                                      **kwargs):
    """Run a forward pass and return (dist-inputs, dist-class, state-outs)."""
    input_dict = {
        "obs": obs_batch,
        "is_training": is_training,
    }
    model_out, _ = model(input_dict, [], None)
    dist_inputs = model.get_policy_output(model_out)
    # Deterministic action distribution (DDPG has no stochastic policy).
    if policy.config["framework"] == "torch":
        dist_class = TorchDeterministic
    else:
        dist_class = Deterministic
    # Empty list = no RNN state outputs.
    return dist_inputs, dist_class, []
def ddpg_actor_critic_loss(policy, model, _, train_batch):
    """Build the combined DDPG/TD3 loss (actor + critic) over a train batch.

    Side effects (read by the stats/fetch functions): stores `actor_loss`,
    `critic_loss`, `td_error` and `q_t` on the policy.

    Returns:
        A single scalar tensor (critic_loss + actor_loss); the two terms are
        re-separated per-optimizer in `gradients_fn`.
    """
    twin_q = policy.config["twin_q"]
    gamma = policy.config["gamma"]
    n_step = policy.config["n_step"]
    use_huber = policy.config["use_huber"]
    huber_threshold = policy.config["huber_threshold"]
    l2_reg = policy.config["l2_reg"]
    input_dict = {
        "obs": train_batch[SampleBatch.CUR_OBS],
        "is_training": True,
    }
    input_dict_next = {
        "obs": train_batch[SampleBatch.NEXT_OBS],
        "is_training": True,
    }
    # Forward passes: main net on s_t and s_{t+1}; target net on s_{t+1}.
    model_out_t, _ = model(input_dict, [], None)
    model_out_tp1, _ = model(input_dict_next, [], None)
    target_model_out_tp1, _ = policy.target_model(input_dict_next, [], None)
    # Policy network evaluation.
    policy_t = model.get_policy_output(model_out_t)
    policy_tp1 = \
        policy.target_model.get_policy_output(target_model_out_tp1)
    # Action outputs.
    if policy.config["smooth_target_policy"]:
        # TD3-style target-policy smoothing: add clipped Gaussian noise to
        # the target action, then re-clip into the action-space bounds.
        target_noise_clip = policy.config["target_noise_clip"]
        clipped_normal_sample = tf.clip_by_value(
            tf.random.normal(
                tf.shape(policy_tp1), stddev=policy.config["target_noise"]),
            -target_noise_clip, target_noise_clip)
        policy_tp1_smoothed = tf.clip_by_value(
            policy_tp1 + clipped_normal_sample,
            policy.action_space.low * tf.ones_like(policy_tp1),
            policy.action_space.high * tf.ones_like(policy_tp1))
    else:
        # No smoothing, just use deterministic actions.
        policy_tp1_smoothed = policy_tp1
    # Q-net(s) evaluation.
    # prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
    # Q-values for given actions & observations in given current
    q_t = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS])
    # Q-values for current policy (no noise) in given current state
    q_t_det_policy = model.get_q_values(model_out_t, policy_t)
    if twin_q:
        twin_q_t = model.get_twin_q_values(model_out_t,
                                           train_batch[SampleBatch.ACTIONS])
    # Target q-net(s) evaluation.
    q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
                                             policy_tp1_smoothed)
    if twin_q:
        twin_q_tp1 = policy.target_model.get_twin_q_values(
            target_model_out_tp1, policy_tp1_smoothed)
    # Drop the trailing size-1 dim so Q tensors are shaped [batch].
    q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
    if twin_q:
        twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
        # Clipped double-Q: bootstrap from the min of the two target Qs.
        q_tp1 = tf.minimum(q_tp1, twin_q_tp1)
    q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
    # Zero out the bootstrap term for terminal transitions.
    q_tp1_best_masked = \
        (1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)) * \
        q_tp1_best
    # Compute RHS of bellman equation.
    q_t_selected_target = tf.stop_gradient(train_batch[SampleBatch.REWARDS] +
                                           gamma**n_step * q_tp1_best_masked)
    # Compute the error (potentially clipped).
    if twin_q:
        td_error = q_t_selected - q_t_selected_target
        twin_td_error = twin_q_t_selected - q_t_selected_target
        if use_huber:
            errors = huber_loss(td_error, huber_threshold) + \
                huber_loss(twin_td_error, huber_threshold)
        else:
            errors = 0.5 * tf.math.square(td_error) + \
                0.5 * tf.math.square(twin_td_error)
    else:
        td_error = q_t_selected - q_t_selected_target
        if use_huber:
            errors = huber_loss(td_error, huber_threshold)
        else:
            errors = 0.5 * tf.math.square(td_error)
    # Importance-weighted (PER) mean TD loss for the critic.
    critic_loss = tf.reduce_mean(
        tf.cast(train_batch[PRIO_WEIGHTS], tf.float32) * errors)
    # Actor maximizes the Q-value of its own (noise-free) actions.
    actor_loss = -tf.reduce_mean(q_t_det_policy)
    # Add l2-regularization if required.
    if l2_reg is not None:
        for var in policy.model.policy_variables():
            if "bias" not in var.name:
                actor_loss += (l2_reg * tf.nn.l2_loss(var))
        for var in policy.model.q_variables():
            if "bias" not in var.name:
                critic_loss += (l2_reg * tf.nn.l2_loss(var))
    # Model self-supervised losses.
    if policy.config["use_state_preprocessor"]:
        # Expand input_dict in case custom_loss' need them.
        input_dict[SampleBatch.ACTIONS] = train_batch[SampleBatch.ACTIONS]
        input_dict[SampleBatch.REWARDS] = train_batch[SampleBatch.REWARDS]
        input_dict[SampleBatch.DONES] = train_batch[SampleBatch.DONES]
        input_dict[SampleBatch.NEXT_OBS] = train_batch[SampleBatch.NEXT_OBS]
        if log_once("ddpg_custom_loss"):
            logger.warning(
                "You are using a state-preprocessor with DDPG and "
                "therefore, `custom_loss` will be called on your Model! "
                "Please be aware that DDPG now uses the ModelV2 API, which "
                "merges all previously separate sub-models (policy_model, "
                "q_model, and twin_q_model) into one ModelV2, on which "
                "`custom_loss` is called, passing it "
                "[actor_loss, critic_loss] as 1st argument. "
                "You may have to change your custom loss function to handle "
                "this.")
        [actor_loss, critic_loss] = model.custom_loss(
            [actor_loss, critic_loss], input_dict)
    # Store values for stats function.
    policy.actor_loss = actor_loss
    policy.critic_loss = critic_loss
    policy.td_error = td_error
    policy.q_t = q_t
    # Return one loss value (even though we treat them separately in our
    # 2 optimizers: actor and critic).
    return policy.critic_loss + policy.actor_loss
def make_ddpg_optimizers(policy, config):
    """Create separate Adam optimizers for the actor and critic losses and
    attach them to the policy as `_actor_optimizer` / `_critic_optimizer`."""
    actor_lr = config["actor_lr"]
    critic_lr = config["critic_lr"]
    if policy.config["framework"] in ["tf2", "tfe"]:
        adam_cls = tf.keras.optimizers.Adam
    else:
        adam_cls = tf1.train.AdamOptimizer
    policy._actor_optimizer = adam_cls(learning_rate=actor_lr)
    policy._critic_optimizer = adam_cls(learning_rate=critic_lr)
    # TODO: (sven) make this function return both optimizers and
    # TFPolicy handle optimizers vs loss terms correctly (like torch).
    return None
def build_apply_op(policy, optimizer, grads_and_vars):
    """Apply the actor/critic gradients computed in `gradients_fn`.

    The critic is updated on every call; the actor only on every
    `policy_delay`-th call (TD3-style delayed policy updates).
    """
    # For policy gradient, update policy net one time v.s.
    # update critic net `policy_delay` time(s).
    should_apply_actor_opt = tf.equal(
        tf.math.floormod(policy.global_step, policy.config["policy_delay"]), 0)

    def make_apply_op():
        # Deferred so tf.cond only applies actor grads on selected steps.
        return policy._actor_optimizer.apply_gradients(
            policy._actor_grads_and_vars)

    actor_op = tf.cond(
        should_apply_actor_opt,
        true_fn=make_apply_op,
        false_fn=lambda: tf.no_op())
    critic_op = policy._critic_optimizer.apply_gradients(
        policy._critic_grads_and_vars)
    # Increment global step & apply ops.
    if policy.config["framework"] in ["tf2", "tfe"]:
        # NOTE(review): in eager mode the apply_gradients calls above appear
        # to execute immediately, so returning a no-op is presumably
        # intentional — confirm against the tf-policy template contract.
        policy.global_step.assign_add(1)
        return tf.no_op()
    else:
        # Graph mode: chain the step increment before grouping both updates.
        with tf1.control_dependencies([tf1.assign_add(policy.global_step, 1)]):
            return tf.group(actor_op, critic_op)
def gradients_fn(policy, optimizer, loss):
    """Compute actor and critic gradients separately, clip them if
    configured, stash them on the policy for `build_apply_op`, and return
    the concatenated grads-and-vars list."""
    pol_vars = policy.model.policy_variables()
    q_vars = policy.model.q_variables()
    if policy.config["framework"] in ["tf2", "tfe"]:
        # Eager: pull gradients off the GradientTape that recorded the loss.
        tape = optimizer.tape
        actor_grads_and_vars = list(
            zip(tape.gradient(policy.actor_loss, pol_vars), pol_vars))
        critic_grads_and_vars = list(
            zip(tape.gradient(policy.critic_loss, q_vars), q_vars))
    else:
        # Graph mode: each optimizer computes grads for its own loss term.
        actor_grads_and_vars = policy._actor_optimizer.compute_gradients(
            policy.actor_loss, var_list=pol_vars)
        critic_grads_and_vars = policy._critic_optimizer.compute_gradients(
            policy.critic_loss, var_list=q_vars)
    # Clip if necessary.
    clip_norm = policy.config["grad_clip"]
    clip_func = (partial(tf.clip_by_norm, clip_norm=clip_norm)
                 if clip_norm else tf.identity)
    # Save grads and vars for later use in `build_apply_op`, dropping
    # variables that received no gradient.
    policy._actor_grads_and_vars = [(clip_func(grad), var)
                                    for grad, var in actor_grads_and_vars
                                    if grad is not None]
    policy._critic_grads_and_vars = [(clip_func(grad), var)
                                     for grad, var in critic_grads_and_vars
                                     if grad is not None]
    return policy._actor_grads_and_vars + policy._critic_grads_and_vars
def build_ddpg_stats(policy, batch):
    """Return scalar Q-value statistics (mean/max/min) for logging."""
    q_values = policy.q_t
    return {
        "mean_q": tf.reduce_mean(q_values),
        "max_q": tf.reduce_max(q_values),
        "min_q": tf.reduce_min(q_values),
    }
def before_init_fn(policy, obs_space, action_space, config):
    """Attach a global-step counter (number of update ops) to the policy."""
    # Create global step for counting the number of update operations.
    is_eager = config["framework"] in ["tf2", "tfe"]
    policy.global_step = (
        get_variable(0, tf_name="global_step") if is_eager
        else tf1.train.get_or_create_global_step())
class ComputeTDErrorMixin:
    """Mixin adding `compute_td_error()`, used to re-compute per-item
    TD errors (e.g. for updating prioritized-replay weights)."""

    def __init__(self, loss_fn):
        @make_tf_callable(self.get_session(), dynamic_shape=True)
        def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask,
                             importance_weights):
            # Do forward pass on loss to update td errors attribute
            # (one TD-error value per item in batch to update PR weights).
            loss_fn(
                self, self.model, None, {
                    SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t),
                    SampleBatch.ACTIONS: tf.convert_to_tensor(act_t),
                    SampleBatch.REWARDS: tf.convert_to_tensor(rew_t),
                    SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1),
                    SampleBatch.DONES: tf.convert_to_tensor(done_mask),
                    PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights),
                })
            # `self.td_error` is set in loss_fn.
            return self.td_error

        self.compute_td_error = compute_td_error
def setup_mid_mixins(policy, obs_space, action_space, config):
    """Before-loss-init hook: wire up TD-error computation on the policy."""
    ComputeTDErrorMixin.__init__(policy, ddpg_actor_critic_loss)
class TargetNetworkMixin:
    """Mixin adding target-network syncing via `update_target()`
    (hard sync with tau=1.0, soft/Polyak sync otherwise)."""

    def __init__(self, config):
        @make_tf_callable(self.get_session())
        def update_target_fn(tau):
            # Polyak averaging: target <- tau * main + (1 - tau) * target.
            tau = tf.convert_to_tensor(tau, dtype=tf.float32)
            update_target_expr = []
            model_vars = self.model.trainable_variables()
            target_model_vars = self.target_model.trainable_variables()
            assert len(model_vars) == len(target_model_vars), \
                (model_vars, target_model_vars)
            for var, var_target in zip(model_vars, target_model_vars):
                update_target_expr.append(
                    var_target.assign(tau * var + (1.0 - tau) * var_target))
                logger.debug("Update target op {}".format(var_target))
            return tf.group(*update_target_expr)

        # Hard initial update.
        self._do_update = update_target_fn
        self.update_target(tau=1.0)

    # Support both hard and soft sync.
    def update_target(self, tau=None):
        # NOTE(review): `tau or ...` also falls back to config for tau=0 —
        # presumably harmless since a tau=0 sync would be a no-op; confirm.
        self._do_update(np.float32(tau or self.config.get("tau")))

    @override(TFPolicy)
    def variables(self):
        """Return the main-model plus target-model variables."""
        return self.model.variables() + self.target_model.variables()
def setup_late_mixins(policy, obs_space, action_space, config):
    """After-init hook: build target-sync ops and do the initial hard sync."""
    TargetNetworkMixin.__init__(policy, config)
def validate_spaces(pid, observation_space, action_space, config):
    """Raise UnsupportedSpaceException unless the action space is a 1-D Box."""
    if not isinstance(action_space, Box):
        raise UnsupportedSpaceException(
            "Action space ({}) of {} is not supported for "
            "DDPG.".format(action_space, pid))
    if len(action_space.shape) > 1:
        raise UnsupportedSpaceException(
            "Action space ({}) of {} has multiple dimensions "
            "{}. ".format(action_space, pid, action_space.shape) +
            "Consider reshaping this into a single dimension, "
            "using a Tuple action space, or the multi-agent API.")
# Assemble the DDPG/TD3 TF policy by plugging the functions and mixins
# defined above into the generic tf-policy template.
DDPGTFPolicy = build_tf_policy(
    name="DDPGTFPolicy",
    get_default_config=lambda: ray.rllib.agents.ddpg.ddpg.DEFAULT_CONFIG,
    make_model=build_ddpg_models,
    action_distribution_fn=get_distribution_inputs_and_class,
    loss_fn=ddpg_actor_critic_loss,
    stats_fn=build_ddpg_stats,
    postprocess_fn=postprocess_nstep_and_prio,
    optimizer_fn=make_ddpg_optimizers,
    gradients_fn=gradients_fn,
    apply_gradients_fn=build_apply_op,
    extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
    validate_spaces=validate_spaces,
    before_init=before_init_fn,
    before_loss_init=setup_mid_mixins,
    after_init=setup_late_mixins,
    obs_include_prev_action_reward=False,
    mixins=[
        TargetNetworkMixin,
        ComputeTDErrorMixin,
    ])
|
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import hashlib
import json
import random
import re
import time
from .common import InfoExtractor
from ..aes import aes_encrypt
from ..compat import compat_str
from ..utils import (
bytes_to_intlist,
determine_ext,
intlist_to_bytes,
int_or_none,
strip_jsonp,
unescapeHTML,
unsmuggle_url,
)
def md5_text(s):
    """Return the hex MD5 digest of *s*, coercing it to text first."""
    text = s if isinstance(s, compat_str) else compat_str(s)
    return hashlib.md5(text.encode('utf-8')).hexdigest()
class AnvatoIE(InfoExtractor):
    """Extractor for videos served by the Anvato video platform (used by
    NBC, CBS-local, Scripps, Hearst etc. station sites).

    Handles internal "anvato:<access_key_or_mcp>:<video_id>" URLs, usually
    produced by `_extract_urls()` from an embedding web page.
    """

    _VALID_URL = r'anvato:(?P<access_key_or_mcp>[^:]+):(?P<id>\d+)'

    # Maps Anvato access keys -> per-key API secrets used to sign requests.
    # Copied from anvplayer.min.js
    _ANVACK_TABLE = {
        'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ',
        'nbcu_nbcd_desktop_web_qa_1a6f01bdd0dc45a439043b694c8a031d': 'eSxJUbA2UUKBTXryyQ2d6NuM8oEqaPySvaPzfKNA',
        'nbcu_nbcd_desktop_web_acc_eb2ff240a5d4ae9a63d4c297c32716b6c523a129': '89JR3RtUGbvKuuJIiKOMK0SoarLb5MUx8v89RcbP',
        'nbcu_nbcd_watchvod_web_prod_e61107507180976724ec8e8319fe24ba5b4b60e1': 'Uc7dFt7MJ9GsBWB5T7iPvLaMSOt8BBxv4hAXk5vv',
        'nbcu_nbcd_watchvod_web_qa_42afedba88a36203db5a4c09a5ba29d045302232': 'T12oDYVFP2IaFvxkmYMy5dKxswpLHtGZa4ZAXEi7',
        'nbcu_nbcd_watchvod_web_acc_9193214448e2e636b0ffb78abacfd9c4f937c6ca': 'MmobcxUxMedUpohNWwXaOnMjlbiyTOBLL6d46ZpR',
        'nbcu_local_monitor_web_acc_f998ad54eaf26acd8ee033eb36f39a7b791c6335': 'QvfIoPYrwsjUCcASiw3AIkVtQob2LtJHfidp9iWg',
        'nbcu_cable_monitor_web_acc_a413759603e8bedfcd3c61b14767796e17834077': 'uwVPJLShvJWSs6sWEIuVem7MTF8A4IknMMzIlFto',
        'nbcu_nbcd_mcpstage_web_qa_4c43a8f6e95a88dbb40276c0630ba9f693a63a4e': 'PxVYZVwjhgd5TeoPRxL3whssb5OUPnM3zyAzq8GY',
        'nbcu_comcast_comcast_web_prod_074080762ad4ce956b26b43fb22abf153443a8c4': 'afnaRZfDyg1Z3WZHdupKfy6xrbAG2MHqe3VfuSwh',
        'nbcu_comcast_comcast_web_qa_706103bb93ead3ef70b1de12a0e95e3c4481ade0': 'DcjsVbX9b3uoPlhdriIiovgFQZVxpISZwz0cx1ZK',
        'nbcu_comcast_comcastcable_web_prod_669f04817536743563d7331c9293e59fbdbe3d07': '0RwMN2cWy10qhAhOscq3eK7aEe0wqnKt3vJ0WS4D',
        'nbcu_comcast_comcastcable_web_qa_3d9d2d66219094127f0f6b09cc3c7bb076e3e1ca': '2r8G9DEya7PCqBceKZgrn2XkXgASjwLMuaFE1Aad',
        'hearst_hearst_demo_web_stage_960726dfef3337059a01a78816e43b29ec04dfc7': 'cuZBPXTR6kSdoTCVXwk5KGA8rk3NrgGn4H6e9Dsp',
        'anvato_mcpqa_demo_web_stage_18b55e00db5a13faa8d03ae6e41f6f5bcb15b922': 'IOaaLQ8ymqVyem14QuAvE5SndQynTcH5CrLkU2Ih',
        'anvato_nextmedia_demo_web_stage_9787d56a02ff6b9f43e9a2b0920d8ca88beb5818': 'Pqu9zVzI1ApiIzbVA3VkGBEQHvdKSUuKpD6s2uaR',
        'anvato_scripps_app_web_prod_0837996dbe373629133857ae9eb72e740424d80a': 'du1ccmn7RxzgizwbWU7hyUaGodNlJn7HtXI0WgXW',
        'anvato_scripps_app_web_stage_360797e00fe2826be142155c4618cc52fce6c26c': '2PMrQ0BRoqCWl7nzphj0GouIMEh2mZYivAT0S1Su',
        'fs2go_fs2go_go_all_prod_21934911ccfafc03a075894ead2260d11e2ddd24': 'RcuHlKikW2IJw6HvVoEkqq2UsuEJlbEl11pWXs4Q',
        'fs2go_fs2go_go_web_prod_ead4b0eec7460c1a07783808db21b49cf1f2f9a7': '4K0HTT2u1zkQA2MaGaZmkLa1BthGSBdr7jllrhk5',
        'fs2go_fs2go_go_web_stage_407585454a4400355d4391691c67f361': 'ftnc37VKRJBmHfoGGi3kT05bHyeJzilEzhKJCyl3',
        'fs2go_fs2go_go_android_stage_44b714db6f8477f29afcba15a41e1d30': 'CtxpPvVpo6AbZGomYUhkKs7juHZwNml9b9J0J2gI',
        'anvato_cbslocal_app_web_prod_547f3e49241ef0e5d30c79b2efbca5d92c698f67': 'Pw0XX5KBDsyRnPS0R2JrSrXftsy8Jnz5pAjaYC8s',
        'anvato_cbslocal_app_web_stage_547a5f096594cd3e00620c6f825cad1096d28c80': '37OBUhX2uwNyKhhrNzSSNHSRPZpApC3trdqDBpuz',
        'fs2go_att_att_web_prod_1042dddd089a05438b6a08f972941176f699ffd8': 'JLcF20JwYvpv6uAGcLWIaV12jKwaL1R8us4b6Zkg',
        'fs2go_att_att_web_stage_807c5001955fc114a3331fe027ddc76e': 'gbu1oO1y0JiOFh4SUipt86P288JHpyjSqolrrT1x',
        'fs2go_fs2go_tudor_web_prod_a7dd8e5a7cdc830cae55eae6f3e9fee5ee49eb9b': 'ipcp87VCEZXPPe868j3orLqzc03oTy7DXsGkAXXH',
        'anvato_mhz_app_web_prod_b808218b30de7fdf60340cbd9831512bc1bf6d37': 'Stlm5Gs6BEhJLRTZHcNquyzxGqr23EuFmE5DCgjX',
        'fs2go_charter_charter_web_stage_c2c6e5a68375a1bf00fff213d3ff8f61a835a54c': 'Lz4hbJp1fwL6jlcz4M2PMzghM4jp4aAmybtT5dPc',
        'fs2go_charter_charter_web_prod_ebfe3b10f1af215a7321cd3d629e0b81dfa6fa8c': 'vUJsK345A1bVmyYDRhZX0lqFIgVXuqhmuyp1EtPK',
        'anvato_epfox_app_web_prod_b3373168e12f423f41504f207000188daf88251b': 'GDKq1ixvX3MoBNdU5IOYmYa2DTUXYOozPjrCJnW7',
        'anvato_epfox_app_web_stage_a3c2ce60f8f83ef374a88b68ee73a950f8ab87ce': '2jz2NH4BsXMaDsoJ5qkHMbcczAfIReo2eFYuVC1C',
        'fs2go_verizon_verizon_web_stage_08e6df0354a4803f1b1f2428b5a9a382e8dbcd62': 'rKTVapNaAcmnUbGL4ZcuOoY4SE7VmZSQsblPFr7e',
        'fs2go_verizon_verizon_web_prod_f909564cb606eff1f731b5e22e0928676732c445': 'qLSUuHerM3u9eNPzaHyUK52obai5MvE4XDJfqYe1',
        'fs2go_foxcom_synd_web_stage_f7b9091f00ea25a4fdaaae77fca5b54cdc7e7043': '96VKF2vLd24fFiDfwPFpzM5llFN4TiIGAlodE0Re',
        'fs2go_foxcom_synd_web_prod_0f2cdd64d87e4ab6a1d54aada0ff7a7c8387a064': 'agiPjbXEyEZUkbuhcnmVPhe9NNVbDjCFq2xkcx51',
        'anvato_own_app_web_stage_1214ade5d28422c4dae9d03c1243aba0563c4dba': 'mzhamNac3swG4WsJAiUTacnGIODi6SWeVWk5D7ho',
        'anvato_own_app_web_prod_944e162ed927ec3e9ed13eb68ed2f1008ee7565e': '9TSxh6G2TXOLBoYm9ro3LdNjjvnXpKb8UR8KoIP9',
        'anvato_scripps_app_ftv_prod_a10a10468edd5afb16fb48171c03b956176afad1': 'COJ2i2UIPK7xZqIWswxe7FaVBOVgRkP1F6O6qGoH',
        'anvato_scripps_app_ftv_stage_77d3ad2bdb021ec37ca2e35eb09acd396a974c9a': 'Q7nnopNLe2PPfGLOTYBqxSaRpl209IhqaEuDZi1F',
        'anvato_univision_app_web_stage_551236ef07a0e17718c3995c35586b5ed8cb5031': 'D92PoLS6UitwxDRA191HUGT9OYcOjV6mPMa5wNyo',
        'anvato_univision_app_web_prod_039a5c0a6009e637ae8ac906718a79911e0e65e1': '5mVS5u4SQjtw6NGw2uhMbKEIONIiLqRKck5RwQLR',
        'nbcu_cnbc_springfield_ios_prod_670207fae43d6e9a94c351688851a2ce': 'M7fqCCIP9lW53oJbHs19OlJlpDrVyc2OL8gNeuTa',
        'nbcu_cnbc_springfieldvod_ios_prod_7a5f04b1ceceb0e9c9e2264a44aa236e08e034c2': 'Yia6QbJahW0S7K1I0drksimhZb4UFq92xLBmmMvk',
        'anvato_cox_app_web_prod_ce45cda237969f93e7130f50ee8bb6280c1484ab': 'cc0miZexpFtdoqZGvdhfXsLy7FXjRAOgb9V0f5fZ',
        'anvato_cox_app_web_stage_c23dbe016a8e9d8c7101d10172b92434f6088bf9': 'yivU3MYHd2eDZcOfmLbINVtqxyecKTOp8OjOuoGJ',
        'anvato_chnzero_app_web_stage_b1164d1352b579e792e542fddf13ee34c0eeb46b': 'A76QkXMmVH8lTCfU15xva1mZnSVcqeY4Xb22Kp7m',
        'anvato_chnzero_app_web_prod_253d358928dc08ec161eda2389d53707288a730c': 'OA5QI3ZWZZkdtUEDqh28AH8GedsF6FqzJI32596b',
        'anvato_discovery_vodpoc_web_stage_9fa7077b5e8af1f8355f65d4fb8d2e0e9d54e2b7': 'q3oT191tTQ5g3JCP67PkjLASI9s16DuWZ6fYmry3',
        'anvato_discovery_vodpoc_web_prod_688614983167a1af6cdf6d76343fda10a65223c1': 'qRvRQCTVHd0VVOHsMvvfidyWmlYVrTbjby7WqIuK',
        'nbcu_cnbc_springfieldvod_ftv_stage_826040aad1925a46ac5dfb4b3c5143e648c6a30d': 'JQaSb5a8Tz0PT4ti329DNmzDO30TnngTHmvX8Vua',
        'nbcu_cnbc_springfield_ftv_stage_826040aad1925a46ac5dfb4b3c5143e648c6a30d': 'JQaSb5a8Tz0PT4ti329DNmzDO30TnngTHmvX8Vua',
        'nbcu_nbcd_capture_web_stage_4dd9d585bfb984ebf856dee35db027b2465cc4ae': '0j1Ov4Vopyi2HpBZJYdL2m8ERJVGYh3nNpzPiO8F',
        'nbcu_nbcd_watch3_android_prod_7712ca5fcf1c22f19ec1870a9650f9c37db22dcf': '3LN2UB3rPUAMu7ZriWkHky9vpLMXYha8JbSnxBlx',
        'nbcu_nbcd_watchvod3_android_prod_0910a3a4692d57c0b5ff4316075bc5d096be45b9': 'mJagcQ2II30vUOAauOXne7ERwbf5S9nlB3IP17lQ',
        'anvato_scripps_app_atv_prod_790deda22e16e71e83df58f880cd389908a45d52': 'CB6trI1mpoDIM5o54DNTsji90NDBQPZ4z4RqBNSH',
        'nbcu_nbcd_watchv4_android_prod_ff67cef9cb409158c6f8c3533edddadd0b750507': 'j8CHQCUWjlYERj4NFRmUYOND85QNbHViH09UwuKm',
        'nbcu_nbcd_watchvodv4_android_prod_a814d781609989dea6a629d50ae4c7ad8cc8e907': 'rkVnUXxdA9rawVLUlDQtMue9Y4Q7lFEaIotcUhjt',
        'rvVKpA50qlOPLFxMjrCGf5pdkdQDm7qn': '1J7ZkY5Qz5lMLi93QOH9IveE7EYB3rLl',
        'nbcu_dtv_local_web_prod_b266cf49defe255fd4426a97e27c09e513e9f82f': 'HuLnJDqzLa4saCzYMJ79zDRSQpEduw1TzjMNQu2b',
        'nbcu_att_local_web_prod_4cef038b2d969a6b7d700a56a599040b6a619f67': 'Q0Em5VDc2KpydUrVwzWRXAwoNBulWUxCq2faK0AV',
        'nbcu_dish_local_web_prod_c56dcaf2da2e9157a4266c82a78195f1dd570f6b': 'bC1LWmRz9ayj2AlzizeJ1HuhTfIaJGsDBnZNgoRg',
        'nbcu_verizon_local_web_prod_88bebd2ce006d4ed980de8133496f9a74cb9b3e1': 'wzhDKJZpgvUSS1EQvpCQP8Q59qVzcPixqDGJefSk',
        'nbcu_charter_local_web_prod_9ad90f7fc4023643bb718f0fe0fd5beea2382a50': 'PyNbxNhEWLzy1ZvWEQelRuIQY88Eub7xbSVRMdfT',
        'nbcu_suddenlink_local_web_prod_20fb711725cac224baa1c1cb0b1c324d25e97178': '0Rph41lPXZbb3fqeXtHjjbxfSrNbtZp1Ygq7Jypa',
        'nbcu_wow_local_web_prod_652d9ce4f552d9c2e7b5b1ed37b8cb48155174ad': 'qayIBZ70w1dItm2zS42AptXnxW15mkjRrwnBjMPv',
        'nbcu_centurylink_local_web_prod_2034402b029bf3e837ad46814d9e4b1d1345ccd5': 'StePcPMkjsX51PcizLdLRMzxMEl5k2FlsMLUNV4k',
        'nbcu_atlanticbrd_local_web_prod_8d5f5ecbf7f7b2f5e6d908dd75d90ae3565f682e': 'NtYLb4TFUS0pRs3XTkyO5sbVGYjVf17bVbjaGscI',
        'nbcu_nbcd_watchvod_web_dev_08bc05699be47c4f31d5080263a8cfadc16d0f7c': 'hwxi2dgDoSWgfmVVXOYZm14uuvku4QfopstXckhr',
        'anvato_nextmedia_app_web_prod_a4fa8c7204aa65e71044b57aaf63711980cfe5a0': 'tQN1oGPYY1nM85rJYePWGcIb92TG0gSqoVpQTWOw',
        'anvato_mcp_lin_web_prod_4c36fbfd4d8d8ecae6488656e21ac6d1ac972749': 'GUXNf5ZDX2jFUpu4WT2Go4DJ5nhUCzpnwDRRUx1K',
        'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa': 'bLDYF8JqfG42b7bwKEgQiU9E2LTIAtnKzSgYpFUH',
        'anvato_mcp_fs2go_web_prod_c7b90a93e171469cdca00a931211a2f556370d0a': 'icgGoYGipQMMSEvhplZX1pwbN69srwKYWksz3xWK',
        'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336': 'fA2iQdI7RDpynqzQYIpXALVS83NTPr8LLFK4LFsu',
        'anvato_mcp_anv_web_prod_791407490f4c1ef2a4bcb21103e0cb1bcb3352b3': 'rMOUZqe9lwcGq2mNgG3EDusm6lKgsUnczoOX3mbg',
        'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900': 'rMOUZqe9lwcGq2mNgG3EDusm6lKgsUnczoOX3mbg',
        'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99': 'P3uXJ0fXXditBPCGkfvlnVScpPEfKmc64Zv7ZgbK',
        'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe': 'mGPvo5ZA5SgjOFAPEPXv7AnOpFUICX8hvFQVz69n',
        'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582': 'qyT6PXXLjVNCrHaRVj0ugAhalNRS7Ee9BP7LUokD',
        'nbcu_nbcd_watchvodv4_web_stage_4108362fba2d4ede21f262fea3c4162cbafd66c7': 'DhaU5lj0W2gEdcSSsnxURq8t7KIWtJfD966crVDk',
        'anvato_scripps_app_ios_prod_409c41960c60b308db43c3cc1da79cab9f1c3d93': 'WPxj5GraLTkYCyj3M7RozLqIycjrXOEcDGFMIJPn',
        'EZqvRyKBJLrgpClDPDF8I7Xpdp40Vx73': '4OxGd2dEakylntVKjKF0UK9PDPYB6A9W',
        'M2v78QkpleXm9hPp9jUXI63x5vA6BogR': 'ka6K32k7ZALmpINkjJUGUo0OE42Md1BQ',
        'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6_secure': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ',
        'X8POa4zPPaKVZHqmWjuEzfP31b1QM9VN': 'Dn5vOY9ooDw7VSl9qztjZI5o0g08mA0z',
        'M2v78QkBMpNJlSPp9diX5F2PBmBy6Bog': 'ka6K32kyo7nDZfNkjQCGWf1lpApXMd1B',
        'bvJ0dQpav07l0hG5JgfVLF2dv1vARwpP': 'BzoQW24GrJZoJfmNodiJKSPeB9B8NOxj',
        'lxQMLg2XZKuEZaWgsqubBxV9INZ6bryY': 'Vm2Mx6noKds9jB71h6urazwlTG3m9x8l',
        '04EnjvXeoSmkbJ9ckPs7oY0mcxv7PlyN': 'aXERQP9LMfQVlEDsgGs6eEA1SWznAQ8P',
        'mQbO2ge6BFRWVPYCYpU06YvNt80XLvAX': 'E2BV1NGmasN5v7eujECVPJgwflnLPm2A',
        'g43oeBzJrCml7o6fa5fRL1ErCdeD8z4K': 'RX34mZ6zVH4Nr6whbxIGLv9WSbxEKo8V',
        'VQrDJoP7mtdBzkxhXbSPwGB1coeElk4x': 'j2VejQx0VFKQepAF7dI0mJLKtOVJE18z',
        'WxA5NzLRjCrmq0NUgaU5pdMDuZO7RJ4w': 'lyY5ADLKaIOLEgAsGQCveEMAcqnx3rY9',
        'M4lpMXB71ie0PjMCjdFzVXq0SeRVqz49': 'n2zVkOqaLIv3GbLfBjcwW51LcveWOZ2e',
        'dyDZGEqN8u8nkJZcJns0oxYmtP7KbGAn': 'VXOEqQW9BtEVLajfZQSLEqxgS5B7qn2D',
        'E7QNjrVY5u5mGvgu67IoDgV1CjEND8QR': 'rz8AaDmdKIkLmPNhB5ILPJnjS5PnlL8d',
        'a4zrqjoKlfzg0dwHEWtP31VqcLBpjm4g': 'LY9J16gwETdGWa3hjBu5o0RzuoQDjqXQ',
        'dQP5BZroMsMVLO1hbmT5r2Enu86GjxA6': '7XR3oOdbPF6x3PRFLDCq9RkgsRjAo48V',
        'M4lKNBO1NFe0PjMCj1tzVXq0SeRVqzA9': 'n2zoRqGLRUv3GbLfBmTwW51LcveWOZYe',
        'nAZ7MZdpGCGg1pqFEbsoJOz2C60mv143': 'dYJgdqA9aT4yojETqGi7yNgoFADxqmXP',
        '3y1MERYgOuE9NzbFgwhV6Wv2F0YKvbyz': '081xpZDQgC4VadLTavhWQxrku56DAgXV',
        'bmQvmEXr5HWklBMCZOcpE2Z3HBYwqGyl': 'zxXPbVNyMiMAZldhr9FkOmA0fl4aKr2v',
        'wA7oDNYldfr6050Hwxi52lPZiVlB86Ap': 'ZYK16aA7ni0d3l3c34uwpxD7CbReMm8Q',
        'g43MbKMWmFml7o7sJoSRkXxZiXRvJ3QK': 'RX3oBJonvs4Nr6rUWBCGn3matRGqJPXV',
        'mA9VdlqpLS0raGaSDvtoqNrBTzb8XY4q': '0XN4OjBD3fnW7r7IbmtJB4AyfOmlrE2r',
        'mAajOwgkGt17oGoFmEuklMP9H0GnW54d': 'lXbBLPGyzikNGeGujAuAJGjZiwLRxyXR',
        'vy8vjJ9kbUwrRqRu59Cj5dWZfzYErlAb': 'K8l7gpwaGcBpnAnCLNCmPZRdin3eaQX0',
        'xQMWBpR8oHEZaWaSMGUb0avOHjLVYn4Y': 'm2MrN4vEaf9jB7BFy5Srb40jTrN67AYl',
        'xyKEmVO3miRr6D6UVkt7oB8jtD6aJEAv': 'g2ddDebqDfqdgKgswyUKwGjbTWwzq923',
        '7Qk0wa2D9FjKapacoJF27aLvUDKkLGA0': 'b2kgBEkephJaMkMTL7s1PLe4Ua6WyP2P',
        '3QLg6nqmNTJ5VvVTo7f508LPidz1xwyY': 'g2L1GgpraipmAOAUqmIbBnPxHOmw4MYa',
        '3y1B7zZjXTE9NZNSzZSVNPZaTNLjo6Qz': '081b5G6wzH4VagaURmcWbN5mT4JGEe2V',
        'lAqnwvkw6SG6D8DSqmUg6DRLUp0w3G4x': 'O2pbP0xPDFNJjpjIEvcdryOJtpkVM4X5',
        'awA7xd1N0Hr6050Hw2c52lPZiVlB864p': 'GZYKpn4aoT0d3l3c3PiwpxD7CbReMmXQ',
        'jQVqPLl9YHL1WGWtR1HDgWBGT63qRNyV': '6X03ne6vrU4oWyWUN7tQVoajikxJR3Ye',
        'GQRMR8mL7uZK797t7xH3eNzPIP5dOny1': 'm2vqPWGd4U31zWzSyasDRAoMT1PKRp8o',
        'zydq9RdmRhXLkNkfNoTJlMzaF0lWekQB': '3X7LnvE7vH5nkEkSqLiey793Un7dLB8e',
        'VQrDzwkB2IdBzjzu9MHPbEYkSB50gR4x': 'j2VebLzoKUKQeEesmVh0gM1eIp9jKz8z',
        'mAa2wMamBs17oGoFmktklMP9H0GnW54d': 'lXbgP74xZTkNGeGujVUAJGjZiwLRxy8R',
        '7yjB6ZLG6sW8R6RF2xcan1KGfJ5dNoyd': 'wXQkPorvPHZ45N5t4Jf6qwg5Tp4xvw29',
        'a4zPpNeWGuzg0m0iX3tPeanGSkRKWXQg': 'LY9oa3QAyHdGW9Wu3Ri5JGeEik7l1N8Q',
        'k2rneA2M38k25cXDwwSknTJlxPxQLZ6M': '61lyA2aEVDzklfdwmmh31saPxQx2VRjp',
        'bK9Zk4OvPnvxduLgxvi8VUeojnjA02eV': 'o5jANYjbeMb4nfBaQvcLAt1jzLzYx6ze',
        '5VD6EydM3R9orHmNMGInGCJwbxbQvGRw': 'w3zjmX7g4vnxzCxElvUEOiewkokXprkZ',
        '70X35QbVYVYNPUmP9YfbzI06YqYQk2R1': 'vG4Aj2BMjMjoztB7zeFOnCVPJpJ8lMOa',
        '26qYwQVG9p1Bks2GgBckjfDJOXOAMgG1': 'r4ev9X0mv5zqJc0yk5IBDcQOwZw8mnwQ',
        'rvVKpA56MBXWlSxMw3cobT5pdkd4Dm7q': '1J7ZkY53pZ645c93owcLZuveE7E8B3rL',
        'qN1zdy1zlYL23IWZGWtDvfV6WeWQWkJo': 'qN1zdy1zlYL23IWZGWtDvfV6WeWQWkJo',
        'jdKqRGF16dKsBviMDae7IGDl7oTjEbVV': 'Q09l7vhlNxPFErIOK6BVCe7KnwUW5DVV',
        '3QLkogW1OUJ5VvPsrDH56DY2u7lgZWyY': 'g2LRE1V9espmAOPhE4ubj4ZdUA57yDXa',
        'wyJvWbXGBSdbkEzhv0CW8meou82aqRy8': 'M2wolPvyBIpQGkbT4juedD4ruzQGdK2y',
        '7QkdZrzEkFjKap6IYDU2PB0oCNZORmA0': 'b2kN1l96qhJaMkPs9dt1lpjBfwqZoA8P',
        'pvA05113MHG1w3JTYxc6DVlRCjErVz4O': 'gQXeAbblBUnDJ7vujbHvbRd1cxlz3AXO',
        'mA9blJDZwT0raG1cvkuoeVjLC7ZWd54q': '0XN9jRPwMHnW7rvumgfJZOD9CJgVkWYr',
        '5QwRN5qKJTvGKlDTmnf7xwNZcjRmvEy9': 'R2GP6LWBJU1QlnytwGt0B9pytWwAdDYy',
        'eyn5rPPbkfw2KYxH32fG1q58CbLJzM40': 'p2gyqooZnS56JWeiDgfmOy1VugOQEBXn',
        '3BABn3b5RfPJGDwilbHe7l82uBoR05Am': '7OYZG7KMVhbPdKJS3xcWEN3AuDlLNmXj',
        'xA5zNGXD3HrmqMlF6OS5pdMDuZO7RJ4w': 'yY5DAm6r1IOLE3BCVMFveEMAcqnx3r29',
        'g43PgW3JZfml7o6fDEURL1ErCdeD8zyK': 'RX3aQn1zrS4Nr6whDgCGLv9WSbxEKo2V',
        'lAqp8WbGgiG6D8LTKJcg3O72CDdre1Qx': 'O2pnm6473HNJjpKuVosd3vVeh975yrX5',
        'wyJbYEDxKSdbkJ6S6RhW8meou82aqRy8': 'M2wPm7EgRSpQGlAh70CedD4ruzQGdKYy',
        'M4lgW28nLCe0PVdtaXszVXq0SeRVqzA9': 'n2zmJvg4jHv3G0ETNgiwW51LcveWOZ8e',
        '5Qw3OVvp9FvGKlDTmOC7xwNZcjRmvEQ9': 'R2GzDdml9F1Qlnytw9s0B9pytWwAdD8y',
        'vy8a98X7zCwrRqbHrLUjYzwDiK2b70Qb': 'K8lVwzyjZiBpnAaSGeUmnAgxuGOBxmY0',
        'g4eGjJLLoiqRD3Pf9oT5O03LuNbLRDQp': '6XqD59zzpfN4EwQuaGt67qNpSyRBlnYy',
        'g43OPp9boIml7o6fDOIRL1ErCdeD8z4K': 'RX33alNB4s4Nr6whDPUGLv9WSbxEKoXV',
        'xA2ng9OkBcGKzDbTkKsJlx7dUK8R3dA5': 'z2aPnJvzBfObkwGC3vFaPxeBhxoMqZ8K',
        'xyKEgBajZuRr6DEC0Kt7XpD1cnNW9gAv': 'g2ddlEBvRsqdgKaI4jUK9PrgfMexGZ23',
        'BAogww51jIMa2JnH1BcYpXM5F658RNAL': 'rYWDmm0KptlkGv4FGJFMdZmjs9RDE6XR',
        'BAokpg62VtMa2JnH1mHYpXM5F658RNAL': 'rYWryDnlNslkGv4FG4HMdZmjs9RDE62R',
        'a4z1Px5e2hzg0m0iMMCPeanGSkRKWXAg': 'LY9eorNQGUdGW9WuKKf5JGeEik7l1NYQ',
        'kAx69R58kF9nY5YcdecJdl2pFXP53WyX': 'gXyRxELpbfPvLeLSaRil0mp6UEzbZJ8L',
        'BAoY13nwViMa2J2uo2cY6BlETgmdwryL': 'rYWwKzJmNFlkGvGtNoUM9bzwIJVzB1YR',
    }

    # Short MCP aliases -> full access keys (used when an embedding page
    # supplies an "mcp" name instead of an access key).
    _MCP_TO_ACCESS_KEY_TABLE = {
        'qa': 'anvato_mcpqa_demo_web_stage_18b55e00db5a13faa8d03ae6e41f6f5bcb15b922',
        'lin': 'anvato_mcp_lin_web_prod_4c36fbfd4d8d8ecae6488656e21ac6d1ac972749',
        'univison': 'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa',
        'uni': 'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa',
        'dev': 'anvato_mcp_fs2go_web_prod_c7b90a93e171469cdca00a931211a2f556370d0a',
        'sps': 'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336',
        'spsstg': 'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336',
        'anv': 'anvato_mcp_anv_web_prod_791407490f4c1ef2a4bcb21103e0cb1bcb3352b3',
        'gray': 'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900',
        'hearst': 'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99',
        'cbs': 'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe',
        'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582'
    }

    # Fallback signing secret for access keys missing from _ANVACK_TABLE.
    _API_KEY = '3hwbSuqqT690uxjNYBktSQpa5ZrpYYR0Iofx7NcJHyA'

    # Matches the JSON blob in a data-anvp="..." player script attribute.
    _ANVP_RE = r'<script[^>]+\bdata-anvp\s*=\s*(["\'])(?P<anvp>(?:(?!\1).)+)\1'
    # AES key for building the X-Anvato-Adst-Auth request signature.
    _AUTH_KEY = b'\x31\xc2\x42\x84\x9e\x73\xa0\xce'

    _TESTS = [{
        # from https://www.boston25news.com/news/watch-humpback-whale-breaches-right-next-to-fishing-boat-near-nh/817484874
        'url': 'anvato:8v9BEynrwx8EFLYpgfOWcG1qJqyXKlRM:4465496',
        'info_dict': {
            'id': '4465496',
            'ext': 'mp4',
            'title': 'VIDEO: Humpback whale breaches right next to NH boat',
            'description': 'VIDEO: Humpback whale breaches right next to NH boat. Footage courtesy: Zach Fahey.',
            'duration': 22,
            'timestamp': 1534855680,
            'upload_date': '20180821',
            'uploader': 'ANV',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # from https://sanfrancisco.cbslocal.com/2016/06/17/source-oakland-cop-on-leave-for-having-girlfriend-help-with-police-reports/
        'url': 'anvato:DVzl9QRzox3ZZsP9bNu5Li3X7obQOnqP:3417601',
        'only_matching': True,
    }]

    def __init__(self, *args, **kwargs):
        super(AnvatoIE, self).__init__(*args, **kwargs)
        # Lazily-fetched, per-instance cache of the API server time.
        self.__server_time = None

    def _server_time(self, access_key, video_id):
        """Return the Anvato API server time (seconds), cached per instance."""
        if self.__server_time is not None:
            return self.__server_time

        self.__server_time = int(self._download_json(
            self._api_prefix(access_key) + 'server_time?anvack=' + access_key, video_id,
            note='Fetching server time')['server_time'])

        return self.__server_time

    def _api_prefix(self, access_key):
        """Pick the prod or stage API base URL based on the access key name."""
        return 'https://tkx2-%s.anvato.net/rest/v2/' % ('prod' if 'prod' in access_key else 'stage')

    def _get_video_json(self, access_key, video_id):
        """Fetch the raw video JSON, building the signed request the same way
        the official player does."""
        # See et() in anvplayer.min.js, which is an alias of getVideoJSON()
        video_data_url = self._api_prefix(access_key) + 'mcp/video/%s?anvack=%s' % (video_id, access_key)
        server_time = self._server_time(access_key, video_id)
        input_data = '%d~%s~%s' % (server_time, md5_text(video_data_url), md5_text(server_time))

        # AES-encrypt the (truncated) input string to form the auth token.
        auth_secret = intlist_to_bytes(aes_encrypt(
            bytes_to_intlist(input_data[:64]), bytes_to_intlist(self._AUTH_KEY)))

        video_data_url += '&X-Anvato-Adst-Auth=' + base64.b64encode(auth_secret).decode('ascii')
        anvrid = md5_text(time.time() * 1000 * random.random())[:30]
        api = {
            'anvrid': anvrid,
            'anvts': server_time,
        }
        # Per-request token signed with the per-access-key secret (or the
        # generic _API_KEY fallback).
        api['anvstk'] = md5_text('%s|%s|%d|%s' % (
            access_key, anvrid, server_time,
            self._ANVACK_TABLE.get(access_key, self._API_KEY)))

        return self._download_json(
            video_data_url, video_id, transform_source=strip_jsonp,
            data=json.dumps({'api': api}).encode('utf-8'))

    def _get_anvato_videos(self, access_key, video_id):
        """Build the info dict (formats, subtitles, metadata) for a video."""
        video_data = self._get_video_json(access_key, video_id)

        formats = []
        for published_url in video_data['published_urls']:
            video_url = published_url['embed_url']
            media_format = published_url.get('format')
            ext = determine_ext(video_url)

            if ext == 'smil' or media_format == 'smil':
                formats.extend(self._extract_smil_formats(video_url, video_id))
                continue

            tbr = int_or_none(published_url.get('kbps'))
            a_format = {
                'url': video_url,
                'format_id': ('-'.join(filter(None, ['http', published_url.get('cdn_name')]))).lower(),
                'tbr': tbr if tbr != 0 else None,
            }

            if media_format == 'm3u8' and tbr is not None:
                # Single-bitrate HLS rendition.
                a_format.update({
                    'format_id': '-'.join(filter(None, ['hls', compat_str(tbr)])),
                    'ext': 'mp4',
                })
            elif media_format == 'm3u8-variant' or ext == 'm3u8':
                # Master playlist: expand into its variant formats.
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
                continue
            elif ext == 'mp3' or media_format == 'mp3':
                a_format['vcodec'] = 'none'
            else:
                a_format.update({
                    'width': int_or_none(published_url.get('width')),
                    'height': int_or_none(published_url.get('height')),
                })
            formats.append(a_format)

        self._sort_formats(formats)

        subtitles = {}
        for caption in video_data.get('captions', []):
            a_caption = {
                'url': caption['url'],
                'ext': 'tt' if caption.get('format') == 'SMPTE-TT' else None
            }
            subtitles.setdefault(caption['language'], []).append(a_caption)

        return {
            'id': video_id,
            'formats': formats,
            'title': video_data.get('def_title'),
            'description': video_data.get('def_description'),
            'tags': video_data.get('def_tags', '').split(','),
            'categories': video_data.get('categories'),
            'thumbnail': video_data.get('src_image_url') or video_data.get('thumbnail'),
            'timestamp': int_or_none(video_data.get(
                'ts_published') or video_data.get('ts_added')),
            'uploader': video_data.get('mcp_id'),
            'duration': int_or_none(video_data.get('duration')),
            'subtitles': subtitles,
        }

    @staticmethod
    def _extract_urls(ie, webpage, video_id):
        """Scan *webpage* for embedded Anvato players and return url_result
        entries ("anvato:<access_key>:<video_id>") for each one found."""
        entries = []
        for mobj in re.finditer(AnvatoIE._ANVP_RE, webpage):
            anvplayer_data = ie._parse_json(
                mobj.group('anvp'), video_id, transform_source=unescapeHTML,
                fatal=False)
            if not anvplayer_data:
                continue
            video = anvplayer_data.get('video')
            if not isinstance(video, compat_str) or not video.isdigit():
                continue
            access_key = anvplayer_data.get('accessKey')
            if not access_key:
                # Fall back to resolving a short "mcp" alias.
                mcp = anvplayer_data.get('mcp')
                if mcp:
                    access_key = AnvatoIE._MCP_TO_ACCESS_KEY_TABLE.get(
                        mcp.lower())
            if not access_key:
                continue
            entries.append(ie.url_result(
                'anvato:%s:%s' % (access_key, video), ie=AnvatoIE.ie_key(),
                video_id=video))
        return entries

    def _extract_anvato_videos(self, webpage, video_id):
        """Extract a single Anvato video from the player data on *webpage*."""
        anvplayer_data = self._parse_json(
            self._html_search_regex(
                self._ANVP_RE, webpage, 'Anvato player data', group='anvp'),
            video_id)
        return self._get_anvato_videos(
            anvplayer_data['accessKey'], anvplayer_data['video'])

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        self._initialize_geo_bypass({
            'countries': smuggled_data.get('geo_countries'),
        })

        mobj = re.match(self._VALID_URL, url)
        access_key, video_id = mobj.group('access_key_or_mcp', 'id')
        # The first URL component may be an MCP alias rather than a real
        # access key; resolve it if so, else use it as-is.
        if access_key not in self._ANVACK_TABLE:
            access_key = self._MCP_TO_ACCESS_KEY_TABLE.get(
                access_key) or access_key
        return self._get_anvato_videos(access_key, video_id)
|
|
# Copyright (c) 2005 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision$'
import sys
import os
import socket
import select
import errno
import signal
import random
import time
try:
    import fcntl
except ImportError:
    def setCloseOnExec(sock):
        # fcntl is unavailable (e.g. non-POSIX platforms); close-on-exec
        # cannot be set, so this is a no-op.
        pass
else:
    def setCloseOnExec(sock):
        # Mark the socket's fd so it is not inherited across exec().
        fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
# If running Python < 2.4, require eunuchs module for socket.socketpair().
# See <http://www.inoi.fi/open/trac/eunuchs>.
if not hasattr(socket, 'socketpair'):
    try:
        import eunuchs.socketpair
    except ImportError:
        # TODO: Other alternatives? Perhaps using os.pipe()?
        raise ImportError, 'Requires eunuchs module for Python < 2.4'
    def socketpair():
        # Emulate socket.socketpair() on top of eunuchs' raw fd pair.
        s1, s2 = eunuchs.socketpair.socketpair()
        p, c = (socket.fromfd(s1, socket.AF_UNIX, socket.SOCK_STREAM),
                socket.fromfd(s2, socket.AF_UNIX, socket.SOCK_STREAM))
        # socket.fromfd() dups the descriptors, so close the originals.
        os.close(s1)
        os.close(s2)
        return p, c
    # Patch the shim into the socket module so the rest of the file can
    # use socket.socketpair() unconditionally.
    socket.socketpair = socketpair
class PreforkServer(object):
    """
    A preforked server model conceptually similar to Apache httpd(2). At
    any given time, ensures there are at least minSpare children ready to
    process new requests (up to a maximum of maxChildren children total).
    If the number of idle children is ever above maxSpare, the extra
    children are killed.
    If maxRequests is positive, each child will only handle that many
    requests in its lifetime before exiting.
    jobClass should be a class whose constructor takes at least two
    arguments: the client socket and client address. jobArgs, which
    must be a list or tuple, is any additional (static) arguments you
    wish to pass to the constructor.
    jobClass should have a run() method (taking no arguments) that does
    the actual work. When run() returns, the request is considered
    complete and the child process moves to idle state.
    """
    def __init__(self, minSpare=1, maxSpare=5, maxChildren=50,
                 maxRequests=0, jobClass=None, jobArgs=()):
        self._minSpare = minSpare
        self._maxSpare = maxSpare
        self._maxChildren = max(maxSpare, maxChildren)
        self._maxRequests = maxRequests
        self._jobClass = jobClass
        self._jobArgs = jobArgs
        # Internal state of children. Maps pids to dictionaries with two
        # members: 'file' and 'avail'. 'file' is the socket to that
        # individual child and 'avail' is whether or not the child is
        # free to process requests.
        self._children = {}
        # Parent-side sockets of children scheduled to be told to exit;
        # populated by the SIGUSR1 handler (_usr1Handler).
        self._children_to_purge = []
        self._last_purge = 0
        if minSpare < 1:
            raise ValueError("minSpare must be at least 1!")
        if maxSpare < minSpare:
            raise ValueError("maxSpare must be greater than, or equal to, minSpare!")
    def run(self, sock):
        """
        The main loop. Pass a socket that is ready to accept() client
        connections. Return value will be True or False indicating whether
        or not the loop was exited due to SIGHUP.
        """
        # Set up signal handlers.
        self._keepGoing = True
        self._hupReceived = False
        self._installSignalHandlers()
        # Don't want operations on main socket to block.
        sock.setblocking(0)
        # Set close-on-exec
        setCloseOnExec(sock)
        # Main loop.
        while self._keepGoing:
            # Maintain minimum number of children. Note that we are checking
            # the absolute number of children, not the number of "available"
            # children. We explicitly test against _maxSpare to maintain
            # an *optimistic* absolute minimum. The number of children will
            # always be in the range [_maxSpare, _maxChildren].
            while len(self._children) < self._maxSpare:
                if not self._spawnChild(sock): break
            # Wait on any socket activity from live children.
            r = [x['file'] for x in self._children.values()
                 if x['file'] is not None]
            if len(r) == len(self._children) and not self._children_to_purge:
                timeout = None
            else:
                # There are dead children that need to be reaped, ensure
                # that they are by timing out, if necessary. Or there are some
                # children that need to die.
                timeout = 2
            w = []
            # Rate-limit purge notifications to one batch every 10 seconds.
            if (time.time() > self._last_purge + 10):
                w = [x for x in self._children_to_purge if x.fileno() != -1]
            try:
                r, w, e = select.select(r, w, [], timeout)
            except select.error, e:
                if e[0] != errno.EINTR:
                    raise
            # Scan child sockets and tend to those that need attention.
            for child in r:
                # Receive status byte.
                try:
                    state = child.recv(1)
                except socket.error, e:
                    if e[0] in (errno.EAGAIN, errno.EINTR):
                        # Guess it really didn't need attention?
                        continue
                    raise
                # Try to match it with a child. (Do we need a reverse map?)
                for pid,d in self._children.items():
                    if child is d['file']:
                        if state:
                            # Set availability status accordingly.
                            # '\x00' means busy; anything else means idle.
                            self._children[pid]['avail'] = state != '\x00'
                        else:
                            # Didn't receive anything. Child is most likely
                            # dead.
                            d = self._children[pid]
                            d['file'].close()
                            d['file'] = None
                            d['avail'] = False
            for child in w:
                # purging child
                child.send('bye, bye')
                del self._children_to_purge[self._children_to_purge.index(child)]
                self._last_purge = time.time()
                # Try to match it with a child. (Do we need a reverse map?)
                for pid,d in self._children.items():
                    if child is d['file']:
                        d['file'].close()
                        d['file'] = None
                        d['avail'] = False
                        break
            # Reap children.
            self._reapChildren()
            # See who and how many children are available.
            availList = filter(lambda x: x[1]['avail'], self._children.items())
            avail = len(availList)
            if avail < self._minSpare:
                # Need to spawn more children.
                while avail < self._minSpare and \
                      len(self._children) < self._maxChildren:
                    if not self._spawnChild(sock): break
                    avail += 1
            elif avail > self._maxSpare:
                # Too many spares, kill off the extras.
                # Kill the highest pids (presumably the youngest children).
                pids = [x[0] for x in availList]
                pids.sort()
                pids = pids[self._maxSpare:]
                for pid in pids:
                    d = self._children[pid]
                    d['file'].close()
                    d['file'] = None
                    d['avail'] = False
        # Clean up all child processes.
        self._cleanupChildren()
        # Restore signal handlers.
        self._restoreSignalHandlers()
        # Return bool based on whether or not SIGHUP was received.
        return self._hupReceived
    def _cleanupChildren(self):
        """
        Closes all child sockets (letting those that are available know
        that it's time to exit). Sends SIGINT to those that are currently
        processing (and hopes that it finishes ASAP).
        Any children remaining after 10 seconds is SIGKILLed.
        """
        # Let all children know it's time to go.
        for pid,d in self._children.items():
            if d['file'] is not None:
                d['file'].close()
                d['file'] = None
            if not d['avail']:
                # Child is unavailable. SIGINT it.
                try:
                    os.kill(pid, signal.SIGINT)
                except OSError, e:
                    if e[0] != errno.ESRCH:
                        raise
        def alrmHandler(signum, frame):
            # No-op: the alarm only needs to interrupt os.wait() below.
            pass
        # Set up alarm to wake us up after 10 seconds.
        oldSIGALRM = signal.getsignal(signal.SIGALRM)
        signal.signal(signal.SIGALRM, alrmHandler)
        signal.alarm(10)
        # Wait for all children to die.
        while len(self._children):
            try:
                pid, status = os.wait()
            except OSError, e:
                if e[0] in (errno.ECHILD, errno.EINTR):
                    break
            if self._children.has_key(pid):
                del self._children[pid]
        signal.signal(signal.SIGALRM, oldSIGALRM)
        # Forcefully kill any remaining children.
        for pid in self._children.keys():
            try:
                os.kill(pid, signal.SIGKILL)
            except OSError, e:
                if e[0] != errno.ESRCH:
                    raise
    def _reapChildren(self):
        """Cleans up self._children whenever children die."""
        while True:
            try:
                # Non-blocking wait so the main loop is never stalled.
                pid, status = os.waitpid(-1, os.WNOHANG)
            except OSError, e:
                if e[0] == errno.ECHILD:
                    break
                raise
            if pid <= 0:
                break
            if self._children.has_key(pid): # Sanity check.
                if self._children[pid]['file'] is not None:
                    self._children[pid]['file'].close()
                    self._children[pid]['file'] = None
                del self._children[pid]
    def _spawnChild(self, sock):
        """
        Spawn a single child. Returns True if successful, False otherwise.
        """
        # This socket pair is used for very simple communication between
        # the parent and its children.
        parent, child = socket.socketpair()
        parent.setblocking(0)
        setCloseOnExec(parent)
        child.setblocking(0)
        setCloseOnExec(child)
        try:
            pid = os.fork()
        except OSError, e:
            if e[0] in (errno.EAGAIN, errno.ENOMEM):
                return False # Can't fork anymore.
            raise
        if not pid:
            # Child
            child.close()
            # Put child into its own process group.
            pid = os.getpid()
            os.setpgid(pid, pid)
            # Restore signal handlers.
            self._restoreSignalHandlers()
            # Close copies of child sockets.
            for f in [x['file'] for x in self._children.values()
                      if x['file'] is not None]:
                f.close()
            self._children = {}
            try:
                # Enter main loop.
                self._child(sock, parent)
            except KeyboardInterrupt:
                pass
            sys.exit(0)
        else:
            # Parent
            parent.close()
            d = self._children[pid] = {}
            d['file'] = child
            d['avail'] = True
        return True
    def _isClientAllowed(self, addr):
        """Override to provide access control."""
        return True
    def _notifyParent(self, parent, msg):
        """Send message to parent, ignoring EPIPE and retrying on EAGAIN"""
        while True:
            try:
                parent.send(msg)
                return True
            except socket.error, e:
                if e[0] == errno.EPIPE:
                    return False # Parent is gone
                if e[0] == errno.EAGAIN:
                    # Wait for socket change before sending again
                    select.select([], [parent], [])
                else:
                    raise
    def _child(self, sock, parent):
        """Main loop for children."""
        requestCount = 0
        # Re-seed random module
        preseed = ''
        # urandom only exists in Python >= 2.4
        if hasattr(os, 'urandom'):
            try:
                preseed = os.urandom(16)
            except NotImplementedError:
                pass
        # Have doubts about this. random.seed will just hash the string
        random.seed('%s%s%s' % (preseed, os.getpid(), time.time()))
        del preseed
        while True:
            # Wait for any activity on the main socket or parent socket.
            r, w, e = select.select([sock, parent], [], [])
            for f in r:
                # If there's any activity on the parent socket, it
                # means the parent wants us to die or has died itself.
                # Either way, exit.
                if f is parent:
                    return
                # Otherwise, there's activity on the main socket...
                try:
                    clientSock, addr = sock.accept()
                except socket.error, e:
                    if e[0] == errno.EAGAIN:
                        # Or maybe not.
                        continue
                    raise
                setCloseOnExec(clientSock)
                # Check if this client is allowed.
                if not self._isClientAllowed(addr):
                    clientSock.close()
                    continue
                # Notify parent we're no longer available.
                self._notifyParent(parent, '\x00')
                # Do the job.
                self._jobClass(clientSock, addr, *self._jobArgs).run()
                # If we've serviced the maximum number of requests, exit.
                if self._maxRequests > 0:
                    requestCount += 1
                    if requestCount >= self._maxRequests:
                        break
                # Tell parent we're free again.
                if not self._notifyParent(parent, '\xff'):
                    return # Parent is gone.
    # Signal handlers
    def _hupHandler(self, signum, frame):
        # SIGHUP: leave the main loop and report a graceful-restart request.
        self._keepGoing = False
        self._hupReceived = True
    def _intHandler(self, signum, frame):
        # SIGINT/SIGTERM: leave the main loop.
        self._keepGoing = False
    def _chldHandler(self, signum, frame):
        # Do nothing (breaks us out of select and allows us to reap children).
        pass
    def _usr1Handler(self, signum, frame):
        # SIGUSR1: schedule every current child to be purged (recycled) by
        # the main loop's purge pass.
        self._children_to_purge = [x['file'] for x in self._children.values()
                                   if x['file'] is not None]
    def _installSignalHandlers(self):
        """Installs this server's signal handlers, remembering the old ones."""
        supportedSignals = [signal.SIGINT, signal.SIGTERM]
        if hasattr(signal, 'SIGHUP'):
            supportedSignals.append(signal.SIGHUP)
        if hasattr(signal, 'SIGUSR1'):
            supportedSignals.append(signal.SIGUSR1)
        self._oldSIGs = [(x,signal.getsignal(x)) for x in supportedSignals]
        for sig in supportedSignals:
            if hasattr(signal, 'SIGHUP') and sig == signal.SIGHUP:
                signal.signal(sig, self._hupHandler)
            elif hasattr(signal, 'SIGUSR1') and sig == signal.SIGUSR1:
                signal.signal(sig, self._usr1Handler)
            else:
                signal.signal(sig, self._intHandler)
    def _restoreSignalHandlers(self):
        """Restores previous signal handlers."""
        for signum,handler in self._oldSIGs:
            signal.signal(signum, handler)
if __name__ == '__main__':
    # Minimal manual test: serve "Hello World!" on port 8080 with up to 10
    # child processes; each connection is closed after one received byte.
    class TestJob(object):
        def __init__(self, sock, addr):
            self._sock = sock
            self._addr = addr
        def run(self):
            print "Client connection opened from %s:%d" % self._addr
            self._sock.send('Hello World!\n')
            self._sock.setblocking(1)
            self._sock.recv(1)
            self._sock.close()
            print "Client connection closed from %s:%d" % self._addr
    sock = socket.socket()
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('', 8080))
    sock.listen(socket.SOMAXCONN)
    PreforkServer(maxChildren=10, jobClass=TestJob).run(sock)
|
|
"""
mbed SDK
Copyright (c) 2011-2020 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import fnmatch
from os.path import join, basename, splitext, dirname, exists
from os import getcwd, getenv
from distutils.spawn import find_executable
from distutils.version import LooseVersion
from tools.toolchains.mbed_toolchain import (
mbedToolchain, TOOLCHAIN_PATHS, should_replace_small_c_lib
)
from tools.utils import run_cmd
class GCC(mbedToolchain):
    """GNU Arm Embedded (arm-none-eabi) toolchain driver for mbed builds."""
    OFFICIALLY_SUPPORTED = True
    LINKER_EXT = '.ld'
    LIBRARY_EXT = '.a'
    STD_LIB_NAME = "lib%s.a"
    # Matches one GCC diagnostic line: "<file>:<line>:<col>: <severity>: <message>".
    DIAGNOSTIC_PATTERN = re.compile('((?P<file>[^:]+):(?P<line>\d+):)(?P<col>\d+):? (?P<severity>warning|[eE]rror|fatal error): (?P<message>.+)')
    # Accepted compiler versions: min inclusive, max exclusive.
    GCC_RANGE = (LooseVersion("9.0.0"), LooseVersion("10.0.0"))
    GCC_VERSION_RE = re.compile(b"\d+\.\d+\.\d+")
    # Extracts the DW_AT_producer string from objdump's DWARF info dump.
    DWARF_PRODUCER_RE = re.compile(r'(DW_AT_producer)(.*:\s*)(?P<producer>.*)')
    def __init__(self, target, notify=None, macros=None, build_profile=None,
                 build_dir=None, coverage_patterns=None):
        """Derive all compiler/assembler/linker command lines for *target*."""
        mbedToolchain.__init__(
            self,
            target,
            notify,
            macros,
            build_profile=build_profile,
            build_dir=build_dir,
            coverage_patterns=coverage_patterns
        )
        tool_path = TOOLCHAIN_PATHS['GCC_ARM']
        # Add flags for current size setting
        c_lib = "std"
        if hasattr(target, "c_lib"):
            toolchain = "gcc_arm"
            if should_replace_small_c_lib(target, toolchain):
                target.c_lib = "std"
            self.check_c_lib_supported(target, toolchain)
            c_lib = target.c_lib
        elif hasattr(target, "default_build"):
            c_lib = target.default_build
        if c_lib == "small":
            # Newlib-nano shrinks the C library footprint for "small" builds.
            common_flags = ["-DMBED_RTOS_SINGLE_THREAD", "-D__NEWLIB_NANO"]
            self.flags["common"].extend(common_flags)
            self.flags["ld"].append("--specs=nano.specs")
        self.check_and_add_minimal_printf(target)
        if getattr(target, "printf_lib", "std") == "minimal-printf":
            minimal_printf_wraps = [
                "-Wl,--wrap,printf",
                "-Wl,--wrap,sprintf",
                "-Wl,--wrap,snprintf",
                "-Wl,--wrap,vprintf",
                "-Wl,--wrap,vsprintf",
                "-Wl,--wrap,vsnprintf",
                "-Wl,--wrap,fprintf",
                "-Wl,--wrap,vfprintf",
            ]
            # Add the linker option to wrap the f\v\s\printf functions if not
            # already added.
            for minimal_printf_wrap in minimal_printf_wraps:
                if minimal_printf_wrap not in self.flags["ld"]:
                    self.flags["ld"].append(minimal_printf_wrap)
        self.cpu = []
        # Enable DOMAIN_NS macro for TF-M NS targets
        if target.is_TFM_target:
            # Add linking time preprocessor macro DOMAIN_NS
            # (DOMAIN_NS is passed to compiler and assembler via CORTEX_SYMBOLS
            # in mbedToolchain.get_symbols)
            self.flags["ld"].append("-DDOMAIN_NS=1")
        core = target.core_without_NS
        # Map the mbed core name onto GCC's -mcpu/-march spelling.
        cpu = {
            "Cortex-M0+": "cortex-m0plus",
            "Cortex-M4F": "cortex-m4",
            "Cortex-M7F": "cortex-m7",
            "Cortex-M7FD": "cortex-m7",
            "Cortex-M33": "cortex-m33+nodsp",
            "Cortex-M33E": "cortex-m33",
            "Cortex-M33F": "cortex-m33+nodsp",
            "Cortex-M33FE": "cortex-m33"}.get(core, core)
        if cpu == "cortex-m33+nodsp":
            self.cpu.append("-march=armv8-m.main")
        elif cpu == "cortex-m33":
            self.cpu.append("-march=armv8-m.main+dsp")
        else:
            self.cpu.append("-mcpu={}".format(cpu.lower()))
        if target.core.startswith("Cortex-M"):
            self.cpu.append("-mthumb")
        # FPU handling, M7 possibly to have double FPU
        if core == "Cortex-M4F":
            self.cpu.append("-mfpu=fpv4-sp-d16")
            self.cpu.append("-mfloat-abi=softfp")
        elif core == "Cortex-M7F" or core.startswith("Cortex-M33F"):
            self.cpu.append("-mfpu=fpv5-sp-d16")
            self.cpu.append("-mfloat-abi=softfp")
        elif core == "Cortex-M7FD":
            self.cpu.append("-mfpu=fpv5-d16")
            self.cpu.append("-mfloat-abi=softfp")
        if target.core == "Cortex-A9" or target.core == "Cortex-A5":
            self.cpu.append("-mthumb-interwork")
            self.cpu.append("-marm")
            self.cpu.append("-march=armv7-a")
            self.cpu.append("-mfpu=vfpv3")
            self.cpu.append("-mfloat-abi=hard")
            self.cpu.append("-mno-unaligned-access")
        self.flags["common"] += self.cpu
        self.coverage_supported = True
        # Assemble the full driver command lines from the accumulated flags.
        main_cc = join(tool_path, "arm-none-eabi-gcc")
        main_cppc = join(tool_path, "arm-none-eabi-g++")
        self.asm = [main_cc] + self.flags['asm'] + self.flags["common"]
        self.cc = [main_cc]
        self.cppc = [main_cppc]
        self.cc += self.flags['c'] + self.flags['common']
        self.cppc += self.flags['cxx'] + self.flags['common']
        self.flags['ld'] += self.cpu
        self.ld = [join(tool_path, "arm-none-eabi-gcc")]
        self.ld += self.flags['ld'] + self.flags['common']
        self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc", "nosys"]
        self.preproc = [join(tool_path, "arm-none-eabi-cpp"), "-E", "-P"]
        self.ar = join(tool_path, "arm-none-eabi-ar")
        self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
        self.objdump = join(tool_path, "arm-none-eabi-objdump")
        # Distribute compiles via distcc only when hosts are configured and
        # it is not explicitly disabled.
        self.use_distcc = (bool(getenv("DISTCC_POTENTIAL_HOSTS", False))
                           and not getenv("MBED_DISABLE_DISTCC", False))
        # create copies of gcc/ld options as coverage build options, and injects extra coverage options
        self.coverage_cc = self.cc + ["--coverage", "-DENABLE_LIBGCOV_PORT"]
        self.coverage_cppc = self.cppc + ["--coverage", "-DENABLE_LIBGCOV_PORT"]
        self.coverage_ld = self.ld + ['--coverage', '-Wl,--wrap,GREENTEA_SETUP', '-Wl,--wrap,_Z25GREENTEA_TESTSUITE_RESULTi']
        # for gcc coverage options remove MBED_DEBUG macro (this is required by code coverage function)
        for flag in ["-DMBED_DEBUG"]:
            if flag in self.coverage_cc:
                self.coverage_cc.remove(flag)
            if flag in self.coverage_cppc:
                self.coverage_cppc.remove(flag)
        # for lg coverage options remove exit wrapper (this is required by code coverage function)
        for flag in ['-Wl,--wrap,exit', '-Wl,--wrap,atexit']:
            if flag in self.coverage_ld:
                self.coverage_ld.remove(flag)
    def version_check(self):
        """Warn (via the notifier) when the compiler version is outside GCC_RANGE."""
        stdout, _, retcode = run_cmd([self.cc[0], "--version"], redirect=True)
        msg = None
        match = self.GCC_VERSION_RE.search(stdout.encode("utf-8"))
        if match:
            found_version = LooseVersion(match.group(0).decode('utf-8'))
        else:
            found_version = None
        min_ver, max_ver = self.GCC_RANGE
        if found_version and (found_version < min_ver
                              or found_version >= max_ver):
            msg = ("Compiler version mismatch: Have {}; "
                   "expected version >= {} and < {}"
                   .format(found_version, min_ver, max_ver))
        elif not match:
            msg = ("Compiler version mismatch: Could not detect version; "
                   "expected version >= {} and < {}"
                   .format(min_ver, max_ver))
        if msg:
            self.notify.cc_info({
                "message": msg,
                "file": "",
                "line": "",
                "col": "",
                "severity": "Warning",
            })
    def is_not_supported_error(self, output):
        """Return True when the build output contains the NOT_SUPPORTED marker."""
        return "error: #error [NOT_SUPPORTED]" in output
    def parse_output(self, output):
        # The warning/error notification is multiline
        # Each DIAGNOSTIC_PATTERN match starts a new message; the previous
        # one (if any) is flushed to the notifier first.
        msg = None
        for line in output.splitlines():
            match = self.DIAGNOSTIC_PATTERN.search(line)
            if match is not None:
                if msg is not None:
                    self.notify.cc_info(msg)
                    msg = None
                msg = {
                    'severity': match.group('severity').lower(),
                    'file': match.group('file'),
                    'line': match.group('line'),
                    'col': match.group('col'),
                    'message': match.group('message'),
                    'text': '',
                    'target_name': self.target.name,
                    'toolchain_name': self.name
                }
        # Flush the trailing message.
        if msg is not None:
            self.notify.cc_info(msg)
    def get_dep_option(self, object):
        """Return the flags that make GCC write a .d dependency file."""
        base, _ = splitext(object)
        dep_path = base + '.d'
        return ["-MD", "-MF", dep_path]
    def get_config_option(self, config_header):
        """Return the flag that force-includes the mbed config header."""
        return ['-include', config_header]
    def get_compile_options(self, defines, includes, for_asm=False):
        """Build -D/-I (or response-file) options plus the config header include."""
        opts = ['-D%s' % d for d in defines]
        if self.RESPONSE_FILES:
            # Long include lists go through an @file to avoid command-length limits.
            opts += ['@%s' % self.get_inc_file(includes)]
        else:
            opts += ["-I%s" % i for i in includes]
        config_header = self.get_config_header()
        if config_header is not None:
            opts = opts + self.get_config_option(config_header)
        return opts
    def match_coverage_patterns(self, source):
        """Check whether the give source file match with coverage patterns, if so return True. """
        for pattern in self.coverage_patterns:
            if fnmatch.fnmatch(source, pattern):
                return True
        return False
    def assemble(self, source, object, includes):
        # Build assemble command
        cmd = self.asm + self.get_compile_options(
            self.get_symbols(True), includes
        ) + ["-o", object, source]
        # Return command array, don't execute
        return [cmd]
    def compile(self, cc, source, object, includes):
        # Build compile command
        cmd = cc + self.get_compile_options(self.get_symbols(), includes)
        cmd.extend(self.get_dep_option(object))
        cmd.extend(["-o", object, source])
        if self.use_distcc:
            cmd = ["distcc"] + cmd
        return [cmd]
    def compile_c(self, source, object, includes):
        # Coverage-matched sources get the instrumented compiler options.
        if self.coverage_patterns and self.match_coverage_patterns(source):
            return self.compile(self.coverage_cc, source, object, includes)
        return self.compile(self.cc, source, object, includes)
    def compile_cpp(self, source, object, includes):
        # Coverage-matched sources get the instrumented compiler options.
        if self.coverage_patterns and self.match_coverage_patterns(source):
            return self.compile(self.coverage_cppc, source, object, includes)
        return self.compile(self.cppc, source, object, includes)
    def link(self, output, objects, libraries, lib_dirs, mem_map):
        """Preprocess the linker script, build the link command and run it."""
        libs = []
        for l in libraries:
            name, _ = splitext(basename(l))
            # Strip the "lib" prefix so -l<name> resolves the archive.
            libs.append("-l%s" % name[3:])
        libs.extend(["-l%s" % l for l in self.sys_libs])
        # Preprocess
        if mem_map:
            preproc_output = join(dirname(output), ".link_script.ld")
            cmd = (
                self.preproc + [mem_map] + self.ld[1:] + ["-o", preproc_output] +
                self.get_compile_options(self.get_symbols(), [])
            )
            self.notify.cc_verbose("Preproc: %s" % ' '.join(cmd))
            self.default_cmd(cmd)
            mem_map = preproc_output
        # NOTE: GCC_ARM_LTO_WORKAROUND
        # This is a workaround for the GCC not using the strong symbols from
        # C files to override the weak symbols from ASM files. This GCC bug is only
        # present when building with the link-time optimizer (LTO) enabled. For
        # more details please see:
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83967
        #
        # This can be fixed by changing the order of object files in the linker
        # command; objects providing the weak symbols and compiled from assembly
        # must be listed before the objects providing the strong symbols.
        # To keep things simple, ALL object files from ASM are listed before
        # other object files.
        asm_objects = []
        if '-flto' in self.ld:
            asm_objects = self.get_asm_objects(objects)
        reorg_objects = (
            [o for o in objects if o in asm_objects] +
            [o for o in objects if o not in asm_objects]
        )
        # Build linker command
        map_file = splitext(output)[0] + ".map"
        cmd = (
            (self.coverage_ld if self.coverage_patterns else self.ld) +
            ["-o", output, "-Wl,-Map=%s" % map_file] +
            reorg_objects +
            ["-Wl,--start-group"] +
            libs +
            ["-Wl,--end-group"]
        )
        if mem_map:
            cmd.extend(['-T', mem_map])
        for L in lib_dirs:
            cmd.extend(['-L', L])
        # NOTE(review): libs appear a second time here, after the -L dirs,
        # in addition to the --start/--end-group block above — presumably
        # to re-resolve symbols once the search paths are known; confirm
        # before removing.
        cmd.extend(libs)
        if self.RESPONSE_FILES:
            # Split link command to linker executable + response file
            cmd_linker = cmd[0]
            link_files = self.get_link_file(cmd[1:])
            cmd = [cmd_linker, "@%s" % link_files]
        # Exec command
        self.notify.cc_verbose("Link: %s" % ' '.join(cmd))
        self.default_cmd(cmd)
    def archive(self, objects, lib_path):
        """Create/update a static library from *objects* with ar."""
        if self.RESPONSE_FILES:
            param = ["@%s" % self.get_arch_file(objects)]
        else:
            param = objects
        # Exec command
        self.default_cmd([self.ar, 'rcs', lib_path] + param)
    def binary(self, resources, elf, bin):
        # Build binary command
        # The output container (raw binary or Intel hex) follows the extension.
        _, fmt = splitext(bin)
        bin_arg = {'.bin': 'binary', '.hex': 'ihex'}[fmt]
        cmd = [self.elf2bin, "-O", bin_arg, elf, bin]
        # Exec command
        self.notify.cc_verbose("FromELF: %s" % ' '.join(cmd))
        self.default_cmd(cmd)
    @staticmethod
    def name_mangle(name):
        """Return the Itanium C++ mangling of a no-argument function *name*."""
        return "_Z%i%sv" % (len(name), name)
    @staticmethod
    def make_ld_define(name, value):
        """Return a -D style linker define for *name*=*value*."""
        return "-D%s=%s" % (name, value)
    @staticmethod
    def redirect_symbol(source, sync, build_dir):
        """Return a --defsym option aliasing *source* to *sync*."""
        return "-Wl,--defsym=%s=%s" % (source, sync)
    @staticmethod
    def check_executable():
        """Returns True if the executable (arm-none-eabi-gcc) location
        specified by the user exists OR the executable can be found on the
        PATH. Returns False otherwise."""
        if (
            not TOOLCHAIN_PATHS['GCC_ARM'] or
            not exists(TOOLCHAIN_PATHS['GCC_ARM'])
        ):
            if find_executable('arm-none-eabi-gcc'):
                # Found on PATH; an empty tool path means "use PATH".
                TOOLCHAIN_PATHS['GCC_ARM'] = ''
                return True
            else:
                return False
        else:
            exec_name = join(TOOLCHAIN_PATHS['GCC_ARM'], 'arm-none-eabi-gcc')
            return exists(exec_name) or exists(exec_name + '.exe')
    def check_if_obj_from_asm(self, obj_file):
        """Check if obj_file was build by the GNU Assembler."""
        dw_producer = ''
        cmd = [self.objdump, '--dwarf=info', obj_file]
        stdout, stderr, rc = run_cmd(cmd, work_dir=getcwd(), chroot=self.CHROOT)
        if rc != 0:
            return False
        match = self.DWARF_PRODUCER_RE.search(stdout)
        if match:
            dw_producer = match.group('producer')
        return 'GNU AS' in dw_producer
    def get_asm_objects(self, objects):
        """Return a list of object files built from ASM."""
        return [o for o in objects if self.check_if_obj_from_asm(o)]
class GCC_ARM(GCC):
    """Alias kept so the toolchain can be selected by the name "GCC_ARM"."""
    pass
|
|
"""Tests for deCONZ config flow."""
import asyncio
import pydeconz
from homeassistant.components import ssdp
from homeassistant.components.deconz import config_flow
from .test_gateway import API_KEY, BRIDGEID, setup_deconz_integration
from tests.common import MockConfigEntry
async def test_flow_1_discovered_bridge(hass, aioclient_mock):
    """Test that config flow for one discovered bridge works."""
    json_headers = {"content-type": "application/json"}
    # A single discovered bridge lets the flow skip the selection step.
    aioclient_mock.get(
        pydeconz.utils.URL_DISCOVER,
        json=[{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80}],
        headers=json_headers,
    )
    step = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert step["type"] == "form"
    assert step["step_id"] == "link"
    # The bridge hands out an API key when linking.
    aioclient_mock.post(
        "http://1.2.3.4:80/api",
        json=[{"success": {"username": API_KEY}}],
        headers=json_headers,
    )
    step = await hass.config_entries.flow.async_configure(
        step["flow_id"], user_input={}
    )
    assert step["type"] == "create_entry"
    assert step["title"] == BRIDGEID
    assert step["data"] == {
        config_flow.CONF_HOST: "1.2.3.4",
        config_flow.CONF_PORT: 80,
        config_flow.CONF_API_KEY: API_KEY,
    }
async def test_flow_2_discovered_bridges(hass, aioclient_mock):
    """Test that config flow works for multiple discovered bridges."""
    json_headers = {"content-type": "application/json"}
    # Two discovered bridges force the user through the selection (init) step.
    aioclient_mock.get(
        pydeconz.utils.URL_DISCOVER,
        json=[
            {"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80},
            {"id": "1234E567890A", "internalipaddress": "5.6.7.8", "internalport": 80},
        ],
        headers=json_headers,
    )
    step = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert step["type"] == "form"
    assert step["step_id"] == "init"
    # Pick the first bridge.
    step = await hass.config_entries.flow.async_configure(
        step["flow_id"], user_input={config_flow.CONF_HOST: "1.2.3.4"}
    )
    assert step["type"] == "form"
    assert step["step_id"] == "link"
    aioclient_mock.post(
        "http://1.2.3.4:80/api",
        json=[{"success": {"username": API_KEY}}],
        headers=json_headers,
    )
    step = await hass.config_entries.flow.async_configure(
        step["flow_id"], user_input={}
    )
    assert step["type"] == "create_entry"
    assert step["title"] == BRIDGEID
    assert step["data"] == {
        config_flow.CONF_HOST: "1.2.3.4",
        config_flow.CONF_PORT: 80,
        config_flow.CONF_API_KEY: API_KEY,
    }
async def test_flow_manual_configuration(hass, aioclient_mock):
    """Test that config flow works with manual configuration after no discovered bridges."""
    json_headers = {"content-type": "application/json"}
    # No bridges discovered: the flow falls back to manual host/port entry.
    aioclient_mock.get(
        pydeconz.utils.URL_DISCOVER,
        json=[],
        headers=json_headers,
    )
    step = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert step["type"] == "form"
    assert step["step_id"] == "init"
    step = await hass.config_entries.flow.async_configure(
        step["flow_id"],
        user_input={config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_PORT: 80},
    )
    assert step["type"] == "form"
    assert step["step_id"] == "link"
    # Linking produces an API key; the bridge id is then fetched from /config.
    aioclient_mock.post(
        "http://1.2.3.4:80/api",
        json=[{"success": {"username": API_KEY}}],
        headers=json_headers,
    )
    aioclient_mock.get(
        f"http://1.2.3.4:80/api/{API_KEY}/config",
        json={"bridgeid": BRIDGEID},
        headers=json_headers,
    )
    step = await hass.config_entries.flow.async_configure(
        step["flow_id"], user_input={}
    )
    assert step["type"] == "create_entry"
    assert step["title"] == BRIDGEID
    assert step["data"] == {
        config_flow.CONF_HOST: "1.2.3.4",
        config_flow.CONF_PORT: 80,
        config_flow.CONF_API_KEY: API_KEY,
    }
async def test_manual_configuration_after_discovery_timeout(hass, aioclient_mock):
    """Test failed discovery fallbacks to manual configuration."""
    # Discovery times out; the flow should still reach the manual init form.
    aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=asyncio.TimeoutError)
    step = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert step["type"] == "form"
    assert step["step_id"] == "init"
    assert not hass.config_entries.flow._progress[step["flow_id"]].bridges
async def test_manual_configuration_after_discovery_ResponseError(hass, aioclient_mock):
    """Test failed discovery fallbacks to manual configuration."""
    # Discovery raises a ResponseError; the flow still reaches the init form.
    aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=config_flow.ResponseError)
    step = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert step["type"] == "form"
    assert step["step_id"] == "init"
    assert not hass.config_entries.flow._progress[step["flow_id"]].bridges
async def test_manual_configuration_update_configuration(hass, aioclient_mock):
    """Test that manual configuration can update existing config entry."""
    gateway = await setup_deconz_integration(hass)

    # Discovery returns no bridges -> flow falls through to manual entry.
    aioclient_mock.get(
        pydeconz.utils.URL_DISCOVER,
        json=[],
        headers={"content-type": "application/json"},
    )

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "init"

    # Enter a new host for the bridge that is already configured (same BRIDGEID).
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={config_flow.CONF_HOST: "2.3.4.5", config_flow.CONF_PORT: 80},
    )

    assert result["type"] == "form"
    assert result["step_id"] == "link"

    aioclient_mock.post(
        "http://2.3.4.5:80/api",
        json=[{"success": {"username": API_KEY}}],
        headers={"content-type": "application/json"},
    )
    aioclient_mock.get(
        f"http://2.3.4.5:80/api/{API_KEY}/config",
        json={"bridgeid": BRIDGEID},
        headers={"content-type": "application/json"},
    )

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )

    # Same bridge id -> the flow aborts, but the stored host must be updated.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    assert gateway.config_entry.data[config_flow.CONF_HOST] == "2.3.4.5"


async def test_manual_configuration_dont_update_configuration(hass, aioclient_mock):
    """Test that manually configuring an already configured bridge aborts unchanged."""
    await setup_deconz_integration(hass)

    aioclient_mock.get(
        pydeconz.utils.URL_DISCOVER,
        json=[],
        headers={"content-type": "application/json"},
    )

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "init"

    # Same host/port as the existing entry set up above.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_PORT: 80},
    )

    assert result["type"] == "form"
    assert result["step_id"] == "link"

    aioclient_mock.post(
        "http://1.2.3.4:80/api",
        json=[{"success": {"username": API_KEY}}],
        headers={"content-type": "application/json"},
    )
    aioclient_mock.get(
        f"http://1.2.3.4:80/api/{API_KEY}/config",
        json={"bridgeid": BRIDGEID},
        headers={"content-type": "application/json"},
    )

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )

    # Nothing differs from the stored entry, so the flow simply aborts.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_manual_configuration_timeout_get_bridge(hass, aioclient_mock):
    """Test that _create_entry handles a timeout."""
    aioclient_mock.get(
        pydeconz.utils.URL_DISCOVER,
        json=[],
        headers={"content-type": "application/json"},
    )

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "init"

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_PORT: 80},
    )

    assert result["type"] == "form"
    assert result["step_id"] == "link"

    # API key retrieval succeeds...
    aioclient_mock.post(
        "http://1.2.3.4:80/api",
        json=[{"success": {"username": API_KEY}}],
        headers={"content-type": "application/json"},
    )
    # ...but fetching the bridge config times out.
    aioclient_mock.get(
        f"http://1.2.3.4:80/api/{API_KEY}/config", exc=asyncio.TimeoutError
    )

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )

    assert result["type"] == "abort"
    assert result["reason"] == "no_bridges"


async def test_link_get_api_key_ResponseError(hass, aioclient_mock):
    """Test config flow should abort if no API key was possible to retrieve."""
    aioclient_mock.get(
        pydeconz.utils.URL_DISCOVER,
        json=[{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80}],
        headers={"content-type": "application/json"},
    )

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )

    # A single discovered bridge skips straight to the link step.
    assert result["type"] == "form"
    assert result["step_id"] == "link"

    aioclient_mock.post("http://1.2.3.4:80/api", exc=pydeconz.errors.ResponseError)

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )

    # Flow stays on the link step and reports the error instead of aborting.
    assert result["type"] == "form"
    assert result["step_id"] == "link"
    assert result["errors"] == {"base": "no_key"}
async def test_flow_ssdp_discovery(hass, aioclient_mock):
    """Test that config flow for one discovered bridge works."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        data={
            ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
            ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.DECONZ_MANUFACTURERURL,
            ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
        },
        context={"source": "ssdp"},
    )

    # SSDP discovery pre-fills host/port, so the flow starts at the link step.
    assert result["type"] == "form"
    assert result["step_id"] == "link"

    aioclient_mock.post(
        "http://1.2.3.4:80/api",
        json=[{"success": {"username": API_KEY}}],
        headers={"content-type": "application/json"},
    )

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )

    assert result["type"] == "create_entry"
    assert result["title"] == BRIDGEID
    assert result["data"] == {
        config_flow.CONF_HOST: "1.2.3.4",
        config_flow.CONF_PORT: 80,
        config_flow.CONF_API_KEY: API_KEY,
    }


async def test_ssdp_discovery_not_deconz_bridge(hass):
    """Test a non deconz bridge being discovered over ssdp."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        data={ssdp.ATTR_UPNP_MANUFACTURER_URL: "not deconz bridge"},
        context={"source": "ssdp"},
    )

    # Manufacturer URL mismatch -> discovery is rejected outright.
    assert result["type"] == "abort"
    assert result["reason"] == "not_deconz_bridge"


async def test_ssdp_discovery_update_configuration(hass):
    """Test if a discovered bridge is configured but updates with new attributes."""
    gateway = await setup_deconz_integration(hass)

    # Same serial (BRIDGEID) but a new SSDP location/host.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        data={
            ssdp.ATTR_SSDP_LOCATION: "http://2.3.4.5:80/",
            ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.DECONZ_MANUFACTURERURL,
            ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
        },
        context={"source": "ssdp"},
    )

    # Abort, but the existing entry's host is updated in place.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    assert gateway.config_entry.data[config_flow.CONF_HOST] == "2.3.4.5"


async def test_ssdp_discovery_dont_update_configuration(hass):
    """Test if a discovered bridge has already been configured."""
    gateway = await setup_deconz_integration(hass)

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        data={
            ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
            ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.DECONZ_MANUFACTURERURL,
            ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
        },
        context={"source": "ssdp"},
    )

    # Identical attributes: abort and leave the entry untouched.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    assert gateway.config_entry.data[config_flow.CONF_HOST] == "1.2.3.4"


async def test_ssdp_discovery_dont_update_existing_hassio_configuration(hass):
    """Test to ensure the SSDP discovery does not update an Hass.io entry."""
    # Entries created by Hass.io discovery must not be rewritten by SSDP.
    gateway = await setup_deconz_integration(hass, source="hassio")

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        data={
            ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
            ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.DECONZ_MANUFACTURERURL,
            ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
        },
        context={"source": "ssdp"},
    )

    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    assert gateway.config_entry.data[config_flow.CONF_HOST] == "1.2.3.4"
async def test_flow_hassio_discovery(hass):
    """Test hassio discovery flow works."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        data={
            "addon": "Mock Addon",
            config_flow.CONF_HOST: "mock-deconz",
            config_flow.CONF_PORT: 80,
            config_flow.CONF_SERIAL: BRIDGEID,
            config_flow.CONF_API_KEY: API_KEY,
        },
        context={"source": "hassio"},
    )

    # Hass.io discovery supplies everything, so only a confirmation is asked.
    assert result["type"] == "form"
    assert result["step_id"] == "hassio_confirm"
    assert result["description_placeholders"] == {"addon": "Mock Addon"}

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )

    assert result["type"] == "create_entry"
    # CONF_SERIAL is used for matching only and is not stored in the entry.
    assert result["result"].data == {
        config_flow.CONF_HOST: "mock-deconz",
        config_flow.CONF_PORT: 80,
        config_flow.CONF_API_KEY: API_KEY,
    }


async def test_hassio_discovery_update_configuration(hass):
    """Test we can update an existing config entry."""
    gateway = await setup_deconz_integration(hass)

    # Same serial, but new host/port/API key from the add-on.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        data={
            config_flow.CONF_HOST: "2.3.4.5",
            config_flow.CONF_PORT: 8080,
            config_flow.CONF_API_KEY: "updated",
            config_flow.CONF_SERIAL: BRIDGEID,
        },
        context={"source": "hassio"},
    )

    # Abort, but all three connection attributes are refreshed on the entry.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    assert gateway.config_entry.data[config_flow.CONF_HOST] == "2.3.4.5"
    assert gateway.config_entry.data[config_flow.CONF_PORT] == 8080
    assert gateway.config_entry.data[config_flow.CONF_API_KEY] == "updated"


async def test_hassio_discovery_dont_update_configuration(hass):
    """Test hassio discovery of an identically configured bridge aborts without changes."""
    await setup_deconz_integration(hass)

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        data={
            config_flow.CONF_HOST: "1.2.3.4",
            config_flow.CONF_PORT: 80,
            config_flow.CONF_API_KEY: API_KEY,
            config_flow.CONF_SERIAL: BRIDGEID,
        },
        context={"source": "hassio"},
    )

    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_option_flow(hass):
    """Test config flow options."""
    entry = MockConfigEntry(domain=config_flow.DOMAIN, data={}, options=None)
    # NOTE(review): appends directly to the private _entries list instead of
    # entry.add_to_hass(hass); relies on hass internals.
    hass.config_entries._entries.append(entry)

    flow = await hass.config_entries.options.async_create_flow(
        entry.entry_id, context={"source": "test"}, data=None
    )

    result = await flow.async_step_init()
    assert result["type"] == "form"
    assert result["step_id"] == "deconz_devices"

    result = await flow.async_step_deconz_devices(
        user_input={
            config_flow.CONF_ALLOW_CLIP_SENSOR: False,
            config_flow.CONF_ALLOW_DECONZ_GROUPS: False,
        }
    )

    # The submitted options are stored verbatim on the created entry.
    assert result["type"] == "create_entry"
    assert result["data"] == {
        config_flow.CONF_ALLOW_CLIP_SENSOR: False,
        config_flow.CONF_ALLOW_DECONZ_GROUPS: False,
    }
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _
from webnotes.utils import cint
import webnotes.model.doctype
from webnotes.model.doc import validate_name
@webnotes.whitelist()
def rename_doc(doctype, old, new, force=False, merge=False):
    """
    Renames a doc(dt, old) to doc(dt, new) and
    updates all linked fields of type "Link" or "Select" with "link:".

    force  - skip the allow_rename check on the DocType (cast with cint)
    merge  - merge into an existing doc `new` instead of renaming; the old
             doc is deleted afterwards
    Returns the final new name (controllers may alter it via on_rename).
    """
    # Nothing to do if the source doc does not exist.
    if not webnotes.conn.exists(doctype, old):
        return

    # Request parameters arrive as strings; normalise to 0/1.
    force = cint(force)
    merge = cint(merge)

    # get doclist of given doctype (needed for allow_rename and child tables)
    doclist = webnotes.model.doctype.get(doctype)

    # call on_rename hook; the controller may substitute its own new name
    obj = webnotes.get_obj(doctype, old)
    if hasattr(obj, 'on_rename'):
        new = obj.on_rename(new, old, merge) or new

    new = validate_rename(doctype, new, doclist, merge, force)

    # When merging, the target already exists, so no row rename is needed.
    if not merge:
        rename_parent_and_child(doctype, old, new, doclist)

    # update link fields' values across all doctypes that point here
    link_fields = get_link_fields(doctype)
    update_link_field_values(link_fields, old, new)

    # Renaming a DocType itself requires extra metadata fix-ups.
    if doctype == 'DocType':
        rename_doctype(doctype, old, new, force)

    update_attachments(doctype, old, new)

    # For a merge, drop the (now redundant) old document.
    if merge:
        webnotes.delete_doc(doctype, old)

    return new
def update_attachments(doctype, old, new):
    """Repoint File Data attachments from the old doc name to the new one.

    MySQL error 1054 (unknown column) is tolerated because during a patch
    the attached_to_* columns may not exist yet; anything else is re-raised.
    """
    try:
        webnotes.conn.sql("""update `tabFile Data` set attached_to_name=%s
            where attached_to_name=%s and attached_to_doctype=%s""", (new, old, doctype))
    # `except Exception, e` is Python-2-only syntax; `as` works on 2.6+ and 3.x.
    except Exception as e:
        if e.args[0] != 1054:  # in patch? (1054 = ER_BAD_FIELD_ERROR)
            # bare raise preserves the original traceback (raise e would reset it)
            raise
def rename_parent_and_child(doctype, old, new, doclist):
    """Rename the doc's own row, then repoint its child-table rows."""
    # rename the doc (table name is built from the trusted doctype, values
    # are passed as parameters)
    webnotes.conn.sql("update `tab%s` set name=%s where name=%s" \
        % (doctype, '%s', '%s'), (new, old))

    update_child_docs(old, new, doclist)


def validate_rename(doctype, new, doclist, merge, force):
    """Validate the target name and permissions; return the final new name.

    Raises (via msgprint with raise_exception) when:
    - merging into a target that does not exist,
    - renaming onto a name that already exists,
    - the user lacks write permission,
    - the DocType disallows renaming (unless force).
    """
    exists = webnotes.conn.exists(doctype, new)

    if merge and not exists:
        webnotes.msgprint("%s: %s does not exist, select a new target to merge." % (doctype, new), raise_exception=1)

    if (not merge) and exists:
        webnotes.msgprint("%s: %s exists, select a new, new name." % (doctype, new), raise_exception=1)

    if not webnotes.has_permission(doctype, "write"):
        webnotes.msgprint("You need write permission to rename", raise_exception=1)

    # doclist[0] is the DocType record itself.
    if not force and not doclist[0].allow_rename:
        webnotes.msgprint("%s cannot be renamed" % doctype, raise_exception=1)

    # validate naming like it's done in doc.py
    new = validate_name(doctype, new, merge=merge)

    return new
def rename_doctype(doctype, old, new, force=False):
    """Propagate a DocType rename through field metadata and comments."""
    # change options for fieldtype Table
    update_parent_of_fieldtype_table(old, new)

    # change options where select options are hardcoded i.e. listed
    select_fields = get_select_fields(old, new)
    update_link_field_values(select_fields, old, new)
    update_select_field_values(old, new)

    # change parenttype for fieldtype Table
    update_parenttype_values(old, new)

    # rename comments attached to the old doctype
    webnotes.conn.sql("""update tabComment set comment_doctype=%s where comment_doctype=%s""",
        (new, old))


def update_child_docs(old, new, doclist):
    """Update the `parent` column of every child table of the renamed doc."""
    # Child doctypes are the `options` of Table-type fields in the doclist.
    child_doctypes = (d.options for d in doclist
        if d.doctype == 'DocField' and d.fieldtype == 'Table')

    for child in child_doctypes:
        webnotes.conn.sql("update `tab%s` set parent=%s where parent=%s" \
            % (child, '%s', '%s'), (new, old))
def update_link_field_values(link_fields, old, new):
    """Rewrite stored values of the given link fields from `old` to `new`.

    link_fields is a list of dicts with keys parent, fieldname, issingle
    (as produced by get_link_fields / get_select_fields).
    """
    update_list = []

    # update values
    for field in link_fields:
        # if already updated, do not do it again (duplicates can appear when
        # custom-field and property-setter results overlap)
        if [field['parent'], field['fieldname']] in update_list:
            continue
        update_list.append([field['parent'], field['fieldname']])

        if field['issingle']:
            # Single doctypes store values as rows in tabSingles.
            webnotes.conn.sql("""\
                update `tabSingles` set value=%s
                where doctype=%s and field=%s and value=%s""",
                (new, field['parent'], field['fieldname'], old))
        else:
            # Table/column names are %-interpolated (they come from DocType
            # metadata); old/new values are parameterised.
            webnotes.conn.sql("""\
                update `tab%s` set `%s`=%s
                where `%s`=%s""" \
                % (field['parent'], field['fieldname'], '%s',
                    field['fieldname'], '%s'),
                (new, old))


def get_link_fields(doctype):
    """Collect all fields (standard, custom, property-setter overridden)
    that link to `doctype` via fieldtype Link or a "link:" Select.

    Returns dicts of parent, fieldname, issingle.
    """
    # get link fields from tabDocField
    # (the %%%% collapses to a literal % after two rounds of %-formatting)
    link_fields = webnotes.conn.sql("""\
        select parent, fieldname,
            (select ifnull(issingle, 0) from tabDocType dt
            where dt.name = df.parent) as issingle
        from tabDocField df
        where
            df.parent not like "old%%%%" and df.parent != '0' and
            ((df.options=%s and df.fieldtype='Link') or
            (df.options='link:%s' and df.fieldtype='Select'))""" \
        % ('%s', doctype), doctype, as_dict=1)

    # get link fields from tabCustom Field
    custom_link_fields = webnotes.conn.sql("""\
        select dt as parent, fieldname,
            (select ifnull(issingle, 0) from tabDocType dt
            where dt.name = df.dt) as issingle
        from `tabCustom Field` df
        where
            df.dt not like "old%%%%" and df.dt != '0' and
            ((df.options=%s and df.fieldtype='Link') or
            (df.options='link:%s' and df.fieldtype='Select'))""" \
        % ('%s', doctype), doctype, as_dict=1)

    # add custom link fields list to link fields list
    link_fields += custom_link_fields

    # include fields whose options have been changed using property setter
    property_setter_link_fields = webnotes.conn.sql("""\
        select ps.doc_type as parent, ps.field_name as fieldname,
            (select ifnull(issingle, 0) from tabDocType dt
            where dt.name = ps.doc_type) as issingle
        from `tabProperty Setter` ps
        where
            ps.property_type='options' and
            ps.field_name is not null and
            (ps.value=%s or ps.value='link:%s')""" \
        % ('%s', doctype), doctype, as_dict=1)

    link_fields += property_setter_link_fields

    return link_fields
def update_parent_of_fieldtype_table(old, new):
    """Repoint Table-type fields (and their property setters) whose child
    doctype `options` equals the old name."""
    webnotes.conn.sql("""\
        update `tabDocField` set options=%s
        where fieldtype='Table' and options=%s""", (new, old))

    webnotes.conn.sql("""\
        update `tabCustom Field` set options=%s
        where fieldtype='Table' and options=%s""", (new, old))

    webnotes.conn.sql("""\
        update `tabProperty Setter` set value=%s
        where property='options' and value=%s""", (new, old))


def get_select_fields(old, new):
    """
    get select type fields where doctype's name is hardcoded as
    new line separated list

    Excludes "link:" selects (handled by get_link_fields) and fields that
    belong to the doctype being renamed itself.
    Returns dicts of parent, fieldname, issingle.
    """
    # get select fields from tabDocField
    # (%%%% collapses to a literal % after two formatting passes)
    select_fields = webnotes.conn.sql("""\
        select parent, fieldname,
            (select ifnull(issingle, 0) from tabDocType dt
            where dt.name = df.parent) as issingle
        from tabDocField df
        where
            df.parent not like "old%%%%" and df.parent != '0' and
            df.parent != %s and df.fieldtype = 'Select' and
            df.options not like "link:%%%%" and
            (df.options like "%%%%%s%%%%")""" \
        % ('%s', old), new, as_dict=1)

    # get select fields from tabCustom Field
    custom_select_fields = webnotes.conn.sql("""\
        select dt as parent, fieldname,
            (select ifnull(issingle, 0) from tabDocType dt
            where dt.name = df.dt) as issingle
        from `tabCustom Field` df
        where
            df.dt not like "old%%%%" and df.dt != '0' and
            df.dt != %s and df.fieldtype = 'Select' and
            df.options not like "link:%%%%" and
            (df.options like "%%%%%s%%%%")""" \
        % ('%s', old), new, as_dict=1)

    # add custom select fields list to select fields list
    select_fields += custom_select_fields

    # include fields whose options have been changed using property setter
    property_setter_select_fields = webnotes.conn.sql("""\
        select ps.doc_type as parent, ps.field_name as fieldname,
            (select ifnull(issingle, 0) from tabDocType dt
            where dt.name = ps.doc_type) as issingle
        from `tabProperty Setter` ps
        where
            ps.doc_type != %s and
            ps.property_type='options' and
            ps.field_name is not null and
            ps.value not like "link:%%%%" and
            (ps.value like "%%%%%s%%%%")""" \
        % ('%s', old), new, as_dict=1)

    select_fields += property_setter_select_fields

    return select_fields
def update_select_field_values(old, new):
    """Replace the old doctype name inside newline-separated Select options.

    The LIKE patterns match the old name only when preceded or followed by a
    newline, to avoid substring hits inside longer option names.
    """
    webnotes.conn.sql("""\
        update `tabDocField` set options=replace(options, %s, %s)
        where
            parent != %s and parent not like "old%%%%" and
            fieldtype = 'Select' and options not like "link:%%%%" and
            (options like "%%%%\\n%s%%%%" or options like "%%%%%s\\n%%%%")""" % \
        ('%s', '%s', '%s', old, old), (old, new, new))

    webnotes.conn.sql("""\
        update `tabCustom Field` set options=replace(options, %s, %s)
        where
            dt != %s and dt not like "old%%%%" and
            fieldtype = 'Select' and options not like "link:%%%%" and
            (options like "%%%%\\n%s%%%%" or options like "%%%%%s\\n%%%%")""" % \
        ('%s', '%s', '%s', old, old), (old, new, new))

    webnotes.conn.sql("""\
        update `tabProperty Setter` set value=replace(value, %s, %s)
        where
            doc_type != %s and field_name is not null and
            property='options' and value not like "link%%%%" and
            (value like "%%%%\\n%s%%%%" or value like "%%%%%s\\n%%%%")""" % \
        ('%s', '%s', '%s', old, old), (old, new, new))


def update_parenttype_values(old, new):
    """Fix `parenttype` in all child tables of a renamed DocType."""
    # Child tables declared on the (already renamed) DocType itself.
    child_doctypes = webnotes.conn.sql("""\
        select options, fieldname from `tabDocField`
        where parent=%s and fieldtype='Table'""", new, as_dict=1)

    custom_child_doctypes = webnotes.conn.sql("""\
        select options, fieldname from `tabCustom Field`
        where dt=%s and fieldtype='Table'""", new, as_dict=1)

    child_doctypes += custom_child_doctypes
    fields = [d['fieldname'] for d in child_doctypes]

    # Property setters may override which child doctype a Table field uses.
    # NOTE(review): if `fields` is empty this becomes `field_name in ("")`,
    # which matches nothing — presumably intentional, but worth confirming.
    property_setter_child_doctypes = webnotes.conn.sql("""\
        select value as options from `tabProperty Setter`
        where doc_type=%s and property='options' and
        field_name in ("%s")""" % ('%s', '", "'.join(fields)),
        new)

    child_doctypes += property_setter_child_doctypes
    child_doctypes = (d['options'] for d in child_doctypes)

    for doctype in child_doctypes:
        webnotes.conn.sql("""\
            update `tab%s` set parenttype=%s
            where parenttype=%s""" % (doctype, '%s', '%s'),
            (new, old))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
import six
from heat.common import exception
from heat.common import identifier
from heat.engine.clients.os import keystone
from heat.engine import dependencies
from heat.engine import resource as res
from heat.engine import service
from heat.engine import stack
from heat.engine import stack_lock
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import fakes as test_fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "alarming",
"Resources" : {
"WebServerScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : "",
"Cooldown" : "60",
"ScalingAdjustment" : "-1"
}
},
"Random" : {
"Type" : "OS::Heat::RandomString"
}
}
}
'''
class StackResourcesServiceTest(common.HeatTestCase):
def setUp(self):
    """Create a fresh engine service wired to a dummy thread group manager."""
    super(StackResourcesServiceTest, self).setUp()
    self.ctx = utils.dummy_context(tenant_id='stack_resource_test_tenant')
    self.eng = service.EngineService('a-host', 'a-topic')
    # The dummy manager runs actions synchronously, keeping tests deterministic.
    self.eng.thread_group_mgr = tools.DummyThreadGroupManager()
    self.eng.engine_id = 'engine-fake-uuid'
    cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
@mock.patch.object(stack.Stack, 'load')
def _test_describe_stack_resource(self, mock_load):
    """Shared assertions: describe self.stack's WebServer resource."""
    mock_load.return_value = self.stack

    # Patch _resolve_all_attributes or it tries to call novaclient
    self.patchobject(res.Resource, '_resolve_all_attributes',
                     return_value=None)

    r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
                                         'WebServer', with_attr=None)

    # The RPC payload must expose the full resource description.
    self.assertIn('resource_identity', r)
    self.assertIn('description', r)
    self.assertIn('updated_time', r)
    self.assertIn('stack_identity', r)
    self.assertIsNotNone(r['stack_identity'])
    self.assertIn('stack_name', r)
    self.assertEqual(self.stack.name, r['stack_name'])
    self.assertIn('metadata', r)
    self.assertIn('resource_status', r)
    self.assertIn('resource_status_reason', r)
    self.assertIn('resource_type', r)
    self.assertIn('physical_resource_id', r)
    self.assertIn('resource_name', r)
    self.assertEqual('WebServer', r['resource_name'])
    mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)

@tools.stack_context('service_stack_resource_describe__test_stack')
def test_stack_resource_describe(self):
    """Describe a resource of a fully created stack."""
    self._test_describe_stack_resource()
@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resource_describe_nonexist_stack(self, mock_get):
    """A missing stack surfaces EntityNotFound through the RPC dispatcher."""
    non_exist_identifier = identifier.HeatIdentifier(
        self.ctx.tenant_id, 'wibble',
        '18d06e2e-44d3-4bef-9fbf-52480d604b02')
    mock_get.side_effect = exception.EntityNotFound(
        entity='Stack', name='test')

    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.describe_stack_resource,
                           self.ctx, non_exist_identifier, 'WebServer')
    # ExpectedException wraps the real exception; exc_info[0] is its type.
    self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
    mock_get.assert_called_once_with(self.ctx, non_exist_identifier)

@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resource_describe_nonexist_test_stack')
def test_stack_resource_describe_nonexist_resource(self, mock_load):
    """Asking for an unknown resource name raises ResourceNotFound."""
    mock_load.return_value = self.stack

    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.describe_stack_resource,
                           self.ctx, self.stack.identifier(), 'foo')
    self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
    mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)

@tools.stack_context('service_resource_describe_noncreated_test_stack',
                     create_res=False)
def test_stack_resource_describe_noncreated_resource(self):
    """Describe still works when the stack's resources were never created."""
    self._test_describe_stack_resource()
@mock.patch.object(service.EngineService, '_authorize_stack_user')
@tools.stack_context('service_resource_describe_user_deny_test_stack')
def test_stack_resource_describe_stack_user_deny(self, mock_auth):
    """A stack-user context that fails authorization gets Forbidden."""
    # Pretend the caller only has the restricted heat_stack_user role.
    self.ctx.roles = [cfg.CONF.heat_stack_user_role]
    mock_auth.return_value = False

    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.describe_stack_resource,
                           self.ctx, self.stack.identifier(), 'foo')
    self.assertEqual(exception.Forbidden, ex.exc_info[0])
    mock_auth.assert_called_once_with(self.ctx, mock.ANY, 'foo')
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_describe_test_stack')
def test_stack_resources_describe(self, mock_load):
    """describe_stack_resources returns full details for a named resource."""
    mock_load.return_value = self.stack

    resources = self.eng.describe_stack_resources(self.ctx,
                                                  self.stack.identifier(),
                                                  'WebServer')

    self.assertEqual(1, len(resources))
    r = resources[0]
    self.assertIn('resource_identity', r)
    self.assertIn('description', r)
    self.assertIn('updated_time', r)
    self.assertIn('stack_identity', r)
    self.assertIsNotNone(r['stack_identity'])
    self.assertIn('stack_name', r)
    self.assertEqual(self.stack.name, r['stack_name'])
    self.assertIn('resource_status', r)
    self.assertIn('resource_status_reason', r)
    self.assertIn('resource_type', r)
    self.assertIn('physical_resource_id', r)
    self.assertIn('resource_name', r)
    self.assertEqual('WebServer', r['resource_name'])
    mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)

@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_describe_no_filter_test_stack')
def test_stack_resources_describe_no_filter(self, mock_load):
    """With no name filter every resource of the stack is returned."""
    mock_load.return_value = self.stack

    resources = self.eng.describe_stack_resources(
        self.ctx, self.stack.identifier(), None)

    self.assertEqual(1, len(resources))
    r = resources[0]
    self.assertIn('resource_name', r)
    self.assertEqual('WebServer', r['resource_name'])
    mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resources_describe_bad_lookup(self, mock_get):
    """Unexpected lookup errors (here TypeError) propagate unwrapped."""
    mock_get.side_effect = TypeError

    self.assertRaises(TypeError,
                      self.eng.describe_stack_resources,
                      self.ctx, None, 'WebServer')
    mock_get.assert_called_once_with(self.ctx, None)

def test_stack_resources_describe_nonexist_stack(self):
    """Describing resources of a nonexistent stack raises EntityNotFound."""
    non_exist_identifier = identifier.HeatIdentifier(
        self.ctx.tenant_id, 'wibble',
        '18d06e2e-44d3-4bef-9fbf-52480d604b02')

    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.describe_stack_resources,
                           self.ctx, non_exist_identifier, 'WebServer')
    self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
@tools.stack_context('find_phys_res_stack')
def test_find_physical_resource(self):
    """A physical resource id maps back to its resource identity."""
    resources = self.eng.describe_stack_resources(self.ctx,
                                                  self.stack.identifier(),
                                                  None)
    phys_id = resources[0]['physical_resource_id']

    result = self.eng.find_physical_resource(self.ctx, phys_id)

    # The RPC result is a plain dict that round-trips to a ResourceIdentifier.
    self.assertIsInstance(result, dict)
    resource_identity = identifier.ResourceIdentifier(**result)
    self.assertEqual(self.stack.identifier(), resource_identity.stack())
    self.assertEqual('WebServer', resource_identity.resource_name)

def test_find_physical_resource_nonexist(self):
    """An unknown physical id raises EntityNotFound."""
    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.find_physical_resource,
                           self.ctx, 'foo')
    self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_list(self, mock_load):
    """list_stack_resources returns the summary view of each resource."""
    mock_load.return_value = self.stack

    resources = self.eng.list_stack_resources(self.ctx,
                                              self.stack.identifier())

    self.assertEqual(1, len(resources))
    r = resources[0]
    self.assertIn('resource_identity', r)
    self.assertIn('updated_time', r)
    self.assertIn('physical_resource_id', r)
    self.assertIn('resource_name', r)
    self.assertEqual('WebServer', r['resource_name'])
    self.assertIn('resource_status', r)
    self.assertIn('resource_status_reason', r)
    self.assertIn('resource_type', r)
    mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)

@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_depth')
def test_stack_resources_list_with_depth(self, mock_load):
    """The requested nested depth is passed through to iter_resources."""
    mock_load.return_value = self.stack
    resources = six.itervalues(self.stack)
    self.stack.iter_resources = mock.Mock(return_value=resources)

    self.eng.list_stack_resources(self.ctx,
                                  self.stack.identifier(),
                                  2)

    self.stack.iter_resources.assert_called_once_with(2,
                                                      filters=None)

@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_max_depth')
def test_stack_resources_list_with_max_depth(self, mock_load):
    """An excessive depth request is clamped to max_nested_stack_depth."""
    mock_load.return_value = self.stack
    resources = six.itervalues(self.stack)
    self.stack.iter_resources = mock.Mock(return_value=resources)

    self.eng.list_stack_resources(self.ctx,
                                  self.stack.identifier(),
                                  99)

    max_depth = cfg.CONF.max_nested_stack_depth
    self.stack.iter_resources.assert_called_once_with(max_depth,
                                                      filters=None)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_filter_type(self, mock_load):
    """The 'type' filter is applied by the service, not by iter_resources."""
    mock_load.return_value = self.stack
    resources = six.itervalues(self.stack)
    self.stack.iter_resources = mock.Mock(return_value=resources)

    filters = {'type': 'AWS::EC2::Instance'}
    resources = self.eng.list_stack_resources(self.ctx,
                                              self.stack.identifier(),
                                              filters=filters)

    # 'type' is stripped before iteration (hence filters={}) and matched
    # against each resource's type afterwards.
    self.stack.iter_resources.assert_called_once_with(
        0, filters={})
    self.assertIn('AWS::EC2::Instance', resources[0]['resource_type'])

@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_filter_type_not_found(self, mock_load):
    """A 'type' filter matching nothing yields an empty list."""
    mock_load.return_value = self.stack
    resources = six.itervalues(self.stack)
    self.stack.iter_resources = mock.Mock(return_value=resources)

    filters = {'type': 'NonExisted'}
    resources = self.eng.list_stack_resources(self.ctx,
                                              self.stack.identifier(),
                                              filters=filters)

    self.stack.iter_resources.assert_called_once_with(
        0, filters={})
    self.assertEqual(0, len(resources))
@mock.patch.object(stack.Stack, 'load')
def test_stack_resources_list_deleted_stack(self, mock_load):
    """Resources of a deleted stack are still listable (show_deleted path)."""
    stk = tools.setup_stack('resource_list_deleted_stack', self.ctx)
    stack_id = stk.identifier()
    mock_load.return_value = stk

    # Delete the stack, then list by the saved identifier.
    tools.clean_up_stack(stk)

    resources = self.eng.list_stack_resources(self.ctx, stack_id)

    self.assertEqual(1, len(resources))
    res = resources[0]
    self.assertEqual('DELETE', res['resource_action'])
    self.assertEqual('COMPLETE', res['resource_status'])

@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resources_list_nonexist_stack(self, mock_get):
    """Listing resources of a nonexistent stack raises EntityNotFound."""
    non_exist_identifier = identifier.HeatIdentifier(
        self.ctx.tenant_id, 'wibble',
        '18d06e2e-44d3-4bef-9fbf-52480d604b02')
    mock_get.side_effect = exception.EntityNotFound(entity='Stack',
                                                    name='test')

    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.list_stack_resources,
                           self.ctx, non_exist_identifier)
    self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
    # list must look up with show_deleted=True (see deleted-stack test above).
    mock_get.assert_called_once_with(self.ctx, non_exist_identifier,
                                     show_deleted=True)
def _stack_create(self, stack_name):
    """Create and store a policy_template stack; patch keystone and
    _get_stack so signal tests can run without real services."""
    self.patchobject(keystone.KeystoneClientPlugin, '_create',
                     return_value=test_fakes.FakeKeystoneClient())

    stk = tools.get_stack(stack_name, self.ctx, policy_template)
    stk.store()
    stk.create()

    s = stack_object.Stack.get_by_id(self.ctx, stk.id)
    self.patchobject(service.EngineService, '_get_stack', return_value=s)
    return stk
def test_signal_reception_async(self):
    """resource_signal without sync_call queues work on the thread group."""
    # This manager records started threads instead of running them.
    self.eng.thread_group_mgr = tools.DummyThreadGroupMgrLogStart()
    stack_name = 'signal_reception_async'
    self.stack = self._stack_create(stack_name)
    test_data = {'food': 'yum'}

    self.eng.resource_signal(self.ctx,
                             dict(self.stack.identifier()),
                             'WebServerScaleDownPolicy',
                             test_data)

    self.assertEqual([(self.stack.id, mock.ANY)],
                     self.eng.thread_group_mgr.started)

@mock.patch.object(res.Resource, 'signal')
def test_signal_reception_sync(self, mock_signal):
    """With sync_call=True the resource is signalled in-line."""
    mock_signal.return_value = None
    stack_name = 'signal_reception_sync'
    self.stack = self._stack_create(stack_name)
    test_data = {'food': 'yum'}

    self.eng.resource_signal(self.ctx,
                             dict(self.stack.identifier()),
                             'WebServerScaleDownPolicy',
                             test_data,
                             sync_call=True)
    mock_signal.assert_called_once_with(mock.ANY, False)

def test_signal_reception_no_resource(self):
    """Signalling a resource that is not in the template raises."""
    stack_name = 'signal_reception_no_resource'
    self.stack = self._stack_create(stack_name)
    test_data = {'food': 'yum'}

    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.resource_signal, self.ctx,
                           dict(self.stack.identifier()),
                           'resource_does_not_exist',
                           test_data)
    self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'load')
@mock.patch.object(service.EngineService, '_get_stack')
def test_signal_reception_unavailable_resource(self, mock_get, mock_load):
    """Signalling a stored-but-never-created resource is rejected."""
    stack_name = 'signal_reception_unavailable_resource'
    # Store the stack WITHOUT calling create(), so the resource has no
    # physical id and cannot accept signals.
    stk = tools.get_stack(stack_name, self.ctx, policy_template)
    stk.store()
    self.stack = stk

    s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
    mock_load.return_value = stk
    mock_get.return_value = s
    test_data = {'food': 'yum'}

    ex = self.assertRaises(dispatcher.ExpectedException,
                           self.eng.resource_signal, self.ctx,
                           dict(self.stack.identifier()),
                           'WebServerScaleDownPolicy',
                           test_data)

    self.assertEqual(exception.ResourceNotAvailable, ex.exc_info[0])
    mock_load.assert_called_once_with(self.ctx, stack=mock.ANY,
                                      use_stored_context=mock.ANY)
    mock_get.assert_called_once_with(self.ctx, self.stack.identifier())

@mock.patch.object(res.Resource, 'signal')
def test_signal_returns_metadata(self, mock_signal):
    """A synchronous signal returns the resource's current metadata."""
    mock_signal.return_value = None
    self.stack = self._stack_create('signal_reception')
    rsrc = self.stack['WebServerScaleDownPolicy']
    test_metadata = {'food': 'yum'}
    rsrc.metadata_set(test_metadata)

    md = self.eng.resource_signal(self.ctx,
                                  dict(self.stack.identifier()),
                                  'WebServerScaleDownPolicy', None,
                                  sync_call=True)

    self.assertEqual(test_metadata, md)
    mock_signal.assert_called_once_with(mock.ANY, False)
def test_signal_unset_invalid_hook(self):
self.stack = self._stack_create('signal_unset_invalid_hook')
details = {'unset_hook': 'invalid_hook'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal,
self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
details)
msg = 'Invalid hook type "invalid_hook"'
self.assertIn(msg, six.text_type(ex.exc_info[1]))
self.assertEqual(exception.InvalidBreakPointHook,
ex.exc_info[0])
def test_signal_unset_not_defined_hook(self):
self.stack = self._stack_create('signal_unset_not_defined_hook')
details = {'unset_hook': 'pre-update'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal,
self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
details)
msg = ('The "pre-update" hook is not defined on '
'AWSScalingPolicy "WebServerScaleDownPolicy"')
self.assertIn(msg, six.text_type(ex.exc_info[1]))
self.assertEqual(exception.InvalidBreakPointHook,
ex.exc_info[0])
    @mock.patch.object(res.Resource, 'metadata_update')
    @mock.patch.object(res.Resource, 'signal')
    @mock.patch.object(service.EngineService, '_get_stack')
    def test_signal_calls_metadata_update(self, mock_get, mock_signal,
                                          mock_update):
        """When Resource.signal returns True, metadata_update is invoked."""
        # fake keystone client
        self.patchobject(keystone.KeystoneClientPlugin, '_create',
                         return_value=test_fakes.FakeKeystoneClient())
        stk = tools.get_stack('signal_reception', self.ctx, policy_template)
        self.stack = stk
        stk.store()
        stk.create()
        s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
        mock_get.return_value = s
        # A True return from signal() indicates the metadata needs refreshing.
        mock_signal.return_value = True
        # this will be called once for the Random resource
        mock_update.return_value = None
        self.eng.resource_signal(self.ctx,
                                 dict(self.stack.identifier()),
                                 'WebServerScaleDownPolicy', None,
                                 sync_call=True)
        mock_get.assert_called_once_with(self.ctx, self.stack.identifier())
        mock_signal.assert_called_once_with(mock.ANY, False)
        mock_update.assert_called_once_with()
    @mock.patch.object(res.Resource, 'metadata_update')
    @mock.patch.object(res.Resource, 'signal')
    @mock.patch.object(service.EngineService, '_get_stack')
    def test_signal_no_calls_metadata_update(self, mock_get, mock_signal,
                                             mock_update):
        """When Resource.signal returns False, metadata_update is skipped."""
        # fake keystone client
        self.patchobject(keystone.KeystoneClientPlugin, '_create',
                         return_value=test_fakes.FakeKeystoneClient())
        stk = tools.get_stack('signal_reception', self.ctx, policy_template)
        self.stack = stk
        stk.store()
        stk.create()
        s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
        mock_get.return_value = s
        # A False return from signal() means metadata does not need a refresh.
        mock_signal.return_value = False
        self.eng.resource_signal(self.ctx,
                                 dict(self.stack.identifier()),
                                 'WebServerScaleDownPolicy', None,
                                 sync_call=True)
        mock_get.assert_called_once_with(self.ctx, self.stack.identifier())
        mock_signal.assert_called_once_with(mock.ANY, False)
        # this will never be called
        self.assertEqual(0, mock_update.call_count)
def test_lazy_load_resources(self):
stack_name = 'lazy_load_test'
lazy_load_template = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'foo'},
}
}
}
}
templ = templatem.Template(lazy_load_template)
stk = stack.Stack(self.ctx, stack_name, templ)
self.assertIsNone(stk._resources)
self.assertIsNone(stk._dependencies)
resources = stk.resources
self.assertIsInstance(resources, dict)
self.assertEqual(2, len(resources))
self.assertIsInstance(resources.get('foo'),
generic_rsrc.GenericResource)
self.assertIsInstance(resources.get('bar'),
generic_rsrc.ResourceWithProps)
stack_dependencies = stk.dependencies
self.assertIsInstance(stack_dependencies, dependencies.Dependencies)
self.assertEqual(2, len(stack_dependencies.graph()))
@tools.stack_context('service_mark_healthy_create_complete_test_stk')
def test_mark_healthy_in_create_complete(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', False,
resource_status_reason='noop')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertIn('resource_action', r)
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertEqual(r['resource_action'], 'CREATE')
self.assertEqual(r['resource_status'], 'COMPLETE')
self.assertEqual(r['resource_status_reason'], 'state changed')
@tools.stack_context('service_mark_unhealthy_create_complete_test_stk')
def test_mark_unhealthy_in_create_complete(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason='Some Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'], 'Some Reason')
@tools.stack_context('service_mark_healthy_check_failed_test_stk')
def test_mark_healthy_check_failed(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason='Some Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'], 'Some Reason')
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', False,
resource_status_reason='Good Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'COMPLETE')
self.assertEqual(r['resource_status_reason'], 'Good Reason')
@tools.stack_context('service_mark_unhealthy_check_failed_test_stack')
def test_mark_unhealthy_check_failed(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason='Some Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'], 'Some Reason')
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason='New Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'], 'New Reason')
@tools.stack_context('service_mark_unhealthy_invalid_value_test_stk')
def test_mark_unhealthy_invalid_value(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_mark_unhealthy,
self.ctx,
self.stack.identifier(),
'WebServer', "This is wrong",
resource_status_reason="Some Reason")
self.assertEqual(exception.Invalid, ex.exc_info[0])
@tools.stack_context('service_mark_unhealthy_none_reason_test_stk')
def test_mark_unhealthy_none_reason(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True)
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'],
'state changed by resource_mark_unhealthy api')
@tools.stack_context('service_mark_unhealthy_empty_reason_test_stk')
def test_mark_unhealthy_empty_reason(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'],
'state changed by resource_mark_unhealthy api')
@tools.stack_context('service_mark_unhealthy_lock_no_converge_test_stk')
def test_mark_unhealthy_lock_no_convergence(self):
mock_acquire = self.patchobject(stack_lock.StackLock,
'acquire',
return_value=None)
mock_release = self.patchobject(stack_lock.StackLock,
'release',
return_value=None)
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
mock_acquire.assert_called_once_with()
mock_release.assert_called_once_with()
@tools.stack_context('service_mark_unhealthy_lock_converge_test_stk',
convergence=True)
def test_mark_unhealthy_stack_lock_convergence(self):
mock_acquire = self.patchobject(res.Resource,
'_acquire',
return_value=None)
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
mock_acquire.assert_called_once_with(self.eng.engine_id)
@tools.stack_context('service_mark_unhealthy_lockexc_converge_test_stk',
convergence=True)
def test_mark_unhealthy_stack_lock_exc_convergence(self):
def _acquire(*args, **kwargs):
raise exception.UpdateInProgress(self.stack.name)
self.patchobject(
res.Resource,
'_acquire',
return_value=None,
side_effect=exception.UpdateInProgress(self.stack.name))
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_mark_unhealthy,
self.ctx,
self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
self.assertEqual(exception.ActionInProgress, ex.exc_info[0])
@tools.stack_context('service_mark_unhealthy_lockexc_no_converge_test_stk')
def test_mark_unhealthy_stack_lock_exc_no_convergence(self):
self.patchobject(
stack_lock.StackLock,
'acquire',
return_value=None,
side_effect=exception.ActionInProgress(
stack_name=self.stack.name,
action=self.stack.action))
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_mark_unhealthy,
self.ctx,
self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
self.assertEqual(exception.ActionInProgress, ex.exc_info[0])
|
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import time
import tempfile
from abc import abstractmethod
from inspect import getsource
from textwrap import dedent
from uuid import uuid4
import pytest
from toil.provisioners import cluster_factory
from toil.provisioners.aws import get_current_aws_zone
from toil.provisioners.aws.awsProvisioner import AWSProvisioner
from toil.test import (ToilTest,
integrative,
needs_fetchable_appliance,
needs_aws_ec2,
slow,
timeLimit)
from toil.version import exactPython
log = logging.getLogger(__name__)
class AWSProvisionerBenchTest(ToilTest):
    """
    Tests for the AWS provisioner that don't actually provision anything.
    """

    # Needs to talk to EC2 for image discovery
    @needs_aws_ec2
    def testAMIFinding(self):
        """AMI discovery works in several regions and returns an ami-* ID."""
        zones = ['us-west-2a', 'eu-central-1a', 'sa-east-1b']
        for zone in zones:
            provisioner = AWSProvisioner('fakename', 'mesos', zone, 10000,
                                         None, None)
            discovered = provisioner._discoverAMI()
            # Make sure we got an AMI and it looks plausible
            assert discovered.startswith('ami-')
@needs_aws_ec2
@needs_fetchable_appliance
@slow
@integrative
class AbstractAWSAutoscaleTest(ToilTest):
    """
    Base class for integration tests that launch a real AWS cluster, install
    and run a Toil user script on the leader over SSH, and destroy the
    cluster afterwards.
    """

    def __init__(self, methodName):
        super(AbstractAWSAutoscaleTest, self).__init__(methodName=methodName)
        self.keyName = os.environ.get('TOIL_AWS_KEYNAME', 'id_rsa')
        self.instanceTypes = ["m5a.large"]
        self.clusterName = 'aws-provisioner-test-' + str(uuid4())
        self.numWorkers = ['2']
        self.numSamples = 2
        self.spotBid = 0.15
        self.zone = get_current_aws_zone()
        assert self.zone is not None, "Could not determine AWS availability zone to test in; is TOIL_AWS_ZONE set?"
        # We can't dump our user script right in /tmp or /home, because hot
        # deploy refuses to zip up those whole directories. So we make sure to
        # have a subdirectory to upload the script to.
        self.scriptDir = '/tmp/t'
        # Where should we put our virtualenv?
        self.venvDir = '/tmp/venv'
        # Where should we put our data to work on?
        # Must exist in the Toil container; the leader will try to rsync to it
        # (for the SSE key) and not create it.
        self.dataDir = '/tmp'
        # What filename should we use for our script (without path)?
        # Can be changed by derived tests.
        self.scriptName = 'test_script.py'

    def python(self):
        """
        Return the full path to the venv Python on the leader.
        """
        return os.path.join(self.venvDir, 'bin/python')

    def pip(self):
        """
        Return the full path to the venv pip on the leader.
        """
        return os.path.join(self.venvDir, 'bin/pip')

    def script(self):
        """
        Return the full path to the user script on the leader.
        """
        return os.path.join(self.scriptDir, self.scriptName)

    def data(self, filename):
        """
        Return the full path to the data file with the given name on the leader.
        """
        return os.path.join(self.dataDir, filename)

    def destroyCluster(self):
        """
        Destroy the cluster we built, if it exists.

        Succeeds if the cluster does not currently exist.
        """
        subprocess.check_call(['toil', 'destroy-cluster', '-p=aws', '-z', self.zone, self.clusterName])

    def setUp(self):
        """
        Set up for the test.

        Must be overridden to call this method and set self.jobStore.
        """
        super(AbstractAWSAutoscaleTest, self).setUp()
        # Make sure that destroy works before we create any clusters.
        # If this fails, no tests will run.
        self.destroyCluster()

    def tearDown(self):
        # Note that teardown will run even if the test crashes.
        super(AbstractAWSAutoscaleTest, self).tearDown()
        self.destroyCluster()
        subprocess.check_call(['toil', 'clean', self.jobStore])

    def sshUtil(self, command):
        """
        Run the given command on the cluster, streaming its output to the log.

        Raise subprocess.CalledProcessError if it fails.
        """
        cmd = ['toil', 'ssh-cluster', '--insecure', '-p=aws', '-z', self.zone, self.clusterName] + command
        log.info("Running %s.", str(cmd))
        p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        # Put in non-blocking mode. See https://stackoverflow.com/a/59291466
        os.set_blocking(p.stdout.fileno(), False)
        os.set_blocking(p.stderr.fileno(), False)
        out_buffer = b''
        err_buffer = b''
        loops_since_line = 0
        running = True
        while running:
            # While the process is running, see if it stopped
            # (bug fix: use "is None" rather than "== None")
            running = (p.poll() is None)
            # Also collect its output
            out_data = p.stdout.read()
            if out_data:
                out_buffer += out_data
            while out_buffer.find(b'\n') != -1:
                # And log every full line
                cut = out_buffer.find(b'\n')
                log.info('STDOUT: %s', out_buffer[0:cut].decode('utf-8', errors='ignore'))
                loops_since_line = 0
                out_buffer = out_buffer[cut+1:]
            # Same for the error
            err_data = p.stderr.read()
            if err_data:
                err_buffer += err_data
            while err_buffer.find(b'\n') != -1:
                cut = err_buffer.find(b'\n')
                log.info('STDERR: %s', err_buffer[0:cut].decode('utf-8', errors='ignore'))
                loops_since_line = 0
                err_buffer = err_buffer[cut+1:]
            loops_since_line += 1
            if loops_since_line > 60:
                log.debug('...waiting...')
                loops_since_line = 0
            time.sleep(1)
        # At the end, log the last lines
        if out_buffer:
            log.info('STDOUT: %s', out_buffer.decode('utf-8', errors='ignore'))
        if err_buffer:
            # Bug fix: this trailing stderr output used to be mislabelled
            # as STDOUT in the log.
            log.info('STDERR: %s', err_buffer.decode('utf-8', errors='ignore'))
        if p.returncode != 0:
            # It failed
            log.error("Failed to run %s.", str(cmd))
            raise subprocess.CalledProcessError(p.returncode, ' '.join(cmd))

    def rsyncUtil(self, src, dest):
        """Rsync src to dest on the cluster leader."""
        subprocess.check_call(['toil', 'rsync-cluster', '--insecure', '-p=aws', '-z', self.zone, self.clusterName] + [src, dest])

    def createClusterUtil(self, args=None):
        """Launch the cluster, passing any extra args to toil launch-cluster."""
        args = [] if args is None else args
        command = ['toil', 'launch-cluster', '-p=aws', '-z', self.zone, f'--keyPairName={self.keyName}',
                   '--leaderNodeType=t2.medium', self.clusterName] + args
        log.debug('Launching cluster: %s', command)
        # Try creating the cluster
        subprocess.check_call(command)
        # If we fail, tearDown will destroy the cluster.

    def getMatchingRoles(self):
        """Return the IAM roles that belong to this cluster."""
        return list(self.cluster._boto2.local_roles())

    def launchCluster(self):
        self.createClusterUtil()

    def getRootVolID(self):
        """Return the EBS volume ID of the leader's root block device."""
        instances = self.cluster._getNodesInCluster(both=True)
        instances.sort(key=lambda x: x.launch_time)
        leader = instances[0]  # assume leader was launched first
        from boto.ec2.blockdevicemapping import BlockDeviceType
        rootBlockDevice = leader.block_device_mapping["/dev/xvda"]
        assert isinstance(rootBlockDevice, BlockDeviceType)
        return rootBlockDevice.volume_id

    @abstractmethod
    def _getScript(self):
        """Download the test script needed by the inheriting unit test class."""
        raise NotImplementedError()

    def putScript(self, content: str):
        """
        Helper method for _getScript to inject a script file at the configured script path, from text.
        """
        cluster = cluster_factory(provisioner='aws', zone=self.zone, clusterName=self.clusterName)
        leader = cluster.getLeader()
        self.sshUtil(['mkdir', '-p', self.scriptDir])
        with tempfile.NamedTemporaryFile(mode='w') as t:
            # use appliance ssh method instead of sshutil so we can specify input param
            t.write(content)
            # This works to make writes visible on non-Windows
            t.flush()
            leader.injectFile(t.name, self.script(), 'toil_leader')

    @abstractmethod
    def _runScript(self, toilOptions):
        """
        Modify the provided Toil options to suit the test Toil script, then run the script with
        those arguments.

        :param toilOptions: List of Toil command line arguments. This list may need to be
                            modified to suit the test script's requirements.
        """
        raise NotImplementedError()

    def _test(self, preemptableJobs=False):
        """Does the work of the testing. Many features' tests are thrown in here in no particular order."""
        self.launchCluster()
        # get the leader so we know the IP address - we don't need to wait since create cluster
        # already insures the leader is running
        self.cluster = cluster_factory(provisioner='aws', zone=self.zone, clusterName=self.clusterName)
        self.leader = self.cluster.getLeader()
        self.sshUtil(['mkdir', '-p', self.scriptDir])
        self.sshUtil(['mkdir', '-p', self.dataDir])

        assert len(self.getMatchingRoles()) == 1
        # --never-download prevents silent upgrades to pip, wheel and setuptools
        venv_command = ['virtualenv', '--system-site-packages', '--python', exactPython, '--never-download', self.venvDir]
        self.sshUtil(venv_command)

        upgrade_command = [self.pip(), 'install', 'setuptools==28.7.1', 'pyyaml==3.12']
        self.sshUtil(upgrade_command)

        log.info('Set up script...')
        self._getScript()

        toilOptions = [self.jobStore,
                       '--workDir=/var/lib/toil',
                       '--clean=always',
                       '--retryCount=2',
                       '--logDebug',
                       '--logFile=' + os.path.join(self.scriptDir, 'sort.log')
                       ]

        if preemptableJobs:
            toilOptions.extend(['--defaultPreemptable'])

        log.info('Run script...')
        self._runScript(toilOptions)

        assert len(self.getMatchingRoles()) == 1

        from boto.exception import EC2ResponseError
        volumeID = self.getRootVolID()
        self.cluster.destroyCluster()
        for attempt in range(6):
            # https://github.com/BD2KGenomics/toil/issues/1567
            # retry this for up to 1 minute until the volume disappears
            try:
                self.cluster._boto2.ec2.get_all_volumes(volume_ids=[volumeID])
                time.sleep(10)
            except EC2ResponseError as e:
                if e.status == 400 and 'InvalidVolume.NotFound' in e.code:
                    break
                else:
                    raise
        else:
            self.fail('Volume with ID %s was not cleaned up properly' % volumeID)

        assert len(self.getMatchingRoles()) == 0
@integrative
@pytest.mark.timeout(1800)
class AWSAutoscaleTest(AbstractAWSAutoscaleTest):
    """End-to-end autoscaling test: sorts a file on a dynamically scaled Mesos cluster."""

    def __init__(self, name):
        super(AWSAutoscaleTest, self).__init__(name)
        self.clusterName = 'provisioner-test-' + str(uuid4())
        # Leader root volume size (GiB) requested at launch and verified later.
        self.requestedLeaderStorage = 80
        self.scriptName = 'sort.py'

    def setUp(self):
        super(AWSAutoscaleTest, self).setUp()
        # jobStore must be set here, after the base class setUp has run.
        self.jobStore = 'aws:%s:autoscale-%s' % (self.awsRegion(), uuid4())

    def _getScript(self):
        """Upload the sort script and a small file to sort to the leader."""
        fileToSort = os.path.join(os.getcwd(), str(uuid4()))
        with open(fileToSort, 'w') as f:
            # Fixme: making this file larger causes the test to hang
            f.write('01234567890123456789012345678901')
        self.rsyncUtil(os.path.join(self._projectRootPath(), 'src/toil/test/sort/sort.py'), ':' + self.script())
        self.rsyncUtil(fileToSort, ':' + self.data('sortFile'))
        os.unlink(fileToSort)

    def _runScript(self, toilOptions):
        """Run the sort script with AWS autoscaling via the Mesos batch system."""
        toilOptions.extend(['--provisioner=aws', '--batchSystem=mesos',
                            '--nodeTypes=' + ",".join(self.instanceTypes),
                            '--maxNodes=' + ",".join(self.numWorkers)])
        runCommand = [self.python(), self.script(), '--fileToSort=' + self.data('sortFile'), '--sseKey=' + self.data('sortFile')]
        runCommand.extend(toilOptions)
        self.sshUtil(runCommand)

    def launchCluster(self):
        # add arguments to test that we can specify leader storage
        self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage)])

    def getRootVolID(self):
        """
        Adds in test to check that EBS volume is build with adequate size.
        Otherwise is functionally equivalent to parent.

        :return: volumeID
        """
        volumeID = super(AWSAutoscaleTest, self).getRootVolID()
        rootVolume = self.cluster._boto2.ec2.get_all_volumes(volume_ids=[volumeID])[0]
        # test that the leader is given adequate storage
        self.assertGreaterEqual(rootVolume.size, self.requestedLeaderStorage)
        return volumeID

    @integrative
    @needs_aws_ec2
    def testAutoScale(self):
        """Autoscale with on-demand instances only."""
        self.instanceTypes = ["m5a.large"]
        self.numWorkers = ['2']
        self._test()

    @integrative
    @needs_aws_ec2
    def testSpotAutoScale(self):
        """Autoscale with spot instances and preemptable jobs."""
        self.instanceTypes = ["m5a.large:%f" % self.spotBid]
        self.numWorkers = ['2']
        self._test(preemptableJobs=True)

    @integrative
    @needs_aws_ec2
    def testSpotAutoScaleBalancingTypes(self):
        """Autoscale balancing spot bids across equivalent instance types."""
        self.instanceTypes = ["m5.large/m5a.large:%f" % self.spotBid]
        self.numWorkers = ['2']
        self._test(preemptableJobs=True)
@integrative
@pytest.mark.timeout(1200)
class AWSStaticAutoscaleTest(AWSAutoscaleTest):
    """Runs the tests on a statically provisioned cluster with autoscaling enabled."""

    def __init__(self, name):
        super(AWSStaticAutoscaleTest, self).__init__(name)
        # Per-worker root volume size (GiB) requested at launch.
        self.requestedNodeStorage = 20

    def launchCluster(self):
        """Launch with static workers; verify their count and root volume size."""
        from boto.ec2.blockdevicemapping import BlockDeviceType
        from toil.lib.ec2 import wait_instances_running
        # Bug fix: --nodeStorage previously passed requestedLeaderStorage,
        # so the per-node storage request was never actually exercised.
        self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage),
                                     '--nodeTypes', ",".join(self.instanceTypes),
                                     '-w', ",".join(self.numWorkers),
                                     '--nodeStorage', str(self.requestedNodeStorage)])

        self.cluster = cluster_factory(provisioner='aws', zone=self.zone, clusterName=self.clusterName)
        nodes = self.cluster._getNodesInCluster(both=True)
        nodes.sort(key=lambda x: x.launch_time)
        # assuming that leader is first
        workers = nodes[1:]
        # test that two worker nodes were created
        self.assertEqual(2, len(workers))
        # test that workers have expected storage size
        # just use the first worker
        worker = workers[0]
        worker = next(wait_instances_running(self.cluster._boto2.ec2, [worker]))
        rootBlockDevice = worker.block_device_mapping["/dev/xvda"]
        self.assertTrue(isinstance(rootBlockDevice, BlockDeviceType))
        rootVolume = self.cluster._boto2.ec2.get_all_volumes(volume_ids=[rootBlockDevice.volume_id])[0]
        self.assertGreaterEqual(rootVolume.size, self.requestedNodeStorage)

    def _runScript(self, toilOptions):
        """Run the sort script, autoscaling even though we have static nodes."""
        # Autoscale even though we have static nodes
        toilOptions.extend(['--provisioner=aws', '--batchSystem=mesos',
                            '--nodeTypes=' + ",".join(self.instanceTypes),
                            '--maxNodes=' + ",".join(self.numWorkers)])
        runCommand = [self.python(), self.script(), '--fileToSort=' + self.data('sortFile')]
        runCommand.extend(toilOptions)
        self.sshUtil(runCommand)
@integrative
@pytest.mark.timeout(1200)
class AWSManagedAutoscaleTest(AWSAutoscaleTest):
    """Runs the tests on a self-scaling Kubernetes cluster."""

    def __init__(self, name):
        super().__init__(name)
        # Per-worker root volume size (GiB) requested at launch.
        self.requestedNodeStorage = 20

    def launchCluster(self):
        """Launch a Kubernetes-managed autoscaling cluster."""
        # Bug fix: --nodeStorage previously passed requestedLeaderStorage
        # instead of the requested per-node storage; also dropped the unused
        # local imports of BlockDeviceType and wait_instances_running.
        self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage),
                                     '--nodeTypes', ",".join(self.instanceTypes),
                                     '--workers', ",".join([f'0-{c}' for c in self.numWorkers]),
                                     '--nodeStorage', str(self.requestedNodeStorage),
                                     '--clusterType', 'kubernetes'])

        self.cluster = cluster_factory(provisioner='aws', zone=self.zone, clusterName=self.clusterName)

    def _runScript(self, toilOptions):
        """Run the sort script under the Kubernetes batch system."""
        # Don't use the provisioner, and use Kubernetes instead of Mesos
        toilOptions.extend(['--batchSystem=kubernetes'])
        runCommand = [self.python(), self.script(), '--fileToSort=' + self.data('sortFile')]
        runCommand.extend(toilOptions)
        self.sshUtil(runCommand)
@integrative
@pytest.mark.timeout(1200)
class AWSAutoscaleTestMultipleNodeTypes(AbstractAWSAutoscaleTest):
    """Autoscaling test that mixes a small and a large node type."""

    def __init__(self, name):
        super(AWSAutoscaleTestMultipleNodeTypes, self).__init__(name)
        self.clusterName = 'provisioner-test-' + str(uuid4())

    def setUp(self):
        super(AWSAutoscaleTestMultipleNodeTypes, self).setUp()
        # jobStore must be set here, after the base class setUp has run.
        self.jobStore = 'aws:%s:autoscale-%s' % (self.awsRegion(), uuid4())

    def _getScript(self):
        """Upload the sort script and an SSE key file to the leader."""
        sseKeyFile = os.path.join(os.getcwd(), 'keyFile')
        with open(sseKeyFile, 'w') as f:
            f.write('01234567890123456789012345678901')
        self.rsyncUtil(os.path.join(self._projectRootPath(), 'src/toil/test/sort/sort.py'), ':' + self.script())
        self.rsyncUtil(sseKeyFile, ':' + self.data('keyFile'))
        os.unlink(sseKeyFile)

    def _runScript(self, toilOptions):
        """Run the sort script so that both node types get used."""
        #Set memory requirements so that sort jobs can be run
        # on small instances, but merge jobs must be run on large
        # instances
        toilOptions.extend(['--provisioner=aws', '--batchSystem=mesos',
                            '--nodeTypes=' + ",".join(self.instanceTypes),
                            '--maxNodes=' + ",".join(self.numWorkers)])
        runCommand = [self.python(), self.script(), '--fileToSort=/home/s3am/bin/asadmin', '--sortMemory=0.6G', '--mergeMemory=3.0G']
        runCommand.extend(toilOptions)
        runCommand.append('--sseKey=' + self.data('keyFile'))
        self.sshUtil(runCommand)

    @integrative
    @needs_aws_ec2
    def testAutoScale(self):
        """Scale with a small and a large node type simultaneously."""
        self.instanceTypes = ["t2.small", "m5a.large"]
        self.numWorkers = ['2', '1']
        self._test()
@integrative
@pytest.mark.timeout(1200)
class AWSRestartTest(AbstractAWSAutoscaleTest):
    """This test insures autoscaling works on a restarted Toil run."""

    def __init__(self, name):
        super(AWSRestartTest, self).__init__(name)
        self.clusterName = 'restart-test-' + str(uuid4())
        self.scriptName = 'restartScript.py'

    def setUp(self):
        super(AWSRestartTest, self).setUp()
        self.instanceTypes = ['t2.small']
        self.numWorkers = ['1']
        # jobStore must be set here, after the base class setUp has run.
        self.jobStore = 'aws:%s:restart-%s' % (self.awsRegion(), uuid4())

    def _getScript(self):
        """Generate and upload a script whose root job fails iff FAIL is set.

        The nested function below is serialised with getsource() and shipped
        to the leader, so its body is runtime data and must not be edited.
        """
        def restartScript():
            import argparse
            import os
            from toil.job import Job
            def f0(job):
                if 'FAIL' in os.environ:
                    raise RuntimeError('failed on purpose')
            if __name__ == '__main__':
                parser = argparse.ArgumentParser()
                Job.Runner.addToilOptions(parser)
                options = parser.parse_args()
                rootJob = Job.wrapJobFn(f0, cores=0.5, memory='50 M', disk='50 M')
                Job.Runner.startToil(rootJob, options)
        script = dedent('\n'.join(getsource(restartScript).split('\n')[1:]))
        self.putScript(script)

    def _runScript(self, toilOptions):
        """Run the script to a deliberate failure, then restart it to success."""
        # Use the provisioner in the workflow
        toilOptions.extend(['--provisioner=aws', '--batchSystem=mesos',
                            '--nodeTypes=' + ",".join(self.instanceTypes),
                            '--maxNodes=' + ",".join(self.numWorkers)])
        # clean = onSuccess
        disallowedOptions = ['--clean=always', '--retryCount=2']
        newOptions = [option for option in toilOptions if option not in disallowedOptions]
        try:
            # include a default memory - on restart the minimum memory requirement is the default, usually 2 GB
            command = [self.python(), self.script(), '--setEnv', 'FAIL=true', '--defaultMemory=50000000']
            command.extend(newOptions)
            self.sshUtil(command)
        except subprocess.CalledProcessError:
            pass
        else:
            self.fail('Command succeeded when we expected failure')
        with timeLimit(600):
            command = [self.python(), self.script(), '--restart', '--defaultMemory=50000000']
            command.extend(toilOptions)
            self.sshUtil(command)

    def testAutoScaledCluster(self):
        """Exercise fail-then-restart behaviour on an autoscaled cluster."""
        self._test()
@integrative
@pytest.mark.timeout(1200)
class PreemptableDeficitCompensationTest(AbstractAWSAutoscaleTest):
    """Check that a preemptable-node deficit is compensated for with
    non-preemptable nodes."""

    def __init__(self, name):
        super(PreemptableDeficitCompensationTest, self).__init__(name)
        self.clusterName = 'deficit-test-' + str(uuid4())
        self.scriptName = 'userScript.py'

    def setUp(self):
        super(PreemptableDeficitCompensationTest, self).setUp()
        self.instanceTypes = ['m5a.large:0.01', "m5a.large"]  # instance needs to be available on the spot market
        self.numWorkers = ['1', '1']
        # jobStore must be set here, after the base class setUp has run.
        self.jobStore = 'aws:%s:deficit-%s' % (self.awsRegion(), uuid4())

    def test(self):
        """Run the deficit-compensation workflow with preemptable jobs."""
        self._test(preemptableJobs=True)

    def _getScript(self):
        """Generate and upload a script with a single preemptable no-op job.

        The nested function below is serialised with getsource() and shipped
        to the leader, so its body is runtime data and must not be edited.
        """
        def userScript():
            from toil.common import Toil
            from toil.job import Job
            # Because this is the only job in the pipeline and because it is preemptable,
            # there will be no non-preemptable jobs. The non-preemptable scaler will therefore
            # not request any nodes initially. And since we made it impossible for the
            # preemptable scaler to allocate any nodes (using an abnormally low spot bid),
            # we will observe a deficit of preemptable nodes that the non-preemptable scaler will
            # compensate for by spinning up non-preemptable nodes instead.
            #
            def job(job, disk='10M', cores=1, memory='10M', preemptable=True):
                pass
            if __name__ == '__main__':
                options = Job.Runner.getDefaultArgumentParser().parse_args()
                with Toil(options) as toil:
                    if toil.config.restart:
                        toil.restart()
                    else:
                        toil.start(Job.wrapJobFn(job))
        script = dedent('\n'.join(getsource(userScript).split('\n')[1:]))
        self.putScript(script)

    def _runScript(self, toilOptions):
        """Run with full preemptable compensation enabled."""
        toilOptions.extend(['--provisioner=aws', '--batchSystem=mesos',
                            '--nodeTypes=' + ",".join(self.instanceTypes),
                            '--maxNodes=' + ",".join(self.numWorkers)])
        toilOptions.extend(['--preemptableCompensation=1.0'])
        command = [self.python(), self.script()]
        command.extend(toilOptions)
        self.sshUtil(command)
|
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
class TestDataFrameSubclassing:
    """Tests that DataFrame/Series subclasses are preserved across pandas
    operations (slicing, reshaping, reductions, alignment, etc.), using the
    SubclassedDataFrame/SubclassedSeries helpers from pandas._testing."""
    def test_frame_subclassing_and_slicing(self):
        # Subclass frame and ensure it returns the right class on slicing it
        # In reference to PR 9632
        class CustomSeries(Series):
            @property
            def _constructor(self):
                return CustomSeries
            def custom_series_function(self):
                return "OK"
        class CustomDataFrame(DataFrame):
            """
            Subclasses pandas DF, fills DF with simulation results, adds some
            custom plotting functions.
            """
            def __init__(self, *args, **kw):
                super().__init__(*args, **kw)
            @property
            def _constructor(self):
                return CustomDataFrame
            _constructor_sliced = CustomSeries
            def custom_frame_function(self):
                return "OK"
        data = {"col1": range(10), "col2": range(10)}
        cdf = CustomDataFrame(data)
        # Did we get back our own DF class?
        assert isinstance(cdf, CustomDataFrame)
        # Do we get back our own Series class after selecting a column?
        cdf_series = cdf.col1
        assert isinstance(cdf_series, CustomSeries)
        assert cdf_series.custom_series_function() == "OK"
        # Do we get back our own DF class after slicing row-wise?
        cdf_rows = cdf[1:5]
        assert isinstance(cdf_rows, CustomDataFrame)
        assert cdf_rows.custom_frame_function() == "OK"
        # Make sure sliced part of multi-index frame is custom class
        mcol = MultiIndex.from_tuples([("A", "A"), ("A", "B")])
        cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
        assert isinstance(cdf_multi["A"], CustomDataFrame)
        mcol = MultiIndex.from_tuples([("A", ""), ("B", "")])
        cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
        assert isinstance(cdf_multi2["A"], CustomSeries)
    def test_dataframe_metadata(self):
        """Custom attributes listed in _metadata survive indexing and pickling."""
        df = tm.SubclassedDataFrame(
            {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"]
        )
        df.testattr = "XXX"
        assert df.testattr == "XXX"
        assert df[["X"]].testattr == "XXX"
        assert df.loc[["a", "b"], :].testattr == "XXX"
        assert df.iloc[[0, 1], :].testattr == "XXX"
        # see gh-9776
        assert df.iloc[0:1, :].testattr == "XXX"
        # see gh-10553
        unpickled = tm.round_trip_pickle(df)
        tm.assert_frame_equal(df, unpickled)
        assert df._metadata == unpickled._metadata
        assert df.testattr == unpickled.testattr
    def test_indexing_sliced(self):
        # GH 11559
        df = tm.SubclassedDataFrame(
            {"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["a", "b", "c"]
        )
        res = df.loc[:, "X"]
        exp = tm.SubclassedSeries([1, 2, 3], index=list("abc"), name="X")
        tm.assert_series_equal(res, exp)
        assert isinstance(res, tm.SubclassedSeries)
        res = df.iloc[:, 1]
        exp = tm.SubclassedSeries([4, 5, 6], index=list("abc"), name="Y")
        tm.assert_series_equal(res, exp)
        assert isinstance(res, tm.SubclassedSeries)
        res = df.loc[:, "Z"]
        exp = tm.SubclassedSeries([7, 8, 9], index=list("abc"), name="Z")
        tm.assert_series_equal(res, exp)
        assert isinstance(res, tm.SubclassedSeries)
        res = df.loc["a", :]
        exp = tm.SubclassedSeries([1, 4, 7], index=list("XYZ"), name="a")
        tm.assert_series_equal(res, exp)
        assert isinstance(res, tm.SubclassedSeries)
        res = df.iloc[1, :]
        exp = tm.SubclassedSeries([2, 5, 8], index=list("XYZ"), name="b")
        tm.assert_series_equal(res, exp)
        assert isinstance(res, tm.SubclassedSeries)
        res = df.loc["c", :]
        exp = tm.SubclassedSeries([3, 6, 9], index=list("XYZ"), name="c")
        tm.assert_series_equal(res, exp)
        assert isinstance(res, tm.SubclassedSeries)
    def test_subclass_attr_err_propagation(self):
        # GH 11808
        class A(DataFrame):
            @property
            def bar(self):
                return self.i_dont_exist
        with pytest.raises(AttributeError, match=".*i_dont_exist.*"):
            A().bar
    def test_subclass_align(self):
        # GH 12983
        df1 = tm.SubclassedDataFrame(
            {"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")
        )
        df2 = tm.SubclassedDataFrame(
            {"c": [1, 2, 4], "d": [1, 2, 4]}, index=list("ABD")
        )
        res1, res2 = df1.align(df2, axis=0)
        exp1 = tm.SubclassedDataFrame(
            {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
            index=list("ABCDE"),
        )
        exp2 = tm.SubclassedDataFrame(
            {"c": [1, 2, np.nan, 4, np.nan], "d": [1, 2, np.nan, 4, np.nan]},
            index=list("ABCDE"),
        )
        assert isinstance(res1, tm.SubclassedDataFrame)
        tm.assert_frame_equal(res1, exp1)
        assert isinstance(res2, tm.SubclassedDataFrame)
        tm.assert_frame_equal(res2, exp2)
        res1, res2 = df1.a.align(df2.c)
        assert isinstance(res1, tm.SubclassedSeries)
        tm.assert_series_equal(res1, exp1.a)
        assert isinstance(res2, tm.SubclassedSeries)
        tm.assert_series_equal(res2, exp2.c)
    def test_subclass_align_combinations(self):
        # GH 12983
        df = tm.SubclassedDataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
        s = tm.SubclassedSeries([1, 2, 4], index=list("ABD"), name="x")
        # frame + series
        res1, res2 = df.align(s, axis=0)
        exp1 = tm.SubclassedDataFrame(
            {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
            index=list("ABCDE"),
        )
        # name is lost when
        exp2 = tm.SubclassedSeries(
            [1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x"
        )
        assert isinstance(res1, tm.SubclassedDataFrame)
        tm.assert_frame_equal(res1, exp1)
        assert isinstance(res2, tm.SubclassedSeries)
        tm.assert_series_equal(res2, exp2)
        # series + frame
        res1, res2 = s.align(df)
        assert isinstance(res1, tm.SubclassedSeries)
        tm.assert_series_equal(res1, exp2)
        assert isinstance(res2, tm.SubclassedDataFrame)
        tm.assert_frame_equal(res2, exp1)
    def test_subclass_iterrows(self):
        # GH 13977
        df = tm.SubclassedDataFrame({"a": [1]})
        for i, row in df.iterrows():
            assert isinstance(row, tm.SubclassedSeries)
            tm.assert_series_equal(row, df.loc[i])
    def test_subclass_stack(self):
        # GH 15564
        df = tm.SubclassedDataFrame(
            [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
            index=["a", "b", "c"],
            columns=["X", "Y", "Z"],
        )
        res = df.stack()
        exp = tm.SubclassedSeries(
            [1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list("aaabbbccc"), list("XYZXYZXYZ")]
        )
        tm.assert_series_equal(res, exp)
    def test_subclass_stack_multi(self):
        # GH 15564
        df = tm.SubclassedDataFrame(
            [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],
            index=MultiIndex.from_tuples(
                list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
            ),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
            ),
        )
        exp = tm.SubclassedDataFrame(
            [
                [10, 12],
                [11, 13],
                [20, 22],
                [21, 23],
                [30, 32],
                [31, 33],
                [40, 42],
                [41, 43],
            ],
            index=MultiIndex.from_tuples(
                list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))),
                names=["aaa", "ccc", "yyy"],
            ),
            columns=Index(["W", "X"], name="www"),
        )
        res = df.stack()
        tm.assert_frame_equal(res, exp)
        res = df.stack("yyy")
        tm.assert_frame_equal(res, exp)
        exp = tm.SubclassedDataFrame(
            [
                [10, 11],
                [12, 13],
                [20, 21],
                [22, 23],
                [30, 31],
                [32, 33],
                [40, 41],
                [42, 43],
            ],
            index=MultiIndex.from_tuples(
                list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))),
                names=["aaa", "ccc", "www"],
            ),
            columns=Index(["y", "z"], name="yyy"),
        )
        res = df.stack("www")
        tm.assert_frame_equal(res, exp)
    def test_subclass_stack_multi_mixed(self):
        # GH 15564
        df = tm.SubclassedDataFrame(
            [
                [10, 11, 12.0, 13.0],
                [20, 21, 22.0, 23.0],
                [30, 31, 32.0, 33.0],
                [40, 41, 42.0, 43.0],
            ],
            index=MultiIndex.from_tuples(
                list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
            ),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
            ),
        )
        exp = tm.SubclassedDataFrame(
            [
                [10, 12.0],
                [11, 13.0],
                [20, 22.0],
                [21, 23.0],
                [30, 32.0],
                [31, 33.0],
                [40, 42.0],
                [41, 43.0],
            ],
            index=MultiIndex.from_tuples(
                list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))),
                names=["aaa", "ccc", "yyy"],
            ),
            columns=Index(["W", "X"], name="www"),
        )
        res = df.stack()
        tm.assert_frame_equal(res, exp)
        res = df.stack("yyy")
        tm.assert_frame_equal(res, exp)
        exp = tm.SubclassedDataFrame(
            [
                [10.0, 11.0],
                [12.0, 13.0],
                [20.0, 21.0],
                [22.0, 23.0],
                [30.0, 31.0],
                [32.0, 33.0],
                [40.0, 41.0],
                [42.0, 43.0],
            ],
            index=MultiIndex.from_tuples(
                list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))),
                names=["aaa", "ccc", "www"],
            ),
            columns=Index(["y", "z"], name="yyy"),
        )
        res = df.stack("www")
        tm.assert_frame_equal(res, exp)
    def test_subclass_unstack(self):
        # GH 15564
        df = tm.SubclassedDataFrame(
            [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
            index=["a", "b", "c"],
            columns=["X", "Y", "Z"],
        )
        res = df.unstack()
        exp = tm.SubclassedSeries(
            [1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list("XXXYYYZZZ"), list("abcabcabc")]
        )
        tm.assert_series_equal(res, exp)
    def test_subclass_unstack_multi(self):
        # GH 15564
        df = tm.SubclassedDataFrame(
            [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],
            index=MultiIndex.from_tuples(
                list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
            ),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
            ),
        )
        exp = tm.SubclassedDataFrame(
            [[10, 20, 11, 21, 12, 22, 13, 23], [30, 40, 31, 41, 32, 42, 33, 43]],
            index=Index(["A", "B"], name="aaa"),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))),
                names=["www", "yyy", "ccc"],
            ),
        )
        res = df.unstack()
        tm.assert_frame_equal(res, exp)
        res = df.unstack("ccc")
        tm.assert_frame_equal(res, exp)
        exp = tm.SubclassedDataFrame(
            [[10, 30, 11, 31, 12, 32, 13, 33], [20, 40, 21, 41, 22, 42, 23, 43]],
            index=Index(["c", "d"], name="ccc"),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("ABABABAB"))),
                names=["www", "yyy", "aaa"],
            ),
        )
        res = df.unstack("aaa")
        tm.assert_frame_equal(res, exp)
    def test_subclass_unstack_multi_mixed(self):
        # GH 15564
        df = tm.SubclassedDataFrame(
            [
                [10, 11, 12.0, 13.0],
                [20, 21, 22.0, 23.0],
                [30, 31, 32.0, 33.0],
                [40, 41, 42.0, 43.0],
            ],
            index=MultiIndex.from_tuples(
                list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
            ),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
            ),
        )
        exp = tm.SubclassedDataFrame(
            [
                [10, 20, 11, 21, 12.0, 22.0, 13.0, 23.0],
                [30, 40, 31, 41, 32.0, 42.0, 33.0, 43.0],
            ],
            index=Index(["A", "B"], name="aaa"),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))),
                names=["www", "yyy", "ccc"],
            ),
        )
        res = df.unstack()
        tm.assert_frame_equal(res, exp)
        res = df.unstack("ccc")
        tm.assert_frame_equal(res, exp)
        exp = tm.SubclassedDataFrame(
            [
                [10, 30, 11, 31, 12.0, 32.0, 13.0, 33.0],
                [20, 40, 21, 41, 22.0, 42.0, 23.0, 43.0],
            ],
            index=Index(["c", "d"], name="ccc"),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("ABABABAB"))),
                names=["www", "yyy", "aaa"],
            ),
        )
        res = df.unstack("aaa")
        tm.assert_frame_equal(res, exp)
    def test_subclass_pivot(self):
        # GH 15564
        df = tm.SubclassedDataFrame(
            {
                "index": ["A", "B", "C", "C", "B", "A"],
                "columns": ["One", "One", "One", "Two", "Two", "Two"],
                "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
            }
        )
        pivoted = df.pivot(index="index", columns="columns", values="values")
        expected = tm.SubclassedDataFrame(
            {
                "One": {"A": 1.0, "B": 2.0, "C": 3.0},
                "Two": {"A": 1.0, "B": 2.0, "C": 3.0},
            }
        )
        expected.index.name, expected.columns.name = "index", "columns"
        tm.assert_frame_equal(pivoted, expected)
    def test_subclassed_melt(self):
        # GH 15564
        cheese = tm.SubclassedDataFrame(
            {
                "first": ["John", "Mary"],
                "last": ["Doe", "Bo"],
                "height": [5.5, 6.0],
                "weight": [130, 150],
            }
        )
        melted = pd.melt(cheese, id_vars=["first", "last"])
        expected = tm.SubclassedDataFrame(
            [
                ["John", "Doe", "height", 5.5],
                ["Mary", "Bo", "height", 6.0],
                ["John", "Doe", "weight", 130],
                ["Mary", "Bo", "weight", 150],
            ],
            columns=["first", "last", "variable", "value"],
        )
        tm.assert_frame_equal(melted, expected)
    def test_subclassed_wide_to_long(self):
        # GH 9762
        np.random.seed(123)
        x = np.random.randn(3)
        df = tm.SubclassedDataFrame(
            {
                "A1970": {0: "a", 1: "b", 2: "c"},
                "A1980": {0: "d", 1: "e", 2: "f"},
                "B1970": {0: 2.5, 1: 1.2, 2: 0.7},
                "B1980": {0: 3.2, 1: 1.3, 2: 0.1},
                "X": dict(zip(range(3), x)),
            }
        )
        df["id"] = df.index
        exp_data = {
            "X": x.tolist() + x.tolist(),
            "A": ["a", "b", "c", "d", "e", "f"],
            "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
            "year": [1970, 1970, 1970, 1980, 1980, 1980],
            "id": [0, 1, 2, 0, 1, 2],
        }
        expected = tm.SubclassedDataFrame(exp_data)
        expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
        long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year")
        tm.assert_frame_equal(long_frame, expected)
    def test_subclassed_apply(self):
        # GH 19822
        def check_row_subclass(row):
            assert isinstance(row, tm.SubclassedSeries)
        def stretch(row):
            if row["variable"] == "height":
                row["value"] += 0.5
            return row
        df = tm.SubclassedDataFrame(
            [
                ["John", "Doe", "height", 5.5],
                ["Mary", "Bo", "height", 6.0],
                ["John", "Doe", "weight", 130],
                ["Mary", "Bo", "weight", 150],
            ],
            columns=["first", "last", "variable", "value"],
        )
        df.apply(lambda x: check_row_subclass(x))
        df.apply(lambda x: check_row_subclass(x), axis=1)
        expected = tm.SubclassedDataFrame(
            [
                ["John", "Doe", "height", 6.0],
                ["Mary", "Bo", "height", 6.5],
                ["John", "Doe", "weight", 130],
                ["Mary", "Bo", "weight", 150],
            ],
            columns=["first", "last", "variable", "value"],
        )
        result = df.apply(lambda x: stretch(x), axis=1)
        assert isinstance(result, tm.SubclassedDataFrame)
        tm.assert_frame_equal(result, expected)
        expected = tm.SubclassedDataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
        result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1)
        assert isinstance(result, tm.SubclassedDataFrame)
        tm.assert_frame_equal(result, expected)
        result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand")
        assert isinstance(result, tm.SubclassedDataFrame)
        tm.assert_frame_equal(result, expected)
        expected = tm.SubclassedSeries([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
        result = df.apply(lambda x: [1, 2, 3], axis=1)
        assert not isinstance(result, tm.SubclassedDataFrame)
        tm.assert_series_equal(result, expected)
    @pytest.mark.filterwarnings("ignore:.*None will no longer:FutureWarning")
    def test_subclassed_reductions(self, all_reductions):
        # GH 25596
        df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        result = getattr(df, all_reductions)()
        assert isinstance(result, tm.SubclassedSeries)
    def test_subclassed_count(self):
        """count() keeps the subclass for Series and (level-wise) DataFrame results."""
        df = tm.SubclassedDataFrame(
            {
                "Person": ["John", "Myla", "Lewis", "John", "Myla"],
                "Age": [24.0, np.nan, 21.0, 33, 26],
                "Single": [False, True, True, True, False],
            }
        )
        result = df.count()
        assert isinstance(result, tm.SubclassedSeries)
        df = tm.SubclassedDataFrame({"A": [1, 0, 3], "B": [0, 5, 6], "C": [7, 8, 0]})
        result = df.count()
        assert isinstance(result, tm.SubclassedSeries)
        df = tm.SubclassedDataFrame(
            [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],
            index=MultiIndex.from_tuples(
                list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
            ),
            columns=MultiIndex.from_tuples(
                list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
            ),
        )
        with tm.assert_produces_warning(FutureWarning):
            result = df.count(level=1)
        assert isinstance(result, tm.SubclassedDataFrame)
        df = tm.SubclassedDataFrame()
        result = df.count()
        assert isinstance(result, tm.SubclassedSeries)
    def test_isin(self):
        """isin() returns the DataFrame subclass."""
        df = tm.SubclassedDataFrame(
            {"num_legs": [2, 4], "num_wings": [2, 0]}, index=["falcon", "dog"]
        )
        result = df.isin([0, 2])
        assert isinstance(result, tm.SubclassedDataFrame)
    def test_duplicated(self):
        """duplicated() returns the Series subclass, including on empty frames."""
        df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        result = df.duplicated()
        assert isinstance(result, tm.SubclassedSeries)
        df = tm.SubclassedDataFrame()
        result = df.duplicated()
        assert isinstance(result, tm.SubclassedSeries)
    @pytest.mark.parametrize("idx_method", ["idxmax", "idxmin"])
    def test_idx(self, idx_method):
        """idxmax/idxmin return the Series subclass."""
        df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        result = getattr(df, idx_method)()
        assert isinstance(result, tm.SubclassedSeries)
    def test_dot(self):
        """dot() preserves the subclass for both Series and DataFrame operands."""
        df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        s = tm.SubclassedSeries([1, 1, 2, 1])
        result = df.dot(s)
        assert isinstance(result, tm.SubclassedSeries)
        df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        s = tm.SubclassedDataFrame([1, 1, 2, 1])
        result = df.dot(s)
        assert isinstance(result, tm.SubclassedDataFrame)
    def test_memory_usage(self):
        """memory_usage() returns the Series subclass with and without the index."""
        df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        result = df.memory_usage()
        assert isinstance(result, tm.SubclassedSeries)
        result = df.memory_usage(index=False)
        assert isinstance(result, tm.SubclassedSeries)
    @td.skip_if_no_scipy
    def test_corrwith(self):
        """corrwith() (kendall needs scipy) returns the Series subclass."""
        index = ["a", "b", "c", "d", "e"]
        columns = ["one", "two", "three", "four"]
        df1 = tm.SubclassedDataFrame(
            np.random.randn(5, 4), index=index, columns=columns
        )
        df2 = tm.SubclassedDataFrame(
            np.random.randn(4, 4), index=index[:4], columns=columns
        )
        correls = df1.corrwith(df2, axis=1, drop=True, method="kendall")
        assert isinstance(correls, (tm.SubclassedSeries))
    def test_asof(self):
        """asof() keeps the subclass for both frame and row results."""
        N = 3
        rng = pd.date_range("1/1/1990", periods=N, freq="53s")
        df = tm.SubclassedDataFrame(
            {
                "A": [np.nan, np.nan, np.nan],
                "B": [np.nan, np.nan, np.nan],
                "C": [np.nan, np.nan, np.nan],
            },
            index=rng,
        )
        result = df.asof(rng[-2:])
        assert isinstance(result, tm.SubclassedDataFrame)
        result = df.asof(rng[-2])
        assert isinstance(result, tm.SubclassedSeries)
        result = df.asof("1989-12-31")
        assert isinstance(result, tm.SubclassedSeries)
    def test_idxmin_preserves_subclass(self):
        # GH 28330
        df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        result = df.idxmin()
        assert isinstance(result, tm.SubclassedSeries)
    def test_idxmax_preserves_subclass(self):
        # GH 28330
        df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        result = df.idxmax()
        assert isinstance(result, tm.SubclassedSeries)
    def test_equals_subclass(self):
        # https://github.com/pandas-dev/pandas/pull/34402
        # allow subclass in both directions
        df1 = DataFrame({"a": [1, 2, 3]})
        df2 = tm.SubclassedDataFrame({"a": [1, 2, 3]})
        assert df1.equals(df2)
        assert df2.equals(df1)
|
|
"""Provide some CSV Files
first four columns need to be
ID,Station,Latitude,Longitude
"""
import datetime
import re
import sys
try:
from zoneinfo import ZoneInfo # type: ignore
except ImportError:
from backports.zoneinfo import ZoneInfo
# third party
import requests
import ephem
import pytz
import pandas as pd
from pandas.io.sql import read_sql
from paste.request import parse_formvars
from pyiem.util import get_dbconn, utc
# DOT plows
# RWIS sensor data
# River gauges
# Ag data (4" soil temps)
# Moon
def figurePhase(p1, p2):
    """Return a string of the moon phase!

    ``p1`` is the illuminated fraction at the upcoming rise and ``p2``
    the fraction at the following rise; a decreasing fraction
    (``p2 < p1``) means the moon is waning.
    """
    waning = p2 < p1
    if p1 < 0.1:
        return "New Moon"
    if p1 < 0.4:
        return "Waning Crescent" if waning else "Waxing Crescent"
    if p1 < 0.6:
        return "Last Quarter" if waning else "First Quarter"
    if p1 < 0.9:
        return "Waning Gibbous" if waning else "Waxing Gibbous"
    return "Full Moon"
def do_moon(lon, lat):
    """Moon fun.

    Build a one-row DataFrame with the next moon rise/set pair (and the
    pair after that) for the given longitude/latitude, the illumination
    percentage at each rise, a phase label, and the local timezone.
    """
    moon = ephem.Moon()
    obs = ephem.Observer()
    obs.lat = str(lat)
    obs.long = str(lon)
    obs.date = utc().strftime("%Y/%m/%d %H:%M")
    # NOTE: the order of the obs.date assignments below is load-bearing --
    # each next_rising/next_setting call is relative to the current obs.date,
    # and moon.moon_phase reflects the most recent computation.
    r1 = obs.next_rising(moon).datetime().replace(tzinfo=datetime.timezone.utc)
    p1 = moon.moon_phase
    obs.date = r1.strftime("%Y/%m/%d %H:%M")
    s1 = (
        obs.next_setting(moon).datetime().replace(tzinfo=datetime.timezone.utc)
    )
    # Figure out the next rise time
    obs.date = s1.strftime("%Y/%m/%d %H:%M")
    r2 = obs.next_rising(moon).datetime().replace(tzinfo=datetime.timezone.utc)
    p2 = moon.moon_phase
    obs.date = r2.strftime("%Y/%m/%d %H:%M")
    s2 = (
        obs.next_setting(moon).datetime().replace(tzinfo=datetime.timezone.utc)
    )
    label = figurePhase(p1, p2)
    # Figure out the timezone via a point-in-polygon query; fall back to UTC
    # when the point is outside all tz_world polygons (e.g. over the ocean).
    cursor = get_dbconn("mesosite").cursor()
    cursor.execute(
        "select tzid from tz_world WHERE "
        "st_contains(geom, st_setsrid(ST_Point(%s, %s), 4326))",
        (lon, lat),
    )
    if cursor.rowcount == 0:
        tzid = "UTC"
    else:
        tzid = cursor.fetchone()[0]
    tz = ZoneInfo(tzid)
    return pd.DataFrame(
        {
            "longitude": lon,
            "latitude": lat,
            "moon_rise_date": r1.astimezone(tz).strftime("%Y/%m/%d"),
            "moon_rise_time": r1.astimezone(tz).strftime("%-I:%M %P"),
            "moon_set_date": s1.astimezone(tz).strftime("%Y/%m/%d"),
            "moon_set_time": s1.astimezone(tz).strftime("%-I:%M %P"),
            "percent_illum_at_rise": round(p1 * 100, 4),
            "phase": label,
            "next_moon_rise_date": r2.astimezone(tz).strftime("%Y/%m/%d"),
            "next_moon_rise_time": r2.astimezone(tz).strftime("%-I:%M %P"),
            "next_moon_set_date": s2.astimezone(tz).strftime("%Y/%m/%d"),
            "next_moon_set_time": s2.astimezone(tz).strftime("%-I:%M %P"),
            "next_percent_illum_at_rise": round(p2 * 100, 4),
            "timezone": tzid,
        },
        index=[0],
    )
def do_iaroadcond():
    """Iowa DOT Road Conditions as dots"""
    # One row per road segment: centroid lat/lon in EPSG:4326 plus the
    # live condition code; commas stripped from names for CSV safety.
    conn = get_dbconn("postgis")
    return read_sql(
        """
    select b.idot_id as locationid,
    replace(b.longname, ',', ' ') as locationname,
    ST_y(ST_transform(ST_centroid(b.geom),4326)) as latitude,
    ST_x(ST_transform(ST_centroid(b.geom),4326)) as longitude, cond_code
    from roads_base b JOIN roads_current c on (c.segid = b.segid)
    """,
        conn,
    )
def do_webcams(network):
    """direction arrows"""
    # Current webcam positions plus pan direction (drct) for one network;
    # cameras silent for more than 30 minutes are excluded.
    conn = get_dbconn("mesosite")
    return read_sql(
        """
    select cam as locationid, w.name as locationname, st_y(geom) as latitude,
    st_x(geom) as longitude, drct
    from camera_current c JOIN webcams w on (c.cam = w.id)
    WHERE c.valid > (now() - '30 minutes'::interval) and w.network = %s
    """,
        conn,
        params=(network,),
    )
def do_iowa_azos(date, itoday=False):
    """Dump high and lows for Iowa ASOS + AWOS.

    When *itoday* is True, additionally attach 24/48/72/168/720 hour
    precipitation totals from the hourly table.
    """
    pgconn = get_dbconn("iem")
    df = read_sql(
        f"""
    select id as locationid, n.name as locationname, st_y(geom) as latitude,
    st_x(geom) as longitude, s.day, s.max_tmpf::int as high,
    s.min_tmpf::int as low, coalesce(pday, 0) as precip
    from stations n JOIN summary_{date.year} s on (n.iemid = s.iemid)
    WHERE n.network in ('IA_ASOS', 'AWOS') and s.day = %s
    """,
        pgconn,
        params=(date,),
        index_col="locationid",
    )
    if itoday:
        # Additionally, piggy back rainfall totals
        df2 = read_sql(
            """
        SELECT id as station,
        sum(phour) as precip720,
        sum(case when valid >= (now() - '168 hours'::interval)
            then phour else 0 end) as precip168,
        sum(case when valid >= (now() - '72 hours'::interval)
            then phour else 0 end) as precip72,
        sum(case when valid >= (now() - '48 hours'::interval)
            then phour else 0 end) as precip48,
        sum(case when valid >= (now() - '24 hours'::interval)
            then phour else 0 end) as precip24
        from hourly h JOIN stations t on (h.iemid = t.iemid)
        where t.network in ('IA_ASOS', 'AWOS')
        and valid >= now() - '720 hours'::interval
        and phour > 0.005 GROUP by id
        """,
            pgconn,
            index_col="station",
        )
        for col in [
            "precip24",
            "precip48",
            "precip72",
            "precip168",
            "precip720",
        ]:
            # Index-aligned assignment: stations absent from df2 get NaN here
            # (presumably acceptable for stations with no recent rain -- verify).
            df[col] = df2[col]
            # make sure the new column is >= precip
            df.loc[df[col] < df["precip"], col] = df["precip"]
    df = df.reset_index()
    return df
def do_iarwis():
    """Dump RWIS data.

    Returns pavement sensor temperatures for IA/WI/IL RWIS stations that
    reported within the last two hours; temperatures are rendered as
    whole-degree strings ("" for missing), and rows with no usable
    average are dropped.
    """
    pgconn = get_dbconn("iem")
    df = read_sql(
        """
    select id as locationid, n.name as locationname, st_y(geom) as latitude,
    st_x(geom) as longitude, tsf0 as pavetmp1, tsf1 as pavetmp2,
    tsf2 as pavetmp3, tsf3 as pavetmp4
    from stations n JOIN current s on (n.iemid = s.iemid)
    WHERE n.network in ('IA_RWIS', 'WI_RWIS', 'IL_RWIS') and
    s.valid  > (now() - '2 hours'::interval)
    """,
        pgconn,
    )
    # Compute simple average in whole degree F
    # (mean skips NaN sensors; result is a string column so missing -> "")
    df["paveavg"] = (
        df[["pavetmp1", "pavetmp2", "pavetmp3", "pavetmp4"]]
        .mean(axis=1)
        .map(lambda x: f"{x:.0f}" if not pd.isna(x) else "")
    )
    # Drop stations where every sensor was missing
    df = df[df["paveavg"] != ""]
    for col in range(1, 5):
        df[f"pavetmp{col}"] = df[f"pavetmp{col}"].map(
            lambda x: f"{x:.0f}" if not pd.isna(x) else ""
        )
    return df
def do_ahps_obs(nwsli):
    """Create a dataframe with AHPS river stage and CFS information.

    Returns a formatted CSV-ish string (not a DataFrame) of the last
    72 hours of observations, or "NO DATA" when the site has no recent
    observed keys.
    """
    pgconn = get_dbconn("hml")
    cursor = pgconn.cursor()
    # Get metadata
    cursor.execute(
        """
        SELECT name, st_x(geom), st_y(geom), tzname from stations
        where id = %s and network ~* 'DCP'
    """,
        (nwsli,),
    )
    row = cursor.fetchone()
    latitude = row[2]
    longitude = row[1]
    stationname = row[0]
    tzinfo = pytz.timezone(row[3])
    # Figure out which keys we have
    cursor.execute(
        """
    with obs as (
        select distinct key from hml_observed_data where station = %s
        and valid > now() - '3 days'::interval)
    SELECT k.id, k.label from hml_observed_keys k JOIN obs o on (k.id = o.key)
    """,
        (nwsli,),
    )
    if cursor.rowcount == 0:
        return "NO DATA"
    # NOTE(review): assumes at least two keys in primary-then-secondary order;
    # a site with a single key would make the second fetchone() return None.
    plabel = cursor.fetchone()[1]
    slabel = cursor.fetchone()[1]
    df = read_sql(
        """
    WITH primaryv as (
        SELECT valid, value from hml_observed_data WHERE station = %s
        and key = get_hml_observed_key(%s) and valid > now() - '1 day'::interval
    ), secondaryv as (
        SELECT valid, value from hml_observed_data WHERE station = %s
        and key = get_hml_observed_key(%s) and valid > now() - '1 day'::interval
    )
    SELECT p.valid at time zone 'UTC' as valid,
    p.value as primary_value, s.value as secondary_value,
    'O' as type
    from primaryv p LEFT JOIN secondaryv s ON (p.valid = s.valid)
    WHERE p.valid > (now() - '72 hours'::interval)
    ORDER by p.valid DESC
    """,
        pgconn,
        params=(nwsli, plabel, nwsli, slabel),
        index_col=None,
    )
    # Debug breadcrumbs for the labels actually used
    sys.stderr.write(str(plabel))
    sys.stderr.write(str(slabel))
    df["locationid"] = nwsli
    df["locationname"] = stationname
    df["latitude"] = latitude
    df["longitude"] = longitude
    # Localize the naive UTC timestamps to the site's timezone for display
    df["Time"] = (
        df["valid"]
        .dt.tz_localize(pytz.UTC)
        .dt.tz_convert(tzinfo)
        .dt.strftime("%m/%d/%Y %H:%M")
    )
    df[plabel] = df["primary_value"]
    df[slabel] = df["secondary_value"]
    # we have to do the writing from here
    res = "Observed Data:,,\n"
    res += "|Date|,|Stage|,|--Flow-|\n"
    odf = df[df["type"] == "O"]
    # NOTE(review): column names are hard-coded here while the frame columns
    # were created from the dynamic plabel/slabel above -- this raises
    # KeyError when the site's labels differ from 'Stage[ft]'/'Flow[kcfs]';
    # confirm intended behavior.
    for _, row in odf.iterrows():
        res += (
            f"{row['Time']},{row['Stage[ft]']:.2f}ft,"
            f"{row['Flow[kcfs]']:.1f}kcfs\n"
        )
    return res
def do_ahps_fx(nwsli):
    """Create a dataframe with AHPS river stage and CFS information.

    Returns a formatted string of the most recent forecast (issued within
    the last 7 days) for the given NWSLI site.
    """
    pgconn = get_dbconn("hml")
    cursor = pgconn.cursor()
    # Get metadata
    cursor.execute(
        "SELECT name, st_x(geom), st_y(geom), tzname from stations "
        "where id = %s and network ~* 'DCP'",
        (nwsli,),
    )
    row = cursor.fetchone()
    latitude = row[2]
    longitude = row[1]
    stationname = row[0]
    tzinfo = pytz.timezone(row[3])
    # Get the last forecast
    cursor.execute(
        """
        select id, forecast_sts at time zone 'UTC',
        generationtime at time zone 'UTC', primaryname, primaryunits,
        secondaryname, secondaryunits
        from hml_forecast where station = %s
        and generationtime > now() - '7 days'::interval
        ORDER by issued DESC LIMIT 1
    """,
        (nwsli,),
    )
    row = cursor.fetchone()
    primaryname = row[3]
    generationtime = row[2]
    primaryunits = row[4]
    secondaryname = row[5]
    secondaryunits = row[6]
    # Get the latest forecast (table is partitioned by generation year)
    df = read_sql(
        f"""
    SELECT valid at time zone 'UTC' as valid,
    primary_value, secondary_value, 'F' as type from
    hml_forecast_data_{generationtime.year} WHERE hml_forecast_id = %s
    ORDER by valid ASC
    """,
        pgconn,
        params=(row[0],),
        index_col=None,
    )
    # Get the obs
    plabel = f"{primaryname}[{primaryunits}]"
    slabel = f"{secondaryname}[{secondaryunits}]"
    df["locationid"] = nwsli
    df["locationname"] = stationname
    df["latitude"] = latitude
    df["longitude"] = longitude
    # Localize naive UTC timestamps to the site's timezone for display
    df["Time"] = (
        df["valid"]
        .dt.tz_localize(pytz.UTC)
        .dt.tz_convert(tzinfo)
        .dt.strftime("%m/%d/%Y %H:%M")
    )
    df[plabel] = df["primary_value"]
    df[slabel] = df["secondary_value"]
    # we have to do the writing from here
    res = f"Forecast Data (Issued {generationtime:%m-%d-%Y %H:%M:%S} UTC):,\n"
    res += "|Date|,|Stage|,|--Flow-|\n"
    odf = df[df["type"] == "F"]
    # NOTE(review): hard-coded 'Stage[ft]'/'Flow[kcfs]' keys here, while the
    # columns above were created from the dynamic plabel/slabel -- KeyError
    # if the site's forecast labels differ; confirm intended behavior.
    for _, row in odf.iterrows():
        res += "%s,%.2fft,%.1fkcfs\n" % (
            row["Time"],
            row["Stage[ft]"],
            row["Flow[kcfs]"],
        )
    return res
def feet(val, suffix="'"):
    """Render *val* as a one-decimal feet reading; blank for missing."""
    missing = pd.isnull(val) or val == ""
    return "" if missing else "%.1f%s" % (val, suffix)
def do_ahps(nwsli):
    """Create a dataframe with AHPS river stage and CFS information.

    Emits a wide CSV string combining recent hourly observations with the
    latest forecast for the site, or "NO DATA" when either is missing.
    """
    pgconn = get_dbconn("hml")
    cursor = pgconn.cursor()
    # Get metadata
    cursor.execute(
        "SELECT name, st_x(geom), st_y(geom), tzname from stations "
        "where id = %s and network ~* 'DCP'",
        (nwsli,),
    )
    row = cursor.fetchone()
    latitude = row[2]
    longitude = row[1]
    stationname = row[0].replace(",", " ")
    tzinfo = pytz.timezone(row[3])
    # Get the last forecast
    cursor.execute(
        """
        select id, forecast_sts at time zone 'UTC',
        generationtime at time zone 'UTC', primaryname, primaryunits,
        secondaryname, secondaryunits
        from hml_forecast where station = %s
        and generationtime > now() - '7 days'::interval
        ORDER by issued DESC LIMIT 1
    """,
        (nwsli,),
    )
    if cursor.rowcount == 0:
        return "NO DATA"
    row = cursor.fetchone()
    generationtime = row[2]
    # Year selects the partitioned forecast-data table queried below
    y = "{}".format(generationtime.year)
    # Figure out which keys we have
    cursor.execute(
        """
    with obs as (
        select distinct key from hml_observed_data where station = %s
        and valid > now() - '3 days'::interval)
    SELECT k.id, k.label from hml_observed_keys k JOIN obs o on (k.id = o.key)
    """,
        (nwsli,),
    )
    if cursor.rowcount == 0:
        return "NO DATA"
    # Prefer the key labelled in feet; 14 is the fallback key id
    # (presumably the stage key -- confirm against hml_observed_keys).
    lookupkey = 14
    for _row in cursor:
        if _row[1].find("[ft]") > 0:
            lookupkey = _row[0]
            break
    # get observations
    odf = read_sql(
        """
    SELECT valid at time zone 'UTC' as valid,
    value from hml_observed_data WHERE station = %s
    and key = %s and valid > now() - '3 day'::interval
    and extract(minute from valid) = 0
    ORDER by valid DESC
    """,
        pgconn,
        params=(nwsli, lookupkey),
        index_col=None,
    )
    # hoop jumping to get a timestamp in the local time of this sensor
    # see akrherz/iem#187
    odf["obtime"] = (
        odf["valid"]
        .dt.tz_localize(pytz.UTC)
        .dt.tz_convert(tzinfo)
        .dt.strftime("%a. %-I %p")
    )
    # Get the latest forecast
    df = read_sql(
        f"""
    SELECT valid at time zone 'UTC' as valid,
    primary_value, secondary_value, 'F' as type from
    hml_forecast_data_{y} WHERE hml_forecast_id = %s
    ORDER by valid ASC
    """,
        pgconn,
        params=(row[0],),
        index_col=None,
    )
    # Get the obs
    # plabel = "{}[{}]".format(primaryname, primaryunits)
    # slabel = "{}[{}]".format(secondaryname, secondaryunits)
    odf = odf.rename(columns={"value": "obstage"})
    # NOTE(review): this join aligns on the default RangeIndex, i.e. it pairs
    # the i-th forecast row with the i-th (newest-first) observation row
    # positionally, not by timestamp -- confirm that is the intent.
    df = df.join(odf[["obtime", "obstage"]], how="outer")
    # hoop jumping to get a timestamp in the local time of this sensor
    # see akrherz/iem#187
    df["forecasttime"] = (
        df["valid"]
        .dt.tz_localize(pytz.UTC)
        .dt.tz_convert(tzinfo)
        .dt.strftime("%a. %-I %p")
    )
    df["forecaststage"] = df["primary_value"]
    # df[slabel] = df['secondary_value']
    # we have to do the writing from here
    res = (
        "locationid,locationname,latitude,longitude,obtime,obstage,"
        "obstage2,obstagetext,forecasttime,forecaststage,forecaststage1,"
        "forecaststage2,forecaststage3,highestvalue,highestvalue2,"
        "highestvaluedate\n"
    )
    # Two filler rows expected by the downstream consumer
    res += ",,,,,,,,,,,,,,,\n,,,,,,,,,,,,,,,\n"
    # Row with the highest forecast stage (NaNs sort last here)
    maxrow = df.sort_values("forecaststage", ascending=False).iloc[0]
    for idx, row in df.iterrows():
        fs = (
            row["forecaststage"] if not pd.isnull(row["forecaststage"]) else ""
        )
        # Station metadata and the max-forecast columns only on the first row
        res += ("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n") % (
            nwsli if idx == 0 else "",
            stationname if idx == 0 else "",
            latitude if idx == 0 else "",
            longitude if idx == 0 else "",
            row["obtime"],
            row["obstage"],
            feet(row["obstage"]),
            "Unknown" if idx == 0 else "",
            (row["forecasttime"] if row["forecasttime"] != "NaT" else ""),
            feet(row["forecaststage"], "ft"),
            fs,
            feet(row["forecaststage"]),
            fs,
            "" if idx > 0 else maxrow["forecaststage"],
            "" if idx > 0 else feet(maxrow["forecaststage"]),
            "" if idx > 0 else maxrow["forecasttime"],
        )
    return res
def do_uvi():
    """UVI index."""
    # The CPC bulletin lists two city/state/UVI triples per line.
    pattern = re.compile(
        r"(?P<c1>[A-Z\s]+)\s+(?P<s1>[A-Z][A-Z])\s+(?P<u1>\d+)\s+"
        r"(?P<c2>[A-Z\s]+)\s+(?P<s2>[A-Z][A-Z])\s+(?P<u2>\d+)",
    )
    url = (
        "https://www.cpc.ncep.noaa.gov/"
        "products/stratosphere/uv_index/bulletin.txt"
    )
    resp = requests.get(url, timeout=20)
    records = []
    for line in resp.content.decode("ascii").split("\n"):
        match = pattern.match(line)
        if match is None:
            continue
        groups = match.groupdict()
        records.extend(
            {
                "City": groups[f"c{idx}"].strip(),
                "State": groups[f"s{idx}"].strip(),
                "UVI": groups[f"u{idx}"].strip(),
            }
            for idx in ("1", "2")
        )
    return pd.DataFrame(records)
def router(appname):
    """Process and return dataframe"""
    # Prefix-dispatched reports return a fully formatted string themselves.
    if appname.startswith("ahpsobs_"):
        return do_ahps_obs(appname[8:].upper())  # we write ourselves and exit
    if appname.startswith("ahpsfx_"):
        return do_ahps_fx(appname[7:].upper())  # we write ourselves and exit
    if appname.startswith("ahps_"):
        return do_ahps(appname[5:].upper())  # we write ourselves and exit
    if appname == "iaroadcond":
        return do_iaroadcond()
    if appname == "iarwis":
        return do_iarwis()
    if appname == "iowayesterday":
        return do_iowa_azos(datetime.date.today() - datetime.timedelta(days=1))
    if appname == "iowatoday":
        return do_iowa_azos(datetime.date.today(), True)
    if appname == "kcrgcitycam":
        return do_webcams("KCRG")
    if appname == "uvi":
        return do_uvi()
    if appname.startswith("moon"):
        # e.g. moon_-93.6_41.9.txt -> lon, lat
        tokens = appname.replace(".txt", "").split("_")
        return do_moon(float(tokens[1]), float(tokens[2]))
    return "ERROR, unknown report specified"
def application(environ, start_response):
    """Do Something"""
    # WSGI entry point: dispatch on the ?q= parameter and emit plain text.
    form = parse_formvars(environ)
    res = router(form.get("q"))
    start_response("200 OK", [("Content-type", "text/plain")])
    if isinstance(res, pd.DataFrame):
        payload = res.to_csv(None, index=False).encode("ascii")
    else:
        payload = res.encode("ascii")
    return [payload]
def test_hml():
    """Can we do it?"""
    # Smoke test: exercises the full AHPS report path against a live database.
    do_ahps("DBQI4")
|
|
"""Small plotting-related utility functions."""
from __future__ import print_function, division
import colorsys
import os
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.colors as mplcol
import matplotlib.pyplot as plt
from .external.six.moves.urllib.request import urlopen, urlretrieve
from .external.six.moves.http_client import HTTPException
__all__ = ["desaturate", "saturate", "set_hls_values",
"despine", "get_dataset_names", "load_dataset"]
def remove_na(arr):
    """Helper method for removing NA values from array-like.
    Parameters
    ----------
    arr : array-like
        The array-like from which to remove NA values.
    Returns
    -------
    clean_arr : array-like
        The original array with NA values removed.
    """
    not_null_mask = pd.notnull(arr)
    return arr[not_null_mask]
def sort_df(df, *args, **kwargs):
    """Wrapper to handle different pandas sorting API pre/post 0.17."""
    # Modern pandas exposes sort_values; very old versions only had sort.
    sorter = getattr(df, "sort_values", None)
    if sorter is None:
        sorter = df.sort
    return sorter(*args, **kwargs)
def ci_to_errsize(cis, heights):
    """Convert intervals to error arguments relative to plot heights.
    Parameters
    ----------
    cis: 2 x n sequence
        sequence of confidence interval limits
    heights : n sequence
        sequence of plot heights
    Returns
    -------
    errsize : 2 x n array
        sequence of error size relative to height values in correct
        format as argument for plt.bar
    """
    cis = np.atleast_2d(cis).reshape(2, -1)
    heights = np.atleast_1d(heights)
    # Row 0 holds the lower limits, row 1 the upper; the error sizes are
    # the (positive) distances from each bar height to those limits.
    lower_err = heights - cis[0]
    upper_err = cis[1] - heights
    return np.asarray([lower_err, upper_err])
def pmf_hist(a, bins=10):
    """Return arguments to plt.bar for pmf-like histogram of an array.
    Parameters
    ----------
    a: array-like
        array to make histogram of
    bins: int
        number of bins
    Returns
    -------
    x: array
        left x position of bars
    h: array
        height of bars
    w: float
        width of bars
    """
    counts, edges = np.histogram(a, bins)
    # Normalize bin counts so the heights sum to one (a pmf).
    heights = counts / counts.sum()
    width = edges[1] - edges[0]
    return edges[:-1], heights, width
def desaturate(color, prop):
    """Decrease the saturation channel of a color by some percent.
    Parameters
    ----------
    color : matplotlib color
        hex, rgb-tuple, or html color name
    prop : float
        saturation channel of color will be multiplied by this value
    Returns
    -------
    new_color : rgb tuple
        desaturated color code in RGB tuple representation
    """
    # Reject multipliers outside [0, 1] before touching the color.
    if not 0 <= prop <= 1:
        raise ValueError("prop must be between 0 and 1")
    # Round-trip through HLS space, scaling only the saturation channel.
    rgb = mplcol.colorConverter.to_rgb(color)
    hue, lightness, sat = colorsys.rgb_to_hls(*rgb)
    return colorsys.hls_to_rgb(hue, lightness, sat * prop)
def saturate(color):
    """Return a fully saturated color with the same hue.
    Parameters
    ----------
    color : matplotlib color
        hex, rgb-tuple, or html color name
    Returns
    -------
    new_color : rgb tuple
        saturated color code in RGB tuple representation
    """
    # Delegates to set_hls_values, forcing the saturation channel to 1.
    return set_hls_values(color, s=1)
def set_hls_values(color, h=None, l=None, s=None):  # noqa
    """Independently manipulate the h, l, or s channels of a color.
    Parameters
    ----------
    color : matplotlib color
        hex, rgb-tuple, or html color name
    h, l, s : floats between 0 and 1, or None
        new values for each channel in hls space
    Returns
    -------
    new_color : rgb tuple
        new color code in RGB tuple representation
    """
    rgb = mplcol.colorConverter.to_rgb(color)
    hue, lightness, sat = colorsys.rgb_to_hls(*rgb)
    # A channel argument of None means "keep the original value".
    new_hls = (
        hue if h is None else h,
        lightness if l is None else l,
        sat if s is None else s,
    )
    return colorsys.hls_to_rgb(*new_hls)
def axlabel(xlabel, ylabel, **kwargs):
    """Grab current axis and label it."""
    current_axes = plt.gca()
    current_axes.set_xlabel(xlabel, **kwargs)
    current_axes.set_ylabel(ylabel, **kwargs)
def despine(fig=None, ax=None, top=True, right=True, left=False,
            bottom=False, offset=None, trim=False):
    """Remove the top and right spines from plot(s).
    Parameters
    ----------
    fig : matplotlib figure, optional
        Figure to despine all axes of, default uses current figure.
    ax : matplotlib axes, optional
        Specific axes object to despine.
    top, right, left, bottom : boolean, optional
        If True, remove that spine.
    offset : int or dict, optional
        Absolute distance, in points, spines should be moved away
        from the axes (negative values move spines inward). A single value
        applies to all spines; a dict can be used to set offset values per
        side.
    trim : bool, optional
        If True, limit spines to the smallest and largest major tick
        on each non-despined axis.
    Returns
    -------
    None
    """
    # Get references to the axes we want
    if fig is None and ax is None:
        axes = plt.gcf().axes
    elif fig is not None:
        axes = fig.axes
    elif ax is not None:
        axes = [ax]
    for ax_i in axes:
        for side in ["top", "right", "left", "bottom"]:
            # Toggle the spine objects
            # NOTE: locals()[side] looks up the same-named keyword argument
            # (top/right/left/bottom) of this function by string.
            is_visible = not locals()[side]
            ax_i.spines[side].set_visible(is_visible)
            if offset is not None and is_visible:
                # A dict gives per-side offsets; a scalar applies to all.
                try:
                    val = offset.get(side, 0)
                except AttributeError:
                    val = offset
                _set_spine_position(ax_i.spines[side], ('outward', val))
        # Potentially move the ticks
        if left and not right:
            maj_on = any(t.tick1On for t in ax_i.yaxis.majorTicks)
            min_on = any(t.tick1On for t in ax_i.yaxis.minorTicks)
            ax_i.yaxis.set_ticks_position("right")
            for t in ax_i.yaxis.majorTicks:
                t.tick2On = maj_on
            for t in ax_i.yaxis.minorTicks:
                t.tick2On = min_on
        if bottom and not top:
            maj_on = any(t.tick1On for t in ax_i.xaxis.majorTicks)
            min_on = any(t.tick1On for t in ax_i.xaxis.minorTicks)
            ax_i.xaxis.set_ticks_position("top")
            for t in ax_i.xaxis.majorTicks:
                t.tick2On = maj_on
            for t in ax_i.xaxis.minorTicks:
                t.tick2On = min_on
        if trim:
            # clip off the parts of the spines that extend past major ticks
            xticks = ax_i.get_xticks()
            if xticks.size:
                firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
                                        xticks)[0]
                lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
                                       xticks)[-1]
                ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
                ax_i.spines['top'].set_bounds(firsttick, lasttick)
                # Drop ticks that now fall outside the trimmed spine.
                newticks = xticks.compress(xticks <= lasttick)
                newticks = newticks.compress(newticks >= firsttick)
                ax_i.set_xticks(newticks)
            yticks = ax_i.get_yticks()
            if yticks.size:
                firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
                                        yticks)[0]
                lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
                                       yticks)[-1]
                ax_i.spines['left'].set_bounds(firsttick, lasttick)
                ax_i.spines['right'].set_bounds(firsttick, lasttick)
                newticks = yticks.compress(yticks <= lasttick)
                newticks = newticks.compress(newticks >= firsttick)
                ax_i.set_yticks(newticks)
def _set_spine_position(spine, position):
    """
    Set the spine's position without resetting an associated axis.
    As of matplotlib v. 1.0.0, if a spine has an associated axis, then
    spine.set_position() calls axis.cla(), which resets locators, formatters,
    etc. We temporarily replace that call with axis.reset_ticks(), which is
    sufficient for our purposes.
    """
    axis = spine.axis
    if axis is None:
        # No associated axis: nothing to protect.
        spine.set_position(position)
        return
    # Swap in the lighter reset_ticks while set_position runs, then restore.
    original_cla = axis.cla
    axis.cla = axis.reset_ticks
    spine.set_position(position)
    axis.cla = original_cla
def _kde_support(data, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
return np.linspace(support_min, support_max, gridsize)
def percentiles(a, pcts, axis=None):
    """Like scoreatpercentile but can take and return array of percentiles.
    Parameters
    ----------
    a : array
        data
    pcts : sequence of percentile values
        percentile or percentiles to find score at
    axis : int or None
        if not None, computes scores over this axis
    Returns
    -------
    scores: array
        array of scores at requested percentiles
        first dimension is length of object passed to ``pcts``
    """
    # A scalar pcts is wrapped in a list and the result squeezed back.
    try:
        len(pcts)
        scalar_input = False
    except TypeError:
        pcts = [pcts]
        scalar_input = True
    if axis is None:
        scores = [stats.scoreatpercentile(a.ravel(), p) for p in pcts]
    else:
        scores = [np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
                  for p in pcts]
    scores = np.asarray(scores)
    return scores.squeeze() if scalar_input else scores
def ci(a, which=95, axis=None):
    """Return a percentile range from an array of values."""
    # Center the interval of width ``which`` around the median.
    half_width = which / 2
    return percentiles(a, (50 - half_width, 50 + half_width), axis)
def sig_stars(p):
    """Return a R-style significance string corresponding to p values."""
    # Thresholds checked from most to least significant.
    annotations = ((0.001, "***"), (0.01, "**"), (0.05, "*"), (0.1, "."))
    for cutoff, stars in annotations:
        if p < cutoff:
            return stars
    return ""
def iqr(a):
    """Calculate the IQR for an array of numbers."""
    a = np.asarray(a)
    # Interquartile range: distance between the 75th and 25th percentiles.
    q1, q3 = (stats.scoreatpercentile(a, q) for q in (25, 75))
    return q3 - q1
def get_dataset_names():
    """Report available example datasets, useful for reporting issues."""
    # delayed import to not demand bs4 unless this function is actually used
    from bs4 import BeautifulSoup
    # Scrapes the GitHub file listing of the seaborn-data repository, so it
    # needs network access and depends on GitHub's HTML markup (the
    # "js-navigation-open" anchors are the file links).
    http = urlopen('https://github.com/mwaskom/seaborn-data/')
    gh_list = BeautifulSoup(http)
    return [l.text.replace('.csv', '')
            for l in gh_list.find_all("a", {"class": "js-navigation-open"})
            if l.text.endswith('.csv')]
def get_data_home(data_home=None):
    """Return the path of the seaborn data directory.
    This is used by the ``load_dataset`` function.
    If the ``data_home`` argument is not specified, the default location
    is ``~/seaborn-data``.
    Alternatively, a different default location can be specified using the
    environment variable ``SEABORN_DATA``.
    """
    if data_home is None:
        data_home = os.environ.get('SEABORN_DATA',
                                   os.path.join('~', 'seaborn-data'))
    data_home = os.path.expanduser(data_home)
    # BUGFIX: checking os.path.exists() before makedirs() is racy (TOCTOU);
    # create unconditionally and tolerate a concurrent creation.  OSError is
    # caught (rather than FileExistsError) to stay Python-2 compatible like
    # the rest of this module.
    try:
        os.makedirs(data_home)
    except OSError:
        if not os.path.isdir(data_home):
            raise
    return data_home
def load_dataset(name, cache=True, data_home=None, **kws):
    """Load a dataset from the online repository (requires internet).
    Parameters
    ----------
    name : str
        Name of the dataset (`name`.csv on
        https://github.com/mwaskom/seaborn-data). You can obtain list of
        available datasets using :func:`get_dataset_names`
    cache : boolean, optional
        If True, then cache data locally and use the cache on subsequent calls
    data_home : string, optional
        The directory in which to cache data. By default, uses ~/seaborn-data/
    kws : dict, optional
        Passed to pandas.read_csv
    """
    path = ("https://raw.githubusercontent.com/"
            "mwaskom/seaborn-data/master/{}.csv")
    full_path = path.format(name)
    if cache:
        # Download into the data home once, then read from disk afterwards.
        cache_path = os.path.join(get_data_home(data_home),
                                  os.path.basename(full_path))
        if not os.path.exists(cache_path):
            urlretrieve(full_path, cache_path)
        full_path = cache_path
    df = pd.read_csv(full_path, **kws)
    # Drop a trailing all-NaN row (produced by a terminal newline in the CSV).
    if df.iloc[-1].isnull().all():
        df = df.iloc[:-1]
    # Set some columns as a categorical type with ordered levels
    if name == "tips":
        df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
        df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
        df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
        df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])
    if name == "flights":
        df["month"] = pd.Categorical(df["month"], df.month.unique())
    if name == "exercise":
        df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
        df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
        df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])
    if name == "titanic":
        df["class"] = pd.Categorical(df["class"], ["First", "Second", "Third"])
        df["deck"] = pd.Categorical(df["deck"], list("ABCDEFG"))
    return df
def axis_ticklabels_overlap(labels):
    """Return a boolean for whether the list of ticklabels have overlaps.
    Parameters
    ----------
    labels : list of ticklabels
    Returns
    -------
    overlap : boolean
        True if any of the labels overlap.
    """
    if not labels:
        return False
    try:
        extents = [lab.get_window_extent() for lab in labels]
        # Every bbox overlaps itself, so a count above one means a clash
        # with a neighboring label.
        counts = [ext.count_overlaps(extents) for ext in extents]
        return max(counts) > 1
    except RuntimeError:
        # Issue on macosx backend raises an error in the above code
        return False
def axes_ticklabels_overlap(ax):
    """Return booleans for whether the x and y ticklabels on an Axes overlap.
    Parameters
    ----------
    ax : matplotlib Axes
    Returns
    -------
    x_overlap, y_overlap : booleans
        True when the labels on that axis overlap.
    """
    x_overlap = axis_ticklabels_overlap(ax.get_xticklabels())
    y_overlap = axis_ticklabels_overlap(ax.get_yticklabels())
    return x_overlap, y_overlap
def categorical_order(values, order=None):
    """Return a list of unique data values.
    Determine an ordered list of levels in ``values``.
    Parameters
    ----------
    values : list, array, Categorical, or Series
        Vector of "categorical" values
    order : list-like, optional
        Desired order of category levels to override the order determined
        from the ``values`` object.
    Returns
    -------
    order : list
        Ordered list of category levels not including null values.
    """
    if order is None:
        if hasattr(values, "categories"):
            # Categorical objects carry their level order directly.
            order = values.categories
        else:
            try:
                order = values.cat.categories
            except (TypeError, AttributeError):
                # Fall back to the unique values in order of appearance.
                try:
                    order = values.unique()
                except AttributeError:
                    order = pd.unique(values)
                # Numeric data gets sorted levels; non-numeric data keeps
                # appearance order.  BUGFIX: this used the ``np.float``
                # alias, which was removed in NumPy 1.24 and raised an
                # (uncaught) AttributeError; the builtin ``float`` is the
                # documented replacement.
                try:
                    np.asarray(values).astype(float)
                    order = np.sort(order)
                except (ValueError, TypeError):
                    order = order
    order = filter(pd.notnull, order)
    return list(order)
def get_color_cycle():
    """Return the list of colors in the current matplotlib color cycle."""
    try:
        cyl = mpl.rcParams['axes.prop_cycle']
        try:
            # matplotlib 1.5 verifies that axes.prop_cycle *is* a cycler
            # but no guarantee that there's a `color` key.
            # so users could have a custom rcParams w/ no color...
            return [x['color'] for x in cyl]
        except KeyError:
            pass
    except KeyError:
        pass
    # NOTE(review): 'axes.color_cycle' only exists on old matplotlib
    # versions (it was removed in 2.0); on modern matplotlib this fallback
    # raises KeyError -- confirm the supported matplotlib range.
    return mpl.rcParams['axes.color_cycle']
def relative_luminance(color):
    """Calculate the relative luminance of a color according to W3C standards
    Parameters
    ----------
    color : matplotlib color or sequence of matplotlib colors
        Hex code, rgb-tuple, or html color name.
    Returns
    -------
    luminance : float(s) between 0 and 1
    """
    channels = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]
    # Linearize sRGB channels (W3C piecewise gamma expansion).
    linear = np.where(channels <= .03928,
                      channels / 12.92,
                      ((channels + .055) / 1.055) ** 2.4)
    luminance = linear.dot([.2126, .7152, .0722])
    # A single input color yields a scalar; a sequence yields an array.
    try:
        return luminance.item()
    except ValueError:
        return luminance
def to_utf8(obj):
    """Return a Unicode string representing a Python object.
    Unicode strings (i.e. type ``unicode`` in Python 2.7 and type ``str`` in
    Python 3.x) are returned unchanged.
    Byte strings (i.e. type ``str`` in Python 2.7 and type ``bytes`` in
    Python 3.x) are returned as UTF-8-encoded strings.
    For other objects, the method ``__str__()`` is called, and the result is
    returned as a UTF-8-encoded string.
    Parameters
    ----------
    obj : object
        Any Python object
    Returns
    -------
    s : unicode (Python 2.7) / str (Python 3.x)
        UTF-8-encoded string representation of ``obj``
    """
    # NOTE: this function deliberately supports both Python 2 and 3; the
    # version differences are handled via AttributeError/NameError below.
    if isinstance(obj, str):
        try:
            # If obj is a string, try to return it as a Unicode-encoded
            # string:
            return obj.decode("utf-8")
        except AttributeError:
            # Python 3.x strings are already Unicode, and do not have a
            # decode() method, so the unchanged string is returned
            return obj
    try:
        if isinstance(obj, unicode):
            # do not attempt a conversion if string is already a Unicode
            # string:
            return obj
        else:
            # call __str__() for non-string object, and return the
            # result to Unicode:
            return obj.__str__().decode("utf-8")
    except NameError:
        # NameError is raised in Python 3.x as type 'unicode' is not
        # defined.
        if isinstance(obj, bytes):
            return obj.decode("utf-8")
        else:
            return obj.__str__()
def _network(t=None, url='https://google.com'):
    """
    Decorator that will skip a test if `url` is unreachable.
    Parameters
    ----------
    t : function, optional
    url : str, optional
    """
    # NOTE(review): depends on the third-party ``nose`` test framework.
    import nose
    if t is None:
        # Called with keyword arguments only: return a decorator bound
        # to this ``url``.
        return lambda x: _network(x, url=url)
    def wrapper(*args, **kwargs):
        # attempt to connect
        try:
            f = urlopen(url)
        except (IOError, HTTPException):
            # Network unreachable: mark the wrapped test as skipped.
            raise nose.SkipTest()
        else:
            f.close()
        return t(*args, **kwargs)
    return wrapper
|
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
.. modified by: James Ge(james.ge@gmail.com)
"""
import abc
class Frequency(object):
    """Enum like class for bar frequencies. Valid values are:
    * **Frequency.TRADE**: The bar represents a single trade.
    * **Frequency.SECOND**: The bar summarizes the trading activity during 1 second.
    * **Frequency.MINUTE**: The bar summarizes the trading activity during 1 minute.
    * **Frequency.HOUR**: The bar summarizes the trading activity during 1 hour.
    * **Frequency.DAY**: The bar summarizes the trading activity during 1 day.
    * **Frequency.WEEK**: The bar summarizes the trading activity during 1 week.
    * **Frequency.MONTH**: The bar summarizes the trading activity during 1 month.
    """
    # It is important for frequency values to get bigger for bigger windows.
    # Values are the window length in seconds; TRADE is a sentinel below
    # every real window.
    TRADE = -1
    SECOND = 1
    MINUTE = 60
    HOUR = 60*60
    DAY = 24*60*60
    WEEK = 24*60*60*7
    MONTH = 24*60*60*31  # 31 days keeps MONTH strictly larger than WEEK*4
class Bar(object):
    """A Bar is a summary of the trading activity for a security in a given period.
    .. note::
        This is a base class and should not be used directly.
    """
    # NOTE(review): __metaclass__ is Python-2 syntax; on Python 3 it is
    # ignored, so abstract-method enforcement would be lost there.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def setUseAdjustedValue(self, useAdjusted):
        """Selects whether getPrice() returns adjusted or raw close values."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getUseAdjValue(self):
        """Returns True if adjusted values are in use."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getDateTime(self):
        """Returns the :class:`datetime.datetime`."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getOpen(self, adjusted=False):
        """Returns the opening price."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getHigh(self, adjusted=False):
        """Returns the highest price."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getLow(self, adjusted=False):
        """Returns the lowest price."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getClose(self, adjusted=False):
        """Returns the closing price."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getVolume(self):
        """Returns the volume."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getAmount(self):
        """Returns the traded amount (turnover)."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getAdjClose(self):
        """Returns the adjusted closing price."""
        raise NotImplementedError()
    @abc.abstractmethod
    def getFrequency(self):
        """The bar's period."""
        raise NotImplementedError()
    def getTypicalPrice(self):
        """Returns the typical price."""
        # Simple average of high, low and close.
        return (self.getHigh() + self.getLow() + self.getClose()) / 3.0
    @abc.abstractmethod
    def getPrice(self):
        """Returns the closing or adjusted closing price."""
        raise NotImplementedError()
    def getExtraColumns(self):
        # Optional map of additional per-bar values; empty by default.
        return {}
class BasicBar(Bar):
    """Concrete :class:`Bar` backed by plain attributes.

    :param extra: optional map of extra column values; defaults to an
        empty dict.
    """

    # Optimization to reduce memory footprint.
    __slots__ = (
        '__dateTime',
        '__open',
        '__close',
        '__high',
        '__low',
        '__volume',
        '__amount',
        '__adjClose',
        '__frequency',
        '__useAdjustedValue',
        '__extra',
    )

    def __init__(self, dateTime, open_, high, low, close, volume, amount, adjClose, frequency, extra=None):
        # Fail fast on inconsistent OHLC values.
        if high < low:
            raise Exception("high < low on %s" % (dateTime))
        elif high < open_:
            raise Exception("high < open on %s" % (dateTime))
        elif high < close:
            raise Exception("high < close on %s" % (dateTime))
        elif low > open_:
            raise Exception("low > open on %s" % (dateTime))
        elif low > close:
            raise Exception("low > close on %s" % (dateTime))
        self.__dateTime = dateTime
        self.__open = open_
        self.__close = close
        self.__high = high
        self.__low = low
        self.__volume = volume
        self.__amount = amount
        self.__adjClose = adjClose
        self.__frequency = frequency
        self.__useAdjustedValue = False
        # BUGFIX: the default used to be the mutable literal ``extra={}``
        # shared by every bar constructed without an explicit value, so
        # mutating one bar's extra columns leaked into all of them.
        self.__extra = {} if extra is None else extra

    def __setstate__(self, state):
        # Pickle support; must mirror __getstate__.
        (self.__dateTime,
            self.__open,
            self.__close,
            self.__high,
            self.__low,
            self.__volume,
            self.__amount,
            self.__adjClose,
            self.__frequency,
            self.__useAdjustedValue,
            self.__extra) = state

    def __getstate__(self):
        return (
            self.__dateTime,
            self.__open,
            self.__close,
            self.__high,
            self.__low,
            self.__volume,
            self.__amount,
            self.__adjClose,
            self.__frequency,
            self.__useAdjustedValue,
            self.__extra
        )

    def setUseAdjustedValue(self, useAdjusted):
        """Selects whether getPrice() returns adjusted values."""
        if useAdjusted and self.__adjClose is None:
            raise Exception("Adjusted close is not available")
        self.__useAdjustedValue = useAdjusted

    def getUseAdjValue(self):
        return self.__useAdjustedValue

    def getDateTime(self):
        return self.__dateTime

    def getOpen(self, adjusted=False):
        """Returns the opening price, scaled by adjClose/close when adjusted."""
        if adjusted:
            if self.__adjClose is None:
                raise Exception("Adjusted close is missing")
            return self.__adjClose * self.__open / float(self.__close)
        else:
            return self.__open

    def getHigh(self, adjusted=False):
        """Returns the highest price, scaled by adjClose/close when adjusted."""
        if adjusted:
            if self.__adjClose is None:
                raise Exception("Adjusted close is missing")
            return self.__adjClose * self.__high / float(self.__close)
        else:
            return self.__high

    def getLow(self, adjusted=False):
        """Returns the lowest price, scaled by adjClose/close when adjusted."""
        if adjusted:
            if self.__adjClose is None:
                raise Exception("Adjusted close is missing")
            return self.__adjClose * self.__low / float(self.__close)
        else:
            return self.__low

    def getClose(self, adjusted=False):
        """Returns the (optionally adjusted) closing price."""
        if adjusted:
            if self.__adjClose is None:
                raise Exception("Adjusted close is missing")
            return self.__adjClose
        else:
            return self.__close

    def getVolume(self):
        return self.__volume

    def getAmount(self):
        return self.__amount

    def getAdjClose(self):
        return self.__adjClose

    def getFrequency(self):
        return self.__frequency

    def getPrice(self):
        """Returns the adjusted or raw close, per setUseAdjustedValue()."""
        if self.__useAdjustedValue:
            return self.__adjClose
        else:
            return self.__close

    def getExtraColumns(self):
        return self.__extra
class Bars(object):
    """A group of :class:`Bar` objects.
    :param barDict: A map of instrument to :class:`Bar` objects.
    :type barDict: map.
    .. note::
        All bars must have the same datetime.
    """
    def __init__(self, barDict):
        if len(barDict) == 0:
            raise Exception("No bars supplied")
        # Check that bar datetimes are in sync.
        # PORTABILITY FIX: dict.items() replaces the Python-2-only
        # iteritems() -- identical behavior on Python 2, works on Python 3.
        firstDateTime = None
        firstInstrument = None
        for instrument, currentBar in barDict.items():
            if firstDateTime is None:
                firstDateTime = currentBar.getDateTime()
                firstInstrument = instrument
            elif currentBar.getDateTime() != firstDateTime:
                raise Exception("Bar data times are not in sync. %s %s != %s %s" % (
                    instrument,
                    currentBar.getDateTime(),
                    firstInstrument,
                    firstDateTime
                ))
        self.__barDict = barDict
        self.__dateTime = firstDateTime
    def __getitem__(self, instrument):
        """Returns the :class:`pyalgotrade.bar.Bar` for the given instrument.
        If the instrument is not found an exception is raised."""
        return self.__barDict[instrument]
    def __contains__(self, instrument):
        """Returns True if a :class:`pyalgotrade.bar.Bar` for the given instrument is available."""
        return instrument in self.__barDict
    def items(self):
        return self.__barDict.items()
    def keys(self):
        return self.__barDict.keys()
    def getInstruments(self):
        """Returns the instrument symbols."""
        return self.__barDict.keys()
    def getDateTime(self):
        """Returns the :class:`datetime.datetime` for this set of bars."""
        return self.__dateTime
    def getBar(self, instrument):
        """Returns the :class:`pyalgotrade.bar.Bar` for the given instrument or None if the instrument is not found."""
        return self.__barDict.get(instrument, None)
class BasicTick(object):
    """Tick-level quote with book levels and bought/sold breakdowns.

    Adjusted prices are not available for ticks, so every ``adjusted=True``
    path raises.
    """

    # Optimization to reduce memory footprint.
    __slots__ = (
        '__dateTime',
        '__open',
        '__close',
        '__high',
        '__low',
        '__volume',
        '__amount',
        '__bp',
        '__bv',
        '__ap',
        '__av',
        '__preclose',
        '__new_price',
        '__bought_amount',
        '__sold_amount',
        '__bought_volume',
        '__sold_volume',
        '__frequency',
        '__extra',
        '__useAdjustedValue',
    )

    # BUGFIX: the adjusted-price branches read self.__adjClose, but no such
    # slot was ever assigned, so they raised AttributeError.  This class
    # attribute makes them consistently raise the intended Exception.
    __adjClose = None

    def __init__(self, dateTime, open_, high, low, close, volume, amount, bp, bv, ap, av, preclose,
                 new_price, bought_amount, sold_amount, bought_volume, sold_volume, frequency, extra=None):
        # Fail fast on inconsistent OHLC values.
        if high < low:
            raise Exception("high < low on %s" % (dateTime))
        elif high < open_:
            raise Exception("high < open on %s" % (dateTime))
        elif high < close:
            raise Exception("high < close on %s" % (dateTime))
        elif low > open_:
            raise Exception("low > open on %s" % (dateTime))
        elif low > close:
            raise Exception("low > close on %s" % (dateTime))
        self.__dateTime = dateTime
        self.__open = open_
        self.__close = close
        self.__high = high
        self.__low = low
        self.__volume = volume
        self.__amount = amount
        self.__bp = bp
        self.__ap = ap
        self.__bv = bv
        self.__av = av
        self.__preclose = preclose
        # BUGFIX: new_price was accepted but silently dropped.
        self.__new_price = new_price
        self.__bought_amount = bought_amount
        self.__sold_amount = sold_amount
        self.__bought_volume = bought_volume
        self.__sold_volume = sold_volume
        self.__frequency = frequency
        # BUGFIX: mutable default argument ({}) was shared across instances.
        self.__extra = {} if extra is None else extra
        self.__useAdjustedValue = False

    def __setstate__(self, state):
        # BUGFIX: a stray trailing comma used to wrap ``state`` in a
        # 1-tuple, so unpickling always raised ValueError.
        (self.__dateTime,
            self.__open,
            self.__close,
            self.__high,
            self.__low,
            self.__volume,
            self.__amount,
            self.__bp,
            self.__ap,
            self.__bv,
            self.__av,
            self.__preclose,
            self.__bought_amount,
            self.__sold_amount,
            self.__bought_volume,
            self.__sold_volume,
            self.__frequency,
            self.__extra) = state
        # Fields that are not part of the pickled state.
        self.__useAdjustedValue = False
        self.__new_price = None

    def __getstate__(self):
        return (self.__dateTime,
                self.__open,
                self.__close,
                self.__high,
                self.__low,
                self.__volume,
                self.__amount,
                self.__bp,
                self.__ap,
                self.__bv,
                self.__av,
                self.__preclose,
                self.__bought_amount,
                self.__sold_amount,
                self.__bought_volume,
                self.__sold_volume,
                self.__frequency,
                self.__extra)

    def getDateTime(self):
        return self.__dateTime

    def getOpen(self, adjusted=False):
        """Returns the opening price; raises when ``adjusted`` is requested."""
        if adjusted:
            if self.__adjClose is None:
                raise Exception("Adjusted close is missing")
            return self.__adjClose * self.__open / float(self.__close)
        else:
            return self.__open

    def getHigh(self, adjusted=False):
        """Returns the highest price; raises when ``adjusted`` is requested."""
        if adjusted:
            if self.__adjClose is None:
                raise Exception("Adjusted close is missing")
            return self.__adjClose * self.__high / float(self.__close)
        else:
            return self.__high

    def getLow(self, adjusted=False):
        """Returns the lowest price; raises when ``adjusted`` is requested."""
        if adjusted:
            if self.__adjClose is None:
                raise Exception("Adjusted close is missing")
            return self.__adjClose * self.__low / float(self.__close)
        else:
            return self.__low

    def getClose(self, adjusted=False):
        return self.__close

    def getVolume(self):
        return self.__volume

    def getAmount(self):
        return self.__amount

    def getFrequency(self):
        return self.__frequency

    def getBp(self):
        return self.__bp

    def getBv(self):
        return self.__bv

    def getAp(self):
        return self.__ap

    def getAv(self):
        return self.__av

    def getPreclose(self):
        return self.__preclose

    def getNewPrice(self):
        """Returns the latest trade price (None after unpickling)."""
        return self.__new_price

    def getBoughtVolume(self):
        return self.__bought_volume

    def getBoughtAmount(self):
        return self.__bought_amount

    def getSoldVolume(self):
        return self.__sold_volume

    def getSoldAmount(self):
        return self.__sold_amount

    def getExtraColumns(self):
        return self.__extra

    def setUseAdjustedValue(self, useAdjusted):
        # Ticks never have an adjusted close, so only False is accepted.
        if useAdjusted and self.__adjClose is None:
            raise Exception("Adjusted close is not available")
        self.__useAdjustedValue = useAdjusted

    def getUseAdjValue(self):
        return self.__useAdjustedValue

    def getAdjClose(self):
        return self.__close

    def getPrice(self):
        return self.__close
class Ticks(object):
    """A group of tick objects keyed by instrument.
    :param barDict: A map of instrument to tick objects.
    :type barDict: map.
    .. note::
        All ticks must have the same datetime.
    """
    def __init__(self, barDict):
        if len(barDict) == 0:
            raise Exception("No bars supplied")
        # Check that bar datetimes are in sync.
        # PORTABILITY FIX: dict.items() replaces the Python-2-only
        # iteritems() -- identical behavior on Python 2, works on Python 3.
        firstDateTime = None
        firstInstrument = None
        for instrument, currentBar in barDict.items():
            if firstDateTime is None:
                firstDateTime = currentBar.getDateTime()
                firstInstrument = instrument
            elif currentBar.getDateTime() != firstDateTime:
                raise Exception("Bar data times are not in sync. %s %s != %s %s" % (
                    instrument,
                    currentBar.getDateTime(),
                    firstInstrument,
                    firstDateTime
                ))
        self.__barDict = barDict
        self.__dateTime = firstDateTime
    def __getitem__(self, instrument):
        """Returns the tick for the given instrument.
        If the instrument is not found an exception is raised."""
        return self.__barDict[instrument]
    def __contains__(self, instrument):
        """Returns True if a tick for the given instrument is available."""
        return instrument in self.__barDict
    def items(self):
        return self.__barDict.items()
    def keys(self):
        return self.__barDict.keys()
    def getInstruments(self):
        """Returns the instrument symbols."""
        return self.__barDict.keys()
    def getDateTime(self):
        """Returns the :class:`datetime.datetime` for this set of ticks."""
        return self.__dateTime
    def getBar(self, instrument):
        """Returns the tick for the given instrument or None if the instrument is not found."""
        return self.__barDict.get(instrument, None)
|
|
#
# Copyright 2010-2013 Red Hat, Inc. and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Refer to the README and COPYING files for full details of the license.
#
import os
import platform
import time
import locale
import unicodedata
import logging
# avoid pep8 warnings
def import_json():
    """Return the stdlib ``json`` module, or ``simplejson`` on old Pythons
    where ``json`` is unavailable."""
    try:
        import json as json_module
    except ImportError:
        import simplejson as json_module
    return json_module
json = import_json()
# U+FFFD REPLACEMENT CHARACTER, substituted for anything not allowed in XML.
__REPLACEMENT_CHAR = u'\ufffd'
# Set taken from http://www.w3.org/TR/xml11/#NT-RestrictedChar
# Code points that XML 1.1 classifies as RestrictedChar and which we
# therefore replace before emitting.
__RESTRICTED_CHARS = set(range(8 + 1))\
    .union(set(range(0xB, 0xC + 1)))\
    .union(set(range(0xE, 0x1F + 1)))\
    .union(set(range(0x7F, 0x84 + 1)))\
    .union(set(range(0x86, 0x9F + 1)))
def _string_convert(str):
    """
    This function tries to convert the given string to an unicode string
    """
    # NOTE(review): the parameter shadows the ``str`` builtin; kept as-is
    # for compatibility.  Python 2 only (relies on the ``unicode`` type).
    if isinstance(str, unicode):
        return str
    try:
        # Strict decode first so well-formed input round-trips exactly.
        return str.decode(locale.getpreferredencoding(), 'strict')
    except UnicodeError:
        try:
            # Fall back to replacing undecodable bytes with U+FFFD.
            return str.decode(locale.getpreferredencoding(), 'replace')
        except UnicodeError:
            # unrepresentable string
            return u'????'
def _filter_xml_chars(u):
    """
    The set of characters allowed in XML documents is described in
    http://www.w3.org/TR/xml11/#charsets
    "Char" is defined as any unicode character except the surrogate blocks,
    \ufffe and \uffff.
    "RestrictedChar" is defined as the code points in __RESTRICTED_CHARS above
    It's a little hard to follow, but the upshot is an XML document must
    contain only characters in Char that are not in RestrictedChar.
    Note that Python's xmlcharrefreplace option is not relevant here -
    that's about handling characters which can't be encoded in a given charset
    encoding, not which aren't permitted in XML.
    """
    def filter_xml_char(c):
        # Map every disallowed code point to U+FFFD, pass everything else.
        if ord(c) > 0x10ffff:
            return __REPLACEMENT_CHAR  # Outside Unicode range
        elif unicodedata.category(c) == 'Cs':
            return __REPLACEMENT_CHAR  # Surrogate pair code point
        elif ord(c) == 0xFFFE or ord(c) == 0xFFFF:
            return __REPLACEMENT_CHAR  # Specifically excluded code points
        elif ord(c) in __RESTRICTED_CHARS:
            return __REPLACEMENT_CHAR
        else:
            return c
    # Only unicode input is accepted; byte strings must be converted first.
    if not isinstance(u, unicode):
        raise TypeError
    return ''.join(filter_xml_char(c) for c in u)
def _filter_object(obj):
    """
    Apply _filter_xml_chars and _string_check on all strings in the given
    object
    """
    # Recursively walks dicts/lists/tuples, sanitizing every string leaf.
    # Python 2 only: relies on iteritems(), basestring, and list-returning
    # map().
    def filt(o):
        if isinstance(o, dict):
            # Filters both keys and values (each (k, v) pair is a tuple).
            return dict(map(filt, o.iteritems()))
        if isinstance(o, list):
            return map(filt, o)
        if isinstance(o, tuple):
            return tuple(map(filt, o))
        if isinstance(o, basestring):
            return _filter_xml_chars(_string_convert(o))
        return o
    return filt(obj)
class VirtIoStream(object):
    """Thin byte-stream wrapper over a virtio-serial port, selecting a
    platform-appropriate backend (test double, Windows file, or POSIX fd)
    at construction time."""
    # Python on Windows 7 returns 'Microsoft' rather than 'Windows' as
    # documented.
    is_windows = platform.system() in ['Windows', 'Microsoft']
    # Flipped externally by tests to force the test_port backend.
    is_test = False
    def __init__(self, vport_name):
        if self.is_test:
            from test_port import get_test_port
            self._vport = get_test_port(vport_name)
            self._read = self._vport.read
            self._write = self._vport.write
        elif self.is_windows:
            from WinFile import WinFile
            self._vport = WinFile(vport_name)
            self._read = self._vport.read
            self._write = self._vport.write
        else:
            # POSIX: open the virtio-serial device as a raw fd.
            self._vport = os.open(vport_name, os.O_RDWR)
            self._read = self._os_read
            self._write = self._os_write
    def _os_read(self, size):
        # Raw fd read; may return fewer bytes than requested.
        return os.read(self._vport, size)
    def _os_write(self, buffer):
        # Returns the number of bytes actually written.
        return os.write(self._vport, buffer)
    def read(self, size):
        return self._read(size)
    def write(self, buffer):
        return self._write(buffer)
class VirtIoChannel:
    """Line-oriented JSON message channel on top of :class:`VirtIoStream`.

    Each message is a single-line JSON object terminated by a newline;
    the ``__name__`` key carries the message name.
    """

    def __init__(self, vport_name):
        self._stream = VirtIoStream(vport_name)
        # Bytes received but not yet consumed as a complete line.
        self._buffer = ''

    def _readbuffer(self):
        """Pull up to 4096 bytes from the stream into the line buffer."""
        buffer = self._stream.read(4096)
        if buffer:
            self._buffer += buffer
        else:
            # read() returns immediately (non-blocking) if no one is
            # listening on the other side of the virtio-serial port.
            # So in order not to be in a tight-loop and waste CPU
            # time, we just sleep for a while and hope someone will
            # be there when we will awake from our nap.
            time.sleep(1)

    def _readline(self):
        """Block until a complete newline-terminated line is buffered and
        return it without the terminator."""
        newline = self._buffer.find('\n')
        while newline < 0:
            self._readbuffer()
            newline = self._buffer.find('\n')
        if newline >= 0:
            line, self._buffer = self._buffer.split('\n', 1)
        else:
            line = None
        return line

    def _parseLine(self, line):
        """Decode one JSON line into (name, args); (None, None) on any
        parse error."""
        try:
            args = json.loads(line.decode('utf8'))
            name = args['__name__']
            del args['__name__']
        except:
            name = None
            args = None
        return (name, args)

    def read(self):
        """Read one message and return (name, args)."""
        return self._parseLine(self._readline())

    def write(self, name, args=None):
        """Serialize ``args`` under message ``name`` and write it out,
        retrying partial writes up to 5 times.

        NOTE(review): if the stream keeps accepting only partial writes,
        the message tail is silently dropped after the 5th attempt.
        """
        # BUGFIX: the default used to be the mutable literal ``args={}``,
        # which this method then mutated (adding '__name__'), polluting the
        # shared default across calls.
        if args is None:
            args = {}
        count = 5
        i = 0
        if not isinstance(name, str):
            # BUGFIX: the message previously read "1nd arg".
            raise TypeError("1st arg must be a str.")
        if not isinstance(args, dict):
            raise TypeError("2nd arg must be a dict.")
        args['__name__'] = name
        args = _filter_object(args)
        message = (json.dumps(args) + '\n').encode('utf8')
        while len(message) > 0 and i < count:
            written = self._stream.write(message)
            logging.debug("Written %s" % message[:written])
            logging.debug("message = %s " % message)
            message = message[written:]
            i += 1
def _create_vio():
    """Open the RHEV-M agent channel at its platform-specific device path."""
    if platform.system() in ('Windows', 'Microsoft'):
        port_path = '\\\\.\\Global\\com.redhat.rhevm.vdsm'
    else:
        port_path = '/dev/virtio-ports/com.redhat.rhevm.vdsm'
    return VirtIoChannel(port_path)
def _test_write():
    """Manual smoke test: push sample agent reports down the channel."""
    channel = _create_vio()
    nic_report = {'interfaces': [{
        'name': 'eth0',
        'inet': ['10.0.0.2'],
        'inet6': ['fe80::213:20ff:fef5:f9d6'],
        'hw': '00:1a:4a:23:10:00'}]}
    channel.write('network-interfaces', nic_report)
    app_report = {'applications': ['kernel-2.6.32-131.4.1.el6',
                                   'rhev-agent-2.3.11-1.el6']}
    channel.write('applications', app_report)
def _test_read():
    """Manual smoke test: echo incoming messages until a falsy read."""
    channel = _create_vio()
    message = channel.read()
    while message:
        print(message)
        message = channel.read()
if __name__ == "__main__":
    # Running this module directly starts the read-side smoke test, which
    # dumps messages arriving on the virtio channel.
    _test_read()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.appengine.tools.devappserver2.http_runtime."""
import base64
import os
import re
import shutil
import subprocess
import tempfile
import time
import unittest
import google
import mox
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import http_proxy
from google.appengine.tools.devappserver2 import http_runtime
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import safe_subprocess
from google.appengine.tools.devappserver2 import wsgi_test_utils
class MockMessage(object):
  """Minimal stand-in for an httplib message object.

  Wraps a list of (name, value) header pairs; iteration yields each
  distinct header name once, and getheaders() returns every value
  recorded for a given name.
  """

  def __init__(self, headers):
    self.headers = headers

  def __iter__(self):
    distinct_names = {name for name, _ in self.headers}
    return iter(distinct_names)

  def getheaders(self, name):
    matches = []
    for header_name, value in self.headers:
      if header_name == name:
        matches.append(value)
    return matches
class FakeHttpResponse(object):
  """Scripted replacement for httplib.HTTPResponse.

  The first read() returns the canned body; subsequent reads either raise
  partial_read_error (when a test sets it) or return the empty string.
  """

  def __init__(self, status, reason, headers, body):
    self.status = status
    self.reason = reason
    self.headers = headers
    self.body = body
    self.msg = MockMessage(headers)
    self.has_read = False
    self.partial_read_error = None

  def read(self, amt=None):
    if self.has_read:
      if self.partial_read_error:
        raise self.partial_read_error
      return ''
    self.has_read = True
    return self.body

  def getheaders(self):
    return self.headers
# We use a fake Tee to avoid the complexity of a real Tee's thread racing with
# the mocking framework and possibly surviving (and calling stderr.readline())
# after a test case completes.
class FakeTee(object):
  """Inert stand-in for a stderr Tee thread: holds a fixed buffer and
  joins instantly, so no real thread races the mocking framework."""

  def __init__(self, buf):
    self.buf = buf

  def get_buf(self):
    return self.buf

  def join(self, unused_timeout):
    return None
class ModuleConfigurationStub(object):
  """Bare-bones module configuration exposing only the fields the proxy
  under test reads."""

  def __init__(self, application_root='/tmp', error_handlers=None):
    self.error_handlers = error_handlers
    self.application_root = application_root
class HttpRuntimeProxyTest(wsgi_test_utils.WSGITestCase):
  """Tests for HttpRuntimeProxy with the default stdin/stdout start flavor.

  The runtime subprocess, login module and the proxy's connection wait are
  all mocked with mox: each test records expectations, replays them, runs
  the proxy and verifies the recorded calls.
  """

  def setUp(self):
    self.mox = mox.Mox()
    self.tmpdir = tempfile.mkdtemp()
    module_configuration = ModuleConfigurationStub(
        application_root=self.tmpdir,
        error_handlers=[
            appinfo.ErrorHandlers(error_code='over_quota', file='foo.html'),
            appinfo.ErrorHandlers(error_code='default', file='error.html'),
        ])
    self.runtime_config = runtime_config_pb2.Config()
    self.runtime_config.app_id = 'app'
    self.runtime_config.version_id = 'version'
    self.runtime_config.api_port = 12345
    self.runtime_config.application_root = self.tmpdir
    self.runtime_config.datacenter = 'us1'
    self.runtime_config.instance_id = 'abc3dzac4'
    self.runtime_config.auth_domain = 'gmail.com'
    self.runtime_config_getter = lambda: self.runtime_config
    self.proxy = http_runtime.HttpRuntimeProxy(
        ['/runtime'], self.runtime_config_getter, module_configuration,
        env={'foo': 'bar'})
    self.proxy._port = 23456
    self.process = self.mox.CreateMock(subprocess.Popen)
    self.process.stdin = self.mox.CreateMockAnything()
    self.process.stdout = self.mox.CreateMockAnything()
    self.process.stderr = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(safe_subprocess, 'start_process')
    self.mox.StubOutWithMock(login, 'get_user_info')
    self.url_map = appinfo.URLMap(url=r'/(get|post).*',
                                  script=r'\1.py')
    self.mox.StubOutWithMock(http_proxy.HttpProxy, 'wait_for_connection')
    # Recorded as a mox expectation: every test's start() must wait for
    # the runtime to accept connections.
    http_proxy.HttpProxy.wait_for_connection()

  def tearDown(self):
    shutil.rmtree(self.tmpdir)
    self.mox.UnsetStubs()

  def test_start_and_quit(self):
    """start() launches the runtime and reads its port; quit() kills it."""
    ## Test start()
    # start()
    safe_subprocess.start_process(
        ['/runtime'],
        base64.b64encode(self.runtime_config.SerializeToString()),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env={'foo': 'bar'},
        cwd=self.tmpdir).AndReturn(self.process)
    # The runtime reports its listening port on its first stdout line.
    self.process.stdout.readline().AndReturn('30000')
    self.proxy._stderr_tee = FakeTee('')
    self.mox.ReplayAll()
    self.proxy.start()
    self.mox.VerifyAll()
    self.mox.ResetAll()
    ## Test quit()
    self.process.kill()
    self.mox.ReplayAll()
    self.proxy.quit()
    self.mox.VerifyAll()

  def test_start_bad_port(self):
    """A malformed port line surfaces as a 500 carrying the stderr tail."""
    safe_subprocess.start_process(
        ['/runtime'],
        base64.b64encode(self.runtime_config.SerializeToString()),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env={'foo': 'bar'},
        cwd=self.tmpdir).AndReturn(self.process)
    # Not a bare integer, so the proxy must treat startup as failed.
    self.process.stdout.readline().AndReturn('hello 30001')
    header = "bad runtime process port ['hello 30001']\n\n"
    stderr0 = "I've just picked up a fault in the AE35 unit.\n"
    stderr1 = "It's going to go 100% failure in 72 hours.\n"
    self.proxy._stderr_tee = FakeTee(stderr0 + stderr1)
    self.mox.ReplayAll()
    self.proxy.start()
    expected_headers = {
        'Content-Type': 'text/plain',
        'Content-Length': str(len(header) + len(stderr0) + len(stderr1)),
    }
    self.assertResponse('500 Internal Server Error', expected_headers,
                        header + stderr0 + stderr1,
                        self.proxy.handle, {},
                        url_map=self.url_map,
                        match=re.match(self.url_map.url, '/get%20request'),
                        request_id='request id',
                        request_type=instance.NORMAL_REQUEST)
    self.mox.VerifyAll()
class HttpRuntimeProxyFileFlavorTest(wsgi_test_utils.WSGITestCase):
  """Tests for HttpRuntimeProxy with the START_PROCESS_FILE flavor.

  In this flavor the runtime writes its port number to a temporary file
  (process.child_out) instead of stdout, and the proxy polls that file
  until a newline-terminated port arrives.
  """

  def setUp(self):
    self.mox = mox.Mox()
    self.tmpdir = tempfile.mkdtemp()
    module_configuration = ModuleConfigurationStub(application_root=self.tmpdir)
    self.runtime_config = runtime_config_pb2.Config()
    self.runtime_config.app_id = 'app'
    self.runtime_config.version_id = 'version'
    self.runtime_config.api_port = 12345
    self.runtime_config.application_root = self.tmpdir
    self.runtime_config.datacenter = 'us1'
    self.runtime_config.instance_id = 'abc3dzac4'
    self.runtime_config.auth_domain = 'gmail.com'
    self.runtime_config_getter = lambda: self.runtime_config
    self.proxy = http_runtime.HttpRuntimeProxy(
        ['/runtime'], self.runtime_config_getter, module_configuration,
        env={'foo': 'bar'},
        start_process_flavor=http_runtime.START_PROCESS_FILE)
    # The lock itself is mocked so `with self.proxy._process_lock:` in the
    # tests records a mox expectation rather than taking a real lock.
    self.mox.StubOutWithMock(self.proxy, '_process_lock')
    self.process = self.mox.CreateMock(subprocess.Popen)
    self.process.stdin = self.mox.CreateMockAnything()
    self.process.stdout = self.mox.CreateMockAnything()
    self.process.stderr = self.mox.CreateMockAnything()
    self.process.child_out = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(safe_subprocess, 'start_process_file')
    self.mox.StubOutWithMock(os, 'remove')
    self.mox.StubOutWithMock(time, 'sleep')
    self.url_map = appinfo.URLMap(url=r'/(get|post).*',
                                  script=r'\1.py')
    self.mox.StubOutWithMock(http_proxy.HttpProxy, 'wait_for_connection')
    http_proxy.HttpProxy.wait_for_connection()

  def tearDown(self):
    shutil.rmtree(self.tmpdir)
    self.mox.UnsetStubs()

  def test_basic(self):
    """Basic functionality test of START_PROCESS_FILE flavor."""
    # start()
    # As the lock is mocked out, this provides a mox expectation.
    with self.proxy._process_lock:
      safe_subprocess.start_process_file(
          args=['/runtime'],
          input_string=self.runtime_config.SerializeToString(),
          env={'foo': 'bar'},
          cwd=self.tmpdir,
          stderr=subprocess.PIPE).AndReturn(self.process)
    # The runtime reports its port via the child_out temp file, which the
    # proxy reads and then removes.
    self.process.poll().AndReturn(None)
    self.process.child_out.seek(0).AndReturn(None)
    self.process.child_out.read().AndReturn('1234\n')
    self.process.child_out.close().AndReturn(None)
    self.process.child_out.name = '/tmp/c-out.ABC'
    os.remove('/tmp/c-out.ABC').AndReturn(None)
    self.proxy._stderr_tee = FakeTee('')
    self.mox.ReplayAll()
    self.proxy.start()
    self.assertEquals(1234, self.proxy._proxy._port)
    self.mox.VerifyAll()

  def test_slow_shattered(self):
    """The port number is received slowly in chunks."""
    # start()
    # As the lock is mocked out, this provides a mox expectation.
    with self.proxy._process_lock:
      safe_subprocess.start_process_file(
          args=['/runtime'],
          input_string=self.runtime_config.SerializeToString(),
          env={'foo': 'bar'},
          cwd=self.tmpdir,
          stderr=subprocess.PIPE).AndReturn(self.process)
    # The port file fills up over several polls; the proxy sleeps between
    # reads until the newline-terminated port finally arrives.
    for response, sleeptime in [
        ('', .125), ('43', .25), ('4321', .5), ('4321\n', None)]:
      self.process.poll().AndReturn(None)
      self.process.child_out.seek(0).AndReturn(None)
      self.process.child_out.read().AndReturn(response)
      if sleeptime is not None:
        time.sleep(sleeptime).AndReturn(None)
    self.process.child_out.close().AndReturn(None)
    self.process.child_out.name = '/tmp/c-out.ABC'
    os.remove('/tmp/c-out.ABC').AndReturn(None)
    self.proxy._stderr_tee = FakeTee('')
    self.mox.ReplayAll()
    self.proxy.start()
    self.assertEquals(4321, self.proxy._proxy._port)
    self.mox.VerifyAll()

  def test_runtime_instance_dies_immediately(self):
    """Runtime instance dies without sending a port."""
    # start()
    # As the lock is mocked out, this provides a mox expectation.
    with self.proxy._process_lock:
      safe_subprocess.start_process_file(
          args=['/runtime'],
          input_string=self.runtime_config.SerializeToString(),
          env={'foo': 'bar'},
          cwd=self.tmpdir,
          stderr=subprocess.PIPE).AndReturn(self.process)
    # poll() returning an exit code means the process already died.
    self.process.poll().AndReturn(1)
    self.process.child_out.close().AndReturn(None)
    self.process.child_out.name = '/tmp/c-out.ABC'
    os.remove('/tmp/c-out.ABC').AndReturn(None)
    header = "bad runtime process port ['']\n\n"
    stderr0 = 'Go away..\n'
    self.proxy._stderr_tee = FakeTee(stderr0)
    time.sleep(.1).AndReturn(None)
    self.mox.ReplayAll()
    self.proxy.start()
    expected_headers = {
        'Content-Type': 'text/plain',
        'Content-Length': str(len(header) + len(stderr0)),
    }
    self.assertResponse('500 Internal Server Error', expected_headers,
                        header + stderr0,
                        self.proxy.handle, {},
                        url_map=self.url_map,
                        match=re.match(self.url_map.url, '/get%20request'),
                        request_id='request id',
                        request_type=instance.NORMAL_REQUEST)
    self.mox.VerifyAll()

  def test_runtime_instance_invalid_response(self):
    """Runtime instance does not terminate port with a newline."""
    # start()
    # As the lock is mocked out, this provides a mox expectation.
    with self.proxy._process_lock:
      safe_subprocess.start_process_file(
          args=['/runtime'],
          input_string=self.runtime_config.SerializeToString(),
          env={'foo': 'bar'},
          cwd=self.tmpdir,
          stderr=subprocess.PIPE).AndReturn(self.process)
    # Without a trailing newline the proxy keeps polling with exponential
    # backoff until it gives up and reports startup failure.
    for response, sleeptime in [
        ('30000', .125), ('30000', .25), ('30000', .5), ('30000', 1.0),
        ('30000', 2.0), ('30000', 4.0), ('30000', 8.0), ('30000', 16.0),
        ('30000', 32.0), ('30000', None)]:
      self.process.poll().AndReturn(None)
      self.process.child_out.seek(0).AndReturn(None)
      self.process.child_out.read().AndReturn(response)
      if sleeptime is not None:
        time.sleep(sleeptime).AndReturn(None)
    self.process.child_out.close().AndReturn(None)
    self.process.child_out.name = '/tmp/c-out.ABC'
    os.remove('/tmp/c-out.ABC').AndReturn(None)
    header = "bad runtime process port ['']\n\n"
    stderr0 = 'Go away..\n'
    self.proxy._stderr_tee = FakeTee(stderr0)
    time.sleep(.1)
    self.mox.ReplayAll()
    self.proxy.start()
    expected_headers = {
        'Content-Type': 'text/plain',
        'Content-Length': str(len(header) + len(stderr0)),
    }
    self.assertResponse('500 Internal Server Error', expected_headers,
                        header + stderr0,
                        self.proxy.handle, {},
                        url_map=self.url_map,
                        match=re.match(self.url_map.url, '/get%20request'),
                        request_id='request id',
                        request_type=instance.NORMAL_REQUEST)
    self.mox.VerifyAll()
if __name__ == '__main__':
  # Run all test cases in this module.
  unittest.main()
|
|
# -*- python -*-
# ex: set syntax=python:
import logging
import urllib2
import json
import string
import re
from password import *
from buildbot.status.web.hooks.github import GitHubEventHandler
from dateutil.parser import parse as dateparse
from twisted.python import log
# Comma-separated builder lists used as change "categories" to route
# commits to builders.  builders_common and builders_linux carry a
# trailing comma so they can be concatenated below.
builders_common="arch,"
builders_linux="centos7,centos8,centosstream8,debian10,fedora33,builtin,"
builders_freebsd="freebsd12,freebsd13,freebsd14"
# NOTE(review): builders_freebsd has no trailing comma, so the next line
# yields "...freebsd14coverage" as one token -- confirm whether the
# category consumer does substring matching (works) or comma-splitting
# (would break 'freebsd14' and 'coverage' for master pushes).
builders_push_master=builders_common+builders_linux+builders_freebsd+"coverage"
builders_push_release=builders_common+builders_linux+builders_freebsd
builders_pr_master=builders_common+builders_linux+builders_freebsd
builders_pr_release=builders_common+builders_linux+builders_freebsd
# Default builders for non-top PR commits
builders_pr_minimum="arch"
def query_url(url, token=None):
    """Fetch `url` and return its body parsed as JSON.

    Args:
        url: URL to request (GitHub API endpoint).
        token: optional GitHub OAuth token, sent as an Authorization header.
    Returns:
        The decoded JSON response.
    """
    log.msg("Making request to '%s'" % url)
    request = urllib2.Request(url)
    if token:
        request.add_header("Authorization", "token %s" % token)
    response = urllib2.urlopen(request)
    try:
        return json.loads(response.read())
    finally:
        # urllib2 responses are not context managers on Python 2; close
        # explicitly so repeated polls do not leak sockets.
        response.close()
#
# Custom class to determine how to handle incoming Github changes.
#
class CustomGitHubEventHandler(GitHubEventHandler):
    """Translate GitHub push and pull-request webhooks into buildbot changes.

    Commit messages may carry directives that tune the build:
      * 'Requires-builders: <categories|none>' selects (or disables) the
        builder categories for that commit (see parse_comments()).
      * 'Requires-kernel: <version>' pins a kernel for the whole PR.
      * The regexes in valid_props toggle individual build/test steps.
    """

    # (regex, property name) pairs: a match in the commit message stores
    # the captured yes/no (or config) value as a JSON-encoded change
    # property under the given name.
    valid_props = [
        (r'^Build[-\s]linux:\s*(yes|no)\s*$', 'override-buildlinux'),
        (r'^Build[-\s]zfs:\s*(yes|no)\s*$', 'override-buildzfs'),
        (r'^Built[-\s]in:\s*(yes|no)\s*$', 'override-builtin'),
        (r'^Check[-\s]lint:\s*(yes|no)\s*$', 'override-checklint'),
        (r'^Configure[-|\s]zfs:(.*)$', 'override-configzfs'),
        (r'^Perf[-|\s]zts:\s*(yes|no)\s*$', 'override-perfzts'),
        (r'^Perf[-|\s]pts:\s*(yes|no)\s*$', 'override-perfpts'),
    ]

    # Shared by handle_pull_request() and handle_pull_request_commit().
    # Previously this pattern existed only as a local variable of
    # handle_pull_request(), so handle_pull_request_commit() raised
    # NameError whenever a 'Requires-kernel:' directive was in play.
    kernel_pattern = r'^Requires-kernel:\s*([a-zA-Z0-9_\-\:\/\+\.]+)'

    def parse_comments(self, comments, default_category):
        """Return the builder category string for a commit message.

        A 'Requires-builders:' directive overrides default_category; if
        the requested list contains 'none', an empty category is returned
        so the commit is skipped entirely.
        """
        category = default_category

        # Extract any overrides for builders for this commit
        # Requires-builders: build arch distro test perf none
        category_pattern = r'^Requires-builders:\s*([ ,a-zA-Z0-9]+)'
        m = re.search(category_pattern, comments, re.I | re.M)
        if m is not None:
            category = m.group(1).lower()

        # If Requires-builders contains 'none', then skip this commit
        none_pattern = '.*none.*'
        m = re.search(none_pattern, category, re.I | re.M)
        if m is not None:
            category = ""

        return category

    def handle_push_commit(self, payload, commit, branch):
        """Build one buildbot change dict for a pushed commit."""
        created_at = dateparse(commit['timestamp'])
        comments = commit['message']

        # Assemble the list of modified files.
        files = []
        for kind in ('added', 'modified', 'removed'):
            files.extend(commit.get(kind, []))

        # Extract if the commit message has property overrides
        props = { }
        for step_pattern, prop_name in CustomGitHubEventHandler.valid_props:
            m = re.search(step_pattern, comments, re.I | re.M)
            if m is not None:
                props[prop_name] = json.dumps(m.group(1).lower())

        match = re.match("master", branch)
        if match:
            category = self.parse_comments(comments, builders_push_master)
        else:
            # For 0.8 and earlier releases include the legacy builders.
            category = self.parse_comments(comments, builders_push_release)

        # NOTE(review): stored raw here but json.dumps()-encoded in the
        # PR path -- confirm which form downstream consumers expect.
        props['branch'] = branch

        # Enabled performance testing on pushes by default.
        props['perfpts'] = json.dumps("yes")
        props['perfzts'] = json.dumps("yes")

        change = {
            'revision' : commit['id'],
            'when_timestamp': created_at,
            'branch': branch,
            'revlink' : commit['url'],
            'repository': payload['repository']['url'],
            'project' : payload['repository']['full_name'],
            'properties' : props,
            'category': category,
            'author': "%s <%s>" % (commit['author']['name'],
                                   commit['author']['email']),
            'comments' : comments,
            'files' : files,
        }

        if callable(self._codebase):
            change['codebase'] = self._codebase(payload)
        elif self._codebase is not None:
            change['codebase'] = self._codebase

        return change

    def handle_push(self, payload):
        """Convert a GitHub push event into a list of buildbot changes.

        Only branch heads are considered; at most the first 10 distinct
        commits of a push are built.
        """
        changes = []
        refname = payload['ref']
        log.msg("Processing GitHub Push `%s'" % refname)

        # We only care about regular heads, i.e. branches
        match = re.match(r"^refs\/heads\/(.+)$", refname)
        if not match:
            log.msg("Ignoring refname `%s': Not a branch" % refname)
            return changes, 'git'

        branch = match.group(1)
        if payload.get('deleted'):
            log.msg("Branch `%s' deleted, ignoring" % branch)
            return changes, 'git'

        nr = 0
        for commit in payload['commits']:
            nr += 1
            if not commit.get('distinct', True):
                log.msg('Commit `%s` is a non-distinct commit, ignoring...' %
                        (commit['id'],))
                continue

            # Cap the amount of work one large push can trigger.  (The
            # message previously said "> 5" while the guard checks 10.)
            if nr > 10:
                log.msg('Commit `%s` exceeds push limit (%d > 10), ignoring...' %
                        (commit['id'], nr))
                continue

            change = self.handle_push_commit(payload, commit, branch)
            changes.append(change)

        log.msg("Received %d changes pushed from github" % len(changes))

        return changes, 'git'

    def handle_pull_request_commit(self, payload, commit, nr, commits_nr,
                                   kernel_pr):
        """Build one buildbot change dict for a pull-request commit.

        nr/commits_nr identify this commit's position in the PR stack;
        kernel_pr, when set, is a 'Requires-kernel: ...' line propagated
        from any commit in the stack to every generated change.
        """
        pr_number = payload['number']
        refname = 'refs/pull/%d/head' % (pr_number,)
        created_at = dateparse(payload['pull_request']['created_at'])
        branch = payload['pull_request']['base']['ref']
        comments = commit['commit']['message'] + "\n\n"

        # Assemble the list of modified files.
        changed_files = []
        for f in commit['files']:
            changed_files.append(f['filename'])

        # Extract if the commit message has property overrides
        props = { }
        for step_pattern, prop_name in CustomGitHubEventHandler.valid_props:
            m = re.search(step_pattern, comments, re.I | re.M)
            if m is not None:
                props[prop_name] = json.dumps(m.group(1).lower())

        # Annotate the head commit to allow special handling.
        if commit['sha'] == payload['pull_request']['head']['sha']:
            # For 0.8 and earlier releases include the legacy builders.
            match = re.match("master", branch)
            if match:
                category = builders_pr_master
            else:
                category = builders_pr_release
        else:
            category = builders_pr_minimum

        # Extract if the commit message has property overrides
        category = self.parse_comments(comments, category)

        # Propagate the PR's kernel requirement to commits that do not
        # already state one.  Uses the class-level kernel_pattern; the
        # original referenced an undefined local here (NameError).
        if kernel_pr:
            if re.search(self.kernel_pattern, comments, re.I | re.M) is None:
                comments = comments + kernel_pr + "\n"

        comments = comments + "Pull-request: #%d part %d/%d\n" % (
            pr_number, nr, commits_nr)

        props['branch'] = json.dumps(branch)
        props['pr_number'] = json.dumps(pr_number)

        # Disabled performance testing on PRs by default.
        props['perfpts'] = json.dumps("no")
        props['perfzts'] = json.dumps("no")

        change = {
            'revision' : commit['sha'],
            'when_timestamp': created_at,
            'branch': refname,
            'revlink' : commit['html_url'],
            'repository': payload['repository']['clone_url'],
            'project' : payload['repository']['name'],
            'properties' : props,
            'category': category,
            'author': "%s <%s>" % (commit['commit']['committer']['name'],
                                   commit['commit']['committer']['email']),
            'comments' : comments,
            'files' : changed_files,
        }

        if callable(self._codebase):
            change['codebase'] = self._codebase(payload)
        elif self._codebase is not None:
            change['codebase'] = self._codebase

        return change

    def handle_pull_request(self, payload):
        """Convert a GitHub pull_request event into buildbot changes.

        PRs with more than 5 commits only build the head commit; smaller
        PRs build every commit, tagging non-head commits with the minimal
        builder set.
        """
        changes = []
        pr_number = payload['number']
        commits_nr = payload['pull_request']['commits']
        log.msg('Processing GitHub PR #%d' % pr_number, logLevel=logging.DEBUG)

        action = payload.get('action')
        if action not in ('opened', 'reopened', 'synchronize'):
            log.msg("GitHub PR #%d %s, ignoring" % (pr_number, action))
            return changes, 'git'

        # When receiving a large PR only test the top commit.
        if commits_nr > 5:
            commit_url = payload['pull_request']['base']['repo']['commits_url'][:-6]
            commit_url += "/" + payload['pull_request']['head']['sha']
            commit = query_url(commit_url, token=github_token)
            change = self.handle_pull_request_commit(payload, commit,
                commits_nr, commits_nr, None)
            changes.append(change)
        # Compile all commits in the stack and test the top commit.
        else:
            commits_url = payload['pull_request']['commits_url']
            commits = query_url(commits_url, token=github_token)

            # Any commit in the stack may pin a kernel for the whole PR.
            kernel_pr = None
            for commit in commits:
                comments = commit['commit']['message']
                m = re.search(self.kernel_pattern, comments, re.I | re.M)
                if m is not None:
                    kernel_pr = 'Requires-kernel: %s' % m.group(1)
                    break

            nr = 0
            for commit in commits:
                nr += 1
                commit = query_url(commit['url'], token=github_token)
                change = self.handle_pull_request_commit(payload, commit,
                    nr, commits_nr, kernel_pr)
                changes.append(change)

        log.msg("Received %d changes from GitHub Pull Request #%d" % (
            len(changes), pr_number))

        return changes, 'git'
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations:
"""ConnectionMonitorsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to build and send service requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, polling interval).
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        parameters: "_models.ConnectionMonitor",
        **kwargs: Any
    ) -> "_models.ConnectionMonitorResult":
        """Send the initial PUT request of the create-or-update LRO and
        deserialize the first (200 update / 201 create) response; polling
        is driven by begin_create_or_update()."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ConnectionMonitor')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 (update) and 201 (create) share the same response schema.
        if response.status_code == 200:
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        parameters: "_models.ConnectionMonitor",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ConnectionMonitorResult"]:
        """Create or update a connection monitor.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :param parameters: Parameters that define the operation to create a connection monitor.
        :type parameters: ~azure.mgmt.network.v2019_06_01.models.ConnectionMonitor
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ConnectionMonitorResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when starting fresh; a continuation
        # token means we are resuming an already-started operation.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response for the caller.
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Select the polling strategy: default ARM polling, no polling, or
        # a caller-provided polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs: Any
    ) -> "_models.ConnectionMonitorResult":
        """Gets a connection monitor by name.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ConnectionMonitorResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Synchronous GET (no LRO): a single request/response round trip.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial DELETE request of the delete long-running operation.

    Accepts 202 (deletion started) or 204 (nothing to delete); any other
    status is raised as :class:`~azure.core.exceptions.HttpResponseError`.
    ``begin_delete`` wraps this call in an :class:`AsyncLROPoller`.
    """
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    # Map auth/not-found/conflict statuses to typed exceptions; callers may
    # extend the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"
    # Construct URL
    url = self._delete_initial.metadata['url'] # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {} # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {} # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    # DELETE has no response body; only invoke the caller's custom hook.
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes the specified connection monitor.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh start: issue the initial DELETE. cls is an identity function so
        # the raw PipelineResponse (with its LRO headers) reaches the poller.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling calls.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # No body to deserialize for a delete; just run the caller's hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'location' final-state: the poller follows the Location header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a previously saved poller from its continuation token;
        # raw_result is intentionally not needed on this path.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def update_tags(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> "_models.ConnectionMonitorResult":
    """Update tags of the specified connection monitor.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :param parameters: Parameters supplied to update connection monitor tags.
    :type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ConnectionMonitorResult, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
    # Map auth/not-found/conflict statuses to typed exceptions; callers may
    # extend the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self.update_tags.metadata['url'] # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {} # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {} # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the TagsObject payload and send it as a PATCH.
    body_content_kwargs = {} # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
    if cls:
        # Caller's hook receives the raw response, the model, and headers.
        return cls(pipeline_response, deserialized, {})
    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def _stop_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> None:
    """POST the initial 'stop' request of the stop long-running operation.

    Accepts 200 (stopped synchronously) or 202 (stop in progress); any other
    status is raised as :class:`~azure.core.exceptions.HttpResponseError`.
    ``begin_stop`` wraps this call in an :class:`AsyncLROPoller`.
    """
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    # Map auth/not-found/conflict statuses to typed exceptions; callers may
    # extend the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"
    # Construct URL
    url = self._stop_initial.metadata['url'] # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {} # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {} # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    # No response body is deserialized; only the caller's hook runs.
    if cls:
        return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
async def begin_stop(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Stops the specified connection monitor.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh start: fire the initial 'stop' POST. cls is an identity
        # function so the raw PipelineResponse reaches the poller.
        raw_result = await self._stop_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling calls.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Stop returns no body; just run the caller's hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'location' final-state: the poller follows the Location header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a previously saved poller; raw_result is not needed here.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
async def _start_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> None:
    """POST the initial 'start' request of the start long-running operation.

    Accepts 200 (started synchronously) or 202 (start in progress); any other
    status is raised as :class:`~azure.core.exceptions.HttpResponseError`.
    ``begin_start`` wraps this call in an :class:`AsyncLROPoller`.
    """
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    # Map auth/not-found/conflict statuses to typed exceptions; callers may
    # extend the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"
    # Construct URL
    url = self._start_initial.metadata['url'] # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {} # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {} # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    # No response body is deserialized; only the caller's hook runs.
    if cls:
        return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
async def begin_start(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Starts the specified connection monitor.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh start: fire the initial 'start' POST. cls is an identity
        # function so the raw PipelineResponse reaches the poller.
        raw_result = await self._start_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling calls.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Start returns no body; just run the caller's hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'location' final-state: the poller follows the Location header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a previously saved poller; raw_result is not needed here.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
async def _query_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> "_models.ConnectionMonitorQueryResult":
    """POST the initial 'query' request of the query long-running operation.

    Both accepted statuses (200 and 202) carry a ConnectionMonitorQueryResult
    body, which is deserialized and returned. ``begin_query`` wraps this call
    in an :class:`AsyncLROPoller`.
    """
    cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
    # Map auth/not-found/conflict statuses to typed exceptions; callers may
    # extend the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"
    # Construct URL
    url = self._query_initial.metadata['url'] # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {} # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {} # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    # Generated per-status deserialization: both branches use the same model,
    # and one of them always runs because only 200/202 reach this point.
    if response.status_code == 200:
        deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
    if response.status_code == 202:
        deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
async def begin_query(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs: Any
) -> AsyncLROPoller["_models.ConnectionMonitorQueryResult"]:
    """Query a snapshot of the most recent connection states.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name given to the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorQueryResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh start: fire the initial 'query' POST. cls is an identity
        # function so the raw PipelineResponse reaches the poller.
        raw_result = await self._query_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling calls.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into the result model.
        deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # 'location' final-state: the poller follows the Location header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a previously saved poller; raw_result is not needed here.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def list(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.ConnectionMonitorListResult"]:
    """Lists all connection monitors for the specified Network Watcher.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
    # Map auth/not-found/conflict statuses to typed exceptions; callers may
    # extend the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        """Build the GET request for either the first page or a next_link page."""
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # First page: expand the operation's URL template.
            # Construct URL
            url = self.list.metadata['url'] # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {} # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Follow-up page: next_link is used verbatim, with no extra
            # query parameters added.
            url = next_link
            query_parameters = {} # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        """Deserialize one page; returns (continuation_token, items)."""
        deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        # Continuation token is always None here, so paging stops after this
        # page is consumed.
        return None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        """Fetch one page, mapping non-200 statuses onto typed errors."""
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import numbers
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from dashboard.pinpoint import mann_whitney_u
from dashboard.pinpoint.models import attempt as attempt_module
from dashboard.pinpoint.models import change as change_module
from dashboard.pinpoint.models import quest as quest_module
# We want this to be fast to minimize overhead while waiting for tasks to
# finish, but don't want to consume too many resources.
_TASK_INTERVAL = 10

# Number of Attempts that are automatically created for each Change added.
_DEFAULT_MAX_ATTEMPTS = 2

# Threshold used when statistically comparing two Changes' result samples.
# NOTE(review): 0.5 is far looser than the conventional 0.05 — presumably
# deliberate to bias toward further exploration with small samples; confirm
# against _Compare() before changing.
_SIGNIFICANCE_LEVEL = 0.5

# Possible outcomes when comparing the results of two adjacent Changes.
_DIFFERENT = 'different'
_PENDING = 'pending'
_SAME = 'same'
_UNKNOWN = 'unknown'
def JobFromId(job_id):
  """Look up a Job by its ID (the urlsafe form of its Datastore key).

  Users of Job should not have to import ndb. This function maintains an
  abstraction layer that separates users from the Datastore details.
  """
  return ndb.Key(urlsafe=job_id).get()
class Job(ndb.Model):
  """A Pinpoint job."""

  # Entity bookkeeping timestamps, maintained automatically by ndb.
  created = ndb.DateTimeProperty(required=True, auto_now_add=True)
  updated = ndb.DateTimeProperty(required=True, auto_now=True)

  # The name of the Task Queue task this job is running on. If it's not present,
  # the job isn't running.
  task = ndb.StringProperty()

  # Request parameters.
  configuration = ndb.StringProperty(required=True)
  test_suite = ndb.StringProperty()
  test = ndb.StringProperty()
  metric = ndb.StringProperty()

  # If True, the service should pick additional Changes to run (bisect).
  # If False, only run the Changes explicitly added by the user.
  auto_explore = ndb.BooleanProperty(required=True)

  # The Job's entire internal run state, stored pickled (see _JobState).
  state = ndb.PickleProperty(required=True)

  @classmethod
  def New(cls, configuration, test_suite, test, metric, auto_explore):
    """Build an unsaved Job whose quest list is derived from the request.

    Quests are cumulative: an isolate is always located; a test run is added
    only when a test_suite is given; a value read only when a metric is.
    """
    # Get list of quests.
    quests = [quest_module.FindIsolated(configuration)]
    if test_suite:
      quests.append(quest_module.RunTest(configuration, test_suite, test))
    if metric:
      quests.append(quest_module.ReadValue(metric))

    # Create job.
    return cls(
        configuration=configuration,
        test_suite=test_suite,
        test=test,
        metric=metric,
        auto_explore=auto_explore,
        state=_JobState(quests, _DEFAULT_MAX_ATTEMPTS))

  @property
  def job_id(self):
    # The urlsafe Datastore key doubles as the externally visible job ID.
    return self.key.urlsafe()

  @property
  def running(self):
    # A Job is "running" exactly when a Task Queue task name is recorded.
    return bool(self.task)

  def AddChange(self, change):
    self.state.AddChange(change)

  def Start(self):
    """Enqueue the next Run() tick on the 'job-queue' Task Queue."""
    task = taskqueue.add(queue_name='job-queue', url='/run/' + self.job_id,
                         countdown=_TASK_INTERVAL)
    self.task = task.name

  def Run(self):
    """One scheduling tick: optionally bisect, then schedule outstanding work.

    Re-enqueues itself while any Attempt is incomplete; clears self.task
    (marking the Job completed) once no work remains.
    """
    if self.auto_explore:
      self.state.Explore()
    work_left = self.state.ScheduleWork()

    # Schedule moar task.
    if work_left:
      self.Start()
    else:
      self.task = None

  def AsDict(self):
    """Serialize the Job's queryable fields and state for the API/frontend."""
    if self.running:
      status = 'RUNNING'
    else:
      status = 'COMPLETED'

    return {
        'job_id': self.job_id,
        'configuration': self.configuration,
        'test_suite': self.test_suite,
        'test': self.test,
        'metric': self.metric,
        'auto_explore': self.auto_explore,
        # NOTE(review): these datetimes appear to be naive, so '%Z' renders
        # as an empty string — confirm whether a timezone marker was intended.
        'created': self.created.strftime('%Y-%m-%d %H:%M:%S %Z'),
        'updated': self.updated.strftime('%Y-%m-%d %H:%M:%S %Z'),
        'status': status,
        'state': self.state.AsDict(),
    }
class _JobState(object):
"""The internal state of a Job.
Wrapping the entire internal state of a Job in a PickleProperty allows us to
use regular Python objects, with constructors, dicts, and object references.
We lose the ability to index and query the fields, but it's all internal
anyway. Everything queryable should be on the Job object.
"""
def __init__(self, quests, max_attempts):
  """Create a _JobState.

  Args:
    quests: A sequence of quests to run on each Change.
    max_attempts: The max number of attempts to automatically run per Change.
  """
  # _quests is mutable. Any modification should mutate the existing list
  # in-place rather than assign a new list, because every Attempt references
  # this object and will be updated automatically if it's mutated.
  self._quests = list(quests)

  # _changes can be in arbitrary order. Client should not assume that the
  # list of Changes is sorted in any particular order.
  self._changes = []

  # A mapping from a Change to a list of Attempts on that Change.
  self._attempts = {}

  # Ceiling consulted when deciding whether to schedule more Attempts.
  self._max_attempts = max_attempts
def AddAttempt(self, change):
assert change in self._attempts
self._attempts[change].append(attempt_module.Attempt(self._quests, change))
def AddChange(self, change, index=None):
if index:
self._changes.insert(index, change)
else:
self._changes.append(change)
self._attempts[change] = []
self.AddAttempt(change)
def Explore(self):
"""Compare Changes and bisect by adding additional Changes as needed.
For every pair of adjacent Changes, compare their results as probability
distributions. If more information is needed to establish statistical
confidence, add an additional Attempt. If the results are different, find
the midpoint of the Changes and add it to the Job.
The midpoint can only be added if the second Change represents a commit that
comes after the first Change. Otherwise, this method won't explore further.
For example, if Change A is repo@abc, and Change B is repo@abc + patch,
there's no way to pick additional Changes to try.
"""
# Compare every pair of Changes.
# TODO: The list may Change while iterating through it.
for index in xrange(1, len(self._changes)):
change_a = self._changes[index - 1]
change_b = self._changes[index]
comparison_result = self._Compare(change_a, change_b)
if comparison_result == _DIFFERENT:
# Different: Bisect and add an additional Change to the job.
try:
midpoint = change_module.Change.Midpoint(change_a, change_b)
except change_module.NonLinearError:
midpoint = None
if midpoint:
logging.info('Adding Change %s.', midpoint)
self.AddChange(midpoint, index)
elif comparison_result == _SAME:
# The same: Do nothing.
continue
elif comparison_result == _UNKNOWN:
# Unknown: Add an Attempt to the Change with the fewest Attempts.
change = min(change_a, change_b, key=lambda c: len(self._attempts[c]))
self.AddAttempt(change)
def ScheduleWork(self):
work_left = False
for attempts in self._attempts.itervalues():
for attempt in attempts:
if attempt.completed:
continue
attempt.ScheduleWork()
work_left = True
return work_left
def AsDict(self):
comparisons = []
for index in xrange(1, len(self._changes)):
change_a = self._changes[index - 1]
change_b = self._changes[index]
comparisons.append(self._Compare(change_a, change_b))
# result_values is a 3D array. result_values[change][quest] is a list of
# all the result values for that Change and Quest.
result_values = []
for change in self._changes:
change_result_values = []
change_results_per_quest = _CombineResultsPerQuest(self._attempts[change])
for quest in self._quests:
change_result_values.append(map(str, change_results_per_quest[quest]))
result_values.append(change_result_values)
return {
'quests': map(str, self._quests),
'changes': map(str, self._changes),
'comparisons': comparisons,
'result_values': result_values,
}
def _Compare(self, change_a, change_b):
attempts_a = self._attempts[change_a]
attempts_b = self._attempts[change_b]
if any(not attempt.completed for attempt in attempts_a + attempts_b):
return _PENDING
results_a = _CombineResultsPerQuest(attempts_a)
results_b = _CombineResultsPerQuest(attempts_b)
if any(_CompareResults(results_a[quest], results_b[quest]) == _DIFFERENT
for quest in self._quests):
return _DIFFERENT
# Here, "the same" means that we fail to reject the null hypothesis. We can
# never be completely sure that the two Changes have the same results, but
# we've run everything that we planned to, and didn't detect any difference.
if (len(attempts_a) >= self._max_attempts and
len(attempts_b) >= self._max_attempts):
return _SAME
return _UNKNOWN
def _CombineResultsPerQuest(attempts):
aggregate_results = collections.defaultdict(list)
for attempt in attempts:
if not attempt.completed:
continue
for quest, results in attempt.result_values.iteritems():
aggregate_results[quest] += results
return aggregate_results
def _CompareResults(results_a, results_b):
  """Decide whether two samples of result values differ significantly.

  Runs a Mann-Whitney U test on the two samples and compares the p-value
  against _SIGNIFICANCE_LEVEL.

  Returns:
    _DIFFERENT if the samples are significantly different; _UNKNOWN if either
    sample is empty, the test cannot be computed, or no significant difference
    was detected.
  """
  if len(results_a) == 0 or len(results_b) == 0:
    # With no data on one side there is nothing to compare yet.
    return _UNKNOWN

  # Materialize lists (on Python 3 map() returns a lazy one-shot iterator,
  # which could be exhausted or mis-sized inside the statistical test).
  results_a = [_ConvertToNumber(result) for result in results_a]
  results_b = [_ConvertToNumber(result) for result in results_b]

  try:
    p_value = mann_whitney_u.MannWhitneyU(results_a, results_b)
  except ValueError:
    # The test is undefined for some inputs (e.g. degenerate samples).
    return _UNKNOWN

  if p_value < _SIGNIFICANCE_LEVEL:
    return _DIFFERENT
  else:
    return _UNKNOWN
def _ConvertToNumber(obj):
# We want the results_values to provide both a message that can be shown to
# the user for why something failed, and also something comparable that can
# be used for bisect. Therefore, they contain the thrown Exceptions. This
# function then converts them into comparable numbers for bisect.
if isinstance(obj, numbers.Number):
return obj
elif isinstance(obj, Exception):
return hash(obj.__class__)
else:
return hash(obj)
|
|
import abc
import uuid
import socket
import datetime
from coilmq.exception import ProtocolError, AuthError
from coilmq.util import frames
from coilmq.util.frames import Frame, ErrorFrame, ReceiptFrame, ConnectedFrame
from coilmq.util.concurrency import CoilThreadingTimer
# Frame command names from the STOMP specification (upper-case wire form).
SEND = 'SEND'
CONNECT = 'CONNECT'
MESSAGE = 'MESSAGE'
ERROR = 'ERROR'
CONNECTED = 'CONNECTED'
SUBSCRIBE = 'SUBSCRIBE'
UNSUBSCRIBE = 'UNSUBSCRIBE'
BEGIN = 'BEGIN'
COMMIT = 'COMMIT'
ABORT = 'ABORT'
ACK = 'ACK'
DISCONNECT = 'DISCONNECT'
# Lower-case command names accepted by the frame dispatcher; includes the
# STOMP 1.1+ 'nack' and 'stomp' commands in addition to the 1.0 set above.
VALID_COMMANDS = ['message', 'connect', 'connected', 'error', 'send',
                  'subscribe', 'unsubscribe', 'begin', 'commit', 'abort', 'ack', 'disconnect', 'nack', 'stomp']
class STOMP(object):
    """Abstract base class for version-specific STOMP protocol handlers.

    Each handler method corresponds to one STOMP client command; the engine
    dispatches received frames to these methods by (lower-cased) command name.
    """
    __metaclass__ = abc.ABCMeta  # Python 2-style ABC declaration.

    def __init__(self, engine):
        # The engine owns the connection, managers, transactions and logging.
        self.engine = engine

    def stomp(self, frame):
        """Handle the STOMP command (1.1+ alias): treated the same as CONNECT."""
        self.connect(frame)

    @abc.abstractmethod
    def process_frame(self, frame):
        """Dispatch a received frame to the appropriate handler method."""
        raise NotImplementedError

    @abc.abstractmethod
    def connect(self, frame):
        """Handle the CONNECT command."""
        raise NotImplementedError

    @abc.abstractmethod
    def send(self, frame):
        """Handle the SEND command."""
        raise NotImplementedError

    @abc.abstractmethod
    def subscribe(self, frame):
        """Handle the SUBSCRIBE command."""
        raise NotImplementedError

    @abc.abstractmethod
    def unsubscribe(self, frame):
        """Handle the UNSUBSCRIBE command."""
        raise NotImplementedError

    @abc.abstractmethod
    def begin(self, frame):
        """Handle the BEGIN command."""
        raise NotImplementedError

    @abc.abstractmethod
    def commit(self, frame):
        """Handle the COMMIT command."""
        raise NotImplementedError

    @abc.abstractmethod
    def abort(self, frame):
        """Handle the ABORT command."""
        raise NotImplementedError

    @abc.abstractmethod
    def ack(self, frame):
        """Handle the ACK command."""
        raise NotImplementedError

    @abc.abstractmethod
    def disconnect(self, frame):
        """Handle the DISCONNECT command."""
        raise NotImplementedError
class STOMP10(STOMP):
    """STOMP 1.0 protocol handler: implements every STOMP 1.0 command."""

    def process_frame(self, frame):
        """
        Dispatches a received frame to the appropriate internal method.

        @param frame: The frame that was received.
        @type frame: C{stompclient.frame.Frame}
        """
        cmd_method = frame.cmd.lower()
        if not cmd_method in VALID_COMMANDS:
            raise ProtocolError("Invalid STOMP command: {}".format(frame.cmd))
        method = getattr(self, cmd_method, None)
        # Only CONNECT (and its 1.1+ alias STOMP) may be sent before the
        # session is established.
        if not self.engine.connected and method not in (self.connect, self.stomp):
            raise ProtocolError("Not connected.")
        try:
            transaction = frame.headers.get('transaction')
            # BEGIN/COMMIT/ABORT manage transactions and run immediately; any
            # other frame carrying a transaction header is buffered until the
            # transaction commits.
            if not transaction or method in (self.begin, self.commit, self.abort):
                method(frame)
            else:
                if not transaction in self.engine.transactions:
                    raise ProtocolError(
                        "Invalid transaction specified: %s" % transaction)
                self.engine.transactions[transaction].append(frame)
        except Exception as e:
            self.engine.log.error("Error processing STOMP frame: %s" % e)
            self.engine.log.exception(e)
            try:
                self.engine.connection.send_frame(ErrorFrame(str(e), str(e)))
            except Exception as e:  # pragma: no cover
                self.engine.log.error("Could not send error frame: %s" % e)
                self.engine.log.exception(e)
        else:
            # The protocol is not especially clear here (not sure why I'm surprised)
            # about the expected behavior WRT receipts and errors. We will assume that
            # the RECEIPT frame should not be sent if there is an error frame.
            # Also we'll assume that a transaction should not preclude sending the receipt
            # frame.
            if frame.headers.get('receipt') and method != self.connect:
                self.engine.connection.send_frame(ReceiptFrame(
                    receipt=frame.headers.get('receipt')))

    def connect(self, frame, response=None):
        """
        Handle CONNECT command: Establishes a new connection and checks auth (if applicable).
        """
        self.engine.log.debug("CONNECT")
        if self.engine.authenticator:
            login = frame.headers.get('login')
            passcode = frame.headers.get('passcode')
            if not self.engine.authenticator.authenticate(login, passcode):
                raise AuthError("Authentication failed for %s" % login)
        self.engine.connected = True
        response = response or Frame(frames.CONNECTED)
        # NOTE(review): this stores a uuid.UUID object, not a string; verify
        # that Frame serialization stringifies header values.
        response.headers['session'] = uuid.uuid4()
        # TODO: Do we want to do anything special to track sessions?
        # (Actually, I don't think the spec actually does anything with this at all.)
        self.engine.connection.send_frame(response)

    def send(self, frame):
        """
        Handle the SEND command: Delivers a message to a queue or topic (default).
        """
        dest = frame.headers.get('destination')
        if not dest:
            raise ProtocolError('Missing destination for SEND command.')
        if dest.startswith('/queue/'):
            self.engine.queue_manager.send(frame)
        else:
            self.engine.topic_manager.send(frame)

    def subscribe(self, frame):
        """
        Handle the SUBSCRIBE command: Adds this connection to destination.
        """
        # 'ack: client' requests reliable (client-acknowledged) delivery.
        ack = frame.headers.get('ack')
        reliable = ack and ack.lower() == 'client'
        self.engine.connection.reliable_subscriber = reliable
        dest = frame.headers.get('destination')
        if not dest:
            raise ProtocolError('Missing destination for SUBSCRIBE command.')
        if dest.startswith('/queue/'):
            self.engine.queue_manager.subscribe(self.engine.connection, dest)
        else:
            self.engine.topic_manager.subscribe(self.engine.connection, dest)

    def unsubscribe(self, frame):
        """
        Handle the UNSUBSCRIBE command: Removes this connection from destination.
        """
        dest = frame.headers.get('destination')
        if not dest:
            raise ProtocolError('Missing destination for UNSUBSCRIBE command.')
        if dest.startswith('/queue/'):
            self.engine.queue_manager.unsubscribe(self.engine.connection, dest)
        else:
            self.engine.topic_manager.unsubscribe(self.engine.connection, dest)

    def begin(self, frame):
        """
        Handles BEGIN command: Starts a new transaction.
        """
        if not frame.transaction:
            raise ProtocolError("Missing transaction for BEGIN command.")
        self.engine.transactions[frame.transaction] = []

    def commit(self, frame):
        """
        Handles COMMIT command: Commits specified transaction.
        """
        if not frame.transaction:
            raise ProtocolError("Missing transaction for COMMIT command.")
        if not frame.transaction in self.engine.transactions:
            raise ProtocolError("Invalid transaction: %s" % frame.transaction)
        # Replay the buffered frames; the transaction header is removed so
        # they execute immediately instead of being re-buffered.
        for tframe in self.engine.transactions[frame.transaction]:
            del tframe.headers['transaction']
            self.process_frame(tframe)
        self.engine.queue_manager.clear_transaction_frames(
            self.engine.connection, frame.transaction)
        del self.engine.transactions[frame.transaction]

    def abort(self, frame):
        """
        Handles ABORT command: Rolls back specified transaction.
        """
        if not frame.transaction:
            raise ProtocolError("Missing transaction for ABORT command.")
        if not frame.transaction in self.engine.transactions:
            raise ProtocolError("Invalid transaction: %s" % frame.transaction)
        self.engine.queue_manager.resend_transaction_frames(
            self.engine.connection, frame.transaction)
        del self.engine.transactions[frame.transaction]

    def ack(self, frame):
        """
        Handles the ACK command: Acknowledges receipt of a message.
        """
        if not frame.message_id:
            raise ProtocolError("No message-id specified for ACK command.")
        self.engine.queue_manager.ack(self.engine.connection, frame)

    def disconnect(self, frame):
        """
        Handles the DISCONNECT command: Unbinds the connection.

        Clients are supposed to send this command, but in practice it should not be
        relied upon.
        """
        self.engine.log.debug("Disconnect")
        self.engine.unbind()
class STOMP11(STOMP10):
    """STOMP 1.1 handler: adds version negotiation, heartbeating and NACK."""

    SUPPORTED_VERSIONS = {'1.0', '1.1'}

    def __init__(self, engine, send_heartbeat_interval=100, receive_heartbeat_interval=100, *args, **kwargs):
        """
        @param engine: The engine owning this protocol handler.
        @param send_heartbeat_interval: Interval (ms) at which this server sends heartbeats.
        @param receive_heartbeat_interval: Interval (ms) at which it expects to receive them.
        """
        super(STOMP11, self).__init__(engine)
        self.last_hb = datetime.datetime.now()
        self.last_hb_sent = datetime.datetime.now()
        self.timer = CoilThreadingTimer()
        # flags to control heartbeating
        self.send_hb = self.receive_hb = False
        self.send_heartbeat_interval = datetime.timedelta(milliseconds=send_heartbeat_interval)
        self.receive_heartbeat_interval = datetime.timedelta(milliseconds=receive_heartbeat_interval)

    def enable_heartbeat(self, cx, cy, response):
        """Negotiate heartbeat intervals with the client and start the timer.

        @param cx: Interval (ms) at which the client can send heartbeats.
        @param cy: Interval (ms) at which the client wants to receive them.
        @param response: The CONNECTED frame; receives the 'heart-beat' header.
        """
        if self.send_heartbeat_interval and cy:
            self.send_heartbeat_interval = max(self.send_heartbeat_interval, datetime.timedelta(milliseconds=cy))
            self.timer.schedule(self.send_heartbeat_interval.total_seconds(), self.send_heartbeat)
        if self.receive_heartbeat_interval and cx:
            # NOTE(review): mirrors the original logic, which bases the check
            # interval on send_heartbeat_interval; verify whether
            # receive_heartbeat_interval was intended here.
            self.timer.schedule(max(self.send_heartbeat_interval, datetime.timedelta(milliseconds=cx)).total_seconds(),
                                self.receive_heartbeat)
        self.timer.start()
        # Report the intervals in milliseconds. Use total_seconds() rather
        # than timedelta.microseconds: .microseconds only holds the sub-second
        # component, so any interval >= 1 second would be reported incorrectly
        # (e.g. a negotiated 2s interval would be advertised as 0 == disabled).
        response.headers['heart-beat'] = '{0},{1}'.format(
            int(self.send_heartbeat_interval.total_seconds() * 1000),
            int(self.receive_heartbeat_interval.total_seconds() * 1000))

    def disable_heartbeat(self):
        """Stop the heartbeat timer."""
        self.timer.stop()

    def send_heartbeat(self):
        # screw it, just send an error frame
        self.engine.connection.send_frame(ErrorFrame('heartbeat'))

    def receive_heartbeat(self):
        """Unbind the connection if the client's heartbeat is overdue."""
        ago = datetime.datetime.now() - self.last_hb
        if ago > self.receive_heartbeat_interval:
            self.engine.log.debug("No heartbeat was received for {0} seconds".format(ago.total_seconds()))
            self.engine.unbind()

    def connect(self, frame, response=None):
        """Handle CONNECT: negotiate version and heartbeats, then delegate."""
        connected_frame = Frame(frames.CONNECTED)
        self._negotiate_protocol(frame, connected_frame)
        heart_beat = frame.headers.get('heart-beat', '0,0')
        if heart_beat:
            self.enable_heartbeat(*map(int, heart_beat.split(',')), response=connected_frame)
        super(STOMP11, self).connect(frame, response=connected_frame)

    def nack(self, frame):
        """
        Handles the NACK command: Unacknowledges receipt of a message.
        For now, this is just a placeholder to implement this version of the protocol
        """
        if not frame.headers.get('message-id'):
            raise ProtocolError("No message-id specified for NACK command.")
        if not frame.headers.get('subscription'):
            raise ProtocolError("No subscription specified for NACK command.")

    def _negotiate_protocol(self, frame, response):
        """Pick the highest protocol version shared with the client.

        Sends an ERROR frame when no common version exists; otherwise records
        the version on the response and, if a different handler class applies,
        swaps the engine's protocol and re-runs connect() on the new handler.
        """
        client_versions = frame.headers.get('accept-version')
        if not client_versions:
            raise ProtocolError('No version specified')
        common = set(client_versions.split(',')) & self.SUPPORTED_VERSIONS
        if not common:
            versions = ','.join(self.SUPPORTED_VERSIONS)
            self.engine.connection.send_frame(Frame(
                frames.ERROR,
                headers={'version': versions, 'content-type': frames.TEXT_PLAIN},
                body='Supported protocol versions are {0}'.format(versions)
            ))
        else:
            response.headers['version'] = max(common)
            protocol_class = PROTOCOL_MAP[response.headers['version']]
            if type(self) is not protocol_class:
                self.engine.protocol = protocol_class(self.engine)
                self.engine.protocol.connect(frame, response=response)
class STOMP12(STOMP11):
    """STOMP 1.2 handler: additionally requires and validates the 'host' header."""

    SUPPORTED_VERSIONS = STOMP11.SUPPORTED_VERSIONS | {'1.2'}

    def connect(self, frame, response=None):
        """Validate the mandatory 'host' header, then delegate to STOMP11."""
        requested_host = frame.headers.get('host')
        if not requested_host:
            raise ProtocolError('"host" header is required')
        if requested_host != socket.getfqdn():
            raise ProtocolError('Virtual hosting is not supported or host is unknown')
        super(STOMP12, self).connect(frame, response)
# Maps each negotiated STOMP protocol version to its handler class.
PROTOCOL_MAP = {'1.0': STOMP10, '1.1': STOMP11, '1.2': STOMP12}
|
|
"""Test the Yeelight light."""
import logging
from yeelight import (
BulbException,
BulbType,
HSVTransition,
LightType,
PowerMode,
RGBTransition,
SceneClass,
SleepTransition,
TemperatureTransition,
transitions,
)
from yeelight.flow import Flow
from yeelight.main import _MODEL_SPECS
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
FLASH_LONG,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.components.yeelight import (
ATTR_COUNT,
ATTR_TRANSITIONS,
CONF_CUSTOM_EFFECTS,
CONF_FLOW_PARAMS,
CONF_MODE_MUSIC,
CONF_NIGHTLIGHT_SWITCH,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DEFAULT_MODE_MUSIC,
DEFAULT_NIGHTLIGHT_SWITCH,
DEFAULT_SAVE_ON_CHANGE,
DEFAULT_TRANSITION,
DOMAIN,
YEELIGHT_HSV_TRANSACTION,
YEELIGHT_RGB_TRANSITION,
YEELIGHT_SLEEP_TRANSACTION,
YEELIGHT_TEMPERATURE_TRANSACTION,
)
from homeassistant.components.yeelight.light import (
ATTR_MINUTES,
ATTR_MODE,
EFFECT_DISCO,
EFFECT_FACEBOOK,
EFFECT_FAST_RANDOM_LOOP,
EFFECT_STOP,
EFFECT_TWITTER,
EFFECT_WHATSAPP,
SERVICE_SET_AUTO_DELAY_OFF_SCENE,
SERVICE_SET_COLOR_FLOW_SCENE,
SERVICE_SET_COLOR_SCENE,
SERVICE_SET_COLOR_TEMP_SCENE,
SERVICE_SET_HSV_SCENE,
SERVICE_SET_MODE,
SERVICE_START_FLOW,
SUPPORT_YEELIGHT,
SUPPORT_YEELIGHT_RGB,
SUPPORT_YEELIGHT_WHITE_TEMP,
YEELIGHT_COLOR_EFFECT_LIST,
YEELIGHT_MONO_EFFECT_LIST,
YEELIGHT_TEMP_ONLY_EFFECT_LIST,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry
from homeassistant.setup import async_setup_component
from homeassistant.util.color import (
color_hs_to_RGB,
color_hs_to_xy,
color_RGB_to_hs,
color_RGB_to_xy,
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
from . import (
ENTITY_LIGHT,
ENTITY_NIGHTLIGHT,
IP_ADDRESS,
MODULE,
NAME,
PROPERTIES,
UNIQUE_NAME,
_mocked_bulb,
_patch_discovery,
)
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
# Baseline config-entry payload shared by all tests below; individual tests
# override keys as needed via dict unpacking.
CONFIG_ENTRY_DATA = {
    CONF_HOST: IP_ADDRESS,
    CONF_TRANSITION: DEFAULT_TRANSITION,
    CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
    CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
    CONF_NIGHTLIGHT_SWITCH: DEFAULT_NIGHTLIGHT_SWITCH,
}
async def test_services(hass: HomeAssistant, caplog):
    """Test Yeelight services.

    Sets up a mocked bulb, then exercises each light/yeelight service and
    verifies the corresponding python-yeelight call, in both the success and
    the BulbException failure case (failures must log exactly one error).
    """
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            **CONFIG_ENTRY_DATA,
            CONF_MODE_MUSIC: True,
            CONF_SAVE_ON_CHANGE: True,
            CONF_NIGHTLIGHT_SWITCH: True,
        },
    )
    config_entry.add_to_hass(hass)
    mocked_bulb = _mocked_bulb()
    with _patch_discovery(MODULE), patch(f"{MODULE}.Bulb", return_value=mocked_bulb):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    async def _async_test_service(service, data, method, payload=None, domain=DOMAIN):
        # Helper: call `service` and assert the bulb `method` was invoked with
        # `payload` (list -> positional args, dict -> keyword args, None ->
        # called once with anything), then repeat with a failing bulb and
        # assert exactly one new error was logged.
        err_count = len([x for x in caplog.records if x.levelno == logging.ERROR])
        # success
        mocked_method = MagicMock()
        setattr(type(mocked_bulb), method, mocked_method)
        await hass.services.async_call(domain, service, data, blocking=True)
        if payload is None:
            mocked_method.assert_called_once()
        elif type(payload) == list:
            mocked_method.assert_called_once_with(*payload)
        else:
            mocked_method.assert_called_once_with(**payload)
        assert (
            len([x for x in caplog.records if x.levelno == logging.ERROR]) == err_count
        )
        # failure
        mocked_method = MagicMock(side_effect=BulbException)
        setattr(type(mocked_bulb), method, mocked_method)
        await hass.services.async_call(domain, service, data, blocking=True)
        assert (
            len([x for x in caplog.records if x.levelno == logging.ERROR])
            == err_count + 1
        )

    # turn_on
    brightness = 100
    color_temp = 200
    transition = 1
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_BRIGHTNESS: brightness,
            ATTR_COLOR_TEMP: color_temp,
            ATTR_FLASH: FLASH_LONG,
            ATTR_EFFECT: EFFECT_STOP,
            ATTR_TRANSITION: transition,
        },
        blocking=True,
    )
    mocked_bulb.turn_on.assert_called_once_with(
        duration=transition * 1000,
        light_type=LightType.Main,
        power_mode=PowerMode.NORMAL,
    )
    mocked_bulb.turn_on.reset_mock()
    mocked_bulb.start_music.assert_called_once()
    mocked_bulb.set_brightness.assert_called_once_with(
        brightness / 255 * 100, duration=transition * 1000, light_type=LightType.Main
    )
    mocked_bulb.set_color_temp.assert_called_once_with(
        color_temperature_mired_to_kelvin(color_temp),
        duration=transition * 1000,
        light_type=LightType.Main,
    )
    mocked_bulb.start_flow.assert_called_once()  # flash
    mocked_bulb.stop_flow.assert_called_once_with(light_type=LightType.Main)

    # turn_on nightlight
    await _async_test_service(
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: ENTITY_NIGHTLIGHT},
        "turn_on",
        payload={
            "duration": DEFAULT_TRANSITION,
            "light_type": LightType.Main,
            "power_mode": PowerMode.MOONLIGHT,
        },
        domain="light",
    )

    # turn_off
    await _async_test_service(
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_TRANSITION: transition},
        "turn_off",
        domain="light",
        payload={"duration": transition * 1000, "light_type": LightType.Main},
    )

    # set_mode
    mode = "rgb"
    await _async_test_service(
        SERVICE_SET_MODE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MODE: "rgb"},
        "set_power_mode",
        [PowerMode[mode.upper()]],
    )

    # start_flow
    await _async_test_service(
        SERVICE_START_FLOW,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_TRANSITIONS: [{YEELIGHT_TEMPERATURE_TRANSACTION: [1900, 2000, 60]}],
        },
        "start_flow",
    )

    # set_color_scene
    await _async_test_service(
        SERVICE_SET_COLOR_SCENE,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_RGB_COLOR: [10, 20, 30],
            ATTR_BRIGHTNESS: 50,
        },
        "set_scene",
        [SceneClass.COLOR, 10, 20, 30, 50],
    )

    # set_hsv_scene
    await _async_test_service(
        SERVICE_SET_HSV_SCENE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_HS_COLOR: [180, 50], ATTR_BRIGHTNESS: 50},
        "set_scene",
        [SceneClass.HSV, 180, 50, 50],
    )

    # set_color_temp_scene
    await _async_test_service(
        SERVICE_SET_COLOR_TEMP_SCENE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_KELVIN: 4000, ATTR_BRIGHTNESS: 50},
        "set_scene",
        [SceneClass.CT, 4000, 50],
    )

    # set_color_flow_scene
    await _async_test_service(
        SERVICE_SET_COLOR_FLOW_SCENE,
        {
            ATTR_ENTITY_ID: ENTITY_LIGHT,
            ATTR_TRANSITIONS: [{YEELIGHT_TEMPERATURE_TRANSACTION: [1900, 2000, 60]}],
        },
        "set_scene",
    )

    # set_auto_delay_off_scene
    await _async_test_service(
        SERVICE_SET_AUTO_DELAY_OFF_SCENE,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_MINUTES: 1, ATTR_BRIGHTNESS: 50},
        "set_scene",
        [SceneClass.AUTO_DELAY_OFF, 50, 1],
    )

    # test _cmd wrapper error handler
    err_count = len([x for x in caplog.records if x.levelno == logging.ERROR])
    type(mocked_bulb).turn_on = MagicMock()
    type(mocked_bulb).set_brightness = MagicMock(side_effect=BulbException)
    await hass.services.async_call(
        "light",
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_BRIGHTNESS: 50},
        blocking=True,
    )
    assert (
        len([x for x in caplog.records if x.levelno == logging.ERROR]) == err_count + 1
    )
async def test_device_types(hass: HomeAssistant):
    """Test different device types.

    For each BulbType/model combination, sets up the integration with a mocked
    bulb and asserts the resulting entity state attributes, optionally
    repeating the setup with the nightlight switch enabled.
    """
    mocked_bulb = _mocked_bulb()
    properties = {**PROPERTIES}
    properties.pop("active_mode")
    properties["color_mode"] = "3"
    mocked_bulb.last_properties = properties

    async def _async_setup(config_entry):
        # Set up the config entry with the shared mocked bulb.
        with patch(f"{MODULE}.Bulb", return_value=mocked_bulb):
            await hass.config_entries.async_setup(config_entry.entry_id)
            await hass.async_block_till_done()

    async def _async_test(
        bulb_type,
        model,
        target_properties,
        nightlight_properties=None,
        name=UNIQUE_NAME,
        entity_id=ENTITY_LIGHT,
    ):
        # Set up with the nightlight switch disabled and verify the main
        # entity's attributes; then tear down and (optionally) repeat with the
        # nightlight switch enabled, verifying the nightlight entity instead.
        config_entry = MockConfigEntry(
            domain=DOMAIN,
            data={
                **CONFIG_ENTRY_DATA,
                CONF_NIGHTLIGHT_SWITCH: False,
            },
        )
        config_entry.add_to_hass(hass)
        mocked_bulb.bulb_type = bulb_type
        model_specs = _MODEL_SPECS.get(model)
        type(mocked_bulb).get_model_specs = MagicMock(return_value=model_specs)
        await _async_setup(config_entry)
        state = hass.states.get(entity_id)
        assert state.state == "on"
        target_properties["friendly_name"] = name
        target_properties["flowing"] = False
        target_properties["night_light"] = True
        assert dict(state.attributes) == target_properties
        await hass.config_entries.async_unload(config_entry.entry_id)
        await config_entry.async_remove(hass)
        registry = await entity_registry.async_get_registry(hass)
        registry.async_clear_config_entry(config_entry.entry_id)

        # nightlight
        if nightlight_properties is None:
            return
        config_entry = MockConfigEntry(
            domain=DOMAIN,
            data={
                **CONFIG_ENTRY_DATA,
                CONF_NIGHTLIGHT_SWITCH: True,
            },
        )
        config_entry.add_to_hass(hass)
        await _async_setup(config_entry)
        assert hass.states.get(entity_id).state == "off"
        state = hass.states.get(f"{entity_id}_nightlight")
        assert state.state == "on"
        nightlight_properties["friendly_name"] = f"{name} nightlight"
        nightlight_properties["icon"] = "mdi:weather-night"
        nightlight_properties["flowing"] = False
        nightlight_properties["night_light"] = True
        assert dict(state.attributes) == nightlight_properties
        await hass.config_entries.async_unload(config_entry.entry_id)
        await config_entry.async_remove(hass)
        registry.async_clear_config_entry(config_entry.entry_id)

    # Expected attribute values derived from the mocked bulb PROPERTIES.
    bright = round(255 * int(PROPERTIES["bright"]) / 100)
    current_brightness = round(255 * int(PROPERTIES["current_brightness"]) / 100)
    ct = color_temperature_kelvin_to_mired(int(PROPERTIES["ct"]))
    hue = int(PROPERTIES["hue"])
    sat = int(PROPERTIES["sat"])
    hs_color = (round(hue / 360 * 65536, 3), round(sat / 100 * 255, 3))
    rgb_color = color_hs_to_RGB(*hs_color)
    xy_color = color_hs_to_xy(*hs_color)
    bg_bright = round(255 * int(PROPERTIES["bg_bright"]) / 100)
    bg_ct = color_temperature_kelvin_to_mired(int(PROPERTIES["bg_ct"]))
    bg_rgb = int(PROPERTIES["bg_rgb"])
    bg_rgb_color = ((bg_rgb >> 16) & 0xFF, (bg_rgb >> 8) & 0xFF, bg_rgb & 0xFF)
    bg_hs_color = color_RGB_to_hs(*bg_rgb_color)
    bg_xy_color = color_RGB_to_xy(*bg_rgb_color)
    nl_br = round(255 * int(PROPERTIES["nl_br"]) / 100)

    # Default
    await _async_test(
        None,
        "mono",
        {
            "effect_list": YEELIGHT_MONO_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "brightness": bright,
        },
    )

    # White
    await _async_test(
        BulbType.White,
        "mono",
        {
            "effect_list": YEELIGHT_MONO_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "brightness": bright,
        },
    )

    # Color
    model_specs = _MODEL_SPECS["color"]
    await _async_test(
        BulbType.Color,
        "color",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT_RGB,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "color_temp": ct,
            "hs_color": hs_color,
            "rgb_color": rgb_color,
            "xy_color": xy_color,
        },
        {"supported_features": 0},
    )

    # WhiteTemp
    model_specs = _MODEL_SPECS["ceiling1"]
    await _async_test(
        BulbType.WhiteTemp,
        "ceiling1",
        {
            "effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT_WHITE_TEMP,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "color_temp": ct,
        },
        {
            "effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "brightness": nl_br,
        },
    )

    # WhiteTempMood
    properties.pop("power")
    properties["main_power"] = "on"
    model_specs = _MODEL_SPECS["ceiling4"]
    await _async_test(
        BulbType.WhiteTempMood,
        "ceiling4",
        {
            "friendly_name": NAME,
            "effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
            "flowing": False,
            "night_light": True,
            "supported_features": SUPPORT_YEELIGHT_WHITE_TEMP,
            "min_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["max"]
            ),
            "max_mireds": color_temperature_kelvin_to_mired(
                model_specs["color_temp"]["min"]
            ),
            "brightness": current_brightness,
            "color_temp": ct,
        },
        {
            "effect_list": YEELIGHT_TEMP_ONLY_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT,
            "brightness": nl_br,
        },
    )
    # WhiteTempMood ambilight entity.
    await _async_test(
        BulbType.WhiteTempMood,
        "ceiling4",
        {
            "effect_list": YEELIGHT_COLOR_EFFECT_LIST,
            "supported_features": SUPPORT_YEELIGHT_RGB,
            "min_mireds": color_temperature_kelvin_to_mired(6500),
            "max_mireds": color_temperature_kelvin_to_mired(1700),
            "brightness": bg_bright,
            "color_temp": bg_ct,
            "hs_color": bg_hs_color,
            "rgb_color": bg_rgb_color,
            "xy_color": bg_xy_color,
        },
        name=f"{UNIQUE_NAME} ambilight",
        entity_id=f"{ENTITY_LIGHT}_ambilight",
    )
async def test_effects(hass: HomeAssistant):
    """Test effects.

    Registers a custom effect via YAML config, then verifies that each
    built-in and custom effect name produces the expected yeelight Flow when
    the light is turned on with that effect.
    """
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                CONF_CUSTOM_EFFECTS: [
                    {
                        CONF_NAME: "mock_effect",
                        CONF_FLOW_PARAMS: {
                            ATTR_COUNT: 3,
                            ATTR_TRANSITIONS: [
                                {YEELIGHT_HSV_TRANSACTION: [300, 50, 500, 50]},
                                {YEELIGHT_RGB_TRANSITION: [100, 100, 100, 300, 30]},
                                {YEELIGHT_TEMPERATURE_TRANSACTION: [3000, 200, 20]},
                                {YEELIGHT_SLEEP_TRANSACTION: [800]},
                            ],
                        },
                    },
                ],
            },
        },
    )
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data=CONFIG_ENTRY_DATA,
    )
    config_entry.add_to_hass(hass)
    mocked_bulb = _mocked_bulb()
    with _patch_discovery(MODULE), patch(f"{MODULE}.Bulb", return_value=mocked_bulb):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    # The custom effect must be appended to the standard color effect list.
    assert hass.states.get(ENTITY_LIGHT).attributes.get(
        "effect_list"
    ) == YEELIGHT_COLOR_EFFECT_LIST + ["mock_effect"]

    async def _async_test_effect(name, target=None, called=True):
        # Turn on with the named effect and compare the Flow handed to the
        # bulb against `target` (None target = only verify start_flow ran).
        mocked_start_flow = MagicMock()
        type(mocked_bulb).start_flow = mocked_start_flow
        await hass.services.async_call(
            "light",
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_EFFECT: name},
            blocking=True,
        )
        if not called:
            return
        mocked_start_flow.assert_called_once()
        if target is None:
            return
        args, _ = mocked_start_flow.call_args
        flow = args[0]
        assert flow.count == target.count
        assert flow.action == target.action
        assert str(flow.transitions) == str(target.transitions)

    effects = {
        "mock_effect": Flow(
            count=3,
            transitions=[
                HSVTransition(300, 50, 500, 50),
                RGBTransition(100, 100, 100, 300, 30),
                TemperatureTransition(3000, 200, 20),
                SleepTransition(800),
            ],
        ),
        EFFECT_DISCO: Flow(transitions=transitions.disco()),
        EFFECT_FAST_RANDOM_LOOP: None,
        EFFECT_WHATSAPP: Flow(count=2, transitions=transitions.pulse(37, 211, 102)),
        EFFECT_FACEBOOK: Flow(count=2, transitions=transitions.pulse(59, 89, 152)),
        EFFECT_TWITTER: Flow(count=2, transitions=transitions.pulse(0, 172, 237)),
    }
    for name, target in effects.items():
        await _async_test_effect(name, target)
    await _async_test_effect("not_existed", called=False)
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import os
import plistlib
import shutil
import signal
import tempfile
import xml.parsers.expat
from telemetry import decorators
from telemetry.core import util
import telemetry.core.platform as platform
import telemetry.core.platform.power_monitor as power_monitor
class PowerMetricsPowerMonitor(power_monitor.PowerMonitor):
  def __init__(self, backend):
    """Create a power monitor that samples via the powermetrics utility.

    Args:
      backend: Platform backend used to launch/stop the powermetrics process.
    """
    super(PowerMetricsPowerMonitor, self).__init__()
    # Handle to the launched powermetrics process; None when not monitoring.
    self._powermetrics_process = None
    self._backend = backend
    # Path of the plist file powermetrics writes its samples to.
    self._output_filename = None
    # NOTE(review): attribute name is misspelled ("ouput"); kept as-is because
    # code outside this method (e.g. StartMonitoringPowerAsync) references it.
    self._ouput_directory = None
  @property
  def binary_path(self):
    """Absolute path of the powermetrics command-line utility on OS X."""
    return '/usr/bin/powermetrics'
  def StartMonitoringPowerAsync(self):
    """Launch powermetrics and block until it has started sampling.

    Asserts if monitoring is already in progress. Sampling output is written
    as a plist file inside a fresh temporary directory.
    """
    assert not self._powermetrics_process, (
        "Must call StopMonitoringPowerAsync().")
    SAMPLE_INTERVAL_MS = 1000 / 20  # 20 Hz, arbitrary.
    # Empirically powermetrics creates an empty output file immediately upon
    # starting. We detect file creation as a signal that measurement has
    # started. In order to avoid various race conditions in tempfile creation
    # we create a temp directory and have powermetrics create it's output
    # there rather than say, creating a tempfile, deleting it and reusing its
    # name.
    self._ouput_directory = tempfile.mkdtemp()
    self._output_filename = os.path.join(self._ouput_directory,
                                         'powermetrics.output')
    args = ['-f', 'plist',
            '-i', '%d' % SAMPLE_INTERVAL_MS,
            '-u', self._output_filename]
    # powermetrics requires root; the backend handles privilege elevation.
    self._powermetrics_process = self._backend.LaunchApplication(
        self.binary_path, args, elevate_privilege=True)
    # Block until output file is written to ensure this function call is
    # synchronous in respect to powermetrics starting.
    def _OutputFileExists():
      return os.path.isfile(self._output_filename)
    timeout_sec = 2 * (SAMPLE_INTERVAL_MS / 1000.)
    util.WaitFor(_OutputFileExists, timeout_sec)
@decorators.Cache
def CanMonitorPowerAsync(self):
mavericks_or_later = (self._backend.GetOSVersionName() >=
platform.mac_platform_backend.MAVERICKS)
binary_path = self.binary_path
return mavericks_or_later and self._backend.CanLaunchApplication(
binary_path)
  @staticmethod
  def _ParsePlistString(plist_string):
    """Wrapper to parse a plist from a string and catch any errors.

    Sometimes powermetrics will exit in the middle of writing its output;
    empirically it seems that it always writes at least one sample in its
    entirety so we can safely ignore any errors in its output.

    Returns:
      Parser output on successful parse, None on parse error.
    """
    # NOTE(review): plistlib.readPlistFromString was removed in Python 3.9
    # (plistlib.loads is the replacement); this module targets Python 2.
    try:
      return plistlib.readPlistFromString(plist_string)
    except xml.parsers.expat.ExpatError:
      return None
@staticmethod
def ParsePowerMetricsOutput(powermetrics_output):
"""Parse output of powermetrics command line utility.
Returns:
Dictionary in the format returned by StopMonitoringPowerAsync() or None
if |powermetrics_output| is empty - crbug.com/353250 .
"""
if len(powermetrics_output) == 0:
logging.warning("powermetrics produced zero length output")
return None
# Container to collect samples for running averages.
# out_path - list containing the key path in the output dictionary.
# src_path - list containing the key path to get the data from in
# powermetrics' output.
def ConstructMetric(out_path, src_path):
RunningAverage = collections.namedtuple('RunningAverage', [
'out_path', 'src_path', 'samples'])
return RunningAverage(out_path, src_path, [])
# List of RunningAverage objects specifying metrics we want to aggregate.
metrics = [
ConstructMetric(
['component_utilization', 'whole_package', 'average_frequency_mhz'],
['processor','freq_hz']),
ConstructMetric(
['component_utilization', 'whole_package', 'idle_percent'],
['processor','packages', 0, 'c_state_ratio'])]
def DataWithMetricKeyPath(metric, powermetrics_output):
"""Retrieve the sample from powermetrics' output for a given metric.
Args:
metric: The RunningAverage object we want to collect a new sample for.
powermetrics_output: Dictionary containing powermetrics output.
Returns:
The sample corresponding to |metric|'s keypath."""
# Get actual data corresponding to key path.
out_data = powermetrics_output
for k in metric.src_path:
out_data = out_data[k]
assert type(out_data) in [int, float], (
"Was expecting a number: %s (%s)" % (type(out_data), out_data))
return float(out_data)
power_samples = []
sample_durations = []
total_energy_consumption_mwh = 0
# powermetrics outputs multiple plists separated by null terminators.
raw_plists = powermetrics_output.split('\0')
raw_plists = [x for x in raw_plists if len(x) > 0]
# -------- Examine contents of first plist for systems specs. --------
plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
if not plist:
logging.warning("powermetrics produced invalid output, output length: "
"%d" % len(powermetrics_output))
return {}
if 'GPU' in plist:
metrics.extend([
ConstructMetric(
['component_utilization', 'gpu', 'average_frequency_mhz'],
['GPU', 0, 'freq_hz']),
ConstructMetric(
['component_utilization', 'gpu', 'idle_percent'],
['GPU', 0, 'c_state_ratio'])])
# There's no way of knowing ahead of time how many cpus and packages the
# current system has. Iterate over cores and cpus - construct metrics for
# each one.
if 'processor' in plist:
core_dict = plist['processor']['packages'][0]['cores']
num_cores = len(core_dict)
cpu_num = 0
for core_idx in xrange(num_cores):
num_cpus = len(core_dict[core_idx]['cpus'])
base_src_path = ['processor', 'packages', 0, 'cores', core_idx]
for cpu_idx in xrange(num_cpus):
base_out_path = ['component_utilization', 'cpu%d' % cpu_num]
# C State ratio is per-package, component CPUs of that package may
# have different frequencies.
metrics.append(ConstructMetric(
base_out_path + ['average_frequency_mhz'],
base_src_path + ['cpus', cpu_idx, 'freq_hz']))
metrics.append(ConstructMetric(
base_out_path + ['idle_percent'],
base_src_path + ['c_state_ratio']))
cpu_num += 1
# -------- Parse Data Out of Plists --------
for raw_plist in raw_plists:
plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plist)
if not plist:
continue
# Duration of this sample.
sample_duration_ms = int(plist['elapsed_ns']) / 10**6
sample_durations.append(sample_duration_ms)
if 'processor' not in plist:
continue
processor = plist['processor']
energy_consumption_mw = int(processor.get('package_watts', 0)) * 10**3
total_energy_consumption_mwh += (energy_consumption_mw *
(sample_duration_ms / 3600000.))
power_samples.append(energy_consumption_mw)
for m in metrics:
m.samples.append(DataWithMetricKeyPath(m, plist))
# -------- Collect and Process Data --------
out_dict = {}
out_dict['identifier'] = 'powermetrics'
# Raw power usage samples.
if power_samples:
out_dict['power_samples_mw'] = power_samples
out_dict['energy_consumption_mwh'] = total_energy_consumption_mwh
def StoreMetricAverage(metric, sample_durations, out):
"""Calculate average value of samples in a metric and store in output
path as specified by metric.
Args:
metric: A RunningAverage object containing samples to average.
sample_durations: A list which parallels the samples list containing
the time slice for each sample.
out: The output dicat, average is stored in the location specified by
metric.out_path.
"""
if len(metric.samples) == 0:
return
assert len(metric.samples) == len(sample_durations)
avg = 0
for i in xrange(len(metric.samples)):
avg += metric.samples[i] * sample_durations[i]
avg /= sum(sample_durations)
# Store data in output, creating empty dictionaries as we go.
for k in metric.out_path[:-1]:
if not out.has_key(k):
out[k] = {}
out = out[k]
out[metric.out_path[-1]] = avg
for m in metrics:
StoreMetricAverage(m, sample_durations, out_dict)
return out_dict
  def StopMonitoringPowerAsync(self):
    """Stop the powermetrics process and return its parsed output.

    Returns:
      Dictionary of aggregated power/utilization data (see
      ParsePowerMetricsOutput), or None/{} when the output was
      empty/invalid.

    Raises:
      AssertionError: if monitoring was never started, or powermetrics
          exited with an unexpected return code.
    """
    assert self._powermetrics_process, (
        "StartMonitoringPowerAsync() not called.")
    # Tell powermetrics to take an immediate sample.
    try:
      # SIGINFO (a BSD/macOS signal) forces one final sample before SIGTERM
      # asks the process to exit cleanly.
      self._powermetrics_process.send_signal(signal.SIGINFO)
      self._powermetrics_process.send_signal(signal.SIGTERM)
      (power_stdout, power_stderr) = self._powermetrics_process.communicate()
      returncode = self._powermetrics_process.returncode
      # -15 means "killed by SIGTERM", which is the expected shutdown path.
      assert returncode in [0, -15], (
          """powermetrics error
          return code=%d
          stdout=(%s)
          stderr=(%s)""" % (returncode, power_stdout, power_stderr))

      with open(self._output_filename, 'rb') as output_file:
        powermetrics_output = output_file.read()
      return PowerMetricsPowerMonitor.ParsePowerMetricsOutput(
          powermetrics_output)
    finally:
      # Always clean up the temp directory and reset state - even after an
      # assertion failure - so a later Start call is not rejected.
      shutil.rmtree(self._ouput_directory)
      self._ouput_directory = None
      self._output_filename = None
      self._powermetrics_process = None
|
|
from packaging.version import Version
import os
import numpy as np
import pandas as pd
import yaml
import mlflow
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
)
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import (
_mlflow_conda_env,
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
)
from mlflow.utils.requirements_utils import _get_pinned_requirement
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.file_utils import write_to
from mlflow.utils.autologging_utils import (
autologging_integration,
safe_patch,
batch_metrics_logger,
)
# Name of the MLflow model flavor implemented by this module.
FLAVOR_NAME = "gluon"
# Base filename (no extension) used when exporting the Gluon network; MXNet's
# export() derives "<net>-symbol.json" and "<net>-0000.params" from it.
_MODEL_SAVE_PATH = "net"
def load_model(model_uri, ctx, dst_path=None):
    """
    Load a Gluon model from a local file or a run.

    :param model_uri: The location, in URI format, of the MLflow model, e.g.
                      ``/Users/me/path/to/local/model``,
                      ``relative/path/to/local/model``,
                      ``s3://my_bucket/path/to/model``,
                      ``runs:/<mlflow_run_id>/run-relative/path/to/model``,
                      ``models:/<model_name>/<model_version>`` or
                      ``models:/<model_name>/<stage>``. For more information about supported URI
                      schemes, see `Referencing Artifacts
                      <https://www.mlflow.org/docs/latest/concepts.html#artifact-locations>`_.
    :param ctx: Either CPU or GPU.
    :param dst_path: The local filesystem path to which to download the model artifact.
                     This directory must already exist. If unspecified, a local output
                     path will be created.
    :return: A Gluon model instance.

    .. code-block:: python
        :caption: Example

        # Load persisted model as a Gluon model, make inferences against an NDArray
        model = mlflow.gluon.load_model("runs:/" + gluon_random_data_run.info.run_id + "/model")
        model(nd.array(np.random.rand(1000, 1, 32)))
    """
    import mxnet as mx
    from mxnet import gluon
    from mxnet import sym

    local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
    conf = _get_flavor_configuration(model_path=local_path, flavor_name=FLAVOR_NAME)
    _add_code_from_conf_to_system_path(local_path, conf)

    # MXNet's export() writes "<prefix>-symbol.json" and "<prefix>-0000.params".
    save_prefix = os.path.join(local_path, "data", _MODEL_SAVE_PATH)
    symbol_file = save_prefix + "-symbol.json"
    params_file = save_prefix + "-0000.params"

    if Version(mx.__version__) < Version("2.0.0"):
        # MXNet 1.x: rebuild the SymbolBlock by hand and load parameters onto ctx.
        network = gluon.SymbolBlock(sym.load(symbol_file), sym.var("data", dtype="float32"))
        network.collect_params().load(params_file, ctx)
        return network
    # MXNet 2.x offers a one-shot import helper.
    return gluon.SymbolBlock.imports(
        symbol_file, input_names=["data"], param_file=params_file, ctx=ctx
    )
class _GluonModelWrapper:
    """Pyfunc adapter exposing a Gluon network through a ``predict`` method."""

    def __init__(self, gluon_model):
        self.gluon_model = gluon_model

    def predict(self, data):
        """
        :param data: Either a pandas DataFrame or a numpy array containing input array values.
                     If the input is a DataFrame, it will be converted to an array first by a
                     `ndarray = df.values`.
        :return: Model predictions. If the input is a pandas.DataFrame, the predictions are returned
                 in a pandas.DataFrame. If the input is a numpy array, the predictions are returned
                 as either a numpy.ndarray or a plain list for hybrid models.
        """
        import mxnet as mx

        if isinstance(data, pd.DataFrame):
            # DataFrame in, DataFrame out: mirror the input container type.
            return pd.DataFrame(self._infer(mx, data.values))
        if isinstance(data, np.ndarray):
            return self._infer(mx, data)
        raise TypeError("Input data should be pandas.DataFrame or numpy.ndarray")

    def _infer(self, mx, values):
        # Shared inference path for both input kinds: run the network on the
        # raw array values and convert NDArray results back to numpy so
        # callers never see MXNet types.
        preds = self.gluon_model(mx.nd.array(values))
        if isinstance(preds, mx.ndarray.ndarray.NDArray):
            preds = preds.asnumpy()
        return preds
def _load_pyfunc(path):
    """
    Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.

    :param path: Local filesystem path to the MLflow Model with the ``gluon`` flavor.
    """
    import mxnet as mx

    # Load onto MXNet's default context and wrap for the pyfunc interface.
    network = load_model(path, mx.current_context())
    return _GluonModelWrapper(network)
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="mxnet"))
def save_model(
    gluon_model,
    path,
    mlflow_model=None,
    conda_env=None,
    code_paths=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Save a Gluon model to a path on the local file system.

    :param gluon_model: Gluon model to be saved. Must be already hybridized.
    :param path: Local path where the model is to be saved.
    :param mlflow_model: MLflow model config this flavor is being added to.
    :param conda_env: {{ conda_env }}
    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
                       containing file dependencies). These files are *prepended* to the system
                       path when the model is loaded.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>` describing model
                      input and output :py:class:`Schema <mlflow.types.Schema>`. It can be
                      :py:func:`inferred <mlflow.models.infer_signature>` from datasets with valid
                      model input and valid model output, e.g. the training dataset with target
                      column omitted and the corresponding model predictions.
    :param input_example: One or several instances of valid model input, used as a hint of what
                          data to feed the model. A Pandas DataFrame is serialized to json using
                          the Pandas split-oriented format; a numpy array is serialized by
                          converting it to a list. Bytes are base64-encoded.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    """
    import mxnet as mx

    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)

    path = os.path.abspath(path)
    if os.path.exists(path):
        raise MlflowException("Path '{}' already exists".format(path))
    data_path = os.path.join(path, "data")
    os.makedirs(data_path)
    code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)

    if mlflow_model is None:
        mlflow_model = Model()
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)

    # The epoch argument of the export method does not play any role in selecting
    # a specific epoch's parameters, and is there only for display purposes.
    gluon_model.export(os.path.join(data_path, _MODEL_SAVE_PATH))

    pyfunc.add_to_model(
        mlflow_model, loader_module="mlflow.gluon", env=_CONDA_ENV_FILE_NAME, code=code_dir_subpath
    )
    mlflow_model.add_flavor(FLAVOR_NAME, mxnet_version=mx.__version__, code=code_dir_subpath)
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))

    # Resolve the environment specification: an explicit conda_env wins;
    # otherwise derive it from (possibly inferred) pip requirements.
    if conda_env is not None:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
    else:
        default_reqs = None
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            inferred_reqs = mlflow.models.infer_pip_requirements(
                path,
                FLAVOR_NAME,
                fallback=default_reqs,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )

    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))

    # Save `requirements.txt`
    write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
def get_default_pip_requirements():
    """
    :return: A list of default pip requirements for MLflow Models produced by this flavor.
             Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
             that, at minimum, contains these requirements.
    """
    # Pin mxnet to the version installed in the current environment.
    pinned_mxnet = _get_pinned_requirement("mxnet")
    return [pinned_mxnet]
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    pip_deps = get_default_pip_requirements()
    return _mlflow_conda_env(additional_pip_deps=pip_deps)
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="mxnet"))
def log_model(
    gluon_model,
    artifact_path,
    conda_env=None,
    code_paths=None,
    registered_model_name=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Log a Gluon model as an MLflow artifact for the current run.

    :param gluon_model: Gluon model to be saved. Must be already hybridized.
    :param artifact_path: Run-relative artifact path.
    :param conda_env: {{ conda_env }}
    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
                       containing file dependencies). These files are *prepended* to the system
                       path when the model is loaded.
    :param registered_model_name: If given, create a model version under
                                  ``registered_model_name``, also creating a registered model if
                                  one with the given name does not exist.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>` describing model
                      input and output :py:class:`Schema <mlflow.types.Schema>`. It can be
                      :py:func:`inferred <mlflow.models.infer_signature>` from datasets with valid
                      model input and valid model output, e.g. the training dataset with target
                      column omitted and the corresponding model predictions.
    :param input_example: One or several instances of valid model input, used as a hint of what
                          data to feed the model. A Pandas DataFrame is serialized to json using
                          the Pandas split-oriented format; a numpy array is serialized by
                          converting it to a list. Bytes are base64-encoded.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    :return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
             metadata of the logged model.
    """
    # Delegate to Model.log, which invokes mlflow.gluon.save_model under the hood.
    log_kwargs = dict(
        artifact_path=artifact_path,
        flavor=mlflow.gluon,
        gluon_model=gluon_model,
        conda_env=conda_env,
        code_paths=code_paths,
        registered_model_name=registered_model_name,
        signature=signature,
        input_example=input_example,
        pip_requirements=pip_requirements,
        extra_pip_requirements=extra_pip_requirements,
    )
    return Model.log(**log_kwargs)
@autologging_integration(FLAVOR_NAME)
def autolog(
    log_models=True,
    disable=False,
    exclusive=False,
    disable_for_unsupported_versions=False,
    silent=False,
    registered_model_name=None,
):  # pylint: disable=unused-argument
    """
    Enables (or disables) and configures autologging from Gluon to MLflow.

    Logs loss and any other metrics specified in the fit function, and optimizer data as
    parameters. Model checkpoints are logged as artifacts to a 'models' directory.

    :param log_models: If ``True``, trained models are logged as MLflow model artifacts.
                       If ``False``, trained models are not logged.
    :param disable: If ``True``, disables the MXNet Gluon autologging integration. If ``False``,
                    enables the MXNet Gluon autologging integration.
    :param exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
                      If ``False``, autologged content is logged to the active fluent run,
                      which may be user-created.
    :param disable_for_unsupported_versions: If ``True``, disable autologging for versions of
                                             gluon that have not been tested against this version
                                             of the MLflow client or are incompatible.
    :param silent: If ``True``, suppress all event logs and warnings from MLflow during MXNet
                   Gluon autologging. If ``False``, show all events and warnings.
    :param registered_model_name: If given, each time a model is trained, it is registered as a
                                  new model version of the registered model with this name.
                                  The registered model is created if it does not already exist.
    """
    from mxnet.gluon.contrib.estimator import Estimator

    from mlflow.gluon._autolog import __MLflowGluonCallback

    def fit(original, self, *args, **kwargs):
        # Run `fit` inside a batch metrics logger context so that metric
        # writes are batched into fewer MLflow API calls.
        run_id = mlflow.active_run().info.run_id
        with batch_metrics_logger(run_id) as metrics_logger:
            callback = __MLflowGluonCallback(log_models, metrics_logger)
            # `event_handlers` is the fourth positional parameter of
            # Estimator.fit; append our callback wherever the caller put it.
            if len(args) >= 4:
                args = (*args[:3], args[3] + [callback], *args[4:])
            elif "event_handlers" in kwargs:
                kwargs["event_handlers"] += [callback]
            else:
                kwargs["event_handlers"] = [callback]
            return original(self, *args, **kwargs)

    safe_patch(FLAVOR_NAME, Estimator, "fit", fit, manage_run=True)
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
    """Exercise bitcoind's -proxy/-onion/-proxyrandomize options against
    in-process SOCKS5 servers (see the test plan in the module docstring)."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False

    def setup_nodes(self):
        """Start the SOCKS5 proxy servers and four differently-configured nodes."""
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        # Ports are offset by the pid so parallel test runs don't collide.
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # ... one on IPv6 with similar configuration
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            self.log.warning("Testing without local IPv6 support")

        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()

        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            # Node 3 routes everything through the IPv6 proxy, onions disabled.
            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)

    def node_test(self, node, proxies, auth, test_onion=True):
        """Drive outgoing connections of `node` and verify what the proxies see.

        Args:
            node: node under test.
            proxies: [ipv4_proxy, ipv6_proxy, onion_proxy, dns_proxy] expected
                to receive the respective connection attempts.
            auth: whether randomized credentials are expected on each request.
            test_onion: whether to attempt the .onion connection.

        Returns:
            List of Socks5Command objects received by the proxies.
        """
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)

        if self.have_ipv6:
            # Test: outgoing IPv6 connection through node
            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
            cmd = proxies[1].queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
            assert_equal(cmd.port, 5443)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)

        if test_onion:
            # Test: outgoing onion connection through node
            node.addnode("bitcoinostk4e4re.onion:9711", "onetry")
            cmd = proxies[2].queue.get()
            assert(isinstance(cmd, Socks5Command))
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
            assert_equal(cmd.port, 9711)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)

        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:9711", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"node.noumenon")
        assert_equal(cmd.port, 9711)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)

        return rv

    def run_test(self):
        """Verify proxied connections and the getnetworkinfo proxy fields."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)

        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)

        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), len(rv))

        if self.have_ipv6:
            # proxy on IPv6 localhost
            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)

        def networks_dict(d):
            # Re-key the getnetworkinfo 'networks' array by network name.
            r = {}
            for x in d['networks']:
                r[x['name']] = x
            return r

        # test RPC getnetworkinfo
        n0 = networks_dict(self.nodes[0].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n0[net]['proxy_randomize_credentials'], True)
        assert_equal(n0['onion']['reachable'], True)

        n1 = networks_dict(self.nodes[1].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n1[net]['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['reachable'], True)

        n2 = networks_dict(self.nodes[2].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
            assert_equal(n2[net]['proxy_randomize_credentials'], True)
        assert_equal(n2['onion']['reachable'], True)

        if self.have_ipv6:
            n3 = networks_dict(self.nodes[3].getnetworkinfo())
            for net in ['ipv4','ipv6']:
                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                assert_equal(n3[net]['proxy_randomize_credentials'], False)
            # Onions were disabled on node 3 via -noonion.
            assert_equal(n3['onion']['reachable'], False)
# Entry point when executed directly: run the full proxy test suite.
if __name__ == '__main__':
    ProxyTest().main()
|
|
"""Support for Denon Network Receivers."""
import logging
import telnetlib
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Fallback entity name, replaced by the receiver's network name (NSFRN)
# when one is reported.
DEFAULT_NAME = "Music station"

# Features available regardless of the selected input.
SUPPORT_DENON = (
    SUPPORT_VOLUME_SET
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
    | SUPPORT_SELECT_SOURCE
)
# Extra playback features, only advertised while a media source is active.
SUPPORT_MEDIA_MODES = (
    SUPPORT_PAUSE
    | SUPPORT_STOP
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_NEXT_TRACK
    | SUPPORT_PLAY
)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)

# Display name -> Denon protocol code for conventional inputs.
NORMAL_INPUTS = {
    "Cd": "CD",
    "Dvd": "DVD",
    "Blue ray": "BD",
    "TV": "TV",
    "Satellite / Cable": "SAT/CBL",
    "Game": "GAME",
    "Game2": "GAME2",
    "Video Aux": "V.AUX",
    "Dock": "DOCK",
}

# Display name -> Denon protocol code for media/network sources; these get
# the SUPPORT_MEDIA_MODES playback features.
MEDIA_MODES = {
    "Tuner": "TUNER",
    "Media server": "SERVER",
    "Ipod dock": "IPOD",
    "Net/USB": "NET/USB",
    "Rapsody": "RHAPSODY",
    "Napster": "NAPSTER",
    "Pandora": "PANDORA",
    "LastFM": "LASTFM",
    "Flickr": "FLICKR",
    "Favorites": "FAVORITES",
    "Internet Radio": "IRADIO",
    "USB/IPOD": "USB/IPOD",
}

# Sub-modes of 'NET/USB'
# {'USB': 'USB', 'iPod Direct': 'IPD', 'Internet Radio': 'IRP',
# 'Favorites': 'FVP'}
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Denon platform."""
    device = DenonDevice(config[CONF_NAME], config[CONF_HOST])
    # Only register the entity if the receiver answered the initial poll.
    if device.update():
        add_entities([device])
class DenonDevice(MediaPlayerEntity):
"""Representation of a Denon device."""
    def __init__(self, name, host):
        """Initialize the Denon device."""
        self._name = name
        self._host = host
        # Raw power state string from the receiver ("PWON"/"PWSTANDBY").
        self._pwstate = "PWSTANDBY"
        # Absolute volume step as reported by MV responses.
        self._volume = 0
        # Initial value 60dB, changed if we get a MVMAX
        self._volume_max = 60
        self._source_list = NORMAL_INPUTS.copy()
        self._source_list.update(MEDIA_MODES)
        self._muted = False
        # Protocol code of the currently selected input (e.g. "TUNER").
        self._mediasource = ""
        # Multi-line now-playing text assembled from NSE responses.
        self._mediainfo = ""
        # Device-configured sources are queried once, on the first update().
        self._should_setup_sources = True
def _setup_sources(self, telnet):
# NSFRN - Network name
nsfrn = self.telnet_request(telnet, "NSFRN ?")[len("NSFRN ") :]
if nsfrn:
self._name = nsfrn
# SSFUN - Configured sources with (optional) names
self._source_list = {}
for line in self.telnet_request(telnet, "SSFUN ?", all_lines=True):
ssfun = line[len("SSFUN") :].split(" ", 1)
source = ssfun[0]
if len(ssfun) == 2 and ssfun[1]:
configured_name = ssfun[1]
else:
# No name configured, reusing the source name
configured_name = source
self._source_list[configured_name] = source
# SSSOD - Deleted sources
for line in self.telnet_request(telnet, "SSSOD ?", all_lines=True):
source, status = line[len("SSSOD") :].split(" ", 1)
if status == "DEL":
for pretty_name, name in self._source_list.items():
if source == name:
del self._source_list[pretty_name]
break
@classmethod
def telnet_request(cls, telnet, command, all_lines=False):
"""Execute `command` and return the response."""
_LOGGER.debug("Sending: %s", command)
telnet.write(command.encode("ASCII") + b"\r")
lines = []
while True:
line = telnet.read_until(b"\r", timeout=0.2)
if not line:
break
lines.append(line.decode("ASCII").strip())
_LOGGER.debug("Received: %s", line)
if all_lines:
return lines
return lines[0] if lines else ""
def telnet_command(self, command):
"""Establish a telnet connection and sends `command`."""
telnet = telnetlib.Telnet(self._host)
_LOGGER.debug("Sending: %s", command)
telnet.write(command.encode("ASCII") + b"\r")
telnet.read_very_eager() # skip response
telnet.close()
def update(self):
"""Get the latest details from the device."""
try:
telnet = telnetlib.Telnet(self._host)
except OSError:
return False
if self._should_setup_sources:
self._setup_sources(telnet)
self._should_setup_sources = False
self._pwstate = self.telnet_request(telnet, "PW?")
for line in self.telnet_request(telnet, "MV?", all_lines=True):
if line.startswith("MVMAX "):
# only grab two digit max, don't care about any half digit
self._volume_max = int(line[len("MVMAX ") : len("MVMAX XX")])
continue
if line.startswith("MV"):
self._volume = int(line[len("MV") :])
self._muted = self.telnet_request(telnet, "MU?") == "MUON"
self._mediasource = self.telnet_request(telnet, "SI?")[len("SI") :]
if self._mediasource in MEDIA_MODES.values():
self._mediainfo = ""
answer_codes = [
"NSE0",
"NSE1X",
"NSE2X",
"NSE3X",
"NSE4",
"NSE5",
"NSE6",
"NSE7",
"NSE8",
]
for line in self.telnet_request(telnet, "NSE", all_lines=True):
self._mediainfo += f"{line[len(answer_codes.pop(0)) :]}\n"
else:
self._mediainfo = self.source
telnet.close()
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._pwstate == "PWSTANDBY":
return STATE_OFF
if self._pwstate == "PWON":
return STATE_ON
return None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / self._volume_max
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
return self._muted
@property
def source_list(self):
"""Return the list of available input sources."""
return sorted(list(self._source_list))
@property
def media_title(self):
"""Return the current media info."""
return self._mediainfo
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._mediasource in MEDIA_MODES.values():
return SUPPORT_DENON | SUPPORT_MEDIA_MODES
return SUPPORT_DENON
@property
def source(self):
"""Return the current input source."""
for pretty_name, name in self._source_list.items():
if self._mediasource == name:
return pretty_name
def turn_off(self):
"""Turn off media player."""
self.telnet_command("PWSTANDBY")
def volume_up(self):
"""Volume up media player."""
self.telnet_command("MVUP")
def volume_down(self):
"""Volume down media player."""
self.telnet_command("MVDOWN")
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.telnet_command(f"MV{round(volume * self._volume_max):02}")
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
mute_status = "ON" if mute else "OFF"
self.telnet_command(f"MU{mute_status})")
def media_play(self):
"""Play media player."""
self.telnet_command("NS9A")
def media_pause(self):
"""Pause media player."""
self.telnet_command("NS9B")
def media_stop(self):
"""Pause media player."""
self.telnet_command("NS9C")
def media_next_track(self):
"""Send the next track command."""
self.telnet_command("NS9D")
def media_previous_track(self):
"""Send the previous track command."""
self.telnet_command("NS9E")
def turn_on(self):
"""Turn the media player on."""
self.telnet_command("PWON")
def select_source(self, source):
"""Select input source."""
self.telnet_command(f"SI{self._source_list.get(source)}")
|
|
import fastlmm.association as association
import scipy as sp
from sklearn import linear_model
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
#!! from fastlmm.external.sklearn.metrics.scorer import SCORERS, Scorer
from fastlmm import inference
import fastlmm.util.util as util
import pdb
class testCV(association.varcomp_test):
    """Cross-validation based association test.

    The constructor fits the null model (without the tested SNP set G1)
    via (optionally nested) cross-validation and stores its out-of-sample
    scores; testG() then fits the alternative model including G1 and
    reports a statistic from the difference in mean scores.

    NOTE(review): score_nestedCV's non-nested branch references SCORERS,
    whose import is commented out at the top of this module -- that branch
    will raise NameError as written. Confirm before use.
    """
    __slots__ = ["G0","greater_is_better","model","scoring","n_folds",
                 "n_folds_params","verbose","n_jobs_grid",
                 "data_permutation","scores_null","params_null","nullModel",
                 "altModel"]

    def __init__(self,Y,X=None, G0=None, appendbias=False, model = None,
                 n_folds_params=10, n_folds=10, scoring = None, verbose = False,
                 n_jobs_grid=1, data_permutation=None, nested=True, greater_is_better=None,
                 nullModel=None, altModel=None):#Note that this code will break as we don't know what it does
        """Set up the test and evaluate the null model by cross-validation.

        Y, X, G0: phenotype, covariates and background SNPs (see varcomp_test).
        nullModel / altModel: dicts describing the model -- at least 'effect'
            ('fixed' or 'mixed') and 'link' ('linear' or 'logistic'); fixed
            effects also carry 'penalty' ('l1'/'l2'); an explicit 'param_grid'
            entry overrides the generated one.
        scoring / greater_is_better: scorer name and its direction; derived
            from the null model's link function when scoring is None.
        data_permutation: row permutation applied to all data; a seeded
            pseudo-random one is generated when None.
        """
        association.varcomp_test.__init__(self,Y=Y,X=X,appendbias=appendbias)
        # The 'model' parameter is unused; kept only for interface compatibility.
        assert model is None, "Shouldn't we remove this parameter?"
        self.n_jobs_grid = n_jobs_grid
        self.verbose = verbose
        self.n_folds = n_folds
        self.n_folds_params = n_folds_params
        self.G0=G0
        self.nullModel = nullModel
        self.altModel = altModel
        if data_permutation is None:
            data_permutation = util.generatePermutation(self.Y.shape[0],93828231) #permute with an arbitrary seed
        self.data_permutation = data_permutation
        # An explicit grid in the model description wins over the generated one.
        if 'param_grid' in nullModel:
            param_grid = nullModel['param_grid']
        else:
            param_grid = self._getParamGrid(G0, None, nullModel)
        if scoring is None:
            (self.scoring, self.greater_is_better) = self._getScoring()
        else:
            self.scoring = scoring
            self.greater_is_better = greater_is_better
        model = self._getModel(nullModel, param_grid)
        nested = self._isNested(nullModel)
        # Cross-validated scores of the null model; testG() compares against these.
        self.scores_null, self.params_null = self.score_nestedCV(None, model, param_grid,
                                                                 self.nullModel['effect'], nested)

    def _getScoring(self):
        """Map the null model's link function to a scorer name and its direction.

        Returns (scoring, greater_is_better): 'mse' (lower is better) for a
        linear link, 'binomial' (higher is better) for a logistic link.
        """
        if self.nullModel['link'] == 'linear':
            scoring = 'mse'
            greater_is_better=False
        elif self.nullModel['link'] == 'logistic':
            scoring = 'binomial'
            greater_is_better=True
        else:
            assert False, 'Unknown link function.'
        return (scoring, greater_is_better)

    def _getModel(self, modelDesc, param_grid):
        """Build an estimator from a model description dict ('fixed' or 'mixed' effect)."""
        if modelDesc['effect']=='fixed':
            return self._getFixedEffectModel(modelDesc['penalty'], modelDesc['link'],
                                             param_grid)
        elif modelDesc['effect']=='mixed':
            return self._getMixedEffectModel(modelDesc['link'], modelDesc['approx'],
                                             param_grid)
        else:
            assert False

    def _getFixedEffectModel(self, penalty, link, param_grid):
        """Return a linear-model estimator for the given penalty and link.

        Linear link: Ridge (l2) or LassoCV (l1, which does its own inner CV
        over the alpha grid). Logistic link: penalized LogisticRegression.
        The first grid value seeds the regularization strength for the
        estimators that are tuned by the outer GridSearchCV.
        """
        assert penalty in set(['l1','l2'])
        if link == 'linear':
            if penalty == 'l2':
                model = linear_model.Ridge(alpha=param_grid['alpha'][0], fit_intercept=True,
                                           solver = 'auto')
            elif penalty == 'l1':
                model = linear_model.LassoCV(alphas=param_grid['alpha'] ,cv=self.n_folds_params,
                                             precompute='auto',fit_intercept=True)
        elif link == 'logistic':
            model = linear_model.LogisticRegression(penalty=penalty, dual=False,
                                                    tol=0.0001, C=param_grid['C'][0], fit_intercept=True,
                                                    intercept_scaling=1, class_weight=None, random_state=None)
        else:
            assert False, 'Unknown link function.'
        return model

    def _getMixedEffectModel(self, link, approx, param_grid):
        """Instantiate the mixed-effect estimator class for the given link/approximation."""
        C = inference.makeBin2KernelAsEstimator(link, approx)
        return C()

    def _isNested(self, modelDesc):
        """Whether an outer GridSearchCV is needed.

        False only for fixed-effect l1 linear models, since LassoCV performs
        its own internal cross-validation over the alpha grid.
        """
        if modelDesc['effect'] == 'fixed' and modelDesc['link'] == 'linear'\
           and modelDesc['penalty'] == 'l1':
            return False
        return True

    def _getParamGrid(self, G0, G1, modelDesc):
        """Dispatch to the fixed- or mixed-effect parameter grid builder."""
        if modelDesc['effect'] == 'fixed':
            return self._getParamGridFixedEffectModel(G0, G1, modelDesc['link'])
        elif modelDesc['effect'] == 'mixed':
            return self._getParamGridMixedEffectModel(G0, G1)
        assert False

    def _getParamGridFixedEffectModel(self, G0, G1, link):
        """Log-spaced regularization grid: 'alpha' for linear, 'C' for logistic."""
        if link == 'linear':
            param_grid = dict(alpha=0.5*sp.logspace(-5, 5, 20))
        elif link == 'logistic':
            param_grid = dict(C=sp.logspace(-5, 5, 20))
        else:
            assert False
        return param_grid

    def _getParamGridMixedEffectModel(self, G0, G1):
        """Variance-component grid; a component is pinned to 0.0 when its kernel is absent."""
        param_grid = dict(sig02=sp.arange(0.0,2.1,0.4),
                          sig12=sp.arange(0.0,2.1,0.4),
                          sign2=[None],
                          beta=[None])
        if G0 is None:
            param_grid['sig02'] = [0.0]
        if G1 is None:
            param_grid['sig12'] = [0.0]
        return param_grid

    def testG(self, G1, type='',i_exclude=None):
        """Score the alternative model including G1 and compare with the null.

        Returns a dict with the test statistic (twice the mean score
        improvement over the null model), both sets of CV scores and the
        selected parameters. The p-value is not computed here (always 1.0).
        """
        pv=1.0
        stat=1.0
        if 'param_grid' in self.altModel:
            param_grid = self.altModel['param_grid']
        else:
            param_grid = self._getParamGrid(self.G0, G1, self.altModel)
        model = self._getModel(self.altModel, param_grid)
        nested = self._isNested(self.altModel)
        scores,params = self.score_nestedCV(G1, model, param_grid, self.altModel['effect'], nested)
        # Orient the difference so a positive statistic always means the
        # alternative model scored better than the null.
        if self.greater_is_better:
            stat = 2.0*(scores - self.scores_null).mean()
        else:
            stat = 2.0*(self.scores_null-scores).mean()
        test={
              'pv':pv,
              'stat':stat,
              'scores':scores,
              'scores0':self.scores_null,
              'params':params,
              'params0':self.params_null,
              'type':type # is it OK to have an object here instead of a name?
              }
        return test

    # the effect parameter should not be used here, but I dont have a better an idea for now
    def score_nestedCV(self, G1, model, param_grid, effect, nested):
        """K-fold cross-validate `model`, optionally with an inner grid search.

        For each outer fold: fit on the training split (via GridSearchCV over
        param_grid when `nested`, plain fit otherwise) and score on the test
        split. Returns (scores, params) -- one score per fold and, in the
        nested case, the best parameters chosen per fold.
        """
        k_fold = cross_validation.KFold(n=self.Y.shape[0], n_folds=self.n_folds, indices=True)
        i_fold=0
        scores = sp.zeros(self.n_folds)
        params = list()
        for train, test in k_fold:
            (trainData, trainY) = self._packData(G1, train, effect)
            (testData, testY) = self._packData(G1, test, effect)
            if nested:
                clf = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs = self.n_jobs_grid,
                                   cv=self.n_folds_params, scoring=self.scoring, verbose=self.verbose)
                clf.fit(trainData, trainY.flatten())
                params.append(clf.best_params_)
                scores[i_fold] = clf.score(testData, testY.flatten(), method_scorer=False)
            else:
                model.fit(trainData, trainY.flatten())
                # NOTE(review): SCORERS is undefined here (its import is
                # commented out at module top) -- this raises NameError.
                scores[i_fold] = SCORERS[self.scoring](model, testData, testY.flatten())
            i_fold+=1
        return scores,params

    def _packData(self, G1, indices2select, effect):
        """Assemble (data, y) for the selected (permuted) rows.

        Fixed effect: column-stack [G0 | G1 | X] (skipping absent kernels).
        Mixed effect: a list per sample of [X_row, G0_row?, G1_row?].
        """
        if effect == 'fixed':
            if G1 is None and self.G0 is None:
                data = self.X[self.data_permutation][indices2select]
            elif G1 is None:
                data = sp.column_stack((self.G0[self.data_permutation][indices2select],
                                        self.X[self.data_permutation][indices2select]))
            elif self.G0 is None:
                data = sp.column_stack((G1[self.data_permutation][indices2select],
                                        self.X[self.data_permutation][indices2select]))
            else:
                data = sp.column_stack((self.G0[self.data_permutation][indices2select],
                                        G1[self.data_permutation][indices2select],
                                        self.X[self.data_permutation][indices2select]))
        elif effect == 'mixed':
            X = self.X[self.data_permutation]
            # NOTE(review): the local G0 is only bound when self.G0 is not
            # None, yet it is read unconditionally in the loop below --
            # NameError if self.G0 is None for a mixed-effect model. Confirm.
            if self.G0 is not None:
                G0 = self.G0[self.data_permutation]
            if G1 is not None:
                G1 = G1[self.data_permutation]
            data = []
            for i in range(len(indices2select)):
                lis = [X[indices2select[i]]]
                if G0 is not None:
                    lis.append( G0[indices2select[i]] )
                if G1 is not None:
                    lis.append( G1[indices2select[i]] )
                data.append( lis )
        else:
            assert False, 'Unkown effect type.'
        return (data, self.Y[self.data_permutation][indices2select])
|
|
import hashlib
import operator
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.utils import truncate_name
from django.db.models.fields.related import ManyToManyField
from django.db.transaction import atomic
from django.utils.encoding import force_bytes
from django.utils.log import getLogger
from django.utils.six.moves import reduce
from django.utils import six
logger = getLogger('django.db.backends.schema')
def _related_objects(old_field, new_field):
# Returns (old_relation, new_relation) tuples.
return zip(
old_field.model._meta.get_all_related_objects(),
new_field.model._meta.get_all_related_objects()
)
class BaseDatabaseSchemaEditor(object):
    """
    This class (and its subclasses) are responsible for emitting schema-changing
    statements to the databases - model creation/removal/alteration, field
    renaming, index fiddling, and so on.

    It is intended to eventually completely replace DatabaseCreation.

    This class should be used by creating an instance for each set of schema
    changes (e.g. a syncdb run, a migration file) and using it as a context
    manager: entering starts the session (and a transaction, where DDL can be
    rolled back), and a clean exit flushes the deferred SQL. This is necessary
    to allow things like circular foreign key references - deferred statements
    (e.g. FKs) only run when the block exits without error.
    """

    # Overrideable SQL templates
    # Table-level DDL.
    sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
    sql_create_table_unique = "UNIQUE (%(columns)s)"
    sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
    sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
    sql_delete_table = "DROP TABLE %(table)s CASCADE"

    # Column-level DDL; the ALTER COLUMN fragments slot into sql_alter_column's
    # %(changes)s placeholder.
    sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
    sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
    sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
    sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
    sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
    sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
    sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
    sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
    sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
    sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"

    # Constraints (check / unique / foreign key / primary key) and indexes.
    sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
    sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
    sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
    sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
    sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
    # Backends that can declare an FK inline in CREATE TABLE override this.
    sql_create_inline_fk = None
    sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
    sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
    sql_delete_index = "DROP INDEX %(name)s"
    sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
    sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.connection.features.can_rollback_ddl:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.connection.features.can_rollback_ddl:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
# Field <-> database mapping functions
    def column_sql(self, model, field, include_default=False):
        """
        Takes a field and returns its column definition.
        The field must already have had set_attributes_from_name called.

        Returns a (sql, params) pair, or (None, None) for fields that have no
        column of their own (e.g. M2M).
        """
        # Get the column's type and use that as the basis of the SQL
        db_params = field.db_parameters(connection=self.connection)
        sql = db_params['type']
        params = []
        # Check for fields that aren't actually columns (e.g. M2M)
        if sql is None:
            return None, None
        # Work out nullability
        null = field.null
        # If we were told to include a default value, do so
        default_value = self.effective_default(field)
        include_default = include_default and not self.skip_default(field)
        if include_default and default_value is not None:
            if self.connection.features.requires_literal_defaults:
                # Some databases can't take defaults as a parameter (oracle)
                # If this is the case, the individual schema backend should
                # implement prepare_default
                sql += " DEFAULT %s" % self.prepare_default(default_value)
            else:
                sql += " DEFAULT %s"
                params += [default_value]
        # Oracle treats the empty string ('') as null, so coerce the null
        # option whenever '' is a possible value.
        if (field.empty_strings_allowed and not field.primary_key and
                self.connection.features.interprets_empty_strings_as_nulls):
            null = True
        if null and not self.connection.features.implied_column_null:
            sql += " NULL"
        elif not null:
            sql += " NOT NULL"
        # Primary key/unique outputs
        if field.primary_key:
            sql += " PRIMARY KEY"
        elif field.unique:
            sql += " UNIQUE"
        # Optionally add the tablespace if it's an implicitly indexed column
        # (unique columns get an index on most backends).
        tablespace = field.db_tablespace or model._meta.db_tablespace
        if tablespace and self.connection.features.supports_tablespaces and field.unique:
            sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
        # Return the sql
        return sql, params
def skip_default(self, field):
"""
Some backends don't accept default values for certain columns types
(i.e. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError('subclasses of BaseDatabaseSchemaEditor for backends which have requires_literal_defaults must provide a prepare_default() method')
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = six.binary_type()
else:
default = six.text_type()
else:
default = None
# If it's a callable, call it
if six.callable(default):
default = default()
# Run it through the field's get_db_prep_save method so we can send it
# to the database.
default = field.get_db_prep_save(default, self.connection)
return default
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
    def create_model(self, model):
        """
        Takes a model and creates a table for it in the database.
        Will also create any accompanying indexes or unique constraints.

        FK constraints and indexes are appended to self.deferred_sql rather
        than run immediately, so circular references can be satisfied.
        """
        # Create column SQL, add FK deferreds if needed
        column_sqls = []
        params = []
        for field in model._meta.local_fields:
            # SQL
            definition, extra_params = self.column_sql(model, field)
            if definition is None:
                # Field has no column of its own (e.g. M2M).
                continue
            # Check constraints can go on the column SQL here
            db_params = field.db_parameters(connection=self.connection)
            if db_params['check']:
                definition += " CHECK (%s)" % db_params['check']
            # Autoincrement SQL (for backends with inline variant)
            col_type_suffix = field.db_type_suffix(connection=self.connection)
            if col_type_suffix:
                definition += " %s" % col_type_suffix
            params.extend(extra_params)
            # FK
            if field.rel and field.db_constraint:
                to_table = field.rel.to._meta.db_table
                to_column = field.rel.to._meta.get_field(field.rel.field_name).column
                if self.connection.features.supports_foreign_keys:
                    # Deferred so the referenced table can be created later.
                    self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
                elif self.sql_create_inline_fk:
                    definition += " " + self.sql_create_inline_fk % {
                        "to_table": self.quote_name(to_table),
                        "to_column": self.quote_name(to_column),
                    }
            # Add the SQL to our big list
            column_sqls.append("%s %s" % (
                self.quote_name(field.column),
                definition,
            ))
            # Autoincrement SQL (for backends with post table definition variant)
            if field.get_internal_type() == "AutoField":
                autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
                if autoinc_sql:
                    self.deferred_sql.extend(autoinc_sql)
        # Add any unique_togethers
        for fields in model._meta.unique_together:
            columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
            column_sqls.append(self.sql_create_table_unique % {
                "columns": ", ".join(self.quote_name(column) for column in columns),
            })
        # Make the table
        sql = self.sql_create_table % {
            "table": self.quote_name(model._meta.db_table),
            "definition": ", ".join(column_sqls)
        }
        if model._meta.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
            if tablespace_sql:
                sql += ' ' + tablespace_sql
        self.execute(sql, params)
        # Add any field index and index_together's (deferred as SQLite3 _remake_table needs it)
        self.deferred_sql.extend(self._model_indexes_sql(model))
        # Make M2M tables
        for field in model._meta.local_many_to_many:
            if field.rel.through._meta.auto_created:
                self.create_model(field.rel.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.rel.through._meta.auto_created:
self.delete_model(field.rel.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
constraint_names = self._constraint_names(model, columns, unique=True)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_names[0]))
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
constraint_names = self._constraint_names(model, list(columns), index=True)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(self.sql_delete_index, model, constraint_names[0]))
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field_by_name(field)[0] for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
if old_db_table == new_db_table:
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
    def add_field(self, model, field):
        """
        Creates a field on a model.
        Usually involves adding a column, but may involve adding a
        table instead (for M2M fields)
        """
        # Special-case implicit M2M tables
        if ((isinstance(field, ManyToManyField) or field.get_internal_type() == 'ManyToManyField') and
                field.rel.through._meta.auto_created):
            return self.create_model(field.rel.through)
        # Get the column's definition (with DEFAULT so existing rows get a value)
        definition, params = self.column_sql(model, field, include_default=True)
        # It might not actually have a column behind it
        if definition is None:
            return
        # Check constraints can go on the column SQL here
        db_params = field.db_parameters(connection=self.connection)
        if db_params['check']:
            definition += " CHECK (%s)" % db_params['check']
        # Build the SQL and run it
        sql = self.sql_create_column % {
            "table": self.quote_name(model._meta.db_table),
            "column": self.quote_name(field.column),
            "definition": definition,
        }
        self.execute(sql, params)
        # Drop the default if we need to
        # (Django usually does not use in-database defaults)
        if not self.skip_default(field) and field.default is not None:
            sql = self.sql_alter_column % {
                "table": self.quote_name(model._meta.db_table),
                "changes": self.sql_alter_column_no_default % {
                    "column": self.quote_name(field.column),
                }
            }
            self.execute(sql)
        # Add an index, if required (unique fields get their index implicitly)
        if field.db_index and not field.unique:
            self.deferred_sql.append(self._create_index_sql(model, [field]))
        # Add any FK constraints later
        if field.rel and self.connection.features.supports_foreign_keys and field.db_constraint:
            self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
        # Reset connection if required
        if self.connection.features.connection_persists_old_columns:
            self.connection.close()
    def remove_field(self, model, field):
        """
        Removes a field from a model. Usually involves deleting a column,
        but for M2Ms may involve deleting a table.
        """
        # Special-case implicit M2M tables
        if ((isinstance(field, ManyToManyField) or field.get_internal_type() == 'ManyToManyField') and
                field.rel.through._meta.auto_created):
            return self.delete_model(field.rel.through)
        # It might not actually have a column behind it
        if field.db_parameters(connection=self.connection)['type'] is None:
            return
        # Drop any FK constraints, MySQL requires explicit deletion
        # (must happen before the column itself is dropped).
        if field.rel:
            fk_names = self._constraint_names(model, [field.column], foreign_key=True)
            for fk_name in fk_names:
                self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
        # Delete the column
        sql = self.sql_delete_column % {
            "table": self.quote_name(model._meta.db_table),
            "column": self.quote_name(field.column),
        }
        self.execute(sql)
        # Reset connection if required
        if self.connection.features.connection_persists_old_columns:
            self.connection.close()
    def alter_field(self, model, old_field, new_field, strict=False):
        """
        Allows a field's type, uniqueness, nullability, default, column,
        constraints etc. to be modified.
        Requires a copy of the old field as well so we can only perform
        changes that are required.
        If strict is true, raises errors if the old column does not match old_field precisely.

        Dispatches M2M <-> M2M changes to _alter_many_to_many and everything
        column-backed to _alter_field; mixed (column <-> M2M) changes are
        rejected.
        """
        # Ensure this field is even column-based
        old_db_params = old_field.db_parameters(connection=self.connection)
        old_type = old_db_params['type']
        new_db_params = new_field.db_parameters(connection=self.connection)
        new_type = new_db_params['type']
        if ((old_type is None and old_field.rel is None) or
                (new_type is None and new_field.rel is None)):
            # No type and no relation: the field cannot be introspected at all.
            raise ValueError(
                "Cannot alter field %s into %s - they do not properly define "
                "db_type (are you using PostGIS 1.5 or badly-written custom "
                "fields?)" % (old_field, new_field)
            )
        elif old_type is None and new_type is None and (
                old_field.rel.through and new_field.rel.through and
                old_field.rel.through._meta.auto_created and
                new_field.rel.through._meta.auto_created):
            # Both sides are auto-created M2Ms: rebuild the through table.
            return self._alter_many_to_many(model, old_field, new_field, strict)
        elif old_type is None and new_type is None and (old_field.rel.through and new_field.rel.through and not old_field.rel.through._meta.auto_created and not new_field.rel.through._meta.auto_created):
            # Both sides have through models; this is a no-op.
            return
        elif old_type is None or new_type is None:
            raise ValueError("Cannot alter field %s into %s - they are not compatible types (you cannot alter to or from M2M fields, or add or remove through= on M2M fields)" % (
                old_field,
                new_field,
            ))
        self._alter_field(model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict)
    def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False):
        """Actually perform a "physical" (non-ManyToMany) field update.

        Sequencing matters: constraints that depend on the old column shape
        (FKs, unique, indexes, checks) are dropped first, then the column is
        renamed/retyped/re-nulled, then constraints are recreated.  With
        ``strict`` on, finding an unexpected number of matching constraints
        raises instead of silently continuing.
        """
        # Drop any FK constraints, we'll remake them later
        fks_dropped = set()
        if old_field.rel and old_field.db_constraint:
            fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
            if strict and len(fk_names) != 1:
                raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
                    len(fk_names),
                    model._meta.db_table,
                    old_field.column,
                ))
            for fk_name in fk_names:
                # Remember we dropped an FK on this column so we know to
                # recreate one at the end even if old/new rel look alike.
                fks_dropped.add((old_field.column,))
                self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
        # Has unique been removed?
        if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
            # Find the unique constraint for this field
            constraint_names = self._constraint_names(model, [old_field.column], unique=True)
            if strict and len(constraint_names) != 1:
                raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
                    len(constraint_names),
                    model._meta.db_table,
                    old_field.column,
                ))
            for constraint_name in constraint_names:
                self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name))
        # Drop incoming FK constraints if we're a primary key and things are going
        # to change.
        if old_field.primary_key and new_field.primary_key and old_type != new_type:
            for _old_rel, new_rel in _related_objects(old_field, new_field):
                rel_fk_names = self._constraint_names(
                    new_rel.model, [new_rel.field.column], foreign_key=True
                )
                for fk_name in rel_fk_names:
                    self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.model, fk_name))
        # Removed an index? (no strict check, as multiple indexes are possible)
        if (old_field.db_index and not new_field.db_index and
                not old_field.unique and not
                (not new_field.unique and old_field.unique)):
            # Find the index for this field
            index_names = self._constraint_names(model, [old_field.column], index=True)
            for index_name in index_names:
                self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
        # Change check constraints?
        if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
            constraint_names = self._constraint_names(model, [old_field.column], check=True)
            if strict and len(constraint_names) != 1:
                raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
                    len(constraint_names),
                    model._meta.db_table,
                    old_field.column,
                ))
            for constraint_name in constraint_names:
                self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
        # Have they renamed the column?
        if old_field.column != new_field.column:
            self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
        # Next, start accumulating actions to do.  `actions` and
        # `null_actions` are (sql_fragment, params) pairs spliced into one
        # ALTER TABLE; `post_actions` are full statements run afterwards.
        actions = []
        null_actions = []
        post_actions = []
        # Type change?
        if old_type != new_type:
            fragment, other_actions = self._alter_column_type_sql(
                model._meta.db_table, old_field, new_field, new_type
            )
            actions.append(fragment)
            post_actions.extend(other_actions)
        # When changing a column NULL constraint to NOT NULL with a given
        # default value, we need to perform 4 steps:
        # 1. Add a default for new incoming writes
        # 2. Update existing NULL rows with new default
        # 3. Replace NULL constraint with NOT NULL
        # 4. Drop the default again.
        # Default change?
        old_default = self.effective_default(old_field)
        new_default = self.effective_default(new_field)
        needs_database_default = (
            old_default != new_default and
            new_default is not None and
            not self.skip_default(new_field)
        )
        if needs_database_default:
            if self.connection.features.requires_literal_defaults:
                # Some databases can't take defaults as a parameter (oracle)
                # If this is the case, the individual schema backend should
                # implement prepare_default
                actions.append((
                    self.sql_alter_column_default % {
                        "column": self.quote_name(new_field.column),
                        "default": self.prepare_default(new_default),
                    },
                    [],
                ))
            else:
                actions.append((
                    self.sql_alter_column_default % {
                        "column": self.quote_name(new_field.column),
                        "default": "%s",
                    },
                    [new_default],
                ))
        # Nullability change?
        if old_field.null != new_field.null:
            if new_field.null:
                null_actions.append((
                    self.sql_alter_column_null % {
                        "column": self.quote_name(new_field.column),
                        "type": new_type,
                    },
                    [],
                ))
            else:
                null_actions.append((
                    self.sql_alter_column_not_null % {
                        "column": self.quote_name(new_field.column),
                        "type": new_type,
                    },
                    [],
                ))
        # Only if we have a default and there is a change from NULL to NOT NULL
        four_way_default_alteration = (
            new_field.has_default() and
            (old_field.null and not new_field.null)
        )
        if actions or null_actions:
            if not four_way_default_alteration:
                # If we don't have to do a 4-way default alteration we can
                # directly run a (NOT) NULL alteration
                actions = actions + null_actions
            # Combine actions together if we can (e.g. postgres)
            if self.connection.features.supports_combined_alters and actions:
                sql, params = tuple(zip(*actions))
                # NOTE(review): relies on `reduce` (a builtin only on
                # Python 2) and the `operator` module being importable at
                # module level -- confirm the file head imports both.
                actions = [(", ".join(sql), reduce(operator.add, params))]
            # Apply those actions
            for sql, params in actions:
                self.execute(
                    self.sql_alter_column % {
                        "table": self.quote_name(model._meta.db_table),
                        "changes": sql,
                    },
                    params,
                )
            if four_way_default_alteration:
                # Update existing rows with default value
                self.execute(
                    self.sql_update_with_default % {
                        "table": self.quote_name(model._meta.db_table),
                        "column": self.quote_name(new_field.column),
                        "default": "%s",
                    },
                    [new_default],
                )
                # Since we didn't run a NOT NULL change before we need to do it
                # now
                for sql, params in null_actions:
                    self.execute(
                        self.sql_alter_column % {
                            "table": self.quote_name(model._meta.db_table),
                            "changes": sql,
                        },
                        params,
                    )
        if post_actions:
            for sql, params in post_actions:
                self.execute(sql, params)
        # Added a unique?
        if not old_field.unique and new_field.unique:
            self.execute(self._create_unique_sql(model, [new_field.column]))
        # Added an index?
        if (not old_field.db_index and new_field.db_index and
                not new_field.unique and not
                (not old_field.unique and new_field.unique)):
            # NOTE(review): "_uniq" suffix on a plain index looks like it
            # was meant to be "_idx" -- confirm before changing (existing
            # databases already carry names built with "_uniq").
            self.execute(self._create_index_sql(model, [new_field], suffix="_uniq"))
        # Type alteration on primary key? Then we need to alter the column
        # referring to us.
        rels_to_update = []
        if old_field.primary_key and new_field.primary_key and old_type != new_type:
            rels_to_update.extend(_related_objects(old_field, new_field))
        # Changed to become primary key?
        # Note that we don't detect unsetting of a PK, as we assume another field
        # will always come along and replace it.
        if not old_field.primary_key and new_field.primary_key:
            # First, drop the old PK
            constraint_names = self._constraint_names(model, primary_key=True)
            if strict and len(constraint_names) != 1:
                raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
                    len(constraint_names),
                    model._meta.db_table,
                ))
            for constraint_name in constraint_names:
                self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
            # Make the new one
            self.execute(
                self.sql_create_pk % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
                    "columns": self.quote_name(new_field.column),
                }
            )
            # Update all referencing columns
            rels_to_update.extend(_related_objects(old_field, new_field))
        # Handle our type alters on the other end of rels from the PK stuff above
        for old_rel, new_rel in rels_to_update:
            rel_db_params = new_rel.field.db_parameters(connection=self.connection)
            rel_type = rel_db_params['type']
            fragment, other_actions = self._alter_column_type_sql(
                new_rel.model._meta.db_table, old_rel.field, new_rel.field, rel_type
            )
            self.execute(
                self.sql_alter_column % {
                    "table": self.quote_name(new_rel.model._meta.db_table),
                    "changes": fragment[0],
                },
                fragment[1],
            )
            for sql, params in other_actions:
                self.execute(sql, params)
        # Does it have a foreign key?
        if (new_field.rel and
                (fks_dropped or not old_field.rel or not old_field.db_constraint) and
                new_field.db_constraint):
            self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
        # Rebuild FKs that pointed to us if we previously had to drop them
        if old_field.primary_key and new_field.primary_key and old_type != new_type:
            for rel in new_field.model._meta.get_all_related_objects():
                self.execute(self._create_fk_sql(rel.model, rel.field, "_fk"))
        # Does it have check constraints we need to add?
        if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
            self.execute(
                self.sql_create_check % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")),
                    "column": self.quote_name(new_field.column),
                    "check": new_db_params['check'],
                }
            )
        # Drop the default if we need to
        # (Django usually does not use in-database defaults)
        if needs_database_default:
            sql = self.sql_alter_column % {
                "table": self.quote_name(model._meta.db_table),
                "changes": self.sql_alter_column_no_default % {
                    "column": self.quote_name(new_field.column),
                }
            }
            self.execute(sql)
        # Reset connection if required
        if self.connection.features.connection_persists_old_columns:
            self.connection.close()
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
    def _alter_many_to_many(self, model, old_field, new_field, strict):
        """
        Alters M2Ms to repoint their to= endpoints.

        Both fields use auto-created through models: the through table is
        renamed if its name changed, then the FK on each end of the through
        model is re-pointed via alter_field().
        """
        # Rename the through table
        if old_field.rel.through._meta.db_table != new_field.rel.through._meta.db_table:
            self.alter_db_table(old_field.rel.through, old_field.rel.through._meta.db_table, new_field.rel.through._meta.db_table)
        # Repoint the FK to the other side
        self.alter_field(
            new_field.rel.through,
            # We need the field that points to the target model, so we can tell alter_field to change it -
            # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
            old_field.rel.through._meta.get_field_by_name(old_field.m2m_reverse_field_name())[0],
            new_field.rel.through._meta.get_field_by_name(new_field.m2m_reverse_field_name())[0],
        )
        self.alter_field(
            new_field.rel.through,
            # for self-referential models we need to alter field from the other end too
            old_field.rel.through._meta.get_field_by_name(old_field.m2m_field_name())[0],
            new_field.rel.through._meta.get_field_by_name(new_field.m2m_field_name())[0],
        )
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, BaseDatabaseCreation._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (table_name, column_names[0], index_unique_name, suffix)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _create_index_sql(self, model, fields, suffix="", sql=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
return sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": tablespace_sql,
}
def _model_indexes_sql(self, model):
"""
Return all index SQL statements (field indexes, index_together) for the
specified model, as a list.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
if field.db_index and not field.unique:
output.append(self._create_index_sql(model, [field], suffix=""))
for field_names in model._meta.index_together:
fields = [model._meta.get_field_by_name(field)[0] for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
return output
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
from_table = model._meta.db_table
from_column = field.column
to_table = field.related_field.model._meta.db_table
to_column = field.related_field.column
suffix = suffix % {
"to_table": to_table,
"to_column": to_column,
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
def _create_unique_sql(self, model, columns):
return self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")),
"columns": ", ".join(self.quote_name(column) for column in columns),
}
def _delete_constraint_sql(self, template, model, name):
return template % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(name),
}
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
|
|
# -*- coding: utf-8 -*-
def talk(data):
    """Speak *data* through the MRL mouth service, with a French "l'" fixup.

    NOTE(review): the first replace("l ", "l'") already rewrites every
    occurrence of "l " (including inside " l " and words like "il va"),
    so the second replace can never match anything -- confirm intent.
    """
    #ear.startListening() #fix onclick micro
    if data:
        if data!="":
            if data[0:2]=="l ":
                data=data.replace("l ", "l'")
                data=data.replace(" l ", " l'")
            # Jython/Python 2: decode the UTF-8 byte string before speaking.
            mouth.speak(unicode(data,'utf-8'))
            # Occasionally dart the eyes around while speaking (InMoov only).
            if IsInmoovArduino==1 and MoveEyesRandom==1:
                if random.randint(1,3)==1:
                    i01.head.eyeX.moveTo(0)
                    sleep(2)
                    i01.head.eyeX.moveTo(180)
                    sleep(1)
                    i01.head.eyeX.moveTo(90)
def talkBlocking(data):
    """Speak *data* and block until the speech has finished playing."""
    if data!="":
        mouth.speakBlocking(unicode(data,'utf-8'))
def Parse(utfdata):
    """Fetch the given URL and return its body, lightly cleaned up.

    Strips a Yahoo Answers artefact, then best-effort decodes as UTF-8 and
    replaces " : " with a random throat sound from `troat`.
    NOTE(review): Python 2 urllib2; network errors propagate to the caller.
    """
    #Light(1,1,0)
    utfdata = urllib2.urlopen(utfdata).read()
    utfdata = utfdata.replace("'", "'").replace("http://fr.answers.yahoo.com/question/ind...", "")
    try:
        utfdata = utfdata.decode( "utf8" ).replace(" : ", random.choice(troat))
    except:
        # Non-UTF-8 payload: keep the raw bytes untouched.
        pass
    #print utfdata
    #Light(1,1,1)
    return utfdata;
##############################################################
# Pause the ear while the robot is speaking so it does not hear itself.
# If you click on the webkit mic icon, this workaround is bypassed.
##############################################################
def onEndSpeaking(text):
    """MRL callback fired when the robot finishes speaking.

    Re-enables random head movement and the background-life clock, then
    resumes ear listening (gated by IcanStartToEar so the very first call
    after startup can be suppressed).
    """
    global IcanStartToEar
    global MoveHeadRandom
    global RamdomSpeak
    global Ispeak
    global TimeNoSpeak
    MoveHeadTimer.stopClock()
    Ispeak=0
    VieAleatoire.startClock()
    TimeNoSpeak="OFF"
    #Light(0,0,0)
    MoveHeadRandom=1
    if IcanStartToEar==1:
        try:
            ear.resumeListening()
        except:
            # Ear service may not be available; keep going regardless.
            pass
        WebkitSpeachReconitionFix.startClock()
    IcanStartToEar=1
    RamdomSpeak=0
    #sleep(0.2)
def onStartSpeaking(text):
    """MRL callback fired when the robot starts speaking.

    Wakes the robot from "soft sleep" (re-attaching head/eyelid servos on
    InMoov hardware), pauses the ear so the robot does not hear itself,
    and triggers yes/no head gestures when matching keywords appear.
    """
    global RamdomSpeak
    global MoveHeadRandom
    global Ispeak
    if DEBUG==1:
        print "dbg : RamdomSpeak:",RamdomSpeak
    global RobotIsSleepingSoft
    global IcanEarOnlyKnowsWords
    if RamdomSpeak==0:
        if RobotIsSleepingSoft==1:
            if IsInmoovArduino==1:
                HeadSide.attach()
                i01.head.attach()
            RobotIsSleepingSoft=0
            # Re-open the eyelids ("paupieres") and restart their clock.
            PaupiereAttach(1)
            sleep(0.1)
            PositionPaupiere(180,180,0.3)
            sleep(3)
            clockPaupiere.startClock()
            IcanEarOnlyKnowsWords=-1
        Ispeak=1
        WebkitSpeachReconitionFix.stopClock()
        if 'non' in text: # or 'no' in text HARD CODED LANGUAGE CLEAN LATER
            No('no')
            MoveHeadRandom=0
            #print("no detected")
        if 'oui' in text or 'yes' in text or '#LAUGH01#' in text:
            Yes('yes')
            #print("yes detected")
            MoveHeadRandom=0
        if MoveHeadRandom==1:
            MoveHeadTimer.startClock()
        try:
            ear.pauseListening()
        except:
            pass
        global TimeNoSpeak
        TimeNoSpeak="OFF"
        VieAleatoire.stopClock()
        #Light(1,1,1)
##############################################################
# Intercept what the robot hears so we can adjust some values.
# Apostrophes are replaced by spaces because AIML doesn't like them.
##############################################################
def onRecognized(text):
    """MRL callback for recognized speech: forward it to the chatbot.

    Apostrophes are replaced with spaces because AIML cannot handle them.
    Input is ignored while the robot itself is speaking (Ispeak) to avoid
    a feedback loop; ParrotMod makes the robot simply repeat what it heard.
    """
    if DEBUG==1:
        print "onRecognized : ",text
    #print text.replace("'", " ")
    global Ispeak
    global ParrotMod
    if Ispeak==0:
        if ParrotMod==0:
            chatBot.getResponse(text.replace("'", " "))
        else:
            chatBot.getResponse("SAY " + text)
        #we close pictures
        image.exitFS()
        image.closeAll()
def Light(ROUGE_V,VERT_V,BLEU_V):
    """Drive the RGB status LED (red/green/blue pins), when fitted."""
    if IhaveLights==1 and IsInmoovArduino==1:
        right.digitalWrite(ROUGE,ROUGE_V)
        right.digitalWrite(VERT,VERT_V)
        right.digitalWrite(BLEU,BLEU_V)
##############################################################
# Builds and speaks a human-friendly date string
# (e.g. "mardi, le 10 juin 1975, 12h38 ...").
##############################################################
def getDate(query, ID):
    """Speak a human-friendly date ("say Le <day> <month> <year>") via the chatbot."""
    answer = ( wdf.getTime(query,ID,"day") +" " +wdf.getTime(query,ID,"month") + " " + wdf.getTime(query,ID,"year"))
    #print " La date est : " + answer
    chatBot.getResponse("say Le " + answer)
def DisplayPic(pic):
    """Show *pic* full screen; retries once, voicing failure via the chatbot."""
    r=0
    try:
        r=image.displayFullScreen(pic,1)
    except:
        chatBot.getResponse("PICTUREPROBLEM")
        pass
    time.sleep(0.1)
    # Second attempt works around an intermittent display failure.
    try:
        r=image.displayFullScreen(pic,1)
    except:
        pass
def UpdateBotName(botname):
    """Register *botname* with the shared-memory web service.

    Generates a bot_id (MD5 of the current time) on first use, then
    persists both id and name in the chatbot's predicates.
    """
    if str(chatBot.getPredicate("default","bot_id"))=="unknown":
        bot_id=hashlib.md5(str(time.time()).encode('utf-8')).hexdigest()
    else:
        bot_id=str(chatBot.getPredicate("default","bot_id"))
    RetourServer=Parse("http://www.myai.cloud/shared_memory.php?action=UpdateBotName&bot_id="+urllib2.quote(bot_id)+"&botname="+urllib2.quote(botname.replace("'", " ")))
    #print "http://www.myai.cloud/shared_memory.php?action=UpdateBotName&bot_id="+urllib2.quote(bot_id)+"&botname="+urllib2.quote(botname.replace("'", " "))
    chatBot.setPredicate("default","bot_id",bot_id)
    chatBot.setPredicate("default","botname",botname)
    chatBot.savePredicates()
def CheckVersion():
    """Compare the local script `version` against the one published online."""
    RetourServer=Parse("http://www.myai.cloud/version.html")
    #print str(RetourServer)+' '+str(version)
    if str(RetourServer)==str(version):
        print "software is OK"
        #chatBot.getResponse("IAMUPDATED")
    else:
        chatBot.getResponse("INEEDUPDATE")
        sleep(3)
def PlayUtub(q,num):
    """Play (or stop) a YouTube search result in the browser.

    q == "stop" with num == 0 kills the spawned player process (Windows
    taskkill) and returns the browser to the ear service page; otherwise
    opens the web player for search term q, result number num.
    """
    if q=="stop" and num==0:
        subprocess.Popen("taskkill /F /T /PID %i"%proc1.pid , shell=True)
        sleep(2)
        webgui.startBrowser("http://localhost:8888/#/service/i01.ear")
    else:
        webgui.startBrowser("http://www.myai.cloud/utub/?num="+str(num)+"&q="+str(q).encode('utf-8'))
        #print "http://www.myai.cloud/utub/?num="+str(num)+"&q="+str(q).encode('utf-8')
def ShutDown():
    """Announce shutdown, park the head and eyelids, detach servos, exit MRL."""
    talkBlocking("Extinction")
    # NOTE(review): unlike the callbacks above, there is no "global
    # MoveHeadRandom" here, so this assignment only creates a local and
    # the global flag is left unchanged -- confirm intent.
    MoveHeadRandom=0
    sleep(1)
    if IsInmoovArduino==1:
        i01.setHeadSpeed(RotHeadSpeed+0.1, NeckSpeed+0.1)
        i01.moveHead(90,90)
        HeadSide.moveTo(90)
        clockPaupiere.stopClock()
        sleep(0.2)
        # Close the eyelids before powering down.
        PositionPaupiere(0,0,0.5)
        sleep(5)
        PaupiereServoGauche.detach()
        HeadSide.detach()
        i01.detach()
        sleep(1)
    runtime.exit()
def IdontUnderstand():
    """Voice an "I don't understand" reply unless keyword-only listening is active."""
    global IcanEarOnlyKnowsWords
    if IcanEarOnlyKnowsWords<=0:
        chatBot.getResponse("IDONTUNDERSTAND")
    else:
        print "robot doesnt understand"
    #runtime.shutdown()
|
|
#!/usr/bin/python
"""
Contains the following classes:
* LocationWrap
* LocationHits
* LocationHitsContainer
* GeoJSONer
* Geocoder
* Geolocator
* LatLng
This file converts a given list of locations into geojson.
At a high-level, it does the following:
* Retrieves lat/lng coordinates for each given location from
geonames db
* Applies weights (if weights is on)
* Creates a geojson object from locations
"""
import geojson
import re
from ast import literal_eval
from app.models import Location
from app.weighter import Weightifier
# Geonames feature class/code constants: class 'A' marks administrative
# divisions and class 'P' populated places; the ADMn / PPLAn suffix encodes
# the admin level (1 = largest subdivision, 4 = smallest).
ADMIN_FEATURE_CLASS = 'A'
ADMIN_FEATURE_CODES = [
    'ADM1',  # admin1
    'ADM2',  # admin2
    'ADM3',  # admin3
    'ADM4'   # admin4
]
POPULATED_PLACE_FEATURE_CLASS = 'P'
POPULATED_PLACE_FEATURE_CODES = [
    'PPLA',   # admin1
    'PPLA2',  # admin2
    'PPLA3',  # admin3
    'PPLA4'   # admin4
]
class LocationWrap(object):
    """
    Wrapper around an app.models.Location object.

    Adds the weight and admin-names attributes needed by the geolocator
    and the weighter without touching the underlying model instance.
    """

    def __init__(self, location, weight=0, adminnames=None):
        self.location = location
        self._weight = weight
        self.adminnames = adminnames

    def name(self):
        """:returns: 'name' of wrapped location"""
        return self.location.name

    def geonameid(self):
        """:returns: 'geonameid' of wrapped location"""
        return self.location.geonameid

    def admin1name(self):
        """:returns: 'admin1name', or None when no admin names are set"""
        return self.adminnames.admin1name if self.adminnames else None

    def admin2name(self):
        """:returns: 'admin2name', or None when no admin names are set"""
        return self.adminnames.admin2name if self.adminnames else None

    def admin3name(self):
        """:returns: 'admin3name', or None when no admin names are set"""
        return self.adminnames.admin3name if self.adminnames else None

    def admin4name(self):
        """:returns: 'admin4name', or None when no admin names are set"""
        return self.adminnames.admin4name if self.adminnames else None

    def countryname(self):
        """:returns: 'countryname', or None when no admin names are set"""
        return self.adminnames.countryname if self.adminnames else None

    def latitude(self):
        """:returns: 'latitude' of wrapped location"""
        return self.location.latitude

    def longitude(self):
        """:returns: 'longitude' of wrapped location"""
        return self.location.longitude

    def weight(self):
        """:returns: current weight value of this location"""
        return self._weight

    def set_adminnames(self, location_admin_names):
        """
        :param app.weighter.LocationAdminNames location_admin_names:
            admin names
        :returns: None
        """
        self.adminnames = location_admin_names
        return

    def index_of_admin_name(self, admin_name):
        """
        :param str admin_name: a name of a Location
        :returns: the admin level (4 down to 0, where 0 == country) whose
            name matches admin_name; -1 when nothing matches
        """
        levels = (
            (4, self.admin4name),
            (3, self.admin3name),
            (2, self.admin2name),
            (1, self.admin1name),
            (0, self.countryname),
        )
        for level, getter in levels:
            if getter() == admin_name:
                return level
        return -1

    def increment_weight_on_match(self, location_name):
        """
        Adds 1 to the weight for every admin name equal to location_name.

        :param str location_name: name of a location
        :returns: bool -- True if at least one admin name matched
        """
        candidates = (self.admin1name(), self.admin2name(), self.admin3name(),
                      self.admin4name(), self.countryname())
        matched = False
        for candidate in candidates:
            if candidate == location_name:
                self._weight += 1
                matched = True
        return matched

    def names_list(self):
        """
        Returns this location's admin names as one list; empty when the
        admin-names object is missing or does not implement list().

        :returns: list of names
        """
        if not self.adminnames:
            return []
        try:
            return list(self.adminnames.list())
        except AttributeError:
            return []

    def __eq__(self, other):
        """
        Compares two LocationWraps
        :param LocationWrap other: other LocationWrap
        :returns: True if equal; otherwise False
        """
        if not isinstance(other, LocationWrap):
            return False
        return (self.location == other.location and
                self._weight == other._weight and
                self.adminnames == other.adminnames)

    def __repr__(self):
        return "<LocationWrap(location=%s, weight=%s)" % (
            str(self.location.name), str(self._weight))
class LocationHits(object):
    """
    A wrapper for a list of app.geolocator.LocationWrap objects

    This is used as a container to store all hits for a specific Location
    from the geonames db.
    For Example:
    * Location = 'Phoenix'
    * Geonames returns 15 Phoenixes
    * All Phoenixes will be put into a LocationHits object
    Also serves as an iterator.
    """
    def __init__(self, name, locations):
        # index is the iteration cursor; -1 means "before the first item".
        self.index = -1
        self.name = name
        self.locations = locations
    def __iter__(self):
        """
        This makes the LocationHits class an iterator; the cursor is
        rewound so the object can be iterated repeatedly.
        """
        self.index = -1
        return self
    def next(self):
        """
        Called when LocationHits is used as the iterator of a for loop
        For example:
        ` for hit in LocationHits:
        `     # calls LocationHits.next each iteration
        :returns: the location at self.index
        :raises StopIteration: when iteration is complete
        """
        if self.index >= len(self.locations)-1:
            raise StopIteration
        else:
            self.index += 1
            return self.locations[self.index]
    # Fix: Python 3's iterator protocol calls __next__(), not next().
    # Aliasing keeps the class iterable on both Python 2 and Python 3.
    __next__ = next
    def increment_weight_on_match(self, location_name):
        """
        Checks each location to see if any of its admin names matches
        location_name. If it does, then it increments its weight.
        :param str location_name: name of a location
        :returns: list of LocationWraps that match location_name
        """
        matched_locations = []
        for l in self.locations:
            matched = l.increment_weight_on_match(location_name)
            if matched:
                matched_locations.append(l)
        return matched_locations
    def max_weight(self):
        """
        Returns the max weight value within all locations
        :returns: int -- the max weight, or -1 when there are no locations
        """
        weights = list()
        for wrap in self.locations:
            weights.append(wrap.weight())
        if len(weights) > 0:
            return max(weights)
        else:
            return -1
    def __len__(self):
        """
        length == number of locations within LocationHits object
        (0 when locations is None or empty)
        :returns: int
        """
        length = 0
        if self.locations:
            length = len(self.locations)
        return length
    def __eq__(self, other):
        """
        Compares two LocationHits
        :param LocationHits other: other LocationHits
        :returns: True if equal; otherwise False
        """
        return (isinstance(other, LocationHits) and
                self.name == other.name and
                self.locations == other.locations)
    def __repr__(self):
        return "<LocationHits(len(locations)=%s, locations=%s)" % (
            str(len(self.locations)), str(self.locations))
class LocationHitsContainer(object):
    """
    A container for one or more app.geolocator.LocationHits objects
    """

    def __init__(self):
        self.hits = []

    def append(self, location_hits):
        """
        Appends the given LocationHits object to the hits list
        :param app.geolocator.LocationHits location_hits: object to append
        :returns: None
        """
        self.hits.append(location_hits)

    def increment_weight_on_match(self, location_admin_names):
        """
        Asks every contained LocationHits object to bump the weight of any
        of its locations matching the given name.

        NOTE(review): the docstring-declared parameter type is
        LocationAdminNames, while LocationHits.increment_weight_on_match
        expects a plain name string -- confirm which is intended.
        :returns: None
        """
        for location_hits in self.hits:
            location_hits.increment_weight_on_match(location_admin_names)

    def __len__(self):
        """
        Total number of contained locations.
        NOTE: does NOT return the number of LocationHits objects.
        :returns: int
        """
        return sum(len(location_hits) for location_hits in self.hits)

    def __eq__(self, other):
        """
        Compares two LocationHitsContainer objects
        :param LocationHitsContainer other: other LocationHitsContainer
        :returns: True if equal; otherwise False
        """
        return isinstance(other, LocationHitsContainer) and self.hits == other.hits

    def __repr__(self):
        return "<LocationHitsContainer(len(hits)=%s)>" % (str(len(self.hits)))
class GeoJSONer(object):
    """
    Responsible for geojson creation and manipulation.
    Accumulates geojson.Feature objects and exposes them as one
    FeatureCollection.
    """
    def __init__(self):
        # Features in append order.
        self.features = []
        return
    def _convert_to_feature(self, location):
        """
        Converts the given LocationWrap object to a geojson.Feature
        object
        :param app.geolocator.LocationWrap location: object to convert
        :returns: geojson.Feature
        """
        # NOTE(review): GeoJSON (RFC 7946) positions are ordered
        # [longitude, latitude], but this emits [latitude, longitude].
        # Downstream consumers may compensate -- confirm before changing.
        geometry = {
            'type': 'Point',
            'coordinates': [
                location.latitude(),
                location.longitude()
            ]
        }
        properties = {
            'weight': location.weight(),
            'name': location.name(),
            'countryname': location.countryname(),
            'admin1name': location.admin1name(),
            'admin2name': location.admin2name(),
            'admin3name': location.admin3name(),
            'admin4name': location.admin4name(),
        }
        feature = geojson.Feature(location.name(), geometry, properties)
        return feature
    def append(self, location):
        """
        Converts the given LocationWrap object to a geojson.Feature object and
        appends it to the feature list
        :param app.geolocator.LocationWrap location: object to append
        :returns: None
        """
        feature = self._convert_to_feature(location)
        self.features.append(feature)
        return
    def geojson(self):
        """
        Returns the features array as a geojson.FeatureCollection
        """
        return geojson.FeatureCollection(self.features)
    def __repr__(self):
        return "<GeoJSONer()>"
class Geocoder(object):
    """
    Used to find coordinates of tagged locations by querying the geonames
    database through the app.models.Location ORM model.
    """
    # Feature type denoting a populated place (doesn't quite work).
    FT = 'P.PPL'
    def __init__(self):
        return
    def _wrap_location(self, location):
        """
        Converts the given Location object to a LocationWrap
        :param app.models.Location location: location object to convert
        :returns: app.geolocator.LocationWrap
        """
        return LocationWrap(location)
    def geocode(self, location):
        """
        Queries the geonames database and retrieves all matching locations
        :param str location: location name to query for
        :returns: app.geolocator.LocationHits object
        """
        matches = Location.query.filter_by(
            name=location).order_by('id').all()
        # Fix: build a concrete list rather than relying on map() -- on
        # Python 3 map() is lazy, and LocationHits needs len() and
        # indexing on its locations.  Identical behavior on Python 2.
        wrapped = [self._wrap_location(match) for match in matches]
        return LocationHits(location, wrapped)
    def __repr__(self):
        return "<Geocoder()>"
class Geolocator(object):
    """
    Master geolocation class

    Drives Geocoder, GeoJSONer and Weightifier to find coordinates for,
    weight, and serialize all tagged locations.
    """

    def __init__(self):
        self.geocoder = Geocoder()
        self.geojsoner = GeoJSONer()
        self.weightifier = Weightifier()

    def _build_container(self, locations):
        """
        Builds a LocationHitsContainer from the given locations
        :param list locations: list of app.models.Location objects to geolocate
        :returns: LocationHitsContainer
        """
        container = LocationHitsContainer()
        for location in locations:
            container.append(self.geocoder.geocode(location))
        return container

    def _apply_weights(self, container, weights, accuracy):
        """
        Uses the Weightifier to apply weights to the container
        :param LocationHitsContainer container: container of locations
        :param bool weights: whether weights should be calculated
        :param int accuracy: weighting accuracy level; capped at 5
            (values below 1 are passed through unchanged)
        :returns: the (possibly re-weighted) container
        """
        if not weights:
            return container
        accuracy = min(accuracy, 5)
        container = self.weightifier.gather_all_names(container, accuracy)
        return self.weightifier.weightify(container)

    def _build_geojson(self, container):
        """
        Feeds every location in the container to the GeoJSONer.
        :param LocationHitsContainer container: container of locations
        :returns: None
        """
        for location_hits in container.hits:
            for wrap in location_hits:
                self.geojsoner.append(wrap)

    def geolocate(self, locations, weights=True, accuracy=1):
        """
        Given a list of tagged locations from the NLP tagger, this finds
        the coordinates of each, applies weighting, and converts the
        result to geojson.
        :param list locations: list of app.models.Location objects to geolocate
        :param bool weights: whether weights should be calculated
            (defaults to True)
        :param int accuracy: accuracy level 1-5 (defaults to 1); level N
            weights matches against countrycode plus admin1..admin(N-1)
            codes, i.e. 1 = countrycode only, 5 = down to admin4code
        :returns: None
        """
        container = self._build_container(locations)
        container = self._apply_weights(container, weights, accuracy)
        self._build_geojson(container)

    def geojson(self):
        """
        Returns the geojson of all geolocated locations
        :returns: geojson.FeatureCollection
        """
        return self.geojsoner.geojson()

    def __repr__(self):
        return "<Geolocator()>"
class LatLng():
    """A latitude/longitude coordinate pair tagged with an identity."""

    def __init__(self, identity, lat, lng):
        self.identity = identity
        self.lat = lat
        self.lng = lng

    def __repr__(self):
        return "<LatLng(identity=%s, lat=%s, lng=%s)>" % (
            str(self.identity), str(self.lat), str(self.lng))


def RetrieveLatLngs(feature_collection):
    """Extract every coordinate pair from a geojson-like object.

    The pairs are pulled out with a regex over the stringified input, so
    the operation is flexible: it works on anything whose string form
    embeds ``[x, y]`` number pairs, not just geojson-library objects.
    """
    pair_strings = re.findall(
        r"\[\-*\d+\.*\d*\,\s\-*\d+\.*\d*\]",
        str(feature_collection))
    # literal_eval turns each "[x, y]" string into a real two-item list.
    pairs = [literal_eval(text) for text in pair_strings]
    return [LatLng(index, pair[0], pair[1])
            for index, pair in enumerate(pairs)]
|
|
# -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Scopes
------
The following scopes are available:
- read
- write
- delegate
The "read" scope permits the client to read a protected resource.
This is the default scope.
The "write" scope permits a client to write to a protected resource.
Unlike "read", "write" must also include the identity of the
resource, within "[]" brackets, e.g. "write[1234]" allows writing to a
resource identified by 1234. Services may also be identified with its registered
URL, e.g. "write[http://test.com]". The auth service verifies whether the
client is permitted to write to the resource before issuing an access
token.
The "delegate" scope is used to delegate writing to a resource, e.g.
the onboarding service accesses the repository on the client's behalf
To access the resource, the delegate will exchange the token for a new
"write" token.
The delegate scope has the form
delegate[<service id or url>]:write[<resource id or url>]
Where service id or url is the delegate service's ID or URL (e.g. the
onboarding service URL), and the resource id or URL is the protected
resource's ID or URL (e.g. the repository ID).
The advantage of using "delegate" instead of "write" is that the token can
only be used by the specified delegate (assuming the delegate keeps their
credentials secure), and the delegate will only be able to write to the
specified resource.
"""
import re
from collections import defaultdict, namedtuple
from functools import partial
import couch
from perch import views, Repository, Service
from tornado.gen import coroutine, Return
from .exceptions import InvalidScope, Unauthorized
# Scope action keywords and the regexes that extract their bracketed
# resource/delegate identifiers (e.g. "write[1234]").
READ = 'read'
READ_REGEX = re.compile(r'^read\[(?P<resource_id>.+)\]$')
WRITE = 'write'
WRITE_REGEX = re.compile(r'^write\[(?P<resource_id>.+)\]$')
DELEGATE = 'delegate'
DELEGATE_REGEX = re.compile(r'^delegate\[(?P<delegate_id>.+)\]:(?P<delegated_action>read|write)\[(?P<resource_id>.+)\]$')
# Map a scope action keyword to its single-character access flag.
ACCESS_MAPPING = {
    READ: 'r',
    WRITE: 'w'
}
# Map a perch resource_type string to its model class, used to rehydrate
# documents fetched from the database.
RESOURCE_TYPES = {
    Repository.resource_type: Repository,
    Service.resource_type: Service,
}
# access: 'r' or 'w'; delegate_id: the delegate service granted it (or None).
Access = namedtuple('Access', ['access', 'delegate_id'])
class Scope(object):
    """Parsed representation of a space-separated OAuth2 scope string."""

    def __init__(self, scope):
        """
        :param scope: the raw scope string, e.g. "read write[1234]"
        :raises InvalidScope: if the scope string cannot be parsed
        """
        self.scope = scope
        # read is True if the scope is for reading any resource
        self.read = False
        try:
            self._group()
        except KeyError:
            # Raised by an ACCESS_MAPPING lookup for an unknown action.
            raise InvalidScope('Invalid action')

    def __str__(self):
        return self.scope

    def __repr__(self):
        return '<Scope: {}>'.format(self.scope)

    def _group(self):
        """
        Group scope string by actions and resources

        Raises InvalidScope if the scope is invalid
        """
        # resource id/url -> set of Access tuples granted on that resource
        self.resources = defaultdict(set)
        # delegate id/url -> set of Access tuples the delegate may exercise
        self.delegates = defaultdict(set)
        for x in self.scope.split():
            if x.startswith(READ):
                self._add_read(x)
            elif x.startswith(WRITE):
                self._add_write(x)
            elif x.startswith(DELEGATE):
                self._add_delegate(x)
            else:
                raise InvalidScope('Scope has missing elements')

    def _add_read(self, scope):
        """Add 'read' scope to self.resources"""
        access = ACCESS_MAPPING[READ]
        matched = re.match(READ_REGEX, scope)
        if not matched:
            # A bare "read" (no resource ID) grants read on everything.
            self.read = True
        else:
            resource_id = matched.group('resource_id')
            self.resources[resource_id].add(Access(access, None))

    def _add_write(self, scope):
        """Add 'write' scope to self.resources"""
        access = ACCESS_MAPPING[WRITE]
        matched = re.match(WRITE_REGEX, scope)
        if not matched:
            raise InvalidScope('Write scope requires a resource ID')
        resource_id = matched.group('resource_id')
        self.resources[resource_id].add(Access(access, None))

    def _add_delegate(self, scope):
        """Add 'delegate' scope to self.delegates & self.resources"""
        matched = re.match(DELEGATE_REGEX, scope)
        if not matched:
            raise InvalidScope('Invalid delegate scope')
        resource_id = matched.group('resource_id')
        delegate_id = matched.group('delegate_id')
        # The delegated action ("read"/"write") determines the access flag.
        access = ACCESS_MAPPING[matched.group('delegated_action')]
        self.delegates[delegate_id].add(Access(access, None))
        # Record on the resource which delegate was granted the access.
        self.resources[resource_id].add(Access(access, delegate_id))

    def within_scope(self, access, resource_id):
        """Is accessing the resource within this scope"""
        if access in ('r', 'rw') and self.read is True:
            return True
        # NOTE(review): these defaultdict lookups create empty entries for
        # unknown IDs as a side effect — presumably harmless; confirm.
        access_set = {Access(x, None) for x in access if x in 'rw'}
        return bool(access_set & (self.resources[resource_id] | self.delegates[resource_id]))

    @coroutine
    def validate(self, client):
        """
        Validate the requested OAuth2 scope

        If a "write" or "delegate" scope is requested then also checks access
        to the resource and delegate

        :param client: the client object. Used to check the client is
            authorized for the requested scope
        :raise:
            InvalidScope: The scope is invalid

            Unauthorized: The client is not authorized for the scope
        """
        resource_func = partial(self._check_access_resource, client)
        delegate_func = partial(self._check_access_delegate, client)
        # Check resources and delegates concurrently.
        yield [self._check_access_resources(resource_func, self.resources),
               self._check_access_resources(delegate_func, self.delegates)]

    @coroutine
    def _check_access_resources(self, func, resources):
        """Check resources exist and then call func for each resource"""
        # Split plain IDs from URLs so each can be resolved appropriately.
        grouped = {'ids': {}, 'urls': {}}
        for k, v in resources.items():
            if k.startswith('http'):
                grouped['urls'][k] = v
            else:
                grouped['ids'][k] = v
        yield [self._check_access_resource_ids(func, grouped['ids']),
               self._check_access_resource_urls(func, grouped['urls'])]

    @coroutine
    def _check_access_resource_ids(self, func, resources):
        """
        Check resources identified by an ID exist and then call func for
        each resource
        """
        if not resources:
            raise Return()
        for resource_id in resources:
            try:
                doc = yield views.service_and_repository.first(key=resource_id)
            except couch.NotFound:
                raise InvalidScope('Scope contains an unknown resource ID')
            # Rehydrate the perch model class matching the document's type.
            resource = RESOURCE_TYPES[doc['value']['type']](**doc['value'])
            try:
                yield resource.get_parent()
            except couch.NotFound:
                raise InvalidScope('Invalid resource - missing parent')
            func(resource, resources[resource_id])

    @coroutine
    def _check_access_resource_urls(self, func, resources):
        """
        Check resources identified by a URL exist and then call func for each
        resource
        """
        for url in resources:
            try:
                resource = yield Service.get_by_location(url)
            except couch.NotFound:
                raise InvalidScope("Scope contains an unknown location: '{}'"
                                   .format(url))
            func(resource, resources[url])

    def _concatenate_access(self, access):
        """Concatenate a resource's access flags into a sorted string"""
        return ''.join(sorted(list({x.access for x in access})))

    def _check_access_resource(self, client, resource, access):
        """Check the client has access to the resource"""
        requested_access = self._concatenate_access(access)
        has_access = client.authorized(requested_access, resource)
        if not has_access:
            raise Unauthorized(
                "Client '{}' does not have '{}' access to '{}'"
                .format(client.id, requested_access, resource.id))

    def _check_access_delegate(self, client, delegate, access):
        """Check delegate is the correct type and check access"""
        # Only services may act as delegates (e.g. the onboarding service).
        if delegate.type != Service.resource_type:
            raise InvalidScope("Only services can be delegates. '{}' is a '{}'"
                               .format(delegate.id, delegate.type))
        self._check_access_resource(client, delegate, access)
|
|
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import copy
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
from six import moves
_PY26 = sys.version_info[0:2] == (2, 6)
from nova.openstack.common._i18n import _
from nova.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
"keystonemiddleware=WARN", "routes.middleware=WARN",
"stevedore=WARN"]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
def list_opts():
    """Entry point for oslo.config-generator."""
    # Deep-copy each option group so the generator cannot mutate the
    # registered option objects.
    option_groups = (common_cli_opts, logging_cli_opts,
                     generic_log_opts, log_opts)
    return [(None, copy.deepcopy(group)) for group in option_groups]
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')

try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    # Minimal re-implementation for Python 2.6: discards every record.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # No I/O ever happens, so no lock is required.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
    # The outermost stack frame belongs to the script that started the
    # process; its file name (sans directory) names the log file.
    return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file path from CONF.log_file / CONF.log_dir.

    :param binary: base name to use when only log_dir is set; defaults to
        the running binary's name
    :returns: the path to log to, or None (meaning log to stdout/stderr)
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile:
        # A relative log_file is anchored under log_dir when both are set.
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)
    return None
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter base that understands the synthesized AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        """Log ``msg`` at the AUDIT level."""
        self.log(logging.AUDIT, msg, *args, **kwargs)

    def isEnabledFor(self, level):
        if not _PY26:
            return super(BaseLoggerAdapter, self).isEnabledFor(level)
        # LoggerAdapter.isEnabledFor() only exists from Python 2.7 on,
        # so replicate it by delegating to the wrapped logger on 2.6.
        return self.logger.isEnabledFor(level)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers building the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        # Deliberately skip BaseLoggerAdapter.__init__; the real adapter
        # is created lazily by the `logger` property below.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
            if six.PY3:
                # In Python 3, the code fails because the 'manager' attribute
                # cannot be found when using a LoggerAdapter as the
                # underlying logger. Work around this issue.
                self._logger.manager = self._logger.logger.manager
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """Adapter that injects request-context data into every log record."""

    # Historical alias; warning() is the modern spelling.
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # Maps deprecated-message text -> list of arg tuples already logged.
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.
        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())
        if args in sent_args:
            # Already logged this message, so don't log it again.
            return
        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        """Build the 'extra' dict consumed by the context-aware formatters."""
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(msg, six.text_type):
            msg = six.text_type(msg)
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        # An explicit context= kwarg wins over the thread-local one.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))
        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra
        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
        extra['project'] = self.project
        extra['version'] = self.version
        # Keep a snapshot of everything under 'extra' for JSONFormatter.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serialises each LogRecord to a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the traceback as a list of strings.

        With strip_newlines (the default) each traceback entry is split
        into individual non-empty lines.
        """
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        """Serialise ``record`` and its metadata as a JSON string."""
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # 'extra' is attached by ContextAdapter.process() when present.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Return a sys.excepthook replacement that logs uncaught exceptions.

    The hook logs a one-line summary at CRITICAL on ``product_name``'s
    logger, attaching the full exc_info for the formatter to expand.
    """
    def logging_excepthook(exc_type, value, tb):
        summary = "".join(traceback.format_exception_only(exc_type, value))
        getLogger(product_name).critical(summary,
                                         exc_info=(exc_type, value, tb))
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        # Path of the offending config file and the underlying error text.
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
def _load_log_config(log_config_append):
    """Load a fileConfig-style logging config, wrapping parse failures.

    :param log_config_append: path to the logging configuration file
    :raises LogConfigError: if the file cannot be parsed
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except (moves.configparser.Error, KeyError) as exc:
        raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
    """Set up logging for ``product_name``.

    Uses a logging config file when CONF.log_config_append is set,
    otherwise configures handlers/levels from the other CONF options,
    and installs an excepthook that logs uncaught exceptions.
    """
    config_file = CONF.log_config_append
    if config_file:
        _load_log_config(config_file)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string=None,
                 default_log_levels=None):
    """Override the defaults of selected logging options.

    Both parameters are optional keywords: default_log_levels was added
    later in a backwards-incompatible change, so callers may pass either
    one independently.
    """
    if default_log_levels is not None:
        cfg.set_defaults(log_opts,
                         default_log_levels=default_log_levels)
    if logging_context_format_string is not None:
        cfg.set_defaults(
            log_opts,
            logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility code.

    Accepts either a SysLogHandler attribute name (e.g. 'LOG_USER') or a
    key of SysLogHandler.facility_names (e.g. 'user').

    :returns: the numeric syslog facility code
    :raises TypeError: if the configured facility name is unknown
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    if facility is None:
        # BUG FIX: dict.keys() returns a view in Python 3, which has no
        # extend(); copy into a real list before appending the aliases.
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))
    return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prefixes messages with APP-NAME per RFC 5424."""

    def __init__(self, *args, **kwargs):
        # Capture the running binary's name once; it becomes the APP-NAME
        # prefix added in format() below.
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

    def format(self, record):
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        msg = logging.handlers.SysLogHandler.format(self, record)
        msg = self.binary_name + ' ' + msg
        return msg
def _setup_logging_from_conf(project, version):
    """Configure the root logger's handlers, formatters and levels from CONF."""
    log_root = getLogger(None).logger
    # Start from a clean slate so repeated setup() calls don't stack handlers.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        handler = importutils.import_object(
            "oslo.messaging.notify.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)
    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))
    # Root level: debug wins over verbose; otherwise default to WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)
    # Apply per-module "name=LEVEL" overrides.
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)
    if CONF.use_syslog:
        try:
            facility = _find_facility_from_conf()
            # TODO(bogdando) use the format provided by RFCSysLogHandler
            #                after existing syslog format deprecation in J
            if CONF.use_syslog_rfc_format:
                syslog = RFCSysLogHandler(address='/dev/log',
                                          facility=facility)
            else:
                syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                        facility=facility)
            log_root.addHandler(syslog)
        except socket.error:
            log_root.error('Unable to add syslog handler. Verify that syslog '
                           'is running.')
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return the cached ContextAdapter for ``name``, creating it on demand."""
    adapter = _loggers.get(name)
    if adapter is None:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
    return adapter
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """File-like adapter that forwards ``write`` calls to a logger."""

    def __init__(self, logger, level=logging.INFO):
        # Target logger and the level every written line is logged at.
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Trailing whitespace/newlines are dropped; the logger adds its own.
        self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context
    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string
        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')
        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(record.msg, six.text_type):
            record.msg = six.text_type(record.msg)
        # store project info
        record.project = self.project
        record.version = self.version
        # store request info
        context = getattr(local.store, 'context', None)
        if context:
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        # A request_id on the record selects the context-aware format.
        if record.__dict__.get('request_id'):
            fmt = CONF.logging_context_format_string
        else:
            fmt = CONF.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            fmt += " " + CONF.logging_debug_format_suffix
        # Python 3.2+ formats through a style object; keep _fmt in sync so
        # the rest of the Formatter machinery keeps working.
        if sys.version_info < (3, 2):
            self._fmt = fmt
        else:
            self._style = logging.PercentStyle(fmt)
            self._fmt = self._style._fmt
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted_lines = []
        for line in lines:
            # Prefix every traceback line so multi-line output stays parseable.
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that exposes an ANSI color code as ``%(color)s``."""

    # Escape sequence per level; AUDIT is the level synthesized above.
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Attach the level's color so format strings may reference it.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised when a deprecated config option is used fatally."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # Initialise the base exception with the fully formatted message
        # (preserves the original super(Exception, ...) call form).
        formatted = self.message % {'msg': msg}
        super(Exception, self).__init__(formatted)
|
|
# Copyright (c) 2013-2015 University Corporation for Atmospheric Research/Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
"""Support making data requests to the radar data query service (radar server) on a TDS.
This includes forming proper queries as well as parsing the returned catalog.
"""
from collections import namedtuple
import xml.etree.ElementTree as ET
from .catalog import TDSCatalog
from .http_util import BadQueryError, DataQuery, HTTPEndPoint, urljoin
class RadarQuery(DataQuery):
    """Represent a query to the THREDDS radar server.

    Expands on the queries supported by :class:`~siphon.http_util.DataQuery` to add queries
    specific to the radar data query service.
    """

    def stations(self, *stns):
        """Specify one or more stations for the query.

        This modifies the query in-place, but returns `self` so that multiple
        queries can be chained together on one line.

        This replaces any existing spatial queries that have been set.

        Parameters
        ----------
        stns : one or more strings
            One or more station names to request

        Returns
        -------
        self : RadarQuery
            Returns self for chaining calls
        """
        # Station selection is mutually exclusive with other spatial
        # queries, so it is stored through the shared spatial-query slot.
        self._set_query(self.spatial_query, stn=stns)
        return self
class RadarServer(HTTPEndPoint):
    """Wrap access to the THREDDS radar query service (radar server).
    Simplifies access via HTTP to the radar server endpoint. Parses the metadata, provides
    query catalog results download and parsing based on the appropriate query.
    Attributes
    ----------
    metadata : :class:`~siphon.metadata.TDSCatalogMetadata`
        Contains the result of parsing the radar server endpoint's dataset.xml. This has
        information about the time and space coverage, as well as full information
        about all of the variables.
    variables : set(str)
        Names of all variables available in this dataset
    stations : dict[str, Station]
        Mapping of station ID to a :class:`Station`, which is a namedtuple containing the
        station's id, name, latitude, longitude, and elevation.
    """
    def __init__(self, url):
        """Create a RadarServer instance.
        Parameters
        ----------
        url : str
            The base URL for the endpoint
        """
        # Accept either the endpoint base URL or its full dataset.xml URL.
        xmlfile = '/dataset.xml'
        if url.endswith(xmlfile):
            url = url[:-len(xmlfile)]
        super(RadarServer, self).__init__(url)
    def _get_metadata(self):
        # Parse dataset.xml for coverage/variable metadata, derive the set
        # of variable names (entries look like "name/..."), then load the
        # station table.
        ds_cat = TDSCatalog(self.url_path('dataset.xml'))
        self.metadata = ds_cat.metadata
        self.variables = {k.split('/')[0] for k in self.metadata['variables'].keys()}
        self._get_stations()
    def _get_stations(self, station_file='stations.xml'):
        # Fetch the station list XML and parse it into self.stations.
        resp = self.get_path(station_file)
        self.stations = parse_station_table(ET.fromstring(resp.text))
    def query(self):
        """Return a new query for the radar server.
        Returns
        -------
        RadarQuery
            The new query
        """
        return RadarQuery()
    def validate_query(self, query):
        """Validate a query.
        Determines whether `query` is well-formed. This includes checking for all
        required parameters, as well as checking parameters for valid values.
        Parameters
        ----------
        query : RadarQuery
            The query to validate
        Returns
        -------
        valid : bool
            Whether `query` is valid.
        """
        valid = True
        # Make sure all stations are in the table
        if 'stn' in query.spatial_query:
            valid = valid and all(stid in self.stations
                                  for stid in query.spatial_query['stn'])
        # Every requested variable must be known to this dataset.
        if query.var:
            valid = valid and all(var in self.variables for var in query.var)
        return valid
    def get_catalog(self, query):
        """Fetch a parsed THREDDS catalog from the radar server.
        Requests a catalog of radar data files from the radar server given the
        parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog` instance.
        Parameters
        ----------
        query : RadarQuery
            The parameters to send to the radar server
        Returns
        -------
        catalog : TDSCatalog
            The catalog of matching data files
        Raises
        ------
        :class:`~siphon.http_util.BadQueryError`
            When the query cannot be handled by the server
        See Also
        --------
        get_catalog_raw
        """
        # TODO: Refactor TDSCatalog so we don't need two requests, or to do URL munging
        try:
            # Build the full catalog URL by hand (base + query string) and
            # let TDSCatalog fetch/parse it.
            url = self._base[:-1] if self._base[-1] == '/' else self._base
            url += '?' + str(query)
            return TDSCatalog(url)
        except ET.ParseError:
            # A non-XML response means the server rejected the query; fetch
            # the raw body so the error message carries the server's text.
            raise BadQueryError(self.get_catalog_raw(query))
    def get_catalog_raw(self, query):
        """Fetch THREDDS catalog XML from the radar server.
        Requests a catalog of radar data files from the radar server given the
        parameters in `query` and returns the raw XML.
        Parameters
        ----------
        query : RadarQuery
            The parameters to send to the radar server
        Returns
        -------
        catalog : bytes
            The XML of the catalog of matching data files
        See Also
        --------
        get_catalog
        """
        return self.get_query(query).content
def get_radarserver_datasets(server):
    """Get datasets from a THREDDS radar server's top-level catalog.

    Helper that builds the catalog URL from the server base URL, fetches
    the catalog, and returns the contained catalog references.

    Parameters
    ----------
    server : string
        The base URL to the THREDDS server

    Returns
    -------
    datasets : dict[str, :class:`~siphon.catalog.CatalogRef`]
        Mapping of dataset name to the catalog reference
    """
    # Normalize to a trailing slash so urljoin keeps the full path.
    server = server if server[-1] == '/' else server + '/'
    return TDSCatalog(urljoin(server, 'radarServer/catalog.xml')).catalog_refs
#
# The remainder of the file is not considered part of the public API.
# Use at your own risk!
#
Station = namedtuple('Station', 'id elevation latitude longitude name')
def parse_station_table(root):
    """Parse a station-list XML document into a dict keyed by station id."""
    return {station.id: station
            for station in (parse_xml_station(elem)
                            for elem in root.findall('station'))}
def parse_xml_station(elem):
    """Create a :class:`Station` instance from a station XML element."""
    def _coord(tag):
        # Numeric sub-elements are stored as text; convert to float.
        return float(elem.find(tag).text)
    return Station(id=elem.attrib['id'],
                   elevation=_coord('elevation'),
                   latitude=_coord('latitude'),
                   longitude=_coord('longitude'),
                   name=elem.find('name').text)
|
|
""" Python script to generate a Python extension module from a clib
header file. """
import sys
# Module-level placeholders.
# NOTE(review): neither name appears to be referenced anywhere in this
# script -- presumably SWIG-style leftovers; confirm before removing.
_class = ''
_newclass = 1
def getargs(line):
    """Parse a C prototype and return (function_name, argument_list).

    `line` is the prototype text up to and including the closing
    parenthesis.  The result is the function name and a list of
    [type, name] pairs; a leading 'const' is folded into the type, and
    bare (typeless or empty) entries are skipped.

    Raises ValueError on a malformed prototype.  (The original raised
    string exceptions, which are a TypeError on any modern Python.)
    """
    i1 = line.find('(')
    i2 = line.find(')')
    if i1 < 0 or i2 < 0:
        raise ValueError('syntax error: missing open or close parenthesis')
    # Function name is the last token before the '('.
    nm = line[:i1].split()[-1]
    args = [a.split() for a in line[i1 + 1:i2].split(',')]
    v = []
    for a in args:
        if len(a) <= 1:
            # Empty arg list or a bare token (e.g. 'void'): nothing to keep.
            pass
        elif len(a) == 2:
            v.append(a)
        elif len(a) == 3 and a[0] == 'const':
            v.append([a[0] + ' ' + a[1], a[2]])
        else:
            raise ValueError('malformed argument %r in line %r' % (a, line))
    return nm, v
# C-to-Fortran type maps: output (non-const pointer) arguments, const input
# arguments, and function return types.
_c2fout = {'int*':'integer', 'integer*':'integer',
           'double*':'double precision', 'doublereal*':'double precision',
           'char*':'character*(*)'}
_c2fin = {'const int*':'integer', 'const integer*':'integer',
          'const double*':'double precision',
          'const doublereal*':'double precision',
          'const char*':'character*(*)'}
_c2fret = {'int':'integer', 'integer':'integer',
           'double':'double precision',
           'status_t':'integer',
           'doublereal':'double precision'}
def writeinterface(fint, rtype, name, args):
    """Write one Fortran interface declaration to file object `fint`.

    `rtype` is the C return type, `name` the C function name (a trailing
    f2c underscore is stripped), and `args` a list of [type, name] pairs
    as produced by getargs().  Hidden 'ftnlen' string-length arguments
    are skipped.
    """
    s = ' '+_c2fret[rtype] + ' function '
    if name[-1] == '_':
        name = name[:-1]
    s += name + '('
    argstr = ''
    for a in args:
        # '!=' replaces the Python 2-only '<>' operator used originally.
        if a[0] != 'ftnlen':
            argstr += a[1] + ', '
    if argstr:
        argstr = argstr[:-2]
    s += argstr+')\n'
    fint.write(s)
    for a in args:
        # Pointer-to-double arguments are declared as assumed-size arrays.
        arr = '(*)' if a[0][0] == 'd' else ''
        if a[0] in _c2fin:
            fint.write(' '+_c2fin[a[0]]+', intent(in) :: '+a[1]+arr+'\n')
        elif a[0] in _c2fout:
            fint.write(' '+_c2fout[a[0]]+', intent(out) :: '+a[1]+arr+'\n')
    fint.write(' end function '+name+'\n\n')
def writef90(fmod, rtype, otype, hndl, name, args):
    """Write one Fortran 90 wrapper procedure to file object `fmod`.

    C functions returning status_t become subroutines storing the status
    in self%err; all others become functions.  The first C argument (the
    object handle) is replaced by the derived-type `self` argument whose
    handle field is `hndl`.  Hidden 'ftnlen' arguments are skipped.
    """
    subroutine = 0
    if rtype == 'status_t':
        subroutine = 1
    if subroutine:
        s = ' subroutine '
    else:
        s = ' '+_c2fret[rtype] + ' function '
    if name[-1] == '_':
        name = name[:-1]
    # Wrapper name: replace the leading character with the 'ct' prefix.
    wname = 'ct'+name[1:]
    s += wname + '('
    argstr='self, '
    for a in args[1:]:
        # '!=' replaces the Python 2-only '<>' operator used originally.
        if a[0] != 'ftnlen':
            argstr += a[1] + ', '
    if len(argstr) > 0:
        argstr = argstr[:-2]
    s += argstr+')\n'
    fmod.write(s)
    fmod.write(""" implicit none
type("""+otype+'), intent(inout) :: self\n')
    for a in args[1:]:
        if a[0][0] == 'd':
            arr = '(*)'
        else:
            arr = ''
        if a[0] in _c2fin:
            fmod.write(' '+_c2fin[a[0]]+', intent(in) :: '+a[1]+arr+'\n')
        elif a[0] in _c2fout:
            fmod.write(' '+_c2fout[a[0]]+', intent(out) :: '+a[1]+arr+'\n')
    # Build the call to the underlying interface function, passing the
    # handle first and the remaining (non-ftnlen) arguments after it.
    if subroutine:
        s = ' self%err = '+name+'(self%'+hndl+', '
    else:
        s = ' '+wname+' = '+name+'(self%'+hndl+', '
    argstr = ''
    for a in args[1:]:
        if a[0] != 'ftnlen':
            argstr += a[1] + ', '
    argstr = argstr[:-2]
    s += argstr+')'
    if subroutine:
        fmod.write(s+'\n end subroutine '+wname+'\n\n')
    else:
        fmod.write(s+'\n end function '+wname+'\n\n')
# --- Script driver -----------------------------------------------------------
# Usage: <script> <header-file> <fortran-type-name> <handle-field>
# NOTE(review): the '<>' operator below is Python 2-only syntax; this
# script cannot run unmodified under Python 3.
fname = sys.argv[1] # fctxml
otype = sys.argv[2] # XML_Node
hndl = sys.argv[3] # xml_id
base, ext = fname.split('.')
modname = base
f = open(fname,'r')
fint = open(base+'_interface.f90','w')
#fmod = open(modname+'.f90','w')
fmod = open('cantera_'+modname+'.f90','w')
lines = f.readlines()
f.close()
#_rtypes = ['int', 'double', 'integer']
infunc = 0
funcline = ''
extern = 0
# First pass: emit a Fortran interface block for every CANTERA_CAPI
# prototype found after the first 'extern' marker.
fint.write('module '+base+'\n')
fint.write('interface\n')
for line in lines:
    toks = line.split()
    if len(toks) > 0 and toks[0][:2] <> '//':
        if toks[0] == 'extern':
            extern = 1
        if extern:
            if not infunc:
                if line.find('CANTERA_CAPI') > 0:
                    infunc = 1
                    funcline = line
            elif infunc:
                funcline += line
            last = toks[-1]
            # A prototype ends on the line whose last token ends with '{'.
            if infunc and last[-1] == '{':
                infunc = 0
                name, args = getargs(funcline)
                toks = funcline.split()
                a = writeinterface(fint, toks[0], name, args)
                funcline = ''
fint.write('end interface\n')
fint.write('end module '+base+'\n')
fint.close()
# Second pass: emit the wrapper module containing the derived type and the
# 'ct'-prefixed wrapper procedures.
fmod.write('module '+modname+'\n')
fmod.write(' use '+base+"""
type """+otype+"""
integer :: """+hndl+"""
end type """+otype+"""
contains
""")
for line in lines:
    toks = line.split()
    if len(toks) > 0 and toks[0][:2] <> '//':
        if toks[0] == 'extern':
            extern = 1
        if extern:
            if not infunc:
                if line.find('CANTERA_CAPI') > 0:
                    infunc = 1
                    funcline = line
            elif infunc:
                funcline += line
            last = toks[-1]
            if infunc and last[-1] == '{':
                infunc = 0
                name, args = getargs(funcline)
                toks = funcline.split()
                a = writef90(fmod, toks[0], otype, hndl, name, args)
                funcline = ''
fmod.write('end module '+base+'\n')
fmod.close()
|
|
# -*- coding: utf-8 -*-
#
# tree.py
#
# (c) D.C.-G. 2014
#
# Tree widget for albow
#
from albow.widget import Widget
from albow.menu import Menu
from albow.fields import IntField, FloatField, TextFieldWrapped
from albow.controls import CheckBox, AttrRef, Label, Button
from albow.dialogs import ask, alert, input_text_buttons
from albow.translate import _
from extended_widgets import ChoiceButton
from theme import ThemeProperty
from layout import Column, Row
from dialogs import Dialog
from palette_view import PaletteView
from scrollpanel import ScrollRow
from utils import blit_in_rect
from pygame import image, Surface, Rect, SRCALPHA, draw, event
import copy
#-----------------------------------------------------------------------------
# Maps a Python type to (display label, albow editor widget, default value).
# A None widget marks a container type (children are added separately).
# NOTE: uses the Python 2 `unicode` builtin; this module requires Python 2.
item_types_map = {dict: ("Compound", None, {}),
                  int: ("Integer", IntField, 0),
                  float: ("Floating point", FloatField, 0.0),
                  unicode: ("Text", TextFieldWrapped, ""),
                  bool: ("Boolean", CheckBox, True),
                  }
def setup_map_types_item(mp=None):
    """Build a display-label -> (type, widget, default) mapping from `mp`.

    `mp` defaults to the module-level `item_types_map`.  When two types
    end up with the same (possibly translated) display label, both
    entries are re-keyed with their type name appended so neither is
    silently lost.
    """
    if not mp:
        mp = item_types_map
    map_types_item = {}
    for k, v in mp.items():
        # Membership test on the dict itself instead of .keys() (idiom).
        if v[0] in map_types_item:
            # Label collision: disambiguate both entries with type names.
            _v = map_types_item.pop(v[0])
            map_types_item[u"%s (%s)"%(_(v[0]), _v[0].__name__)] = _v
            map_types_item[u"%s (%s)"%(_(v[0]), k.__name__)] = (k, v[1], v[2])
        else:
            map_types_item[v[0]] = (k, v[1], v[2])
    return map_types_item
# Default label -> (type, widget, default) mapping used by Tree and dialogs.
map_types_item = setup_map_types_item()
#-----------------------------------------------------------------------------
# Tree item builder methods
def create_base_item(self, i_type, i_name, i_value):
    """Return a (name, value) pair for a new tree item.

    `i_type` is the target type (dict, int, float, unicode, bool) and
    `i_value` the default value, which is converted to that type.

    Bug fix: the original returned `type(i_type)(i_value)`, which
    evaluates to `type(i_value)` and therefore stored the *class* of the
    default value rather than a converted value.
    """
    return i_name, i_type(i_value)
create_dict = create_int = create_float = create_unicode = create_bool = create_base_item
#-----------------------------------------------------------------------------
class SetupNewItemPanel(Dialog):
    """Dialog asking the user for a new item's name and default value.

    `type_string` selects the (type, widget, default) entry from `types`;
    on OK the dialog is dismissed with a (type, name, value) tuple.
    """
    def __init__(self, type_string, types=map_types_item, ok_action=None):
        self.type_string = type_string
        self.ok_action = ok_action
        title = Label("Choose default data")
        self.t, widget, self.v = types[type_string]
        self.n = u""
        w_name = TextFieldWrapped(ref=AttrRef(self, 'n'))
        self.w_value = self.get_widget(widget)
        col = Column([Column([title,]), Label(_("Item Type: %s")%type_string, doNotTranslate=True), Row([Label("Name"), w_name], margin=0), Row([Label("Value"), self.w_value], margin=0), Row([Button("OK", action=ok_action or self.dismiss_ok), Button("Cancel", action=self.dismiss)], margin=0)], margin=0, spacing=2)
        Dialog.__init__(self, client=col)
    def dismiss_ok(self):
        # Dismiss with (type, name, value); fall back to the registered
        # default when the value widget exposes no 'value' attribute.
        self.dismiss((self.t, self.n, getattr(self.w_value, 'value', map_types_item.get(self.type_string, [None,] * 3)[2])))
    def get_widget(self, widget):
        """Instantiate the editor widget for the chosen type."""
        if hasattr(widget, 'value'):
            value = widget(value=self.v)
        elif hasattr(widget, 'text'):
            value = widget(text=self.v)
        elif widget is None:
            # Typo fix: "chlidren" -> "children".
            value = Label("This item type is a container. Add children later.")
        else:
            msg = "*** Error in SelectItemTypePanel.__init__():\n Widget <%s> has no 'text' or 'value' member."%widget
            # print() call form works on Python 2 and 3 for a single arg.
            print(msg)
            value = Label(msg)
        return value
#-----------------------------------------------------------------------------
class SelectItemTypePanel(Dialog):
    """Dialog asking the user to pick one item type from `responses`."""
    def __init__(self, title, responses, default=None, ok_action=None):
        self.response = responses[0]
        self.ok_action = ok_action
        title = Label(title)
        self.w_type = ChoiceButton(responses)
        # Bug fix: the Cancel button was wired to `ok_action or
        # self.dismiss`, so a supplied ok_action was also triggered on
        # Cancel (compare SetupNewItemPanel, where Cancel is self.dismiss).
        col = Column([title, self.w_type, Row([Button("OK", action=ok_action or self.dismiss_ok), Button("Cancel", action=self.dismiss)], margin=0)], margin=0, spacing=2)
        Dialog.__init__(self, client=col)
    def dismiss_ok(self):
        self.dismiss(self.w_type.selectedChoice)
#-----------------------------------------------------------------------------
def select_item_type(ok_action, types=map_types_item):
    """Ask the user for an item type, then for its name and default value.

    With several candidate types a chooser dialog is shown first; with a
    single type it is used directly.  Returns the result of the
    SetupNewItemPanel (a (type, name, value) tuple) or None if the type
    selection was cancelled.
    """
    if len(types) > 1:
        result = SelectItemTypePanel("Choose item type",
                                     responses=sorted(types.keys()),
                                     default=None).present()
    else:
        result = types.keys()[0]
    if isinstance(result, (str, unicode)):
        return SetupNewItemPanel(result, types, ok_action).present()
    return None
#-----------------------------------------------------------------------------
class TreeRow(ScrollRow):
    """Scrollable row widget used by Tree; right-click opens a context menu."""
    def click_item(self, n, e):
        self.parent.click_item(n, e.local)
    def mouse_down(self, e):
        if e.button != 3:
            ScrollRow.mouse_down(self, e)
            return
        # Re-dispatch the right-click as a button-1 event so the row under
        # the cursor gets selected, then pop up the context menu.
        attrs = {'alt': e.alt, 'meta': e.meta, 'ctrl': e.ctrl,
                 'shift': e.shift, 'button': 1, 'cmd': e.cmd,
                 'local': e.local, 'pos': e.pos,
                 'num_clicks': e.num_clicks}
        ScrollRow.mouse_down(self, event.Event(e.type, attrs))
        self.parent.show_menu(e.local)
#-----------------------------------------------------------------------------
class Tree(Column):
    """Generic tree widget displaying nested dict-like data.

    Each entry of `rows` is a list built in build_layout() with layout:
      [0] rendered bullet/label Surface, [1] field values, [2] column
      widths, [3] item name (key), [4] parent row id, [5] child row ids,
      [6] row id, [7] value type, [8] nesting level, [9] the value itself.
    """
    rows = []
    row_margin = 2
    column_margin = 2
    bullet_size = ThemeProperty('bullet_size')
    bullet_color_active = ThemeProperty('bullet_color_active')
    bullet_color_inactive = ThemeProperty('bullet_color_inactive')
    def __init__(self, *args, **kwargs):
        # (label, handler-method-name) pairs for the right-click menu;
        # the empty pair renders as a separator.
        self.menu = [("Add", "add_item"),
                     ("Delete", "delete_item"),
                     ("New child", "add_child"),
                     ("Rename", "rename_item"),
                     ("", ""),
                     ("Cut", "cut_item"),
                     ("Copy", "copy_item"),
                     ("Paste", "paste_item"),
                     ("Paste as child", "paste_child"),
                     ]
        if not hasattr(self, 'map_types_item'):
            global map_types_item
            self.map_types_item = setup_map_types_item()
        self.selected_item_index = None
        self.selected_item = None
        self.clicked_item = None
        self.copyBuffer = kwargs.pop('copyBuffer', None)
        self._parent = kwargs.pop('_parent', None)
        self.styles = kwargs.pop('styles', {})
        # dict is always a compound (container) type.
        self.compound_types = [dict,] + kwargs.pop('compound_types', [])
        self.item_types = self.compound_types + kwargs.pop('item_types', [a[0] for a in self.map_types_item.values()] or [int, float, unicode, bool])
        # Bind the module-level create_<type> factories onto this instance.
        for t in self.item_types:
            if 'create_%s'%t.__name__ in globals().keys():
                setattr(self, 'create_%s'%t.__name__, globals()['create_%s'%t.__name__])
        self.show_fields = kwargs.pop('show_fields', False)
        # Row ids of currently expanded (deployed) compound items.
        self.deployed = []
        self.data = data = kwargs.pop("data", {})
        self.draw_zebra = draw_zebra = kwargs.pop('draw_zebra', True)
        # self.inner_width = kwargs.pop('inner_width', 'auto')
        self.inner_width = kwargs.pop('inner_width', 500)
        self.__num_rows = len(data.keys())
        self.build_layout()
        # row_height = self.font.size(' ')[1]
        row_height = self.font.get_linesize()
        self.treeRow = treeRow = TreeRow((self.inner_width, row_height), 10, draw_zebra=draw_zebra)
        Column.__init__(self, [treeRow,], **kwargs)
    def cut_item(self):
        # Buffer holds (shallow copy of the row, 1); 1 marks a cut.
        self.copyBuffer = ([] + self.selected_item, 1)
        self.delete_item()
    def copy_item(self):
        # Buffer holds (shallow copy of the row, 0); 0 marks a plain copy.
        self.copyBuffer = ([] + self.selected_item, 0)
    def paste_item(self):
        """Paste the buffered item as a sibling of the selection."""
        parent = self.get_item_parent(self.selected_item)
        name = self.copyBuffer[0][3]
        old_name = u"%s"%self.copyBuffer[0][3]
        if self.copyBuffer[1] == 0:
            # Copies are renamed interactively to avoid a name clash.
            name = input_text_buttons("Choose a name", 300, self.copyBuffer[0][3])
        else:
            old_name = ""
        if name and type(name) in (str, unicode) and name != old_name:
            new_item = copy.deepcopy(self.copyBuffer[0][9])
            if hasattr(new_item, 'name'):
                new_item.name = name
            self.add_item_to(parent, (name, new_item))
    def paste_child(self):
        """Paste the buffered item as a child of the selection."""
        name = self.copyBuffer[0][3]
        old_name = u"%s"%self.copyBuffer[0][3]
        names = []
        children = self.get_item_children(self.selected_item)
        if children:
            names = [a[3] for a in children]
        # Only prompt for a new name when it would clash with a sibling.
        if name in names:
            name = input_text_buttons("Choose a name", 300, self.copyBuffer[0][3])
        else:
            old_name = ""
        if name and type(name) in (str, unicode) and name != old_name:
            new_item = copy.deepcopy(self.copyBuffer[0][9])
            if hasattr(new_item, 'name'):
                new_item.name = name
            self.add_item_to(self.selected_item, (name, new_item))
    @staticmethod
    def add_item_to_dict(parent, name, item):
        # Type-specific insertion hook looked up as add_item_to_<typename>.
        parent[name] = item
    def add_item_to(self, parent, (name, item)):
        # NOTE: Python 2 tuple-parameter syntax; invalid under Python 3.
        if parent is None:
            tp = 'dict'
            parent = self.data
        else:
            tp = parent[7].__name__
            parent = parent[9]
        if not name:
            # Generate the first unused 'Item NNN' name.
            i = 0
            name = 'Item %03d'%i
            while name in self.data.keys():
                i += 1
                name = 'Item %03d'%i
        meth = getattr(self, 'add_item_to_%s'%tp, None)
        if meth:
            meth(parent, name, item)
            self.build_layout()
        else:
            alert(_("No function implemented to add items to %s type.")%type(parent).__name__, doNotTranslate=True)
    def add_item(self, types_item=None):
        """Interactively create a new sibling of the selection."""
        r = select_item_type(None, types_item or self.map_types_item)
        if type(r) in (list, tuple):
            t, n, v = r
            meth = getattr(self, 'create_%s'%t.__name__, None)
            if meth:
                new_item = meth(self, t, n, v)
                self.add_item_to(self.get_item_parent(self.selected_item), new_item)
    def add_child(self, types_item=None):
        """Interactively create a new child of the selection."""
        r = select_item_type(None, types_item or self.map_types_item)
        if type(r) in (list, tuple):
            t, n, v = r
            meth = getattr(self, 'create_%s'%t.__name__, None)
            if meth:
                new_item = meth(self, t, n, v)
                self.add_item_to(self.selected_item, new_item)
    def delete_item(self):
        parent = self.get_item_parent(self.selected_item) or self.data
        # NOTE(review): this indexes the container with the whole row list;
        # when parent is self.data (a dict) a list key raises TypeError --
        # presumably self.selected_item[3] (the name) was intended; confirm.
        del parent[self.selected_item]
        self.selected_item_index = None
        self.selected_item = None
        self.build_layout()
    def rename_item(self):
        # Updates the row's display name (index 3) only; the underlying
        # data key is not visibly rewritten here.
        result = input_text_buttons("Choose a name", 300, self.selected_item[3])
        if type(result) in (str, unicode):
            self.selected_item[3] = result
            self.build_layout()
    def get_item_parent(self, item):
        """Return the row whose id matches `item`'s parent id, if any."""
        if item:
            pid = item[4]
            for itm in self.rows:
                if pid == itm[6]:
                    return itm
    def get_item_children(self, item):
        """Return child rows of `item`.

        For a deployed item the children are collected from self.rows;
        for a collapsed one they are synthesized from the value.
        """
        children = []
        if item:
            if item[6] in self.deployed:
                cIds = item[5]
                idx = self.rows.index(item)
                for child in self.rows[idx:]:
                    # Direct children: one level deeper, parent id matches.
                    if child[8] == item[8] + 1 and child[4] == item[6]:
                        children.append(child)
            else:
                k = item[3]
                v = item[9]
                lvl = item[8]
                id = item[6]
                aId = len(self.rows) + 1
                # Optional per-type hook to transform the value first.
                meth = getattr(self, 'parse_%s'%v.__class__.__name__, None)
                if meth is not None:
                    _v = meth(k, v)
                else:
                    _v = v
                ks = _v.keys()
                ks.sort()
                ks.reverse()
                for a in ks:
                    b = _v[a]
                    # NOTE(review): the next assignment is dead -- it is
                    # overwritten immediately by the full row record below.
                    itm = [lvl + 1, a, b, id, [], aId]
                    itm = [None, None, None, a, id, [], aId, type(b), lvl + 1, b]
                    children.insert(0, itm)
                    aId += 1
        return children
    def show_menu(self, pos):
        """Pop up the context menu and dispatch the chosen handler."""
        if self.menu:
            m = Menu("Menu", self.menu, handler=self)
            i = m.present(self, pos)
            if i > -1:
                meth = getattr(self, self.menu[i][1], None)
                if meth:
                    meth()
    def cut_item_enabled(self):
        return self.selected_item is not None
    def copy_item_enabled(self):
        return self.cut_item_enabled()
    def paste_item_enabled(self):
        return self.copyBuffer is not None
    def paste_child_enabled(self):
        # Children can only be pasted into compound (container) items.
        if not self.selected_item:
            return False
        return self.paste_item_enabled() and self.selected_item[7] in self.compound_types
    def add_item_enabled(self):
        return True
    def add_child_enabled(self):
        if not self.selected_item:
            return False
        return self.selected_item[7] in self.compound_types
    def delete_item_enabled(self):
        return self.selected_item is not None
    def rename_item_enabled(self):
        return self.selected_item is not None
    def build_layout(self):
        """Rebuild self.rows from self.data (see class docstring for the
        row record layout) and return the new list."""
        data = self.data
        parent = 0
        children = []
        keys = data.keys()
        keys.sort()
        items = [[0, a, data[a], parent, children, keys.index(a) + 1] for a in keys]
        rows = []
        w = 50
        aId = len(items) + 1
        while items:
            lvl, k, v, p, c, id = items.pop(0)
            _c = False
            fields = []
            c = [] + c
            if type(v) in self.compound_types:
                meth = getattr(self, 'parse_%s'%v.__class__.__name__, None)
                if meth is not None:
                    _v = meth(k, v)
                else:
                    _v = v
                ks = _v.keys()
                ks.sort()
                ks.reverse()
                for a in ks:
                    b = _v[a]
                    # Only deployed items push their children onto the
                    # work list (depth-first, preserving sorted order).
                    if id in self.deployed:
                        itm = [lvl + 1, a, b, id, [], aId]
                        items.insert(0, itm)
                        c.append(aId)
                    _c = True
                    aId += 1
            else:
                if type(v) in (list, tuple):
                    fields = v
                elif type(v) not in self.compound_types or hasattr(self._parent, 'build_%s'%k.lower()):
                    fields = [v,]
            head = Surface((self.bullet_size * (lvl + 1) + self.font.size(k)[0], self.bullet_size), SRCALPHA)
            # Pick the bullet painter: open/closed for containers with
            # children, a per-type painter otherwise, dead-end as fallback.
            if _c:
                meth = getattr(self, 'draw_%s_bullet'%{False: 'closed', True: 'opened'}[id in self.deployed])
            else:
                meth = getattr(self, 'draw_%s_bullet'%v.__class__.__name__, None)
            if not meth:
                meth = self.draw_deadend_bullet
            bg, fg, shape, text = self.styles.get(type(v),
                                                  ({True: self.bullet_color_active, False: self.bullet_color_inactive}[_c],
                                                   self.fg_color, 'square', ''),
                                                  )
            try:
                meth(head, bg, fg, shape, text, k, lvl)
            except:
                # Best effort: a failed bullet render leaves the surface
                # blank rather than breaking layout.
                pass
            rows.append([head, fields, [w] * len(fields), k, p, c, id, type(v), lvl, v])
        self.rows = rows
        return rows
    def deploy(self, id):
        """Toggle expansion of the row with the given id and re-layout."""
        if id in self.deployed:
            self.deployed.remove(id)
        else:
            self.deployed.append(id)
        self.build_layout()
    def click_item(self, n, pos):
        """Toggle expansion when the bullet is clicked, else select row n."""
        self.clicked_item = row = self.rows[n]
        r = self.get_bullet_rect(row[0], row[8])
        x = pos[0]
        if self.margin + r.left - self.treeRow.hscroll <= x <= self.margin + self.treeRow.margin + r.right - self.treeRow.hscroll:
            id = row[6]
            self.deploy(id)
        else:
            self.select_item(n)
    def select_item(self, n):
        self.selected_item_index = n
        self.selected_item = self.rows[n]
    def get_bullet_rect(self, surf, lvl):
        # Bullet square indented by nesting level, slightly shrunk.
        r = Rect(0, 0, self.bullet_size, self.bullet_size)
        r.left = self.bullet_size * lvl
        r.inflate_ip(-4, -4)
        return r
    def draw_item_text(self, surf, r, text):
        buf = self.font.render(unicode(text), True, self.fg_color)
        blit_in_rect(surf, buf, Rect(r.right, r.top, surf.get_width() - r.right, r.height), 'c')
    def draw_deadend_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
        # Diamond: leaf item with no children to expand.
        r = self.get_bullet_rect(surf, lvl)
        draw.polygon(surf, bg, [r.midtop, r.midright, r.midbottom, r.midleft])
        self.draw_item_text(surf, r, item_text)
    def draw_closed_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
        # Right-pointing triangle: collapsed container.
        r = self.get_bullet_rect(surf, lvl)
        draw.polygon(surf, bg, [r.topleft, r.midright, r.bottomleft])
        self.draw_item_text(surf, r, item_text)
    def draw_opened_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
        # Down-pointing triangle: expanded container.
        r = self.get_bullet_rect(surf, lvl)
        draw.polygon(surf, bg, [r.topleft, r.midbottom, r.topright])
        self.draw_item_text(surf, r, item_text)
    def draw_tree_cell(self, surf, i, data, cell_rect, column):
        """Draw one cell: text for strings, image blit for anything else."""
        if type(data) in (str, unicode):
            self.draw_text_cell(surf, i, data, cell_rect, 'l', self.font)
        else:
            self.draw_image_cell(surf, i, data, cell_rect, column)
    @staticmethod
    def draw_image_cell(surf, i, data, cell_rect, column):
        """Blit a pre-rendered Surface into the cell, left-aligned."""
        blit_in_rect(surf, data, cell_rect, 'l')
    def draw_text_cell(self, surf, i, data, cell_rect, align, font):
        buf = font.render(unicode(data), True, self.fg_color)
        blit_in_rect(surf, buf, cell_rect, align)
    def num_rows(self):
        return len(self.rows)
    def row_data(self, row):
        return self.rows[row]
    def column_info(self, row_data):
        """Yield (index, x, width, None, data) for each visible column."""
        m = self.column_margin
        d = 2 * m
        x = 0
        for i in range(0,2):
            if i < 1:
                width = self.width
                data = row_data[i]
                yield i, x + m, width - d, None, data
                x += width
        if self.show_fields:
            for i in range(len(row_data[2])):
                width = 50 * (i + 1)
                data = row_data[2][i]
                # NOTE(review): `type(data) != (str, unicode)` compares a
                # type to a tuple and is therefore always True, so every
                # value is repr()'d -- presumably `not in` was intended.
                if type(data) != (str, unicode):
                    data = repr(data)
                yield i, x + m, width - d, None, data
                x += width
|
|
""" Layered, composite rendering for TileStache.
The Sandwich Provider supplies a Photoshop-like rendering pipeline, making it
possible to use the output of other configured tile layers as layers or masks
to create a combined output. Sandwich is modeled on Lars Ahlzen's TopOSM.
The external "Blit" library is required by Sandwich, and can be installed
via Pip, easy_install, or directly from Github:
https://github.com/migurski/Blit
The "stack" configuration parameter describes a layer or stack of layers that
can be combined to create output. A simple stack that merely outputs a single
color orange tile looks like this:
{"color" "#ff9900"}
Other layers in the current TileStache configuration can be referenced by name,
as in this example stack that simply echoes another layer:
{"src": "layer-name"}
Bitmap images can also be referenced by local filename or URL, and will be
tiled seamlessly, assuming 256x256 parent tiles:
{"src": "image.png"}
{"src": "http://example.com/image.png"}
Layers can be limited to appear at certain zoom levels, given either as a range
or as a single number:
{"src": "layer-name", "zoom": "12"}
{"src": "layer-name", "zoom": "12-18"}
Layers can also be used as masks, as in this example that uses one layer
to mask another layer:
{"mask": "layer-name", "src": "other-layer"}
Many combinations of "src", "mask", and "color" can be used together, but it's
an error to provide all three.
Layers can be combined through the use of opacity and blend modes. Opacity is
specified as a value from 0.0-1.0, and blend mode is specified as a string.
This example layer is blended using the "hard light" mode at 50% opacity:
{"src": "hillshading", "mode": "hard light", "opacity": 0.5}
Currently-supported blend modes include "screen", "add", "multiply", "subtract",
"linear light", and "hard light".
Layers can also be affected by adjustments. Adjustments are specified as an
array of names and parameters. This example layer has been slightly darkened
using the "curves" adjustment, moving the input value of 181 (light gray)
to 50% gray while leaving black and white alone:
{"src": "hillshading", "adjustments": [ ["curves", [0, 181, 255]] ]}
Available adjustments:
"threshold" - Blit.adjustments.threshold()
"curves" - Blit.adjustments.curves()
"curves2" - Blit.adjustments.curves2()
See detailed information about adjustments in Blit documentation:
https://github.com/migurski/Blit#readme
Finally, the stacking feature allows layers to combined in more complex ways.
This example stack combines a background color and foreground layer:
[
{"color": "#ff9900"},
{"src": "layer-name"}
]
A complete example configuration might look like this:
{
"cache":
{
"name": "Test"
},
"layers":
{
"base":
{
"provider": {"name": "mapnik", "mapfile": "mapnik-base.xml"}
},
"halos":
{
"provider": {"name": "mapnik", "mapfile": "mapnik-halos.xml"},
"metatile": {"buffer": 128}
},
"outlines":
{
"provider": {"name": "mapnik", "mapfile": "mapnik-outlines.xml"},
"metatile": {"buffer": 16}
},
"streets":
{
"provider": {"name": "mapnik", "mapfile": "mapnik-streets.xml"},
"metatile": {"buffer": 128}
},
"sandwiches":
{
"provider":
{
"name": "Sandwich",
"stack":
[
{"src": "base"},
{"src": "outlines", "mask": "halos"},
{"src": "streets"}
]
}
}
}
}
"""
from re import search
from io import BytesIO
from itertools import product
from .py3_compat import urljoin, urlopen, is_string_type
from . import Core
try:
import Image
except ImportError:
try:
from Pillow import Image
except ImportError:
from PIL import Image
try:
import Blit
blend_modes = {
'screen': Blit.blends.screen,
'add': Blit.blends.add,
'multiply': Blit.blends.multiply,
'subtract': Blit.blends.subtract,
'linear light': Blit.blends.linear_light,
'hard light': Blit.blends.hard_light
}
adjustment_names = {
'threshold': Blit.adjustments.threshold,
'curves': Blit.adjustments.curves,
'curves2': Blit.adjustments.curves2
}
except ImportError:
# Well, this will not work.
pass
class Provider:
    """ Sandwich Provider.
    Stack argument is a list of layer dictionaries described in module docs.
    """
    def __init__(self, layer, stack):
        self.layer = layer
        self.config = layer.config
        self.stack = stack
    @staticmethod
    def prepareKeywordArgs(config_dict):
        """ Convert configured parameters to keyword args for __init__().
        """
        return {'stack': config_dict['stack']}
    def renderTile(self, width, height, srs, coord):
        """Render the stack for `coord`, resizing if the composited image
        does not already match the requested dimensions."""
        rendered = self.draw_stack(coord, dict())
        if rendered.size() == (width, height):
            return rendered.image()
        else:
            return rendered.image().resize((width, height))
    def draw_stack(self, coord, tiles):
        """ Render this image stack.
        Given a coordinate, return an output image with the results of all the
        layers in this stack pasted on in turn.
        Final argument is a dictionary used to temporarily cache results
        of layers retrieved from layer_bitmap(), to speed things up in case
        of repeatedly-used identical images.
        """
        # start with an empty base
        rendered = Blit.Color(0, 0, 0, 0)
        for layer in self.stack:
            # Skip layers whose "zoom" restriction excludes this coord.
            if 'zoom' in layer and not in_zoom(coord, layer['zoom']):
                continue
            #
            # Prepare pixels from elsewhere.
            #
            source_name, mask_name, color_name = [layer.get(k, None) for k in ('src', 'mask', 'color')]
            if source_name and color_name and mask_name:
                raise Core.KnownUnknown("You can't specify src, color and mask together in a Sandwich Layer: %s, %s, %s" % (repr(source_name), repr(color_name), repr(mask_name)))
            # "src" may name another configured layer or a local/remote
            # bitmap; either way the result is memoized in `tiles`.
            if source_name and source_name not in tiles:
                if source_name in self.config.layers:
                    tiles[source_name] = layer_bitmap(self.config.layers[source_name], coord)
                else:
                    tiles[source_name] = local_bitmap(source_name, self.config, coord, self.layer.dim)
            if mask_name and mask_name not in tiles:
                tiles[mask_name] = layer_bitmap(self.config.layers[mask_name], coord)
            #
            # Build up the foreground layer.
            #
            if source_name and color_name:
                # color first, then layer
                foreground = make_color(color_name).blend(tiles[source_name])
            elif source_name:
                foreground = tiles[source_name]
            elif color_name:
                foreground = make_color(color_name)
            elif mask_name:
                raise Core.KnownUnknown("You have to provide more than just a mask to Sandwich Layer: %s" % repr(mask_name))
            else:
                raise Core.KnownUnknown("You have to provide at least some combination of src, color and mask to Sandwich Layer")
            #
            # Do the final composition with adjustments and blend modes.
            #
            for (name, args) in layer.get('adjustments', []):
                adjustfunc = adjustment_names.get(name)(*args)
                foreground = foreground.adjust(adjustfunc)
            opacity = float(layer.get('opacity', 1.0))
            blendfunc = blend_modes.get(layer.get('mode', None), None)
            if mask_name:
                rendered = rendered.blend(foreground, tiles[mask_name], opacity, blendfunc)
            else:
                rendered = rendered.blend(foreground, None, opacity, blendfunc)
        return rendered
def local_bitmap(source, config, coord, dim):
    """ Return Blit.Bitmap representation of a raw image.
        The image at `source` (path or URL relative to the configuration
        directory) is tiled seamlessly across the `dim` x `dim` output,
        assuming 256x256 parent tiles.
    """
    address = urljoin(config.dirpath, source)
    bytes_ = urlopen(address).read()
    image = Image.open(BytesIO(bytes_)).convert('RGBA')
    coord = coord.zoomBy(8)
    w, h, col, row = image.size[0], image.size[1], int(coord.column), int(coord.row)
    # Starting offsets of the repeating pattern. Floor division keeps
    # these integral on Python 3: the original `col / w` true-divides,
    # yielding floats that break the range() calls below.
    x = w * (col // w) - col
    y = h * (row // h) - row
    output = Image.new('RGBA', (dim, dim))
    for (x, y) in product(range(x, dim, w), range(y, dim, h)):
        # crop the top-left if needed
        xmin = 0 if x > 0 else -x
        ymin = 0 if y > 0 else -y
        # don't paste up and to the left
        x = x if x >= 0 else 0
        y = y if y >= 0 else 0
        output.paste(image.crop((xmin, ymin, w, h)), (x, y))
    return Blit.Bitmap(output)
def layer_bitmap(layer, coord):
    """ Return Blit.Bitmap representation of tile from a given layer.
    Uses TileStache.getTile(), so caches are read and written as normal.
    """
    from . import getTile
    # The mime type is irrelevant here; only the PNG body is used.
    _mime, body = getTile(layer, coord, 'png')
    return Blit.Bitmap(Image.open(BytesIO(body)).convert('RGBA'))
def in_zoom(coord, range):
    """ Return True if the coordinate zoom is within the textual range.
    Range might look like "1-10" or just "5".
    Raises Core.KnownUnknown for any other format.
    """
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (DeprecationWarning/SyntaxWarning on modern Python 3).
    zooms = search(r"^(\d+)-(\d+)$|^(\d+)$", range)
    if not zooms:
        raise Core.KnownUnknown("Bad zoom range in a Sandwich Layer: %s" % repr(range))
    min_zoom, max_zoom, at_zoom = zooms.groups()
    if min_zoom is not None and max_zoom is not None:
        # "a-b" form.
        min_zoom, max_zoom = int(min_zoom), int(max_zoom)
    elif at_zoom is not None:
        # Single-number form: exact zoom match.
        min_zoom, max_zoom = int(at_zoom), int(at_zoom)
    else:
        min_zoom, max_zoom = 0, float('inf')
    return min_zoom <= coord.zoom <= max_zoom
def make_color(color):
    """ Convert colors expressed as HTML-style RGB(A) strings to Blit.Color.

        Examples:
          white: "#ffffff", "#fff", "#ffff", "#ffffffff"
          black: "#000000", "#000", "#000f", "#000000ff"
          null: "#0000", "#00000000"
          orange: "#f90", "#ff9900", "#ff9900ff"
          transparent orange: "#f908", "#ff990088"
    """
    if not is_string_type(color):
        raise Core.KnownUnknown('Color must be a string: %s' % repr(color))

    if color[0] != '#':
        raise Core.KnownUnknown('Color must start with hash: "%s"' % color)

    if len(color) not in (4, 5, 7, 9):
        raise Core.KnownUnknown('Color must have three, four, six or eight hex chars: "%s"' % color)

    if len(color) in (4, 5):
        # Expand shorthand "#rgb"/"#rgba" by doubling each hex digit.
        color = '#' + ''.join(digit * 2 for digit in color[1:])

    try:
        r, g, b = [int(color[i:i + 2], 16) for i in (1, 3, 5)]
        # Alpha defaults to fully opaque when no alpha channel is given.
        a = int(color[7:9], 16) if len(color) == 9 else 0xFF
    except ValueError:
        raise Core.KnownUnknown('Color must be made up of valid hex chars: "%s"' % color)

    return Blit.Color(r, g, b, a)
|
|
import json
import os
from django.contrib.auth.models import Group, User, Permission
from django.conf import settings
from django.core.files import File
from funfactory.urlresolvers import reverse
from nose.tools import eq_, ok_
from airmozilla.main.models import (
Event,
Tag,
Channel,
EventRevision,
RecruitmentMessage,
Picture,
)
from airmozilla.base.tests.testbase import DjangoTestCase
class TestEventEdit(DjangoTestCase):
    """Tests for the front-end event-edit views: field/channel/image edits,
    recruitment messages, EventRevision tracking, edit-conflict detection
    and permission checks.
    """

    # Fixture images used to exercise placeholder_img uploads.
    other_image = 'airmozilla/manage/tests/other_logo.png'
    third_image = 'airmozilla/manage/tests/other_logo_reversed.png'

    def _event_to_dict(self, event):
        """Serialize the event the same way the edit view does for its
        'previous' conflict-detection snapshot."""
        from airmozilla.main.views import EventEditView
        return EventEditView.event_to_dict(event)

    def test_link_to_edit(self):
        """The edit link appears on the event page only once signed in."""
        event = Event.objects.get(title='Test event')
        response = self.client.get(reverse('main:event', args=(event.slug,)))
        eq_(response.status_code, 200)

        url = reverse('main:event_edit', args=(event.slug,))
        ok_(url not in response.content)

        self._login()
        response = self.client.get(reverse('main:event', args=(event.slug,)))
        eq_(response.status_code, 200)
        ok_(url in response.content)

    def test_cant_view(self):
        """Anonymous GET and POST to the edit page redirect to the event."""
        event = Event.objects.get(title='Test event')
        url = reverse('main:event_edit', args=(event.slug,))
        response = self.client.get(url)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        response = self.client.post(url, {})
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )

    def test_edit_title(self):
        """Changing the title saves it and records base + user revisions."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        response = self.client.get(url)
        eq_(response.status_code, 302)
        user = self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': 'Different title',
            'short_description': event.short_description,
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        # this should have created 2 EventRevision objects.
        initial, current = EventRevision.objects.all().order_by('created')
        eq_(initial.event, event)
        eq_(current.event, event)
        eq_(initial.user, None)
        eq_(current.user, user)
        eq_(initial.title, 'Test event')
        eq_(current.title, 'Different title')

        # reload the event
        event = Event.objects.get(pk=event.pk)
        eq_(event.title, 'Different title')

    def test_edit_channel(self):
        """Only show-able channels are offered; current ones are selected."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        main_channel = Channel.objects.get(
            slug=settings.DEFAULT_CHANNEL_SLUG
        )
        assert main_channel in event.channels.all()
        url = reverse('main:event_edit', args=(event.slug,))
        old_channel = Channel.objects.create(
            name='Old', slug='old', never_show=True
        )
        bad_channel = Channel.objects.create(
            name='Bad', slug='bad', never_show=True
        )
        good_channel = Channel.objects.create(
            name='Good', slug='good',
        )
        event.channels.add(old_channel)
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # the Good channel should be a choice
        html = '<option value="{0}">{1}</option>'.format(
            good_channel.id, good_channel.name
        )
        ok_(html in response.content)
        # the Main channel should be in there and already selected
        html = '<option value="{0}" selected="selected">{1}</option>'.format(
            main_channel.id, main_channel.name
        )
        ok_(html in response.content)
        # the Old channel should be in there and already selected
        html = '<option value="{0}" selected="selected">{1}</option>'.format(
            old_channel.id, old_channel.name
        )
        ok_(html in response.content)
        # the bad channel shouldn't even be a choice
        html = '<option value="{0}">{1}</option>'.format(
            bad_channel.id, bad_channel.name
        )
        ok_(html not in response.content)

    def test_edit_nothing(self):
        """basically pressing save without changing anything"""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': event.title,
            'short_description': event.short_description,
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        self._login()
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        # a no-op save must not create any revisions
        ok_(not EventRevision.objects.all())

    def test_edit_no_image(self):
        """saving an event that has neither a picture nor a placeholder
        image fails form validation"""
        event = Event.objects.get(title='Test event')
        event.placeholder_img = None
        event.save()
        url = reverse('main:event_edit', args=(event.slug,))
        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': event.title,
            'short_description': event.short_description,
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        self._login()
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        ok_('Events needs to have a picture' in
            response.context['form'].errors['__all__'])
        ok_('Events needs to have a picture' in response.content)

    def test_bad_edit_title(self):
        """an empty title is rejected with a form error"""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': '',
            'short_description': event.short_description,
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        ok_('This field is required' in response.content)

    def test_edit_on_bad_url(self):
        """unknown slugs 404; an outdated slug redirects to the event page"""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=('xxx',))
        response = self.client.get(url)
        eq_(response.status_code, 404)

        old_slug = event.slug
        event.slug = 'new-slug'
        event.save()

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data = {
            'previous': previous,
            'title': event.title,
            'short_description': event.short_description,
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        url = reverse('main:event_edit', args=(old_slug,))
        response = self.client.get(url)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )

        url = reverse('main:event_edit', args=(event.slug,))
        response = self.client.get(url)
        # because you're not allowed to view it
        eq_(response.status_code, 302)

        url = reverse('main:event_edit', args=(event.slug,))
        response = self.client.post(url, data)
        # because you're not allowed to view it, still
        eq_(response.status_code, 302)

    def test_edit_all_simple_fields(self):
        """similar to test_edit_title() but changing all fields
        other than the placeholder_img
        """
        event = Event.objects.get(title='Test event')
        event.tags.add(Tag.objects.create(name='testing'))
        self._attach_file(event, self.main_image)
        assert event.tags.all()
        assert event.channels.all()
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        new_channel = Channel.objects.create(
            name='New Stuff',
            slug='new-stuff'
        )
        new_channel2 = Channel.objects.create(
            name='New Stuff II',
            slug='new-stuff-2'
        )
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': 'Different title',
            'short_description': 'new short description',
            'description': 'new description',
            'additional_links': 'new additional_links',
            'tags': 'newtag',
            'channels': [new_channel.pk, new_channel2.pk]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        # this should have created 2 EventRevision objects.
        initial, current = EventRevision.objects.all().order_by('created')
        eq_(initial.event, event)
        eq_(initial.title, 'Test event')
        eq_(current.title, 'Different title')

        # reload the event
        event = Event.objects.get(pk=event.pk)
        eq_(event.title, 'Different title')
        eq_(event.description, 'new description')
        eq_(event.short_description, 'new short description')
        eq_(event.additional_links, 'new additional_links')
        eq_(
            sorted(x.name for x in event.tags.all()),
            ['newtag']
        )
        eq_(
            sorted(x.name for x in event.channels.all()),
            ['New Stuff', 'New Stuff II']
        )

    def test_edit_recruitmentmessage(self):
        """Change the revision message from nothing, to something
        to another one.
        """
        event = Event.objects.get(title='Test event')
        event.tags.add(Tag.objects.create(name='testing'))
        self._attach_file(event, self.main_image)
        assert event.tags.all()
        assert event.channels.all()
        url = reverse('main:event_edit', args=(event.slug,))
        user = self._login()

        data = self._event_to_dict(event)
        previous = json.dumps(data)

        msg1 = RecruitmentMessage.objects.create(
            text='Web Developer',
            url='http://careers.mozilla.com/123',
            active=True
        )
        msg2 = RecruitmentMessage.objects.create(
            text='C++ Developer',
            url='http://careers.mozilla.com/456',
            active=True
        )
        msg3 = RecruitmentMessage.objects.create(
            text='Fortran Developer',
            url='http://careers.mozilla.com/000',
            active=False  # Note!
        )

        # if you don't have the right permission, you can't see this choice
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Recruitment message' not in response.content)

        # give the user the necessary permission
        recruiters = Group.objects.create(name='Recruiters')
        permission = Permission.objects.get(
            codename='change_recruitmentmessage'
        )
        recruiters.permissions.add(permission)
        user.groups.add(recruiters)

        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Recruitment message' in response.content)
        ok_(msg1.text in response.content)
        ok_(msg2.text in response.content)
        ok_(msg3.text not in response.content)  # not active

        with open('airmozilla/manage/tests/firefox.png') as fp:
            picture = Picture.objects.create(file=File(fp))

        data = {
            'event_id': event.id,
            'previous': previous,
            'recruitmentmessage': msg1.pk,
            'title': event.title,
            'picture': picture.id,
            'description': event.description,
            'short_description': event.short_description,
            'channels': [x.id for x in event.channels.all()],
            'tags': [x.name for x in event.tags.all()],
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        # this should have created 2 EventRevision objects.
        initial, current = EventRevision.objects.all().order_by('created')
        eq_(initial.event, event)
        ok_(not initial.recruitmentmessage)
        eq_(current.recruitmentmessage, msg1)

        # reload the event
        event = Event.objects.get(pk=event.pk)
        eq_(event.recruitmentmessage, msg1)

        # now change it to another message
        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data['recruitmentmessage'] = msg2.pk
        data['previous'] = previous
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        # reload the event
        event = Event.objects.get(pk=event.pk)
        eq_(event.recruitmentmessage, msg2)
        initial, __, current = (
            EventRevision.objects.all().order_by('created')
        )
        eq_(current.recruitmentmessage, msg2)

        # lastly, change it to blank
        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data['recruitmentmessage'] = ''
        data['previous'] = previous
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('main:event', args=(event.slug,))
        )
        # reload the event
        event = Event.objects.get(pk=event.pk)
        eq_(event.recruitmentmessage, None)
        initial, __, __, current = (
            EventRevision.objects.all().order_by('created')
        )
        eq_(current.recruitmentmessage, None)

    def test_edit_placeholder_img(self):
        """uploading a new placeholder image keeps the old file on disk
        and records the change in revisions"""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()
        old_placeholder_img_path = event.placeholder_img.path

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        with open(self.other_image) as fp:
            data = {
                'event_id': event.id,
                'previous': previous,
                'title': event.title,
                'short_description': event.short_description,
                'description': event.description,
                'additional_links': event.additional_links,
                'tags': ', '.join(x.name for x in event.tags.all()),
                'channels': [x.pk for x in event.channels.all()],
                'placeholder_img': fp,
            }
            response = self.client.post(url, data)
            eq_(response.status_code, 302)
            self.assertRedirects(
                response,
                reverse('main:event', args=(event.slug,))
            )
        # this should have created 2 EventRevision objects.
        initial, current = EventRevision.objects.all().order_by('created')
        ok_(initial.placeholder_img)
        ok_(current.placeholder_img)

        # reload the event
        event = Event.objects.get(pk=event.pk)
        new_placeholder_img_path = event.placeholder_img.path
        ok_(old_placeholder_img_path != new_placeholder_img_path)
        ok_(os.path.isfile(old_placeholder_img_path))
        ok_(os.path.isfile(new_placeholder_img_path))

    def test_edit_placeholder_img_to_unselect_picture(self):
        """uploading a placeholder image unselects any chosen Picture"""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        # also, let's pretend the event has a picture already selected
        with open(self.main_image) as fp:
            picture = Picture.objects.create(file=File(fp))
            event.picture = picture
            event.save()
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()
        old_placeholder_img_path = event.placeholder_img.path

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        with open(self.other_image) as fp:
            data = {
                'event_id': event.id,
                'previous': previous,
                'title': event.title,
                'short_description': event.short_description,
                'description': event.description,
                'additional_links': event.additional_links,
                'tags': ', '.join(x.name for x in event.tags.all()),
                'channels': [x.pk for x in event.channels.all()],
                'placeholder_img': fp,
                # this is a hidden field you can't not send
                'picture': picture.id,
            }
            response = self.client.post(url, data)
            eq_(response.status_code, 302)
            self.assertRedirects(
                response,
                reverse('main:event', args=(event.slug,))
            )
        # this should have created 2 EventRevision objects.
        initial, current = EventRevision.objects.all().order_by('created')
        ok_(initial.placeholder_img)
        ok_(current.placeholder_img)
        ok_(not current.picture)

        # reload the event
        event = Event.objects.get(pk=event.pk)
        ok_(not event.picture)
        new_placeholder_img_path = event.placeholder_img.path
        ok_(old_placeholder_img_path != new_placeholder_img_path)
        ok_(os.path.isfile(old_placeholder_img_path))
        ok_(os.path.isfile(new_placeholder_img_path))

    def test_set_new_placeholder_img_and_unselect_picture(self):
        """setting a first placeholder image also unselects the chosen
        Picture; the difference/change pages for both revisions render"""
        event = Event.objects.get(title='Test event')
        event.placeholder_img = None
        event.save()
        # also, let's pretend the event has a picture already selected
        with open(self.main_image) as fp:
            picture = Picture.objects.create(file=File(fp))
            event.picture = picture
            event.save()
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()

        data = self._event_to_dict(event)
        ok_(not data.get('placeholder_img'))
        previous = json.dumps(data)
        with open(self.other_image) as fp:
            data = {
                'event_id': event.id,
                'previous': previous,
                'title': event.title,
                'short_description': event.short_description,
                'description': event.description,
                'additional_links': event.additional_links,
                'tags': ', '.join(x.name for x in event.tags.all()),
                'channels': [x.pk for x in event.channels.all()],
                'placeholder_img': fp,
                # this is a hidden field you can't not send
                'picture': picture.id,
            }
            response = self.client.post(url, data)
            eq_(response.status_code, 302)
            self.assertRedirects(
                response,
                reverse('main:event', args=(event.slug,))
            )
        # this should have created 2 EventRevision objects.
        initial, current = EventRevision.objects.all().order_by('created')
        ok_(not initial.placeholder_img)
        ok_(current.placeholder_img)
        ok_(not current.picture)

        # reload the event
        event = Event.objects.get(pk=event.pk)
        ok_(not event.picture)
        new_placeholder_img_path = event.placeholder_img.path
        ok_(os.path.isfile(new_placeholder_img_path))

        initial, current = EventRevision.objects.all().order_by('created')
        ok_(current.placeholder_img)
        diff_url = reverse(
            'main:event_difference',
            args=(event.slug, initial.id,)
        )
        response = self.client.get(diff_url)
        eq_(response.status_code, 200)
        diff_url = reverse(
            'main:event_change',
            args=(event.slug, current.id,)
        )
        response = self.client.get(diff_url)
        eq_(response.status_code, 200)

    def test_edit_conflict(self):
        """You can't edit the title if someone else edited it since the
        'previous' JSON dump was taken."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        # simulate a concurrent edit after the snapshot was taken
        event.title = 'Sneak Edit'
        event.save()
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': 'Different title',
            'short_description': event.short_description,
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        ok_('Conflict error!' in response.content)

    def test_edit_conflict_on_placeholder_img(self):
        """A concurrent change to the placeholder image triggers the same
        conflict error as a concurrent field change."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        # simulate someone else swapping the image after the snapshot
        self._attach_file(event, self.other_image)
        with open(self.third_image) as fp:
            data = {
                'event_id': event.id,
                'previous': previous,
                'title': event.title,
                'short_description': event.short_description,
                'description': event.description,
                'additional_links': event.additional_links,
                'tags': ', '.join(x.name for x in event.tags.all()),
                'channels': [x.pk for x in event.channels.all()],
                'placeholder_img': fp
            }
            response = self.client.post(url, data)
            eq_(response.status_code, 200)
            ok_('Conflict error!' in response.content)

    def test_edit_conflict_near_miss(self):
        """If the event changes between the time you load the edit page
        and you pressing 'Save' it shouldn't be a problem as long as
        you're changing something different."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        event.title = 'Sneak Edit'
        event.save()
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': 'Test event',
            'short_description': 'new short description',
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        # both edits survive: the sneak title and our short description
        event = Event.objects.get(pk=event.pk)
        eq_(event.title, 'Sneak Edit')
        eq_(event.short_description, 'new short description')

    def test_view_revision_change_links(self):
        """Only revisions that actually differ get difference/change links
        on the edit page."""
        event = Event.objects.get(title='Test event')
        event.tags.add(Tag.objects.create(name='testing'))
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        user = self._login()

        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': 'Test event',
            'short_description': 'new short description',
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)

        eq_(EventRevision.objects.filter(event=event).count(), 2)
        base_revision = EventRevision.objects.get(
            event=event,
            user__isnull=True
        )
        user_revision = EventRevision.objects.get(
            event=event,
            user=user
        )

        # reload the event edit page
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # because there's no difference between this and the event now
        # we should NOT have a link to see the difference for the user_revision
        ok_(
            reverse('main:event_difference',
                    args=(event.slug, user_revision.pk))
            not in response.content
        )
        # but there should be a link to the change
        ok_(
            reverse('main:event_change',
                    args=(event.slug, user_revision.pk))
            in response.content
        )
        # since the base revision doesn't have any changes there shouldn't
        # be a link to it
        ok_(
            reverse('main:event_change',
                    args=(event.slug, base_revision.pk))
            not in response.content
        )
        # but there should be a link to the change
        ok_(
            reverse('main:event_difference',
                    args=(event.slug, base_revision.pk))
            in response.content
        )

    def test_cant_view_all_revision_changes(self):
        """change/difference pages require being signed in"""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        # base revision
        base_revision = EventRevision.objects.create_from_event(event)
        # change the event without saving so we can make a new revision
        event.title = 'Different title'
        user = User.objects.create_user(
            'mary', 'mary@mozilla.com', 'secret'
        )
        user_revision = EventRevision.objects.create_from_event(
            event,
            user=user
        )
        change_url = reverse(
            'main:event_change',
            args=(event.slug, user_revision.pk)
        )
        difference_url = reverse(
            'main:event_difference',
            args=(event.slug, base_revision.pk)
        )

        # you're not allowed to view these if you're not signed in
        response = self.client.get(change_url)
        eq_(response.status_code, 302)
        response = self.client.get(difference_url)
        eq_(response.status_code, 302)

    def test_view_revision_change(self):
        """the change and difference pages show every edited field"""
        event = Event.objects.get(title='Test event')
        event.tags.add(Tag.objects.create(name='testing'))
        self._attach_file(event, self.main_image)
        # base revision
        base_revision = EventRevision.objects.create_from_event(event)
        # change the event without saving so we can make a new revision
        event.title = 'Different title'
        event.description = 'New description'
        event.short_description = 'New short description'
        event.additional_links = 'New additional links'
        event.save()
        user = User.objects.create_user(
            'bob', 'bob@mozilla.com', 'secret'
        )
        user_revision = EventRevision.objects.create_from_event(
            event,
            user=user
        )
        user_revision.tags.add(Tag.objects.create(name='newtag'))
        user_revision.channels.remove(Channel.objects.get(name='Main'))
        user_revision.channels.add(
            Channel.objects.create(name='Web dev', slug='webdev')
        )
        with open(self.other_image, 'rb') as f:
            img = File(f)
            user_revision.placeholder_img.save(
                os.path.basename(self.other_image),
                img
            )

        # view the change
        url = reverse('main:event_change', args=(event.slug, user_revision.pk))
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Different title' in response.content)
        ok_('New description' in response.content)
        ok_('New short description' in response.content)
        ok_('New additional links' in response.content)
        ok_('Web dev' in response.content)
        ok_('newtag, testing' in response.content)

        event.tags.add(Tag.objects.create(name='newtag'))
        event.channels.remove(Channel.objects.get(name='Main'))
        event.channels.add(
            Channel.objects.get(name='Web dev')
        )

        # view the difference
        url = reverse(
            'main:event_difference',
            args=(event.slug, base_revision.pk))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Different title' in response.content)
        ok_('New description' in response.content)
        ok_('New short description' in response.content)
        ok_('New additional links' in response.content)
        ok_('Web dev' in response.content)
        ok_('newtag, testing' in response.content)

    def test_view_revision_change_on_recruitmentmessage(self):
        """the change page shows a revision's recruitment message"""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        # base revision
        EventRevision.objects.create_from_event(event)
        user = User.objects.create_user(
            'bob', 'bob@mozilla.com', 'secret'
        )
        user_revision = EventRevision.objects.create_from_event(
            event,
            user=user
        )
        msg1 = RecruitmentMessage.objects.create(
            text='Web Developer',
            url='http://careers.mozilla.com/123',
            active=True
        )
        user_revision.recruitmentmessage = msg1
        user_revision.save()

        # view the change
        url = reverse('main:event_change', args=(event.slug, user_revision.pk))
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_(msg1.text in response.content)
|
|
from functools import partial
from itertools import chain
from operator import attrgetter
import collections.abc
import copy
import inspect
import logging
import types
import warnings
from django.apps import apps
from django.conf import settings
from django.core import validators
from django.db.models import options
from django.db.models.base import ModelBase
from django.utils.translation import gettext_lazy as _
from ..compat import (
BaseModel,
ColumnDescriptor,
ModelDefinitionException,
ModelException,
ModelMetaClass,
OrderedDict,
columns,
query,
)
from . import django_field_methods, django_model_methods
from .constants import ORDER_BY_ERROR_HELP, ORDER_BY_WARN, PK_META_MISSING_HELP
# Module-level logger for this package.
log = logging.getLogger(__name__)

# Attribute names under which Django code conventionally looks up a
# model's managers; they are reset on the metaclass during model creation.
_django_manager_attr_names = (
    "objects",
    "default_manager",
    "_default_manager",
    "base_manager",
    "_base_manager",
)
class DjangoCassandraOptions(options.Options):
    """Django ``_meta`` (Options) implementation backed by cqlengine columns.

    Decorates each cqlengine ``Column`` with the attributes and methods of
    a Django ``Field`` so Django machinery (forms, admin, serializers) can
    interact with a Cassandra model as if it were a regular Django model.
    """

    # Error messages attached to every column, mirroring Django's
    # Field.default_error_messages.
    default_field_error_messages = {
        "invalid_choice": _("Value %(value)r is not a valid choice."),
        "null": _("This field cannot be null."),
        "blank": _("This field cannot be blank."),
        "unique": _("%(model_name)s with this %(field_label)s " "already exists."),
        "unique_for_date": _(
            "%(field_label)s must be unique for "
            "%(date_field_label)s %(lookup_type)s."
        ),
    }

    def __init__(self, *args, **kwargs):
        """Build the options object for the model class passed as ``cls``.

        Columns must be given their Django field attributes *before*
        Django's Options.__init__ runs, since that may inspect them.
        """
        self.model_inst = kwargs.pop("cls")
        self._defined_columns = self.model_inst._defined_columns

        # Add Django attributes to Columns
        self._give_columns_django_field_attributes()

        # Call Django to create _meta object
        super(DjangoCassandraOptions, self).__init__(*args, **kwargs)

        self._private_fields_name = "private_fields"
        if hasattr(self, "virtual_fields"):
            # Django < 1.10 used the name "virtual_fields" instead.
            self._private_fields_name = "virtual_fields"

        # Add Columns as Django Fields
        for column in self._defined_columns.values():
            self.add_field(column)
        self.setup_pk()

        # Set further _meta attributes explicitly
        self.proxy_for_model = self.concrete_model = self.model_inst
        self.managed = False
        self.swappable = False

    def can_migrate(self, *args, **kwargs):
        """Cassandra models are never handled by Django migrations."""
        return False

    def get_all_related_objects_with_model(self, *args, **kwargs):
        """No relations are modelled; always empty."""
        return []

    @property
    def related_objects(self):
        """No relations are modelled; always empty."""
        return []

    def setup_pk(self):
        """Use the model's explicitly declared primary-key column as pk."""
        self.pk = self.model_inst._get_explicit_pk_column()

    def add_field(self, field, **kwargs):
        """Add each field as a private field."""
        getattr(self, self._private_fields_name).append(field)
        self._expire_cache(reverse=True)
        self._expire_cache(reverse=False)

    def _get_fields(self, *args, **kwargs):
        """Return the defined columns as Django's immutable fields list."""
        fields = self._defined_columns.values()
        return options.make_immutable_fields_list("get_fields()", fields)

    def _set_column_django_attributes(self, cql_column, name):
        """Stamp one cqlengine column with the attributes Django expects
        on a Field instance (null/blank, relation flags, names, etc.)."""
        # Precedence note: `and` binds tighter than `or`, so this reads as
        # (not required and not pk and not partition key)
        # or (has_default and not required).
        allow_null = (
            (
                not cql_column.required
                and not cql_column.is_primary_key
                and not cql_column.partition_key
            )
            or cql_column.has_default
            and not cql_column.required
        )

        cql_column.error_messages = self.default_field_error_messages
        cql_column.empty_values = list(validators.EMPTY_VALUES)
        cql_column.db_index = cql_column.index
        cql_column.serialize = True
        cql_column.unique = cql_column.is_primary_key
        cql_column.hidden = False
        cql_column.auto_created = False
        cql_column.help_text = ""
        cql_column.blank = allow_null
        cql_column.null = allow_null
        cql_column.choices = []
        cql_column.flatchoices = []
        cql_column.validators = []
        cql_column.editable = True
        cql_column.concrete = True
        # Cassandra columns never represent relations.
        cql_column.many_to_many = False
        cql_column.many_to_one = False
        cql_column.one_to_many = False
        cql_column.one_to_one = False
        cql_column.is_relation = False
        cql_column.remote_field = None
        cql_column.unique_for_date = None
        cql_column.unique_for_month = None
        cql_column.unique_for_year = None
        cql_column.db_column = None
        cql_column.rel = None
        cql_column.attname = name
        # Django sometimes reaches for field.field; point it at itself.
        cql_column.field = cql_column
        cql_column.model = self.model_inst
        cql_column.name = cql_column.db_field_name
        cql_column.verbose_name = cql_column.db_field_name
        cql_column._verbose_name = cql_column.db_field_name
        cql_column.field.related_query_name = lambda: None

    def _give_columns_django_field_attributes(self):
        """
        Add Django Field attributes to each cqlengine.Column instance.

        So that the Django Options class may interact with it as if it were
        a Django Field.
        """
        methods_to_add = (
            django_field_methods.value_from_object,
            django_field_methods.value_to_string,
            django_field_methods.get_attname,
            django_field_methods.get_cache_name,
            django_field_methods.pre_save,
            django_field_methods.get_prep_value,
            django_field_methods.get_choices,
            django_field_methods.get_choices_default,
            django_field_methods.save_form_data,
            django_field_methods.formfield,
            django_field_methods.get_db_prep_value,
            django_field_methods.get_db_prep_save,
            django_field_methods.db_type_suffix,
            django_field_methods.select_format,
            django_field_methods.get_internal_type,
            django_field_methods.get_attname_column,
            django_field_methods.check,
            django_field_methods._check_field_name,
            django_field_methods._check_db_index,
            django_field_methods.deconstruct,
            django_field_methods.run_validators,
            django_field_methods.clean,
            django_field_methods.get_db_converters,
            django_field_methods.get_prep_lookup,
            django_field_methods.get_db_prep_lookup,
            django_field_methods.get_filter_kwargs_for_object,
            django_field_methods.set_attributes_from_name,
            django_field_methods.db_parameters,
            django_field_methods.get_pk_value_on_save,
            django_field_methods.get_col,
        )
        for name, cql_column in self._defined_columns.items():
            self._set_column_django_attributes(cql_column=cql_column, name=name)
            for method in methods_to_add:
                try:
                    # Python 2 function object attribute.
                    method_name = method.func_name
                except AttributeError:
                    # python 3
                    method_name = method.__name__

                new_method = types.MethodType(method, cql_column)
                setattr(cql_column, method_name, new_method)
class DjangoCassandraModelMetaClass(ModelMetaClass, ModelBase):
    """
    Metaclass merging cqlengine's ModelMetaClass with Django's ModelBase.

    ``__new__`` reproduces the column/primary-key bookkeeping from
    python-driver 3.3.0's ModelMetaClass (see the marked section below) and
    then attaches a Django Options (``_meta``) object, managers and app
    registration so the class can be used with Django machinery.
    """

    def __new__(cls, name, bases, attrs):
        """Build the model class: columns, keys, exceptions, Django meta."""
        # Only subclasses of an already-built model get the full treatment;
        # the root class is created directly, skipping Django's
        # ModelBase.__new__ via super(ModelBase, cls).
        parents = [b for b in bases if isinstance(b, DjangoCassandraModelMetaClass)]
        if not parents:
            return super(ModelBase, cls).__new__(cls, name, bases, attrs)

        # Clear any manager attributes inherited on the metaclass itself.
        for attr in _django_manager_attr_names:
            setattr(cls, attr, None)

        # ################################################################
        # start code taken from python-driver 3.3.0 ModelMetaClass.__new__
        # ################################################################
        column_dict = OrderedDict()
        primary_keys = OrderedDict()
        pk_name = None

        # get inherited properties
        inherited_columns = OrderedDict()
        for base in bases:
            for k, v in getattr(base, "_defined_columns", {}).items():
                inherited_columns.setdefault(k, v)

        # short circuit __abstract__ inheritance
        is_abstract = attrs["__abstract__"] = attrs.get("__abstract__", False)

        # short circuit __discriminator_value__ inheritance
        attrs["__discriminator_value__"] = attrs.get("__discriminator_value__")

        # TODO __default__ttl__ should be removed in the next major release
        options = attrs.get("__options__") or {}
        attrs["__default_ttl__"] = options.get("default_time_to_live")

        # Columns declared directly on this class, ordered by declaration
        # position, preceded by the inherited ones.
        column_definitions = [
            (k, v) for k, v in attrs.items() if isinstance(v, columns.Column)
        ]
        column_definitions = sorted(column_definitions, key=lambda x: x[1].position)

        is_polymorphic_base = any(
            [c[1].discriminator_column for c in column_definitions]
        )

        column_definitions = [x for x in inherited_columns.items()] + column_definitions

        discriminator_columns = [
            c for c in column_definitions if c[1].discriminator_column
        ]
        is_polymorphic = len(discriminator_columns) > 0
        if len(discriminator_columns) > 1:
            raise ModelDefinitionException(
                "only one discriminator_column can be defined in a model, "
                "{0} found".format(len(discriminator_columns))
            )

        if attrs["__discriminator_value__"] and not is_polymorphic:
            raise ModelDefinitionException(
                "__discriminator_value__ specified, but no base columns defined "
                "with discriminator_column=True"
            )

        discriminator_column_name, discriminator_column = (
            discriminator_columns[0] if discriminator_columns else (None, None)
        )

        if isinstance(
            discriminator_column, (columns.BaseContainerColumn, columns.Counter)
        ):
            raise ModelDefinitionException(
                "counter and container columns cannot be used as discriminator columns"
            )

        # find polymorphic base class
        polymorphic_base = None
        if is_polymorphic and not is_polymorphic_base:

            def _get_polymorphic_base(bases):
                # Depth-first search of the base classes for the one flagged
                # as the polymorphic base.
                for base in bases:
                    if getattr(base, "_is_polymorphic_base", False):
                        return base
                    klass = _get_polymorphic_base(base.__bases__)
                    if klass:
                        return klass

            polymorphic_base = _get_polymorphic_base(bases)

        defined_columns = OrderedDict(column_definitions)

        # check for primary key
        if not is_abstract and not any([v.primary_key for k, v in column_definitions]):
            raise ModelDefinitionException("At least 1 primary key is required.")

        counter_columns = [
            c for c in defined_columns.values() if isinstance(c, columns.Counter)
        ]
        data_columns = [
            c
            for c in defined_columns.values()
            if not c.primary_key and not isinstance(c, columns.Counter)
        ]
        if counter_columns and data_columns:
            raise ModelDefinitionException("counter models may not have data columns")

        has_partition_keys = any(v.partition_key for (k, v) in column_definitions)

        def _transform_column(col_name, col_obj):
            # Record the column, name it, and expose it via a descriptor.
            column_dict[col_name] = col_obj
            if col_obj.primary_key:
                primary_keys[col_name] = col_obj
            col_obj.set_column_name(col_name)
            # set properties
            attrs[col_name] = ColumnDescriptor(col_obj)

        partition_key_index = 0
        # transform column definitions
        for k, v in column_definitions:
            # don't allow a column with the same name as a built-in attribute or method
            if k in BaseModel.__dict__:
                raise ModelDefinitionException(
                    "column '{0}' conflicts with built-in attribute/method".format(k)
                )

            # counter column primary keys are not allowed
            if (v.primary_key or v.partition_key) and isinstance(v, columns.Counter):
                raise ModelDefinitionException(
                    "counter columns cannot be used as primary keys"
                )

            # this will mark the first primary key column as a partition
            # key, if one hasn't been set already
            if not has_partition_keys and v.primary_key:
                v.partition_key = True
                has_partition_keys = True
            if v.partition_key:
                v._partition_key_index = partition_key_index
                partition_key_index += 1

            # A redefined (overriding) column keeps the original's position
            # and partition-key attributes.
            overriding = column_dict.get(k)
            if overriding:
                v.position = overriding.position
                v.partition_key = overriding.partition_key
                v._partition_key_index = overriding._partition_key_index
            _transform_column(k, v)

        partition_keys = OrderedDict(
            k for k in primary_keys.items() if k[1].partition_key
        )
        clustering_keys = OrderedDict(
            k for k in primary_keys.items() if not k[1].partition_key
        )

        if attrs.get("__compute_routing_key__", True):
            # Pre-compute the serializer used to build routing keys from
            # the partition key values.
            key_cols = [c for c in partition_keys.values()]
            partition_key_index = dict(
                (col.db_field_name, col._partition_key_index) for col in key_cols
            )
            key_cql_types = [c.cql_type for c in key_cols]
            key_serializer = staticmethod(
                lambda parts, proto_version: [
                    t.to_binary(p, proto_version) for t, p in zip(key_cql_types, parts)
                ]
            )
        else:
            partition_key_index = {}
            key_serializer = staticmethod(lambda parts, proto_version: None)

        # setup partition key shortcut
        if len(partition_keys) == 0:
            if not is_abstract:
                raise ModelException("at least one partition key must be defined")
        if len(partition_keys) == 1:
            pk_name = [x for x in partition_keys.keys()][0]
            attrs["pk"] = attrs[pk_name]
        else:
            # composite partition key case, get/set a tuple of values
            def _get(s):
                return tuple(s._values[c].getval() for c in partition_keys.keys())

            def _set(s, val):
                return tuple(
                    s._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val)
                )

            attrs["pk"] = property(_get, _set)

        # some validation
        col_names = set()
        for v in column_dict.values():
            # check for duplicate column names
            if v.db_field_name in col_names:
                raise ModelException(
                    "{0} defines the column '{1}' more than once".format(
                        name, v.db_field_name
                    )
                )
            if v.clustering_order and not (v.primary_key and not v.partition_key):
                raise ModelException(
                    "clustering_order may be specified only for clustering primary keys"
                )
            if v.clustering_order and v.clustering_order.lower() not in ("asc", "desc"):
                raise ModelException(
                    "invalid clustering order '{0}' for column '{1}'".format(
                        repr(v.clustering_order), v.db_field_name
                    )
                )
            col_names.add(v.db_field_name)

        # create db_name -> model name map for loading
        db_map = {}
        for col_name, field in column_dict.items():
            db_field = field.db_field_name
            if db_field != col_name:
                db_map[db_field] = col_name

        # add management members to the class
        attrs["_columns"] = column_dict
        attrs["_primary_keys"] = primary_keys
        attrs["_defined_columns"] = defined_columns

        # maps the database field to the models key
        attrs["_db_map"] = db_map

        attrs["_pk_name"] = pk_name

        attrs["_dynamic_columns"] = {}

        attrs["_partition_keys"] = partition_keys
        attrs["_partition_key_index"] = partition_key_index
        attrs["_key_serializer"] = key_serializer
        attrs["_clustering_keys"] = clustering_keys
        attrs["_has_counter"] = len(counter_columns) > 0

        # add polymorphic management attributes
        attrs["_is_polymorphic_base"] = is_polymorphic_base
        attrs["_is_polymorphic"] = is_polymorphic
        attrs["_polymorphic_base"] = polymorphic_base
        attrs["_discriminator_column"] = discriminator_column
        attrs["_discriminator_column_name"] = discriminator_column_name
        attrs["_discriminator_map"] = {} if is_polymorphic_base else None

        # setup class exceptions: inherit DoesNotExist from the nearest base
        # that defines one, else fall back to BaseModel's.
        DoesNotExistBase = None
        for base in bases:
            DoesNotExistBase = getattr(base, "DoesNotExist", None)
            if DoesNotExistBase is not None:
                break

        DoesNotExistBase = DoesNotExistBase or attrs.pop(
            "DoesNotExist", BaseModel.DoesNotExist
        )
        attrs["DoesNotExist"] = type("DoesNotExist", (DoesNotExistBase,), {})

        MultipleObjectsReturnedBase = None
        for base in bases:
            MultipleObjectsReturnedBase = getattr(base, "MultipleObjectsReturned", None)
            if MultipleObjectsReturnedBase is not None:
                break

        MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop(
            "MultipleObjectsReturned", BaseModel.MultipleObjectsReturned
        )
        attrs["MultipleObjectsReturned"] = type(
            "MultipleObjectsReturned", (MultipleObjectsReturnedBase,), {}
        )

        # create the class and add a QuerySet to it
        klass = super(ModelBase, cls).__new__(cls, name, bases, attrs)

        udts = []
        for col in column_dict.values():
            columns.resolve_udts(col, udts)

        # for user_type in set(udts):
        #     user_type.register_for_keyspace(klass._get_keyspace())

        # ################################################################
        # end code taken from python-driver 3.3.0 ModelMetaClass.__new__
        # ################################################################

        klass._deferred = False

        # Abstract models get no Django _meta/managers and are not
        # registered with the app registry.
        if not is_abstract:
            klass = cls._add_django_meta_and_register_model(
                klass=klass, attrs=attrs, name=name
            )

        return klass

    def add_to_class(cls, name, value):
        """
        Django-style hook for attaching attributes (e.g. ``_meta``,
        managers) to the model class, honouring ``contribute_to_class``
        when the value provides it.
        """
        django_meta_default_names = options.DEFAULT_NAMES
        # patch django so Meta.get_pk_field can be specified these models
        options.DEFAULT_NAMES = django_meta_default_names + ("get_pk_field",)
        # We should call the contribute_to_class method only if it's bound
        if not inspect.isclass(value) and hasattr(value, "contribute_to_class"):
            value.contribute_to_class(cls, name)
        else:
            try:
                setattr(cls, name, value)
            except AttributeError:
                raise AttributeError("failed to set attribute {}".format(name))
        # Restore Django's original Meta option names.
        options.DEFAULT_NAMES = django_meta_default_names

    @classmethod
    def _add_django_meta_and_register_model(cls, klass, attrs, name):
        """
        Attach a Django Options (``_meta``) object and managers to *klass*
        and register it with its app registry. Returns the class.
        """
        # Create the class.
        module = attrs.get("__module__")
        if not module:
            return klass
        new_class = klass
        attr_meta = attrs.pop("Meta", None)
        abstract = getattr(attr_meta, "abstract", False)
        if not attr_meta:
            meta = getattr(new_class, "Meta", None)
        else:
            meta = attr_meta

        if meta:
            # Cassandra tables are never managed by Django migrations.
            meta.managed = False

        app_label = None

        # Look for an application configuration to attach the model to.
        app_config = apps.get_containing_app_config(module)

        if getattr(meta, "app_label", None) is None:
            if app_config is None:
                if not abstract:
                    raise RuntimeError(
                        "Model class %s.%s doesn't declare an explicit "
                        "app_label and isn't in an application in "
                        "INSTALLED_APPS." % (module, name)
                    )
            else:
                app_label = app_config.label

        # Add _meta/Options attribute to the model
        new_class.add_to_class(
            "_meta", DjangoCassandraOptions(meta, app_label, cls=new_class)
        )

        # Add manager to the model
        for manager_attr in _django_manager_attr_names:
            new_class.add_to_class(manager_attr, new_class.objects)

        # Register the model
        new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
        return new_class

    @classmethod
    def check(cls, **kwargs):
        """Django system-check hook; these models report no errors."""
        errors = []
        return errors
def convert_pk_field_names_to_real(model, field_names):
    """
    Expand the pseudo-field ``pk`` (or ``-pk``) into the model's real
    primary-key field names, preserving order and dropping duplicates:

    >>> convert_pk_field_names_to_real(SomeModel, ['pk', 'another_field'])
    ['real_pk_field', 'another_field']
    """
    pk_names = tuple(col.name for col in model._get_primary_key_columns())

    expanded = []
    for field_name in field_names:
        if field_name == "pk":
            candidates = pk_names
        elif field_name == "-pk":
            candidates = tuple("-" + pk_name for pk_name in pk_names)
        else:
            candidates = (field_name,)
        for candidate in candidates:
            # Keep first occurrence only.
            if candidate not in expanded:
                expanded.append(candidate)
    return expanded
class ReadOnlyDjangoCassandraQuerySet(list):
    """
    A list-backed, read-only queryset wrapper.

    Returned once results have been materialized and ordered in Python.
    Supports the read-only portion of the QuerySet API; any method that
    would re-query or mutate raises NotImplementedError.
    """

    name = "objects"
    use_in_migrations = False

    def __init__(self, data, model_class):
        """
        :param data: iterable of model instances to wrap.
        :param model_class: the model class the instances belong to.
        """
        if not isinstance(data, collections.abc.Iterable):
            raise TypeError("ReadOnlyDjangoCassandraQuerySet requires iterable data")
        super(ReadOnlyDjangoCassandraQuerySet, self).__init__(data)
        self.model = model_class
        # Django expects a .query object with ordering metadata.
        self.query = StubQuery(model=self.model)

    @property
    def objects(self):
        return self

    def first(self):
        # None when the queryset is empty.
        return next(iter(self), None)

    def _clone(self):
        return copy.deepcopy(self)

    def all(self):
        return self

    def get_queryset(self):
        return self

    def count(self):
        return len(self)

    def exists(self):
        return len(self) > 0

    def values_list(self, *fields, **kwargs):
        """
        Return a list of per-record field-value lists; with flat=True the
        result is flattened into a single list.
        """
        fields = convert_pk_field_names_to_real(model=self.model, field_names=fields)
        values_list = []
        for model_record in self:
            values_list_item = []
            for field_name in fields:
                values_list_item.append(model_record[field_name])
            values_list.append(values_list_item)
        if kwargs.get("flat") is True:
            values_list = list(chain.from_iterable(values_list))
        return values_list

    def _raise_not_implemented(self, method_name):
        raise NotImplementedError(
            "You cannot .{}() on a DjangoCassandraQuerySet which "
            "has been ordered using python".format(method_name)
        )

    def filter(self, **kwargs):
        self._raise_not_implemented(method_name="filter")

    def get(self, **kwargs):
        self._raise_not_implemented(method_name="get")

    def distinct(self, *args, **kwargs):
        self._raise_not_implemented(method_name="distinct")

    def limit(self, *args, **kwargs):
        self._raise_not_implemented(method_name="limit")

    def only(self, *args, **kwargs):
        self._raise_not_implemented(method_name="only")

    def create(self, *args, **kwargs):
        self._raise_not_implemented(method_name="create")

    def delete(self, *args, **kwargs):
        self._raise_not_implemented(method_name="delete")

    def defer(self, *args, **kwargs):
        self._raise_not_implemented(method_name="defer")

    def exclude(self, *args, **kwargs):
        # Bug fix: previously reported "defer" as the offending method name.
        self._raise_not_implemented(method_name="exclude")
class StubQuery(object):
    """
    Minimal stand-in for the ``query`` object Django expects on a
    queryset: exposes just enough surface (ordering, meta access, context
    hooks) for Django machinery to interact with Cassandra querysets.
    """

    def __init__(self, model):
        self.model = model
        # Django inspects .order_by; default to the primary key.
        self.order_by = ["pk"]

    @property
    def select_related(self):
        """Related-object selection is never performed."""
        return False

    def get_meta(self):
        """Expose the model's Django Options object."""
        return self.model._meta

    def add_context(self, *args, **kwargs):
        """No-op context hook."""
        pass

    def get_context(self, *args, **kwargs):
        """No context is ever stored."""
        return {}

    def _prepare(self, field):
        """Return self unchanged; nothing to prepare."""
        return self
class DjangoCassandraQuerySet(query.ModelQuerySet):
    """
    cqlengine queryset adapted to look enough like a Django QuerySet for
    Django's machinery to interact with it (``.query`` attribute, ``pk``
    handling, Python-side ordering fallback).
    """

    # Attributes Django expects on a manager/queryset.
    name = "objects"
    use_in_migrations = False

    def __init__(self, *args, **kwargs):
        # NOTE(review): super(query.ModelQuerySet, self) skips
        # ModelQuerySet's own implementation and dispatches to its base
        # class -- presumably intentional; confirm against cqlengine's MRO.
        super(query.ModelQuerySet, self).__init__(*args, **kwargs)
        # Always allow filtering on non-indexed columns.
        self._allow_filtering = True
        # Django expects a .query object carrying ordering metadata.
        self.query = StubQuery(model=self.model)

    def _select_fields(self):
        # Honour defer()/only() field selections; otherwise defer to the
        # base implementation.
        if self._defer_fields or self._only_fields:
            fields = self.model._columns.keys()
            if self._defer_fields:
                fields = [f for f in fields if f not in self._defer_fields]
            elif self._only_fields:
                fields = self._only_fields
            return [self.model._columns[f].db_field_name for f in fields]
        return super(query.ModelQuerySet, self)._select_fields()

    def count(self):
        # Cache the count for the lifetime of this queryset instance.
        if self._count is None:
            self._count = super(query.ModelQuerySet, self).count()
        return self._count

    def get_queryset(self):
        # With WHERE clauses present, apply them; otherwise fetch all.
        if len(self._where) > 0:
            return super(query.ModelQuerySet, self).filter()
        else:
            return super(query.ModelQuerySet, self).all()

    def exclude(self, *args, **kwargs):
        # Exclusion is performed in Python over the fetched queryset
        # (kwargs are exact-match field/value pairs); the result is a
        # read-only, list-backed queryset.
        new_queryset = []
        for model in self.get_queryset():
            should_exclude_model = False
            for field_name, field_value in kwargs.items():
                if getattr(model, field_name) == field_value:
                    should_exclude_model = True
                    break
            if not should_exclude_model:
                new_queryset.append(model)
        return ReadOnlyDjangoCassandraQuerySet(new_queryset, model_class=self.model)

    def python_order_by(self, qset, colnames):
        """
        Sort *qset* (a list of model instances) in Python by *colnames*
        and wrap the result in a read-only queryset.
        """
        if not isinstance(qset, list):
            raise TypeError("qset must be a list")
        colnames = convert_pk_field_names_to_real(
            model=self.model, field_names=colnames
        )
        any_cols_revesed = any(c.startswith("-") for c in colnames)
        if any_cols_revesed:
            # NOTE(review): the list is re-sorted once per column in the
            # given order; via sort stability this makes the LAST column
            # the primary sort key -- confirm this ordering is intended.
            for col in colnames:
                should_reverse = col.startswith("-")
                if should_reverse:
                    col = col[1:]
                qset.sort(key=attrgetter(col), reverse=should_reverse)
        else:
            # Ascending-only case: one multi-key sort.
            new_colnames = []
            for col in colnames:
                if col == "pk":
                    pk_cols = self.model._get_primary_key_column_names()
                    for pk_name in pk_cols:
                        new_colnames.append(pk_name)
                else:
                    new_colnames.append(col)
            try:
                qset.sort(key=attrgetter(*new_colnames))
            except AttributeError:
                msg = "Can't resolve one of column names: {}".format(*new_colnames)
                raise query.QueryException(msg)
        return ReadOnlyDjangoCassandraQuerySet(qset, model_class=self.model)

    def exists(self):
        return self.count() > 0

    def get(self, *args, **kwargs):
        obj = super(DjangoCassandraQuerySet, self).get(*args, **kwargs)
        # Expose Django's conventional .pk attribute on the fetched object.
        obj.pk = getattr(obj, obj._get_explicit_pk_column().name)
        return obj

    def order_by(self, *colnames):
        """
        Order the queryset, falling back to Python-side sorting when
        Cassandra cannot order on the requested column (only if the
        CASSANDRA_FALLBACK_ORDER_BY_PYTHON setting allows it).
        """
        # No arguments clears any existing ordering.
        if len(colnames) == 0:
            clone = copy.deepcopy(self)
            clone._order = []
            return clone

        order_using_python = False
        conditions = []
        for col in colnames:
            try:
                if hasattr(col, "resolve_expression"):
                    warnings.warn("Sorting by Django DB Expressions is not supported")
                    continue
                conditions.append(
                    '"{0}" {1}'.format(*self._get_ordering_condition(col))
                )
            except query.QueryException as exc:
                # Only specific "can't order"/"can't resolve" failures may
                # fall back to Python ordering; anything else re-raises.
                order_by_exception = "Can't order" in str(
                    exc
                ) or "Can't resolve the column name" in str(exc)
                if order_by_exception:
                    order_using_python = settings.CASSANDRA_FALLBACK_ORDER_BY_PYTHON
                    if order_using_python:
                        log.debug('ordering in python column "%s"', col)
                        msg = ORDER_BY_WARN.format(col=col, exc=exc)
                        warnings.warn(msg)
                    else:
                        raise query.QueryException(
                            "{exc}\n\n"
                            "{help}".format(exc=exc, help=ORDER_BY_ERROR_HELP)
                        )
                else:
                    raise exc

        clone = copy.deepcopy(self)
        if order_using_python is True:
            return self.python_order_by(qset=list(clone), colnames=colnames)
        else:
            clone._order.extend(conditions)
            return clone

    def values_list(self, *fields, **kwargs):
        # Translate the 'pk' pseudo-field before delegating.
        if "pk" in fields:
            fields = convert_pk_field_names_to_real(
                model=self.model, field_names=fields
            )
        super_values_list = super(DjangoCassandraQuerySet, self).values_list
        return super_values_list(*fields, **kwargs)

    def _clone(self):
        return copy.deepcopy(self)

    def iterator(self, *args, **kwargs):
        # NOTE(review): skips ModelQuerySet and returns the base class's
        # all(); arguments are accepted but ignored.
        return super(query.ModelQuerySet, self).all()
class DjangoCassandraModel(BaseModel, metaclass=DjangoCassandraModelMetaClass):
    """
    Base class for Cassandra-backed models that can also be treated like
    Django models (the metaclass attaches ``_meta``, managers and app
    registration).
    """

    # Queryset class used by the manager.
    __queryset__ = DjangoCassandraQuerySet
    # Abstract base: no table is created for this class itself.
    __abstract__ = True
    __table_name__ = None
    __table_name_case_sensitive__ = False
    __keyspace__ = None
    __options__ = None
    __discriminator_value__ = None
    __compute_routing_key__ = True

    def __init__(self, *args, **kwargs):
        super(DjangoCassandraModel, self).__init__(*args, **kwargs)
        # Bind each helper from django_model_methods as an instance method
        # so the instance quacks like a Django model instance.
        methods = inspect.getmembers(django_model_methods, inspect.isfunction)
        for method_name, method in methods:
            new_method = partial(method, self)
            setattr(self, method_name, new_method)

    def __hash__(self):
        # Mirror Django's behaviour: instances without a primary key value
        # are unhashable.
        if self._get_pk_val() is None:
            raise TypeError("Model instances without primary key value are unhashable")
        return hash(self._get_pk_val())

    @classmethod
    def get(cls, *args, **kwargs):
        # Disable cqlengine's class-level shortcut; use the manager instead.
        raise AttributeError("model has no attribute 'get'")

    @classmethod
    def filter(cls, *args, **kwargs):
        # Disable cqlengine's class-level shortcut; use the manager instead.
        raise AttributeError("model has no attribute 'filter'")

    @classmethod
    def all(cls, *args, **kwargs):
        # Disable cqlengine's class-level shortcut; use the manager instead.
        raise AttributeError("model has no attribute 'all'")

    @classmethod
    def _get_primary_key_columns(cls):
        # All columns flagged as part of the primary key.
        return tuple(c for c in cls._columns.values() if c.is_primary_key is True)

    @classmethod
    def _get_primary_key_column_names(cls):
        return tuple(c.name for c in cls._get_primary_key_columns())

    @classmethod
    def _get_column(cls, name):
        """
        Based on cqlengine.models.BaseModel._get_column.
        But to work with 'pk'
        """
        if name == "pk":
            return cls._meta.get_field(cls._meta.pk.name)
        return cls._columns[name]

    @classmethod
    def _get_explicit_pk_column(cls):
        """
        Return the single column acting as Django's "pk". With a composite
        primary key the model's Meta must name it via Meta.get_pk_field.
        """
        try:
            if len(cls._primary_keys) > 1:
                try:
                    pk_field = cls.Meta.get_pk_field
                except AttributeError:
                    raise RuntimeError(PK_META_MISSING_HELP.format(cls))
                return cls._primary_keys[pk_field]
            else:
                return list(cls._primary_keys.values())[0]
        except IndexError:
            # No primary key columns defined at all.
            return None
|
|
#!/usr/bin/env python
"""
Search and Store Tweets utility.
Search for tweets in the Twitter API for given search query, then store
the tweet and the tweet author data, adding new objects or updating existing
ones.
The input may be command-line text for easy ad hoc queries, or the name of
an existing Campaign in the db so that its search query string can be
looked up.
Add the default label as Campaign for processed Tweets to signal that the Tweet
was processed by this script. If looking up Tweets using the search query from
a Campaign specified in arguments, then also allocate the processed Tweets
to that Campaign.
Add the default label as Category for Profiles which are processed due to
processing their Tweets.
The persist value is set based on an answer here:
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
TODO: Consolidate use of writeToDB and persist in this repo.
"""
import argparse
import os
import sys
from sqlobject import SQLObjectNotFound
# Allow imports to be done when executing this file directly.
sys.path.insert(
0,
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
),
)
import lib
import lib.text_handling
import lib.twitter_api.authentication
import lib.twitter_api.search
import lib.tweets
from lib import database as db
from lib.config import AppConf
from lib.db_query.tweets.campaigns import (
printAvailableCampaigns,
printCampaignsAndTweets,
)
from models import Campaign
conf = AppConf()
UTILITY_CATEGORY = UTILITY_CAMPAIGN = conf.get("Labels", "search")
# Create initial global API connection object, which needs to be set.
API_CONN = None
def search(query, pageCount=1, extended=True):
    """
    Generate tweets from the Twitter Search API matching a query.

    A new request is made for each page of up to 100 tweets (the Search
    API's limit per request).

    NOTE: this function shadows the imported ``lib.twitter_api.search``
    module name at module scope.

    :param query: Query text to search on the Twitter API.
    :param pageCount: Count of pages of tweets to fetch.
    :param extended: If True, get the expanded tweet message instead of
        the truncated form.

    :return: Iterable of tweets, across pages if necessary.
    """
    tweetPages = lib.twitter_api.search.fetchTweetsPaging(
        API_CONN,
        searchQuery=query,
        pageCount=pageCount,
        extended=extended,
    )
    for tweetPage in tweetPages:
        yield from tweetPage
def storeTweets(fetchedTweets, persist=True):
    """
    Store (or just display) tweets fetched from the Twitter Search API.

    Tweet authors are created or updated as Profile records and the tweets
    as Tweet records. Progress is printed every 100 processed tweets,
    which roughly lines up with the Search API's page size.

    :param fetchedTweets: Iterable of tweets from the Twitter API.
    :param persist: Default True. If False, nothing is written to the
        database and tweet data is only printed to stdout.

    :return profileRecs: List of Profile records inserted or updated (a
        profile appearing more than once is counted each time).
    :return tweetRecs: List of Tweet records inserted or updated.
    """
    profileRecs = []
    tweetRecs = []
    processedTweets = 0

    for fetchedTweet in fetchedTweets:
        processedTweets += 1

        if not persist:
            # Prefer the attribute available in extended mode, falling
            # back to the standard-mode one.
            try:
                text = fetchedTweet.full_text
            except AttributeError:
                text = fetchedTweet.text
            print(
                "{index:3d} @{screenName}: {message}".format(
                    index=processedTweets,
                    screenName=fetchedTweet.author.screen_name,
                    message=lib.text_handling.flattenText(text),
                )
            )
            continue

        profileRec = lib.tweets.insertOrUpdateProfile(fetchedTweet.author)
        profileRecs.append(profileRec)
        data, tweetRec = lib.tweets.insertOrUpdateTweet(fetchedTweet, profileRec.id)
        tweetRecs.append(tweetRec)
        if processedTweets % 100 == 0:
            print("Stored so far: {}".format(processedTweets))

    print("Stored at end of search: {}".format(processedTweets))
    print()

    return profileRecs, tweetRecs
def assignCategories(profileRecs):
    """
    Link the utility's Category label to the given Profile records,
    creating the Category first if it does not exist yet.
    """
    if not profileRecs:
        return
    try:
        utilityCategoryRec = db.Category.byName(UTILITY_CATEGORY)
    except SQLObjectNotFound:
        utilityCategoryRec = db.Category(name=UTILITY_CATEGORY)
    lib.tweets.bulkAssignProfileCategory(
        categoryID=utilityCategoryRec.id,
        profileIDs=(profileRec.id for profileRec in profileRecs),
    )
def assignCustomCampaign(customCampaignRec, tweetRecs):
    """
    Link tweets to a custom Campaign, when one was supplied.

    A fresh generator of tweet IDs is built here, since a generator
    already consumed by an earlier bulk-assign call cannot be reused.
    """
    if not customCampaignRec:
        return
    tweetIDs = (tweetRec.id for tweetRec in tweetRecs)
    lib.tweets.bulkAssignTweetCampaign(
        campaignID=customCampaignRec.id, tweetIDs=tweetIDs
    )
def assignCampaigns(tweetRecs, utilityCampaignRec, customCampaignRec):
    """
    Link tweets to the utility's Campaign and, when given, to the custom
    Campaign as well.
    """
    if not tweetRecs:
        return
    lib.tweets.bulkAssignTweetCampaign(
        campaignID=utilityCampaignRec.id,
        tweetIDs=(tweetRec.id for tweetRec in tweetRecs),
    )
    assignCustomCampaign(customCampaignRec, tweetRecs)
@lib.timeit
def searchStoreAndLabel(query, pageCount, persist, utilityCampaignRec, customCampaignRec):
    """
    Fetch and store tweet data, then assign labels.

    :param str query: Twitter API search query.
    :param int pageCount: Count of pages of tweets to fetch.
    :param bool persist: If True, persist data, otherwise just print.
    :param models.tweets.Campaign utilityCampaignRec:
    :param models.tweets.Campaign customCampaignRec:

    :return: Tuple of processed profile and tweet counts.
    """
    fetchedTweets = search(query, pageCount)
    profileRecs, tweetRecs = storeTweets(fetchedTweets, persist)

    profileCount = len(profileRecs)
    tweetCount = len(tweetRecs)

    # When not persisting, storeTweets returns empty lists, so there is
    # nothing to report or label.
    if persist:
        print("Profiles: {:,d}".format(profileCount))
        print("Tweets: {:,d}".format(tweetCount))
        assignCategories(profileRecs)
        assignCampaigns(tweetRecs, utilityCampaignRec, customCampaignRec)

    return profileCount, tweetCount
def run(maxPages, persist, campaignName=None, query=None):
    """
    Get labels first before attempting to do searches and then find labels
    are missing.

    :param maxPages: Count of pages of tweets to fetch.
    :param persist: Flag; if False, only print fetched data.
    :param campaignName: Custom campaign name to label tweets with. Used
        to look up the search query when no ad hoc query is given.
    :param query: Ad hoc search query.

    :return: Tuple of processed profile and tweet counts.
    :raises ValueError: if the named Campaign has no search query set.
    """
    global API_CONN

    utilityCampaignRec = Campaign.getOrCreate(UTILITY_CAMPAIGN, None)

    if query:
        customCampaignRec = None
    else:
        customCampaignRec = Campaign.getOrRaise(campaignName)
        query = customCampaignRec.searchQuery
        # Raise explicitly rather than assert, since asserts are stripped
        # when Python runs with optimization (-O).
        if not query:
            raise ValueError(
                "Use the Campaign Manager to set a search query"
                " for the campaign: {0}".format(campaignName)
            )

    # Process the category and campaign records above before fetching
    # data from the API.
    print("Search query: {0}".format(query))

    # Use app auth here for up to 480 search requests per window, rather
    # than 180 when using the user auth.
    API_CONN = lib.twitter_api.authentication.getAppOnlyConnection()
    profileCount, tweetCount = searchStoreAndLabel(
        query,
        maxPages,
        persist,
        utilityCampaignRec,
        customCampaignRec,
    )

    return profileCount, tweetCount
def main():
    """
    Handle command-line arguments to search for tweets, store data for
    Tweet and Profile objects and then assign labels.
    """
    parser = argparse.ArgumentParser(
        description="""\
Utility to search for tweets and then the store tweet and profile data locally.
Search with either an ad hoc query, or the name of a stored Campaign which
has a search query set. To create or edit a Campaign, use the Campaign Manager
utility.
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    view = parser.add_argument_group("View", "Print data to stdout")
    view.add_argument(
        "-a",
        "--available",
        action="store_true",
        help="""Output available Campaigns in db, with Tweet counts and
            search query for each.""",
    )
    view.add_argument(
        "-t",
        "--tweets",
        action="store_true",
        help="Output local Tweets grouped by Campaign.",
    )
    view.add_argument(
        "-s",
        "--search-help",
        action="store_true",
        help="""Print guide for writing search queries, with examples of
            syntax safe for the command-line. See Twitter's search
            documentation for full rules.""",
    )

    fetch = parser.add_argument_group(
        "Fetch", "Select a search query to get Tweets from Twitter Search API."
    )
    fetch.add_argument(
        "-c",
        "--campaign",
        help="""Name of existing campaign in the db. If supplied and the
            Campaign record has a query string, fetch Tweets from the Twitter
            Search API and store. Then assign the given custom Campaign name
            to processed Tweets. This argument may not be used with the
            --query argument.
        """,
    )
    fetch.add_argument(
        "-q",
        "--query",
        help="""Word or phrase to search on the Twitter API as an ad hoc
            query which is not associated with a Campaign. This argument
            may not be used with the --campaign argument.""",
    )
    fetch.add_argument(
        "-p",
        "--pages",
        metavar="N",
        type=int,
        default=1,
        help="Default 1. Max count of pages of tweets to get for the search "
        " query, where each page contains up to 100 tweets.",
    )
    fetch.add_argument(
        "--persist",
        dest="persist",
        action="store_true",
        help="(DEFAULT) Store fetched tweets and profiles in the database.",
    )
    fetch.add_argument(
        "--no-persist",
        dest="persist",
        action="store_false",
        help="Print fetched tweet and profile data without storing.",
    )
    fetch.set_defaults(persist=True)

    args = parser.parse_args()

    if args.available:
        printAvailableCampaigns()
        return
    if args.tweets:
        printCampaignsAndTweets()
        return
    if args.search_help:
        # Bug fix: the module-level search() generator shadows the
        # lib.twitter_api.search module at module scope, so the help text
        # must be fetched through the full module path.
        print(lib.twitter_api.search.getSearchQueryHelp())
        return

    if not (args.query or args.campaign):
        raise ValueError("Either query or campaign args must be set.")
    run(args.pages, args.persist, args.campaign, args.query)
if __name__ == "__main__":
main()
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import weakref
from typing import Any
# External imports
from mock import patch
# Bokeh imports
from bokeh.core.enums import HoldPolicy
from bokeh.core.has_props import ModelDef, OverrideDef, PropertyDef
from bokeh.core.properties import (
Instance,
Int,
List,
Nullable,
Override,
)
from bokeh.core.property.vectorization import Field, Value
from bokeh.core.serialization import (
Deserializer,
MapRep,
ObjectRefRep,
Ref,
)
from bokeh.core.types import ID
from bokeh.document.events import (
ColumnsPatchedEvent,
ColumnsStreamedEvent,
ModelChangedEvent,
RootAddedEvent,
RootRemovedEvent,
SessionCallbackAdded,
SessionCallbackRemoved,
TitleChangedEvent,
)
from bokeh.document.json import ModelChanged, PatchJson
from bokeh.io.doc import curdoc
from bokeh.model import DataModel
from bokeh.models import ColumnDataSource
from bokeh.protocol.messages.patch_doc import patch_doc
from bokeh.server.contexts import BokehSessionContext
from bokeh.util.logconfig import basicConfig
from _util_document import (
AnotherModelInTestDocument,
ModelThatOverridesName,
ModelWithSpecInTestDocument,
SomeModelInTestDocument,
)
# Module under test
import bokeh.document.document as document # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
class SomeDataModel(DataModel):
    # Simple DataModel fixture: one un-defaulted Int and two properties
    # with explicit defaults, exercised by the tests below.
    prop0 = Int()
    prop1 = Int(default=111)
    prop2 = List(Int, default=[1, 2, 3])
class DerivedDataModel(SomeDataModel):
    # Subclass fixture adding its own properties, an Instance reference, a
    # Nullable Instance, and an Override that changes the inherited
    # prop2 default from [1, 2, 3] to 119.
    prop3 = Int()
    prop4 = Int(default=112)
    prop5 = List(Int, default=[1, 2, 3, 4])
    prop6 = Instance(SomeDataModel)
    prop7 = Nullable(Instance(SomeDataModel))

    prop2 = Override(default=119)
class CDSDerivedDataModel(ColumnDataSource, DataModel):
    # Fixture mixing a built-in model (ColumnDataSource) with DataModel;
    # overrides the inherited ``data`` default with a preset column.
    prop0 = Int()
    prop1 = Int(default=111)
    prop2 = List(Int, default=[1, 2, 3])

    data = Override(default={"default_column": [4, 5, 6]})
class CDSDerivedDerivedDataModel(CDSDerivedDataModel):
    # Second-level fixture: adds an Instance property with a default model
    # value and overrides the ``data`` default again.
    prop3 = Instance(SomeDataModel, default=SomeDataModel(prop0=-1))

    data = Override(default={"default_column": [7, 8, 9]})
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class TestDocumentHold:
    """Verify that Document.hold/unhold are thin delegations to the
    document's DocumentCallbackManager."""
    @pytest.mark.parametrize('policy', HoldPolicy)
    @patch("bokeh.document.callbacks.DocumentCallbackManager.hold")
    def test_hold(self, mock_hold, policy) -> None:
        """hold(policy) forwards the policy, positionally, to the manager."""
        doc = document.Document()
        doc.hold(policy)
        assert mock_hold.called
        args, kwargs = mock_hold.call_args
        assert args == (policy,)
        assert kwargs == {}
    @patch("bokeh.document.callbacks.DocumentCallbackManager.unhold")
    def test_unhold(self, mock_unhold) -> None:
        """unhold() forwards to the manager with no arguments."""
        doc = document.Document()
        doc.unhold()
        assert mock_unhold.called
        args, kwargs = mock_unhold.call_args
        assert args == ()
        assert kwargs == {}
class TestDocument:
    """Behavioral tests for ``bokeh.document.Document``: root management,
    model lookup, change notification, session callbacks, (de)serialization,
    and JSON patch application. These tests pin exact upstream behavior.
    """
    def test_basic(self) -> None:
        """A freshly created Document is empty and has no session context."""
        d = document.Document()
        assert not d.roots
        assert d.template_variables == {}
        assert d.session_context is None
    def test_session_context(self) -> None:
        """session_context dereferences the weakref stored in _session_context."""
        d = document.Document()
        assert d.session_context is None
        sc = BokehSessionContext(None, None, d)
        d._session_context = weakref.ref(sc)
        assert d.session_context is sc
    def test_add_roots(self) -> None:
        """add_root registers the model and sets its .document back-reference."""
        d = document.Document()
        assert not d.roots
        d.add_root(AnotherModelInTestDocument())
        assert len(d.roots) == 1
        assert next(iter(d.roots)).document == d
    def test_roots_preserves_insertion_order(self) -> None:
        """d.roots is a plain list that iterates in insertion order."""
        d = document.Document()
        assert not d.roots
        roots = [
            AnotherModelInTestDocument(),
            AnotherModelInTestDocument(),
            AnotherModelInTestDocument(),
        ]
        for r in roots:
            d.add_root(r)
        assert len(d.roots) == 3
        assert type(d.roots) is list
        roots_iter = iter(d.roots)
        assert next(roots_iter) is roots[0]
        assert next(roots_iter) is roots[1]
        assert next(roots_iter) is roots[2]
    def test_title(self) -> None:
        """title defaults to DEFAULT_TITLE and is a settable property."""
        d = document.Document()
        assert d.title == document.DEFAULT_TITLE
        d.title = "Foo"
        assert d.title == "Foo"
    def test_all_models(self) -> None:
        """d.models tracks reachable models as child references change."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        m = SomeModelInTestDocument()
        m2 = AnotherModelInTestDocument()
        m.child = m2
        d.add_root(m)
        assert len(d.roots) == 1
        assert len(d.models) == 2
        # Detaching the child drops it from the model set...
        m.child = None
        assert len(d.models) == 1
        # ...and re-attaching restores it.
        m.child = m2
        assert len(d.models) == 2
        d.remove_root(m)
        assert len(d.models) == 0
    def test_get_model_by_id(self) -> None:
        """get_model_by_id finds any reachable model; unknown IDs give None."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        m = SomeModelInTestDocument()
        m2 = AnotherModelInTestDocument()
        m.child = m2
        d.add_root(m)
        assert len(d.roots) == 1
        assert len(d.models) == 2
        assert d.get_model_by_id(m.id) == m
        assert d.get_model_by_id(m2.id) == m2
        assert d.get_model_by_id("not a valid ID") is None
    def test_get_model_by_name(self) -> None:
        """get_model_by_name finds models by name; unknown names give None."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        m = SomeModelInTestDocument(name="foo")
        m2 = AnotherModelInTestDocument(name="bar")
        m.child = m2
        d.add_root(m)
        assert len(d.roots) == 1
        assert len(d.models) == 2
        assert d.get_model_by_name(m.name) == m
        assert d.get_model_by_name(m2.name) == m2
        assert d.get_model_by_name("not a valid name") is None
    def test_get_model_by_changed_name(self) -> None:
        """The name index is updated when a model is renamed."""
        d = document.Document()
        m = SomeModelInTestDocument(name="foo")
        d.add_root(m)
        assert d.get_model_by_name("foo") == m
        m.name = "bar"
        assert d.get_model_by_name("foo") == None
        assert d.get_model_by_name("bar") == m
    def test_get_model_by_changed_from_none_name(self) -> None:
        """A model whose name changes from None becomes findable."""
        d = document.Document()
        m = SomeModelInTestDocument(name=None)
        d.add_root(m)
        assert d.get_model_by_name("bar") == None
        m.name = "bar"
        assert d.get_model_by_name("bar") == m
    def test_get_model_by_changed_to_none_name(self) -> None:
        """A model whose name changes to None is no longer findable."""
        d = document.Document()
        m = SomeModelInTestDocument(name="bar")
        d.add_root(m)
        assert d.get_model_by_name("bar") == m
        m.name = None
        assert d.get_model_by_name("bar") == None
    def test_can_get_name_overriding_model_by_name(self) -> None:
        """Models that override the 'name' property still index correctly."""
        d = document.Document()
        m = ModelThatOverridesName(name="foo")
        d.add_root(m)
        assert d.get_model_by_name("foo") == m
        m.name = "bar"
        assert d.get_model_by_name("bar") == m
    def test_cannot_get_model_with_duplicate_name(self) -> None:
        """Duplicate names raise ValueError until the duplicate is removed."""
        d = document.Document()
        m = SomeModelInTestDocument(name="foo")
        m2 = SomeModelInTestDocument(name="foo")
        d.add_root(m)
        d.add_root(m2)
        got_error = False
        try:
            d.get_model_by_name("foo")
        except ValueError as e:
            got_error = True
            assert 'Found more than one' in repr(e)
        assert got_error
        d.remove_root(m)
        assert d.get_model_by_name("foo") == m2
    def test_select(self) -> None:
        """Exercise the select/select_one/set_select wrappers around query."""
        # we aren't trying to replace test_query here, only test
        # our wrappers around it, so no need to try every kind of
        # query
        d = document.Document()
        root1 = SomeModelInTestDocument(foo=42, name='a')
        child1 = SomeModelInTestDocument(foo=43, name='b')
        root2 = SomeModelInTestDocument(foo=44, name='c')
        root3 = SomeModelInTestDocument(foo=44, name='d')
        child3 = SomeModelInTestDocument(foo=45, name='c')
        root4 = AnotherModelInTestDocument(bar=20, name='A')
        root1.child = child1
        root3.child = child3
        d.add_root(root1)
        d.add_root(root2)
        d.add_root(root3)
        d.add_root(root4)
        # select()
        assert {root1} == set(d.select(dict(foo=42)))
        assert {root1} == set(d.select(dict(name="a")))
        assert {root2, child3} == set(d.select(dict(name="c")))
        assert set() == set(d.select(dict(name="nope")))
        # select() on object
        assert set() == set(root3.select(dict(name="a")))
        assert {child3} == set(root3.select(dict(name="c")))
        # select_one()
        assert root3 == d.select_one(dict(name='d'))
        assert None == d.select_one(dict(name='nope'))
        got_error = False
        try:
            d.select_one(dict(name='c'))
        except ValueError as e:
            got_error = True
            assert 'Found more than one' in repr(e)
        assert got_error
        # select_one() on object
        assert None == root3.select_one(dict(name='a'))
        assert child3 == root3.select_one(dict(name='c'))
        # set_select()
        d.set_select(dict(foo=44), dict(name="c"))
        assert {root2, child3, root3} == set(d.select(dict(name="c")))
        # set_select() on object
        root3.set_select(dict(name='c'), dict(foo=57))
        assert {child3, root3} == set(d.select(dict(foo=57)))
        assert {child3, root3} == set(root3.select(dict(foo=57)))
        # set_select() on class
        d.set_select(SomeModelInTestDocument, dict(name='new_name'))
        assert len(d.select(dict(name='new_name'))) == 5
        # set_select() on different class
        assert len(d.select(dict(name="A"))) == 1
        d.set_select(AnotherModelInTestDocument, dict(name="B"))
        assert {root4} == set(d.select(dict(name="B")))
    def test_all_models_with_multiple_references(self) -> None:
        """A shared child stays in d.models until all referrers drop it."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        root1 = SomeModelInTestDocument()
        root2 = SomeModelInTestDocument()
        child1 = AnotherModelInTestDocument()
        root1.child = child1
        root2.child = child1
        d.add_root(root1)
        d.add_root(root2)
        assert len(d.roots) == 2
        assert len(d.models) == 3
        root1.child = None
        assert len(d.models) == 3
        root2.child = None
        assert len(d.models) == 2
        root1.child = child1
        assert len(d.models) == 3
        root2.child = child1
        assert len(d.models) == 3
        d.remove_root(root1)
        assert len(d.models) == 2
        d.remove_root(root2)
        assert len(d.models) == 0
    def test_all_models_with_cycles(self) -> None:
        """Reference cycles are handled correctly by the model bookkeeping."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        root1 = SomeModelInTestDocument()
        root2 = SomeModelInTestDocument()
        child1 = SomeModelInTestDocument()
        root1.child = child1
        root2.child = child1
        child1.child = root1
        print("adding root1")
        d.add_root(root1)
        print("adding root2")
        d.add_root(root2)
        assert len(d.roots) == 2
        assert len(d.models) == 3
        print("clearing child of root1")
        root1.child = None
        assert len(d.models) == 3
        print("clearing child of root2")
        root2.child = None
        assert len(d.models) == 2
        print("putting child1 back in root1")
        root1.child = child1
        assert len(d.models) == 3
        print("Removing root1")
        d.remove_root(root1)
        assert len(d.models) == 1
        print("Removing root2")
        d.remove_root(root2)
        assert len(d.models) == 0
    def test_change_notification(self) -> None:
        """Property changes fire ModelChangedEvent with curdoc set to d."""
        d = document.Document()
        assert not d.roots
        m = AnotherModelInTestDocument()
        d.add_root(m)
        assert len(d.roots) == 1
        assert m.bar == 1
        assert curdoc() is not d
        events = []
        curdoc_from_listener = []
        def listener(event):
            curdoc_from_listener.append(curdoc())
            events.append(event)
        d.on_change(listener)
        m.bar = 42
        assert events
        event = events[0]
        assert isinstance(event, ModelChangedEvent)
        assert event.document == d
        assert event.model == m
        assert event.attr == 'bar'
        assert event.new == 42
        assert len(curdoc_from_listener) == 1
        assert curdoc_from_listener[0] is d
    def test_stream_notification(self) -> None:
        """CDS streams fire ColumnsStreamedEvent carrying data and rollover."""
        d = document.Document()
        assert not d.roots
        m = ColumnDataSource(data=dict(a=[10], b=[20]))
        d.add_root(m)
        assert len(d.roots) == 1
        assert curdoc() is not d
        events = []
        curdoc_from_listener = []
        def listener(event):
            curdoc_from_listener.append(curdoc())
            events.append(event)
        d.on_change(listener)
        m.stream(dict(a=[11, 12], b=[21, 22]), 200)
        assert events
        event = events[0]
        assert isinstance(event, ColumnsStreamedEvent)
        assert event.document == d
        assert event.model == m
        assert event.attr == "data"
        assert event.data == dict(a=[11, 12], b=[21, 22])
        assert event.rollover == 200
        assert len(curdoc_from_listener) == 1
        assert curdoc_from_listener[0] is d
    def test_patch_notification(self) -> None:
        """CDS patches fire ColumnsPatchedEvent carrying the patch mapping."""
        d = document.Document()
        assert not d.roots
        m = ColumnDataSource(data=dict(a=[10,11], b=[20,21]))
        d.add_root(m)
        assert len(d.roots) == 1
        assert curdoc() is not d
        events = []
        curdoc_from_listener = []
        def listener(event):
            curdoc_from_listener.append(curdoc())
            events.append(event)
        d.on_change(listener)
        m.patch(dict(a=[(0, 1)], b=[(0,0), (1,1)]))
        assert events
        event = events[0]
        assert isinstance(event, ColumnsPatchedEvent)
        assert event.document == d
        assert event.model == m
        assert event.attr == "data"
        assert event.patches == dict(a=[(0, 1)], b=[(0,0), (1,1)])
        assert len(curdoc_from_listener) == 1
        assert curdoc_from_listener[0] is d
    def test_change_notification_removal(self) -> None:
        """remove_on_change stops further delivery to a listener."""
        d = document.Document()
        assert not d.roots
        m = AnotherModelInTestDocument()
        d.add_root(m)
        assert len(d.roots) == 1
        assert m.bar == 1
        events = []
        def listener(event):
            events.append(event)
        d.on_change(listener)
        m.bar = 42
        assert len(events) == 1
        assert events[0].new == 42
        d.remove_on_change(listener)
        m.bar = 43
        assert len(events) == 1
    def test_notification_of_roots(self) -> None:
        """add_root/remove_root fire RootAddedEvent/RootRemovedEvent."""
        d = document.Document()
        assert not d.roots
        events = []
        def listener(event):
            events.append(event)
        d.on_change(listener)
        m = AnotherModelInTestDocument(bar=1)
        d.add_root(m)
        assert len(d.roots) == 1
        assert len(events) == 1
        assert isinstance(events[0], RootAddedEvent)
        assert events[0].model == m
        m2 = AnotherModelInTestDocument(bar=2)
        d.add_root(m2)
        assert len(d.roots) == 2
        assert len(events) == 2
        assert isinstance(events[1], RootAddedEvent)
        assert events[1].model == m2
        d.remove_root(m)
        assert len(d.roots) == 1
        assert len(events) == 3
        assert isinstance(events[2], RootRemovedEvent)
        assert events[2].model == m
        d.remove_root(m2)
        assert len(d.roots) == 0
        assert len(events) == 4
        assert isinstance(events[3], RootRemovedEvent)
        assert events[3].model == m2
    def test_notification_of_title(self) -> None:
        """Setting the title fires a TitleChangedEvent."""
        d = document.Document()
        assert not d.roots
        assert d.title == document.DEFAULT_TITLE
        events = []
        def listener(event):
            events.append(event)
        d.on_change(listener)
        d.title = "Foo"
        assert d.title == "Foo"
        assert len(events) == 1
        assert isinstance(events[0], TitleChangedEvent)
        assert events[0].document is d
        assert events[0].title == "Foo"
    def test_add_remove_periodic_callback(self) -> None:
        """Periodic callbacks register/unregister with session events."""
        d = document.Document()
        events = []
        def listener(event):
            events.append(event)
        d.on_change(listener)
        assert len(d.session_callbacks) == 0
        assert not events
        def cb(): pass
        callback_obj = d.add_periodic_callback(cb, 1)
        assert len(d.session_callbacks) == len(events) == 1
        assert isinstance(events[0], SessionCallbackAdded)
        assert callback_obj == d.session_callbacks[0] == events[0].callback
        assert callback_obj.period == 1
        d.remove_periodic_callback(callback_obj)
        assert len(d.session_callbacks) == 0
        assert len(events) == 2
        assert isinstance(events[0], SessionCallbackAdded)
        assert isinstance(events[1], SessionCallbackRemoved)
    def test_add_remove_timeout_callback(self) -> None:
        """Timeout callbacks register/unregister with session events."""
        d = document.Document()
        events = []
        def listener(event):
            events.append(event)
        d.on_change(listener)
        assert len(d.session_callbacks) == 0
        assert not events
        def cb(): pass
        callback_obj = d.add_timeout_callback(cb, 1)
        assert len(d.session_callbacks) == len(events) == 1
        assert isinstance(events[0], SessionCallbackAdded)
        assert callback_obj == d.session_callbacks[0] == events[0].callback
        assert callback_obj.timeout == 1
        d.remove_timeout_callback(callback_obj)
        assert len(d.session_callbacks) == 0
        assert len(events) == 2
        assert isinstance(events[0], SessionCallbackAdded)
        assert isinstance(events[1], SessionCallbackRemoved)
    def test_add_partial_callback(self) -> None:
        """functools.partial objects are accepted as session callbacks."""
        from functools import partial
        d = document.Document()
        events = []
        def listener(event):
            events.append(event)
        d.on_change(listener)
        assert len(d.session_callbacks) == 0
        assert not events
        def _cb(): pass
        cb = partial(_cb)
        callback_obj = d.add_timeout_callback(cb, 1)
        assert len(d.session_callbacks) == len(events) == 1
        assert isinstance(events[0], SessionCallbackAdded)
        assert callback_obj == d.session_callbacks[0] == events[0].callback
        assert callback_obj.timeout == 1
    def test_add_remove_next_tick_callback(self) -> None:
        """Next-tick callbacks register/unregister with session events."""
        d = document.Document()
        events = []
        def listener(event):
            events.append(event)
        d.on_change(listener)
        assert len(d.session_callbacks) == 0
        assert not events
        def cb(): pass
        callback_obj = d.add_next_tick_callback(cb)
        assert len(d.session_callbacks) == len(events) == 1
        assert isinstance(events[0], SessionCallbackAdded)
        assert callback_obj == d.session_callbacks[0] == events[0].callback
        d.remove_next_tick_callback(callback_obj)
        assert len(d.session_callbacks) == 0
        assert len(events) == 2
        assert isinstance(events[0], SessionCallbackAdded)
        assert isinstance(events[1], SessionCallbackRemoved)
    def test_periodic_callback_gets_curdoc(self) -> None:
        """Inside a periodic callback, curdoc() is the owning document."""
        d = document.Document()
        assert curdoc() is not d
        curdoc_from_cb = []
        def cb():
            curdoc_from_cb.append(curdoc())
        callback_obj = d.add_periodic_callback(cb, 1)
        callback_obj.callback()
        assert len(curdoc_from_cb) == 1
        assert curdoc_from_cb[0] is d
    def test_timeout_callback_gets_curdoc(self) -> None:
        """Inside a timeout callback, curdoc() is the owning document."""
        d = document.Document()
        assert curdoc() is not d
        curdoc_from_cb = []
        def cb():
            curdoc_from_cb.append(curdoc())
        callback_obj = d.add_timeout_callback(cb, 1)
        callback_obj.callback()
        assert len(curdoc_from_cb) == 1
        assert curdoc_from_cb[0] is d
    def test_next_tick_callback_gets_curdoc(self) -> None:
        """Inside a next-tick callback, curdoc() is the owning document."""
        d = document.Document()
        assert curdoc() is not d
        curdoc_from_cb = []
        def cb():
            curdoc_from_cb.append(curdoc())
        callback_obj = d.add_next_tick_callback(cb)
        callback_obj.callback()
        assert len(curdoc_from_cb) == 1
        assert curdoc_from_cb[0] is d
    def test_model_callback_gets_curdoc(self) -> None:
        """Inside a model change callback, curdoc() is the owning document."""
        d = document.Document()
        m = AnotherModelInTestDocument(bar=42)
        d.add_root(m)
        assert curdoc() is not d
        curdoc_from_cb = []
        def cb(attr, old, new):
            curdoc_from_cb.append(curdoc())
        m.on_change('bar', cb)
        m.bar = 43
        assert len(curdoc_from_cb) == 1
        assert curdoc_from_cb[0] is d
    def test_clear(self) -> None:
        """clear() removes all roots and models but preserves the title."""
        d = document.Document()
        assert not d.roots
        assert d.title == document.DEFAULT_TITLE
        d.add_root(AnotherModelInTestDocument())
        d.add_root(AnotherModelInTestDocument())
        d.title = "Foo"
        assert len(d.roots) == 2
        assert d.title == "Foo"
        d.clear()
        assert not d.roots
        assert len(d.models) == 0
        assert d.title == "Foo" # do not reset title
    def test_serialization_one_model(self) -> None:
        """Round-trip a single-root document through to_json/from_json."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        root1 = SomeModelInTestDocument()
        d.add_root(root1)
        d.title = "Foo"
        json = d.to_json()
        copy = document.Document.from_json(json)
        assert len(copy.roots) == 1
        assert copy.title == "Foo"
    def test_serialization_more_models(self) -> None:
        """Round-trip multiple roots with a shared child reference."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        root1 = SomeModelInTestDocument(foo=42)
        root2 = SomeModelInTestDocument(foo=43)
        child1 = SomeModelInTestDocument(foo=44)
        root1.child = child1
        root2.child = child1
        d.add_root(root1)
        d.add_root(root2)
        assert len(d.roots) == 2
        json = d.to_json()
        copy = document.Document.from_json(json)
        assert len(copy.roots) == 2
        foos = []
        for r in copy.roots:
            foos.append(r.foo)
        foos.sort()
        assert [42,43] == foos
        some_root = next(iter(copy.roots))
        assert some_root.child.foo == 44
    def test_serialization_data_models(self) -> None:
        """to_json emits "defs" entries for all registered DataModel classes,
        even when no instances are added to the document."""
        #obj0 = SomeDataModel()
        #obj1 = DerivedDataModel(prop6=obj0)
        #obj2 = CDSDerivedDataModel()
        #obj3 = CDSDerivedDerivedDataModel()
        doc = document.Document()
        #doc.add_root(obj0)
        #doc.add_root(obj1)
        #doc.add_root(obj2)
        #doc.add_root(obj3)
        json = doc.to_json()
        assert json["defs"] == [
            ModelDef(
                type="model",
                name="test_document.SomeDataModel",
                properties=[
                    PropertyDef(name="prop0", kind="Any", default=0),
                    PropertyDef(name="prop1", kind="Any", default=111),
                    PropertyDef(name="prop2", kind="Any", default=[1, 2, 3]),
                ],
            ),
            ModelDef(
                type="model",
                name="test_document.DerivedDataModel",
                extends=Ref(id=ID("test_document.SomeDataModel")),
                properties=[
                    PropertyDef(name="prop3", kind="Any", default=0),
                    PropertyDef(name="prop4", kind="Any", default=112),
                    PropertyDef(name="prop5", kind="Any", default=[1, 2, 3, 4]),
                    PropertyDef(name="prop6", kind="Any"),
                    PropertyDef(name="prop7", kind="Any", default=None),
                ],
                overrides=[
                    OverrideDef(name="prop2", default=119),
                ],
            ),
            ModelDef(
                type="model",
                name="test_document.CDSDerivedDataModel",
                extends=Ref(id=ID("ColumnDataSource")),
                properties=[
                    PropertyDef(name="prop0", kind="Any", default=0),
                    PropertyDef(name="prop1", kind="Any", default=111),
                    PropertyDef(name="prop2", kind="Any", default=[1, 2, 3]),
                ],
                overrides=[
                    OverrideDef(name="data", default=MapRep(type="map", entries=[("default_column", [4, 5, 6])])),
                ],
            ),
            ModelDef(
                type="model",
                name="test_document.CDSDerivedDerivedDataModel",
                extends=Ref(id=ID("test_document.CDSDerivedDataModel")),
                properties=[
                    PropertyDef(
                        name="prop3",
                        kind="Any",
                        default=ObjectRefRep(
                            type="object",
                            name="test_document.SomeDataModel",
                            id=CDSDerivedDerivedDataModel.prop3.property._default.ref["id"],
                            attributes=dict(prop0=-1),
                        ),
                    ),
                ],
                overrides=[
                    OverrideDef(name="data", default=MapRep(type="map", entries=[("default_column", [7, 8, 9])])),
                ],
            ),
        ]
        # TODO: assert json["roots"]["references"] == ...
    def test_serialization_has_version(self) -> None:
        """Serialized documents carry the running bokeh version string."""
        from bokeh import __version__
        d = document.Document()
        json = d.to_json()
        assert json['version'] == __version__
    def test_patch_integer_property(self) -> None:
        """apply_json_patch updates integer properties on roots and children."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        root1 = SomeModelInTestDocument(foo=42)
        root2 = SomeModelInTestDocument(foo=43)
        child1 = SomeModelInTestDocument(foo=44)
        root1.child = child1
        root2.child = child1
        d.add_root(root1)
        d.add_root(root2)
        assert len(d.roots) == 2
        event1 = ModelChangedEvent(d, root1, 'foo', 57)
        patch1 = patch_doc.create([event1]).content
        d.apply_json_patch(patch1)
        assert root1.foo == 57
        event2 = ModelChangedEvent(d, child1, 'foo', 67)
        patch2 = patch_doc.create([event2]).content
        d.apply_json_patch(patch2)
        assert child1.foo == 67
    def test_patch_spec_property(self) -> None:
        """Patching a spec property handles value/field dicts and unit
        transitions; plain values keep whatever units are already set."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        root1 = ModelWithSpecInTestDocument(foo=42)
        d.add_root(root1)
        assert len(d.roots) == 1
        def patch_test(new_value: Any):
            # Round-trip a single change through the patch protocol and
            # return the resulting property value (spec value for dicts).
            event1 = ModelChangedEvent(d, root1, 'foo', new_value)
            patch1 = patch_doc.create([event1]).content
            d.apply_json_patch(patch1)
            if isinstance(new_value, dict):
                return root1.lookup('foo').get_value(root1)
            else:
                return root1.foo
        assert patch_test(57) == 57
        assert 'data' == root1.foo_units
        assert patch_test(dict(value=58)) == Value(58)
        assert 'data' == root1.foo_units
        assert patch_test(dict(value=58, units='screen')) == Value(58, units='screen')
        assert 'screen' == root1.foo_units
        assert patch_test(dict(value=59, units='screen')) == Value(59, units='screen')
        assert 'screen' == root1.foo_units
        assert patch_test(dict(value=59, units='data')) == Value(59)
        assert 'data' == root1.foo_units
        assert patch_test(dict(value=60, units='data')) == Value(60)
        assert 'data' == root1.foo_units
        assert patch_test(dict(value=60, units='data')) == Value(60)
        assert 'data' == root1.foo_units
        assert patch_test(61) == 61
        assert 'data' == root1.foo_units
        root1.foo = "a_string" # so "woot" gets set as a string
        assert patch_test("woot") == "woot"
        assert 'data' == root1.foo_units
        assert patch_test(dict(field="woot2")) == Field("woot2")
        assert 'data' == root1.foo_units
        assert patch_test(dict(field="woot2", units='screen')) == Field("woot2", units='screen')
        assert 'screen' == root1.foo_units
        assert patch_test(dict(field="woot3")) == Field("woot3", units="screen")
        assert 'screen' == root1.foo_units
        assert patch_test(dict(value=70)) == Value(70, units="screen")
        assert 'screen' == root1.foo_units
        root1.foo = 123 # so 71 gets set as a number
        assert patch_test(71) == 71
        assert 'screen' == root1.foo_units
    def test_patch_reference_property(self) -> None:
        """Patching a reference property attaches/detaches referenced models."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        root1 = SomeModelInTestDocument(foo=42)
        root2 = SomeModelInTestDocument(foo=43)
        child1 = SomeModelInTestDocument(foo=44)
        child2 = SomeModelInTestDocument(foo=45)
        child3 = SomeModelInTestDocument(foo=46, child=child2)
        root1.child = child1
        root2.child = child1
        d.add_root(root1)
        d.add_root(root2)
        assert len(d.roots) == 2
        assert child1.id in d.models
        assert child2.id not in d.models
        assert child3.id not in d.models
        event1 = ModelChangedEvent(d, root1, 'child', child3)
        patch1 = patch_doc.create([event1]).content
        d.apply_json_patch(patch1)
        assert root1.child.id == child3.id
        assert root1.child.child.id == child2.id
        assert child1.id in d.models
        assert child2.id in d.models
        assert child3.id in d.models
        # put it back how it was before
        event2 = ModelChangedEvent(d, root1, 'child', child1)
        patch2 = patch_doc.create([event2]).content
        d.apply_json_patch(patch2)
        assert root1.child.id == child1.id
        assert root1.child.child is None
        assert child1.id in d.models
        assert child2.id not in d.models
        assert child3.id not in d.models
    def test_patch_two_properties_at_once(self) -> None:
        """A single patch may carry multiple property change events."""
        d = document.Document()
        assert not d.roots
        assert len(d.models) == 0
        root1 = SomeModelInTestDocument(foo=42)
        child1 = SomeModelInTestDocument(foo=43)
        root1.child = child1
        d.add_root(root1)
        assert len(d.roots) == 1
        assert root1.child == child1
        assert root1.foo == 42
        assert root1.child.foo == 43
        child2 = SomeModelInTestDocument(foo=44)
        event1 = ModelChangedEvent(d, root1, 'foo', 57)
        event2 = ModelChangedEvent(d, root1, 'child', child2)
        patch1 = patch_doc.create([event1, event2]).content
        d.apply_json_patch(patch1)
        assert root1.foo == 57
        assert root1.child.foo == 44
    def test_patch_a_reference_with_implicit_reference_set(self) -> None:
        """A patch may reference already-known models without re-sending them."""
        m0 = SomeModelInTestDocument(foo=0, child=None)
        m1 = SomeModelInTestDocument(foo=1, child=m0)
        m2 = SomeModelInTestDocument(foo=2, child=m1)
        m3 = SomeModelInTestDocument(foo=3, child=m2)
        doc = document.Document()
        doc.add_root(m3)
        patch = PatchJson(
            events=[
                ModelChanged(
                    kind="ModelChanged",
                    model=m2.ref,
                    attr="child",
                    new=m0.ref,
                ),
            ],
            references=[], # known models are not included by bokehjs to improve performance (e.g. reduce payload size)
        )
        assert m2.child == m1
        doc.apply_json_patch(patch)
        assert m2.child == m0
    # a more realistic set of models instead of fake models
    def test_scatter(self) -> None:
        """Smoke test: a real plotting figure can be added as a root."""
        import numpy as np
        from bokeh.io.doc import set_curdoc
        from bokeh.plotting import figure
        d = document.Document()
        set_curdoc(d)
        assert not d.roots
        assert len(d.models) == 0
        p1 = figure(tools=[])
        N = 10
        x = np.linspace(0, 4 * np.pi, N)
        y = np.sin(x)
        p1.scatter(x, y, color="#FF00FF", nonselection_fill_color="#FFFF00", nonselection_fill_alpha=1)
        # figure does not automatically add itself to the document
        d.add_root(p1)
        assert len(d.roots) == 1
    def test_event_handles_new_callbacks_in_event_callback(self) -> None:
        """Event dispatch must tolerate callbacks that add models/callbacks."""
        from bokeh.models import Button
        d = document.Document()
        button1 = Button(label="1")
        button2 = Button(label="2")
        def clicked_1():
            # Mutates the document during dispatch on purpose.
            button2.on_event('button_click', clicked_2)
            d.add_root(button2)
        def clicked_2():
            pass
        button1.on_event('button_click', clicked_1)
        d.add_root(button1)
        decoder = Deserializer(references=[button1])
        event = decoder.decode(dict(
            type="event",
            name="button_click",
            values=dict(model=dict(id=button1.id)),
        ))
        try:
            d.callbacks.trigger_event(event)
        except RuntimeError:
            pytest.fail("trigger_event probably did not copy models before modifying")
    # TODO test serialize/deserialize with list-and-dict-valued properties
    # TODO test replace_with_json
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# needed for caplog tests to function
# (installs bokeh's default logging handlers at import time)
basicConfig()
|
|
# Standard library imports.
import sys
# System library imports.
from pyface.qt import QtCore, QtGui
# Enthought library imports.
from traits.api import DelegatesTo, Instance, on_trait_change, provides
# Local imports.
from pyface.tasks.i_advanced_editor_area_pane import IAdvancedEditorAreaPane
from pyface.tasks.i_editor_area_pane import MEditorAreaPane
from editor_area_pane import EditorAreaDropFilter
from main_window_layout import MainWindowLayout, PaneItem
from task_pane import TaskPane
from util import set_focus
###############################################################################
# 'AdvancedEditorAreaPane' class.
###############################################################################
@provides(IAdvancedEditorAreaPane)
class AdvancedEditorAreaPane(TaskPane, MEditorAreaPane):
    """ The toolkit-specific implementation of an AdvancedEditorAreaPane.
    See the IAdvancedEditorAreaPane interface for API documentation.
    """
    #### Private interface ####################################################
    # Helper used to save and restore the arrangement of editor dock widgets;
    # created lazily by __main_window_layout_default below.
    _main_window_layout = Instance(MainWindowLayout)
    ###########################################################################
    # 'TaskPane' interface.
    ###########################################################################
    def create(self, parent):
        """ Create and set the toolkit-specific control that represents the
            pane.
        """
        self.control = control = EditorAreaWidget(self, parent)
        # Install the drop filter so editors can be rearranged via drag-and-drop.
        self._filter = EditorAreaDropFilter(self)
        self.control.installEventFilter(self._filter)
        # Add shortcuts for scrolling through tabs.
        # macOS uses the conventional Ctrl+{ / Ctrl+} bindings; elsewhere use
        # Ctrl+PgUp / Ctrl+PgDown.
        if sys.platform == 'darwin':
            next_seq = 'Ctrl+}'
            prev_seq = 'Ctrl+{'
        else:
            next_seq = 'Ctrl+PgDown'
            prev_seq = 'Ctrl+PgUp'
        shortcut = QtGui.QShortcut(QtGui.QKeySequence(next_seq), self.control)
        shortcut.activated.connect(self._next_tab)
        shortcut = QtGui.QShortcut(QtGui.QKeySequence(prev_seq), self.control)
        shortcut.activated.connect(self._previous_tab)
        # Add shortcuts for switching to a specific tab.
        # Binds Ctrl+1..9 (macOS) or Alt+1..9 to tabs 0..8 via a signal mapper.
        mod = 'Ctrl+' if sys.platform == 'darwin' else 'Alt+'
        mapper = QtCore.QSignalMapper(self.control)
        mapper.mapped.connect(self._activate_tab)
        for i in xrange(1, 10):
            sequence = QtGui.QKeySequence(mod + str(i))
            shortcut = QtGui.QShortcut(sequence, self.control)
            shortcut.activated.connect(mapper.map)
            mapper.setMapping(shortcut, i - 1)
    def destroy(self):
        """ Destroy the toolkit-specific control that represents the pane.
        """
        self.control.removeEventFilter(self._filter)
        self._filter = None
        # Tear down each editor's dock widget and detach it from this pane.
        for editor in self.editors:
            editor_widget = editor.control.parent()
            self.control.destroy_editor_widget(editor_widget)
            editor.editor_area = None
        super(AdvancedEditorAreaPane, self).destroy()
    ###########################################################################
    # 'IEditorAreaPane' interface.
    ###########################################################################
    def activate_editor(self, editor):
        """ Activates the specified editor in the pane.
        """
        # The editor's control lives inside a dock widget; show and raise it,
        # then give keyboard focus to the editor itself.
        editor_widget = editor.control.parent()
        editor_widget.setVisible(True)
        editor_widget.raise_()
        editor.control.setFocus()
        self.active_editor = editor
    def add_editor(self, editor):
        """ Adds an editor to the pane.
        """
        editor.editor_area = self
        # Wrap the editor in a dock widget owned by the editor area widget.
        editor_widget = EditorWidget(editor, self.control)
        self.control.add_editor_widget(editor_widget)
        self.editors.append(editor)
    def remove_editor(self, editor):
        """ Removes an editor from the pane.
        """
        editor_widget = editor.control.parent()
        self.editors.remove(editor)
        self.control.remove_editor_widget(editor_widget)
        editor.editor_area = None
        # Clear the active editor if the last one was removed.
        if not self.editors:
            self.active_editor = None
    ###########################################################################
    # 'IAdvancedEditorAreaPane' interface.
    ###########################################################################
    def get_layout(self):
        """ Returns a LayoutItem that reflects the current state of the editors.
        """
        return self._main_window_layout.get_layout_for_area(
            QtCore.Qt.LeftDockWidgetArea)
    def set_layout(self, layout):
        """ Applies a LayoutItem to the editors in the pane.
        """
        # A None layout is a no-op (nothing to restore).
        if layout is not None:
            self._main_window_layout.set_layout_for_area(
                layout, QtCore.Qt.LeftDockWidgetArea)
    ###########################################################################
    # Private interface.
    ###########################################################################
    def _activate_tab(self, index):
        """ Activates the tab with the specified index, if there is one.
        """
        # Out-of-range indices (shortcut pressed with fewer tabs open) are
        # silently ignored.
        widgets = self.control.get_dock_widgets_ordered()
        if index < len(widgets):
            self.activate_editor(widgets[index].editor)
    def _next_tab(self):
        """ Activate the tab after the currently active tab.
        """
        if self.active_editor:
            widgets = self.control.get_dock_widgets_ordered()
            index = widgets.index(self.active_editor.control.parent()) + 1
            if index < len(widgets):
                self.activate_editor(widgets[index].editor)
    def _previous_tab(self):
        """ Activate the tab before the currently active tab.
        """
        if self.active_editor:
            widgets = self.control.get_dock_widgets_ordered()
            index = widgets.index(self.active_editor.control.parent()) - 1
            if index >= 0:
                self.activate_editor(widgets[index].editor)
    def _get_label(self, editor):
        """ Return a tab label for an editor.
        """
        # Dirty editors are marked with a leading asterisk.
        label = editor.name
        if editor.dirty:
            label = '*' + label
        return label
    #### Trait initializers ###################################################
    def __main_window_layout_default(self):
        # Default for the _main_window_layout trait, bound back to this pane.
        return EditorAreaMainWindowLayout(editor_area=self)
    #### Trait change handlers ################################################
    @on_trait_change('editors:[dirty, name]')
    def _update_label(self, editor, name, new):
        # Refresh the dock widget's tab title when name/dirty state changes.
        editor.control.parent().update_title()
    @on_trait_change('editors:tooltip')
    def _update_tooltip(self, editor, name, new):
        editor.control.parent().update_tooltip()
###############################################################################
# Auxiliary classes.
###############################################################################
class EditorAreaMainWindowLayout(MainWindowLayout):
    """ A MainWindowLayout for implementing AdvancedEditorAreaPane.
    Used for getting and setting layouts for the pane.
    """
    #### 'MainWindowLayout' interface #########################################
    # The QMainWindow being laid out is the pane's EditorAreaWidget.
    control = DelegatesTo('editor_area')
    #### 'TaskWindowLayout' interface #########################################
    # The pane this layout helper serves.
    editor_area = Instance(AdvancedEditorAreaPane)
    ###########################################################################
    # 'MainWindowLayout' abstract interface.
    ###########################################################################
    def _get_dock_widget(self, pane):
        """ Returns the QDockWidget associated with a PaneItem.
        """
        # pane.id is used as an index into the editors list; unknown indices
        # yield None. NOTE(review): a negative pane.id would silently index
        # from the end rather than falling through to None — confirm ids are
        # always non-negative.
        try:
            editor = self.editor_area.editors[pane.id]
            return editor.control.parent()
        except IndexError:
            return None
    def _get_pane(self, dock_widget):
        """ Returns a PaneItem for a QDockWidget.
        """
        # Reverse mapping: find the editor whose control the dock widget
        # hosts, and use its list position as the PaneItem id.
        for i, editor in enumerate(self.editor_area.editors):
            if editor.control == dock_widget.widget():
                return PaneItem(id=i)
        return None
class EditorAreaWidget(QtGui.QMainWindow):
    """ An auxiliary widget for implementing AdvancedEditorAreaPane.

    A QMainWindow whose dock widgets act as editor "tabs". Drag state is
    tracked manually (via event filters on the dock widgets, tab bars and
    Qt's internal rubber band) so that tabs can be torn off into separate
    dock areas.
    """

    ###########################################################################
    # 'EditorAreaWidget' interface.
    ###########################################################################

    def __init__(self, editor_area, parent=None):
        super(EditorAreaWidget, self).__init__(parent)
        self.editor_area = editor_area
        self.reset_drag()
        # Fish out the rubber band used by Qt to indicate a drop region. We use
        # it to determine which dock widget is the hover widget.
        for child in self.children():
            if isinstance(child, QtGui.QRubberBand):
                child.installEventFilter(self)
                self._rubber_band = child
                break
        # Monitor focus changes so we can set the active editor.
        QtGui.QApplication.instance().focusChanged.connect(self._focus_changed)
        # Configure the QMainWindow.
        # FIXME: Currently animation is not supported.
        self.setAcceptDrops(True)
        self.setAnimated(False)
        self.setDockNestingEnabled(True)
        self.setDocumentMode(True)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setTabPosition(QtCore.Qt.AllDockWidgetAreas,
                            QtGui.QTabWidget.North)

    def add_editor_widget(self, editor_widget):
        """ Adds a dock widget to the editor area.
        """
        editor_widget.installEventFilter(self)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, editor_widget)
        # Try to place the editor in a sensible spot: tabify it with the
        # dock widget closest to the top-left corner, if any exists.
        top_left = None
        for widget in self.get_dock_widgets():
            if top_left is None or (widget.pos().manhattanLength() <
                                    top_left.pos().manhattanLength()):
                top_left = widget
        if top_left:
            self.tabifyDockWidget(top_left, editor_widget)
            top_left.set_title_bar(False)
        # Qt will not give the dock widget focus by default.
        self.editor_area.activate_editor(editor_widget.editor)

    def destroy_editor_widget(self, editor_widget):
        """ Destroys a dock widget in the editor area.
        """
        editor_widget.hide()
        editor_widget.removeEventFilter(self)
        editor_widget.editor.destroy()
        self.removeDockWidget(editor_widget)

    def get_dock_widgets(self):
        """ Gets all visible dock widgets.
        """
        return [ child for child in self.children()
                 if isinstance(child, QtGui.QDockWidget) and child.isVisible() ]

    def get_dock_widgets_for_bar(self, tab_bar):
        """ Get the dock widgets, in order, attached to given tab bar.

        Because QMainWindow locks this info down, we have resorted to a hack:
        the dock widget whose position is nearest the tab bar is assumed to
        belong to it, and its tabified siblings complete the list.
        """
        pos = tab_bar.pos()
        key = lambda w: QtGui.QVector2D(pos - w.pos()).lengthSquared()
        all_widgets = self.get_dock_widgets()
        if all_widgets:
            current = min(all_widgets, key=key)
            widgets = self.tabifiedDockWidgets(current)
            # The current widget is not in tabifiedDockWidgets(); re-insert
            # it at its tab index.
            widgets.insert(tab_bar.currentIndex(), current)
            return widgets
        return []

    def get_dock_widgets_ordered(self, visible_only=False):
        """ Gets all dock widgets in left-to-right, top-to-bottom order.
        """
        # NOTE: uses Python 2's cmp() and list.sort(cmp=...); this module
        # is Python 2 only as written.
        def compare(one, two):
            y = cmp(one.pos().y(), two.pos().y())
            return cmp(one.pos().x(), two.pos().x()) if y == 0 else y
        # Collect tab bars (standing in for their whole tab group) and
        # un-tabified dock widgets, then sort them by screen position.
        children = []
        for child in self.children():
            if (child.isWidgetType() and child.isVisible() and
                ((isinstance(child, QtGui.QTabBar) and not visible_only) or
                 (isinstance(child, QtGui.QDockWidget) and
                  (visible_only or not self.tabifiedDockWidgets(child))))):
                children.append(child)
        children.sort(cmp=compare)
        # Expand each tab bar into its ordered dock widgets.
        widgets = []
        for child in children:
            if isinstance(child, QtGui.QTabBar):
                widgets.extend(self.get_dock_widgets_for_bar(child))
            else:
                widgets.append(child)
        return widgets

    def remove_editor_widget(self, editor_widget):
        """ Removes a dock widget from the editor area.
        """
        # Get the tabs in this editor's dock area before removing it.
        tabified = self.tabifiedDockWidgets(editor_widget)
        if tabified:
            widgets = self.get_dock_widgets_ordered()
            tabified = [widget for widget in widgets \
                        if widget in tabified or widget == editor_widget]
        visible = self.get_dock_widgets_ordered(visible_only=True)
        # Destroy and remove the editor. Get the active widget first, since it
        # may be destroyed!
        next_widget = self.editor_area.active_editor.control.parent()
        self.destroy_editor_widget(editor_widget)
        # Ensure that the appropriate editor is activated: prefer a sibling
        # tab, falling back to any visible widget.
        editor_area = self.editor_area
        choices = tabified if len(tabified) >= 2 else visible
        if len(choices) >= 2 and editor_widget == next_widget:
            i = choices.index(editor_widget)
            next_widget = choices[i+1] if i+1 < len(choices) else choices[i-1]
            editor_area.activate_editor(next_widget.editor)
        # Update tab bar hide state: a group of two collapses to a single
        # widget, which needs its own title bar back.
        if len(tabified) == 2:
            next_widget.editor.control.parent().set_title_bar(True)
        if editor_area.hide_tab_bar and len(editor_area.editors) == 1:
            editor_area.editors[0].control.parent().set_title_bar(False)

    def reset_drag(self):
        """ Clear out all drag state.
        """
        self._drag_widget = None
        self._hover_widget = None
        self._tear_handled = False
        self._tear_widgets = []

    def set_hover_widget(self, widget):
        """ Set the dock widget being 'hovered over' during a drag.

        Toggles title bars so torn-off / hovered widgets look right while
        the drag is in progress.
        """
        old_widget = self._hover_widget
        self._hover_widget = widget
        if old_widget:
            if old_widget in self._tear_widgets:
                if len(self._tear_widgets) == 1:
                    old_widget.set_title_bar(True)
            elif not self.tabifiedDockWidgets(old_widget):
                old_widget.set_title_bar(True)
        if widget:
            if widget in self._tear_widgets:
                if len(self._tear_widgets) == 1:
                    widget.set_title_bar(False)
            elif len(self.tabifiedDockWidgets(widget)) == 1:
                widget.set_title_bar(False)

    ###########################################################################
    # Event handlers.
    ###########################################################################

    def childEvent(self, event):
        """ Reimplemented to gain access to the tab bars as they are created.
        """
        super(EditorAreaWidget, self).childEvent(event)
        if event.polished():
            child = event.child()
            if isinstance(child, QtGui.QTabBar):
                # Use UniqueConnections since Qt recycles the tab bars.
                child.installEventFilter(self)
                child.currentChanged.connect(self._tab_index_changed,
                                             QtCore.Qt.UniqueConnection)
                child.setTabsClosable(True)
                child.setUsesScrollButtons(True)
                child.tabCloseRequested.connect(self._tab_close_requested,
                                                QtCore.Qt.UniqueConnection)
                # FIXME: We would like to have the tabs movable, but this
                # confuses the QMainWindowLayout. For now, we disable this.
                #child.setMovable(True)

    def eventFilter(self, obj, event):
        """ Reimplemented to dispatch to sub-handlers.
        """
        if isinstance(obj, QtGui.QDockWidget):
            return self._filter_dock_widget(obj, event)
        elif isinstance(obj, QtGui.QRubberBand):
            return self._filter_rubber_band(obj, event)
        elif isinstance(obj, QtGui.QTabBar):
            return self._filter_tab_bar(obj, event)
        return False

    def _filter_dock_widget(self, widget, event):
        """ Support hover widget state tracking.
        """
        if self._drag_widget and event.type() == QtCore.QEvent.Resize:
            # The rubber band snaps to the geometry of the drop target;
            # a matching geometry identifies the hover widget.
            if widget.geometry() == self._rubber_band.geometry():
                self.set_hover_widget(widget)
        elif self._drag_widget == widget and event.type() == QtCore.QEvent.Move:
            # First move of a torn-off tab: restore the title bar on the
            # single widget left behind. Done once per drag.
            if len(self._tear_widgets) == 1 and not self._tear_handled:
                widget = self._tear_widgets[0]
                widget.set_title_bar(True)
                self._tear_handled = True
        elif self._drag_widget == widget and \
                 event.type() == QtCore.QEvent.MouseButtonRelease:
            self.reset_drag()
        return False

    def _filter_rubber_band(self, rubber_band, event):
        """ Support hover widget state tracking.
        """
        if self._drag_widget and event.type() in (QtCore.QEvent.Resize,
                                                  QtCore.QEvent.Move):
            self.set_hover_widget(None)
        return False

    def _filter_tab_bar(self, tab_bar, event):
        """ Support 'tearing off' a tab.
        """
        if event.type() == QtCore.QEvent.MouseMove:
            if tab_bar.rect().contains(event.pos()):
                self.reset_drag()
            else:
                if not self._drag_widget:
                    # Begin the tear: detach the current tab's widget and
                    # synthesize a mouse press on it so Qt starts a dock
                    # drag for that widget.
                    index = tab_bar.currentIndex()
                    self._tear_widgets = self.get_dock_widgets_for_bar(tab_bar)
                    self._drag_widget = widget = self._tear_widgets.pop(index)
                    pos = QtCore.QPoint(0, 0)
                    press_event = QtGui.QMouseEvent(
                        QtCore.QEvent.MouseButtonPress, pos,
                        widget.mapToGlobal(pos), QtCore.Qt.LeftButton,
                        QtCore.Qt.LeftButton, event.modifiers())
                    QtCore.QCoreApplication.sendEvent(widget, press_event)
                    return True
                # Forward subsequent moves to the widget being dragged.
                event = QtGui.QMouseEvent(
                    QtCore.QEvent.MouseMove, event.pos(), event.globalPos(),
                    event.button(), event.buttons(), event.modifiers())
                QtCore.QCoreApplication.sendEvent(self._drag_widget, event)
                return True
        elif event.type() == QtCore.QEvent.ToolTip:
            # QDockAreaLayout forces the tooltips to be QDockWidget.windowTitle,
            # so we provide the tooltips manually.
            widgets = self.get_dock_widgets_for_bar(tab_bar)
            index = tab_bar.tabAt(event.pos())
            tooltip = widgets[index].editor.tooltip if index >= 0 else ''
            if tooltip:
                QtGui.QToolTip.showText(event.globalPos(), tooltip, tab_bar)
            return True
        return False

    def focusInEvent(self, event):
        """ Assign focus to the active editor, if possible.
        """
        active_editor = self.editor_area.active_editor
        if active_editor:
            set_focus(active_editor.control)

    ###########################################################################
    # Signal handlers.
    ###########################################################################

    def _focus_changed(self, old, new):
        """ Handle an application-level focus change.
        """
        if new is not None:
            for editor in self.editor_area.editors:
                control = editor.control
                if control is not None and control.isAncestorOf(new):
                    # NOTE(review): 'focused' is assigned but never used.
                    self.editor_area.active_editor = focused = editor
                    break
            else:
                # Focus moved outside all editors; clear the active editor
                # only if there are none left.
                if not self.editor_area.editors:
                    self.editor_area.active_editor = None

    def _tab_index_changed(self, index):
        """ Handle a tab selection.
        """
        widgets = self.get_dock_widgets_for_bar(self.sender())
        if index < len(widgets):
            editor_widget = widgets[index]
            editor_widget.editor.control.setFocus()

    def _tab_close_requested(self, index):
        """ Handle a tab close request.
        """
        editor_widget = self.get_dock_widgets_for_bar(self.sender())[index]
        editor_widget.editor.close()
class EditorWidget(QtGui.QDockWidget):
    """ An auxiliary widget for implementing AdvancedEditorAreaPane.

    A QDockWidget that hosts a single editor's control and keeps its
    title, tooltip and title-bar visibility in sync with the editor.
    """

    def __init__(self, editor, parent=None):
        super(EditorWidget, self).__init__(parent)
        self.editor = editor
        self.editor.create(self)
        self.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea)
        self.setFeatures(QtGui.QDockWidget.DockWidgetClosable |
                         QtGui.QDockWidget.DockWidgetMovable)
        self.setWidget(editor.control)
        self.update_title()
        # Update the minimum size: reserve room for the dock handle above
        # the editor control.
        contents_minsize = editor.control.minimumSize()
        style = self.style()
        contents_minsize.setHeight(contents_minsize.height()
            + style.pixelMetric(style.PM_DockWidgetHandleExtent))
        self.setMinimumSize(contents_minsize)
        self.dockLocationChanged.connect(self.update_title_bar)
        self.visibilityChanged.connect(self.update_title_bar)

    def update_title(self):
        """ Sync the window title (and custom title bar tab, if present)
        with the editor's label.
        """
        title = self.editor.editor_area._get_label(self.editor)
        self.setWindowTitle(title)
        title_bar = self.titleBarWidget()
        if isinstance(title_bar, EditorTitleBarWidget):
            title_bar.setTabText(0, title)

    def update_tooltip(self):
        """ Sync the custom title bar's tooltip with the editor's tooltip.
        """
        title_bar = self.titleBarWidget()
        if isinstance(title_bar, EditorTitleBarWidget):
            title_bar.setTabToolTip(0, self.editor.tooltip)

    def update_title_bar(self):
        """ Show a title bar only when this widget is not tabified, and not
        while it is being torn off.
        """
        if self not in self.parent()._tear_widgets:
            tabbed = self.parent().tabifiedDockWidgets(self)
            self.set_title_bar(not tabbed)

    def set_title_bar(self, title_bar):
        """ Install (title_bar=True) or remove (False) the custom tab-style
        title bar, honouring the pane's 'hide_tab_bar' setting.
        """
        current = self.titleBarWidget()
        editor_area = self.editor.editor_area
        if title_bar and editor_area and (not editor_area.hide_tab_bar or
                                          len(editor_area.editors) > 1):
            if not isinstance(current, EditorTitleBarWidget):
                self.setTitleBarWidget(EditorTitleBarWidget(self))
        elif current is None or isinstance(current, EditorTitleBarWidget):
            # An empty QWidget suppresses the native title bar entirely.
            self.setTitleBarWidget(QtGui.QWidget())
class EditorTitleBarWidget(QtGui.QTabBar):
    """ An auxiliary widget for implementing AdvancedEditorAreaPane.

    A single-tab QTabBar used as a QDockWidget title bar, so a lone editor
    still shows a tab-like header with a close button.
    """

    def __init__(self, editor_widget):
        super(EditorTitleBarWidget, self).__init__(editor_widget)
        self.addTab(editor_widget.windowTitle())
        self.setTabToolTip(0, editor_widget.editor.tooltip)
        self.setDocumentMode(True)
        self.setExpanding(False)
        self.setTabsClosable(True)
        self.tabCloseRequested.connect(editor_widget.editor.close)

    def mousePressEvent(self, event):
        # Record the enclosing dock widget as the drag widget on the
        # EditorAreaWidget, then ignore so the event propagates and Qt's
        # normal dock-drag handling takes over.
        self.parent().parent()._drag_widget = self.parent()
        event.ignore()

    def mouseMoveEvent(self, event):
        # Propagate to the dock widget so dragging by the title bar works.
        event.ignore()

    def mouseReleaseEvent(self, event):
        event.ignore()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the API endpoint."""
import random
import re
from six.moves import StringIO
import boto
import boto.connection
from boto.ec2 import regioninfo
from boto import exception as boto_exc
# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
if hasattr(boto.connection, 'HTTPResponse'):
httplib = boto.connection
else:
from six.moves import http_client as httplib
import fixtures
from oslo_utils import versionutils
import webob
from nova.api import auth
from nova.api import ec2
from nova.api.ec2 import ec2utils
from nova import block_device
from nova import context
from nova import exception
from nova import test
from nova.tests.unit import matchers
class FakeHttplibSocket(object):
    """A trivial stand-in for a socket, for use with httplib.HTTPResponse.

    Wraps a canned response string in an in-memory buffer; HTTPResponse
    "reads from the socket" by reading that buffer via makefile().
    """

    def __init__(self, response_string):
        # Keep the raw text so tests can inspect the complete response.
        self.response_string = response_string
        self._buffer = StringIO(self.response_string)

    def makefile(self, _mode, _other):
        """Returns the socket's internal buffer."""
        return self._buffer
class FakeHttplibConnection(object):
    """A fake httplib.HTTPConnection for boto to use

    requests made via this connection actually get translated and routed into
    our WSGI app, we then wait for the response and turn it back into
    the HTTPResponse that boto expects.
    """
    def __init__(self, app, host, is_secure=False):
        # app: the WSGI application requests are routed into.
        # host: host string used as the request's Host.
        self.app = app
        self.host = host

    def request(self, method, path, data, headers):
        """Translate an HTTP request into a WSGI call and capture the
        response as an httplib.HTTPResponse on self.http_response.
        """
        req = webob.Request.blank(path)
        req.method = method
        req.body = data
        req.headers = headers
        req.headers['Accept'] = 'text/html'
        req.host = self.host
        # Call the WSGI app, get the HTTP response
        resp = str(req.get_response(self.app))
        # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
        # guess that's a function the web server usually provides.
        resp = "HTTP/1.0 %s" % resp
        self.sock = FakeHttplibSocket(resp)
        self.http_response = httplib.HTTPResponse(self.sock)
        # NOTE(vish): boto is accessing private variables for some reason
        self._HTTPConnection__response = self.http_response
        self.http_response.begin()

    def getresponse(self):
        """Return the parsed HTTPResponse from the last request."""
        return self.http_response

    def getresponsebody(self):
        """Return the raw (unparsed) response text from the last request."""
        return self.sock.response_string

    def close(self):
        """Required for compatibility with boto/tornado."""
        pass
class XmlConversionTestCase(test.NoDBTestCase):
    """Unit test api xml conversion."""

    def test_number_conversion(self):
        """_try_convert should map EC2 string parameters onto the natural
        Python value, leaving unparseable strings untouched.
        """
        conv = ec2utils._try_convert
        self.assertIsNone(conv('None'))
        # (input string, expected converted value), checked in order.
        cases = [
            ('True', True),
            ('TRUE', True),
            ('true', True),
            ('False', False),
            ('FALSE', False),
            ('false', False),
            ('0', 0),
            ('42', 42),
            ('3.14', 3.14),
            ('-57.12', -57.12),
            ('0x57', 0x57),
            ('-0x57', -0x57),
            ('-', '-'),
            ('-0', 0),
            ('0.0', 0.0),
            ('1e-8', 0.0),
            ('-1e-8', 0.0),
            ('0xDD8G', '0xDD8G'),
            ('0XDD8G', '0XDD8G'),
            ('-stringy', '-stringy'),
            ('stringy', 'stringy'),
            ('add', 'add'),
            ('remove', 'remove'),
            ('', ''),
        ]
        for raw, expected in cases:
            self.assertEqual(conv(raw), expected)
class Ec2utilsTestCase(test.NoDBTestCase):
    """Unit tests for ec2utils id conversion, parameter parsing and
    block-device helpers.
    """

    def test_ec2_id_to_id(self):
        # EC2 ids encode the integer id in hex after the resource prefix.
        self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
        self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
        self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
        self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)

    def test_bad_ec2_id(self):
        self.assertRaises(exception.InvalidEc2Id,
                          ec2utils.ec2_id_to_id,
                          'badone')

    def test_id_to_ec2_id(self):
        self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
        self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
        self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
        self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')

    def test_dict_from_dotted_str(self):
        # Dotted EC2 query parameters become nested dicts with snake_case
        # keys and converted values.
        in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
                  ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
                  ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
                  ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
                  ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
                  ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
        expected_dict = {
            'block_device_mapping': {
                '1': {'device_name': '/dev/sda1',
                      'ebs': {'snapshot_id': 'snap-0000001c',
                              'volume_size': 80,
                              'delete_on_termination': False}},
                '2': {'device_name': '/dev/sdc',
                      'virtual_name': 'ephemeral0'}}}
        out_dict = ec2utils.dict_from_dotted_str(in_str)
        self.assertThat(out_dict, matchers.DictMatches(expected_dict))

    def test_properties_root_defice_name(self):
        # NOTE(review): "defice" in the method name looks like a typo for
        # "device"; left unchanged since the name is the test's identity.
        mappings = [{"device": "/dev/sda1", "virtual": "root"}]
        properties0 = {'mappings': mappings}
        properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
        root_device_name = block_device.properties_root_device_name(
            properties0)
        self.assertEqual(root_device_name, '/dev/sda1')
        root_device_name = block_device.properties_root_device_name(
            properties1)
        self.assertEqual(root_device_name, '/dev/sdb')

    def test_regex_from_ec2_regex(self):
        def _test_re(ec2_regex, expected, literal, match=True):
            # Check both the generated regex text and whether it matches
            # the sample literal.
            regex = ec2utils.regex_from_ec2_regex(ec2_regex)
            self.assertEqual(regex, expected)
            if match:
                self.assertIsNotNone(re.match(regex, literal))
            else:
                self.assertIsNone(re.match(regex, literal))
        # wildcards
        _test_re('foo', '\Afoo\Z(?s)', 'foo')
        _test_re('foo', '\Afoo\Z(?s)', 'baz', match=False)
        _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar')
        _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar', match=False)
        _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'foo QUUX bar')
        # backslashes and escaped wildcards
        _test_re('foo\\', '\Afoo\\\\\Z(?s)', 'foo\\')
        _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'zork QUUX bar', match=False)
        _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo?bar')
        _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo bar', match=False)
        _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo*bar')
        _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo bar', match=False)
        # analog to the example given in the EC2 API docs
        ec2_regex = '\*nova\?\\end'
        expected = r'\A[*]nova[?]\\end\Z(?s)'
        literal = r'*nova?\end'
        _test_re(ec2_regex, expected, literal)

    def test_mapping_prepend_dev(self):
        # Bare device names (except 'ami' and 'root' virtuals) should gain
        # a '/dev/' prefix; already-prefixed names are left alone.
        mappings = [
            {'virtual': 'ami',
             'device': 'sda1'},
            {'virtual': 'root',
             'device': '/dev/sda1'},
            {'virtual': 'swap',
             'device': 'sdb1'},
            {'virtual': 'swap',
             'device': '/dev/sdb2'},
            {'virtual': 'ephemeral0',
             'device': 'sdc1'},
            {'virtual': 'ephemeral1',
             'device': '/dev/sdc1'}]
        expected_result = [
            {'virtual': 'ami',
             'device': 'sda1'},
            {'virtual': 'root',
             'device': '/dev/sda1'},
            {'virtual': 'swap',
             'device': '/dev/sdb1'},
            {'virtual': 'swap',
             'device': '/dev/sdb2'},
            {'virtual': 'ephemeral0',
             'device': '/dev/sdc1'},
            {'virtual': 'ephemeral1',
             'device': '/dev/sdc1'}]
        self.assertThat(block_device.mappings_prepend_dev(mappings),
                        matchers.DictListMatches(expected_result))
class ApiEc2TestCase(test.TestCase):
    """Unit test for the cloud controller on an EC2 API.

    Each test drives a real boto EC2 connection whose HTTP layer is
    replaced (via mox) with FakeHttplibConnection, so requests are routed
    into the in-process WSGI app rather than over the network.
    """

    def setUp(self):
        super(ApiEc2TestCase, self).setUp()
        self.host = '127.0.0.1'
        # NOTE(vish): skipping the Authorizer
        roles = ['sysadmin', 'netadmin']
        ctxt = context.RequestContext('fake', 'fake', roles=roles)
        # Full EC2 WSGI pipeline minus auth, terminating in the
        # CloudController.
        self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
            ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
            ), 'nova.api.ec2.cloud.CloudController'))))
        self.useFixture(fixtures.FakeLogger('boto'))

    def expect_http(self, host=None, is_secure=False, api_version=None):
        """Returns a new EC2 connection.

        Stubs out new_http_connection on the boto connection to return the
        fake HTTP connection; must be called (followed by mox.ReplayAll)
        before every request a test issues.
        """
        self.ec2 = boto.connect_ec2(
            aws_access_key_id='fake',
            aws_secret_access_key='fake',
            is_secure=False,
            region=regioninfo.RegionInfo(None, 'test', self.host),
            port=8773,
            path='/services/Cloud')
        if api_version:
            self.ec2.APIVersion = api_version
        self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
        self.http = FakeHttplibConnection(
            self.app, '%s:8773' % (self.host), False)
        # The signature of new_http_connection varies across boto versions.
        if versionutils.is_compatible('2.14', boto.Version, same_major=False):
            self.ec2.new_http_connection(host or self.host, 8773,
                is_secure).AndReturn(self.http)
        elif versionutils.is_compatible('2', boto.Version, same_major=False):
            self.ec2.new_http_connection(host or '%s:8773' % (self.host),
                is_secure).AndReturn(self.http)
        else:
            self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
        return self.http

    def test_xmlns_version_matches_request_version(self):
        self.expect_http(api_version='2010-10-30')
        self.mox.ReplayAll()
        # Any request should be fine
        self.ec2.get_all_instances()
        self.assertIn(self.ec2.APIVersion, self.http.getresponsebody(),
                      'The version in the xmlns of the response does '
                      'not match the API version given in the request.')

    def test_describe_instances(self):
        """Test that, after creating a user and a project, the describe
        instances call to the API works properly.
        """
        self.expect_http()
        self.mox.ReplayAll()
        self.assertEqual(self.ec2.get_all_instances(), [])

    def test_terminate_invalid_instance(self):
        # Attempt to terminate an invalid instance.
        self.expect_http()
        self.mox.ReplayAll()
        self.assertRaises(boto_exc.EC2ResponseError,
                          self.ec2.terminate_instances, "i-00000005")

    def test_get_all_key_pairs(self):
        """Test that, after creating a user and project and generating
        a key pair, that the API call to list key pairs works properly.
        """
        keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                          for x in range(random.randint(4, 8)))
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.create_key_pair(keyname)
        rv = self.ec2.get_all_key_pairs()
        results = [k for k in rv if k.name == keyname]
        self.assertEqual(len(results), 1)

    def test_create_duplicate_key_pair(self):
        """Test that, after successfully generating a keypair,
        requesting a second keypair with the same name fails sanely.
        """
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.create_key_pair('test')
        try:
            self.ec2.create_key_pair('test')
        except boto_exc.EC2ResponseError as e:
            if e.code == 'InvalidKeyPair.Duplicate':
                pass
            else:
                # Force a clear failure message naming the expected code.
                self.assertEqual('InvalidKeyPair.Duplicate', e.code)
        else:
            self.fail('Exception not raised.')

    def test_get_all_security_groups(self):
        # Test that we can retrieve security groups.
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0].name, 'default')

    def test_create_delete_security_group(self):
        # Test that we can create a security group.
        self.expect_http()
        self.mox.ReplayAll()
        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))
        self.ec2.create_security_group(security_group_name, 'test group')
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        self.assertEqual(len(rv), 2)
        self.assertIn(security_group_name, [group.name for group in rv])
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.delete_security_group(security_group_name)

    def test_group_name_valid_chars_security_group(self):
        """Test that we sanely handle invalid security group names.

        EC2 API Spec states we should only accept alphanumeric characters,
        spaces, dashes, and underscores. Amazon implementation
        accepts more characters - so, [:print:] is ok.
        """
        bad_strict_ec2 = "aa \t\x01\x02\x7f"
        bad_amazon_ec2 = "aa #^% -=99"
        # (ec2_strict_validation flag, group name, description) tuples that
        # must be rejected.
        test_raise = [
            (True, bad_amazon_ec2, "test desc"),
            (True, "test name", bad_amazon_ec2),
            (False, bad_strict_ec2, "test desc"),
        ]
        for t in test_raise:
            self.expect_http()
            self.mox.ReplayAll()
            self.flags(ec2_strict_validation=t[0])
            self.assertRaises(boto_exc.EC2ResponseError,
                              self.ec2.create_security_group,
                              t[1],
                              t[2])
        # Same shape, but these must be accepted when validation is lax.
        test_accept = [
            (False, bad_amazon_ec2, "test desc"),
            (False, "test name", bad_amazon_ec2),
        ]
        for t in test_accept:
            self.expect_http()
            self.mox.ReplayAll()
            self.flags(ec2_strict_validation=t[0])
            self.ec2.create_security_group(t[1], t[2])
            self.expect_http()
            self.mox.ReplayAll()
            self.ec2.delete_security_group(t[1])

    def test_group_name_valid_length_security_group(self):
        """Test that we sanely handle invalid security group names.

        API Spec states that the length should not exceed 255 char.
        """
        self.expect_http()
        self.mox.ReplayAll()
        # Test block group_name > 255 chars
        security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
                                      for x in range(random.randint(256, 266)))
        self.assertRaises(boto_exc.EC2ResponseError,
                          self.ec2.create_security_group,
                          security_group_name,
                          'test group')

    def test_authorize_revoke_security_group_cidr(self):
        """Test that we can add and remove CIDR based rules
        to a security group
        """
        self.expect_http()
        self.mox.ReplayAll()
        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))
        group = self.ec2.create_security_group(security_group_name,
                                               'test group')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        # Eight valid authorizations; checked by count further down.
        group.authorize('tcp', 80, 81, '0.0.0.0/0')
        group.authorize('icmp', -1, -1, '0.0.0.0/0')
        group.authorize('udp', 80, 81, '0.0.0.0/0')
        group.authorize('tcp', 1, 65535, '0.0.0.0/0')
        group.authorize('udp', 1, 65535, '0.0.0.0/0')
        group.authorize('icmp', 1, 0, '0.0.0.0/0')
        group.authorize('icmp', 0, 1, '0.0.0.0/0')
        group.authorize('icmp', 0, 0, '0.0.0.0/0')
        def _assert(message, *args):
            # Helper: authorize(*args) must fail with HTTP 400 and an error
            # message containing `message`.
            try:
                group.authorize(*args)
            except boto_exc.EC2ResponseError as e:
                self.assertEqual(e.status, 400, 'Expected status to be 400')
                self.assertIn(message, e.error_message)
            else:
                raise self.failureException('EC2ResponseError not raised')
        # Invalid CIDR address
        _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
        # Missing ports
        _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
        # from port cannot be greater than to port
        _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
        # For tcp, negative values are not allowed
        _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
        # For tcp, valid port range 1-65535
        _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
        # Invalid Cidr for ICMP type
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
        # Invalid protocol
        _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
        # Invalid port
        _assert('Invalid input received: To and From ports must be integers',
                'tcp', " ", "81", '0.0.0.0/0')
        # Invalid icmp port
        _assert('Invalid input received: '
                'Type and Code must be integers for ICMP protocol type',
                'icmp', " ", "81", '0.0.0.0/0')
        # Invalid CIDR Address
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
        # Invalid CIDR Address
        _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
        # Invalid Cidr ports
        _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        group = [grp for grp in rv if grp.name == security_group_name][0]
        self.assertEqual(len(group.rules), 8)
        self.assertEqual(int(group.rules[0].from_port), 80)
        self.assertEqual(int(group.rules[0].to_port), 81)
        self.assertEqual(len(group.rules[0].grants), 1)
        self.assertEqual(str(group.rules[0].grants[0]), '0.0.0.0/0')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        # Revoke everything that was authorized above.
        group.revoke('tcp', 80, 81, '0.0.0.0/0')
        group.revoke('icmp', -1, -1, '0.0.0.0/0')
        group.revoke('udp', 80, 81, '0.0.0.0/0')
        group.revoke('tcp', 1, 65535, '0.0.0.0/0')
        group.revoke('udp', 1, 65535, '0.0.0.0/0')
        group.revoke('icmp', 1, 0, '0.0.0.0/0')
        group.revoke('icmp', 0, 1, '0.0.0.0/0')
        group.revoke('icmp', 0, 0, '0.0.0.0/0')
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.delete_security_group(security_group_name)
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        rv = self.ec2.get_all_security_groups()
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0].name, 'default')

    def test_authorize_revoke_security_group_cidr_v6(self):
        """Test that we can add and remove CIDR based rules
        to a security group for IPv6
        """
        self.expect_http()
        self.mox.ReplayAll()
        security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
                                      for x in range(random.randint(4, 8)))
        group = self.ec2.create_security_group(security_group_name,
                                               'test group')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        group.authorize('tcp', 80, 81, '::/0')
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        group = [grp for grp in rv if grp.name == security_group_name][0]
        self.assertEqual(len(group.rules), 1)
        self.assertEqual(int(group.rules[0].from_port), 80)
        self.assertEqual(int(group.rules[0].to_port), 81)
        self.assertEqual(len(group.rules[0].grants), 1)
        self.assertEqual(str(group.rules[0].grants[0]), '::/0')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        group.revoke('tcp', 80, 81, '::/0')
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.delete_security_group(security_group_name)
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        rv = self.ec2.get_all_security_groups()
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0].name, 'default')

    def test_authorize_revoke_security_group_foreign_group(self):
        """Test that we can grant and revoke another security group access
        to a security group
        """
        self.expect_http()
        self.mox.ReplayAll()
        rand_string = 'sdiuisudfsdcnpaqwertasd'
        security_group_name = "".join(random.choice(rand_string)
                                      for x in range(random.randint(4, 8)))
        other_security_group_name = "".join(random.choice(rand_string)
                                            for x in range(random.randint(4, 8)))
        group = self.ec2.create_security_group(security_group_name,
                                               'test group')
        self.expect_http()
        self.mox.ReplayAll()
        other_group = self.ec2.create_security_group(other_security_group_name,
                                                     'some other group')
        self.expect_http()
        self.mox.ReplayAll()
        group.connection = self.ec2
        group.authorize(src_group=other_group)
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        # I don't bother checking that we actually find it here,
        # because the create/delete unit test further up should
        # be good enough for that.
        for group in rv:
            if group.name == security_group_name:
                self.assertEqual(len(group.rules), 3)
                self.assertEqual(len(group.rules[0].grants), 1)
                self.assertEqual(str(group.rules[0].grants[0]),
                                 '%s-%s' % (other_security_group_name, 'fake'))
        self.expect_http()
        self.mox.ReplayAll()
        rv = self.ec2.get_all_security_groups()
        for group in rv:
            if group.name == security_group_name:
                self.expect_http()
                self.mox.ReplayAll()
                group.connection = self.ec2
                group.revoke(src_group=other_group)
        self.expect_http()
        self.mox.ReplayAll()
        self.ec2.delete_security_group(security_group_name)
        self.ec2.delete_security_group(other_security_group_name)
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and functions that manage rights for various user actions."""
import logging
from core.domain import activity_services
from core.domain import config_domain
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import feconf
import utils
# Platform-specific service for querying the currently logged-in user.
current_user_services = models.Registry.import_current_user_services()
# Storage models for the two activity types whose rights this module manages.
(collection_models, exp_models,) = models.Registry.import_models([
    models.NAMES.collection, models.NAMES.exploration
])

# IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve
# backward-compatibility with previous exploration snapshots in the datastore.
# Do not modify the definitions of CMD keys that already exist.
CMD_CREATE_NEW = 'create_new'
CMD_CHANGE_ROLE = 'change_role'
CMD_CHANGE_EXPLORATION_STATUS = 'change_exploration_status'
CMD_CHANGE_COLLECTION_STATUS = 'change_collection_status'
CMD_CHANGE_PRIVATE_VIEWABILITY = 'change_private_viewability'
CMD_RELEASE_OWNERSHIP = 'release_ownership'
CMD_UPDATE_FIRST_PUBLISHED_MSEC = 'update_first_published_msec'

# Publication statuses an activity may be in; aliased from feconf.
ACTIVITY_STATUS_PRIVATE = feconf.ACTIVITY_STATUS_PRIVATE
ACTIVITY_STATUS_PUBLIC = feconf.ACTIVITY_STATUS_PUBLIC
ACTIVITY_STATUS_PUBLICIZED = feconf.ACTIVITY_STATUS_PUBLICIZED

# Role names used in commit logs and role-assignment commands.
ROLE_OWNER = 'owner'
ROLE_EDITOR = 'editor'
ROLE_VIEWER = 'viewer'
ROLE_NONE = 'none'

ROLE_ADMIN = 'admin'
ROLE_MODERATOR = 'moderator'
class ActivityRights(object):
    """Domain object for the rights/publication status of an activity (an
    exploration or a collection).
    """

    def __init__(self, exploration_id, owner_ids, editor_ids, viewer_ids,
                 community_owned=False, cloned_from=None,
                 status=ACTIVITY_STATUS_PRIVATE,
                 viewable_if_private=False, first_published_msec=None):
        # NOTE: despite the parameter name, this id may also be a collection
        # id -- this class is shared by both activity types.
        self.id = exploration_id
        self.owner_ids = owner_ids
        self.editor_ids = editor_ids
        self.viewer_ids = viewer_ids
        self.community_owned = community_owned
        self.cloned_from = cloned_from
        self.status = status
        self.viewable_if_private = viewable_if_private
        self.first_published_msec = first_published_msec

    def validate(self):
        """Validates an ActivityRights object.

        Raises:
            utils.ValidationError: if any of the owners, editors and viewers
                lists overlap, if a community-owned activity has owners,
                editors or viewers specified or is private, or if a
                non-private activity has viewers specified.
        """
        if self.community_owned:
            if self.owner_ids or self.editor_ids or self.viewer_ids:
                raise utils.ValidationError(
                    'Community-owned explorations should have no owners, '
                    'editors or viewers specified.')

        if self.community_owned and self.status == ACTIVITY_STATUS_PRIVATE:
            raise utils.ValidationError(
                'Community-owned explorations cannot be private.')

        if self.status != ACTIVITY_STATUS_PRIVATE and self.viewer_ids:
            raise utils.ValidationError(
                'Public explorations should have no viewers specified.')

        owner_editor = set(self.owner_ids).intersection(set(self.editor_ids))
        owner_viewer = set(self.owner_ids).intersection(set(self.viewer_ids))
        editor_viewer = set(self.editor_ids).intersection(set(self.viewer_ids))
        if owner_editor:
            raise utils.ValidationError(
                'A user cannot be both an owner and an editor: %s' %
                owner_editor)
        if owner_viewer:
            raise utils.ValidationError(
                'A user cannot be both an owner and a viewer: %s' %
                owner_viewer)
        if editor_viewer:
            # Bug fix: this message previously read 'an owner and an editor',
            # which was a copy-paste error from the owner_editor branch.
            raise utils.ValidationError(
                'A user cannot be both an editor and a viewer: %s' %
                editor_viewer)

    def to_dict(self):
        """Returns a dict suitable for use by the frontend.

        For community-owned activities the user-id lists are intentionally
        hidden (reported as empty); otherwise the ids are converted to
        human-readable usernames.
        """
        if self.community_owned:
            return {
                'cloned_from': self.cloned_from,
                'status': self.status,
                'community_owned': True,
                'owner_names': [],
                'editor_names': [],
                'viewer_names': [],
                'viewable_if_private': self.viewable_if_private,
            }
        else:
            return {
                'cloned_from': self.cloned_from,
                'status': self.status,
                'community_owned': False,
                'owner_names': user_services.get_human_readable_user_ids(
                    self.owner_ids),
                'editor_names': user_services.get_human_readable_user_ids(
                    self.editor_ids),
                'viewer_names': user_services.get_human_readable_user_ids(
                    self.viewer_ids),
                'viewable_if_private': self.viewable_if_private,
            }
def _get_activity_rights_from_model(activity_rights_model, activity_type):
    """Constructs an ActivityRights domain object from a rights model.

    The cloned_from field is only meaningful for explorations, so it is
    dropped (set to None) for every other activity type.
    """
    if activity_type == feconf.ACTIVITY_TYPE_EXPLORATION:
        cloned_from = activity_rights_model.cloned_from
    else:
        cloned_from = None

    return ActivityRights(
        activity_rights_model.id,
        activity_rights_model.owner_ids,
        activity_rights_model.editor_ids,
        activity_rights_model.viewer_ids,
        community_owned=activity_rights_model.community_owned,
        cloned_from=cloned_from,
        status=activity_rights_model.status,
        viewable_if_private=activity_rights_model.viewable_if_private,
        first_published_msec=activity_rights_model.first_published_msec)
def _save_activity_rights(
        committer_id, activity_rights, activity_type, commit_message,
        commit_cmds):
    """Saves an ExplorationRights or CollectionRights domain object to the
    datastore.

    Args:
    - committer_id: str. The user_id of the user performing the commit.
    - activity_rights: ActivityRights. The validated rights to persist.
    - activity_type: str. One of feconf.ACTIVITY_TYPE_EXPLORATION or
      feconf.ACTIVITY_TYPE_COLLECTION.
    - commit_message: str. Human-readable commit message.
    - commit_cmds: list(dict). Commands describing this commit.

    Raises:
        Exception: if activity_type is not a recognised activity type.
    """
    activity_rights.validate()

    if activity_type == feconf.ACTIVITY_TYPE_EXPLORATION:
        model_cls = exp_models.ExplorationRightsModel
    elif activity_type == feconf.ACTIVITY_TYPE_COLLECTION:
        model_cls = collection_models.CollectionRightsModel
    else:
        # Previously an unknown type fell through and raised an opaque
        # NameError on model_cls below; fail fast with a clear message,
        # mirroring _get_activity_rights.
        raise Exception(
            'Cannot save activity rights for unknown activity type: %s' % (
                activity_type))
    model = model_cls.get(activity_rights.id, strict=False)

    model.owner_ids = activity_rights.owner_ids
    model.editor_ids = activity_rights.editor_ids
    model.viewer_ids = activity_rights.viewer_ids
    model.community_owned = activity_rights.community_owned
    model.status = activity_rights.status
    model.viewable_if_private = activity_rights.viewable_if_private
    model.first_published_msec = activity_rights.first_published_msec

    model.commit(committer_id, commit_message, commit_cmds)
# Update summary of changed activity (note that the activity rights id is the
# same as the activity id).
# TODO(msl): get rid of inline imports by refactoring code
def _update_exploration_summary(activity_rights):
    """Regenerates the stored summary of the exploration whose rights are
    given. The summary id is the same as the rights id.
    """
    # Imported inside the function to avoid a circular module import.
    from core.domain import exp_services
    exp_services.update_exploration_summary(activity_rights.id, None)
def _update_collection_summary(activity_rights):
    """Regenerates the stored summary of the collection whose rights are
    given. The summary id is the same as the rights id.
    """
    # Imported inside the function to avoid a circular module import.
    from core.domain import collection_services
    collection_services.update_collection_summary(activity_rights.id, None)
def _update_activity_summary(activity_type, activity_rights):
    """Dispatches a summary regeneration based on the activity type.

    Unknown activity types are silently ignored here; callers are expected to
    have validated the type already.
    """
    if activity_type == feconf.ACTIVITY_TYPE_COLLECTION:
        _update_collection_summary(activity_rights)
    elif activity_type == feconf.ACTIVITY_TYPE_EXPLORATION:
        _update_exploration_summary(activity_rights)
def update_activity_first_published_msec(
        activity_type, activity_id, first_published_msec):
    """Updates the first_published_msec field for an activity.

    This is only called during the one-off job
    ExplorationFirstPublishedOneOffJob. Callers are responsible for ensuring
    that this value is not already set before updating it.
    """
    activity_rights = _get_activity_rights(activity_type, activity_id)
    previous_msec = activity_rights.first_published_msec
    activity_rights.first_published_msec = first_published_msec

    commit_cmds = [{
        'cmd': CMD_UPDATE_FIRST_PUBLISHED_MSEC,
        'old_first_published_msec': previous_msec,
        'new_first_published_msec': first_published_msec
    }]
    _save_activity_rights(
        feconf.SYSTEM_COMMITTER_ID, activity_rights, activity_type,
        'set first published time in msec', commit_cmds)
def create_new_exploration_rights(exploration_id, committer_id):
    """Creates and commits the rights model for a brand-new exploration, with
    the committer as sole owner, then subscribes the committer to it.
    """
    rights = ActivityRights(exploration_id, [committer_id], [], [])

    model = exp_models.ExplorationRightsModel(
        id=rights.id,
        owner_ids=rights.owner_ids,
        editor_ids=rights.editor_ids,
        viewer_ids=rights.viewer_ids,
        community_owned=rights.community_owned,
        status=rights.status,
        viewable_if_private=rights.viewable_if_private,
        first_published_msec=rights.first_published_msec,
    )
    model.commit(
        committer_id, 'Created new exploration', [{'cmd': CMD_CREATE_NEW}])

    subscription_services.subscribe_to_exploration(
        committer_id, exploration_id)
def get_exploration_rights(exploration_id, strict=True):
    """Retrieves the rights for this exploration from the datastore.

    Returns None when no model exists and strict is False.
    """
    model = exp_models.ExplorationRightsModel.get(
        exploration_id, strict=strict)
    if model is not None:
        return _get_activity_rights_from_model(
            model, feconf.ACTIVITY_TYPE_EXPLORATION)
    return None
def is_exploration_private(exploration_id):
    """Whether the given exploration is currently private."""
    rights = get_exploration_rights(exploration_id)
    return rights.status == ACTIVITY_STATUS_PRIVATE
def is_exploration_public(exploration_id):
    """Whether the given exploration is currently public."""
    rights = get_exploration_rights(exploration_id)
    return rights.status == ACTIVITY_STATUS_PUBLIC
def is_exploration_cloned(exploration_id):
    """Whether the given exploration was cloned from another one."""
    rights = get_exploration_rights(exploration_id)
    return bool(rights.cloned_from)
def create_new_collection_rights(collection_id, committer_id):
    """Creates and commits the rights model for a brand-new collection, with
    the committer as sole owner, then subscribes the committer to it.
    """
    rights = ActivityRights(collection_id, [committer_id], [], [])

    model = collection_models.CollectionRightsModel(
        id=rights.id,
        owner_ids=rights.owner_ids,
        editor_ids=rights.editor_ids,
        viewer_ids=rights.viewer_ids,
        community_owned=rights.community_owned,
        status=rights.status,
        viewable_if_private=rights.viewable_if_private,
        first_published_msec=rights.first_published_msec
    )
    model.commit(
        committer_id, 'Created new collection', [{'cmd': CMD_CREATE_NEW}])

    subscription_services.subscribe_to_collection(committer_id, collection_id)
def get_collection_rights(collection_id, strict=True):
    """Retrieves the rights for this collection from the datastore.

    Returns None when no model exists and strict is False.
    """
    model = collection_models.CollectionRightsModel.get(
        collection_id, strict=strict)
    if model is not None:
        return _get_activity_rights_from_model(
            model, feconf.ACTIVITY_TYPE_COLLECTION)
    return None
def get_collection_owner_names(collection_id):
    """Retrieves human-readable usernames of this collection's owners."""
    owner_ids = get_collection_rights(collection_id).owner_ids
    return user_services.get_human_readable_user_ids(owner_ids)
def is_collection_private(collection_id):
    """Whether the given collection is currently private."""
    rights = get_collection_rights(collection_id)
    return rights.status == ACTIVITY_STATUS_PRIVATE
def is_collection_public(collection_id):
    """Whether the given collection is currently public."""
    rights = get_collection_rights(collection_id)
    return rights.status == ACTIVITY_STATUS_PUBLIC
def _get_activity_rights(activity_type, activity_id):
    """Fetches the rights object for an activity of the given type.

    Returns None if no rights object exists for the given id (non-strict
    fetch). Raises an Exception if activity_type is not recognised.
    """
    if activity_type == feconf.ACTIVITY_TYPE_COLLECTION:
        return get_collection_rights(activity_id, strict=False)
    if activity_type == feconf.ACTIVITY_TYPE_EXPLORATION:
        return get_exploration_rights(activity_id, strict=False)
    raise Exception(
        'Cannot get activity rights for unknown activity type: %s' % (
            activity_type))
class Actor(object):
    """Domain object representing a user together with the rights-related
    actions that the user is allowed to perform.
    """

    def __init__(self, user_id):
        # The user's id; may be None (e.g. for logged-out users).
        self.user_id = user_id
        # Lazily-computed caches for the admin/moderator checks. A value of
        # None means 'not computed yet'.
        self._is_admin = None
        self._is_moderator = None

    def is_admin(self):
        """Whether this user is a site admin (result is cached)."""
        if self._is_admin is None:
            self._is_admin = self.user_id in config_domain.ADMIN_IDS.value
        return self._is_admin

    def is_moderator(self):
        """Whether this user is a moderator; admins count as moderators
        (result is cached).
        """
        if self._is_moderator is None:
            self._is_moderator = (
                self.is_admin() or
                self.user_id in config_domain.MODERATOR_IDS.value)
        return self._is_moderator

    def _is_owner(self, rights_object):
        """Whether the user owns the activity; community-owned activities are
        treated as owned by everyone.
        """
        return (
            rights_object.community_owned or
            self.user_id in rights_object.owner_ids)

    def _has_editing_rights(self, rights_object):
        """Whether the user may edit: community-owned, or listed as an owner
        or editor.
        """
        return (rights_object.community_owned or
                self.user_id in rights_object.editor_ids or
                self.user_id in rights_object.owner_ids)

    def _has_viewing_rights(self, rights_object):
        """Whether the user may view: any non-private activity, or any role
        on a private one.
        """
        return (rights_object.status != ACTIVITY_STATUS_PRIVATE or
                self.user_id in rights_object.viewer_ids or
                self.user_id in rights_object.editor_ids or
                self.user_id in rights_object.owner_ids)

    def _can_play(self, rights_object):
        """Whether the user can play the activity in the reader view."""
        # Non-private activities are playable by anyone.
        if rights_object.status != ACTIVITY_STATUS_PRIVATE:
            return True
        return (self._has_viewing_rights(rights_object)
                or rights_object.viewable_if_private
                or self.is_moderator())

    def _can_edit(self, rights_object):
        """Whether the user can edit: editing rights, or moderator on a
        non-private activity.
        """
        return (
            self._has_editing_rights(rights_object) or (
                self.is_moderator() and (
                    rights_object.status != ACTIVITY_STATUS_PRIVATE)
            )
        )

    def _can_delete(self, rights_object):
        """Whether the user can delete: an owner deleting their own private
        activity, or a moderator deleting a public one.
        """
        owner_deletes_own_private = (
            rights_object.status == ACTIVITY_STATUS_PRIVATE and
            self._is_owner(rights_object))
        moderator_deletes_public = (
            rights_object.status == ACTIVITY_STATUS_PUBLIC and
            self.is_moderator())
        return owner_deletes_own_private or moderator_deletes_public

    def is_owner(self, activity_type, activity_id):
        """Whether this user owns the given activity."""
        rights = _get_activity_rights(activity_type, activity_id)
        return rights is not None and self._is_owner(rights)

    def has_editing_rights(self, activity_type, activity_id):
        """Whether this user has editing rights for this activity.

        This is true if the activity is community-owned, or if the user is in
        the owner/editor list for the activity.
        """
        rights = _get_activity_rights(activity_type, activity_id)
        return rights is not None and self._has_editing_rights(rights)

    def has_viewing_rights(self, activity_type, activity_id):
        """Whether this user has viewing rights for this activity."""
        rights = _get_activity_rights(activity_type, activity_id)
        return rights is not None and self._has_viewing_rights(rights)

    def can_play(self, activity_type, activity_id):
        """Whether the user can play the reader view of this activity."""
        rights = _get_activity_rights(activity_type, activity_id)
        return rights is not None and self._can_play(rights)

    def can_view(self, activity_type, activity_id):
        """Whether the user can view the editor page for this activity."""
        return self.can_play(activity_type, activity_id)

    def can_edit(self, activity_type, activity_id):
        """Whether the user can edit this activity."""
        # TODO(sll): Add a check here for whether a user is banned or not,
        # rather than having this check in the controller.
        rights = _get_activity_rights(activity_type, activity_id)
        return rights is not None and self._can_edit(rights)

    def can_delete(self, activity_type, activity_id):
        """Whether the user can delete this activity."""
        rights = _get_activity_rights(activity_type, activity_id)
        return rights is not None and self._can_delete(rights)

    def can_change_private_viewability(
            self, activity_type, activity_id):
        """Note that this requires the activity in question to be private."""
        # Same permission as publishing: owners and admins, private only.
        return self.can_publish(activity_type, activity_id)

    def can_publish(self, activity_type, activity_id):
        """Whether the user can publish this (private, non-cloned)
        activity: owners and admins only.
        """
        rights = _get_activity_rights(activity_type, activity_id)
        if rights is None or rights.status != ACTIVITY_STATUS_PRIVATE:
            return False
        if rights.cloned_from:
            return False
        return self.is_owner(activity_type, activity_id) or self.is_admin()

    def can_unpublish(self, activity_type, activity_id):
        """Whether the user can unpublish this public, non-community-owned
        activity: moderators only.
        """
        rights = _get_activity_rights(activity_type, activity_id)
        if rights is None or rights.status != ACTIVITY_STATUS_PUBLIC:
            return False
        if rights.community_owned:
            return False
        return self.is_moderator()

    def can_modify_roles(self, activity_type, activity_id):
        """Whether the user can change user roles on this activity: admins
        and owners, unless community-owned or cloned.
        """
        rights = _get_activity_rights(activity_type, activity_id)
        if rights is None:
            return False
        if rights.community_owned or rights.cloned_from:
            return False
        return self.is_admin() or self.is_owner(activity_type, activity_id)

    def can_release_ownership(self, activity_type, activity_id):
        """Whether the user can release this non-private activity to the
        community.
        """
        rights = _get_activity_rights(activity_type, activity_id)
        if rights is None or rights.status == ACTIVITY_STATUS_PRIVATE:
            return False
        return self.can_modify_roles(activity_type, activity_id)

    def can_publicize(self, activity_type, activity_id):
        """Whether the user can mark this public activity as featured:
        moderators only.
        """
        rights = _get_activity_rights(activity_type, activity_id)
        if rights is None or rights.status != ACTIVITY_STATUS_PUBLIC:
            return False
        return self.is_moderator()

    def can_unpublicize(self, activity_type, activity_id):
        """Whether the user can unmark this publicized activity as featured:
        moderators only.
        """
        rights = _get_activity_rights(activity_type, activity_id)
        if rights is None or rights.status != ACTIVITY_STATUS_PUBLICIZED:
            return False
        return self.is_moderator()
def _assign_role(
        committer_id, assignee_id, new_role, activity_id, activity_type):
    """Assigns a user to a new role for an activity, removing them from any
    weaker role they currently hold, then commits the change.

    Args:
    - committer_id: str. The user_id of the user who is performing the action.
    - assignee_id: str. The user_id of the user whose role is being changed.
    - new_role: str. The name of the new role: either 'owner', 'editor' or
      'viewer'.
    - activity_id: str. The ID of the exploration or collection.
    - activity_type: str. One of feconf.ACTIVITY_TYPE_EXPLORATION or
      feconf.ACTIVITY_TYPE_COLLECTION.

    Raises:
        Exception: if the committer may not modify roles for this activity,
            if the assignee already holds equivalent (or stronger) rights, or
            if new_role is not one of the recognised roles.
    """
    if not Actor(committer_id).can_modify_roles(activity_type, activity_id):
        logging.error(
            'User %s tried to allow user %s to be a(n) %s of activity %s '
            'but was refused permission.' % (
                committer_id, assignee_id, new_role, activity_id))
        raise Exception(
            'UnauthorizedUserException: Could not assign new role.')

    activity_rights = _get_activity_rights(activity_type, activity_id)
    assignee_username = user_services.get_username(assignee_id)
    # old_role is recorded in the commit log below; ROLE_NONE means the
    # assignee held no explicit role beforehand.
    old_role = ROLE_NONE

    if new_role == ROLE_OWNER:
        if Actor(assignee_id)._is_owner(activity_rights):  # pylint: disable=protected-access
            raise Exception('This user already owns this %s.' % activity_type)

        activity_rights.owner_ids.append(assignee_id)

        # Drop the assignee from any weaker role they previously held.
        if assignee_id in activity_rights.viewer_ids:
            activity_rights.viewer_ids.remove(assignee_id)
            old_role = ROLE_VIEWER
        if assignee_id in activity_rights.editor_ids:
            activity_rights.editor_ids.remove(assignee_id)
            old_role = ROLE_EDITOR

    elif new_role == ROLE_EDITOR:
        if Actor(assignee_id)._has_editing_rights(activity_rights):  # pylint: disable=protected-access
            raise Exception(
                'This user already can edit this %s.' % activity_type)

        if activity_rights.community_owned:
            raise Exception(
                'Community-owned %ss can be edited by anyone.' % activity_type)

        activity_rights.editor_ids.append(assignee_id)

        # Editors supersede viewers; drop the weaker role if present.
        if assignee_id in activity_rights.viewer_ids:
            activity_rights.viewer_ids.remove(assignee_id)
            old_role = ROLE_VIEWER

    elif new_role == ROLE_VIEWER:
        if Actor(assignee_id)._has_viewing_rights(activity_rights):  # pylint: disable=protected-access
            raise Exception(
                'This user already can view this %s.' % activity_type)

        # Viewer lists are only meaningful for private activities.
        if activity_rights.status != ACTIVITY_STATUS_PRIVATE:
            raise Exception(
                'Public %ss can be viewed by anyone.' % activity_type)

        activity_rights.viewer_ids.append(assignee_id)

    else:
        raise Exception('Invalid role: %s' % new_role)

    commit_message = 'Changed role of %s from %s to %s' % (
        assignee_username, old_role, new_role)
    commit_cmds = [{
        'cmd': CMD_CHANGE_ROLE,
        'assignee_id': assignee_id,
        'old_role': old_role,
        'new_role': new_role
    }]

    _save_activity_rights(committer_id, activity_rights, activity_type,
                          commit_message, commit_cmds)
    _update_activity_summary(activity_type, activity_rights)
def _release_ownership_of_activity(committer_id, activity_id, activity_type):
    """Releases ownership of the given activity to the community and commits
    the change.

    Raises:
        Exception: if the committer is not allowed to release ownership.
    """
    committer = Actor(committer_id)
    if not committer.can_release_ownership(activity_type, activity_id):
        logging.error(
            'User %s tried to release ownership of %s %s but was '
            'refused permission.' % (committer_id, activity_type, activity_id))
        raise Exception(
            'The ownership of this %s cannot be released.' % activity_type)

    activity_rights = _get_activity_rights(activity_type, activity_id)
    # Community ownership means no explicit per-user roles remain.
    activity_rights.community_owned = True
    activity_rights.owner_ids = []
    activity_rights.editor_ids = []
    activity_rights.viewer_ids = []

    _save_activity_rights(
        committer_id, activity_rights, activity_type,
        '%s ownership released to the community.' % activity_type,
        [{
            'cmd': CMD_RELEASE_OWNERSHIP,
        }])
    _update_activity_summary(activity_type, activity_rights)
def _change_activity_status(
        committer_id, activity_id, activity_type, new_status, commit_message):
    """Changes the status of an activity and commits the change.

    Args:
    - committer_id: str. The id of the user who is performing the update
      action.
    - activity_id: str. The id of the collection or exploration.
    - activity_type: str. One of feconf.ACTIVITY_TYPE_EXPLORATION or
      feconf.ACTIVITY_TYPE_COLLECTION.
    - new_status: str. The new status of the activity.
    - commit_message: str. The human-written commit message for this change.
    """
    activity_rights = _get_activity_rights(activity_type, activity_id)
    previous_status = activity_rights.status
    activity_rights.status = new_status

    if activity_type == feconf.ACTIVITY_TYPE_COLLECTION:
        cmd_type = CMD_CHANGE_COLLECTION_STATUS
    elif activity_type == feconf.ACTIVITY_TYPE_EXPLORATION:
        cmd_type = CMD_CHANGE_EXPLORATION_STATUS
    commit_cmds = [{
        'cmd': cmd_type,
        'old_status': previous_status,
        'new_status': new_status
    }]

    if new_status != ACTIVITY_STATUS_PRIVATE:
        # Once the activity leaves the private state, per-user viewer lists no
        # longer apply, and the first-published time is recorded exactly once.
        activity_rights.viewer_ids = []
        if activity_rights.first_published_msec is None:
            activity_rights.first_published_msec = (
                utils.get_current_time_in_millisecs())

    _save_activity_rights(
        committer_id, activity_rights, activity_type, commit_message,
        commit_cmds)
    _update_activity_summary(activity_type, activity_rights)
def _publish_activity(committer_id, activity_id, activity_type):
    """Publishes an activity (moves it to the public status). Commits
    changes.

    Raises:
        Exception: if the committer may not publish this activity.
    """
    committer = Actor(committer_id)
    if not committer.can_publish(activity_type, activity_id):
        logging.error(
            'User %s tried to publish %s %s but was refused '
            'permission.' % (committer_id, activity_type, activity_id))
        raise Exception('This %s cannot be published.' % activity_type)

    _change_activity_status(
        committer_id, activity_id, activity_type, ACTIVITY_STATUS_PUBLIC,
        '%s published.' % activity_type)
def _unpublish_activity(committer_id, activity_id, activity_type):
    """Unpublishes an activity (moves it back to the private status), commits
    the change and removes the activity from the featured list.

    Raises:
        Exception: if the committer may not unpublish this activity.
    """
    committer = Actor(committer_id)
    if not committer.can_unpublish(activity_type, activity_id):
        logging.error(
            'User %s tried to unpublish %s %s but was refused '
            'permission.' % (committer_id, activity_type, activity_id))
        raise Exception('This %s cannot be unpublished.' % activity_type)

    _change_activity_status(
        committer_id, activity_id, activity_type, ACTIVITY_STATUS_PRIVATE,
        '%s unpublished.' % activity_type)

    activity_services.remove_featured_activity(activity_type, activity_id)
def _publicize_activity(committer_id, activity_id, activity_type):
    """Marks an activity as featured (publicized). Commits changes.

    Raises:
        Exception: if the committer may not publicize this activity.
    """
    committer = Actor(committer_id)
    if not committer.can_publicize(activity_type, activity_id):
        logging.error(
            'User %s tried to publicize %s %s but was refused '
            'permission.' % (committer_id, activity_type, activity_id))
        raise Exception('This %s cannot be marked as "featured".' % (
            activity_type))

    _change_activity_status(
        committer_id, activity_id, activity_type, ACTIVITY_STATUS_PUBLICIZED,
        '%s publicized.' % activity_type)
def _unpublicize_activity(committer_id, activity_id, activity_type):
    """Unmarks an activity as featured (moves it back to the public status).
    Commits changes.

    Raises:
        Exception: if the committer may not unpublicize this activity.
    """
    if not Actor(committer_id).can_unpublicize(activity_type, activity_id):
        # Consistency fix: the log/exception/commit messages previously
        # hard-coded 'exploration' even though this helper also handles
        # collections; use activity_type like the sibling helpers do.
        logging.error(
            'User %s tried to unpublicize %s %s but was refused '
            'permission.' % (committer_id, activity_type, activity_id))
        raise Exception(
            'This %s cannot be unmarked as "featured".' % activity_type)

    _change_activity_status(
        committer_id, activity_id, activity_type, ACTIVITY_STATUS_PUBLIC,
        '%s unpublicized.' % activity_type)
# Rights functions for activities.
def assign_role_for_exploration(
        committer_id, exploration_id, assignee_id, new_role):
    """Assigns `assignee_id` to the given role for an exploration and, for
    the owner and editor roles, subscribes the assignee to future exploration
    updates.

    The caller should ensure that assignee_id corresponds to a valid user in
    the system.

    Args:
    - committer_id: str. The user_id of the user who is performing the action.
    - exploration_id: str. The exploration id.
    - assignee_id: str. The user_id of the user whose role is being changed.
    - new_role: str. The name of the new role: either 'owner', 'editor' or
      'viewer'.
    """
    _assign_role(
        committer_id, assignee_id, new_role, exploration_id,
        feconf.ACTIVITY_TYPE_EXPLORATION)
    if new_role in (ROLE_OWNER, ROLE_EDITOR):
        subscription_services.subscribe_to_exploration(
            assignee_id, exploration_id)
def release_ownership_of_exploration(committer_id, exploration_id):
    """Releases ownership of an exploration to the community and commits the
    change.
    """
    _release_ownership_of_activity(
        committer_id, exploration_id, feconf.ACTIVITY_TYPE_EXPLORATION)
def set_private_viewability_of_exploration(
        committer_id, exploration_id, viewable_if_private):
    """Sets the viewable_if_private attribute for an exploration's rights
    object. If viewable_if_private is True, a private exploration can be
    viewed by anyone with the link.

    Raises:
        Exception: if the committer lacks permission, or if the flag already
            has the requested value.
    """
    permitted = Actor(committer_id).can_change_private_viewability(
        feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id)
    if not permitted:
        logging.error(
            'User %s tried to change private viewability of exploration %s '
            'but was refused permission.' % (committer_id, exploration_id))
        raise Exception(
            'The viewability status of this exploration cannot be changed.')

    exploration_rights = get_exploration_rights(exploration_id)
    old_viewable_if_private = exploration_rights.viewable_if_private
    if old_viewable_if_private == viewable_if_private:
        raise Exception(
            'Trying to change viewability status of this exploration to %s, '
            'but that is already the current value.' % viewable_if_private)

    exploration_rights.viewable_if_private = viewable_if_private

    if viewable_if_private:
        commit_message = 'Made exploration viewable to anyone with the link.'
    else:
        commit_message = (
            'Made exploration viewable only to invited playtesters.')
    commit_cmds = [{
        'cmd': CMD_CHANGE_PRIVATE_VIEWABILITY,
        'old_viewable_if_private': old_viewable_if_private,
        'new_viewable_if_private': viewable_if_private,
    }]

    _save_activity_rights(
        committer_id, exploration_rights, feconf.ACTIVITY_TYPE_EXPLORATION,
        commit_message, commit_cmds)
    _update_exploration_summary(exploration_rights)
def publish_exploration(committer_id, exploration_id):
    """Publishes an exploration and commits changes. This is called by the
    publish_exploration_and_update_user_profiles function in exp_services.py.

    It is the responsibility of the caller to check that the exploration is
    valid prior to publication.
    """
    _publish_activity(
        committer_id, exploration_id, feconf.ACTIVITY_TYPE_EXPLORATION)
def unpublish_exploration(committer_id, exploration_id):
    """Unpublishes an exploration and commits changes."""
    _unpublish_activity(
        committer_id, exploration_id, feconf.ACTIVITY_TYPE_EXPLORATION)
def publicize_exploration(committer_id, exploration_id):
    """Publicizes an exploration and commits changes.

    It is the responsibility of the caller to check that the exploration is
    valid prior to publicizing it.
    """
    _publicize_activity(
        committer_id, exploration_id, feconf.ACTIVITY_TYPE_EXPLORATION)
def unpublicize_exploration(committer_id, exploration_id):
    """Unpublicizes an exploration and commits changes."""
    _unpublicize_activity(
        committer_id, exploration_id, feconf.ACTIVITY_TYPE_EXPLORATION)
# Rights functions for collections.
def assign_role_for_collection(
        committer_id, collection_id, assignee_id, new_role):
    """Assigns `assignee_id` to the given role for a collection and, for the
    owner and editor roles, subscribes the assignee to future collection
    updates.

    The caller should ensure that assignee_id corresponds to a valid user in
    the system.

    Args:
    - committer_id: str. The user_id of the user who is performing the action.
    - collection_id: str. The collection id.
    - assignee_id: str. The user_id of the user whose role is being changed.
    - new_role: str. The name of the new role: either 'owner', 'editor' or
      'viewer'.
    """
    _assign_role(
        committer_id, assignee_id, new_role, collection_id,
        feconf.ACTIVITY_TYPE_COLLECTION)
    if new_role in (ROLE_OWNER, ROLE_EDITOR):
        subscription_services.subscribe_to_collection(
            assignee_id, collection_id)
def release_ownership_of_collection(committer_id, collection_id):
    """Releases ownership of a collection to the community and commits the
    change.
    """
    _release_ownership_of_activity(
        committer_id, collection_id, feconf.ACTIVITY_TYPE_COLLECTION)
def publish_collection(committer_id, collection_id):
    """Publishes a collection and commits changes. This is called by the
    publish_collection_and_update_user_profiles function in
    collection_services.py.

    It is the responsibility of the caller to check that the collection is
    valid prior to publication.
    """
    _publish_activity(
        committer_id, collection_id, feconf.ACTIVITY_TYPE_COLLECTION)
def unpublish_collection(committer_id, collection_id):
    """Unpublishes a collection and commits changes."""
    _unpublish_activity(
        committer_id, collection_id, feconf.ACTIVITY_TYPE_COLLECTION)
def publicize_collection(committer_id, collection_id):
    """Publicizes a collection and commits changes.

    It is the responsibility of the caller to check that the collection is
    valid prior to publicizing it.
    """
    _publicize_activity(
        committer_id, collection_id, feconf.ACTIVITY_TYPE_COLLECTION)
def unpublicize_collection(committer_id, collection_id):
    """Unpublicizes a collection and commits changes."""
    _unpublicize_activity(
        committer_id, collection_id, feconf.ACTIVITY_TYPE_COLLECTION)
|
|
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import sys
import salt.states.linux_acl as linux_acl
from salt.exceptions import CommandExecutionError
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
@skipIf(not sys.platform.startswith("linux"), "Test for Linux only")
class LinuxAclTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for salt.states.linux_acl

    The tests drive the state functions (present/absent/list_present/
    list_absent) purely through mocks: ``acl.getfacl`` is a MagicMock whose
    ``side_effect`` list is consumed one entry per call, so the ORDER of the
    entries must match the order of the state-function calls below.
    """
    def setup_loader_modules(self):
        # linux_acl needs no module globals; everything comes via __salt__.
        return {linux_acl: {}}
    # 'present' function tests: 1
    def test_present(self):
        """
        Test to ensure a Linux ACL is present
        """
        self.maxDiff = None
        name = "/root"
        acl_type = "users"
        acl_name = "damian"
        perms = "rwx"
        # One side_effect entry per linux_acl.present() call; the octal value
        # (or its absence) selects the update / new / noop / error branch.
        mock = MagicMock(
            side_effect=[
                {name: {acl_type: [{acl_name: {"octal": 5}}]}},
                {name: {acl_type: [{acl_name: {"octal": 5}}]}},
                {name: {acl_type: [{acl_name: {"octal": 5}}]}},
                {name: {acl_type: [{}]}},
                {name: {acl_type: [{}]}},
                {name: {acl_type: [{}]}},
                {
                    name: {acl_type: [{acl_name: {"octal": 7}}]},
                    name + "/foo": {acl_type: [{acl_name: {"octal": 5}}]},
                },
                {
                    name: {acl_type: [{acl_name: {"octal": 7}}]},
                    name + "/foo": {acl_type: [{acl_name: {"octal": 7}}]},
                },
                {name: {acl_type: ""}},
                {
                    name: {"defaults": {"users": [{acl_name: {"octal": 7}}]}},
                    name + "/foo": {"defaults": {"users": [{acl_name: {"octal": 7}}]}},
                },
                {
                    name: {"defaults": {"users": [{acl_name: {"octal": 7}}]}},
                    name + "/foo": {"defaults": {"users": [{acl_name: {"octal": 7}}]}},
                },
                {
                    name: {"defaults": {"users": [{acl_name: {"octal": 7}}]}},
                    name + "/foo": {"defaults": {"users": [{acl_name: {"octal": 7}}]}},
                },
            ]
        )
        mock_modfacl = MagicMock(return_value=True)
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            # Update - test=True
            with patch.dict(linux_acl.__opts__, {"test": True}):
                # Existing octal 5 ("r-x") differs from requested "rwx", so a
                # dry-run update is reported with result None.
                comt = "Updated permissions will be applied for {}: r-x -> {}".format(
                    acl_name, perms
                )
                ret = {
                    "name": name,
                    "comment": comt,
                    "changes": {
                        "new": {
                            "acl_name": acl_name,
                            "acl_type": acl_type,
                            "perms": perms,
                        },
                        "old": {
                            "acl_name": acl_name,
                            "acl_type": acl_type,
                            "perms": "r-x",
                        },
                    },
                    "result": None,
                }
                self.assertDictEqual(
                    linux_acl.present(name, acl_type, acl_name, perms), ret
                )
            # Update - test=False
            with patch.dict(linux_acl.__salt__, {"acl.modfacl": mock_modfacl}):
                with patch.dict(linux_acl.__opts__, {"test": False}):
                    comt = "Updated permissions for {}".format(acl_name)
                    ret = {
                        "name": name,
                        "comment": comt,
                        "changes": {
                            "new": {
                                "acl_name": acl_name,
                                "acl_type": acl_type,
                                "perms": perms,
                            },
                            "old": {
                                "acl_name": acl_name,
                                "acl_type": acl_type,
                                "perms": "r-x",
                            },
                        },
                        "result": True,
                    }
                    self.assertDictEqual(
                        linux_acl.present(name, acl_type, acl_name, perms), ret
                    )
            # Update - modfacl error
            with patch.dict(
                linux_acl.__salt__,
                {
                    "acl.modfacl": MagicMock(
                        side_effect=CommandExecutionError("Custom err")
                    )
                },
            ):
                with patch.dict(linux_acl.__opts__, {"test": False}):
                    # modfacl raising CommandExecutionError must surface as a
                    # failed state with the error text in the comment.
                    comt = "Error updating permissions for {}: Custom err".format(
                        acl_name
                    )
                    ret = {
                        "name": name,
                        "comment": comt,
                        "changes": {},
                        "result": False,
                    }
                    self.assertDictEqual(
                        linux_acl.present(name, acl_type, acl_name, perms), ret
                    )
            # New - test=True
            with patch.dict(linux_acl.__salt__, {"acl.modfacl": mock_modfacl}):
                with patch.dict(linux_acl.__opts__, {"test": True}):
                    # getfacl now reports no entry for acl_name, so the state
                    # reports that brand-new permissions would be applied.
                    comt = "New permissions will be applied for {}: {}".format(
                        acl_name, perms
                    )
                    ret = {
                        "name": name,
                        "comment": comt,
                        "changes": {
                            "new": {
                                "acl_name": acl_name,
                                "acl_type": acl_type,
                                "perms": perms,
                            }
                        },
                        "result": None,
                    }
                    self.assertDictEqual(
                        linux_acl.present(name, acl_type, acl_name, perms), ret
                    )
            # New - test=False
            with patch.dict(linux_acl.__salt__, {"acl.modfacl": mock_modfacl}):
                with patch.dict(linux_acl.__opts__, {"test": False}):
                    comt = "Applied new permissions for {}".format(acl_name)
                    ret = {
                        "name": name,
                        "comment": comt,
                        "changes": {
                            "new": {
                                "acl_name": acl_name,
                                "acl_type": acl_type,
                                "perms": perms,
                            }
                        },
                        "result": True,
                    }
                    self.assertDictEqual(
                        linux_acl.present(name, acl_type, acl_name, perms), ret
                    )
            # New - modfacl error
            with patch.dict(
                linux_acl.__salt__,
                {
                    "acl.modfacl": MagicMock(
                        side_effect=CommandExecutionError("Custom err")
                    )
                },
            ):
                with patch.dict(linux_acl.__opts__, {"test": False}):
                    comt = "Error updating permissions for {}: Custom err".format(
                        acl_name
                    )
                    ret = {
                        "name": name,
                        "comment": comt,
                        "changes": {},
                        "result": False,
                    }
                    self.assertDictEqual(
                        linux_acl.present(name, acl_type, acl_name, perms), ret
                    )
        # New - recurse true
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            # Update - test=True
            with patch.dict(linux_acl.__opts__, {"test": True}):
                # With recurse=True the child path name + "/foo" still has
                # octal 5, so an update is reported even though the top-level
                # path already matches.
                comt = (
                    "Updated permissions will be applied for {}: rwx -> {}".format(
                        acl_name, perms
                    )
                )
                ret = {
                    "name": name,
                    "comment": comt,
                    "changes": {
                        "new": {
                            "acl_name": acl_name,
                            "acl_type": acl_type,
                            "perms": perms,
                        },
                        "old": {
                            "acl_name": acl_name,
                            "acl_type": acl_type,
                            "perms": "rwx",
                        },
                    },
                    "result": None,
                }
                self.assertDictEqual(
                    linux_acl.present(
                        name, acl_type, acl_name, perms, recurse=True
                    ),
                    ret,
                )
        # New - recurse true - nothing to do
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            # Update - test=True
            with patch.dict(linux_acl.__opts__, {"test": True}):
                comt = "Permissions are in the desired state"
                ret = {"name": name, "comment": comt, "changes": {}, "result": True}
                self.assertDictEqual(
                    linux_acl.present(
                        name, acl_type, acl_name, perms, recurse=True
                    ),
                    ret,
                )
        # No acl type
        comt = "ACL Type does not exist"
        ret = {"name": name, "comment": comt, "result": False, "changes": {}}
        self.assertDictEqual(
            linux_acl.present(name, acl_type, acl_name, perms), ret
        )
        # default recurse false - nothing to do
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            # Update - test=True
            with patch.dict(linux_acl.__opts__, {"test": True}):
                comt = "Permissions are in the desired state"
                ret = {"name": name, "comment": comt, "changes": {}, "result": True}
                self.assertDictEqual(
                    linux_acl.present(
                        name, "d:" + acl_type, acl_name, perms, recurse=False
                    ),
                    ret,
                )
        # default recurse false - nothing to do
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            # Update - test=True
            with patch.dict(linux_acl.__opts__, {"test": True}):
                comt = "Permissions are in the desired state"
                ret = {"name": name, "comment": comt, "changes": {}, "result": True}
                self.assertDictEqual(
                    linux_acl.present(
                        name, "d:" + acl_type, acl_name, perms, recurse=False
                    ),
                    ret,
                )
        # default recurse true - nothing to do
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            # Update - test=True
            with patch.dict(linux_acl.__opts__, {"test": True}):
                comt = "Permissions are in the desired state"
                ret = {"name": name, "comment": comt, "changes": {}, "result": True}
                self.assertDictEqual(
                    linux_acl.present(
                        name, "d:" + acl_type, acl_name, perms, recurse=True
                    ),
                    ret,
                )
    # 'absent' function tests: 2
    def test_absent(self):
        """
        Test to ensure a Linux ACL does not exist
        """
        name = "/root"
        acl_type = "users"
        acl_name = "damian"
        perms = "rwx"
        ret = {"name": name, "result": None, "comment": "", "changes": {}}
        # First getfacl result: entry present (dry-run removal reported);
        # second: acl_type missing entirely (failure).
        mock = MagicMock(
            side_effect=[
                {name: {acl_type: [{acl_name: {"octal": "A"}}]}},
                {name: {acl_type: ""}},
            ]
        )
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            with patch.dict(linux_acl.__opts__, {"test": True}):
                comt = "Removing permissions"
                ret.update({"comment": comt})
                self.assertDictEqual(
                    linux_acl.absent(name, acl_type, acl_name, perms), ret
                )
            comt = "ACL Type does not exist"
            ret.update({"comment": comt, "result": False})
            self.assertDictEqual(linux_acl.absent(name, acl_type, acl_name, perms), ret)
    # 'list_present' function tests: 1
    def test_list_present(self):
        """
        Test to ensure a Linux ACL is present
        """
        self.maxDiff = None
        name = "/root"
        acl_type = "user"
        acl_names = ["root", "damian", "homer"]
        acl_comment = {"owner": "root", "group": "root", "file": "/root"}
        perms = "rwx"
        # As in test_present: one side_effect entry per list_present() call.
        mock = MagicMock(
            side_effect=[
                {
                    name: {
                        acl_type: [
                            {acl_names[0]: {"octal": "A"}},
                            {acl_names[1]: {"octal": "A"}},
                            {acl_names[2]: {"octal": "A"}},
                        ],
                        "comment": acl_comment,
                    }
                },
                {
                    name: {
                        acl_type: [
                            {acl_names[0]: {"octal": "A"}},
                            {acl_names[1]: {"octal": "A"}},
                        ],
                        "comment": acl_comment,
                    }
                },
                {
                    name: {
                        acl_type: [
                            {acl_names[0]: {"octal": "A"}},
                            {acl_names[1]: {"octal": "A"}},
                        ]
                    }
                },
                {name: {acl_type: [{}]}},
                {name: {acl_type: [{}]}},
                {name: {acl_type: [{}]}},
                {name: {acl_type: ""}},
            ]
        )
        mock_modfacl = MagicMock(return_value=True)
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            # Update - test=True
            with patch.dict(linux_acl.__opts__, {"test": True}):
                comt = "Updated permissions will be applied for {}: A -> {}".format(
                    acl_names, perms
                )
                expected = {
                    "name": name,
                    "comment": comt,
                    "changes": {
                        "new": {
                            "acl_name": ", ".join(acl_names),
                            "acl_type": acl_type,
                            "perms": 7,
                        },
                        "old": {
                            "acl_name": ", ".join(acl_names),
                            "acl_type": acl_type,
                            "perms": "A",
                        },
                    },
                    "result": None,
                }
                ret = linux_acl.list_present(name, acl_type, acl_names, perms)
                self.assertDictEqual(ret, expected)
            # Update - test=False
            with patch.dict(linux_acl.__salt__, {"acl.modfacl": mock_modfacl}):
                with patch.dict(linux_acl.__opts__, {"test": False}):
                    comt = "Applied new permissions for {}".format(", ".join(acl_names))
                    expected = {
                        "name": name,
                        "comment": comt,
                        "changes": {
                            "new": {
                                "acl_name": ", ".join(acl_names),
                                "acl_type": acl_type,
                                "perms": "rwx",
                            }
                        },
                        "result": True,
                    }
                    ret = linux_acl.list_present(name, acl_type, acl_names, perms)
                    self.assertDictEqual(expected, ret)
            # Update - modfacl error
            with patch.dict(
                linux_acl.__salt__,
                {
                    "acl.modfacl": MagicMock(
                        side_effect=CommandExecutionError("Custom err")
                    )
                },
            ):
                with patch.dict(linux_acl.__opts__, {"test": False}):
                    comt = "Error updating permissions for {}: Custom err".format(
                        acl_names
                    )
                    expected = {
                        "name": name,
                        "comment": comt,
                        "changes": {},
                        "result": False,
                    }
                    ret = linux_acl.list_present(name, acl_type, acl_names, perms)
                    self.assertDictEqual(expected, ret)
            # New - test=True
            with patch.dict(linux_acl.__salt__, {"acl.modfacl": mock_modfacl}):
                with patch.dict(linux_acl.__opts__, {"test": True}):
                    comt = "New permissions will be applied for {}: {}".format(
                        acl_names, perms
                    )
                    expected = {
                        "name": name,
                        "comment": comt,
                        "changes": {
                            "new": {
                                "acl_name": ", ".join(acl_names),
                                "acl_type": acl_type,
                                "perms": perms,
                            }
                        },
                        "result": None,
                    }
                    ret = linux_acl.list_present(name, acl_type, acl_names, perms)
                    self.assertDictEqual(expected, ret)
            # New - test=False
            with patch.dict(linux_acl.__salt__, {"acl.modfacl": mock_modfacl}):
                with patch.dict(linux_acl.__opts__, {"test": False}):
                    comt = "Applied new permissions for {}".format(", ".join(acl_names))
                    expected = {
                        "name": name,
                        "comment": comt,
                        "changes": {
                            "new": {
                                "acl_name": ", ".join(acl_names),
                                "acl_type": acl_type,
                                "perms": perms,
                            }
                        },
                        "result": True,
                    }
                    ret = linux_acl.list_present(name, acl_type, acl_names, perms)
                    self.assertDictEqual(expected, ret)
            # New - modfacl error
            with patch.dict(
                linux_acl.__salt__,
                {
                    "acl.modfacl": MagicMock(
                        side_effect=CommandExecutionError("Custom err")
                    )
                },
            ):
                with patch.dict(linux_acl.__opts__, {"test": False}):
                    comt = "Error updating permissions for {}: Custom err".format(
                        acl_names
                    )
                    expected = {
                        "name": name,
                        "comment": comt,
                        "changes": {},
                        "result": False,
                    }
                    ret = linux_acl.list_present(name, acl_type, acl_names, perms)
                    self.assertDictEqual(expected, ret)
            # No acl type
            comt = "ACL Type does not exist"
            expected = {
                "name": name,
                "comment": comt,
                "result": False,
                "changes": {},
            }
            ret = linux_acl.list_present(name, acl_type, acl_names, perms)
            self.assertDictEqual(expected, ret)
    # 'list_absent' function tests: 2
    def test_list_absent(self):
        """
        Test to ensure a Linux ACL does not exist
        """
        name = "/root"
        acl_type = "users"
        acl_names = ["damian", "homer"]
        perms = "rwx"
        ret = {"name": name, "result": None, "comment": "", "changes": {}}
        mock = MagicMock(
            side_effect=[
                {
                    name: {
                        acl_type: [
                            {acl_names[0]: {"octal": "A"}, acl_names[1]: {"octal": "A"}}
                        ]
                    }
                },
                {name: {acl_type: ""}},
            ]
        )
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            with patch.dict(linux_acl.__opts__, {"test": True}):
                comt = "Removing permissions"
                ret.update({"comment": comt})
                self.assertDictEqual(
                    linux_acl.list_absent(name, acl_type, acl_names, perms), ret
                )
            comt = "ACL Type does not exist"
            ret.update({"comment": comt, "result": False})
            self.assertDictEqual(linux_acl.list_absent(name, acl_type, acl_names), ret)
    def test_absent_recursive(self):
        """
        Test to ensure a Linux ACL does not exist
        """
        name = "/root"
        acl_type = "users"
        acl_name = "damian"
        perms = "rwx"
        ret = {"name": name, "result": None, "comment": "", "changes": {}}
        # The child path name + "/foo" still carries an entry, so a recursive
        # absent reports a removal even though name itself matches octal 7.
        mock = MagicMock(
            side_effect=[
                {
                    name: {acl_type: [{acl_name: {"octal": 7}}]},
                    name + "/foo": {acl_type: [{acl_name: {"octal": "A"}}]},
                }
            ]
        )
        with patch.dict(linux_acl.__salt__, {"acl.getfacl": mock}):
            with patch.dict(linux_acl.__opts__, {"test": True}):
                comt = "Removing permissions"
                ret.update({"comment": comt})
                self.assertDictEqual(
                    linux_acl.absent(name, acl_type, acl_name, perms, recurse=True), ret
                )
|
|
import base64
from functools import reduce
import operator
from django.db.models.query_utils import Q
from django.db.transaction import atomic
from rest_condition import Or
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes, list_route
from rest_framework.exceptions import ValidationError, ParseError
from rest_framework.generics import get_object_or_404, ListAPIView, \
ListCreateAPIView
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
from account.models import Profile, SCHOOL_TYPES
from account.utils import format_phonenumber
from authx.permissions import IsAdminUser, SelfOnly
from friend.api.v1.serializers import FriendProfileSerializer
from friend.api.v1.social_routes import calculate_topN_social_routes
from friend.models import are_friends, PhoneContactRecord
from tag.api.v1.serializers import TagSerializer
from tag.models import TaggedItem, Tag
#from friend.api.v1.filtersets import SocialProfileFilterSet
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def has_friendship(request, version=None):
    """Return ``{'is_friend': bool}`` for the requesting user versus the
    profile identified by the ``user_id`` query parameter (404 if either
    profile does not exist)."""
    target_profile = get_object_or_404(
        Profile.objects.filter(user=request.query_params.get('user_id')))
    own_profile = get_object_or_404(Profile.objects.filter(user=request.user))
    return Response({'is_friend': are_friends(own_profile, target_profile)})
class FriendTagsListView(ListAPIView):
    """Lists the tags attached to the profiles that the user identified by
    the ``user_id`` URL kwarg has uploaded as phone contacts."""
    #permission_classes = (AllowAny, )
    # queryset = Tag.objects.all()
    serializer_class = TagSerializer
    # I will pass the filterset on this scenario and follow the easy implementation as the drf suggests
    # refer to http://www.django-rest-framework.org/api-guide/filtering/
    def get_queryset(self):
        # we might migrate it back to user module?
        # Ids of profiles this user has recorded as phone contacts.
        pcr_qs = PhoneContactRecord.objects.filter(
            from_profile__user_id=self.kwargs['user_id']
        ).values_list('to_profile_id', flat=True)
        if not pcr_qs.exists():
            return Tag.objects.none()
        to_profile_qs = Profile.objects.filter(id__in=pcr_qs)
        # Restrict the TaggedItem lookup to those contact profiles via the
        # tag relation name computed by the tagging framework.
        extra_filters = {'%s__object_id__in'%(TaggedItem.tag_relname(), ): to_profile_qs}
        tag_qs = TaggedItem.tags_for(Profile, **extra_filters)
        return tag_qs
friend_tags = FriendTagsListView.as_view()
class AlumniProfileListView(ListAPIView):
    """Lists profiles of other users who attended the same school(s) as the
    requesting user; ``school_type`` query param narrows to one school kind."""
    serializer_class = FriendProfileSerializer
    permission_classes = (IsAuthenticated,)
    search_fields = ('occupations__name', 'tags__name',
                     'user__nickname', 'college__name', 'high_school__name', )
    def get_queryset(self):
        user = self.request.user
        # Exclude the requester and profiles without an attached user.
        profile_qs = Profile.objects.filter(user__isnull=False).exclude(user=user)\
            .select_related('user', 'college', 'high_school') \
            .prefetch_related('tags')
        # refer to http://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        # according to doc, filtering against query_params just like this
        valid_school_type_dict = dict(SCHOOL_TYPES)
        user_profile = get_object_or_404(Profile, user=user)
        school_type = self.request.query_params.get('school_type')
        if school_type and school_type not in valid_school_type_dict:
            raise ValidationError('Invalid school_type, valid options are %s' % (','.join(valid_school_type_dict.keys())))
        if school_type:
            # Single school type requested: match only that school attribute.
            school = getattr(user_profile, school_type)
            if school:
                # should be a plain new q_obj
                profile_qs = profile_qs.filter( Q(**{'%s'%school_type: school}) )
            else:
                # Requester never set this school: nothing can match.
                profile_qs = profile_qs.none()
        else:
            # No type given: OR together one Q per school the requester set.
            q_objs = []
            for school_type in valid_school_type_dict.keys():
                school = getattr(user_profile, school_type)
                if school is None:
                    continue
                q_objs.append( Q(**{'%s'%school_type: school}) )
            # avoid list all profiles
            if not q_objs:
                #raise ValidationError('No %s set for current user'%school_type)
                return Profile.objects.none()
            q_obj = reduce(operator.or_, q_objs)
            profile_qs = profile_qs.filter(q_obj)
        return profile_qs
alumni = AlumniProfileListView.as_view()
class PhoneContactProfileListView(ListCreateAPIView):
    """Lists the profiles matching the requesting user's uploaded phone
    contacts, and accepts a (re)upload of the contact list as a list of
    phone-number strings."""
    serializer_class = FriendProfileSerializer
    permission_classes = (IsAuthenticated,
                          Or(IsAdminUser, SelfOnly),)
    search_fields = ('occupations__name', 'tags__name',
                     'user__nickname', 'college__name', 'high_school__name', )
    def get_queryset(self):
        # Profiles previously uploaded by this user as phone contacts.
        to_profile_id_qs = PhoneContactRecord.objects \
            .filter(from_profile__user__id=self.request.user.id) \
            .values_list('to_profile_id', flat=True)
        profile_qs = Profile.objects.filter(id__in=to_profile_id_qs) \
            .select_related('user', 'college', 'high_school') \
            .prefetch_related('tags')
        return profile_qs
    @atomic
    def create(self, request, *args, **kwargs):
        """Replace the caller's phone-contact records.

        ``request.data`` is a list of phone-number strings (a single string
        is also accepted). Malformed numbers are skipped; numbers unknown to
        the system get a placeholder Profile created. Returns the number of
        records stored (HTTP 201).
        """
        bulk = isinstance(request.data, list)
        data = request.data if bulk else [request.data]
        # no more serializers that is not necessary for this creation
        # data should be a list of strings
        # Normalise and de-duplicate while preserving order; duplicates would
        # otherwise create duplicate placeholder profiles below.
        formatted_phone_nums = []
        seen_phone_nums = set()
        for phone_num_str in data:
            formatted_phone_num = format_phonenumber(phone_num_str, quiet=True)
            if not formatted_phone_num or formatted_phone_num in seen_phone_nums:
                continue
            seen_phone_nums.add(formatted_phone_num)
            formatted_phone_nums.append(formatted_phone_num)
        p_qs = Profile.objects.filter(phone_num__in=formatted_phone_nums).only('id', 'phone_num')
        records = []
        # create for those not in the system
        # BUG FIX: this previously read ``p.phone_numm`` (typo) which raised
        # AttributeError -- the model field is ``phone_num``, as used in the
        # filter above and in to_phone_num below.
        phone_num_profile_dict = dict((p.phone_num, p) for p in p_qs)
        p_qs = list(p_qs)
        for formatted_phone_num in formatted_phone_nums:
            if formatted_phone_num not in phone_num_profile_dict:
                p = Profile(phone_num=formatted_phone_num)
                p.save()
                p_qs.append(p)
        from_profile = get_object_or_404(Profile, user=self.request.user)
        for p in p_qs:
            records.append(
                PhoneContactRecord(from_profile=from_profile, to_profile=p, to_phone_num=p.phone_num)
            )
        # Full replace: wipe the previous upload, then bulk-insert the new
        # set; @atomic keeps delete+create consistent.
        PhoneContactRecord.objects.filter(from_profile=from_profile).delete()
        PhoneContactRecord.objects.bulk_create(records)
        return Response(len(records), status=status.HTTP_201_CREATED)
phone_contacts = PhoneContactProfileListView.as_view()
class PhoneContactCountView(PhoneContactProfileListView):
    """Return only the number of phone-contact profiles.

    Exists purely to speed things up and reduce network transmission when
    the client needs nothing but the count.
    """
    def get(self, request, *args, **kwargs):
        count = self.get_queryset().count()
        return Response(count)
phone_contact_count = PhoneContactCountView.as_view()
class SocialProfileListView(ReadOnlyModelViewSet):
    """Read-only profile viewset plus two extra routes for 'social routes'
    (friend-of-friend chains between two users), encoded as opaque
    base64 route codes."""
    serializer_class = FriendProfileSerializer
    permission_classes = (AllowAny, )
    queryset = Profile.objects.filter(user__isnull=False)
    # filter_class = SocialProfileFilterSet
    # user__username is pretty rare
    search_fields = ('occupations__name', 'tags__name',
                     'user__nickname', 'college__name', 'high_school__name', )
    def _prepare_route_result(self, route, id_profile_dict):
        '''
        route must be a dict
        return {
            total_weight: n,
            weights: [n...],
            items: [
                {id: n, display_name: xxx, is_from_mobile_contact: True/False,},
                {...},
                {...},
            ],
            route_code: xxx,
        }
        '''
        profile_ids = []
        items = []
        # Resolve each profile id on the route to its prefetched summary dict.
        for profile_id in route.pop('profile_ids', []):
            profile_ids.append('%s' % profile_id)
            profile = id_profile_dict[profile_id]
            items.append(profile)
        # Encode the ordered id list as an opaque, URL-safe route code.
        route_code = base64.urlsafe_b64encode( ','.join(profile_ids).encode('utf-8') )
        # We need string instead of bytes
        route_code = route_code.decode('utf-8')
        # NOTE(review): this assignment is redundant -- route.update() below
        # sets 'route_code' again with the same value.
        route['route_code'] = route_code
        route.update({
            'route_code': route_code,
            'items': items,
        })
        return route
    def _extract_route_profile_ids(self, route_code):
        # Inverse of the encoding in _prepare_route_result: base64 -> csv of
        # integer profile ids. Any decoding failure is reported as a 400.
        try:
            profile_ids = base64.urlsafe_b64decode(route_code.encode('utf-8'))
            profile_ids = profile_ids.decode('utf-8')
            profile_ids = [int(profile_id) for profile_id in profile_ids.split(',')]
        except Exception as e:
            raise ParseError('route_code is malformed')
        return profile_ids
    @list_route(['GET',],
                url_path='social-routes'
                , permission_classes=[IsAuthenticated,]
                )
    def social_routes(self, request, *args, **kwargs):
        '''
        return a list of [
            {profile_id: ''}
        ]
        '''
        target_user_id = request.query_params.get('target_user')
        dest_profile = get_object_or_404(Profile, user_id=target_user_id)
        src_profile = get_object_or_404(Profile, user=request.user)
        # Top-10 chains connecting the requester to the target.
        routes = calculate_topN_social_routes(src_profile, dest_profile, 10)
        # Collect every profile id mentioned by any route so they can all be
        # fetched in a single query.
        profile_id_set = set()
        for route in routes:
            profile_id_set.update(route['profile_ids'])
        p_qs = Profile.objects.filter(id__in=profile_id_set).values('id',
            'user__nickname', 'user__username', 'phone_num')
        id_profile_dict = {}
        for p in p_qs:
            nickname = p.pop('user__nickname')
            username = p.pop('user__username')
            p.update({
                'display_name': nickname or username,
            })
            # if display_name is '' then from user's own mobile
            if p['display_name']:
                p.pop('phone_num')
            id_profile_dict[ p['id'] ] = p
        result = []
        for route in routes:
            result.append( self._prepare_route_result(route, id_profile_dict) )
        return Response(result)
    @list_route(['GET',],
                url_path='social-route-detail'
                #, permission_classes=[IsAuthenticated,]
                )
    def social_route_detail(self, request, *args, **kwargs):
        # Decode a route_code back into profiles, serialized in route order.
        route_code = request.query_params.get('route_code')
        profile_ids = self._extract_route_profile_ids(route_code)
        queryset = Profile.objects.filter(id__in=profile_ids)
        serializer = self.get_serializer(queryset, many=True)
        # Re-order the serialized profiles to match the route's id order,
        # since the queryset does not preserve it.
        profile_id_idx_dict = {}
        for idx, profile_id in enumerate(profile_ids):
            profile_id_idx_dict[profile_id] = idx
        result = [0]*len(profile_ids)
        for profile in serializer.data:
            idx = profile_id_idx_dict.get(profile['id'])
            result[idx] = profile
        return Response(result)
|
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
import logging
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from src import utils
import src.file_utils as fu
import tfcode.nav_utils as nu
from tfcode import tf_utils
# Re-export shared navigation helpers from tfcode.nav_utils under local
# names; the leading underscore marks the ones private to this module.
setup_train_step_kwargs = nu.default_train_step_kwargs
compute_losses_multi_or = nu.compute_losses_multi_or
get_repr_from_image = nu.get_repr_from_image
_save_d_at_t = nu.save_d_at_t
_save_all = nu.save_all
_eval_ap = nu.eval_ap
_eval_dist = nu.eval_dist
_plot_trajectories = nu.plot_trajectories
def lstm_online(cell_fn, num_steps, inputs, state, varscope):
  """Manually unroll cell_fn over the time axis, returning the final state so
  it can be fed back in on the next call (online, step-by-step execution)."""
  # inputs is B x num_steps x C, C channels.
  # state is 2 tuple with B x 1 x C1, B x 1 x C2
  # Output state is always B x 1 x C
  inputs = tf.unstack(inputs, axis=1, num=num_steps)
  state = tf.unstack(state, axis=1, num=1)[0]
  outputs = []
  # NOTE(review): variables are marked reused before the FIRST step whenever
  # num_steps > 1; this assumes the cell's variables were already created
  # elsewhere (e.g. by a previous single-step call) -- confirm with callers.
  if num_steps > 1:
    varscope.reuse_variables()
  for s in range(num_steps):
    output, state = cell_fn(inputs[s], state)
    outputs.append(output)
  outputs = tf.stack(outputs, axis=1)
  # Re-add the singleton time axis so the state shape matches the input form.
  state = tf.stack([state], axis=1)
  return outputs, state
def _inputs(problem, lstm_states, lstm_state_dims):
  """Declare the model's input placeholders.

  Returns three dicts of input tensors (built via tf_utils.setup_inputs):
    common_input_data: per-episode inputs (map, goal locations).
    step_input_data: per-step inputs (images, goal deltas, LSTM states, ...).
    train_data: training-only inputs merged with the two dicts above.
  """
  # Set up inputs.
  with tf.name_scope('inputs'):
    n_views = problem.n_views
    inputs = []
    inputs.append(('orig_maps', tf.float32,
                   (problem.batch_size, 1, None, None, 1)))
    inputs.append(('goal_loc', tf.float32,
                   (problem.batch_size, problem.num_goals, 2)))
    # For initing LSTM.
    inputs.append(('rel_goal_loc_at_start', tf.float32,
                   (problem.batch_size, problem.num_goals,
                    problem.rel_goal_loc_dim)))
    common_input_data, _ = tf_utils.setup_inputs(inputs)
    inputs = []
    inputs.append(('imgs', tf.float32, (problem.batch_size, None, n_views,
                                        problem.img_height, problem.img_width,
                                        problem.img_channels)))
    # Goal location as a tuple of delta location and delta theta.
    inputs.append(('rel_goal_loc', tf.float32, (problem.batch_size, None,
                                                problem.rel_goal_loc_dim)))
    if problem.outputs.visit_count:
      inputs.append(('visit_count', tf.int32, (problem.batch_size, None, 1)))
      inputs.append(('last_visit', tf.int32, (problem.batch_size, None, 1)))
    # One state placeholder per LSTM configured by the caller.
    for i, (state, dim) in enumerate(zip(lstm_states, lstm_state_dims)):
      inputs.append((state, tf.float32, (problem.batch_size, 1, dim)))
    if problem.outputs.egomotion:
      inputs.append(('incremental_locs', tf.float32,
                     (problem.batch_size, None, 2)))
      inputs.append(('incremental_thetas', tf.float32,
                     (problem.batch_size, None, 1)))
    inputs.append(('step_number', tf.int32, (1, None, 1)))
    inputs.append(('node_ids', tf.int32, (problem.batch_size, None,
                                          problem.node_ids_dim)))
    inputs.append(('perturbs', tf.float32, (problem.batch_size, None,
                                            problem.perturbs_dim)))
    # For plotting result plots
    inputs.append(('loc_on_map', tf.float32, (problem.batch_size, None, 2)))
    inputs.append(('gt_dist_to_goal', tf.float32, (problem.batch_size, None, 1)))
    step_input_data, _ = tf_utils.setup_inputs(inputs)
    inputs = []
    inputs.append(('executed_actions', tf.int32, (problem.batch_size, None)))
    inputs.append(('rewards', tf.float32, (problem.batch_size, None)))
    inputs.append(('action_sample_wts', tf.float32, (problem.batch_size, None)))
    inputs.append(('action', tf.int32, (problem.batch_size, None,
                                        problem.num_actions)))
    train_data, _ = tf_utils.setup_inputs(inputs)
    # train_data is a superset: it includes the step and common inputs too.
    train_data.update(step_input_data)
    train_data.update(common_input_data)
  return common_input_data, step_input_data, train_data
def _add_summaries(m, summary_mode, arop_full_summary_iters):
  """Attach TF summaries for the model m's loss/accuracy ops.

  Stores the resulting summary ops on m.summary_ops keyed by summary_mode.
  """
  summarize_ops = [m.lr_op, m.global_step_op, m.sample_gt_prob_op,
                   m.total_loss_op, m.data_loss_op, m.reg_loss_op] + m.acc_ops
  summarize_names = ['lr', 'global_step', 'sample_gt_prob_op', 'total_loss',
                     'data_loss', 'reg_loss'] + \
                    ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))]
  # 1 marks ops whose values are aggregated across iterations (losses and
  # accuracies); 0 marks instantaneous values (lr, step, sampling prob).
  to_aggregate = [0, 0, 0, 1, 1, 1] + [1]*len(m.acc_ops)
  scope_name = 'summary'
  with tf.name_scope(scope_name):
    s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters,
                                     summarize_ops, summarize_names,
                                     to_aggregate, m.action_prob_op,
                                     m.input_tensors, scope_name=scope_name)
    m.summary_ops = {summary_mode: s_ops}
def visit_count_fc(visit_count, last_visit, embed_neurons, wt_decay, fc_dropout,
                   is_training=True):
  """Embed per-node visit statistics with a small fully-connected network.

  Args:
    visit_count: int tensor, how many times each node was visited.
    last_visit: int tensor, when each node was last visited.
    embed_neurons: layer widths for the embedding FC network.
    wt_decay: weight decay for the FC layers.
    fc_dropout: dropout ratio for the FC layers.
    is_training: training-mode flag forwarded to tf_utils.fc_network.
      BUG FIX: the original body referenced a free variable ``is_training``
      that was defined nowhere in this module and raised NameError at graph
      construction; it is now an explicit keyword argument (default True --
      NOTE(review): confirm the default against callers).

  Returns:
    The embedded features produced by tf_utils.fc_network.
  """
  with tf.variable_scope('embed_visit_count'):
    visit_count = tf.reshape(visit_count, shape=[-1])
    last_visit = tf.reshape(last_visit, shape=[-1])
    # Clip to [-1, 15] so the one-hot encodings below have fixed depth 16.
    visit_count = tf.clip_by_value(visit_count, clip_value_min=-1,
                                   clip_value_max=15)
    last_visit = tf.clip_by_value(last_visit, clip_value_min=-1,
                                  clip_value_max=15)
    visit_count = tf.one_hot(visit_count, depth=16, axis=1, dtype=tf.float32,
                             on_value=10., off_value=0.)
    last_visit = tf.one_hot(last_visit, depth=16, axis=1, dtype=tf.float32,
                            on_value=10., off_value=0.)
    f = tf.concat([visit_count, last_visit], 1)
    x, _ = tf_utils.fc_network(
        f, neurons=embed_neurons, wt_decay=wt_decay, name='visit_count_embed',
        offset=0, batch_norm_param=None, dropout_ratio=fc_dropout,
        is_training=is_training)
  return x
def lstm_setup(name, x, batch_size, is_single_step, lstm_dim, lstm_out,
               num_steps, state_input_op):
  """Build an LSTM over x, running either 1 step or num_steps depending on
  the is_single_step predicate (via tf.cond)."""
  # returns state_name, state_init_op, updated_state_op, out_op
  with tf.name_scope('reshape_'+name):
    # Flattened features -> (batch, time, channels) for the unroll.
    sh = x.get_shape().as_list()
    x = tf.reshape(x, shape=[batch_size, -1, sh[-1]])
  with tf.variable_scope(name) as varscope:
    cell = tf.contrib.rnn.LSTMCell(
      num_units=lstm_dim, forget_bias=1.0, state_is_tuple=False,
      num_proj=lstm_out, use_peepholes=True,
      initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=0),
      cell_clip=None, proj_clip=None)
    # Flat (non-tuple) state: cell state and projected output concatenated.
    sh = [batch_size, 1, lstm_dim+lstm_out]
    state_init_op = tf.constant(0., dtype=tf.float32, shape=sh)
    fn = lambda ns: lstm_online(cell, ns, x, state_input_op, varscope)
    out_op, updated_state_op = tf.cond(is_single_step, lambda: fn(1), lambda:
                                       fn(num_steps))
  return name, state_init_op, updated_state_op, out_op
def combine_setup(name, combine_type, embed_img, embed_goal, num_img_neuorons=None,
                  num_goal_neurons=None):
  """Combine image and goal embeddings.

  combine_type selects the operation: 'add' (element-wise sum), 'multiply'
  (batched matmul between reshaped embeddings), 'none'/'imgonly' (image
  embedding only), or 'goalonly' (goal embedding only).
  """
  with tf.name_scope(name + '_' + combine_type):
    if combine_type == 'add':
      # Simple concat features from goal and image
      out = embed_img + embed_goal
    elif combine_type == 'multiply':
      # Multiply things together
      # BUG FIX: use integer (floor) division here -- under Python 3 the
      # original '/' produced a float, which is not a valid shape entry for
      # tf.reshape. num_img_neuorons is expected to be a multiple of
      # num_goal_neurons.
      re_embed_img = tf.reshape(
          embed_img, shape=[-1, num_img_neuorons // num_goal_neurons,
                            num_goal_neurons])
      re_embed_goal = tf.reshape(embed_goal, shape=[-1, num_goal_neurons, 1])
      x = tf.matmul(re_embed_img, re_embed_goal, transpose_a=False, transpose_b=False)
      out = slim.flatten(x)
    elif combine_type == 'none' or combine_type == 'imgonly':
      out = embed_img
    elif combine_type == 'goalonly':
      out = embed_goal
    else:
      # logging.fatal is an alias for logging.critical and does NOT abort, so
      # raise explicitly; otherwise 'out' would be unbound below.
      logging.fatal('Undefined combine_type: %s', combine_type)
      raise ValueError('Undefined combine_type: %s' % combine_type)
  return out
def preprocess_egomotion(locs, thetas):
  """Featurize egomotion: concatenate incremental locations with sin/cos of
  the incremental headings, then flatten the time axis into the batch."""
  with tf.name_scope('pre_ego'):
    pre_ego = tf.concat([locs, tf.sin(thetas), tf.cos(thetas)], 2)
    sh = pre_ego.get_shape().as_list()
    pre_ego = tf.reshape(pre_ego, [-1, sh[-1]])
  return pre_ego
def setup_to_run(m, args, is_training, batch_norm_is_training, summary_mode):
  """Builds the full navigation-policy graph onto the model container `m`.

  Wires up, in order: the convolutional image encoder, goal / image / ego
  embeddings, optional LSTMs (egomotion, image, output), the action
  prediction head, losses, the training op and summaries.  All created ops
  are attached as attributes on `m` and collected in m.input_tensors /
  m.train_ops.

  Args:
    m: mutable namespace-like object that ops get attached to; returned.
    args: configuration bundle (args.solver, args.arch, args.navtask,
      args.summary are all read).
    is_training: Python bool used at graph-build time (dropout, ewma decay).
    batch_norm_is_training: default value for the batch-norm mode
      placeholder (can be overridden through the placeholder at run time).
    summary_mode: forwarded to _add_summaries.

  Returns:
    m, with the constructed ops attached.
  """
  # Set up the model.
  tf.set_random_seed(args.solver.seed)
  task_params = args.navtask.task_params
  num_steps = task_params.num_steps
  num_goals = task_params.num_goals
  num_actions = task_params.num_actions
  # num_actions_ may exceed num_actions when the output head also carries a
  # baseline (see the slicing at the action_logits_op assignments below).
  num_actions_ = num_actions
  n_views = task_params.n_views
  batch_norm_is_training_op = \
      tf.placeholder_with_default(batch_norm_is_training, shape=[],
                                  name='batch_norm_is_training_op')
  # Setup the inputs
  m.input_tensors = {}
  lstm_states = []; lstm_state_dims = [];
  state_names = []; updated_state_ops = []; init_state_ops = [];
  # Declare which LSTM state tensors the input pipeline must provide.
  if args.arch.lstm_output:
    lstm_states += ['lstm_output']
    lstm_state_dims += [args.arch.lstm_output_dim+task_params.num_actions]
  if args.arch.lstm_ego:
    lstm_states += ['lstm_ego']
    lstm_state_dims += [args.arch.lstm_ego_dim + args.arch.lstm_ego_out]
    lstm_states += ['lstm_img']
    lstm_state_dims += [args.arch.lstm_img_dim + args.arch.lstm_img_out]
  elif args.arch.lstm_img:
    # An LSTM only on the image
    lstm_states += ['lstm_img']
    lstm_state_dims += [args.arch.lstm_img_dim + args.arch.lstm_img_out]
  else:
    # No LSTMs involved here.
    # (intentional no-op expression statement)
    None
  m.input_tensors['common'], m.input_tensors['step'], m.input_tensors['train'] = \
      _inputs(task_params, lstm_states, lstm_state_dims)
  with tf.name_scope('check_size'):
    # Single-step mode is detected from the second dim of the image tensor.
    is_single_step = tf.equal(tf.unstack(tf.shape(m.input_tensors['step']['imgs']),
                                         num=6)[1], 1)
  images_reshaped = tf.reshape(m.input_tensors['step']['imgs'],
      shape=[-1, task_params.img_height, task_params.img_width,
             task_params.img_channels], name='re_image')
  rel_goal_loc_reshaped = tf.reshape(m.input_tensors['step']['rel_goal_loc'],
      shape=[-1, task_params.rel_goal_loc_dim], name='re_rel_goal_loc')
  x, vars_ = get_repr_from_image(
      images_reshaped, task_params.modalities, task_params.data_augment,
      args.arch.encoder, args.solver.freeze_conv, args.solver.wt_decay,
      is_training)
  # Reshape into nice things so that these can be accumulated over time steps
  # for faster backprop.
  sh_before = x.get_shape().as_list()
  m.encoder_output = tf.reshape(
      x, shape=[task_params.batch_size, -1, n_views] + sh_before[1:])
  x = tf.reshape(m.encoder_output, shape=[-1] + sh_before[1:])
  # Add a layer to reduce dimensions for a fc layer.
  if args.arch.dim_reduce_neurons > 0:
    ks = 1; neurons = args.arch.dim_reduce_neurons;
    # He-style init scale for the 1x1 reduction conv.
    init_var = np.sqrt(2.0/(ks**2)/neurons)
    # NOTE(review): batch_norm_param is the shared args.arch dict, mutated in
    # place here and below — all users see is_training as the placeholder.
    batch_norm_param = args.arch.batch_norm_param
    batch_norm_param['is_training'] = batch_norm_is_training_op
    m.conv_feat = slim.conv2d(
        x, neurons, kernel_size=ks, stride=1, normalizer_fn=slim.batch_norm,
        normalizer_params=batch_norm_param, padding='SAME', scope='dim_reduce',
        weights_regularizer=slim.l2_regularizer(args.solver.wt_decay),
        weights_initializer=tf.random_normal_initializer(stddev=init_var))
    reshape_conv_feat = slim.flatten(m.conv_feat)
    sh = reshape_conv_feat.get_shape().as_list()
    # Concatenate the n_views views into one flat feature vector per step.
    m.reshape_conv_feat = tf.reshape(reshape_conv_feat,
                                     shape=[-1, sh[1]*n_views])
  # Restore these from a checkpoint.
  if args.solver.pretrained_path is not None:
    m.init_fn = slim.assign_from_checkpoint_fn(args.solver.pretrained_path,
                                               vars_)
  else:
    m.init_fn = None
  # Hit the goal_location with a bunch of fully connected layers, to embed it
  # into some space.
  with tf.variable_scope('embed_goal'):
    batch_norm_param = args.arch.batch_norm_param
    batch_norm_param['is_training'] = batch_norm_is_training_op
    m.embed_goal, _ = tf_utils.fc_network(
        rel_goal_loc_reshaped, neurons=args.arch.goal_embed_neurons,
        wt_decay=args.solver.wt_decay, name='goal_embed', offset=0,
        batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout,
        is_training=is_training)
  if args.arch.embed_goal_for_state:
    # Separate goal embedding used to initialize the output LSTM state (see
    # lstm_output_init_state_from_goal below).
    with tf.variable_scope('embed_goal_for_state'):
      batch_norm_param = args.arch.batch_norm_param
      batch_norm_param['is_training'] = batch_norm_is_training_op
      m.embed_goal_for_state, _ = tf_utils.fc_network(
          m.input_tensors['common']['rel_goal_loc_at_start'][:,0,:],
          neurons=args.arch.goal_embed_neurons, wt_decay=args.solver.wt_decay,
          name='goal_embed', offset=0, batch_norm_param=batch_norm_param,
          dropout_ratio=args.arch.fc_dropout, is_training=is_training)
  # Hit the goal_location with a bunch of fully connected layers, to embed it
  # into some space.
  with tf.variable_scope('embed_img'):
    batch_norm_param = args.arch.batch_norm_param
    batch_norm_param['is_training'] = batch_norm_is_training_op
    m.embed_img, _ = tf_utils.fc_network(
        m.reshape_conv_feat, neurons=args.arch.img_embed_neurons,
        wt_decay=args.solver.wt_decay, name='img_embed', offset=0,
        batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout,
        is_training=is_training)
  # For lstm_ego, and lstm_image, embed the ego motion, accumulate it into an
  # LSTM, combine with image features and accumulate those in an LSTM. Finally
  # combine what you get from the image LSTM with the goal to output an action.
  if args.arch.lstm_ego:
    ego_reshaped = preprocess_egomotion(m.input_tensors['step']['incremental_locs'],
                                        m.input_tensors['step']['incremental_thetas'])
    with tf.variable_scope('embed_ego'):
      batch_norm_param = args.arch.batch_norm_param
      batch_norm_param['is_training'] = batch_norm_is_training_op
      m.embed_ego, _ = tf_utils.fc_network(
          ego_reshaped, neurons=args.arch.ego_embed_neurons,
          wt_decay=args.solver.wt_decay, name='ego_embed', offset=0,
          batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout,
          is_training=is_training)
    state_name, state_init_op, updated_state_op, out_op = lstm_setup(
        'lstm_ego', m.embed_ego, task_params.batch_size, is_single_step,
        args.arch.lstm_ego_dim, args.arch.lstm_ego_out, num_steps*num_goals,
        m.input_tensors['step']['lstm_ego'])
    state_names += [state_name]
    init_state_ops += [state_init_op]
    updated_state_ops += [updated_state_op]
    # Combine the output with the vision features.
    m.img_ego_op = combine_setup('img_ego', args.arch.combine_type_ego,
                                 m.embed_img, out_op,
                                 args.arch.img_embed_neurons[-1],
                                 args.arch.lstm_ego_out)
    # LSTM on these vision features.
    state_name, state_init_op, updated_state_op, out_op = lstm_setup(
        'lstm_img', m.img_ego_op, task_params.batch_size, is_single_step,
        args.arch.lstm_img_dim, args.arch.lstm_img_out, num_steps*num_goals,
        m.input_tensors['step']['lstm_img'])
    state_names += [state_name]
    init_state_ops += [state_init_op]
    updated_state_ops += [updated_state_op]
    m.img_for_goal = out_op
    num_img_for_goal_neurons = args.arch.lstm_img_out
  elif args.arch.lstm_img:
    # LSTM on just the image features.
    state_name, state_init_op, updated_state_op, out_op = lstm_setup(
        'lstm_img', m.embed_img, task_params.batch_size, is_single_step,
        args.arch.lstm_img_dim, args.arch.lstm_img_out, num_steps*num_goals,
        m.input_tensors['step']['lstm_img'])
    state_names += [state_name]
    init_state_ops += [state_init_op]
    updated_state_ops += [updated_state_op]
    m.img_for_goal = out_op
    num_img_for_goal_neurons = args.arch.lstm_img_out
  else:
    # No image-side LSTM: feed the image embedding straight to the combiner.
    m.img_for_goal = m.embed_img
    num_img_for_goal_neurons = args.arch.img_embed_neurons[-1]
  if args.arch.use_visit_count:
    # Add a visit-count embedding into the goal embedding.
    m.embed_visit_count = visit_count_fc(
        m.input_tensors['step']['visit_count'],
        m.input_tensors['step']['last_visit'], args.arch.goal_embed_neurons,
        args.solver.wt_decay, args.arch.fc_dropout, is_training=is_training)
    m.embed_goal = m.embed_goal + m.embed_visit_count
  m.combined_f = combine_setup('img_goal', args.arch.combine_type,
                               m.img_for_goal, m.embed_goal,
                               num_img_for_goal_neurons,
                               args.arch.goal_embed_neurons[-1])
  # LSTM on the combined representation.
  if args.arch.lstm_output:
    name = 'lstm_output'
    # A few fully connected layers here.
    with tf.variable_scope('action_pred'):
      batch_norm_param = args.arch.batch_norm_param
      batch_norm_param['is_training'] = batch_norm_is_training_op
      x, _ = tf_utils.fc_network(
          m.combined_f, neurons=args.arch.pred_neurons,
          wt_decay=args.solver.wt_decay, name='pred', offset=0,
          batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout)
    if args.arch.lstm_output_init_state_from_goal:
      # Use the goal embedding to initialize the LSTM state.
      # UGLY CLUGGY HACK: if this is doing computation for a single time step
      # then this will not involve back prop, so we can use the state input from
      # the feed dict, otherwise we compute the state representation from the
      # goal and feed that in. Necessary for using goal location to generate the
      # state representation.
      m.embed_goal_for_state = tf.expand_dims(m.embed_goal_for_state, dim=1)
      state_op = tf.cond(is_single_step, lambda: m.input_tensors['step'][name],
                         lambda: m.embed_goal_for_state)
      state_name, state_init_op, updated_state_op, out_op = lstm_setup(
          name, x, task_params.batch_size, is_single_step,
          args.arch.lstm_output_dim,
          num_actions_,
          num_steps*num_goals, state_op)
      init_state_ops += [m.embed_goal_for_state]
    else:
      state_op = m.input_tensors['step'][name]
      state_name, state_init_op, updated_state_op, out_op = lstm_setup(
          name, x, task_params.batch_size, is_single_step,
          args.arch.lstm_output_dim,
          num_actions_, num_steps*num_goals, state_op)
      init_state_ops += [state_init_op]
    state_names += [state_name]
    updated_state_ops += [updated_state_op]
    out_op = tf.reshape(out_op, shape=[-1, num_actions_])
    # Split off any extra baseline outputs beyond the action logits.
    if num_actions_ > num_actions:
      m.action_logits_op = out_op[:,:num_actions]
      m.baseline_op = out_op[:,num_actions:]
    else:
      m.action_logits_op = out_op
      m.baseline_op = None
    m.action_prob_op = tf.nn.softmax(m.action_logits_op)
  else:
    # A few fully connected layers here.
    with tf.variable_scope('action_pred'):
      batch_norm_param = args.arch.batch_norm_param
      batch_norm_param['is_training'] = batch_norm_is_training_op
      out_op, _ = tf_utils.fc_network(
          m.combined_f, neurons=args.arch.pred_neurons,
          wt_decay=args.solver.wt_decay, name='pred', offset=0,
          num_pred=num_actions_,
          batch_norm_param=batch_norm_param,
          dropout_ratio=args.arch.fc_dropout, is_training=is_training)
      if num_actions_ > num_actions:
        m.action_logits_op = out_op[:,:num_actions]
        m.baseline_op = out_op[:,num_actions:]
      else:
        m.action_logits_op = out_op
        m.baseline_op = None
      m.action_prob_op = tf.nn.softmax(m.action_logits_op)
  m.train_ops = {}
  m.train_ops['step'] = m.action_prob_op
  m.train_ops['common'] = [m.input_tensors['common']['orig_maps'],
                           m.input_tensors['common']['goal_loc'],
                           m.input_tensors['common']['rel_goal_loc_at_start']]
  m.train_ops['state_names'] = state_names
  m.train_ops['init_state'] = init_state_ops
  m.train_ops['updated_state'] = updated_state_ops
  m.train_ops['batch_norm_is_training_op'] = batch_norm_is_training_op
  # Flat list of ops which cache the step data.
  m.train_ops['step_data_cache'] = [tf.no_op()]
  if args.solver.freeze_conv:
    # With a frozen encoder the per-step encoder output can be cached.
    m.train_ops['step_data_cache'] = [m.encoder_output]
  else:
    m.train_ops['step_data_cache'] = []
  ewma_decay = 0.99 if is_training else 0.0
  weight = tf.ones_like(m.input_tensors['train']['action'], dtype=tf.float32,
                        name='weight')
  m.reg_loss_op, m.data_loss_op, m.total_loss_op, m.acc_ops = \
      compute_losses_multi_or(
          m.action_logits_op, m.input_tensors['train']['action'],
          weights=weight, num_actions=num_actions,
          data_loss_wt=args.solver.data_loss_wt,
          reg_loss_wt=args.solver.reg_loss_wt, ewma_decay=ewma_decay)
  if args.solver.freeze_conv:
    # Do not optimize the (pretrained, frozen) encoder variables.
    vars_to_optimize = list(set(tf.trainable_variables()) - set(vars_))
  else:
    vars_to_optimize = None
  m.lr_op, m.global_step_op, m.train_op, m.should_stop_op, m.optimizer, \
  m.sync_optimizer = tf_utils.setup_training(
      m.total_loss_op,
      args.solver.initial_learning_rate,
      args.solver.steps_per_decay,
      args.solver.learning_rate_decay,
      args.solver.momentum,
      args.solver.max_steps,
      args.solver.sync,
      args.solver.adjust_lr_sync,
      args.solver.num_workers,
      args.solver.task,
      vars_to_optimize=vars_to_optimize,
      clip_gradient_norm=args.solver.clip_gradient_norm,
      typ=args.solver.typ, momentum2=args.solver.momentum2,
      adam_eps=args.solver.adam_eps)
  # Schedule for sampling ground-truth vs. predicted actions (DAgger-style).
  if args.arch.sample_gt_prob_type == 'inverse_sigmoid_decay':
    m.sample_gt_prob_op = tf_utils.inverse_sigmoid_decay(args.arch.isd_k,
                                                         m.global_step_op)
  elif args.arch.sample_gt_prob_type == 'zero':
    m.sample_gt_prob_op = tf.constant(-1.0, dtype=tf.float32)
  elif args.arch.sample_gt_prob_type.split('_')[0] == 'step':
    step = int(args.arch.sample_gt_prob_type.split('_')[1])
    m.sample_gt_prob_op = tf_utils.step_gt_prob(
        step, m.input_tensors['step']['step_number'][0,0,0])
  m.sample_action_type = args.arch.action_sample_type
  m.sample_action_combine_type = args.arch.action_sample_combine_type
  _add_summaries(m, summary_mode, args.summary.arop_full_summary_iters)
  m.init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
  m.saver_op = tf.train.Saver(keep_checkpoint_every_n_hours=4,
                              write_version=tf.train.SaverDef.V2)
  return m
|
|
import datetime
from dateutil.parser import parse
from django.db import models
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
from charityadmin.apps.timeslots.models import days_of_week_choices, days_of_week_list, Client, Volunteer, ClientOpening, ClientOpeningMetadata, VolunteerCommitment, VolunteerCommitmentMetadata
from charityadmin.apps.timeslots.widgets import SplitDateTimeFieldWithLabels
class UserForm(forms.Form):
    """Plain (non-model) form for the basic django.contrib.auth User fields."""
    email = forms.EmailField(max_length=100)
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)
class ClientForm(forms.ModelForm):
    """Admin-facing ModelForm exposing every Client field."""
    # for admin eyes only
    class Meta:
        model = Client
        fields = '__all__'
class VolunteerForm(forms.ModelForm):
    """Admin-facing ModelForm exposing every Volunteer field."""
    # for admin eyes only
    class Meta:
        model = Volunteer
        fields = '__all__'
class VolunteerSignupForm(forms.ModelForm):
    """End-user facing form that creates/edits a Volunteer together with its
    linked django.contrib.auth User (email, name, password) in one step.
    """
    email = forms.EmailField(max_length=100, required=True)
    email_confirm = forms.EmailField(max_length=100, required=True)
    first_name = forms.CharField(max_length=30, required=True)
    last_name = forms.CharField(max_length=30, required=True)
    password = forms.CharField(max_length=100, widget=forms.widgets.PasswordInput())

    class Meta:
        model = Volunteer
        fields = ['first_name', 'last_name', 'email', 'email_confirm', 'password', 'phone']

    def __init__(self, *args, **kwargs):
        super(VolunteerSignupForm, self).__init__(*args, **kwargs)
        self.fields['email_confirm'].label = "Confirm Email"
        if self.instance.pk:
            # Editing: seed the auth fields from the linked User, since they
            # are not stored on the Volunteer model itself.
            user = self.instance.user
            self.initial['email'] = user.email
            self.initial['first_name'] = user.first_name
            self.initial['last_name'] = user.last_name

    def clean_password(self):
        # A password is only mandatory when signing up; on edit a blank value
        # leaves the stored password untouched.
        if not self.instance.pk and not self.cleaned_data['password']:
            raise forms.ValidationError("Password is required")
        return self.cleaned_data['password']

    def clean(self):
        cleaned_data = super(VolunteerSignupForm, self).clean()
        # Use .get(): a field that failed its own validation is absent from
        # cleaned_data, and direct indexing would raise KeyError (a 500)
        # instead of surfacing the field error to the user.
        if cleaned_data.get('email') != cleaned_data.get('email_confirm'):
            raise forms.ValidationError("Please confirm your email addresses match")
        return cleaned_data

    def save(self, commit=True):
        """Save the Volunteer, syncing name/email onto the linked User.

        Returns the Volunteer instance (the standard ModelForm.save
        contract, which the previous implementation omitted).
        """
        if self.instance.pk:
            # Editing an existing Volunteer: push changed auth fields back to
            # the User, hitting the database only when something changed.
            # NOTE(review): a password entered on edit is ignored here —
            # confirm whether password changes should be supported.
            user = self.instance.user
            changed = False
            if self.cleaned_data['email'] != user.email:
                user.email = self.cleaned_data['email']
                changed = True
            if self.cleaned_data['first_name'] != user.first_name:
                user.first_name = self.cleaned_data['first_name']
                changed = True
            if self.cleaned_data['last_name'] != user.last_name:
                user.last_name = self.cleaned_data['last_name']
                changed = True
            if changed:
                user.save()
        else:
            # Creating a new volunteer: reuse any existing User with this
            # email as username, or create one with a properly hashed password.
            first = self.cleaned_data['first_name']
            last = self.cleaned_data['last_name']
            email = self.cleaned_data['email']
            password = self.cleaned_data['password']
            user, created = User.objects.get_or_create(
                username=email,
                defaults={'email': email, 'first_name': first,
                          'last_name': last,
                          'password': make_password(password)})
        vol = super(VolunteerSignupForm, self).save(commit=False)
        vol.user = user
        return super(VolunteerSignupForm, self).save(commit=commit)
class OpeningExceptionForm(forms.Form):
    """Hidden-field form recording a one-date exception to a ClientOpening."""
    clientOpening = forms.CharField(max_length=10, widget=forms.widgets.HiddenInput())
    date = forms.DateTimeField(widget=forms.widgets.HiddenInput())
class CommitmentExceptionForm(forms.Form):
    """Hidden-field form recording a one-date exception to a VolunteerCommitment."""
    commitment = forms.CharField(max_length=10, widget=forms.widgets.HiddenInput())
    date = forms.DateTimeField(widget=forms.widgets.HiddenInput())
class OpeningForm(forms.ModelForm):
    """Form for a ClientOpening and its schedule metadata.

    The related metadata rows are flattened into type-specific convenience
    fields (daysOfWeek / dayOfMonth / oneOffDate), folded back into the
    hidden ``metadata`` field in clean(), and rewritten as
    ClientOpeningMetadata rows on save().
    """
    time = forms.CharField(max_length=10, label="Arrival Time", required=False)
    metadata = forms.CharField(max_length=30, required=False)
    # for days of week, alternating days of week
    daysOfWeek = forms.MultipleChoiceField(label="Days of Week", widget=forms.widgets.CheckboxSelectMultiple(), choices=days_of_week_choices, required=False)
    # for day of month
    dayOfMonth = forms.CharField(label="Day of Month (ex: 15)", max_length=2, required=False)
    # for one off date
    oneOffDate = forms.DateField(label="One-Off Date (ex: 12/31/13)", required=False)

    class Meta:
        model = ClientOpening
        fields = ('client', 'type', 'daysOfWeek', 'dayOfMonth', 'oneOffDate', 'metadata', 'time', 'startDate', 'endDate', 'notes')
        widgets = {
            'notes': forms.Textarea(attrs={'cols': 80, 'rows': 4, 'class': 'notes'}),
            'startDate': SplitDateTimeFieldWithLabels(),
            'endDate': SplitDateTimeFieldWithLabels()
        }

    def __init__(self, *args, **kwargs):
        # patch the initial data to include the metadata values
        super(OpeningForm, self).__init__(*args, **kwargs)
        self.initial['metadata'] = self.instance.get_all_metadata_string()
        metadataset = self.instance.get_all_metadata_list()
        openingtype = self.initial['type']
        if len(metadataset) > 0:
            if openingtype in ["Days of Week", "Days of Alt Week"]:
                self.initial['daysOfWeek'] = metadataset
            elif openingtype == "One-Off":
                self.initial['oneOffDate'] = list(metadataset)[0]
            else:
                self.initial['dayOfMonth'] = list(metadataset)[0]
        self.fields['client'].widget.attrs['class'] = 'hidden'
        self.fields['metadata'].widget.attrs['class'] = 'hidden'
        if self.initial['startDate'] is not None:
            self.initial['time'] = self.initial['startDate'].time()

    def clean(self):
        """Fold the type-specific field into ``metadata`` and normalize times.

        All lookups use ``cleaned_data.get`` because a field that failed its
        own validation is absent from cleaned_data; direct indexing (as in
        the original) turned ordinary field errors into KeyError 500s.
        """
        cleaned_data = super(OpeningForm, self).clean()
        openingtype = cleaned_data.get('type')
        if openingtype in ["Days of Week", "Days of Alt Week"]:
            cleaned_data['metadata'] = ''.join(cleaned_data.get('daysOfWeek', []))
        elif openingtype == "One-Off":
            specificDate = cleaned_data.get('oneOffDate')
            if specificDate is None:
                # The field is optional at the field level, so enforce it here
                # rather than crash on None.strftime().
                raise forms.ValidationError("One-Off Openings require a date")
            cleaned_data['metadata'] = specificDate.strftime('%Y-%m-%d')
        else:
            # Day of Month
            err_message = "Day of Month Openings require a number between 1 and 31"
            try:
                # TypeError covers a missing/None value, ValueError a non-number.
                specific_int = int(cleaned_data.get('dayOfMonth'))
            except (TypeError, ValueError):
                raise forms.ValidationError(err_message)
            if not (1 <= specific_int <= 31):
                raise forms.ValidationError(err_message)
            cleaned_data['metadata'] = specific_int
        # Set the startDate time from the Arrival Time field and push endDate
        # to the very end of its day.
        if cleaned_data.get('time'):
            time = cleaned_data['time']
            try:
                time = parse(time)
            except ValueError:
                raise forms.ValidationError("Arrival Time requires a standard time format (e.g., 9:00pm or 10am)")
            cleaned_data['time'] = time
            if cleaned_data.get('startDate') is not None:
                cleaned_data['startDate'] = cleaned_data['startDate'].replace(hour=time.hour, minute=time.minute, second=0, microsecond=0)
            if cleaned_data.get('endDate') is not None:
                cleaned_data['endDate'] = cleaned_data['endDate'].replace(hour=23, minute=59, second=59, microsecond=0)
        else:
            raise forms.ValidationError("The Arrival Time is required")
        return cleaned_data

    def save(self, commit=True):
        # On save, wipe out the existing metadata rows and recreate them from
        # the cleaned metadata value.
        self.instance.metadata.all().delete()
        opening_type = self.cleaned_data['type']
        metadatastring = self.cleaned_data['metadata']
        metadata = list()
        if opening_type == "Days of Week" or opening_type == "Days of Alt Week":
            # convert metadata string to list of days
            for day in days_of_week_list:
                if metadatastring.find(day) != -1:
                    metadata.append(day)
        else:
            metadata = [metadatastring]
        for item in metadata:
            ClientOpeningMetadata.objects.create(clientOpening=self.instance, metadata=item)
        return super(OpeningForm, self).save(commit=commit)
class CommitmentForm(forms.ModelForm):
    """Form for a VolunteerCommitment made against a ClientOpening.

    Mirrors OpeningForm: schedule metadata is flattened into type-specific
    fields, folded back into ``metadata`` in clean(), and rewritten as
    VolunteerCommitmentMetadata rows on save().  The commitment's type is
    always derived from its ClientOpening (see clean_type).
    """
    metadata = forms.CharField(max_length=30, required=False)
    # for days of week, alternating days of week
    daysOfWeek = forms.MultipleChoiceField(label="Days of Week", widget=forms.widgets.CheckboxSelectMultiple(), choices=days_of_week_choices, required=False)
    # for day of month
    dayOfMonth = forms.CharField(label="Day of Month (ex: 15)", max_length=2, required=False)
    # for one off date
    oneOffDate = forms.DateField(label="One-Off Date (ex: 12/31/13)", required=False)

    class Meta:
        model = VolunteerCommitment
        fields = ('clientOpening', 'volunteer', 'type', 'daysOfWeek', 'dayOfMonth', 'oneOffDate', 'metadata', 'startDate', 'endDate', 'notes')
        widgets = {
            'notes': forms.Textarea(attrs={'cols': 80, 'rows': 4, 'class': 'notes'}),
            'startDate': SplitDateTimeFieldWithLabels(),
            'endDate': SplitDateTimeFieldWithLabels()
        }

    def __init__(self, *args, **kwargs):
        # patch the initial data to include the metadata values
        super(CommitmentForm, self).__init__(*args, **kwargs)
        self.initial['metadata'] = self.instance.get_all_metadata_string()
        metadataset = self.instance.get_all_metadata_list()
        commitmenttype = self.initial['type']
        # NOTE(review): assumes 'clientOpening' is always present in the
        # initial data (forms appear to be built with the opening
        # preselected); an unbound form without it would raise KeyError here
        # — confirm against the views that construct this form.
        opening = ClientOpening.objects.get(id=self.initial['clientOpening'])
        if commitmenttype in ["Days of Week", "Days of Alt Week"]:
            # days of week: only the opening's own days are selectable
            if len(metadataset) > 0:
                self.initial['daysOfWeek'] = metadataset
            choicessubset = ((k, k) for k in opening.get_all_metadata_list())
            self.fields['daysOfWeek'].choices = choicessubset
        elif commitmenttype == "One-Off":
            # one-off: the date comes from the opening and is not editable
            self.initial['oneOffDate'] = list(opening.get_all_metadata_list())[0]
            self.fields['startDate'].widget.attrs['class'] = 'hidden'
            self.fields['endDate'].widget.attrs['class'] = 'hidden'
        else:
            # day of month
            if len(metadataset) > 0:
                self.initial['dayOfMonth'] = list(metadataset)[0]
        self.fields['clientOpening'].widget.attrs['class'] = 'hidden'
        self.fields['volunteer'].widget.attrs['class'] = 'admin-only'
        self.fields['type'].widget.attrs['class'] = 'hidden'
        self.fields['metadata'].widget.attrs['class'] = 'hidden'
        self.fields['dayOfMonth'].widget.attrs['class'] = 'hidden'
        self.fields['oneOffDate'].widget.attrs['class'] = 'hidden'

    def clean_type(self):
        # get type from clientopening (we might want to make commitments have
        # different types than openings, but for now lets keep it simple)
        opening = self.cleaned_data['clientOpening']
        return opening.type

    def clean(self):
        cleaned_data = super(CommitmentForm, self).clean()
        # Use .get() throughout: fields that failed their own validation are
        # absent from cleaned_data and direct indexing would raise KeyError.
        commitmenttype = cleaned_data.get('type')
        if commitmenttype in ["Days of Week", "Days of Alt Week"]:
            cleaned_data['metadata'] = ''.join(cleaned_data.get('daysOfWeek', []))
        elif commitmenttype == "One-Off":
            # The committed date(s) are copied wholesale from the opening.
            opening = cleaned_data.get('clientOpening')
            cleaned_data['metadata'] = opening.get_all_metadata_list() if opening else ''
        else:
            # Day of Month
            err_message = "Day of Month Openings require a number between 1 and 31"
            try:
                # TypeError covers a missing/None value, ValueError a non-number.
                specific_int = int(cleaned_data.get('dayOfMonth'))
            except (TypeError, ValueError):
                raise forms.ValidationError(err_message)
            if not (1 <= specific_int <= 31):
                raise forms.ValidationError(err_message)
            cleaned_data['metadata'] = specific_int
        if not cleaned_data.get('metadata'):
            raise forms.ValidationError("The metadata field is required.")
        # Normalize the commitment window to whole days: startDate at
        # 00:00:00 and endDate at 23:59:59.  (The original used hour=11 —
        # an 11:59 AM cutoff — inconsistent with OpeningForm's end-of-day.)
        if cleaned_data.get('startDate') is not None:
            cleaned_data['startDate'] = cleaned_data['startDate'].replace(hour=0, minute=0, second=0, microsecond=0)
        if cleaned_data.get('endDate'):
            cleaned_data['endDate'] = cleaned_data['endDate'].replace(hour=23, minute=59, second=59, microsecond=0)
        return cleaned_data

    def save(self, commit=True):
        # On save, wipe out the existing metadata rows and recreate them.
        self.instance.metadata.all().delete()
        metadatastring = self.cleaned_data['metadata']
        commitmenttype = self.cleaned_data['type']
        if commitmenttype == "Days of Week" or commitmenttype == "Days of Alt Week":
            # convert metadata string to list of days
            metadata = [day for day in days_of_week_list if metadatastring.find(day) != -1]
        elif isinstance(metadatastring, (list, set, tuple)):
            # One-Off: already a collection of dates from the opening.
            metadata = list(metadatastring)
        else:
            # Day of Month: a single scalar — wrap it so the loop below works.
            # (The original assigned the bare int, and iterating it raised
            # TypeError on save.)
            metadata = [metadatastring]
        for item in metadata:
            VolunteerCommitmentMetadata.objects.create(volunteerCommitment=self.instance, metadata=item)
        return super(CommitmentForm, self).save(commit=commit)
|
|
# Copyright 2012 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from abc import ABCMeta
from abc import abstractmethod
import copy
import eventlet
import httplib
import time
import six
import six.moves.urllib.parse as urlparse
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import ctrl_conn_to_str
LOG = logging.getLogger(__name__)
DEFAULT_REQUEST_TIMEOUT = 30
DEFAULT_HTTP_TIMEOUT = 10
DEFAULT_RETRIES = 2
DEFAULT_REDIRECTS = 2
DEFAULT_API_REQUEST_POOL_SIZE = 1000
DEFAULT_MAXIMUM_REQUEST_ID = 4294967295
DOWNLOAD_TIMEOUT = 180
@six.add_metaclass(ABCMeta)
class ApiRequest(object):
'''An abstract baseclass for all ApiRequest implementations.
This defines the interface and property structure for both eventlet and
gevent-based ApiRequest classes.
'''
# List of allowed status codes.
ALLOWED_STATUS_CODES = [
httplib.OK,
httplib.CREATED,
httplib.NO_CONTENT,
httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT,
httplib.BAD_REQUEST,
httplib.UNAUTHORIZED,
httplib.FORBIDDEN,
httplib.NOT_FOUND,
httplib.CONFLICT,
httplib.INTERNAL_SERVER_ERROR,
httplib.SERVICE_UNAVAILABLE
]
@abstractmethod
def start(self):
pass
@abstractmethod
def join(self):
pass
@abstractmethod
def copy(self):
pass
def _issue_request(self):
'''Issue a request to a provider.'''
conn = (self._client_conn or
self._api_client.acquire_connection(True,
copy.copy(self._headers),
rid=self._rid()))
if conn is None:
error = Exception(_("No API connections available"))
self._request_error = error
return error
url = self._url
LOG.debug(_("[%(rid)d] Issuing - request url: %(conn)s "
"body: %(body)s"),
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'body': self._body})
issued_time = time.time()
is_conn_error = False
is_conn_service_unavail = False
response = None
try:
redirects = 0
while (redirects <= self._redirects):
# Update connection with user specified request timeout,
# the connect timeout is usually smaller so we only set
# the request timeout after a connection is established
if conn.sock is None:
conn.connect()
conn.sock.settimeout(self._http_timeout)
elif conn.sock.gettimeout() != self._http_timeout:
conn.sock.settimeout(self._http_timeout)
headers = copy.copy(self._headers)
cookie = self._api_client.auth_cookie(conn)
if cookie:
headers["Cookie"] = cookie
gen = self._api_client.config_gen
if gen:
headers["X-Nvp-Wait-For-Config-Generation"] = gen
LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation "
"request header: '%s'"), gen)
try:
conn.request(self._method, url, self._body, headers)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.warn(_("[%(rid)d] Exception issuing request: "
"%(e)s"),
{'rid': self._rid(), 'e': e})
response = conn.getresponse()
response.body = response.read()
response.headers = response.getheaders()
elapsed_time = time.time() - issued_time
LOG.debug(_("[%(rid)d] Completed request '%(conn)s': "
"%(status)s (%(elapsed)s seconds)"),
{'rid': self._rid(),
'conn': self._request_str(conn, url),
'status': response.status,
'elapsed': elapsed_time})
new_gen = response.getheader('X-Nvp-Config-Generation', None)
if new_gen:
LOG.debug(_("Reading X-Nvp-config-Generation response "
"header: '%s'"), new_gen)
if (self._api_client.config_gen is None or
self._api_client.config_gen < int(new_gen)):
self._api_client.config_gen = int(new_gen)
if response.status == httplib.UNAUTHORIZED:
if cookie is None and self._url != "/ws.v1/login":
# The connection still has no valid cookie despite
# attemps to authenticate and the request has failed
# with unauthorized status code. If this isn't a
# a request to authenticate, we should abort the
# request since there is no point in retrying.
self._abort = True
else:
# If request is unauthorized, clear the session cookie
# for the current provider so that subsequent requests
# to the same provider triggers re-authentication.
self._api_client.set_auth_cookie(conn, None)
self._api_client.set_auth_cookie(conn, None)
elif response.status == httplib.SERVICE_UNAVAILABLE:
is_conn_service_unavail = True
if response.status not in [httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT]:
break
elif redirects >= self._redirects:
LOG.info(_("[%d] Maximum redirects exceeded, aborting "
"request"), self._rid())
break
redirects += 1
conn, url = self._redirect_params(conn, response.headers,
self._client_conn is None)
if url is None:
response.status = httplib.INTERNAL_SERVER_ERROR
break
LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"),
{'rid': self._rid(),
'conn': self._request_str(conn, url)})
# yield here, just in case we are not out of the loop yet
eventlet.greenthread.sleep(0)
# If we receive any of these responses, then
# our server did not process our request and may be in an
# errored state. Raise an exception, which will cause the
# the conn to be released with is_conn_error == True
# which puts the conn on the back of the client's priority
# queue.
if (response.status == httplib.INTERNAL_SERVER_ERROR and
response.status > httplib.NOT_IMPLEMENTED):
LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' "
"received: %(status)s"),
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status})
raise Exception(_('Server error return: %s'), response.status)
return response
except Exception as e:
if isinstance(e, httplib.BadStatusLine):
msg = (_("Invalid server response"))
else:
msg = unicode(e)
if response is None:
elapsed_time = time.time() - issued_time
LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
"(%(elapsed)s seconds)"),
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time})
self._request_error = e
is_conn_error = True
return e
finally:
# Make sure we release the original connection provided by the
# acquire_connection() call above.
if self._client_conn is None:
self._api_client.release_connection(conn, is_conn_error,
is_conn_service_unavail,
rid=self._rid())
def _redirect_params(self, conn, headers, allow_release_conn=False):
"""Process redirect response, create new connection if necessary.
Args:
conn: connection that returned the redirect response
headers: response headers of the redirect response
allow_release_conn: if redirecting to a different server,
release existing connection back to connection pool.
Returns: Return tuple(conn, url) where conn is a connection object
to the redirect target and url is the path of the API request
"""
url = None
for name, value in headers:
if name.lower() == "location":
url = value
break
if not url:
LOG.warn(_("[%d] Received redirect status without location header"
" field"), self._rid())
return (conn, None)
# Accept location with the following format:
# 1. /path, redirect to same node
# 2. scheme://hostname:[port]/path where scheme is https or http
# Reject others
# 3. e.g. relative paths, unsupported scheme, unspecified host
result = urlparse.urlparse(url)
if not result.scheme and not result.hostname and result.path:
if result.path[0] == "/":
if result.query:
url = "%s?%s" % (result.path, result.query)
else:
url = result.path
return (conn, url) # case 1
else:
LOG.warn(_("[%(rid)d] Received invalid redirect location: "
"'%(url)s'"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
elif result.scheme not in ["http", "https"] or not result.hostname:
LOG.warn(_("[%(rid)d] Received malformed redirect "
"location: %(url)s"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
# case 2, redirect location includes a scheme
# so setup a new connection and authenticate
if allow_release_conn:
self._api_client.release_connection(conn)
conn_params = (result.hostname, result.port, result.scheme == "https")
conn = self._api_client.acquire_redirect_connection(conn_params, True,
self._headers)
if result.query:
url = "%s?%s" % (result.path, result.query)
else:
url = result.path
return (conn, url)
    def _rid(self):
        '''Return current request id, used to correlate log messages.'''
        return self._request_id
    @property
    def request_error(self):
        '''Return any errors associated with this instance.

        None when no error has been recorded.
        '''
        return self._request_error
    def _request_str(self, conn, url):
        '''Return "<METHOD> <controller>/<url>" string for logging.'''
        return "%s %s/%s" % (self._method, ctrl_conn_to_str(conn), url)
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./merge-zeppelin-pr.py (see config env vars below)
#
# This utility assumes you already have local a Zeppelin git folder and that you
# have added remotes corresponding to both (i) the github apache Zeppelin
# mirror and (ii) the apache git repo.
import json
import os
import re
import subprocess
import sys
import urllib2
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
# Location of your Zeppelin git development area
ZEPPELIN_HOME = os.environ.get("ZEPPELIN_HOME", os.getcwd())
# Remote name which points to the Github site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "moon")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "00000")
GITHUB_BASE = "https://github.com/apache/incubator-zeppelin/pull"
GITHUB_API_BASE = "https://api.github.com/repos/apache/incubator-zeppelin"
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
os.chdir(ZEPPELIN_HOME)
def get_json(url):
    """Fetch url and return the decoded JSON response.

    Exits the process (status -1) with a diagnostic message when the
    request fails, so callers may assume a decoded object on return.
    """
    try:
        return json.load(urllib2.urlopen(url))
    except urllib2.HTTPError as e:
        # Include the HTTP status so the user can tell a bad PR number
        # (404) apart from rate limiting or auth failures; previously the
        # caught exception was silently discarded.
        print("Unable to fetch URL (HTTP %s), exiting: %s" % (e.code, url))
        sys.exit(-1)
def fail(msg):
    """Print msg, restore the original git state, and exit with an error."""
    print msg
    clean_up()
    sys.exit(-1)
def run_cmd(cmd):
    """Echo and run a command, returning its captured stdout.

    cmd may be a list (passed through to subprocess unchanged) or a
    string, which is split on single spaces.
    NOTE(review): the naive split breaks on quoted arguments containing
    spaces - pass a list in those cases (as merge_pr does).
    """
    print cmd
    if isinstance(cmd, list):
        return subprocess.check_output(cmd)
    else:
        return subprocess.check_output(cmd.split(" "))
def continue_maybe(prompt):
    """Ask a y/n question; any answer other than 'y' aborts the script."""
    result = raw_input("\n%s (y/n): " % prompt)
    if result.lower() != "y":
        fail("Okay, exiting")
# Remember where HEAD pointed before any temporary branches are created.
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
    """Restore HEAD and delete all temporary PR_TOOL_* branches."""
    print "Restoring head pointer to %s" % original_head
    run_cmd("git checkout %s" % original_head)
    branches = run_cmd("git branch").replace(" ", "").split("\n")
    for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
        print "Deleting local branch %s" % branch
        run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
    """Squash-merge pull request pr_num into target_ref and push it.

    Reads the module-level globals title, body and pr_repo_desc (set by
    the interactive driver below) to build the commit message.  Returns
    the abbreviated hash of the merge commit.
    """
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)
    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        # Let the user resolve conflicts by hand, then continue.
        msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
        continue_maybe(msg)
        had_conflicts = True
    # All PR commit authors, sorted so the most frequent becomes primary.
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                             '--pretty=format:%an <%ae>']).split("\n")
    commit_date = run_cmd(['git', 'log', '%s' % pr_branch_name, '-1',
                          '--pretty=format:%ad'])
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x), reverse=True)
    primary_author = distinct_authors[0]
    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                      '--pretty=format:%h [%an] %s']).split("\n\n")
    merge_message_flags = []
    merge_message_flags += ["-m", title]
    if body is not None:
        # We remove @ symbols from the body to avoid triggering e-mails
        # to people every time someone creates a public fork of Zeppelin.
        merge_message_flags += ["-m", body.replace("@", "")]
    authors = "\n".join(["Author: %s" % a for a in distinct_authors])
    merge_message_flags += ["-m", authors]
    if had_conflicts:
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
            committer_name, committer_email)
        merge_message_flags += ["-m", message]
    # The string "Closes #%s" string is required for GitHub to correctly close the PR
    merge_message_flags += [
        "-m",
        "Closes #%s from %s and squashes the following commits:" % (pr_num, pr_repo_desc)]
    for c in commits:
        merge_message_flags += ["-m", c]
    run_cmd(['git', 'commit', '--author="%s"' % primary_author, '--date="%s"' % commit_date] + merge_message_flags)
    continue_maybe("Merge complete (local ref %s). Push to %s?" % (
        target_branch_name, PUSH_REMOTE_NAME))
    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)
    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
    """Cherry-pick merge_hash onto a prompted-for branch and push it.

    Returns the name of the branch the commit was picked into.
    """
    pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
    if pick_ref == "":
        pick_ref = default_branch
    pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
    run_cmd("git checkout %s" % pick_branch_name)
    try:
        run_cmd("git cherry-pick -sx %s" % merge_hash)
    except Exception as e:
        # Let the user resolve conflicts by hand, then continue.
        msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
        continue_maybe(msg)
    continue_maybe("Pick complete (local ref %s). Push to %s?" % (
        pick_branch_name, PUSH_REMOTE_NAME))
    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)
    pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
    clean_up()
    print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
    print("Pick hash: %s" % pick_hash)
    return pick_ref
def fix_version_from_branch(branch, versions):
    """Pick the default fix version for a merge branch.

    For master this is the newest unreleased version; for a release
    branch ("branch-X.Y") it is the oldest version whose name starts
    with X.Y.
    """
    # Note: Assumes this is a sorted (newest->oldest) list of un-released versions
    if branch == "master":
        return versions[0]
    else:
        branch_ver = branch.replace("branch-", "")
        return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
    """Transition one ASF JIRA issue to Resolved with fix version(s).

    Prompts for the issue id (defaulting to default_jira_id) and for the
    fix versions derived from the branches the PR was merged into.
    Aborts via fail() if the issue is missing or already resolved/closed.
    """
    asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
                                basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
    jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
    if jira_id == "":
        jira_id = default_jira_id
    try:
        issue = asf_jira.issue(jira_id)
    except Exception as e:
        fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
    cur_status = issue.fields.status.name
    cur_summary = issue.fields.summary
    cur_assignee = issue.fields.assignee
    if cur_assignee is None:
        cur_assignee = "NOT ASSIGNED!!!"
    else:
        cur_assignee = cur_assignee.displayName
    if cur_status == "Resolved" or cur_status == "Closed":
        fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
    print ("=== JIRA %s ===" % jira_id)
    print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
        cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))
    # Only unreleased x.y.z versions are candidate fix versions.
    versions = asf_jira.project_versions("ZEPPELIN")
    versions = sorted(versions, key=lambda x: x.name, reverse=True)
    versions = filter(lambda x: x.raw['released'] is False, versions)
    # Consider only x.y.z versions
    versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)
    default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
    for v in default_fix_versions:
        # Handles the case where we have forked a release branch but not yet made the release.
        # In this case, if the PR is committed to the master branch and the release branch, we
        # only consider the release branch to be the fix version. E.g. it is not valid to have
        # both 1.1.0 and 1.0.0 as fix versions.
        (major, minor, patch) = v.split(".")
        if patch == "0":
            previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
            if previous in default_fix_versions:
                default_fix_versions = filter(lambda x: x != v, default_fix_versions)
    default_fix_versions = ",".join(default_fix_versions)
    fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
    if fix_versions == "":
        fix_versions = default_fix_versions
    fix_versions = fix_versions.replace(" ", "").split(",")
    def get_version_json(version_str):
        # Raw JIRA payload for the named version (used by the transition call).
        return filter(lambda v: v.name == version_str, versions)[0].raw
    jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
    resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
    asf_jira.transition_issue(
        jira_id, resolve["id"], fixVersions=jira_fix_versions, comment=comment)
    print "Succesfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
def resolve_jira_issues(title, merge_branches, comment):
    """Resolve every ZEPPELIN-nnn issue referenced in the PR title.

    Falls back to prompting for an id when the title references none.
    """
    jira_ids = re.findall("ZEPPELIN-[0-9]{3,5}", title)
    if len(jira_ids) == 0:
        resolve_jira_issue(merge_branches, comment)
    for jira_id in jira_ids:
        resolve_jira_issue(merge_branches, comment, jira_id)
#branches = get_json("%s/branches" % GITHUB_API_BASE)
#branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
#latest_branch = sorted(branch_names, reverse=True)[0]
latest_branch = "master"
# --- interactive driver: ask which PR to merge and pull its metadata ---
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
# Merged pull requests don't appear as merged in the GitHub API;
# Instead, they're closed by asfgit.
merge_commits = \
    [e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]
if merge_commits:
    # Already merged upstream: offer to backport (cherry-pick) instead.
    merge_hash = merge_commits[0]["commit_id"]
    message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]
    print "Pull request %s has already been merged, assuming you want to backport" % pr_num
    commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
                                    "%s^{commit}" % merge_hash]).strip() != ""
    if not commit_is_downloaded:
        fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
    print "Found commit %s:\n%s" % (merge_hash, message)
    cherry_pick(pr_num, merge_hash, latest_branch)
    sys.exit(0)
if not bool(pr["mergeable"]):
    msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
        "Continue? (experts only!)"
    continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
    title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)
# Optionally cherry-pick the merge into additional branches.
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
    merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
# Optionally resolve the associated JIRA issue(s).
if JIRA_IMPORTED:
    if JIRA_USERNAME and JIRA_PASSWORD:
        continue_maybe("Would you like to update an associated JIRA?")
        jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
        resolve_jira_issues(title, merged_refs, jira_comment)
    else:
        print "JIRA_USERNAME and JIRA_PASSWORD not set"
        print "Exiting without trying to close the associated JIRA."
else:
    print "Could not find jira library. Run 'sudo pip install jira' to install."
    print "Exiting without trying to close the associated JIRA."
|
|
"""Reports doit execution status/results"""
import sys
import time
import datetime
import json
from io import StringIO
class ConsoleReporter(object):
    """Default reporter. print results on console/terminal (stdout/stderr)

    @ivar show_out (bool): include captured stdout on failure report
    @ivar show_err (bool): include captured stderr on failure report
    """
    # short description, used by the help system
    desc = 'console output'
    def __init__(self, outstream, options):
        # save non-successful result information (include task errors)
        self.failures = []
        self.runtime_errors = []
        self.show_out = options.get('show_out', True)
        self.show_err = options.get('show_err', True)
        self.outstream = outstream
    def write(self, text):
        self.outstream.write(text)
    def initialize(self, tasks):
        """called just after tasks have been loaded before execution starts"""
        pass
    def get_status(self, task):
        """called when task is selected (check if up-to-date)"""
        pass
    def execute_task(self, task):
        """called when execution starts"""
        # ignore tasks that do not define actions
        # ignore private/hidden tasks (tasks that start with an underscore)
        if task.actions and (task.name[0] != '_'):
            self.write('. %s\n' % task.title())
    def add_failure(self, task, exception):
        """called when execution finishes with a failure"""
        self.failures.append({'task': task, 'exception':exception})
    def add_success(self, task):
        """called when execution finishes successfully"""
        pass
    def skip_uptodate(self, task):
        """skipped up-to-date task"""
        if task.name[0] != '_':
            self.write("-- %s\n" % task.title())
    def skip_ignore(self, task):
        """skipped ignored task"""
        self.write("!! %s\n" % task.title())
    def cleanup_error(self, exception):
        """error during cleanup"""
        sys.stderr.write(exception.get_msg())
    def runtime_error(self, msg):
        """error from doit (not from a task execution)"""
        # saved so they are displayed after task failures messages
        self.runtime_errors.append(msg)
    def teardown_task(self, task):
        """called when starts the execution of teardown action"""
        pass
    def complete_run(self):
        """called when finished running all tasks"""
        # if test fails print output from failed task
        for result in self.failures:
            self.write("#"*40 + "\n")
            msg = '%s - taskid:%s\n' % (result['exception'].get_name(),
                                        result['task'].name)
            self.write(msg)
            self.write(result['exception'].get_msg())
            self.write("\n")
            task = result['task']
            if self.show_out:
                out = "".join([a.out for a in task.actions if a.out])
                self.write("%s\n" % out)
            if self.show_err:
                err = "".join([a.err for a in task.actions if a.err])
                self.write("%s\n" % err)
        # runtime errors are printed last, after all task failures
        if self.runtime_errors:
            self.write("#"*40 + "\n")
            self.write("Execution aborted.\n")
            self.write("\n".join(self.runtime_errors))
            self.write("\n")
class ExecutedOnlyReporter(ConsoleReporter):
    """No output for skipped (up-to-date) and group tasks

    Produces zero output unless a task is executed
    """
    desc = 'console, no output for skipped (up-to-date) and group tasks'
    def skip_uptodate(self, task):
        """skipped up-to-date task (silently)"""
        pass
    def skip_ignore(self, task):
        """skipped ignored task (silently)"""
        pass
class ZeroReporter(ConsoleReporter):
    """Report only internal errors from doit.

    Every task-lifecycle hook is replaced with a no-op; only
    runtime_error produces output (on stderr).
    """
    # Fix: user-visible help text had a typo ("inetrnal").
    desc = 'report only internal errors from doit'
    def _just_pass(self, *args):
        """over-write base to do nothing"""
        pass
    # All lifecycle callbacks share the same no-op implementation.
    get_status = execute_task = add_failure = add_success \
        = skip_uptodate = skip_ignore = teardown_task = complete_run \
        = _just_pass
    def runtime_error(self, msg):
        """Write a doit-internal error straight to stderr."""
        sys.stderr.write(msg)
class TaskResult(object):
    """Execution record for a single task; used by JsonReporter."""
    # FIXME what about returned value from python-actions ?
    def __init__(self, task):
        self.task = task
        self.result = None         # outcome: fail, success, up-to-date, ignore
        self.out = None            # captured stdout from the task's actions
        self.err = None            # captured stderr from the task's actions
        self.error = None          # error from doit (exception traceback)
        self.started = None        # human-readable start time (set by to_dict)
        self.elapsed = None        # wall-clock duration in seconds
        self._started_on = None    # raw start timestamp
        self._finished_on = None   # raw finish timestamp
    def start(self):
        """Record the moment the task begins executing."""
        self._started_on = time.time()
    def set_result(self, result, error=None):
        """Record the outcome and gather captured output once finished."""
        self._finished_on = time.time()
        self.result = result
        sep = "\n<------------------------------------------------>\n"
        self.out = sep.join(action.out for action in self.task.actions if action.out)
        self.err = sep.join(action.err for action in self.task.actions if action.err)
        self.error = error
    def to_dict(self):
        """Return the result data as a plain dictionary."""
        if self._started_on is not None:
            begun = datetime.datetime.utcfromtimestamp(self._started_on)
            self.started = str(begun)
            self.elapsed = self._finished_on - self._started_on
        return {
            'name': self.task.name,
            'result': self.result,
            'out': self.out,
            'err': self.err,
            'error': self.error,
            'started': self.started,
            'elapsed': self.elapsed,
        }
class JsonReporter(object):
    """output results in JSON format

    - out (str)
    - err (str)
    - tasks (list - dict):
        - name (str)
        - result (str)
        - out (str)
        - err (str)
        - error (str)
        - started (str)
        - elapsed (float)
    """
    # short description, used by the help system
    desc = 'output in JSON format'
    def __init__(self, outstream, options=None): #pylint: disable=W0613
        # options parameter is not used
        # json result is sent to stdout when doit finishes running
        self.t_results = {}
        # when using json reporter output can not contain any other output
        # than the json data. so anything that is sent to stdout/err needs to
        # be captured.
        self._old_out = sys.stdout
        sys.stdout = StringIO()
        self._old_err = sys.stderr
        sys.stderr = StringIO()
        self.outstream = outstream
        # runtime and cleanup errors
        self.errors = []
    def get_status(self, task):
        """called when task is selected (check if up-to-date)"""
        self.t_results[task.name] = TaskResult(task)
    def execute_task(self, task):
        """called when execution starts"""
        self.t_results[task.name].start()
    def add_failure(self, task, exception):
        """called when execution finishes with a failure"""
        self.t_results[task.name].set_result('fail', exception.get_msg())
    def add_success(self, task):
        """called when execution finishes successfully"""
        self.t_results[task.name].set_result('success')
    def skip_uptodate(self, task):
        """skipped up-to-date task"""
        self.t_results[task.name].set_result('up-to-date')
    def skip_ignore(self, task):
        """skipped ignored task"""
        self.t_results[task.name].set_result('ignore')
    def cleanup_error(self, exception):
        """error during cleanup"""
        self.errors.append(exception.get_msg())
    def runtime_error(self, msg):
        """error from doit (not from a task execution)"""
        self.errors.append(msg)
    def teardown_task(self, task):
        """called when starts the execution of teardown action"""
        pass
    def complete_run(self):
        """called when finished running all tasks"""
        # restore stdout/stderr captured in __init__
        log_out = sys.stdout.getvalue()
        sys.stdout = self._old_out
        log_err = sys.stderr.getvalue()
        sys.stderr = self._old_err
        # add errors together with stderr output
        if self.errors:
            log_err += "\n".join(self.errors)
        task_result_list = [
            tr.to_dict() for tr in self.t_results.values()]
        json_data = {'tasks': task_result_list,
                     'out': log_out,
                     'err': log_err}
        # indent not available on simplejson 1.3 (debian etch)
        # json.dump(json_data, sys.stdout, indent=4)
        json.dump(json_data, self.outstream)
|
|
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# Handle transport and serialization callbacks for Go-style RPC servers.
#
# This is pretty simple. The client initiates an HTTP CONNECT and then
# hijacks the socket. The client is synchronous, but implements deadlines.
import errno
import select
import socket
import ssl
import time
import urlparse
_lastStreamResponseError = 'EOS'
class GoRpcError(Exception):
  """Base error for the Go-style RPC client."""
  pass
class TimeoutError(GoRpcError):
  """Deadline exceeded while dialing or waiting for a response."""
  pass
# The programmer has misused an API, but the underlying
# connection is still salvagable.
class ProgrammingError(GoRpcError):
  """API misuse (e.g. reading a response with no request pending)."""
  pass
# Error field from response raised as an exception
class AppError(GoRpcError):
  """Server-side application error, carried in the response header."""
  pass
def make_header(method, sequence_id):
  """Build the header dict that routes a request on the server side."""
  return dict(ServiceMethod=method, Seq=sequence_id)
class GoRpcRequest(object):
  """Outgoing RPC message: routing header plus the request body."""
  header = None  # standard fields that route the request on the server side
  body = None  # the actual request object - usually a dictionary
  def __init__(self, header, args):
    self.header = header
    self.body = args
  @property
  def sequence_id(self):
    """Client-assigned sequence number stored in the header."""
    return self.header['Seq']
class GoRpcResponse(object):
  """Incoming RPC message: echoed header plus the decoded reply."""
  # the request header is echoed back to detect error and out-of-sequence bugs
  # {'ServiceMethod': method,
  # 'Seq': sequence_id,
  # 'Error': error_string}
  header = None
  reply = None  # the decoded object - usually a dictionary
  @property
  def error(self):
    """Error string from the echoed header (falsy on success)."""
    return self.header['Error']
  @property
  def sequence_id(self):
    """Sequence number echoed back from the request header."""
    return self.header['Seq']
default_read_buffer_size = 8192
# A single socket wrapper to handle request/response conversation for this
# protocol. Internal, use GoRpcClient instead.
class _GoRpcConn(object):
  def __init__(self, timeout):
    self.conn = None
    # NOTE(msolomon) since the deadlines are approximate in the code, set
    # timeout to oversample to minimize waiting in the extreme failure mode.
    # FIXME(msolomon) reimplement using deadlines
    self.socket_timeout = timeout / 10.0
    self.buf = []
  def dial(self, uri, keyfile=None, certfile=None):
    """Connect to uri and perform the HTTP CONNECT handshake.

    Wraps the socket with SSL for https URIs.  Raises GoRpcError if the
    server hangs up before completing the handshake.
    """
    parts = urlparse.urlparse(uri)
    conhost, conport = parts.netloc.split(':')
    try:
      conip = socket.gethostbyname(conhost)
    except socket.gaierror:
      # Bug fix: a failed lookup raises socket.gaierror, not NameError,
      # so the getaddrinfo fallback below was previously unreachable.
      conip = socket.getaddrinfo(conhost, None)[0][4][0]
    self.conn = socket.create_connection(
        (conip, int(conport)), self.socket_timeout)
    if parts.scheme == 'https':
      self.conn = ssl.wrap_socket(self.conn, keyfile=keyfile, certfile=certfile)
    # Ask the server to hijack the socket for the RPC conversation.
    self.conn.sendall('CONNECT %s HTTP/1.0\n\n' % parts.path)
    # Consume the HTTP response; the RPC stream starts after the blank line.
    data = ''
    while True:
      try:
        d = self.conn.recv(1024)
      except socket.error as e:
        if e.args[0] == errno.EINTR:
          continue
        raise
      if not d:
        raise GoRpcError(
            'Unexpected EOF in handshake to %s:%s %s' %
            (str(conip), str(conport), parts.path))
      data += d
      if '\n\n' in data:
        return
  def close(self):
    """Close and forget the underlying socket (safe to call twice)."""
    if self.conn:
      self.conn.close()
      self.conn = None
  def write_request(self, request_data):
    """Send pre-encoded request bytes; blocking, may raise socket errors."""
    self.conn.sendall(request_data)
  # tries to read some bytes, returns None if it can't because of a timeout
  def read_some(self, size=None):
    if size is None:
      size = default_read_buffer_size
    try:
      data = self.conn.recv(size)
      if not data:
        # We only read when we expect data - if we get nothing this probably
        # indicates that the server hung up. This exception ensures the client
        # tears down properly.
        raise socket.error(errno.EPIPE, 'unexpected EOF in read')
    except socket.timeout:
      # catch the timeout and return empty data for now - this breaks the call
      # and lets the deadline get caught with reasonable precision.
      return None
    except ssl.SSLError as e:
      # another possible timeout condition with SSL wrapper
      if 'timed out' in str(e):
        return None
      raise
    except socket.error as e:
      if e.args[0] == errno.EINTR:
        # We were interrupted, let the caller retry.
        return None
      raise
    return data
  def is_closed(self):
    """Best-effort liveness check: True if the socket is gone or readable.

    Unsolicited readability (no request pending) is treated as a dead
    connection, since this protocol only delivers data we asked for.
    """
    if self.conn is None:
      return True
    # make sure the socket hasn't gone away
    fileno = self.conn.fileno()
    poll = select.poll()
    poll.register(fileno)
    ready = poll.poll(0)
    if ready:
      _, event = ready[0]
      if event & select.POLLIN:
        return True
    return False
class GoRpcClient(object):
  """Synchronous client for the Go-style RPC protocol.

  Subclasses supply the wire format by implementing encode_request and
  decode_response.  Deadlines are approximate: reads poll in short
  socket-timeout slices and check elapsed time between slices.
  """
  def __init__(self, uri, timeout, certfile=None, keyfile=None):
    self.uri = uri
    self.timeout = timeout
    # set while a request is in flight; anchor for deadline checks
    self.start_time = None
    # FIXME(msolomon) make this random initialized?
    self.seq = 0
    self.conn = None
    # bytes read off the wire but not yet consumed by decode_response
    self.data = None
    self.certfile = certfile
    self.keyfile = keyfile
  def dial(self):
    """Establish (or re-establish) the connection to self.uri.

    Maps socket/ssl failures to GoRpcError / TimeoutError.
    """
    if self.conn:
      self.close()
    conn = _GoRpcConn(self.timeout)
    try:
      conn.dial(self.uri, self.certfile, self.keyfile)
    except socket.timeout as e:
      raise TimeoutError(e, self.timeout, 'dial', self.uri)
    except ssl.SSLError as e:
      # another possible timeout condition with SSL wrapper
      if 'timed out' in str(e):
        raise TimeoutError(e, self.timeout, 'ssl-dial', self.uri)
      raise GoRpcError(e)
    except socket.error as e:
      raise GoRpcError(e)
    self.conn = conn
  def close(self):
    """Tear down the connection and clear pending-request state."""
    if self.conn:
      self.conn.close()
      self.conn = None
    self.start_time = None
  def is_closed(self):
    """Return True when no usable connection exists."""
    if self.conn:
      return self.conn.is_closed()
    return True
  # release the socket when the client is garbage collected
  __del__ = close
  def next_sequence_id(self):
    """Return a new, monotonically increasing request sequence id."""
    self.seq += 1
    return self.seq
  # return encoded request data, including header
  def encode_request(self, req):
    raise NotImplementedError
  # fill response with decoded data, and returns a tuple
  # (bytes to consume if a response was read,
  # how many bytes are still to read if no response was read and we know)
  def decode_response(self, response, data):
    raise NotImplementedError
  def _check_deadline_exceeded(self, timeout):
    # raises socket.timeout, which callers translate to TimeoutError
    if (time.time() - self.start_time) > timeout:
      raise socket.timeout('deadline exceeded')
  # logic to read the next response off the wire
  def _read_response(self, response, timeout):
    """Read and decode one response into `response`, honoring `timeout`."""
    if self.start_time is None:
      raise ProgrammingError('no request pending')
    if not self.conn:
      raise GoRpcError(
          '_read_response - closed client: %s' %
          (time.time() - self.start_time))
    # get some data if we don't have any so we have somewhere to start
    if self.data is None:
      while True:
        self.data = self.conn.read_some()
        if self.data:
          break
        self._check_deadline_exceeded(timeout)
    # now try to decode, and read more if we need to
    while True:
      consumed, extra_needed = self.decode_response(response, self.data)
      if consumed:
        data_len = len(self.data)
        if data_len > consumed:
          # we have extra data, keep it
          self.data = self.data[consumed:]
        else:
          # no extra data, nothing to keep
          self.data = None
        return
      else:
        # we don't have enough data, read more, and check the timeout
        # every time
        while True:
          more_data = self.conn.read_some(extra_needed)
          if more_data:
            break
          self._check_deadline_exceeded(timeout)
        self.data += more_data
  # Perform an rpc, raising a GoRpcError, on errant situations.
  # Pass in a response object if you don't want a generic one created.
  def call(self, method, request, response=None):
    """Perform a single RPC; returns the (decoded) response object."""
    if not self.conn:
      raise GoRpcError('call - closed client', method)
    try:
      h = make_header(method, self.next_sequence_id())
      req = GoRpcRequest(h, request)
      self.start_time = time.time()
      self.conn.write_request(self.encode_request(req))
      if response is None:
        response = GoRpcResponse()
      self._read_response(response, self.timeout)
      self.start_time = None
    except socket.timeout as e:
      # tear down - can't guarantee a clean conversation
      self.close()
      raise TimeoutError(e, self.timeout, method)
    except socket.error as e:
      # tear down - better chance of recovery by reconnecting
      self.close()
      raise GoRpcError(e, method)
    except ssl.SSLError as e:
      # tear down - better chance of recovery by reconnecting
      self.close()
      if 'timed out' in str(e):
        raise TimeoutError(e, self.timeout, method)
      raise GoRpcError(e, method)
    if response.error:
      raise AppError(response.error, method)
    if response.sequence_id != req.sequence_id:
      # tear down - off-by-one error in the connection somewhere
      self.close()
      raise GoRpcError('request sequence mismatch', response.sequence_id,
                       req.sequence_id, method)
    return response
  # Perform a streaming rpc call
  # This method doesn't fetch any result, use stream_next to get them
  def stream_call(self, method, request):
    """Start a streaming RPC; fetch results with stream_next()."""
    if not self.conn:
      raise GoRpcError('stream_call - closed client', method)
    try:
      h = make_header(method, self.next_sequence_id())
      req = GoRpcRequest(h, request)
      self.start_time = time.time()
      self.conn.write_request(self.encode_request(req))
    except socket.timeout as e:
      # tear down - can't guarantee a clean conversation
      self.close()
      raise TimeoutError(e, self.timeout, method)
    except socket.error as e:
      # tear down - better chance of recovery by reconnecting
      self.close()
      raise GoRpcError(e, method)
    except ssl.SSLError as e:
      # tear down - better chance of recovery by reconnecting
      self.close()
      if 'timed out' in str(e):
        raise TimeoutError(e, self.timeout, method)
      raise GoRpcError(e, method)
  # Returns the next value, or None if we're done.
  # Note the timeout is longer as we don't mind for streaming queries
  # since they get their own bigger connection pool on the vttablet side
  # FIXME(alainjobart) The timeout needs to be passed in, not inferred,
  # otherwise it's going to be hard to debug... Anyway, the value
  # for the timeout will most likely be 300s here, as timeout is usually 30s.
  def stream_next(self):
    """Return the next streaming response, or None at end of stream."""
    try:
      response = GoRpcResponse()
      self._read_response(response, self.timeout * 10)
    except socket.timeout as e:
      # tear down - can't guarantee a clean conversation
      self.close()
      raise TimeoutError(e, self.timeout)
    except socket.error as e:
      # tear down - better chance of recovery by reconnecting
      self.close()
      raise GoRpcError(e)
    except ssl.SSLError as e:
      # tear down - better chance of recovery by reconnecting
      self.close()
      if 'timed out' in str(e):
        raise TimeoutError(e, self.timeout)
      raise GoRpcError(e)
    if response.sequence_id != self.seq:
      # tear down - off-by-one error in the connection somewhere
      self.close()
      raise GoRpcError('request sequence mismatch', response.sequence_id,
                       self.seq)
    if response.error:
      self.start_time = None
      if response.error == _lastStreamResponseError:
        return None
      else:
        raise AppError(response.error)
    else:
      self.start_time = time.time()
    return response
|
|
# encoding=utf-8
#################################
# Author: ideawu
# Link: http://www.ideawu.net/
#################################
import sys, os, shutil, datetime
import antlr3
import antlr3.tree
from ExprLexer import ExprLexer
from ExprParser import ExprParser
class CpyEngine:
	def compile(self, srcfile, base_dir, output_dir):
		"""Compile one .cpy source file into a .py file under output_dir.

		Parses srcfile with the ANTLR-generated lexer/parser, walks the
		resulting AST with Eval, and emits Python through a CpyBuilder.
		Returns the path of the generated .py file.
		"""
		#fp = codecs.open(sys.argv[1], 'r', 'utf-8')
		fp = open(srcfile, 'r')
		char_stream = antlr3.ANTLRInputStream(fp)
		lexer = ExprLexer(char_stream)
		tokens = antlr3.CommonTokenStream(lexer)
		parser = ExprParser(tokens)
		r = parser.prog()
		# this is the root of the AST
		root = r.tree
		#print (root.toStringTree())
		#print '-------'
		nodes = antlr3.tree.CommonTreeNodeStream(root)
		nodes.setTokenStream(tokens)
		from Eval import Eval
		eval = Eval(nodes)
		#######################################
		head, tail = os.path.split(srcfile)
		# Ensure the output package directory and its __init__.py exist.
		if not os.path.exists(output_dir):
			os.mkdir(output_dir)
		try:
			os.chmod(output_dir, 0777)
		except: pass
		if not os.path.exists(output_dir + '/__init__.py'):
			fp = open(output_dir + '/__init__.py', 'w')
			fp.close()
		try:
			os.chmod(output_dir + '/__init__.py', 0777)
		except: pass
		dstfile = os.path.normpath(output_dir + '/' + tail.split('.')[0] + '.py')
		#print 'compile: %-30s=> %s' % (srcfile, dstfile)
		cpy = CpyBuilder(dstfile, base_dir, output_dir)
		eval.prog(cpy)
		return dstfile
class CpyBuilder:
compiled_files = set()
	def __init__(self, dstfile, base_dir, output_dir):
		# counter used by tmp_var() to generate unique identifiers
		self.vars = -1
		self.if_depth = 0
		self.block_depth = 0
		self.switch_expr_stack = []
		self.switch_continue_stack = []
		self.class_stack = []
		self.base_dir = base_dir
		self.output_dir = output_dir
		self.fp = open(dstfile, 'w')
		try:
			os.chmod(dstfile, 0777)
		except: pass
		# emit the standard header of every generated file
		self.write('# encoding=utf-8\n')
		self.write('# Generated by cpy\n');
		self.write('# ' + datetime.datetime.now().isoformat(' ') + '\n');
		self.write('import os, sys\n')
		self.write('from sys import stdin, stdout\n\n')
	def tmp_var(self, name = ''):
		"""Return a fresh '_cpy_<name>_<n>' identifier for generated code."""
		self.vars += 1
		return '_cpy_%s_%d' %(name, self.vars)
	def close(self):
		"""Close the generated output file."""
		self.fp.close()
	def write(self, text):
		"""UTF-8 encode text and append it to the output file."""
		text = text.encode('utf-8')
		self.fp.write(text)
		# debug
		#sys.stdout.write(text)
	def indent(self):
		"""Return the tab prefix for the current block nesting depth."""
		return '\t' * self.block_depth
def _compile_dir(self, rel_path):
mods = []
files = os.listdir(self.base_dir + '/' + rel_path)
for f in files:
if f.endswith('.cpy'):
mods.append(f[0: -4])
if f.endswith('.py'):
mods.append(f[0: -3])
self._compile(rel_path, f)
return mods
def _compile(self, rel_path, f):
base_dir = os.path.normpath(self.base_dir + '/' + rel_path)
srcfile = os.path.normpath(base_dir + '/' + f)
output_dir = os.path.normpath(self.output_dir + '/' + rel_path)
if f.endswith('.py'):
head, tail = os.path.split(f)
#print 'copy: %-30s=> %s' % (srcfile, output_dir + '/' + tail)
shutil.copy(srcfile, output_dir + '/' + tail)
try:
os.chmod(output_dir + '/' + tail, 0777)
except: pass
elif f.endswith('.cpy'):
if srcfile in self.compiled_files:
return
self.compiled_files.add(srcfile)
e = CpyEngine()
d = e.compile(srcfile, base_dir, output_dir)
def op_import(self, member, all):
ps = member.split('.')
package = []
while True:
if len(ps) == 0:
break
p = ps.pop(0)
package.append(p)
rel_path = '/'.join(package);
path = self.base_dir + '/' + rel_path
if os.path.isdir(path):
if len(ps) == 0:
mods = self._compile_dir(rel_path)
if all == '*':
for m in mods:
self.write(self.indent())
self.write('from %s import %s\n' %(member, m))
else:
self.write(self.indent())
self.write('import %s\n' % member)
break
elif os.path.isfile(path + '.cpy') or os.path.isfile(path + '.py'):
filename = os.path.basename(path)
rel_path = '/'.join(package[ : -1]);
if os.path.isfile(path + '.cpy'):
self._compile(rel_path, filename + '.cpy')
else:
self._compile(rel_path, filename + '.py')
if len(ps) == 0:
if all == '*':
self.write(self.indent())
self.write('from %s import *\n' % member)
else:
self.write(self.indent())
self.write('import %s\n' % member)
break
elif len(ps) == 1:
mod = '.'.join(package)
cls = ps[-1]
self.write(self.indent())
self.write('from %s import %s\n' %(mod, cls))
else:
# error
print ("Cpy error: invalid module '%s'" % member)
sys.exit(0)
break
else:
self.write(self.indent())
if all == '*':
self.write('from %s import *\n' % member)
else:
ps = member.split('.')
if len(ps) == 1:
self.write('import %s\n' % member)
else:
self.write('from %s import %s\n' %('.'.join(ps[0 : -1]), ps[-1]))
break
def block_enter(self):
self.block_depth += 1
self.write(self.indent() + 'pass\n')
def block_leave(self):
self.block_depth -= 1
def if_enter(self):
self.write('\n')
self.write(self.indent())
self.if_depth += 1
def if_leave(self):
self.if_depth -= 1
def op_if(self, expr):
self.write('if %s:\n' % expr)
def op_else(self):
self.write(self.indent() + 'else:\n')
def op_else_if(self):
self.write(self.indent() + 'el')
def stmt(self, text):
self.write(self.indent() + text + '\n')
def op_assign(self, id, val, op):
text = '%s %s %s' % (id, op, val)
return text
def op_inc(self, id):
return id + ' += 1';
def op_dec(self, id):
return id + ' -= 1';
def op_call(self, text):
self.write(self.indent() + text + '\n')
def op_print(self, text):
self.write(self.indent())
self.write('print %s\n' % text)
def op_printf(self, format, text):
self.write(self.indent())
if text == None:
self.write('sys.stdout.write(%s)\n' % (format))
else:
self.write('sys.stdout.write(%s %% (%s))\n' % (format, text))
def op_while(self, expr):
self.write('\n')
self.write(self.indent())
self.write('while %s:\n' % expr)
def op_do_while_enter(self):
self.write('\n')
self.write(self.indent())
self.write('while True:\n')
def op_do_while_leave(self, expr):
self.write('\n')
self.block_depth += 1
self.write(self.indent())
self.write('if %s:\n' % expr)
self.block_depth += 1
self.write(self.indent())
self.write('continue')
self.block_depth -= 1
self.write('\n')
self.write(self.indent())
self.write('break')
self.block_depth -= 1
def op_switch_enter(self, expr):
self.write('\n')
self.switch_expr_stack.append(expr)
var = '_continue_%d' % len(self.switch_expr_stack)
self.switch_continue_stack.append(var)
self.write(self.indent() + '# {{{ switch: ' + expr + '\n')
self.write(self.indent())
self.write(var + ' = False\n')
self.write(self.indent())
self.write('while True:\n')
self.block_depth += 1
def op_switch_leave(self):
self.write(self.indent() + 'break\n')
var = self.switch_continue_stack[-1]
self.write(self.indent())
self.write('if %s:\n' % var)
self.block_depth += 1
self.write(self.indent())
self.write('continue\n')
self.block_depth -= 1
self.block_depth -= 1
self.write(self.indent() + '# }}} switch\n\n')
self.switch_expr_stack.pop()
self.switch_continue_stack.pop()
def op_case_enter(self):
self.write(self.indent())
self.write('if False')
self.block_depth += 1
def op_case_test(self, expr):
self.write(' or ((%s) == %s)' % (self.switch_expr_stack[-1], expr))
def op_case(self):
self.write(':\n')
self.write(self.indent())
self.write('pass\n')
def op_case_leave(self):
self.block_depth -= 1
def op_break(self):
self.write(self.indent())
self.write('break\n')
def op_continue(self):
if self.switch_expr_stack:
var = self.switch_continue_stack[-1]
self.write(self.indent())
self.write(var + ' = True\n')
self.write(self.indent())
self.write('break\n')
else:
self.write(self.indent())
self.write('continue\n')
def op_return(self, expr):
self.write(self.indent())
if expr == None: expr = ''
self.write('return %s\n' % expr)
def op_default_enter(self):
self.write(self.indent() + '### default\n')
def op_default_leave(self):
pass
def op_function(self, id, params):
self.write('\n')
if len(self.class_stack) > 0:
# in class
if params == None or params == '':
params = 'this'
else:
params = 'this, ' + params
else:
if params == None:
params = ''
self.write(self.indent() + 'def ' + id + '(' + params + '):\n')
def op_foreach(self, expr, k, vals):
self.write('\n')
tmp_var_ref = self.tmp_var('r')
tmp_var_l = self.tmp_var('l')
tmp_var_k = self.tmp_var('k')
tmp_var_is_dict = self.tmp_var('b')
self.write(self.indent())
self.write('%s = %s = %s\n' %(tmp_var_ref, tmp_var_l, expr))
self.write(self.indent())
self.write('if type(%s).__name__ == \'dict\': %s=True; %s=%s.iterkeys()\n' %(tmp_var_ref, tmp_var_is_dict, tmp_var_l, tmp_var_ref))
self.write(self.indent())
self.write('else: %s=False;' %tmp_var_is_dict)
if k != None:
self.write('%s=-1' %k)
self.write('\n')
self.write(self.indent())
self.write('for %s in %s:\n' %(tmp_var_k, tmp_var_l))
if k == None:
self.block_depth += 1
self.write(self.indent())
self.write('if %s: %s=%s[%s]\n' %(tmp_var_is_dict, vals, tmp_var_ref, tmp_var_k))
self.write(self.indent())
self.write('else: %s=%s\n' %(vals, tmp_var_k))
self.block_depth -= 1
else:
self.block_depth += 1
self.write(self.indent())
self.write('if %s: %s=%s; %s=%s[%s]\n' %(tmp_var_is_dict, k, tmp_var_k, vals, tmp_var_ref, tmp_var_k))
self.write(self.indent())
self.write('else: %s += 1; %s=%s\n' %(k, vals, tmp_var_k))
self.block_depth -= 1
def op_throw(self, expr):
self.write(self.indent())
self.write('raise %s\n' % expr)
def op_try(self):
self.write(self.indent())
self.write('try:\n')
def op_catch(self, type, var):
self.write(self.indent())
if var == None:
self.write('except %s:\n' % type)
else:
self.write('except %s , %s:\n' %(type, var))
def op_finally(self):
self.write(self.indent())
self.write('finally:\n')
def op_class_enter(self, name, parent):
self.class_stack.append([])
self.write(self.indent())
if parent == None:
self.write('class %s(object):\n' % name)
else:
self.write('class %s(%s):\n' % (name, parent))
self.block_depth += 1
self.write(self.indent())
self.write('pass\n')
def op_class_leave(self):
self.class_stack.pop()
self.write('\n')
self.block_depth -= 1
def op_var_def(self, is_static, id, val):
if is_static:
self.write(self.indent())
if val == None:
s = '%s = None' % id
else:
s = '%s = %s' % (id, val)
self.write(s)
else:
if val == None:
s = 'this.%s = None' % id
else:
s = 'this.%s = %s' % (id, val)
self.class_stack[-1].append(s)
def op_construct(self, params):
self.write('\n')
self.op_function('__init__', params)
self.block_depth += 1
for s in self.class_stack[-1]:
self.write(self.indent())
self.write(s + '\n')
self.block_depth -= 1
|
|
#!/usr/bin/env python
# To install dependencies, see https://github.com/timrdf/DataFAQs/wiki/Errors
import os
import sys

from rdflib import *
from surf import *
from surf.query import a, select

import rdflib
rdflib.plugin.register('sparql', rdflib.query.Processor, 'rdfextras.sparql.processor', 'Processor')
rdflib.plugin.register('sparql', rdflib.query.Result, 'rdfextras.sparql.query', 'SPARQLQueryResult')
if len(sys.argv) != 4:
print "usage: cross-reference.py http://some.owl someont.owl prefix"
print
print " http://some.owl - web URL of the OWL e.g. http://dvcs.w3.org/hg/prov/raw-file/default/ontology/ProvenanceOntology.owl"
print " some.owl - local copy of the OWL e.g. ProvenanceOntology.owl"
print " prefix - prefix to use e.g. 'prov'"
sys.exit(1)
ont_url = sys.argv[1] # http://dvcs.w3.org/hg/prov/raw-file/default/ontology/ProvenanceOntology.owl
ont_local = sys.argv[2] # ProvenanceOntology.owl
PREFIX = sys.argv[3] # prov
# Namespaces used in the generated HTML and in SPARQL queries.
ns.register(prov='http://www.w3.org/ns/prov#')
ns.register(dcat='http://www.w3.org/ns/dcat#')
ns.register(void='http://rdfs.org/ns/void#')
prefixes = dict(prov=str(ns.PROV), dcat=str(ns.DCAT), void=str(ns.VOID))
# The ontology is loaded twice: as a plain rdflib graph (for SPARQL) from the
# local file, and as a SuRF session (resource-style access) from the web URL.
# NOTE(review): the two copies could diverge if the URL and file differ.
# as rdflib
graph = Graph()
graph.parse(ont_local) # from file
# as SuRF
store = Store(reader='rdflib', writer='rdflib', rdflib_store = 'IOMemory')
session = Session(store)
store.load_triples(source=ont_url) # From URL
# SuRF resource classes for the three OWL term kinds reported on below.
DatatypeProperties = session.get_class(ns.OWL["DatatypeProperty"])
ObjectProperties = session.get_class(ns.OWL["ObjectProperty"])
Classes = session.get_class(ns.OWL["Class"])
qualifiedFormsQ = '''
prefix owl: <http://www.w3.org/2002/07/owl#>
prefix prov: <http://www.w3.org/ns/prov#>
select distinct ?unqualified ?qualified ?involvement
where {
graph <http://www.w3.org/ns/prov#> {
?unqualified prov:qualifiedForm []; prov:category "CATEGORY" .
optional {
?unqualified prov:qualifiedForm ?qualified .
?qualified a owl:ObjectProperty
}
optional {
?unqualified prov:qualifiedForm ?involvement .
?involvement a owl:Class
}
}
} order by ?unqualified
'''
# Collect the distinct prov:category values found in the ontology.
results = graph.query('select distinct ?cat where { [] prov:category ?cat } order by ?cat', initNs=prefixes)
categories = {}
for bindings in results:
    categories[bindings] = True # distinct operator is not being recognized. Need to reimplement here.
# One at-a-glance / cross-reference / qualified-forms HTML fragment per category.
for category in categories.keys():
    print category
    glanceName = 'at-a-glance-'+category+'.html'
    crossName = 'cross-reference-'+category+'.html'
    # NOTE(review): "qualifed" looks like a typo, but downstream tooling may
    # depend on this exact filename -- confirm before renaming.
    qualsName = 'qualifed-forms-'+category+'.html'
    # Never overwrite existing output; only generate fragments that are absent.
    if not(os.path.exists(glanceName)) and not(os.path.exists(crossName)) and not(os.path.exists(qualsName)):
        print '   '+glanceName + ' ' + crossName
        glance = open(glanceName, 'w')
        cross = open(crossName, 'w')
        quals = open(qualsName, 'w')
        # Classes # # # # # # # # # # # # # # # # #
        # At-a-glance fragment: an alphabetised link list of this category's classes.
        glance.write('\n')
        glance.write('<div\n') # We want to include in multiple places: id="'+PREFIX+'-'+category+'-owl-classes-at-a-glance"\n')
        glance.write('     class="'+PREFIX+'-'+category+' owl-classes at-a-glance">\n')
        glance.write('   <ul class="hlist">\n')
        ordered = {}
        ordered['classes'] = []
        for owlClass in Classes.all():
            if owlClass.prov_category.first == category and owlClass.subject.startswith('http://www.w3.org/ns/prov#'):
                ordered['classes'].append(owlClass.subject)
        ordered['classes'].sort()
        # at-a-glance
        for uri in ordered['classes']:
            owlClass = session.get_resource(uri,Classes)
            qname = owlClass.subject.split('#')
            glance.write('      <li>\n')
            glance.write('         <a href="#'+qname[1]+'">'+PREFIX+':'+qname[1]+'</a>\n')
            glance.write('      </li>\n')
        glance.write('   </ul>\n')
        glance.write('</div>\n')
        # Properties # # # # # # # # # # # # # # # # #
        # Same at-a-glance list for properties (datatype and object alike).
        glance.write('\n')
        glance.write('<div\n') # We want to include in multiple places: id="'+PREFIX+'-'+category+'-owl-properties-at-a-glance"\n')
        glance.write('     class="'+PREFIX+'-'+category+' owl-properties at-a-glance">\n')
        glance.write('   <ul class="hlist">\n')
        propertyTypes = {}
        ordered['properties'] = []
        for property in DatatypeProperties.all():
            if property.prov_category.first == category:
                ordered['properties'].append(property.subject)
                propertyTypes[property.subject] = "datatype-property"
        for property in ObjectProperties.all():
            if property.prov_category.first == category:
                ordered['properties'].append(property.subject)
                propertyTypes[property.subject] = "object-property"
        ordered['properties'].sort()
        # at-a-glance
        for uri in ordered['properties']:
            property = []
            if propertyTypes[uri] == 'datatype-property':
                property = session.get_resource(uri,DatatypeProperties)
            else:
                property = session.get_resource(uri,ObjectProperties)
            qname = property.subject.split('#')
            glance.write('      <li class="'+propertyTypes[uri]+'">\n')
            glance.write('         <a href="#'+qname[1]+'">'+PREFIX+':'+qname[1]+'</a>\n')
            glance.write('      </li>\n')
        glance.write('   </ul>\n')
        glance.write('</div>\n')
        # Classes # # # # # # # # # # # # # # # # #
        # cross-reference
        # One <div class="entity"> per class, carrying its definition, an
        # example snippet, and a PML mapping snippet (both via template includes).
        cross.write('<div\n') # We want to include it multiple times: id="'+PREFIX+'-'+category+'-owl-classes-crossreference"\n')
        cross.write('     class="'+PREFIX+'-'+category+' owl-classes crossreference"\n')
        cross.write('     xmlns:dcterms="http://purl.org/dc/terms/"\n')
        cross.write('     xmlns:prov="http://www.w3.org/ns/prov#">\n')
        for uri in ordered['classes']:
            owlClass = session.get_resource(uri,Classes)
            qname = owlClass.subject.split('#')
            cross.write('\n')
            cross.write('   <div id="'+qname[1]+'" class="entity">\n')
            cross.write('      <h3>\n')
            cross.write('         Class: <a href="#'+qname[1]+'"><span class="dotted" title="'+uri+'">'+PREFIX+':'+qname[1]+'</span></a>\n')
            cross.write('         <span class="backlink">\n')
            #cross.write('            back to <a href="#toc">ToC</a> or\n')
            cross.write('            back to <a href="#'+PREFIX+'-'+category+'-owl-terms-at-a-glance">'+category+' classes</a>\n')
            cross.write('         </span>\n')
            cross.write('      </h3>\n')
            # class
            #cross.write('      <p><strong>IRI:</strong><a href="'+uri+'">'+uri+'</a></p>\n')
            # cross.write('      <p><strong>IRI:</strong>'+uri+'</p>\n')
            # class prov:definition
            for definition in owlClass.prov_definition: # TODO: not done for properties. How to reconcile def vs comments vs editorNote?
                cross.write('      <div class="definition"><p>'+definition+'</p>\n')
                cross.write('      </div>\n')
            # Example taken from http://dvcs.w3.org/hg/prov/file/tip/examples/eg-24-prov-o-html-examples/rdf/create/rdf
            #   <pre about="#example-for-class-Entity" typeof="prov:Entity"
            #        rel="prov:wasQuotedFrom" resource="http://dvcs.w3.org/hg/prov/raw-file/tip/examples/eg-24-prov-o-html-examples/rdf/create/rdf/class_Entity.ttl"
            #        property="prov:value">{% escape %}{% include "includes/prov/examples/eg-24-prov-o-html-examples/rdf/create/rdf/class_Entity.ttl"%}{% endescape %}</pre>
            cross.write('\n')
            cross.write('      <div about="#example-for-class-'+qname[1]+'" typeof="prov:Entity" class="example">\n')
            cross.write('         <span rel="dcterms:subject" resource="'+owlClass.subject+'"></span>\n')
            cross.write('         <strong>Example</strong>\n')
            cross.write('         <pre rel="prov:wasQuotedFrom" resource="http://dvcs.w3.org/hg/prov/raw-file/tip/examples/eg-24-prov-o-html-examples/rdf/create/rdf/class_'+qname[1]+'.ttl"\n')
            cross.write('              property="prov:value">')
            cross.write('{% escape %}{% include "includes/prov/examples/eg-24-prov-o-html-examples/rdf/create/rdf/class_'+qname[1]+'.ttl"%}{% endescape %}</pre>\n')
            cross.write('      </div>\n')
            cross.write('\n')
            cross.write('\n')
            cross.write('      <div about="#pml-mapping-for-class-'+qname[1]+'" typeof="prov:Entity" class="example">\n')
            cross.write('         <span rel="dcterms:subject" resource="'+owlClass.subject+'"></span>\n')
            cross.write('         <strong>PML Mapping</strong>\n')
            cross.write('         <pre rel="prov:wasQuotedFrom" resource="http://dvcs.w3.org/hg/prov/raw-file/tip/examples/eg-29-pml-mappings/rdf/create/rdf/class_'+qname[1]+'.ttl"\n')
            cross.write('              property="prov:value">')
            cross.write('{% escape %}{% include "includes/prov/examples/eg-29-pml-mappings/rdf/create/rdf/class_'+qname[1]+'.ttl"%}{% endescape %}</pre>\n')
            cross.write('      </div>\n')
            cross.write('\n')
# cross.write(' <dl class="description">\n')
# class rdfs:comment
# for comment in owlClass.rdfs_comment:
# cross.write(' <dd class="comment"><p>'+comment+'</p>\n')
# cross.write(' </dd>\n')
# class prov:component ?component
# if len(owlClass.prov_component) > 0 and False:
# cross.write('\n')
# cross.write(' <dt>in PROV component</dt>\n')
# cross.write(' <dd class="component-'+owlClass.prov_component.first+'">\n')
# for component in owlClass.prov_component:
# cross.write(' <a title="'+component+'" href="#component-'+component+'">'+component+'</a>\n')
# cross.write(' </dd>\n')
# class rdfs:subClassOf ?super
# if len(owlClass.rdfs_subClassOf) > 0:
# cross.write('\n')
# cross.write(' <dt>is subclass of</dt>\n')
# cross.write(' <dd>\n')
# for super in owlClass.rdfs_subClassOf:
# if super.subject.startswith('http://www.w3.org/ns/prov#'):
# qname = super.subject.split('#')
# cross.write(' <a title="'+super.subject+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a>\n')
# cross.write(' </dd>\n')
# ?p rdfs:domain class
# if len(owlClass.is_rdfs_domain_of) > 0:
# cross.write('\n')
# cross.write(' <dt>in domain of</dt>\n')
# cross.write(' <dd>\n')
# for p in owlClass.is_rdfs_domain_of:
# pqname = p.subject.split('#')
# cross.write(' <a title="'+p.subject+'" href="#'+pqname[1]+'">'+PREFIX+':'+pqname[1]+'</a>')
# if ns.OWL['DatatypeProperty'] in p.rdf_type:
# cross.write('<sup class="type-dp" title="data property">dp</sup>\n')
# else:
# cross.write('<sup class="type-op" title="object property">op</sup>\n')
# cross.write(' </dd>\n')
# ?property rdfs:domain ( ... class ... )
# propertiesThatUnionMeInDomain = set()
# for triple in graph.triples((None, ns.RDFS['domain'], None)):
# for union in graph.triples((triple[2],ns.OWL['unionOf'],None)):
# for classInDomain in graph.items(union[2]):
# if classInDomain == owlClass.subject:
# propertiesThatUnionMeInDomain.add(triple[0])
# if len(propertiesThatUnionMeInDomain) > 0:
# cross.write('\n')
# cross.write(' <dt>a domain of</dt>\n')
# cross.write(' <dd>\n')
# for p in propertiesThatUnionMeInDomain:
# pqname = p.split('#')
# print ' ' + owlClass.subject + ' in domain of ' + pqname[1]
# cross.write(' <a title="'+p+'" href="#'+pqname[1]+'">'+PREFIX+':'+pqname[1]+'</a>')
# class rdfs:subClassOf ?super . ?property rdfs:domain ?super .
# query = select('?super ?property').where((owlClass.subject, ns.RDFS['subClassOf'],'?super'),
# ('?property', ns.RDFS['domain'], '?super'))
# results = store.execute(query)
# ignoreSupers = [ns.PROV['Entity'], ns.PROV['Involvement'], ns.PROV['Dictionary']]
# if len(results) > 0:
# #print owlClass.subject
# cross.write('\n')
# cross.write(' <dt>parent is in domain of</dt>\n')
# cross.write(' <dd>\n')
# for p in results:
# if p[0] not in ignoreSupers:
# #print ' ' + p[0] + ' ' + p[1]
# pqname = p[1].split('#')
# cross.write(' <a title="'+p[1]+'" href="#'+pqname[1]+'">'+PREFIX+':'+pqname[1]+'</a>')
#if ns.OWL['DatatypeProperty'] in p.rdf_type:
# cross.write('<sup class="type-dp" title="data property">dp</sup>\n')
#else:
# cross.write('<sup class="type-op" title="object property">op</sup>\n')
# cross.write(' </dd>\n')
# ?p rdfs:range class
# if len(owlClass.is_rdfs_range_of) > 0:
# cross.write('\n')
# cross.write(' <dt>in range of</dt>\n')
# cross.write(' <dd>\n')
# for p in owlClass.is_rdfs_range_of:
# pqname = p.subject.split('#')
# cross.write(' <a title="'+p.subject+'" href="#'+pqname[1]+'">'+PREFIX+':'+pqname[1]+'</a>')
# if ns.OWL['DatatypeProperty'] in p.rdf_type:
# cross.write('<sup class="type-dp" title="data property">dp</sup>\n')
# else:
# cross.write('<sup class="type-op" title="object property">op</sup>\n')
# cross.write(' </dd>\n')
#
# # ?sub rdfs:subClassOf class
# if len(owlClass.is_rdfs_subClassOf_of) > 0:
# es = '' # plural form grammar
# if len(owlClass.is_rdfs_subClassOf_of) > 1:
# es="es"
# cross.write('\n')
# cross.write(' <dt>has subclass'+es+'</dt>\n')
# cross.write(' <dd>\n')
# for sub in owlClass.is_rdfs_subClassOf_of:
# qname = sub.subject.split('#')
# cross.write(' <a title="'+sub.subject+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a>\n')
# cross.write(' </dd>\n')
# class prov:unqualifiedForm ?p
# if len(owlClass.prov_unqualifiedForm) > 0:
#print owlClass.subject
#print owlClass.prov_unqualifiedForm.first
# qname = owlClass.prov_unqualifiedForm.first.subject.split('#')
# cross.write('\n')
# cross.write(' <dt>qualifies</dt>\n')
# cross.write(' <dd>\n')
# cross.write(' <a title="'+owlClass.prov_unqualifiedForm.first.subject+'" href="#'+qname[1]+'" class="owlproperty">'+PREFIX+':'+qname[1]+'</a>\n')
# cross.write(' </dd>\n')
# cross.write(' </dl>\n')
            cross.write('   </div>\n')
        cross.write('</div>\n')
        # Properties # # # # # # # # # # # # # # # # #
        # cross-reference
        # One <div class="entity"> per property, mirroring the class section
        # above (example + PML mapping + rdfs:comment description).
        cross.write('<div\n') # We want to include it multiple times: id="'+PREFIX+'-'+category+'-owl-properties-crossreference"\n')
        cross.write('     class="'+PREFIX+'-'+category+' owl-properties crossreference"\n')
        cross.write('     xmlns:prov="http://www.w3.org/ns/prov#">\n')
        for uri in ordered['properties']:
            property = []
            if propertyTypes[uri] == 'datatype-property':
                property = session.get_resource(uri,DatatypeProperties)
            else:
                property = session.get_resource(uri,ObjectProperties)
            qname = property.subject.split('#')
            cross.write('   <div id="'+qname[1]+'" class="entity">\n')
            cross.write('      <h3>\n')
            cross.write('         Property: <a href="#'+qname[1]+'"><span class="dotted" title="'+uri+'">'+PREFIX+':'+qname[1]+'</span></a>')
            # Superscript marker distinguishes datatype (dp) from object (op) properties.
            if ns.OWL['DatatypeProperty'] in property.rdf_type:
                cross.write('<sup class="type-dp" title="data property">dp</sup>\n')
            else:
                cross.write('<sup class="type-op" title="object property">op</sup>\n')
            cross.write('         <span class="backlink">\n')
            cross.write('            back to <a href="#'+PREFIX+'-'+category+'-owl-terms-at-a-glance">'+category+' properties</a>\n')
            cross.write('         </span>\n')
            cross.write('      </h3>\n')
            # property
            #cross.write('      <p><strong>IRI:</strong><a href="'+uri+'">'+uri+'</a></p>\n')
            # cross.write('      <p><strong>IRI:</strong>'+uri+'</p>\n')
            # Example taken from http://dvcs.w3.org/hg/prov/file/tip/examples/eg-24-prov-o-html-examples/rdf/create/rdf
            cross.write('\n')
            cross.write('      <div about="#example-for-property-'+qname[1]+'" typeof="prov:Entity" class="example">\n')
            cross.write('         <span rel="dcterms:subject" resource="'+property.subject+'"></span>\n')
            cross.write('         <strong>Example</strong>\n')
            cross.write('         <pre rel="prov:wasQuotedFrom" resource="http://dvcs.w3.org/hg/prov/raw-file/tip/examples/eg-24-prov-o-html-examples/rdf/create/rdf/property_'+qname[1]+'.ttl"\n')
            cross.write('              property="prov:value">')
            cross.write('{% escape %}{% include "includes/prov/examples/eg-24-prov-o-html-examples/rdf/create/rdf/property_'+qname[1]+'.ttl"%}{% endescape %}</pre>\n')
            cross.write('      </div>\n')
            cross.write('\n')
            # Example taken from http://dvcs.w3.org/hg/prov/file/tip/examples/eg-24-prov-o-html-examples/rdf/create/rdf
            cross.write('\n')
            cross.write('      <div about="#pml-mapping-for-property-'+qname[1]+'" typeof="prov:Entity" class="example">\n')
            cross.write('         <span rel="dcterms:subject" resource="'+property.subject+'"></span>\n')
            cross.write('         <strong>PML mapping</strong>\n')
            cross.write('         <pre rel="prov:wasQuotedFrom" resource="http://dvcs.w3.org/hg/prov/raw-file/tip/examples/eg-29-pml-mappings/rdf/create/rdf/property_'+qname[1]+'.ttl"\n')
            cross.write('              property="prov:value">')
            cross.write('{% escape %}{% include "includes/prov/examples/eg-29-pml-mappings/rdf/create/rdf/property_'+qname[1]+'.ttl"%}{% endescape %}</pre>\n')
            cross.write('      </div>\n')
            cross.write('\n')
            cross.write('      <div class="description">\n')
            # property rdfs:comment
            for comment in property.rdfs_comment:
                cross.write('         <div class="comment"><p>'+comment+'</p>\n')
                cross.write('         </div>\n')
# Characteristics
# characteristics = {ns.OWL['FunctionalProperty']: 'http://www.w3.org/TR/owl2-syntax/#Functional_Object_Properties',
# ns.OWL['InverseFunctionalProperty']: 'http://www.w3.org/TR/owl2-syntax/#Inverse-Functional_Object_Properties',
# ns.OWL['TransitiveProperty']: 'http://www.w3.org/TR/owl2-syntax/#Transitive_Object_Properties',
# ns.OWL['SymmetricProperty']: 'http://www.w3.org/TR/owl2-syntax/#Symmetric_Object_Properties',
## ns.OWL['AsymmetricProperty']: 'http://www.w3.org/TR/owl2-syntax/#Asymmetric_Object_Properties',
# ns.OWL['ReflexiveProperty']: 'http://www.w3.org/TR/owl2-syntax/#Reflexive_Object_Properties',
# ns.OWL['IrreflexiveProperty']: 'http://www.w3.org/TR/owl2-syntax/#Irreflexive_Object_Properties'}
# has = False
# for characteristic in characteristics.keys():
# if characteristic in property.rdf_type:
# has = True
# if has:
# cross.write(' <p><strong>has characteristics</strong>')
# comma = ''
# for characteristic in characteristics.keys():
# if characteristic in property.rdf_type:
# qname = characteristic.split('#')
# cross.write(comma+' <a href="'+characteristics[characteristic]+'">'+qname[1].replace('Property','').replace('F',' F')+'</a>')
# comma = ', '
# cross.write(' </p>\n')
# cross.write(' <dl>\n')
# property prov:component ?component
# if len(property.prov_component) > 0 and False:
# cross.write('\n')
# cross.write(' <dt>in PROV component</dt>\n')
# cross.write(' <dd class="component-'+property.prov_component.first+'">\n')
# for component in property.prov_component:
# cross.write(' <a title="'+component+'" href="#component-'+component+'">'+component+'</a>\n')
# cross.write(' </dd>\n')
# property rdfs:subPropertyOf ?super
# do = False
# for super in property.rdfs_subPropertyOf:
# qname = super.subject.split('#')
# if qname[0] == 'http://www.w3.org/ns/prov':
# do = True
# if do:
# cross.write('\n')
# cross.write(' <dt>has super-properties</dt>\n')
# cross.write(' <dd>\n')
# cross.write(' <ul>\n')
# for super in property.rdfs_subPropertyOf:
# qname = super.subject.split('#')
# if qname[0] == 'http://www.w3.org/ns/prov':
# cross.write(' <li>\n')
# cross.write(' <a title="'+super.subject+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a>\n')
# cross.write(' </li>\n')
# cross.write(' </ul>\n')
# cross.write(' </dd>\n')
# property rdfs:domain ?class
# if len(property.rdfs_domain) > 0:
# cross.write('\n')
# cross.write(' <dt>has domain</dt>\n')
# cross.write(' <dd>\n')
# cross.write(' <ul>\n')
# for domain in property.rdfs_domain:
# qname = domain.subject.split('#')
# cross.write(' <li>\n')
# if domain.subject.startswith('http://www.w3.org/ns/prov#'):
# cross.write(' <a title="'+domain.subject+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a>\n')
# else:
# # NOTE: This processes ALL [ owl:unionOf () ], so if there are more than one it will duplicate.
# # Part of the problem is that SuRF might be giving different bnode IDs than rdflib.
# #print property.subject + ' has a union domain that includes:'
# for triple in graph.triples((property.subject, ns.RDFS['domain'], None)):
# for union in graph.triples((triple[2],ns.OWL['unionOf'],None)):
# orString = ''
# for classInDomain in graph.items(union[2]):
# qname = classInDomain.split('#')
# #print ' ' + qname[1]
# cross.write(' '+orString+'<a title="'+classInDomain+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a>\n')
# orString = ' or '
## cross.write(' </li>\n')
# cross.write(' </ul>\n')
# cross.write(' </dd>\n')
# property rdfs:range ?class
# if len(property.rdfs_range) > 0:
# cross.write('\n')
# cross.write(' <dt>has range</dt>\n')
# cross.write(' <dd>\n')
# cross.write(' <ul>\n')
# for range in property.rdfs_range:
# cross.write(' <li>\n')
# try:
# qname = range.subject.split('#')
# if qname[0] == 'http://www.w3.org/ns/prov':
# cross.write(' <a title="'+range.subject+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a>\n')
# elif qname[0] == 'http://www.w3.org/2002/07/owl':
# noop = 'noop'
# else:
# cross.write(' '+str(range)+'\n')
# except:
# cross.write(' '+str(range)+'\n')
# cross.write(' </li>\n')
# cross.write(' </ul>\n')
# cross.write(' </dd>\n')
# property owl:inverseOf ?inverse
# if len(property.owl_inverseOf) > 0:
# cross.write('\n')
# cross.write(' <dt>has inverse</dt>\n')
## cross.write(' <dd>\n')
# cross.write(' <ul>\n')
# for inverse in property.owl_inverseOf:
# cross.write(' <li>\n')
# try:
# qname = inverse.subject.split('#')
# if qname[0] == 'http://www.w3.org/ns/prov':
# cross.write(' <a title="'+inverse.subject+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a>\n')
# elif qname[0] == 'http://www.w3.org/2002/07/owl':
# noop = 'noop'
# else:
# cross.write(' '+str(inverse)+'\n')
# except:
# cross.write(' '+str(inverse)+'\n')
# cross.write(' </li>\n')
# cross.write(' </ul>\n')
# cross.write(' </dd>\n')
# ?sub rdfs:subPropertyOf property
# do = False
# for sub in property.is_rdfs_subPropertyOf_of:
# qname = sub.subject.split('#')
# if qname[0] == 'http://www.w3.org/ns/prov':
# do = True
# if do:
# cross.write('\n')
# cross.write(' <dt>has sub-properties</dt>\n')
# cross.write(' <dd>\n')
# cross.write(' <ul>\n')
# for sub in property.is_rdfs_subPropertyOf_of:
# qname = sub.subject.split('#')
# if qname[0] == 'http://www.w3.org/ns/prov':
# cross.write(' <li>\n')
# cross.write(' <a title="'+sub.subject+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a>\n')
# cross.write(' </li>\n')
# cross.write(' </ul>\n')
# cross.write(' </dd>\n')
#
# property prov:qualifiedForm ?class, ?property
# if len(property.prov_qualifiedForm) > 0:
# qname = property.prov_qualifiedForm.first.subject.split('#')
# cross.write('\n')
# cross.write(' <dt>can be qualified with</dt>\n')
# cross.write(' <dd>\n')
# cross.write(' <ul>\n')
# for qualified in property.prov_qualifiedForm:
# qname = qualified.subject.split('#')
# owlType=''
# if ns.OWL['Class'] in qualified.rdf_type:
# owlType='class'
# else:
# owlType='property'
# cross.write(' <li>\n')
# cross.write(' <a title="'+qualified.subject+'" href="#'+qname[1]+'" class="owl'+owlType+'">'+PREFIX+':'+qname[1]+'</a>\n')
# cross.write(' </li>\n')
# cross.write(' </ul>\n')
# cross.write(' </dd>\n')
# property prov:unqualifiedForm ?p
# if len(property.prov_unqualifiedForm) > 0:
# qname = property.prov_unqualifiedForm.first.subject.split('#')
# cross.write('\n')
# cross.write(' <dt>qualifies</dt>\n')
# cross.write(' <dd>\n')
# cross.write(' <a title="'+property.prov_unqualifiedForm.first.subject+'" href="#'+qname[1]+'" class="owlproperty">'+PREFIX+':'+qname[1]+'</a>\n')
# cross.write(' </dd>\n')
# cross.write('\n')
# cross.write(' </dl>\n')
cross.write(' </div>\n') # e.g. <div class="description">
cross.write(' </div>\n') # e.g. <div id="wasGeneratedBy" class="entity">
cross.write('\n')
cross.write('</div>\n') # e.g. <div id="prov-starting-point-owl-classes-crossreference"
# n = ''
# if category.lower()[0] in ['a','e','i,','o','u']:
# n = 'n'
# quals.write('<table class="qualified-forms">\n')
# quals.write(' <caption>Qualification Property and Involvement Class used to qualify a'+n+' '+category.capitalize()+' Property.</caption>\n')
# quals.write(' <tr>\n')
# qname = property.subject.split('#')
# quals.write(' <th>'+category.capitalize()+' Property</th>\n')
# quals.write(' <th>Qualification Property</th>\n')
# quals.write(' <th>Involvement Class</th>\n')
# quals.write(' <th>Object Property</th>\n')
# quals.write(' </tr>\n')
# for uri in ordered['properties']:
# property = []
# if propertyTypes[uri] == 'datatype-property':
# property = session.get_resource(uri,DatatypeProperties)
# else:
# property = session.get_resource(uri,ObjectProperties) # e.g. http://www.w3.org/ns/prov#actedOnBehalfOf
### if len(property.prov_qualifiedForm) > 0:
# qualProp = 'no'
# qualClass = 'no'
# objectProp = 'no'
# for qualified in property.prov_qualifiedForm:
# if len(qualified.rdf_type) == 0:
# print 'ERROR on qualifiedForm annotation for ' + property.subject
# print qualified + ' is not defined'
## elif ns.OWL['Class'] in qualified.rdf_type:
# qualClass = qualified # e.g. http://www.w3.org/ns/prov#Responsibility
# for super in qualClass.rdfs_subClassOf:
# qname = super.subject.split('#')
# if ( qname[1] == 'EntityInvolvement' ):
# objectProp = 'entity'
# elif ( qname[1] == 'ActivityInvolvement' ):
# objectProp = 'activity'
# elif ( qname[1] == 'AgentInvolvement' ):
# objectProp = 'agent'
# elif ( qname[1] == 'CollectionInvolvement' ):
# objectProp = 'collection'
# elif ( qname[1] == 'DictionaryInvolvement' ):
# objectProp = 'collection'
# else:
# if "qualified" in qualified.subject:
# qualProp = qualified # e.g. http://www.w3.org/ns/prov#qualifiedResponsibility
# else:
# qualProp = 'no' # Avoiding prov:startedAtTime, prov:atTime, prov:Start, null
# if qualProp != 'no' and qualClass != 'no' and objectProp != 'no':
# quals.write(' <tr>\n')
# qname = property.subject.split('#')
# quals.write(' <td><a title="'+property.subject+'" href="#'+qname[1]+'" class="owlproperty">'+PREFIX+':'+qname[1]+'</a></td>\n')
# qname = qualProp.subject.split('#')
# quals.write(' <td><a title="'+qualProp.subject+'" href="#'+qname[1]+'" class="owlproperty">'+PREFIX+':'+qname[1]+'</a></td>\n')
# qname = qualClass.subject.split('#')
# quals.write(' <td><a title="'+qualClass.subject+'" href="#'+qname[1]+'" class="owlclass">'+PREFIX+':'+qname[1]+'</a></td>\n')
# quals.write(' <td><a title="'+qname[0]+'#'+objectProp+'" href="#'+objectProp+'" class="owlproperty">'+PREFIX+':'+objectProp+'</a></td>\n')
# quals.write(' </tr>\n')
# quals.write('</table>\n')
        glance.close()
        cross.close()
        # NOTE(review): quals is left open here -- its writes and close are all
        # commented out above, so the qualifed-forms file is created empty and
        # the handle leaks until interpreter exit.
        # quals.close()
    else:
        print '   '+glanceName + ' or ' + crossName + " already exists. Not modifying."
#inversesName = 'inverse-names.html'
#if not(os.path.exists(inversesName)):
## inverses = open(inversesName, 'w')
# inverses.write('<table class="inverse-names">\n')
# inverses.write(' <caption>Names of inverses</caption>\n')
# inverses.write(' <tr>\n')
# inverses.write(' <th>PROV-O Property</th>\n')
# inverses.write(' <th>Recommended inverse name</th>\n')
# inverses.write(' </tr>\n')
# for property in ObjectProperties.all():
# qname = property.subject.split('#')
# if property.prov_inverse:
# inverses.write(' <tr>\n')
# inverses.write(' <td><a title="'+property.subject+'" href="#'+qname[1]+'" class="owlproperty">'+PREFIX+':'+qname[1]+'</a></td>\n')
# inverses.write(' <td>prov:'+property.prov_inverse.first+'</td>\n')
# inverses.write(' </tr>\n')
# inverses.write('<table>\n')
# inverses.close()
|
|
"""
sentry.web.urls
~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
try:
from django.conf.urls import include, patterns, url
except ImportError:
# django < 1.5 compat
from django.conf.urls.defaults import include, patterns, url # NOQA
from django.conf import settings
from sentry.web import api
from sentry.web.frontend import (
alerts, accounts, generic, groups, events,
admin, users, explore, explore_code,
)
import sentry.web.frontend.projects.general
import sentry.web.frontend.projects.keys
import sentry.web.frontend.projects.plugins
import sentry.web.frontend.projects.quotas
import sentry.web.frontend.projects.rules
import sentry.web.frontend.projects.tags
__all__ = ('urlpatterns',)
from sentry.web.frontend.accept_organization_invite import AcceptOrganizationInviteView
from sentry.web.frontend.access_group_migration import AccessGroupMigrationView
from sentry.web.frontend.auth_link_identity import AuthLinkIdentityView
from sentry.web.frontend.auth_login import AuthLoginView
from sentry.web.frontend.auth_logout import AuthLogoutView
from sentry.web.frontend.auth_organization_login import AuthOrganizationLoginView
from sentry.web.frontend.auth_provider_login import AuthProviderLoginView
from sentry.web.frontend.home import HomeView
from sentry.web.frontend.help_index import HelpIndexView
from sentry.web.frontend.help_page import HelpPageView
from sentry.web.frontend.help_platform_details import HelpPlatformDetailsView
from sentry.web.frontend.help_platform_index import HelpPlatformIndexView
from sentry.web.frontend.mailgun_inbound_webhook import MailgunInboundWebhookView
from sentry.web.frontend.organization_api_keys import OrganizationApiKeysView
from sentry.web.frontend.organization_api_key_settings import OrganizationApiKeySettingsView
from sentry.web.frontend.organization_audit_log import OrganizationAuditLogView
from sentry.web.frontend.organization_auth_settings import OrganizationAuthSettingsView
from sentry.web.frontend.organization_home import OrganizationHomeView
from sentry.web.frontend.organization_members import OrganizationMembersView
from sentry.web.frontend.organization_member_settings import OrganizationMemberSettingsView
from sentry.web.frontend.organization_stats import OrganizationStatsView
from sentry.web.frontend.organization_settings import OrganizationSettingsView
from sentry.web.frontend.create_organization import CreateOrganizationView
from sentry.web.frontend.create_organization_member import CreateOrganizationMemberView
from sentry.web.frontend.create_project import CreateProjectView
from sentry.web.frontend.create_team import CreateTeamView
from sentry.web.frontend.project_issue_tracking import ProjectIssueTrackingView
from sentry.web.frontend.project_notifications import ProjectNotificationsView
from sentry.web.frontend.project_release_tracking import ProjectReleaseTrackingView
from sentry.web.frontend.project_settings import ProjectSettingsView
from sentry.web.frontend.release_webhook import ReleaseWebhookView
from sentry.web.frontend.remove_organization import RemoveOrganizationView
from sentry.web.frontend.remove_project import RemoveProjectView
from sentry.web.frontend.remove_team import RemoveTeamView
from sentry.web.frontend.team_settings import TeamSettingsView
def init_all_applications():
    """Force every installed Django app's models module to import so that
    all model/plugin registration side effects run before URLs resolve."""
    from django.db.models import get_apps, get_models

    for application in get_apps():
        try:
            get_models(application)
        except Exception:
            # Best-effort: one broken app must not prevent the rest from
            # registering.
            continue
# Import every installed app's models up-front so that all registration
# side effects have happened before the first URL is resolved.
init_all_applications()

# Base (empty) pattern list; route groups are appended below.
urlpatterns = patterns('')

if settings.DEBUG:
    # Debug-only previews of outbound notification emails; mounted only
    # when DEBUG is on, never in production.
    import sentry.web.frontend.debug.mail
    urlpatterns += patterns('',
        url(r'^debug/mail/new-event/$',
            sentry.web.frontend.debug.mail.new_event),
        url(r'^debug/mail/new-note/$',
            sentry.web.frontend.debug.mail.new_note),
        url(r'^debug/mail/request-access/$',
            sentry.web.frontend.debug.mail.request_access),
        url(r'^debug/mail/access-approved/$',
            sentry.web.frontend.debug.mail.access_approved),
    )
# Main route table.  Django resolves patterns top-to-bottom, so ordering is
# significant: the high-traffic store endpoints come first and the greedy
# slug-based catch-alls (organization home, project stream) come last.
urlpatterns += patterns('',
    # Store endpoints first since they are the most active
    url(r'^api/store/$', api.StoreView.as_view(),
        name='sentry-api-store'),
    # NOTE(review): this deliberately reuses the name 'sentry-api-store';
    # reverse() will only ever resolve to one of the two patterns.
    url(r'^api/(?P<project_id>[\w_-]+)/store/$', api.StoreView.as_view(),
        name='sentry-api-store'),
    url(r'^_static/(?P<module>[^/]+)/(?P<path>.*)$', generic.static_media,
        name='sentry-media'),

    # API
    url(r'^api/0/', include('sentry.api.urls')),
    url(r'^api/hooks/mailgun/inbound/', MailgunInboundWebhookView.as_view(),
        name='sentry-mailgun-inbound-hook'),
    url(r'^api/hooks/release/(?P<plugin_id>[^/]+)/(?P<project_id>[^/]+)/(?P<signature>[^/]+)/', ReleaseWebhookView.as_view(),
        name='sentry-release-hook'),

    # Auth
    url(r'^auth/link/(?P<organization_slug>[^/]+)/$', AuthLinkIdentityView.as_view(),
        name='sentry-auth-link-identity'),
    url(r'^auth/login/$', AuthLoginView.as_view(),
        name='sentry-login'),
    url(r'^auth/login/(?P<organization_slug>[^/]+)/$', AuthOrganizationLoginView.as_view(),
        name='sentry-auth-organization'),
    url(r'^auth/sso/$', AuthProviderLoginView.as_view(),
        name='sentry-auth-sso'),
    url(r'^auth/logout/$', AuthLogoutView.as_view(),
        name='sentry-logout'),

    # Account
    url(r'^login-redirect/$', accounts.login_redirect,
        name='sentry-login-redirect'),
    url(r'^register/$', accounts.register,
        name='sentry-register'),
    # 'sudo.views.sudo' is referenced as a dotted string (old-style Django
    # view reference) with extra kwargs passed as a dict.
    url(r'^account/sudo/$', 'sudo.views.sudo',
        {'template_name': 'sentry/account/sudo.html'},
        name='sentry-sudo'),
    url(r'^account/recover/$', accounts.recover,
        name='sentry-account-recover'),
    url(r'^account/recover/confirm/(?P<user_id>[\d]+)/(?P<hash>[0-9a-zA-Z]+)/$', accounts.recover_confirm,
        name='sentry-account-recover-confirm'),
    url(r'^account/settings/$', accounts.settings,
        name='sentry-account-settings'),
    url(r'^account/settings/appearance/$', accounts.appearance_settings,
        name='sentry-account-settings-appearance'),
    url(r'^account/settings/identities/$', accounts.list_identities,
        name='sentry-account-settings-identities'),
    url(r'^account/settings/notifications/$', accounts.notification_settings,
        name='sentry-account-settings-notifications'),
    url(r'^account/settings/social/', include('social_auth.urls')),

    # Help
    url(r'^docs/$', HelpIndexView.as_view(),
        name='sentry-help'),
    url(r'^docs/api/', include('sentry.api.help_urls')),
    url(r'^docs/(?P<page_id>[\d]+)/(?P<page_slug>[^\/]+)/$', HelpPageView.as_view(),
        name='sentry-help-page'),
    url(r'^docs/platforms/$', HelpPlatformIndexView.as_view(),
        name='sentry-help-platform-list'),
    url(r'^docs/platforms/(?P<platform>[^\/]+)/$', HelpPlatformDetailsView.as_view(),
        name='sentry-help-platform'),

    # Admin
    url(r'^manage/$', admin.overview,
        name='sentry-admin-overview'),
    url(r'^manage/status/environment/$', admin.status_env,
        name='sentry-admin-status'),
    url(r'^manage/status/packages/$', admin.status_packages,
        name='sentry-admin-packages-status'),
    url(r'^manage/status/mail/$', admin.status_mail,
        name='sentry-admin-mail-status'),

    # Admin - Teams
    url(r'^manage/teams/$', admin.manage_teams,
        name='sentry-admin-teams'),

    # Admin - Projects
    url(r'^manage/projects/$', admin.manage_projects,
        name='sentry-admin-projects'),

    # Admin - Users
    url(r'^manage/users/$', admin.manage_users,
        name='sentry-admin-users'),
    url(r'^manage/users/new/$', admin.create_new_user,
        name='sentry-admin-new-user'),
    url(r'^manage/users/(?P<user_id>\d+)/$', admin.edit_user,
        name='sentry-admin-edit-user'),
    url(r'^manage/users/(?P<user_id>\d+)/remove/$', admin.remove_user,
        name='sentry-admin-remove-user'),
    url(r'^manage/users/(?P<user_id>\d+)/projects/$', admin.list_user_projects,
        name='sentry-admin-list-user-projects'),

    # Admin - Plugins
    url(r'^manage/plugins/(?P<slug>[\w_-]+)/$', admin.configure_plugin,
        name='sentry-admin-configure-plugin'),

    # Organizations
    # NOTE(review): this single-segment slug catch-all sits before the
    # explicit 'organizations/...' routes; it cannot shadow them (they have
    # more path segments), but keep the ordering in mind when adding new
    # single-segment top-level routes.
    url(r'^(?P<organization_slug>[\w_-]+)/$', OrganizationHomeView.as_view(),
        name='sentry-organization-home'),
    url(r'^organizations/new/$', CreateOrganizationView.as_view(),
        name='sentry-create-organization'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/access-groups/$', AccessGroupMigrationView.as_view(),
        name='sentry-organization-access-group-migration'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/api-keys/$', OrganizationApiKeysView.as_view(),
        name='sentry-organization-api-keys'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/api-keys/(?P<key_id>[\w_-]+)$', OrganizationApiKeySettingsView.as_view(),
        name='sentry-organization-api-key-settings'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/auth/$', OrganizationAuthSettingsView.as_view(),
        name='sentry-organization-auth-settings'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/audit-log/$', OrganizationAuditLogView.as_view(),
        name='sentry-organization-audit-log'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/members/$', OrganizationMembersView.as_view(),
        name='sentry-organization-members'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/members/new/$', CreateOrganizationMemberView.as_view(),
        name='sentry-create-organization-member'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/members/(?P<member_id>\d+)/$', OrganizationMemberSettingsView.as_view(),
        name='sentry-organization-member-settings'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/stats/$', OrganizationStatsView.as_view(),
        name='sentry-organization-stats'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/settings/$', OrganizationSettingsView.as_view(),
        name='sentry-organization-settings'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/teams/(?P<team_slug>[\w_-]+)/settings/$', TeamSettingsView.as_view(),
        name='sentry-manage-team'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/teams/(?P<team_slug>[\w_-]+)/remove/$', RemoveTeamView.as_view(),
        name='sentry-remove-team'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/teams/new/$', CreateTeamView.as_view(),
        name='sentry-create-team'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/projects/new/$', CreateProjectView.as_view(),
        name='sentry-create-project'),
    url(r'^organizations/(?P<organization_slug>[\w_-]+)/remove/$', RemoveOrganizationView.as_view(),
        name='sentry-remove-organization'),
    url(r'^accept/(?P<member_id>\d+)/(?P<token>\w+)/$', AcceptOrganizationInviteView.as_view(),
        name='sentry-accept-invite'),

    # Settings - Projects
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/get-started/$',
        sentry.web.frontend.projects.general.get_started,
        name='sentry-get-started'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/issue-tracking/$',
        ProjectIssueTrackingView.as_view(),
        name='sentry-project-issue-tracking'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/release-tracking/$',
        ProjectReleaseTrackingView.as_view(),
        name='sentry-project-release-tracking'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/settings/$',
        ProjectSettingsView.as_view(),
        name='sentry-manage-project'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/keys/$',
        sentry.web.frontend.projects.keys.manage_project_keys,
        name='sentry-manage-project-keys'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/keys/new/$',
        sentry.web.frontend.projects.keys.new_project_key,
        name='sentry-new-project-key'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/keys/(?P<key_id>\d+)/edit/$',
        sentry.web.frontend.projects.keys.edit_project_key,
        name='sentry-edit-project-key'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/keys/(?P<key_id>\d+)/remove/$',
        sentry.web.frontend.projects.keys.remove_project_key,
        name='sentry-remove-project-key'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/keys/(?P<key_id>\d+)/disable/$',
        sentry.web.frontend.projects.keys.disable_project_key,
        name='sentry-disable-project-key'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/keys/(?P<key_id>\d+)/enable/$',
        sentry.web.frontend.projects.keys.enable_project_key,
        name='sentry-enable-project-key'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/plugins/$',
        sentry.web.frontend.projects.plugins.manage_plugins,
        name='sentry-manage-project-plugins'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/plugins/(?P<slug>[\w_-]+)/$',
        sentry.web.frontend.projects.plugins.configure_project_plugin,
        name='sentry-configure-project-plugin'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/plugins/(?P<slug>[\w_-]+)/reset/$',
        sentry.web.frontend.projects.plugins.reset_project_plugin,
        name='sentry-reset-project-plugin'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/plugins/(?P<slug>[\w_-]+)/disable/$',
        sentry.web.frontend.projects.plugins.disable_project_plugin,
        name='sentry-disable-project-plugin'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/plugins/(?P<slug>[\w_-]+)/enable/$',
        sentry.web.frontend.projects.plugins.enable_project_plugin,
        name='sentry-enable-project-plugin'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/remove/$',
        RemoveProjectView.as_view(),
        name='sentry-remove-project'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/tags/$',
        sentry.web.frontend.projects.tags.manage_project_tags,
        name='sentry-manage-project-tags'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/quotas/$',
        sentry.web.frontend.projects.quotas.manage_project_quotas,
        name='sentry-manage-project-quotas'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/notifications/$',
        ProjectNotificationsView.as_view(),
        name='sentry-project-notifications'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/rules/$',
        sentry.web.frontend.projects.rules.list_rules,
        name='sentry-project-rules'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/rules/(?P<rule_id>\d+)/edit/$',
        sentry.web.frontend.projects.rules.create_or_edit_rule,
        name='sentry-edit-project-rule'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/rules/(?P<rule_id>\d+)/remove/$',
        sentry.web.frontend.projects.rules.remove_rule,
        name='sentry-remove-project-rule'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/rules/new/$',
        sentry.web.frontend.projects.rules.create_or_edit_rule,
        name='sentry-new-project-rule'),

    # Generic
    url(r'^$', HomeView.as_view(),
        name='sentry'),

    # crossdomain.xml
    url(r'^crossdomain\.xml$', api.crossdomain_xml_index,
        name='sentry-api-crossdomain-xml-index'),
    url(r'^api/(?P<project_id>[\w_-]+)/crossdomain\.xml$', api.crossdomain_xml,
        name='sentry-api-crossdomain-xml'),

    # Generic API
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/poll/$', api.poll,
        name='sentry-api-poll'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/resolve/$', api.resolve,
        name='sentry-api-resolve'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/bookmark/$', api.bookmark,
        name='sentry-api-bookmark'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/clear/$', api.clear,
        name='sentry-api-clear'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>[\w_-]+)/remove/$', api.remove_group,
        name='sentry-api-remove-group'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<team_slug>[\w_-]+)/groups/trends/$', api.get_group_trends,
        name='sentry-api-groups-trends'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<team_slug>[\w_-]+)/groups/newest/$', api.get_new_groups,
        name='sentry-api-groups-new'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<team_slug>[\w_-]+)/groups/resolved/$', api.get_resolved_groups,
        name='sentry-api-groups-resolved'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>[\w_-]+)/set/public/$', api.make_group_public,
        name='sentry-api-set-group-public'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>[\w_-]+)/set/private/$', api.make_group_private, name='sentry-api-set-group-private'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>[\w_-]+)/set/resolved/$', api.resolve_group,
        name='sentry-api-set-group-resolve'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>[\w_-]+)/set/muted/$', api.mute_group,
        name='sentry-api-set-group-mute'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>[\w_-]+)/set/unresolved/$', api.unresolve_group,
        name='sentry-api-set-group-unresolve'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>[\w_-]+)/tags/(?P<tag_name>[^/]+)/$', api.get_group_tags,
        name='sentry-api-group-tags'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<team_slug>[\w_-]+)/stats/$', api.get_stats,
        name='sentry-api-stats'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/tags/search/$', api.search_tags,
        name='sentry-api-search-tags'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/users/search/$', api.search_users,
        name='sentry-api-search-users'),
    url(r'^api/(?P<organization_slug>[\w_-]+)/projects/search/$', api.search_projects,
        name='sentry-api-search-projects'),

    # TV dashboard
    url(r'^(?P<organization_slug>[\w_-]+)/teams/(?P<team_slug>[\w_-]+)/wall/$', groups.wall_display,
        name='sentry-wall'),

    # Team-wide alerts
    url(r'^(?P<organization_slug>[\w_-]+)/teams/(?P<team_slug>[\w_-]+)/show/alerts/$', alerts.alert_list,
        name='sentry-alerts'),

    # Explore - Users
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/users/$',
        users.user_list, name='sentry-users'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/users/(?P<user_id>\d+)/$',
        users.user_details, name='sentry-user-details'),

    # Explore - Code
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/code/$', explore_code.list_tag,
        {'selection': 'filenames'}, name='sentry-explore-code'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/code/by/function/$', explore_code.list_tag,
        {'selection': 'functions'}, name='sentry-explore-code-by-function'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/code/by/filename/(?P<tag_id>\d+)/$',
        explore_code.tag_details, {'selection': 'filenames'}, name='sentry-explore-code-details'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/code/by/function/(?P<tag_id>\d+)/$',
        explore_code.tag_details, {'selection': 'functions'}, name='sentry-explore-code-details-by-function'),

    # Explore
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/$', explore.tag_list,
        name='sentry-explore'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/(?P<key>[^\/]+)/$', explore.tag_value_list,
        name='sentry-explore-tag'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/explore/(?P<key>[^\/]+)/(?P<value_id>\d+)/$', explore.tag_value_details,
        name='sentry-explore-tag-value'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/$', groups.group,
        name='sentry-group'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/events/$', groups.group_event_list,
        name='sentry-group-events'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/events/(?P<event_id>\d+)/$', groups.group,
        name='sentry-group-event'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/events/(?P<event_id>\d+)/replay/$', events.replay_event,
        name='sentry-replay'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/events/(?P<event_id_or_latest>(\d+|latest))/json/$', groups.group_event_details_json,
        name='sentry-group-event-json'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/actions/(?P<slug>[\w_-]+)/', groups.group_plugin_action,
        name='sentry-group-plugin-action'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/tags/$', groups.group_tag_list,
        name='sentry-group-tags'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/tags/(?P<tag_name>[^/]+)/$', groups.group_tag_details,
        name='sentry-group-tag-details'),
    # NOTE(review): the name 'sentry-alerts' is also used by the team-wide
    # alert list above; reverse() will only resolve to one of them.
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/alerts/$', alerts.alert_list,
        name='sentry-alerts'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/alerts/(?P<alert_id>\d+)/$', alerts.alert_details,
        name='sentry-alert-details'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/alerts/(?P<alert_id>\d+)/resolve/$', alerts.resolve_alert,
        name='sentry-resolve-alert'),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/stream/$', groups.group_list),
    url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/$', groups.group_list,
        name='sentry-stream'),
    url(r'^(?P<organization_slug>[\w_-]+)/teams/(?P<team_slug>[\w_-]+)/$', groups.dashboard,
        name='sentry-team-dashboard'),

    # Legacy
    # NOTE(review): reuses the name 'sentry-group' from the project-scoped
    # route above; kept for backwards-compatible redirects.
    url(r'^(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/$', groups.redirect_to_group,
        name='sentry-group'),
)
|
|
'''
Created on Jan 29, 2014
@author: vital
'''
import pyfits
import CodeTools.vitools as vit
import numpy as np
import scipy as sp
def Fits2Data(FolderName, FileName):
    """Load a spectrum from a fits file and reconstruct its wavelength axis.

    Returns a tuple ``(flux, wavelength, headers)`` where ``headers`` is a
    list/tuple of the fits headers read.  The wavelength solution is derived
    from the header keywords, depending on which pipeline made the file.
    """
    # Main data block plus primary header.
    Data, Header_0 = pyfits.getdata(FolderName + FileName, 1, header=True)

    # Files produced by our own tools store explicit 'Wave'/'Int' columns.
    if ('WHTJOINW' in Header_0) or ("STALIGHT" in Header_0) or ("NEBUSPEC" in Header_0):
        x = Data['Wave']
        y = Data['Int']
        return y, x, [Header_0]

    # SDSS dr10 spectra: returned redshift-corrected and in absolute units.
    elif ("COEFF0" in Header_0 and "dr10" in FileName):
        FitsFile = pyfits.open(FolderName + FileName)
        Spectra = FitsFile[1].data
        Header_2 = FitsFile[2].data
        Header_3 = FitsFile[3].data
        Int = Spectra['flux']
        Int_abs = Int / 1e17                    # dr10 stores flux in units of 1e-17
        WavelenRange = 10.0**Spectra['loglam']  # log10(lambda) -> lambda
        SDSS_z = float(Header_2["z"][0] + 1)
        Wavelength_z = WavelenRange / SDSS_z    # shift to rest frame
        Headers = (Header_0, Header_2, Header_3)
        return Int_abs, Wavelength_z, Headers

    else:
        # Cubes are collapsed to their first spectrum; 1D data used as-is.
        if Data.ndim == 1:
            Int = Data
        else:
            Int = Data[0]

        if "COEFF0" in Header_0:
            # Logarithmic wavelength solution.
            dw = 10.0**Header_0['COEFF1']   # wavelength interval per pixel
            Wmin = 10.0**Header_0['COEFF0']
            pixels = Header_0['NAXIS1']     # number of output pixels
            Wmax = Wmin + dw * pixels
            WavelenRange = np.linspace(Wmin, Wmax, pixels, endpoint=False)
            return Int, WavelenRange, [Header_0]

        elif "LTV1" in Header_0:
            # Trimmed CCD frame: correct the starting pixel offset.
            StartingPix = -1 * Header_0['LTV1']   # e.g. LTV1 = -261.
            Wmin_CCD = Header_0['CRVAL1']
            dw = Header_0['CD1_1']                # wavelength interval per pixel
            pixels = Header_0['NAXIS1']
            Wmin = Wmin_CCD + dw * StartingPix
            Wmax = Wmin + dw * pixels
            WavelenRange = np.linspace(Wmin, Wmax, pixels, endpoint=False)
            return Int, WavelenRange, [Header_0]

        else:
            # Simple linear wavelength solution.
            Wmin = Header_0['CRVAL1']
            dw = Header_0['CD1_1']                # wavelength interval per pixel
            pixels = Header_0['NAXIS1']
            Wmax = Wmin + dw * pixels
            WavelenRange = np.linspace(Wmin, Wmax, pixels, endpoint=False)
            # BUG FIX: this branch used to return (wavelength, flux, headers)
            # while every other branch returns (flux, wavelength, headers);
            # the order is now consistent across the whole function.
            return Int, WavelenRange, [Header_0]
def Fits2Data_Old(FolderName, FileName):
    """Legacy spectrum loader.

    Returns the raw intensity, the max-normalised intensity, the
    reconstructed wavelength range and its (Wmin, Wmax) limits.
    """
    FitsFile = pyfits.open(FolderName + FileName)
    TotalInt = FitsFile[0].data
    header = FitsFile[0].header

    # Collapse cubes to their first spectrum; 1D data is used as-is.
    if TotalInt.ndim == 1:
        Int = TotalInt
    else:
        Int = TotalInt[0, 0, :]
    MaxInten = max(Int)
    N_Inten = Int / MaxInten

    pixels = header['NAXIS1']   # number of output pixels

    if "COEFF0" in header:
        # Logarithmic wavelength solution (note the extra +dw offset kept
        # from the original implementation).
        dw = 10.0 ** header['COEFF1']
        Wmin = 10.0 ** header['COEFF0'] + dw
    elif "LTV1" in header:
        # Trimmed CCD frame: correct for the starting pixel offset.
        StartingPix = -1 * header['LTV1']   # e.g. LTV1 = -261.
        dw = header['CD1_1']
        Wmin = header['CRVAL1'] + dw * StartingPix
    else:
        # Plain linear wavelength solution.
        dw = header['CD1_1']
        Wmin = header['CRVAL1']

    Wmax = Wmin + dw * pixels
    WavelenRange = sp.linspace(Wmin, Wmax, pixels, endpoint=False)
    FitsFile.close()
    return Int, N_Inten, WavelenRange, Wmin, Wmax
def StarlightFileManager(FileFolder, FileName):
    """Parse a STARLIGHT output report.

    Extracts the best-fit quality parameters, the flux-normalization setup
    and the input/synthetic spectra, and classifies the input pixels by the
    STARLIGHT weight flag (0 = masked, -1 = clipped, -2 = flagged).

    Returns (Input_Wavelength, Input_Flux, Output_Flux, MaskPixels,
    ClippedPixels, FlagPixels, Parameters).
    """
    DataFile = open(FileFolder + FileName,"r")
    StarlightOutput = DataFile.readlines()
    DataFile.close()
    ## Synthesis Results - Best model ##
    # Locate each quantity by its marker text in the report
    # (vit.LineFinder presumably returns the index of the matching line --
    # confirm in CodeTools.vitools).
    Chi2Line = vit.LineFinder(StarlightOutput, "[chi2/Nl_eff]")
    AdevLine = vit.LineFinder(StarlightOutput, "[adev (%)]")
    SumXdevLine = vit.LineFinder(StarlightOutput, "[sum-of-x (%)]")
    v0_min_Line = vit.LineFinder(StarlightOutput, "[v0_min (km/s)]")
    vd_min_Line = vit.LineFinder(StarlightOutput, "[vd_min (km/s)]")
    Av_min_Line = vit.LineFinder(StarlightOutput, "[AV_min (mag)]")
    Nl_eff_line = vit.LineFinder(StarlightOutput, "[Nl_eff]")
    SignalToNoise_Line = vit.LineFinder(StarlightOutput, "## S/N")
    # The normalization block lists its values on the 4 lines following the
    # "## Normalization info" header, in this fixed order.
    l_norm_Line = vit.LineFinder(StarlightOutput, "## Normalization info") + 1
    llow_norm_Line = vit.LineFinder(StarlightOutput, "## Normalization info") + 2
    lupp_norm_Line = vit.LineFinder(StarlightOutput, "## Normalization info") + 3
    NormFlux_Line = vit.LineFinder(StarlightOutput, "## Normalization info") + 4
    SpecLine = vit.LineFinder(StarlightOutput, "## Synthetic spectrum (Best Model) ##l_obs f_obs f_syn wei") #Location of my Spectrum in starlight output
    #Quality of fit
    # Each value is the first whitespace-separated token of its line.
    Chi2 = float(StarlightOutput[Chi2Line].split()[0])
    Adev = float(StarlightOutput[AdevLine].split()[0])
    SumXdev = float(StarlightOutput[SumXdevLine].split()[0])
    Nl_eff = float(StarlightOutput[Nl_eff_line].split()[0])
    v0_min = float(StarlightOutput[v0_min_Line].split()[0])
    vd_min = float(StarlightOutput[vd_min_Line].split()[0])
    Av_min = float(StarlightOutput[Av_min_Line].split()[0])
    #Signal to noise configuration
    SignalToNoise_lowWave = float(StarlightOutput[SignalToNoise_Line + 1].split()[0])
    SignalToNoise_upWave = float(StarlightOutput[SignalToNoise_Line + 2].split()[0])
    SignalToNoise_magnitudeWave = float(StarlightOutput[SignalToNoise_Line + 3].split()[0])
    #Flux normailzation parameters
    l_norm = float(StarlightOutput[l_norm_Line].split()[0])
    llow_norm = float(StarlightOutput[llow_norm_Line].split()[0])
    lupp_norm = float(StarlightOutput[lupp_norm_Line].split()[0])
    FluxNorm = float(StarlightOutput[NormFlux_Line].split()[0])
    Parameters = (Chi2, Adev, SumXdev, Nl_eff, v0_min, vd_min, Av_min, SignalToNoise_lowWave, SignalToNoise_upWave, SignalToNoise_magnitudeWave, l_norm, llow_norm, lupp_norm)
    #Spectra pixels location
    Pixels_Number = int(StarlightOutput[SpecLine+1].split()[0]) #Number of pixels in the spectra
    Ind_i = SpecLine+2 #First pixel location
    Ind_f = Ind_i + Pixels_Number #Final pixel location
    Input_Wavelength = np.zeros(Pixels_Number)
    Input_Flux = np.zeros(Pixels_Number)
    Output_Flux = np.zeros(Pixels_Number)
    Output_Mask = np.zeros(Pixels_Number)
    # Each spectrum row is: l_obs  f_obs  f_syn  weight; the observed and
    # synthetic fluxes are rescaled back by the normalization flux.
    for i in range(Ind_i, Ind_f):
        Index = i - Ind_i
        Line = StarlightOutput[i].split()
        Input_Wavelength[Index] = float(Line[0])
        Input_Flux[Index] = float(Line[1])*FluxNorm
        Output_Flux[Index] = float(Line[2])*FluxNorm
        Output_Mask[Index] = float(Line[3])
    # Bucket input pixels by their STARLIGHT weight flag; each bucket is a
    # [wavelengths, fluxes] pair of parallel lists.
    MaskPixels = [[],[]] #The 0 tag
    ClippedPixels = [[],[]] #The -1 tag
    FlagPixels = [[],[]] #The -2 tag
    for j in range(len(Output_Mask)):
        PixelTag = Output_Mask[j]
        Wave = Input_Wavelength[j]
        if PixelTag == 0:
            MaskPixels[0].append(Wave)
            MaskPixels[1].append(Input_Flux[j])
        if PixelTag == -1:
            ClippedPixels[0].append(Wave)
            ClippedPixels[1].append(Input_Flux[j])
        if PixelTag == -2:
            FlagPixels[0].append(Wave)
            FlagPixels[1].append(Input_Flux[j])
    return Input_Wavelength, Input_Flux, Output_Flux, MaskPixels, ClippedPixels, FlagPixels, Parameters
def Fnu_to_FmAB(Flux_nu):
    """Convert a flux density F_nu to an AB magnitude:
    m_AB = -2.5 log10(F_nu) - 48.6."""
    return -2.5 * np.log10(Flux_nu) - 48.6
def Flam_to_Fnu(Flux_lam, Wavelength):
    """Convert F_lambda to F_nu: F_nu = lambda^2 / c * F_lambda,
    with c in Angstrom/s (wavelengths are assumed to be in Angstrom)."""
    c_AperS = 2.99792458e18   # speed of light in Angstrom per second
    return (np.power(Wavelength, 2) / c_AperS) * Flux_lam
def Fnu_to_Flam(Flux_nu, Wavelength):
    """Convert F_nu to F_lambda: F_lambda = c / lambda^2 * F_nu,
    with c in Angstrom/s (wavelengths are assumed to be in Angstrom)."""
    c_AperS = 2.99792458e18   # speed of light in Angstrom per second
    return (c_AperS / np.power(Wavelength, 2)) * Flux_nu
def mAb_to_Fnu(Flux_mAB):
    """Convert an AB magnitude back to a flux density F_nu, using the
    zero-point 3.68e-20 hard-coded by the original implementation."""
    fnu_zero = 3.68e-20
    return fnu_zero * np.power(10, -0.4 * Flux_mAB)
def Gauss_Function(x, *Funct_Coeff):
    """Gaussian profile A * exp(-(x - mu)^2 / (2 sigma^2)).

    The coefficients are passed positionally as (A, mu, sigma), matching
    the calling convention of scipy.optimize.curve_fit.
    """
    amplitude, centre, width = Funct_Coeff
    return amplitude * np.exp(-(x - centre) ** 2 / (2. * width ** 2))
# def TheoHBetaCoefficients():
#
# #Theoretical coefficients
# TextLines = vit.File2Lines("/home/vital/Dropbox/Astrophysics/Data/","cHBeta_TheoCoefficients.csv")
#
# TheoCHBetaTable = []
#
# for i in range(len(TextLines)):
# row = TextLines[i].split()
# TheoCHBetaTable.append(row)
#
# return TheoCHBetaTable
def Reddening_CCM98(Flux, Wave, EBV, R_V,):
    """Deredden ``Flux`` (sampled at ``Wave``, in Angstrom) with the CCM
    (Cardelli, Clayton & Mathis 1989) optical extinction polynomials, for
    the given colour excess E(B-V) and total-to-selective ratio R_V."""
    # Inverse wavelength in micron^-1, shifted to the CCM expansion point.
    y = 1 / (Wave / 10000) - 1.82
    # Powers y^0..y^7 feeding the two seventh-order polynomials.
    y_coeffs = np.array([np.ones(len(y))] + [np.power(y, k) for k in range(1, 8)])
    a_coeffs = np.array([1, 0.17699, -0.50447, -0.02427, 0.72085, 0.01979, -0.77530, 0.32999])
    b_coeffs = np.array([0, 1.41338, 2.28305, 1.07233, -5.38434, -0.62251, 5.30260, -2.09002])
    a_x = np.dot(a_coeffs, y_coeffs)
    b_x = np.dot(b_coeffs, y_coeffs)
    # Extinction at each wavelength, scaled by A_V = E(B-V) * R_V.
    A_lambda = (a_x + b_x / R_V) * (EBV * R_V)
    return Flux * np.power(10, 0.4 * A_lambda)
def Reddening_CCM98_err(Flux, Wave, EBV, R_V, FolderName, FileLog):
    """Deredden ``Flux`` with the CCM 1989 law (see Reddening_CCM98).

    NOTE(review): ``FolderName`` and ``FileLog`` are kept for interface
    compatibility but are unused -- presumably intended for logging the
    correction; confirm with callers before removing them.

    This body previously duplicated Reddening_CCM98 line for line; it now
    delegates so the extinction law lives in exactly one place.
    """
    return Reddening_CCM98(Flux, Wave, EBV, R_V)
def Reddening_SingleLine_CCM98(Flux, Wave, EBV, R_V,):
    """Deredden a single emission-line flux (or an array of them) with the
    CCM (1989) optical extinction law.

    BUG FIX: ``np.ones(len(y))`` raised TypeError whenever ``Wave`` was a
    scalar -- the advertised use case of this "single line" helper.
    ``np.ones_like`` accepts scalars and arrays alike and is identical for
    the array inputs the function already handled.
    """
    # Inverse wavelength in micron^-1, shifted to the CCM expansion point.
    x = 1 / (Wave / 10000)
    y = x - 1.82
    # Powers y^0..y^7 for the two seventh-order CCM polynomials.
    y_coeffs = np.array([np.ones_like(y), y, np.power(y, 2), np.power(y, 3), np.power(y, 4), np.power(y, 5), np.power(y, 6), np.power(y, 7)])
    a_coeffs = np.array([1, 0.17699, -0.50447, -0.02427, 0.72085, 0.01979, -0.77530, 0.32999])
    b_coeffs = np.array([0, 1.41338, 2.28305, 1.07233, -5.38434, -0.62251, 5.30260, -2.09002])
    a_x = np.dot(a_coeffs, y_coeffs)
    b_x = np.dot(b_coeffs, y_coeffs)
    A_V = EBV * R_V
    A_lambda = (a_x + b_x / R_V) * A_V
    Flux_New = Flux * np.power(10, 0.4 * A_lambda)
    return Flux_New
def Reddening_f_Cal(wave, R_v):
    """Reddening-curve value f(lambda) normalised so that f = 1 at H-beta
    (4861.333 Angstrom), for the given R_v."""
    H_BETA = 4861.333

    def _dm(inv_wave_um):
        # Piecewise-linear fit in inverse microns with a break at 2.29.
        # (H-beta's inverse wavelength is ~2.06, so the reference value
        # always uses the first branch, exactly as the original did.)
        if inv_wave_um <= 2.29:
            return 0.74 * inv_wave_um - 0.34 + 0.341 * R_v - 1.014
        return 0.43 * inv_wave_um + 0.37 + 0.341 * R_v - 1.014

    return _dm(1 / (wave / 10000)) / _dm(1 / (H_BETA / 10000))
def Log_2_Parameter(FileFolder, ObjectCode, ParameterToFind):
    """Look up a parameter value in an object's ``<ObjectCode>_log.txt`` file.

    Scans every row of the table returned by ``vit.File2Table`` and returns
    the second column of the LAST row whose first column equals
    ParameterToFind (last-match-wins, as in the original implementation).

    FIXES: py3-incompatible ``print`` statements; the string "None" sentinel
    replaced by a real ``None``. When the parameter is absent a warning is
    printed and the subsequent indexing still fails loudly, matching the
    historical behaviour.
    """
    LogExtension = "_log.txt"
    Log_File_Address = FileFolder + ObjectCode + LogExtension
    Obj_Log = vit.File2Table(Log_File_Address, "")
    Index = None
    for i in range(len(Obj_Log)):
        if Obj_Log[i][0] == ParameterToFind:
            Index = i  # keep scanning: the last match wins, as before
    if Index is None:
        print("WARNING: The parameter cannot be found in log")
        print(ParameterToFind)
        print(Obj_Log)
        print(Log_File_Address)
    Magnitude = Obj_Log[Index][1]
    return Magnitude
def PyNeb_ValidDiagnostics(Diags, Obs, MisDiagnosticos):
    """Cross every ordered pair of diagnostics, keeping converged solutions.

    Diags, Obs       : PyNeb Diagnostics / Observation objects.
    MisDiagnosticos  : dict of diagnostic labels.
    Returns a list of [label_i, label_j, Te, Ne] for each ordered pair whose
    crossed temperature/density solution is not NaN.

    FIXES: ``ErrorCount`` was printed but never incremented; ``.keys()[i]``
    indexing (py2-only) replaced by a materialised list; py3-compatible
    prints; the bare ``except`` narrowed to ``Exception``.
    """
    ErrorCount = 0
    Data = []
    labels = list(MisDiagnosticos.keys())  # indexable in both py2 and py3
    print("--------------------------")
    for i in range(len(labels)):
        for j in range(len(labels)):
            if i == j:
                continue
            try:
                Te, Ne = Diags.getCrossTemDen(labels[i], labels[j], obs=Obs)
                if str(Te) != "nan" and str(Ne) != "nan":
                    print("Combining: ")
                    print(str(labels[i]))
                    print(str(labels[j]))
                    print(Te, Ne)
                    Data.append([labels[i], labels[j], Te, Ne])
            except Exception:
                # BUG FIX: actually count the failures instead of only
                # printing ErrorCount + 1.
                ErrorCount += 1
                print(ErrorCount)
    print("--------------------------")
    return Data
def PyNeb_EmLineNormalizer(EmList, ValuesList):
    """Normalise emission-line fluxes by the H-beta (``H1_4861A``) flux.

    EmList     : list of line labels.
    ValuesList : list of flux values as strings; element 0 is passed through
                 untouched (historically the first column is not a flux).
    Returns (list of normalised values as strings, H-beta flux as a string).

    FIXES: string "None" sentinel replaced by a real ``None``;
    py3-compatible prints.
    """
    HBetaLabel = "H1_4861A"
    HBeta_Value = None
    for label, value in zip(EmList, ValuesList):
        if label == HBetaLabel:
            HBeta_Value = float(value)
    if HBeta_Value is None:
        print("WARNING: HBeta value not found")
        print("The HBeta value is: ")
        print(HBeta_Value)
    n_ValuesList = [ValuesList[0]]
    for i in range(1, len(ValuesList)):
        Flux = float(ValuesList[i])
        n_ValuesList.append(str(Flux / HBeta_Value))
    return n_ValuesList, str(HBeta_Value)
def EP_Constant(a_b_List,Sigma_x_y_List):
    """Uncertainty of a linear combination Y = a*x + b*y + ...:
    Sigma_Y = sqrt(sum((coef_i * sigma_i)**2))."""
    weighted = a_b_List * Sigma_x_y_List
    return np.sqrt(np.sum(weighted ** 2))
def EP_Sum(Sigma_x_y_List):
    """Uncertainty of a sum/difference: the sigmas added in quadrature."""
    sigmas = np.asarray(Sigma_x_y_List)
    return np.sqrt(np.sum(sigmas ** 2))
def EP_MulDiv(z,x_y_List,Sigma_x_y_List):
    """Uncertainty of Z built from products/quotients of the x_y values.

    Standard propagation: Sigma_Z = z * sqrt(sum((sigma_i / x_i)**2)).

    BUG FIX: the relative errors are now summed in quadrature — ``np.sum``
    was missing, so an array (one term per input) was returned instead of a
    single sigma. This also makes the helper consistent with EP_Constant
    and EP_Sum, which both reduce with ``np.sum``.
    """
    Sigma_Z = z * np.sqrt(np.sum(np.power(Sigma_x_y_List / x_y_List, 2)))
    return Sigma_Z
def EP_Powers(z,x_y_List,Sigma_x_y_List,a_b_List):
    """Uncertainty of Z built from powers, Z ~ prod(x_i ** a_i).

    Standard propagation: Sigma_Z = z * sqrt(sum((a_i * sigma_i / x_i)**2)).

    BUG FIX: the terms are now summed in quadrature — ``np.sum`` was
    missing, so an array rather than a single sigma was returned. This
    matches the companion EP_Constant / EP_Sum / EP_MulDiv helpers.
    """
    Sigma_Z = z * np.sqrt(np.sum(np.power(a_b_List * Sigma_x_y_List / x_y_List, 2)))
    return Sigma_Z
def EP_log10(x,Sigma_X,m):
    """Uncertainty of Z = m * log10(x): Sigma_Z = m * Sigma_X / (ln(10) * x).

    Uses the module-wide 2.303 approximation of ln(10).
    """
    ln10 = 2.303
    return m * Sigma_X / (ln10 * x)
def HMS2deg(ra='', dec=''):
    """Convert sexagesimal coordinate strings to decimal degrees.

    ra  : 'H M S' string (hours, minutes, seconds).
    dec : 'D M S' string (degrees, arcminutes, arcseconds).
    The sign is taken from the leading token only; M and S are assumed
    non-negative. Returns (RA, DEC) strings when both are given, otherwise
    whichever single value was requested (empty string if neither).
    """
    RA = ''
    DEC = ''
    if dec:
        d, m, s = [float(tok) for tok in dec.split()]
        sign = 1
        if str(d)[0] == '-':
            sign, d = -1, abs(d)
        DEC = '{0}'.format(sign * (d + (m / 60) + (s / 3600)))
    if ra:
        h, m, s = [float(tok) for tok in ra.split()]
        sign = 1
        if str(h)[0] == '-':
            sign, h = -1, abs(h)
        RA = '{0}'.format(sign * ((h * 15) + (m / 4) + (s / 240)))
    if ra and dec:
        return (RA, DEC)
    return RA or DEC
def Te_ne_Diagnostics():
    """Return the (temperature, density) PyNeb diagnostic label lists."""
    temperature_diags = [
        "[ArIII] 5192/7136",
        "[ArIII] 5192/7300+",
        "[NII] 5755/6584",
        "[NII] 5755/6584+",
        "[OIII] 4363/5007",
        "[OIII] 4363/5007+",
        "[SIII] 6312/9069",
        "[SIII] 6312/9200+",
    ]
    density_diags = [
        "[ArIV] 4740/4711",
        "[ClIII] 5538/5518",
        "[FeIII] 4987/4659",
        "[FeIII] 4987/4703",
        "[FeIII] 4987/4882",
        "[OII] 3726/3729",
        "[SII] 6731/6716",
    ]
    return temperature_diags, density_diags
def Bilinear_Interpolation(X, Y, Q_11, Q_12, Q_21, Q_22):
    """Bilinear interpolation of a value at (X, Y) from four corner samples.

    Each Q_ij is a 3-element sequence [value, x, y]; Q_11 and Q_22 define
    the opposite corners of the rectangle.

    FIXES: ``1 /`` changed to ``1.0 /`` so integer corner coordinates do
    not truncate to zero under Python 2 division; py3-compatible print.
    The size warning still does not abort, matching historical behaviour.
    """
    if len(Q_11) != 3 or len(Q_12) != 3 or len(Q_21) != 3 or len(Q_22) != 3:
        print("WARNING: Elements do not have the right size")
    x_1 = Q_11[1]
    x_2 = Q_22[1]
    y_1 = Q_11[2]
    y_2 = Q_22[2]
    Q = 1.0 / ((x_2 - x_1) * (y_2 - y_1)) * (
        Q_11[0] * (x_2 - X) * (y_2 - Y)
        + Q_21[0] * (X - x_1) * (y_2 - Y)
        + Q_12[0] * (x_2 - X) * (Y - y_1)
        + Q_22[0] * (X - x_1) * (Y - y_1)
    )
    return Q
# def AB_to_Erg(Flujo):
#
# a = np.array([1,2,3])
#
# if type(Flujo) == "list" or type(Flujo) == type(a):
#
# Erg_list = []
#
# for i in range(len(Flujo)):
# AB = Flujo[i]
#
# if type(AB) != "float":
# AB = float(AB)
#
# Erg = math.pow(10,-0.4*(AB+48.6))
#
# Erg_list.append(Erg)
#
# return Erg_list
#
# if type(Flujo) == float:
#
# AB = Flujo
#
# Erg = math.pow(10,-0.4*(AB+48.6))
#
# return Erg
#
# def Erg_to_AB(Flujo):
#
# a = np.array([1,2,3])
#
# if type(Flujo) == "list" or type(Flujo) == type(a):
# print "Echo"
#
# AB_list = []
#
# for i in range(len(Flujo)):
# Erg = Flujo[i]
#
# if type(Erg) != "float":
# Erg = float(Erg)
#
# AB = -2.5 * math.log10(Erg) - 48.6
#
# AB_list.append(Erg)
#
# return AB_list
#
# if type(Flujo) == float:
#
# Erg = Flujo
#
# AB = - 2.5 * math.log10(Erg) - 48.6
#
# return AB
# def AB_to_Erg(Flujo):
#
# a = np.array([1,2,3])
#
# if type(Flujo) == "list" or type(Flujo) == type(a):
#
# Erg_list = []
#
# for i in range(len(Flujo)):
# AB = Flujo[i]
#
# if type(AB) != "float":
# AB = float(AB)
#
# Erg = math.pow(10,-0.4*(AB+48.6))
#
# Erg_list.append(Erg)
#
# return Erg_list
#
# if type(Flujo) == float:
#
# AB = Flujo
#
# Erg = math.pow(10,-0.4*(AB+48.6))
#
# return Erg
#
# def Erg_to_AB(Flujo):
#
# a = np.array([1,2,3])
#
# if type(Flujo) == "list" or type(Flujo) == type(a):
# print "Echo"
#
# AB_list = []
#
# for i in range(len(Flujo)):
# Erg = Flujo[i]
#
# if type(Erg) != "float":
# Erg = float(Erg)
#
# AB = -2.5 * math.log10(Erg) - 48.6
#
# AB_list.append(Erg)
#
# return AB_list
#
# if type(Flujo) == float:
#
# Erg = Flujo
#
# AB = - 2.5 * math.log10(Erg) - 48.6
#
# return AB
|
|
"""Matrix factorization with Sparse PCA"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import numpy as np
from ..utils import check_random_state, check_array
from ..linear_model import ridge_regression
from ..base import BaseEstimator, TransformerMixin
from .dict_learning import dict_learning, dict_learning_online
class SparsePCA(BaseEstimator, TransformerMixin):
    """Sparse Principal Components Analysis (SparsePCA)

    Finds the set of sparse components that can optimally reconstruct
    the data. The amount of sparseness is controllable by the coefficient
    of the L1 penalty, given by the parameter alpha.

    Parameters
    ----------
    n_components : int,
        Number of sparse atoms to extract.
    alpha : float,
        Sparsity controlling parameter. Higher values lead to sparser
        components.
    ridge_alpha : float,
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.
    max_iter : int,
        Maximum number of iterations to perform.
    tol : float,
        Tolerance for the stopping condition.
    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    n_jobs : int,
        Number of parallel jobs to run.
    U_init : array of shape (n_samples, n_components),
        Initial values for the loadings for warm restart scenarios.
    V_init : array of shape (n_components, n_features),
        Initial values for the components for warm restart scenarios.
    verbose :
        Degree of verbosity of the printed output.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Sparse components extracted from the data.
    error_ : array
        Vector of errors at each iteration.
    n_iter_ : int
        Number of iterations run.

    See also
    --------
    PCA
    MiniBatchSparsePCA
    DictionaryLearning
    """

    def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                 max_iter=1000, tol=1e-8, method='lars', n_jobs=1, U_init=None,
                 V_init=None, verbose=False, random_state=None):
        # Parameters are stored untouched, per the scikit-learn estimator
        # contract (validation happens in fit, not in __init__).
        self.n_components = n_components
        self.alpha = alpha
        self.ridge_alpha = ridge_alpha
        self.max_iter = max_iter
        self.tol = tol
        self.method = method
        self.n_jobs = n_jobs
        self.U_init = U_init
        self.V_init = V_init
        self.verbose = verbose
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        # Sparse PCA is solved as a dictionary-learning problem on X.T;
        # the warm-start matrices are therefore transposed as well.
        code_init = self.V_init.T if self.V_init is not None else None
        dict_init = self.U_init.T if self.U_init is not None else None
        Vt, _, E, self.n_iter_ = dict_learning(X.T, n_components, self.alpha,
                                               tol=self.tol,
                                               max_iter=self.max_iter,
                                               method=self.method,
                                               n_jobs=self.n_jobs,
                                               verbose=self.verbose,
                                               random_state=random_state,
                                               code_init=code_init,
                                               dict_init=dict_init,
                                               return_n_iter=True
                                               )
        # The learned "code" (transposed back) holds the sparse components.
        self.components_ = Vt.T
        self.error_ = E
        return self

    def transform(self, X, ridge_alpha=None):
        """Least Squares projection of the data onto the sparse components.

        To avoid instability issues in case the system is under-determined,
        regularization can be applied (Ridge regression) via the
        `ridge_alpha` parameter.

        Note that Sparse PCA components orthogonality is not enforced as in PCA
        hence one cannot use a simple linear projection.

        Parameters
        ----------
        X: array of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.
        ridge_alpha: float, default: 0.01
            Amount of ridge shrinkage to apply in order to improve
            conditioning. Defaults to self.ridge_alpha when None.

        Returns
        -------
        X_new array, shape (n_samples, n_components)
            Transformed data.
        """
        X = check_array(X)
        ridge_alpha = self.ridge_alpha if ridge_alpha is None else ridge_alpha
        U = ridge_regression(self.components_.T, X.T, ridge_alpha,
                             solver='cholesky')
        # Rescale each projected component to unit norm; guard against
        # division by zero for components that were never activated.
        s = np.sqrt((U ** 2).sum(axis=0))
        s[s == 0] = 1
        U /= s
        return U
class MiniBatchSparsePCA(SparsePCA):
    """Mini-batch Sparse Principal Components Analysis

    Finds the set of sparse components that can optimally reconstruct
    the data. The amount of sparseness is controllable by the coefficient
    of the L1 penalty, given by the parameter alpha.

    Parameters
    ----------
    n_components : int,
        number of sparse atoms to extract
    alpha : int,
        Sparsity controlling parameter. Higher values lead to sparser
        components.
    ridge_alpha : float,
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.
    n_iter : int,
        number of iterations to perform for each mini batch
    callback : callable,
        callable that gets invoked every five iterations
    batch_size : int,
        the number of features to take in each mini batch
    verbose :
        degree of output the procedure will print
    shuffle : boolean,
        whether to shuffle the data before splitting it in batches
    n_jobs : int,
        number of parallel jobs to run, or -1 to autodetect.
    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Sparse components extracted from the data.
    error_ : array
        Vector of errors at each iteration.
    n_iter_ : int
        Number of iterations run.

    See also
    --------
    PCA
    SparsePCA
    DictionaryLearning
    """

    def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                 n_iter=100, callback=None, batch_size=3, verbose=False,
                 shuffle=True, n_jobs=1, method='lars', random_state=None):
        # Note: intentionally does not call SparsePCA.__init__ — the online
        # variant has its own parameter set (n_iter/batch_size instead of
        # max_iter/tol).
        self.n_components = n_components
        self.alpha = alpha
        self.ridge_alpha = ridge_alpha
        self.n_iter = n_iter
        self.callback = callback
        self.batch_size = batch_size
        self.verbose = verbose
        self.shuffle = shuffle
        self.n_jobs = n_jobs
        self.method = method
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        # Online dictionary learning on X.T; the returned code (transposed)
        # holds the sparse components. Inherits transform() from SparsePCA.
        Vt, _, self.n_iter_ = dict_learning_online(
            X.T, n_components, alpha=self.alpha,
            n_iter=self.n_iter, return_code=True,
            dict_init=None, verbose=self.verbose,
            callback=self.callback,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            n_jobs=self.n_jobs, method=self.method,
            random_state=random_state,
            return_n_iter=True
        )
        self.components_ = Vt.T
        return self
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Dang Mai <contact@dangmai.net>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Generates smart playlists based on beets queries.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import ui
from beets.util import mkdirall, normpath, syspath, bytestring_path
from beets.library import Item, Album, parse_query_string
from beets.dbcore import OrQuery
from beets.dbcore.query import MultipleSort, ParsingError
import os
import six
class SmartPlaylistPlugin(BeetsPlugin):
    """Generates .m3u playlist files from beets queries configured under
    the ``smartplaylist`` section.

    Playlists are tracked as tuples (name, (query, sort),
    (album_query, album_sort)) split across two sets:
    _unmatched_playlists (not yet known to need regeneration) and
    _matched_playlists (queued for writing by update_playlists).
    """

    def __init__(self):
        super(SmartPlaylistPlugin, self).__init__()
        self.config.add({
            'relative_to': None,
            'playlist_dir': u'.',
            'auto': True,
            'playlists': []
        })
        # Populated lazily by build_queries(); None means "not built yet".
        self._matched_playlists = None
        self._unmatched_playlists = None
        if self.config['auto']:
            self.register_listener('database_change', self.db_change)

    def commands(self):
        """Expose the ``splupdate`` CLI subcommand."""
        spl_update = ui.Subcommand(
            'splupdate',
            help=u'update the smart playlists. Playlist names may be '
                 u'passed as arguments.'
        )
        spl_update.func = self.update_cmd
        return [spl_update]

    def update_cmd(self, lib, opts, args):
        """Handle ``beet splupdate [NAME...]``: regenerate the named
        playlists, or all of them when no names are given."""
        self.build_queries()
        if args:
            args = set(ui.decargs(args))
            # Accept names both with and without the .m3u extension.
            for a in list(args):
                if not a.endswith(".m3u"):
                    args.add("{0}.m3u".format(a))
            playlists = set((name, q, a_q)
                            for name, q, a_q in self._unmatched_playlists
                            if name in args)
            if not playlists:
                raise ui.UserError(
                    u'No playlist matching any of {0} found'.format(
                        [name for name, _, _ in self._unmatched_playlists])
                )
            self._matched_playlists = playlists
            self._unmatched_playlists -= playlists
        else:
            self._matched_playlists = self._unmatched_playlists
        self.update_playlists(lib)

    def build_queries(self):
        """
        Instantiate queries for the playlists.

        Each playlist has 2 queries: one for items, one for albums, each
        with a sort. We must also remember its name. _unmatched_playlists
        is a set of tuples (name, (q, q_sort), (album_q, album_q_sort)).

        sort may be any sort, or NullSort, or None. None and NullSort are
        equivalent and both eval to False.
        More precisely
        - it will be NullSort when a playlist query ('query' or 'album_query')
          is a single item or a list with 1 element
        - it will be None when there are multiple items in a query
        """
        self._unmatched_playlists = set()
        self._matched_playlists = set()
        for playlist in self.config['playlists'].get(list):
            if 'name' not in playlist:
                self._log.warn(u"playlist configuration is missing name")
                continue
            playlist_data = (playlist['name'],)
            try:
                for key, Model in (('query', Item), ('album_query', Album)):
                    qs = playlist.get(key)
                    if qs is None:
                        query_and_sort = None, None
                    elif isinstance(qs, six.string_types):
                        query_and_sort = parse_query_string(qs, Model)
                    elif len(qs) == 1:
                        query_and_sort = parse_query_string(qs[0], Model)
                    else:
                        # multiple queries and sorts: OR the queries together
                        # and flatten/merge all their sorts.
                        queries, sorts = zip(*(parse_query_string(q, Model)
                                               for q in qs))
                        query = OrQuery(queries)
                        final_sorts = []
                        for s in sorts:
                            if s:
                                if isinstance(s, MultipleSort):
                                    final_sorts += s.sorts
                                else:
                                    final_sorts.append(s)
                        if not final_sorts:
                            sort = None
                        elif len(final_sorts) == 1:
                            sort, = final_sorts
                        else:
                            sort = MultipleSort(final_sorts)
                        query_and_sort = query, sort
                    playlist_data += (query_and_sort,)
            except ParsingError as exc:
                self._log.warn(u"invalid query in playlist {}: {}",
                               playlist['name'], exc)
                continue
            self._unmatched_playlists.add(playlist_data)

    def matches(self, model, query, album_query):
        """Return True when the changed model is selected by the playlist's
        item query (for Items) or album query (for Albums)."""
        if album_query and isinstance(model, Album):
            return album_query.match(model)
        if query and isinstance(model, Item):
            return query.match(model)
        return False

    def db_change(self, lib, model):
        """database_change listener: queue every playlist whose query matches
        the changed model, and defer the actual write to CLI exit."""
        if self._unmatched_playlists is None:
            self.build_queries()
        for playlist in self._unmatched_playlists:
            n, (q, _), (a_q, _) = playlist
            if self.matches(model, q, a_q):
                self._log.debug(
                    u"{0} will be updated because of {1}", n, model)
                self._matched_playlists.add(playlist)
                self.register_listener('cli_exit', self.update_playlists)
        self._unmatched_playlists -= self._matched_playlists

    def update_playlists(self, lib):
        """Write one .m3u file per (template-expanded) playlist name for
        every playlist queued in _matched_playlists."""
        self._log.info(u"Updating {0} smart playlists...",
                       len(self._matched_playlists))
        playlist_dir = self.config['playlist_dir'].as_filename()
        playlist_dir = bytestring_path(playlist_dir)
        relative_to = self.config['relative_to'].get()
        if relative_to:
            relative_to = normpath(relative_to)
        for playlist in self._matched_playlists:
            name, (query, q_sort), (album_query, a_q_sort) = playlist
            self._log.debug(u"Creating playlist {0}", name)
            items = []
            if query:
                items.extend(lib.items(query, q_sort))
            if album_query:
                for album in lib.albums(album_query, a_q_sort):
                    items.extend(album.items())
            m3us = {}
            # As we allow tags in the m3u names, we'll need to iterate through
            # the items and generate the correct m3u file names.
            for item in items:
                m3u_name = item.evaluate_template(name, True)
                if m3u_name not in m3us:
                    m3us[m3u_name] = []
                item_path = item.path
                if relative_to:
                    item_path = os.path.relpath(item.path, relative_to)
                if item_path not in m3us[m3u_name]:
                    m3us[m3u_name].append(item_path)
            # Now iterate through the m3us that we need to generate
            for m3u in m3us:
                m3u_path = normpath(os.path.join(playlist_dir,
                                                 bytestring_path(m3u)))
                mkdirall(m3u_path)
                with open(syspath(m3u_path), 'wb') as f:
                    for path in m3us[m3u]:
                        f.write(path + b'\n')
        self._log.info(u"{0} playlists updated", len(self._matched_playlists))
|
|
import os
import sys
import struct
MY_PATH, _ = os.path.split(os.path.realpath(__file__))
ARSDK_PATH=os.path.join(MY_PATH,'..', 'arsdk-xml')
ARCOMMANDS_PATH=os.path.join(ARSDK_PATH, 'xml')
sys.path.append(ARSDK_PATH)
import arsdkparser
# Build one shared parser context from every ARSDK XML description:
# generic.xml is parsed first, then the remaining feature files in sorted
# order, and finally the features are resolved/finalized.
_ctx = arsdkparser.ArParserCtx()
arsdkparser.parse_xml(_ctx, os.path.join(ARCOMMANDS_PATH, 'generic.xml'))
for f in sorted(os.listdir(ARCOMMANDS_PATH)):
    # Skip non-XML files and generic.xml (already parsed above).
    if not f.endswith('.xml') or f == 'generic.xml':
        continue
    arsdkparser.parse_xml(_ctx, os.path.join(ARCOMMANDS_PATH, f))
arsdkparser.finalize_ftrs(_ctx)
class CommandError(Exception):
    """Raised for unknown project/class/command names or malformed payloads."""
    def __init__(self, msg):
        # Stored on .value (not .args) — historical API kept for callers.
        self.value = msg
    def __str__(self):
        return repr(self.value)
# Maps ARSDK argument type names to struct format characters.
# 'z' is a local extension understood only by _struct_pack/_struct_unpack:
# a null-terminated string (the struct module has no such code).
_struct_fmt_for_type = {
    'u8' : 'B',
    'i8' : 'b',
    'u16' : 'H',
    'i16' : 'h',
    'u32' : 'I',
    'i32' : 'i',
    'u64' : 'Q',
    'i64' : 'q',
    'float' : 'f',
    'double' : 'd',
    'string' : 'z',
    'enum' : 'i',
}
def _format_string_for_cmd(cmd):
    """Build the little-endian struct format string for a command's arguments.

    Returns (fmt, has_args): fmt always starts with '<'; has_args tells the
    caller whether any packing/unpacking is needed at all.
    Raises Exception for multi-settings, which have no fixed wire format here.
    """
    ret = '<'
    for arg in cmd.args:
        if isinstance(arg.argType, arsdkparser.ArMultiSetting):
            raise Exception('Multisettings not supported !')
        elif isinstance(arg.argType, arsdkparser.ArBitfield):
            # Bitfields travel as their underlying integer type.
            arg_str_type = arsdkparser.ArArgType.TO_STRING[arg.argType.btfType]
        elif isinstance(arg.argType, arsdkparser.ArEnum):
            # Enums travel as signed 32-bit integers.
            arg_str_type = 'i32'
        else:
            arg_str_type = arsdkparser.ArArgType.TO_STRING[arg.argType]
        ret += _struct_fmt_for_type[arg_str_type]
    return ret, bool(cmd.args)
def _struct_pack(fmt, *args):
"""
like struct.pack(fmt, *args)
except that a 'z' format is supported to include null terminated strings
"""
nbarg = 0
real_fmt = ''
for c in fmt:
if c == 'z':
real_fmt += '%ds' % (len(args[nbarg])+1)
nbarg += 1
else:
real_fmt += c
if c in 'cbB?hHiIlLqQfdspP':
nbarg += 1
return struct.pack(real_fmt, *args)
def _struct_unpack(fmt, string):
"""
like struct.unpack(fmt, string)
except that a 'z' format is supported to read a null terminated string
"""
real_fmt=''
null_idx=[]
nbarg = 0
for i in range(len(fmt)):
c = fmt[i]
if c == 'z':
start = struct.calcsize(real_fmt)
strlen = string[start:].find('\0')
if strlen < 0:
raise CommandError('No null char in string')
real_fmt += '%dsB' % strlen
nbarg += 1
null_idx.append(nbarg)
nbarg += 1
else:
real_fmt += c
if c in 'cbB?hHiIlLqQfdspP':
nbarg += 1
content = struct.unpack(real_fmt, string)
ret = tuple([content[i] for i in range(len(content)) if i not in null_idx])
return ret
def pack_command(s_proj, s_cls, s_cmd, *args):
    """
    Pack a command into a string.

    Arguments:
    - s_proj : Name of the project
    - s_cls  : Name of the class within the project (ignored for features)
    - s_cmd  : Name of the command within the class
    - *args  : Arguments of the command.

    If the project, the class or the command can not be found in the command
    table, a CommandError will be raised.
    If the number and type of arguments in *args do not match the expected
    ones, a CommandError will be raised.

    Return the command string, the command recommended buffer and the command
    recommended timeout policy.
    """
    proj = None
    feat = None
    projid = 0
    cls = None
    clsid = 0  # stays 0 for features, which have no class level
    cmd = None
    # Let an exception be raised if we do not know the command or if the format is bad
    # Find the project (old-style project or new-style feature)
    if s_proj in _ctx.projectsByName:
        proj = _ctx.projectsByName[s_proj]
    elif s_proj in _ctx.featuresByName:
        feat = _ctx.featuresByName[s_proj]
    if proj is None and feat is None:
        raise CommandError('Unknown project ' + s_proj)
    if proj: # Project
        projid = proj.projectId
        # Find the class
        if s_cls in proj.classesByName:
            cls = proj.classesByName[s_cls]
        if cls is None:
            raise CommandError('Unknown class ' + s_cls + ' in project ' + s_proj)
        clsid = cls.classId
        # Find the command
        if s_cmd in cls.cmdsByName:
            cmd = cls.cmdsByName[s_cmd]
        if cmd is None:
            raise CommandError('Unknown command ' + s_cmd + ' in class ' + s_cls + ' of project ' + s_proj)
    elif feat: # Feature
        projid = feat.featureId
        # Find the command
        if s_cmd in feat.cmdsByName:
            cmd = feat.cmdsByName[s_cmd]
        if cmd is None:
            raise CommandError('Unknown command ' + s_cmd + ' in feature ' + s_proj)
    # Wire header: project/feature id, class id, command id (little-endian).
    ret = struct.pack('<BBH', projid, clsid, cmd.cmdId)
    argsfmt, needed = _format_string_for_cmd(cmd)
    if needed:
        # Translate packing failures into CommandError for the caller.
        try:
            ret += _struct_pack(argsfmt, *args)
        except IndexError:
            raise CommandError('Missing arguments')
        except TypeError:
            raise CommandError('Bad type for arguments')
        except struct.error:
            raise CommandError('Bad type for arguments')
    return ret, cmd.bufferType, cmd.timeoutPolicy
def unpack_command(buf):
    """
    Unpack a command string into a dictionary of arguments.

    Arguments:
    - buf : The packed command

    Return a dictionary describing the command, and a boolean indicating
    whether the command is known. If the boolean is False, then the
    dictionary is {}.

    Return dictionary format:
    {
        'name'     : full name of the command (project.class.command)
        'proj'     : project of the command
        'class'    : class of the command
        'cmd'      : command name
        'listtype' : list type (none/list/map) of the command
        'args'     : arguments in the commands, in the form { 'name':value, ... }
        'arg0'     : value of the first argument ('' if no arguments)
                     this is useful for map commands, as this will be the key.
    }

    A CommandError is raised if the command is in a bad format.
    """
    # Read the project/cls/cmd from the buffer (4-byte '<BBH' header)
    try:
        (i_proj, i_cls, i_cmd) = struct.unpack('<BBH', buf[:4])
    except struct.error:
        raise CommandError('Bad input buffer (not an ARCommand)')
    proj = None
    feat = None
    cls = None
    cmd = None
    # Let an exception be raised if we do not know the command or if the format is bad
    # Find the project
    if i_proj in _ctx.projectsById:
        proj = _ctx.projectsById[i_proj]
    # Or the feature
    if i_proj in _ctx.featuresById:
        feat = _ctx.featuresById[i_proj]
    # If project, Find the class
    if proj:
        if i_cls in proj.classesById:
            cls = proj.classesById[i_cls]
        else:
            return {}, False
        if i_cmd in cls.cmdsById:
            cmd = cls.cmdsById[i_cmd]
        else:
            return {}, False
    # If feature, find directly the command (commands and events share the id space)
    elif feat:
        if i_cmd in feat.cmdsById:
            cmd = feat.cmdsById[i_cmd]
        elif i_cmd in feat.evtsById:
            cmd = feat.evtsById[i_cmd]
        else:
            return {}, False
    else:
        return {}, False
    args = ()
    argsfmt, needed = _format_string_for_cmd(cmd)
    if needed:
        # Payload starts right after the 4-byte header.
        try:
            args = _struct_unpack(argsfmt, buf[4:])
        except struct.error:
            raise CommandError('Bad input buffers (arguments do not match the command)')
    ret = {
        'name' : '%s.%s.%s' % (proj.name if proj else feat.name, cls.name if cls else '', cmd.name),
        'proj' : proj.name if proj else feat.name,
        'class' : cls.name if cls else '',
        'cmd' : cmd.name,
        'listtype' : cmd.listType,
        'listtype_str' : arsdkparser.ArCmdListType.TO_STRING[cmd.listType],
        'args' : {},
        'arg0' : '',
    }
    # Name each positional value; the first one doubles as the map key.
    for i in range(len(args)):
        if i == 0:
            ret['arg0'] = args[0]
        ret['args'][cmd.args[i].name] = args[i]
    return ret, True
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import init_ops
# Returns true iff the two initializers produce the same tensor to
# within a tiny tolerance.
def identicaltest(tc, init1, init2, use_gpu):
    """Tests if two initializations are identical to within tiny tolerances.

    Args:
      tc: An instance of TensorFlowTestCase.
      init1: An Initializer that generates a tensor of a given shape
      init2: An Initializer that generates a tensor of a given shape
      use_gpu: Use gpu if true.
    Returns:
      True or False as determined by test.
    """
    length = 100
    # Evaluate each initializer in its own fresh graph so graph-level state
    # cannot leak between the two draws.
    with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
        first = init1([length]).eval()
    with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
        second = init2([length]).eval()
    return np.allclose(first, second, rtol=1e-15, atol=1e-15)
def duplicated_initializer(tc, init, use_gpu, graph_seed):
    """Tests duplicated random initializer within the same graph.

    This test generates two random kernels from the same initializer in the
    same graph and checks whether the results are close enough. Even given
    the same graph-level seed, two different instances of random kernels
    should generate different results.

    Args:
      tc: An instance of TensorFlowTestCase.
      init: An Initializer that generates a tensor of a given shape
      use_gpu: Use gpu if true.
      graph_seed: A graph-level seed to use.
    Returns:
      True or False as determined by test.
    """
    length = 100
    with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
        random_seed.set_random_seed(graph_seed)
        first = init([length]).eval()
        second = init([length]).eval()
    return np.allclose(first, second, rtol=1e-15, atol=1e-15)
def _init_sampler(tc, init, num, use_gpu):
    """Returns a func to generate a random tensor of shape [num].

    Args:
      tc: An instance of TensorFlowTestCase.
      init: An Initializer that generates a tensor of a given shape
      num: Size of 1D tensor to create.
      use_gpu: Use gpu if true.
    Returns:
      Function to generate a random tensor.
    """
    def sample():
        # Each call opens its own session so samples are drawn independently.
        with tc.test_session(use_gpu=use_gpu):
            return init([num]).eval()
    return sample
class ConstantInitializersTest(tf.test.TestCase):
    """Checks that zeros/ones/constant initializers fill variables as expected."""

    def testZerosInitializer(self):
        with self.test_session():
            shape = [2, 3]
            x = tf.get_variable("x", shape=shape, initializer=tf.zeros_initializer)
            x.initializer.run()
            self.assertAllEqual(x.eval(), np.zeros(shape))

    def testOnesInitializer(self):
        with self.test_session():
            shape = [2, 3]
            x = tf.get_variable("x", shape=shape, initializer=tf.ones_initializer)
            x.initializer.run()
            self.assertAllEqual(x.eval(), np.ones(shape))

    def testConstantZeroInitializer(self):
        with self.test_session():
            shape = [2, 3]
            x = tf.get_variable("x", shape=shape,
                                initializer=tf.constant_initializer(0.0))
            x.initializer.run()
            self.assertAllEqual(x.eval(), np.zeros(shape))

    def testConstantOneInitializer(self):
        with self.test_session():
            shape = [2, 3]
            x = tf.get_variable("x", shape=shape,
                                initializer=tf.constant_initializer(1.0))
            x.initializer.run()
            self.assertAllEqual(x.eval(), np.ones(shape))
class RandomNormalInitializationTest(tf.test.TestCase):
    """Seed/determinism and dtype-validation tests for random_normal_initializer."""

    def testInitializerIdentical(self):
        # Same op-level seed must reproduce the same values.
        for use_gpu in [False, True]:
            for dtype in [tf.float32, tf.float64]:
                init1 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
                init2 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
                self.assertTrue(identicaltest(self, init1, init2, use_gpu))

    def testInitializerDifferent(self):
        # Different op-level seeds must give different values.
        for use_gpu in [False, True]:
            for dtype in [tf.float32, tf.float64]:
                init1 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
                init2 = tf.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
                self.assertFalse(identicaltest(self, init1, init2, use_gpu=use_gpu))

    def testDuplicatedInitializer(self):
        # Two draws from one initializer in the same graph must differ.
        for use_gpu in [False, True]:
            init = tf.random_normal_initializer(0.0, 1.0)
            self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))

    def testInvalidDataType(self):
        self.assertRaises(
            ValueError,
            tf.random_normal_initializer, 0.0, 1.0, dtype=tf.string)
class TruncatedNormalInitializationTest(tf.test.TestCase):
    """Seed/determinism and dtype-validation tests for truncated_normal_initializer."""

    def testInitializerIdentical(self):
        for use_gpu in [False, True]:
            for dtype in [tf.float32, tf.float64]:
                init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
                init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
                self.assertTrue(identicaltest(self, init1, init2, use_gpu))

    def testInitializerDifferent(self):
        for use_gpu in [False, True]:
            for dtype in [tf.float32, tf.float64]:
                init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
                init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
                self.assertFalse(identicaltest(self, init1, init2, use_gpu=use_gpu))

    def testDuplicatedInitializer(self):
        for use_gpu in [False, True]:
            init = tf.truncated_normal_initializer(0.0, 1.0)
            self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))

    def testInvalidDataType(self):
        self.assertRaises(
            ValueError,
            tf.truncated_normal_initializer, 0.0, 1.0, dtype=tf.string)
class RandomUniformInitializationTest(tf.test.TestCase):
    """Seed/determinism and dtype-validation tests for random_uniform_initializer."""

    def testInitializerIdentical(self):
        for use_gpu in [False, True]:
            for dtype in [tf.float32, tf.float64]:
                init1 = tf.random_uniform_initializer(0.0, 1.0, seed=1, dtype=dtype)
                init2 = tf.random_uniform_initializer(0.0, 1.0, seed=1, dtype=dtype)
                self.assertTrue(identicaltest(self, init1, init2, use_gpu))

    def testInitializerDifferent(self):
        for use_gpu in [False, True]:
            for dtype in [tf.float32, tf.float64]:
                init1 = tf.random_uniform_initializer(0.0, 1.0, seed=1, dtype=dtype)
                init2 = tf.random_uniform_initializer(0.0, 1.0, seed=2, dtype=dtype)
                self.assertFalse(identicaltest(self, init1, init2, use_gpu))

    def testDuplicatedInitializer(self):
        for use_gpu in [False, True]:
            init = tf.random_uniform_initializer(0.0, 1.0)
            self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))

    def testInvalidDataType(self):
        self.assertRaises(
            ValueError,
            tf.random_uniform_initializer, 0.0, 1.0, dtype=tf.string)
class UniformUnitScalingInitializationTest(tf.test.TestCase):
    """Seed/determinism, factor and dtype tests for uniform_unit_scaling_initializer."""

    def testInitializerIdentical(self):
        for use_gpu in [False, True]:
            for dtype in [tf.float32, tf.float64]:
                init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
                init2 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
                self.assertTrue(identicaltest(self, init1, init2, use_gpu))
                # Same seed AND same scaling factor must also match.
                init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
                init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
                self.assertTrue(identicaltest(self, init3, init4, use_gpu))

    def testInitializerDifferent(self):
        for use_gpu in [False, True]:
            for dtype in [tf.float32, tf.float64]:
                init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
                init2 = tf.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
                init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
                # Differ by seed, by factor, or by both.
                self.assertFalse(identicaltest(self, init1, init2, use_gpu))
                self.assertFalse(identicaltest(self, init1, init3, use_gpu))
                self.assertFalse(identicaltest(self, init2, init3, use_gpu))

    def testDuplicatedInitializer(self):
        for use_gpu in [False, True]:
            init = tf.uniform_unit_scaling_initializer()
            self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))

    def testInvalidDataType(self):
        self.assertRaises(
            ValueError,
            tf.uniform_unit_scaling_initializer, dtype=tf.string)
class RandomWalkShapeTest(tf.test.TestCase):
  """Shape-inference check for the private random-walk initializer."""

  def testRandomWalk(self):
    # Fully known shape.
    # NOTE(review): exercises the non-public init_ops._random_walk helper;
    # only the statically inferred shape is asserted, not the values.
    rnd1 = init_ops._random_walk([1, 2], tf.nn.relu)
    self.assertEqual([1, 2], rnd1.get_shape())
# TODO(vrv): move to sequence_ops_test?
class RangeTest(tf.test.TestCase):
  """Checks tf.range against Python's built-in range semantics."""

  def _Range(self, start, limit, delta):
    # Build, shape-check, and evaluate a range op in one session.
    with self.test_session():
      result = tf.range(start, limit, delta, name="range")
      self.assertEqual([len(range(start, limit, delta))], result.get_shape())
      return result.eval()

  def testBasic(self):
    cases = [
        ((0, 5, 1), [0, 1, 2, 3, 4]),
        ((0, 5, 2), [0, 2, 4]),
        ((0, 6, 2), [0, 2, 4]),
        ((13, 32, 7), [13, 20, 27]),
        ((100, 500, 100), [100, 200, 300, 400]),
    ]
    for args, expected in cases:
      self.assertTrue(np.array_equal(self._Range(*args), np.array(expected)))
    # Integer arguments default to an int32 result.
    self.assertEqual(tf.range(0, 5, 1).dtype, tf.int32)

  def testLimitOnly(self):
    # A single argument is treated as the limit, starting from zero.
    with self.test_session():
      self.assertAllEqual(np.arange(5), tf.range(5).eval())

  def testEmpty(self):
    # start == limit yields an empty tensor regardless of start value.
    for start in (0, 5):
      self.assertTrue(np.array_equal(self._Range(start, start, 1), []))
# TODO(vrv): move to sequence_ops_test?
class LinSpaceTest(tf.test.TestCase):
  """Checks tf.linspace endpoints and interior spacing."""

  def _LinSpace(self, start, stop, num):
    # Build, shape-check, and evaluate a linspace op in one session.
    with self.test_session():
      result = tf.linspace(start, stop, num, name="linspace")
      self.assertEqual([num], result.get_shape())
      return result.eval()

  def _check(self, start, stop, expectations):
    # expectations[i] is the expected output for num == i + 1.
    for num, expected in enumerate(expectations, start=1):
      self.assertArrayNear(self._LinSpace(start, stop, num),
                           np.array(expected), 1e-5)

  def testPositive(self):
    self._check(1., 5., [[1.], [1., 5.], [1., 3., 5.],
                         [1., 7. / 3., 11. / 3., 5.]])

  def testNegative(self):
    self._check(-1., -5., [[-1.], [-1., -5.], [-1., -3., -5.],
                           [-1., -7. / 3., -11. / 3., -5.]])

  def testNegativeToPositive(self):
    self._check(-1., 5., [[-1.], [-1., 5.], [-1., 2., 5.],
                          [-1., 1., 3., 5.]])

  def testPoint(self):
    # Degenerate interval: every sample equals the single endpoint.
    self._check(5., 5., [[5.], [5.] * 2, [5.] * 3, [5.] * 4])
class DeviceTest(tf.test.TestCase):
  """Verifies device placement of variables and their initializer ops."""

  def testNoDevice(self):
    with tf.Graph().as_default():
      var = tf.Variable([[1.0, 1.0]])
      # With no device scope, neither the variable nor its initializer
      # carries a placement.
      self.assertDeviceEqual(None, var.device)
      self.assertDeviceEqual(None, var.initializer.device)

  def testDevice(self):
    with tf.Graph().as_default():
      with tf.device("/job:ps"):
        var = tf.Variable([[1.0, 1.0]])
      # The device scope applies to both the variable and its initializer.
      self.assertDeviceEqual("/job:ps", var.device)
      self.assertDeviceEqual("/job:ps", var.initializer.device)
# Run every TestCase in this module via the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
|
|
import copy
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.directnotify import DirectNotifyGlobal
from direct.directtools.DirectGeometry import CLAMP
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.interval.IntervalGlobal import *
from direct.task import Task
import math
from pandac.PandaModules import *
import DistributedSuitPlanner
import Suit
import SuitBase
import SuitDNA
import SuitDialog
import SuitTimings
from otp.avatar import DistributedAvatar
from otp.otpbase import OTPGlobals
from toontown.battle import BattleProps
from toontown.battle import DistributedBattle
from toontown.chat.ChatGlobals import *
from toontown.nametag.NametagGlobals import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownBattleGlobals
from toontown.toonbase import ToontownGlobals
class DistributedSuitBase(DistributedAvatar.DistributedAvatar, Suit.Suit, SuitBase.SuitBase):
    """Client-side base class for distributed suit (Cog) avatars.

    Combines the networked-avatar behavior of DistributedAvatar with the
    model/animation handling of the Suit and SuitBase mixins.  Provides
    battle detection collision setup, floor-raycast support, the propeller
    fly-in/fly-out movement sequence, and HP/"silly surge" text display.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSuitBase')

    def __init__(self, cr):
        # Guard so repeated __init__ calls through the mixin diamond run
        # the body only once.
        try:
            self.DistributedSuitBase_initialized
            return
        except:
            self.DistributedSuitBase_initialized = 1

        DistributedAvatar.DistributedAvatar.__init__(self, cr)
        Suit.Suit.__init__(self)
        SuitBase.SuitBase.__init__(self)
        self.activeShadow = 0
        self.virtual = 0
        # Name of the battle-detect collision event, set by
        # enableBattleDetect(); None while detection is off.
        self.battleDetectName = None
        # Floor-raycast collision plumbing (see initializeBodyCollisions).
        self.cRay = None
        self.cRayNode = None
        self.cRayNodePath = None
        self.cRayBitMask = None
        self.lifter = None
        self.cTrav = None
        # NOTE(review): self.sp is presumably the suit planner, assigned by
        # subclasses/server updates — confirm against DistributedSuitPlanner.
        self.sp = None
        self.fsm = None
        # Propeller prop and sounds used by beginSupaFlyMove().
        self.prop = None
        self.propInSound = None
        self.propOutSound = None
        self.reparentTo(hidden)
        self.loop('neutral')
        self.skeleRevives = 0
        self.maxSkeleRevives = 0
        self.sillySurgeText = False
        self.interactivePropTrackBonus = -1
        return

    def setVirtual(self, virtual):
        # Intentionally a no-op in the base class; see getVirtual().
        pass

    def getVirtual(self):
        return 0

    def setSkeleRevives(self, num):
        """Record the skelecog-revive count and refresh the display name."""
        if num == None:
            num = 0
        self.skeleRevives = num
        if num > self.maxSkeleRevives:
            self.maxSkeleRevives = num
        if self.getSkeleRevives() > 0:
            # Suits with revives remaining show the "v2.0"-style postfix.
            nameInfo = TTLocalizer.SuitBaseNameWithLevel % {'name': self.name,
             'dept': self.getStyleDept(),
             'level': '%s%s' % (self.getActualLevel(), TTLocalizer.SkeleRevivePostFix)}
            self.setDisplayName(nameInfo)
        else:
            nameInfo = TTLocalizer.SuitBaseNameWithLevel % {'name': self.name,
             'dept': self.getStyleDept(),
             'level': self.getActualLevel()}
            self.setDisplayName(nameInfo)
        return

    def getSkeleRevives(self):
        return self.skeleRevives

    def getMaxSkeleRevives(self):
        return self.maxSkeleRevives

    def generate(self):
        DistributedAvatar.DistributedAvatar.generate(self)

    def disable(self):
        """Tear down event hooks, collisions and tasks when leaving interest."""
        self.notify.debug('DistributedSuit %d: disabling' % self.getDoId())
        self.ignoreAll()
        self.__removeCollisionData()
        self.cleanupLoseActor()
        self.stop()
        taskMgr.remove(self.uniqueName('blink-task'))
        DistributedAvatar.DistributedAvatar.disable(self)

    def delete(self):
        # Guard so the delete body runs only once.
        try:
            self.DistributedSuitBase_deleted
        except:
            self.DistributedSuitBase_deleted = 1
            self.notify.debug('DistributedSuit %d: deleting' % self.getDoId())
            del self.dna
            del self.sp
            DistributedAvatar.DistributedAvatar.delete(self)
            Suit.Suit.delete(self)
            SuitBase.SuitBase.delete(self)

    def setDNAString(self, dnaString):
        Suit.Suit.setDNAString(self, dnaString)

    def setDNA(self, dna):
        Suit.Suit.setDNA(self, dna)

    def getHP(self):
        return self.currHP

    def setHP(self, hp):
        """Set current HP, clamped to maxHP (no lower-bound clamp here)."""
        if hp > self.maxHP:
            self.currHP = self.maxHP
        else:
            self.currHP = hp
        return None

    def getDialogueArray(self, *args):
        return Suit.Suit.getDialogueArray(self, *args)

    def __removeCollisionData(self):
        # Drop all raycast references; actual node removal happens in
        # disableBodyCollisions().
        self.enableRaycast(0)
        self.cRay = None
        self.cRayNode = None
        self.cRayNodePath = None
        self.lifter = None
        self.cTrav = None
        return

    def setHeight(self, height):
        Suit.Suit.setHeight(self, height)

    def getRadius(self):
        return Suit.Suit.getRadius(self)

    def setLevelDist(self, level):
        """Distributed-field handler: apply the server-assigned level."""
        if self.notify.getDebug():
            self.notify.debug('Got level %d from server for suit %d' % (level, self.getDoId()))
        self.setLevel(level)

    def attachPropeller(self):
        """Lazily create the propeller prop/sounds and parent it to the head."""
        if self.prop == None:
            self.prop = BattleProps.globalPropPool.getProp('propeller')
        if self.propInSound == None:
            self.propInSound = base.loadSfx('phase_5/audio/sfx/ENC_propeller_in.ogg')
        if self.propOutSound == None:
            self.propOutSound = base.loadSfx('phase_5/audio/sfx/ENC_propeller_out.ogg')
        if base.config.GetBool('want-new-cogs', 0):
            # New-cog models name the attachment joint differently.
            head = self.find('**/to_head')
            if head.isEmpty():
                head = self.find('**/joint_head')
        else:
            head = self.find('**/joint_head')
        self.prop.reparentTo(head)
        return

    def detachPropeller(self):
        """Destroy the propeller prop and release the sound references."""
        if self.prop:
            self.prop.cleanup()
            self.prop.removeNode()
            self.prop = None
        if self.propInSound:
            self.propInSound = None
        if self.propOutSound:
            self.propOutSound = None
        return

    def beginSupaFlyMove(self, pos, moveIn, trackName, walkAfterLanding=True):
        """Build the interval for a suit flying in to (or out from) pos.

        moveIn selects fly-in (land at pos) versus fly-out (take off from
        pos).  Returns a Parallel interval combining position lerp, drop
        shadow, transparency fade, landing/takeoff animation and the
        propeller animation with its sound.
        """
        skyPos = Point3(pos)
        # Start/end altitude is chosen so the descent/ascent takes the
        # canonical SuitTimings duration at SuitWalkSpeed.
        if moveIn:
            skyPos.setZ(pos.getZ() + SuitTimings.fromSky * ToontownGlobals.SuitWalkSpeed)
        else:
            skyPos.setZ(pos.getZ() + SuitTimings.toSky * ToontownGlobals.SuitWalkSpeed)
        # Frame 28 of the landing animation is the touchdown moment.
        groundF = 28
        dur = self.getDuration('landing')
        fr = self.getFrameRate('landing')
        if fr:
            animTimeInAir = groundF / fr
        else:
            animTimeInAir = groundF
        impactLength = dur - animTimeInAir
        timeTillLanding = SuitTimings.fromSky - impactLength
        waitTime = timeTillLanding - animTimeInAir
        if self.prop == None:
            self.prop = BattleProps.globalPropPool.getProp('propeller')
        propDur = self.prop.getDuration('propeller')
        # Frames up to lastSpinFrame are the spinning loop; later frames
        # fold the propeller away.
        lastSpinFrame = 8
        fr = self.prop.getFrameRate('propeller')
        spinTime = lastSpinFrame / fr
        openTime = (lastSpinFrame + 1) / fr
        if moveIn:
            lerpPosTrack = Sequence(self.posInterval(timeTillLanding, pos, startPos=skyPos), Wait(impactLength))
            shadowScale = self.dropShadow.getScale()
            # Shadow grows from a point on the ground as the suit descends,
            # then is re-parented back to the shadow joint.
            shadowTrack = Sequence(Func(self.dropShadow.reparentTo, render), Func(self.dropShadow.setPos, pos), self.dropShadow.scaleInterval(timeTillLanding, self.scale, startScale=Vec3(0.01, 0.01, 1.0)), Func(self.dropShadow.reparentTo, self.getShadowJoint()), Func(self.dropShadow.setPos, 0, 0, 0), Func(self.dropShadow.setScale, shadowScale))
            fadeInTrack = Sequence(Func(self.setTransparency, 1), self.colorScaleInterval(1, colorScale=VBase4(1, 1, 1, 1), startColorScale=VBase4(1, 1, 1, 0)), Func(self.clearColorScale), Func(self.clearTransparency))
            animTrack = Sequence(Func(self.pose, 'landing', 0), Wait(waitTime), ActorInterval(self, 'landing', duration=dur))
            if walkAfterLanding:
                animTrack.append(Func(self.loop, 'walk'))
            self.attachPropeller()
            propTrack = Parallel(SoundInterval(self.propInSound, duration=waitTime + dur, node=self), Sequence(ActorInterval(self.prop, 'propeller', constrainedLoop=1, duration=waitTime + spinTime, startTime=0.0, endTime=spinTime), ActorInterval(self.prop, 'propeller', duration=propDur - openTime, startTime=openTime), Func(self.detachPropeller)))
            return Parallel(lerpPosTrack, shadowTrack, fadeInTrack, animTrack, propTrack, name=self.taskName('trackName'))
        else:
            # Fly-out: play everything in reverse order and hide at the end.
            lerpPosTrack = Sequence(Wait(impactLength), LerpPosInterval(self, timeTillLanding, skyPos, startPos=pos))
            shadowTrack = Sequence(Func(self.dropShadow.reparentTo, render), Func(self.dropShadow.setPos, pos), self.dropShadow.scaleInterval(timeTillLanding, Vec3(0.01, 0.01, 1.0), startScale=self.scale), Func(self.dropShadow.reparentTo, self.getShadowJoint()), Func(self.dropShadow.setPos, 0, 0, 0))
            fadeOutTrack = Sequence(Func(self.setTransparency, 1), self.colorScaleInterval(1, colorScale=VBase4(1, 1, 1, 0), startColorScale=VBase4(1, 1, 1, 1)), Func(self.clearColorScale), Func(self.clearTransparency), Func(self.reparentTo, hidden))
            actInt = ActorInterval(self, 'landing', loop=0, startTime=dur, endTime=0.0)
            self.attachPropeller()
            self.prop.hide()
            propTrack = Parallel(SoundInterval(self.propOutSound, duration=waitTime + dur, node=self), Sequence(Func(self.prop.show), ActorInterval(self.prop, 'propeller', endTime=openTime, startTime=propDur), ActorInterval(self.prop, 'propeller', constrainedLoop=1, duration=propDur - openTime, startTime=spinTime, endTime=0.0), Func(self.detachPropeller)))
            return Parallel(ParallelEndTogether(lerpPosTrack, shadowTrack, fadeOutTrack), actInt, propTrack, name=self.taskName('trackName'))
        return

    def enableBattleDetect(self, name, handler):
        """Attach a collision tube that fires handler when a toon touches it."""
        if self.collTube:
            self.battleDetectName = self.taskName(name)
            self.collNode = CollisionNode(self.battleDetectName)
            self.collNode.addSolid(self.collTube)
            self.collNodePath = self.attachNewNode(self.collNode)
            self.collNode.setCollideMask(ToontownGlobals.WallBitmask)
            self.accept('enter' + self.battleDetectName, handler)
        return Task.done

    def disableBattleDetect(self):
        if self.battleDetectName:
            self.ignore('enter' + self.battleDetectName)
            self.battleDetectName = None
        if self.collNodePath:
            self.collNodePath.removeNode()
            self.collNodePath = None
        return

    def enableRaycast(self, enable = 1):
        """Add or remove the floor ray from the collision traverser."""
        if not self.cTrav or not hasattr(self, 'cRayNode') or not self.cRayNode:
            return
        # Remove first so toggling on twice doesn't double-register.
        self.cTrav.removeCollider(self.cRayNodePath)
        if enable:
            if self.notify.getDebug():
                self.notify.debug('enabling raycast')
            self.cTrav.addCollider(self.cRayNodePath, self.lifter)
        elif self.notify.getDebug():
            self.notify.debug('disabling raycast')

    def b_setBrushOff(self, index):
        # Broadcast variant: apply locally and send to the server.
        self.setBrushOff(index)
        self.d_setBrushOff(index)

    def d_setBrushOff(self, index):
        self.sendUpdate('setBrushOff', [index])

    def setBrushOff(self, index):
        # Display the indexed "brush off" chat line for this suit's style.
        self.setChatAbsolute(SuitDialog.getBrushOffText(self.getStyleName(), index), CFSpeech | CFTimeout)

    def initializeBodyCollisions(self, collIdStr):
        """Set up pie-hit mask and the downward floor ray with its lifter."""
        DistributedAvatar.DistributedAvatar.initializeBodyCollisions(self, collIdStr)
        if not self.ghostMode:
            self.collNode.setCollideMask(self.collNode.getIntoCollideMask() | ToontownGlobals.PieBitmask)
        # Ray starts above the avatar and points straight down to find floor.
        self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
        self.cRayNode = CollisionNode(self.taskName('cRay'))
        self.cRayNode.addSolid(self.cRay)
        self.cRayNodePath = self.attachNewNode(self.cRayNode)
        self.cRayNodePath.hide()
        self.cRayBitMask = ToontownGlobals.FloorBitmask
        self.cRayNode.setFromCollideMask(self.cRayBitMask)
        self.cRayNode.setIntoCollideMask(BitMask32.allOff())
        self.lifter = CollisionHandlerFloor()
        self.lifter.setOffset(ToontownGlobals.FloorOffset)
        self.lifter.setReach(6.0)
        self.lifter.setMaxVelocity(8.0)
        self.lifter.addCollider(self.cRayNodePath, self)
        self.cTrav = base.cTrav

    def disableBodyCollisions(self):
        self.disableBattleDetect()
        self.enableRaycast(0)
        if self.cRayNodePath:
            self.cRayNodePath.removeNode()
        del self.cRayNode
        del self.cRay
        del self.lifter

    def denyBattle(self):
        """Server refused the battle request; resume walking and pathing."""
        self.notify.debug('denyBattle()')
        place = self.cr.playGame.getPlace()
        if place.fsm.getCurrentState().getName() == 'WaitForBattle':
            place.setState('walk')
        self.resumePath(self.pathState)

    def makePathTrack(self, nodePath, posPoints, velocity, name):
        """Build a Sequence that walks nodePath through posPoints at velocity,
        turning to face each next waypoint before moving toward it."""
        track = Sequence(name=name)
        nodePath.setPos(posPoints[0])
        for pointIndex in xrange(len(posPoints) - 1):
            startPoint = posPoints[pointIndex]
            endPoint = posPoints[pointIndex + 1]
            track.append(Func(nodePath.headsUp, endPoint[0], endPoint[1], endPoint[2]))
            distance = Vec3(endPoint - startPoint).length()
            duration = distance / velocity
            track.append(LerpPosInterval(nodePath, duration=duration, pos=Point3(endPoint), startPos=Point3(startPoint)))
        return track

    def setState(self, state):
        """Request an FSM transition; returns 0 when no transition occurs."""
        if self.fsm == None:
            return 0
        if self.fsm.getCurrentState().getName() == state:
            return 0
        return self.fsm.request(state)

    def subclassManagesParent(self):
        # Subclasses override to return 1 when they control scene-graph
        # parenting themselves (see enterOff/exitOff).
        return 0

    def enterOff(self, *args):
        self.hideNametag3d()
        self.hideNametag2d()
        if not self.subclassManagesParent():
            self.setParent(ToontownGlobals.SPHidden)

    def exitOff(self):
        if not self.subclassManagesParent():
            self.setParent(ToontownGlobals.SPRender)
        self.showNametag3d()
        self.showNametag2d()
        self.loop('neutral', 0)

    def enterBattle(self):
        self.loop('neutral', 0)
        self.disableBattleDetect()
        # In battle, swap the corporate medallion for the health meter.
        self.corpMedallion.hide()
        self.healthBar.show()
        if self.currHP < self.maxHP:
            self.updateHealthBar(0, 1)

    def exitBattle(self):
        self.healthBar.hide()
        self.corpMedallion.show()
        # HP is restored on leaving battle; the server owns real damage.
        self.currHP = self.maxHP
        self.interactivePropTrackBonus = -1

    def enterWaitForBattle(self):
        self.loop('neutral', 0)

    def exitWaitForBattle(self):
        pass

    def setSkelecog(self, flag):
        SuitBase.SuitBase.setSkelecog(self, flag)
        if flag:
            Suit.Suit.makeSkeleton(self)

    def setWaiter(self, flag):
        SuitBase.SuitBase.setWaiter(self, flag)
        if flag:
            Suit.Suit.makeWaiter(self)

    def showHpText(self, number, bonus = 0, scale = 1, attackTrack = -1):
        """Pop up floating damage/heal text above the suit.

        Negative numbers are damage (red, possibly with a "silly surge"
        term appended during the holiday or for interactive-prop bonuses);
        positive numbers are shown with a leading '+'.
        """
        if self.HpTextEnabled and not self.ghostMode:
            if number != 0:
                # Only one hp text node at a time.
                if self.hpText:
                    self.hideHpText()
                self.HpTextGenerator.setFont(OTPGlobals.getSignFont())
                if number < 0:
                    self.HpTextGenerator.setText(str(number))
                    if base.cr.newsManager.isHolidayRunning(ToontownGlobals.SILLY_SURGE_HOLIDAY):
                        self.sillySurgeText = True
                        # Pick the surge term bucket by damage decade.
                        absNum = abs(number)
                        if absNum > 0 and absNum <= 10:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[1])
                        elif absNum > 10 and absNum <= 20:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[2])
                        elif absNum > 20 and absNum <= 30:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[3])
                        elif absNum > 30 and absNum <= 40:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[4])
                        elif absNum > 40 and absNum <= 50:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[5])
                        elif absNum > 50 and absNum <= 60:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[6])
                        elif absNum > 60 and absNum <= 70:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[7])
                        elif absNum > 70 and absNum <= 80:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[8])
                        elif absNum > 80 and absNum <= 90:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[9])
                        elif absNum > 90 and absNum <= 100:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[10])
                        elif absNum > 100 and absNum <= 110:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[11])
                        else:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.SillySurgeTerms[12])
                    if self.interactivePropTrackBonus > -1 and self.interactivePropTrackBonus == attackTrack:
                        self.sillySurgeText = True
                        if attackTrack in TTLocalizer.InteractivePropTrackBonusTerms:
                            self.HpTextGenerator.setText(str(number) + '\n' + TTLocalizer.InteractivePropTrackBonusTerms[attackTrack])
                else:
                    self.HpTextGenerator.setText('+' + str(number))
                self.HpTextGenerator.clearShadow()
                self.HpTextGenerator.setAlign(TextNode.ACenter)
                # Color: yellow for bonus 1, orange for bonus 2, red for
                # damage (blue for interactive-prop bonus), green for heals.
                if bonus == 1:
                    r = 1.0
                    g = 1.0
                    b = 0
                    a = 1
                elif bonus == 2:
                    r = 1.0
                    g = 0.5
                    b = 0
                    a = 1
                elif number < 0:
                    r = 0.9
                    g = 0
                    b = 0
                    a = 1
                    if self.interactivePropTrackBonus > -1 and self.interactivePropTrackBonus == attackTrack:
                        r = 0
                        g = 0
                        b = 1
                        a = 1
                else:
                    r = 0
                    g = 0.9
                    b = 0
                    a = 1
                self.HpTextGenerator.setTextColor(r, g, b, a)
                self.hpTextNode = self.HpTextGenerator.generate()
                self.hpText = self.attachNewNode(self.hpTextNode)
                self.hpText.setScale(scale)
                self.hpText.setBillboardPointEye()
                self.hpText.setBin('fixed', 100)
                if self.sillySurgeText:
                    # Draw surge text over the nametag.
                    self.nametag3d.setDepthTest(0)
                    self.nametag3d.setBin('fixed', 99)
                self.hpText.setPos(0, 0, self.height / 2)
                seq = Sequence(self.hpText.posInterval(1.0, Point3(0, 0, self.height + 1.5), blendType='easeOut'), Wait(0.85), self.hpText.colorInterval(0.1, Vec4(r, g, b, 0), 0.1), Func(self.hideHpText))
                seq.start()

    def hideHpText(self):
        DistributedAvatar.DistributedAvatar.hideHpText(self)
        if self.sillySurgeText:
            # Restore nametag render state altered in showHpText().
            self.nametag3d.clearDepthTest()
            self.nametag3d.clearBin()
            self.sillySurgeText = False

    def getAvIdName(self):
        """Return a debug string with name, doId and level (or '???')."""
        try:
            level = self.getActualLevel()
        except:
            level = '???'
        return '%s\n%s\nLevel %s' % (self.getName(), self.doId, level)
|
|
__all__ = ['Bloch']
import os
from numpy import (ndarray, array, linspace, pi, outer, cos, sin, ones, size,
sqrt, real, mod, append, ceil, arange)
import numpy as np
from packaging.version import parse as parse_version
from qutip.qobj import Qobj
from qutip.expect import expect
from qutip.operators import sigmax, sigmay, sigmaz
# Matplotlib (and IPython) are optional: plotting support degrades
# gracefully when they are not installed.
try:
    import matplotlib
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib.patches import FancyArrowPatch
    from mpl_toolkits.mplot3d import proj3d

    # Define a custom _axes3D function based on the matplotlib version.
    # The auto_add_to_figure keyword is new for matplotlib>=3.4.
    if parse_version(matplotlib.__version__) >= parse_version('3.4'):
        def _axes3D(fig, *args, **kwargs):
            ax = Axes3D(fig, *args, auto_add_to_figure=False, **kwargs)
            return fig.add_axes(ax)
    else:
        def _axes3D(*args, **kwargs):
            return Axes3D(*args, **kwargs)

    class Arrow3D(FancyArrowPatch):
        """FancyArrowPatch that projects 3D endpoints into the 2D axes."""

        def __init__(self, xs, ys, zs, *args, **kwargs):
            # Placeholder 2D positions; real ones are set at draw time.
            FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
            self._verts3d = xs, ys, zs

        def draw(self, renderer):
            xs3d, ys3d, zs3d = self._verts3d
            xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
            self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
            FancyArrowPatch.draw(self, renderer)

        def do_3d_projection(self, renderer=None):
            # only called by matplotlib >= 3.5
            xs3d, ys3d, zs3d = self._verts3d
            xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
            self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
            # Depth used by matplotlib for draw-order sorting.
            return np.min(zs)
except ImportError:
    pass

try:
    from IPython.display import display
except ImportError:
    pass
class Bloch:
r"""
Class for plotting data on the Bloch sphere. Valid data can be either
points, vectors, or Qobj objects.
Attributes
----------
axes : matplotlib.axes.Axes
User supplied Matplotlib axes for Bloch sphere animation.
fig : matplotlib.figure.Figure
User supplied Matplotlib Figure instance for plotting Bloch sphere.
font_color : str, default 'black'
Color of font used for Bloch sphere labels.
font_size : int, default 20
Size of font used for Bloch sphere labels.
frame_alpha : float, default 0.1
Sets transparency of Bloch sphere frame.
frame_color : str, default 'gray'
Color of sphere wireframe.
frame_width : int, default 1
Width of wireframe.
point_color : list, default ["b", "r", "g", "#CC6600"]
List of colors for Bloch sphere point markers to cycle through, i.e.
by default, points 0 and 4 will both be blue ('b').
point_marker : list, default ["o", "s", "d", "^"]
List of point marker shapes to cycle through.
point_size : list, default [25, 32, 35, 45]
List of point marker sizes. Note, not all point markers look the same
size when plotted!
sphere_alpha : float, default 0.2
Transparency of Bloch sphere itself.
sphere_color : str, default '#FFDDDD'
Color of Bloch sphere.
figsize : list, default [7, 7]
Figure size of Bloch sphere plot. Best to have both numbers the same;
otherwise you will have a Bloch sphere that looks like a football.
vector_color : list, ["g", "#CC6600", "b", "r"]
List of vector colors to cycle through.
vector_width : int, default 5
Width of displayed vectors.
vector_style : str, default '-\|>'
Vector arrowhead style (from matplotlib's arrow style).
vector_mutation : int, default 20
Width of vectors arrowhead.
view : list, default [-60, 30]
Azimuthal and Elevation viewing angles.
xlabel : list, default ["$x$", ""]
List of strings corresponding to +x and -x axes labels, respectively.
xlpos : list, default [1.1, -1.1]
Positions of +x and -x labels respectively.
ylabel : list, default ["$y$", ""]
List of strings corresponding to +y and -y axes labels, respectively.
ylpos : list, default [1.2, -1.2]
Positions of +y and -y labels respectively.
zlabel : list, default ['$\\left\|0\\right>$', '$\\left\|1\\right>$']
List of strings corresponding to +z and -z axes labels, respectively.
zlpos : list, default [1.2, -1.2]
Positions of +z and -z labels respectively.
"""
def __init__(self, fig=None, axes=None, view=None, figsize=None,
             background=False):
    """Initialize display defaults and empty data stores.

    Parameters are all optional; when fig/axes are omitted they are
    created lazily elsewhere (self._ext_fig records whether the figure
    was user-supplied).
    """
    # Figure and axes
    self.fig = fig
    # True when the caller supplied the figure (so we must not close it).
    self._ext_fig = fig is not None
    self.axes = axes
    # Background axes, default = False
    self.background = background
    # The size of the figure in inches, default = [5,5].
    self.figsize = figsize if figsize else [5, 5]
    # Azimuthal and Elvation viewing angles, default = [-60,30].
    self.view = view if view else [-60, 30]
    # Color of Bloch sphere, default = #FFDDDD
    self.sphere_color = '#FFDDDD'
    # Transparency of Bloch sphere, default = 0.2
    self.sphere_alpha = 0.2
    # Color of wireframe, default = 'gray'
    self.frame_color = 'gray'
    # Width of wireframe, default = 1
    self.frame_width = 1
    # Transparency of wireframe, default = 0.2
    self.frame_alpha = 0.2
    # Labels for x-axis (in LaTex), default = ['$x$', '']
    self.xlabel = ['$x$', '']
    # Position of x-axis labels, default = [1.2, -1.2]
    self.xlpos = [1.2, -1.2]
    # Labels for y-axis (in LaTex), default = ['$y$', '']
    self.ylabel = ['$y$', '']
    # Position of y-axis labels, default = [1.2, -1.2]
    self.ylpos = [1.2, -1.2]
    # Labels for z-axis (in LaTex),
    # default = [r'$\left\|0\right>$', r'$\left|1\right>$']
    self.zlabel = [r'$\left|0\right>$', r'$\left|1\right>$']
    # Position of z-axis labels, default = [1.2, -1.2]
    self.zlpos = [1.2, -1.2]
    # ---font options---
    # Color of fonts, default = 'black'
    self.font_color = 'black'
    # Size of fonts, default = 20
    self.font_size = 20
    # ---vector options---
    # List of colors for Bloch vectors, default = ['b','g','r','y']
    self.vector_color = ['g', '#CC6600', 'b', 'r']
    #: Width of Bloch vectors, default = 5
    self.vector_width = 3
    #: Style of Bloch vectors, default = '-\|>' (or 'simple')
    self.vector_style = '-|>'
    #: Sets the width of the vectors arrowhead
    self.vector_mutation = 20
    # ---point options---
    # List of colors for Bloch point markers, default = ['b','g','r','y']
    self.point_color = ['b', 'r', 'g', '#CC6600']
    # Size of point markers, default = 25
    self.point_size = [25, 32, 35, 45]
    # Shape of point markers, default = ['o','^','d','s']
    self.point_marker = ['o', 's', 'd', '^']
    # ---data lists---
    # Data for point markers
    self.points = []
    # Data for Bloch vectors
    self.vectors = []
    # Data for annotations
    self.annotations = []
    # Number of times sphere has been saved
    self.savenum = 0
    # Style of points, 'm' for multiple colors, 's' for single color
    self.point_style = []
    # Data for line segment
    self._lines = []
    # Data for arcs and arc style
    self._arcs = []
def set_label_convention(self, convention):
"""Set x, y and z labels according to one of conventions.
Parameters
----------
convention : string
One of the following:
- "original"
- "xyz"
- "sx sy sz"
- "01"
- "polarization jones"
- "polarization jones letters"
see also: https://en.wikipedia.org/wiki/Jones_calculus
- "polarization stokes"
see also: https://en.wikipedia.org/wiki/Stokes_parameters
"""
ketex = "$\\left.|%s\\right\\rangle$"
# \left.| is on purpose, so that every ket has the same size
if convention == "original":
self.xlabel = ['$x$', '']
self.ylabel = ['$y$', '']
self.zlabel = ['$\\left|0\\right>$', '$\\left|1\\right>$']
elif convention == "xyz":
self.xlabel = ['$x$', '']
self.ylabel = ['$y$', '']
self.zlabel = ['$z$', '']
elif convention == "sx sy sz":
self.xlabel = ['$s_x$', '']
self.ylabel = ['$s_y$', '']
self.zlabel = ['$s_z$', '']
elif convention == "01":
self.xlabel = ['', '']
self.ylabel = ['', '']
self.zlabel = ['$\\left|0\\right>$', '$\\left|1\\right>$']
elif convention == "polarization jones":
self.xlabel = [ketex % "\\nearrow\\hspace{-1.46}\\swarrow",
ketex % "\\nwarrow\\hspace{-1.46}\\searrow"]
self.ylabel = [ketex % "\\circlearrowleft", ketex %
"\\circlearrowright"]
self.zlabel = [ketex % "\\leftrightarrow", ketex % "\\updownarrow"]
elif convention == "polarization jones letters":
self.xlabel = [ketex % "D", ketex % "A"]
self.ylabel = [ketex % "L", ketex % "R"]
self.zlabel = [ketex % "H", ketex % "V"]
elif convention == "polarization stokes":
self.ylabel = ["$\\nearrow\\hspace{-1.46}\\swarrow$",
"$\\nwarrow\\hspace{-1.46}\\searrow$"]
self.zlabel = ["$\\circlearrowleft$", "$\\circlearrowright$"]
self.xlabel = ["$\\leftrightarrow$", "$\\updownarrow$"]
else:
raise Exception("No such convention.")
def __str__(self):
s = ""
s += "Bloch data:\n"
s += "-----------\n"
s += "Number of points: " + str(len(self.points)) + "\n"
s += "Number of vectors: " + str(len(self.vectors)) + "\n"
s += "\n"
s += "Bloch sphere properties:\n"
s += "------------------------\n"
s += "font_color: " + str(self.font_color) + "\n"
s += "font_size: " + str(self.font_size) + "\n"
s += "frame_alpha: " + str(self.frame_alpha) + "\n"
s += "frame_color: " + str(self.frame_color) + "\n"
s += "frame_width: " + str(self.frame_width) + "\n"
s += "point_color: " + str(self.point_color) + "\n"
s += "point_marker: " + str(self.point_marker) + "\n"
s += "point_size: " + str(self.point_size) + "\n"
s += "sphere_alpha: " + str(self.sphere_alpha) + "\n"
s += "sphere_color: " + str(self.sphere_color) + "\n"
s += "figsize: " + str(self.figsize) + "\n"
s += "vector_color: " + str(self.vector_color) + "\n"
s += "vector_width: " + str(self.vector_width) + "\n"
s += "vector_style: " + str(self.vector_style) + "\n"
s += "vector_mutation: " + str(self.vector_mutation) + "\n"
s += "view: " + str(self.view) + "\n"
s += "xlabel: " + str(self.xlabel) + "\n"
s += "xlpos: " + str(self.xlpos) + "\n"
s += "ylabel: " + str(self.ylabel) + "\n"
s += "ylpos: " + str(self.ylpos) + "\n"
s += "zlabel: " + str(self.zlabel) + "\n"
s += "zlpos: " + str(self.zlpos) + "\n"
return s
def _repr_png_(self):
    """Render the sphere and return PNG data for IPython rich display."""
    from IPython.core.pylabtools import print_figure
    self.render()
    fig_data = print_figure(self.fig, 'png')
    # Close the rendered figure so repeated repr calls do not leak figures.
    plt.close(self.fig)
    return fig_data
def _repr_svg_(self):
    """Render the sphere and return SVG text for IPython rich display."""
    from IPython.core.pylabtools import print_figure
    self.render()
    # print_figure returns bytes; SVG repr must be text.
    fig_data = print_figure(self.fig, 'svg').decode('utf-8')
    plt.close(self.fig)
    return fig_data
def clear(self):
"""Resets Bloch sphere data sets to empty.
"""
self.points = []
self.vectors = []
self.point_style = []
self.annotations = []
self._lines = []
self._arcs = []
def add_points(self, points, meth='s'):
"""Add a list of data points to bloch sphere.
Parameters
----------
points : array_like
Collection of data points.
meth : {'s', 'm', 'l'}
Type of points to plot, use 'm' for multicolored, 'l' for points
connected with a line.
"""
if not isinstance(points[0], (list, ndarray)):
points = [[points[0]], [points[1]], [points[2]]]
points = array(points)
if meth == 's':
if len(points[0]) == 1:
pnts = array([[points[0][0]], [points[1][0]], [points[2][0]]])
pnts = append(pnts, points, axis=1)
else:
pnts = points
self.points.append(pnts)
self.point_style.append('s')
elif meth == 'l':
self.points.append(points)
self.point_style.append('l')
else:
self.points.append(points)
self.point_style.append('m')
def add_states(self, state, kind='vector'):
    """Add a state vector Qobj to Bloch sphere.

    Parameters
    ----------
    state : Qobj
        Input state vector.
    kind : {'vector', 'point'}
        Type of object to plot.
    """
    # Accept either a single Qobj or an iterable of them.
    states = [state] if isinstance(state, Qobj) else state
    for st in states:
        # Bloch coordinates are the Pauli expectation values of the state.
        bloch_vec = [expect(op(), st) for op in (sigmax, sigmay, sigmaz)]
        if kind == 'vector':
            self.add_vectors(bloch_vec)
        elif kind == 'point':
            self.add_points(bloch_vec)
def add_vectors(self, vectors):
"""Add a list of vectors to Bloch sphere.
Parameters
----------
vectors : array_like
Array with vectors of unit length or smaller.
"""
if isinstance(vectors[0], (list, tuple, ndarray)):
for vec in vectors:
self.vectors.append(vec)
else:
self.vectors.append(vectors)
def add_annotation(self, state_or_vector, text, **kwargs):
    """
    Add a text or LaTeX annotation to Bloch sphere, parametrized by a qubit
    state or a vector.

    Parameters
    ----------
    state_or_vector : Qobj/array/list/tuple
        Position for the annotaion.
        Qobj of a qubit or a vector of 3 elements.
    text : str
        Annotation text.
        You can use LaTeX, but remember to use raw string
        e.g. r"$\\langle x \\rangle$"
        or escape backslashes
        e.g. "$\\\\langle x \\\\rangle$".
    kwargs :
        Options as for mplot3d.axes3d.text, including:
        fontsize, color, horizontalalignment, verticalalignment.
    """
    if isinstance(state_or_vector, Qobj):
        # A qubit state is placed at its Pauli expectation values.
        vec = [expect(sigmax(), state_or_vector),
               expect(sigmay(), state_or_vector),
               expect(sigmaz(), state_or_vector)]
    elif isinstance(state_or_vector, (list, ndarray, tuple)) \
            and len(state_or_vector) == 3:
        vec = state_or_vector
    else:
        raise Exception("Position needs to be specified by a qubit " +
                        "state or a 3D vector.")
    # Stored for rendering later; 'opts' is forwarded to axes3d.text.
    self.annotations.append({'position': vec,
                             'text': text,
                             'opts': kwargs})
def add_arc(self, start, end, fmt="b", steps=None, **kwargs):
    """Adds an arc between two points on a sphere. The arc is set to be
    blue solid curve by default.

    The start and end points must be on the same sphere (i.e. have the
    same radius) but need not be on the unit sphere.

    Parameters
    ----------
    start : Qobj or array-like
        Array with cartesian coordinates of the first point, or a state
        vector or density matrix that can be mapped to a point on or
        within the Bloch sphere.
    end : Qobj or array-like
        Array with cartesian coordinates of the second point, or a state
        vector or density matrix that can be mapped to a point on or
        within the Bloch sphere.
    fmt : str, default: "b"
        A matplotlib format string for rendering the arc.
    steps : int, default: None
        The number of segments to use when rendering the arc. The default
        uses 100 steps times the distance between the start and end points,
        with a minimum of 2 steps.
    **kwargs : dict
        Additional parameters to pass to the matplotlib .plot function
        when rendering this arc.

    Raises
    ------
    ValueError
        If either point is at the origin, the points lie on different
        spheres, the points coincide, or the points are antipodal.
    """
    # Qobj inputs are converted to Bloch vectors via Pauli expectations.
    if isinstance(start, Qobj):
        pt1 = [
            expect(sigmax(), start),
            expect(sigmay(), start),
            expect(sigmaz(), start),
        ]
    else:
        pt1 = start
    if isinstance(end, Qobj):
        pt2 = [
            expect(sigmax(), end),
            expect(sigmay(), end),
            expect(sigmaz(), end),
        ]
    else:
        pt2 = end

    pt1 = np.asarray(pt1)
    pt2 = np.asarray(pt2)

    len1 = np.linalg.norm(pt1)
    len2 = np.linalg.norm(pt2)
    # Reject degenerate inputs: the origin has no direction, points on
    # different spheres share no arc, identical endpoints give no arc,
    # and antipodal endpoints admit infinitely many great-circle arcs.
    if len1 < 1e-12 or len2 < 1e-12:
        raise ValueError('Polar and azimuthal angles undefined at origin.')
    elif abs(len1 - len2) > 1e-12:
        raise ValueError("Points not on the same sphere.")
    elif (pt1 == pt2).all():
        raise ValueError(
            "Start and end represent the same point. No arc can be formed."
        )
    elif (pt1 == -pt2).all():
        raise ValueError(
            "Start and end are diagonally opposite, no unique arc is"
            " possible."
        )

    if steps is None:
        # Heuristic resolution: ~100 segments per unit of chord length,
        # never fewer than 2.
        steps = int(np.linalg.norm(pt1 - pt2) * 100)
        steps = max(2, steps)
    t = np.linspace(0, 1, steps)
    # All the points in this line are contained in the plane defined
    # by pt1, pt2 and the origin.
    line = pt1[:, np.newaxis] * t + pt2[:, np.newaxis] * (1 - t)
    # Normalize all the points in the line so that they sit at distance
    # len1 from the origin, i.e. project the chord onto the sphere.
    arc = line * len1 / np.linalg.norm(line, axis=0)

    # Stored only; drawing happens in plot_arcs() during render.
    self._arcs.append([arc, fmt, kwargs])
def add_line(self, start, end, fmt="k", **kwargs):
    """Adds a line segment connecting two points on the bloch sphere.

    The line segment is set to be a black solid line by default.

    Parameters
    ----------
    start : Qobj or array-like
        Array with cartesian coordinates of the first point, or a state
        vector or density matrix that can be mapped to a point on or
        within the Bloch sphere.
    end : Qobj or array-like
        Array with cartesian coordinates of the second point, or a state
        vector or density matrix that can be mapped to a point on or
        within the Bloch sphere.
    fmt : str, default: "k"
        A matplotlib format string for rendering the line.
    **kwargs : dict
        Additional parameters to pass to the matplotlib .plot function
        when rendering this line.
    """
    # Qobj inputs are converted to Bloch vectors via Pauli expectations.
    if isinstance(start, Qobj):
        pt1 = [
            expect(sigmax(), start),
            expect(sigmay(), start),
            expect(sigmaz(), start),
        ]
    else:
        pt1 = start
    if isinstance(end, Qobj):
        pt2 = [
            expect(sigmax(), end),
            expect(sigmay(), end),
            expect(sigmaz(), end),
        ]
    else:
        pt2 = end

    pt1 = np.asarray(pt1)
    pt2 = np.asarray(pt2)

    # Axis swap for plotting: plot-x takes Bloch-y and plot-y takes
    # -Bloch-x, matching the convention used in plot_vectors/plot_points.
    x = [pt1[1], pt2[1]]
    y = [-pt1[0], -pt2[0]]
    z = [pt1[2], pt2[2]]
    v = [x, y, z]

    # Stored only; drawing happens in plot_lines() during render.
    self._lines.append([v, fmt, kwargs])
def make_sphere(self):
    """Plot the Bloch sphere together with all attached data sets.

    Thin alias that simply delegates to :meth:`render`.
    """
    self.render()
def run_from_ipython(self):
    """Return True when executing inside an IPython session.

    IPython injects the ``__IPYTHON__`` builtin; anywhere else the bare
    name lookup raises NameError.
    """
    try:
        __IPYTHON__  # noqa: F821 -- defined only under IPython
    except NameError:
        return False
    return True
def _is_inline_backend(self):
    """Return True when matplotlib is using the Jupyter inline backend."""
    inline = "module://matplotlib_inline.backend_inline"
    return matplotlib.get_backend() == inline
def render(self):
    """
    Render the Bloch sphere and its data sets on the given figure and axes.

    Creates the figure/axes lazily if none exist (or if a previously
    created figure was closed), clears the axes, and redraws the sphere,
    axes, labels, points, vectors, annotations, lines and arcs.
    """
    if not self._ext_fig and not self._is_inline_backend():
        # If no external figure was supplied, we check to see if the
        # figure we created in a previous call to .render() has been
        # closed, and re-create it if it has been. This has the
        # unfortunate side effect of losing any modifications made to the
        # axes or figure, but the alternative is to crash the matplotlib
        # backend.
        #
        # The inline backend used by, e.g. jupyter notebooks, is happy to
        # use closed figures so we leave those figures intact.
        if (
            self.fig is not None and
            not plt.fignum_exists(self.fig.number)
        ):
            self.fig = None
            self.axes = None

    if self.fig is None:
        self.fig = plt.figure(figsize=self.figsize)
        if self._is_inline_backend():
            # We immediately close the inline figure to avoid displaying
            # the figure twice when .show() calls display.
            plt.close(self.fig)

    if self.axes is None:
        self.axes = _axes3D(self.fig, azim=self.view[0], elev=self.view[1])

    # Clearing the axes is horrifically slow and loses a lot of the
    # axes state, but matplotlib doesn't seem to provide a better way
    # to redraw Axes3D. :/
    self.axes.clear()
    self.axes.grid(False)

    # With a visible background the sphere gets some breathing room;
    # without one we zoom in and hide the matplotlib axes entirely.
    if self.background:
        self.axes.set_xlim3d(-1.3, 1.3)
        self.axes.set_ylim3d(-1.3, 1.3)
        self.axes.set_zlim3d(-1.3, 1.3)
    else:
        self.axes.set_axis_off()
        self.axes.set_xlim3d(-0.7, 0.7)
        self.axes.set_ylim3d(-0.7, 0.7)
        self.axes.set_zlim3d(-0.7, 0.7)
    # Manually set aspect ratio to fit a square bounding box.
    # Matplotlib did this stretching for < 3.3.0, but not above.
    if parse_version(matplotlib.__version__) >= parse_version('3.3'):
        self.axes.set_box_aspect((1, 1, 1))

    if not self.background:
        self.plot_axes()

    # Draw order matters: back hemisphere, then data, then the front
    # hemisphere and decorations on top.
    self.plot_back()
    self.plot_points()
    self.plot_vectors()
    self.plot_front()
    self.plot_axes_labels()
    self.plot_annotations()
    self.plot_lines()
    self.plot_arcs()

    # Trigger an update of the Bloch sphere if it is already shown:
    self.fig.canvas.draw()
def plot_back(self):
    """Draw the rear hemisphere: translucent surface, wireframe and the
    two equatorial guide circles."""
    # back half of sphere (u in [0, pi])
    u = linspace(0, pi, 25)
    v = linspace(0, pi, 25)
    x = outer(cos(u), sin(v))
    y = outer(sin(u), sin(v))
    z = outer(ones(size(u)), cos(v))
    self.axes.plot_surface(x, y, z, rstride=2, cstride=2,
                           color=self.sphere_color, linewidth=0,
                           alpha=self.sphere_alpha)
    # wireframe
    self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5,
                             color=self.frame_color,
                             alpha=self.frame_alpha)
    # equator circles: one in the z=0 plane, one in the x=0 plane
    self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='z',
                   lw=self.frame_width, color=self.frame_color)
    self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='x',
                   lw=self.frame_width, color=self.frame_color)
def plot_front(self):
    """Draw the front hemisphere: translucent surface, wireframe and the
    two equatorial guide circles (drawn after the data so they sit on
    top)."""
    # front half of sphere (u in [-pi, 0])
    u = linspace(-pi, 0, 25)
    v = linspace(0, pi, 25)
    x = outer(cos(u), sin(v))
    y = outer(sin(u), sin(v))
    z = outer(ones(size(u)), cos(v))
    self.axes.plot_surface(x, y, z, rstride=2, cstride=2,
                           color=self.sphere_color, linewidth=0,
                           alpha=self.sphere_alpha)
    # wireframe
    self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5,
                             color=self.frame_color,
                             alpha=self.frame_alpha)
    # equator circles: one in the z=0 plane, one in the x=0 plane
    self.axes.plot(1.0 * cos(u), 1.0 * sin(u),
                   zs=0, zdir='z', lw=self.frame_width,
                   color=self.frame_color)
    self.axes.plot(1.0 * cos(u), 1.0 * sin(u),
                   zs=0, zdir='x', lw=self.frame_width,
                   color=self.frame_color)
def plot_axes(self):
    """Draw the three coordinate axes as straight lines through the
    origin (used when the sphere background is hidden)."""
    # axes: a 2-point span from -1 to 1 along each direction
    span = linspace(-1.0, 1.0, 2)
    self.axes.plot(span, 0 * span, zs=0, zdir='z', label='X',
                   lw=self.frame_width, color=self.frame_color)
    self.axes.plot(0 * span, span, zs=0, zdir='z', label='Y',
                   lw=self.frame_width, color=self.frame_color)
    self.axes.plot(0 * span, span, zs=0, zdir='y', label='Z',
                   lw=self.frame_width, color=self.frame_color)
def plot_axes_labels(self):
    """Draw the configurable axis labels and hide matplotlib's own tick
    lines and labels on all three axes."""
    # axes labels, all centred, using the instance font settings
    opts = {'fontsize': self.font_size,
            'color': self.font_color,
            'horizontalalignment': 'center',
            'verticalalignment': 'center'}
    self.axes.text(0, -self.xlpos[0], 0, self.xlabel[0], **opts)
    self.axes.text(0, -self.xlpos[1], 0, self.xlabel[1], **opts)

    self.axes.text(self.ylpos[0], 0, 0, self.ylabel[0], **opts)
    self.axes.text(self.ylpos[1], 0, 0, self.ylabel[1], **opts)

    self.axes.text(0, 0, self.zlpos[0], self.zlabel[0], **opts)
    self.axes.text(0, 0, self.zlpos[1], self.zlabel[1], **opts)

    # Hide matplotlib's default tick marks/labels on every axis.
    for a in (self.axes.xaxis.get_ticklines() +
              self.axes.xaxis.get_ticklabels()):
        a.set_visible(False)
    for a in (self.axes.yaxis.get_ticklines() +
              self.axes.yaxis.get_ticklabels()):
        a.set_visible(False)
    for a in (self.axes.zaxis.get_ticklines() +
              self.axes.zaxis.get_ticklabels()):
        a.set_visible(False)
def plot_vectors(self):
    """Draw every stored Bloch vector as a line (or a 3D arrow when a
    vector style is configured), cycling through the vector colors."""
    # -X and Y data are switched for plotting purposes
    for k in range(len(self.vectors)):
        xs3d = self.vectors[k][1] * array([0, 1])
        ys3d = -self.vectors[k][0] * array([0, 1])
        zs3d = self.vectors[k][2] * array([0, 1])

        # Colors cycle when there are more vectors than colors.
        color = self.vector_color[mod(k, len(self.vector_color))]

        if self.vector_style == '':
            # simple line style
            self.axes.plot(xs3d, ys3d, zs3d,
                           zs=0, zdir='z', label='Z',
                           lw=self.vector_width, color=color)
        else:
            # decorated style, with arrow heads
            a = Arrow3D(xs3d, ys3d, zs3d,
                        mutation_scale=self.vector_mutation,
                        lw=self.vector_width,
                        arrowstyle=self.vector_style,
                        color=color)

            self.axes.add_artist(a)
def plot_points(self):
    """Draw every stored point set, as scatter markers ('s'),
    multi-colored scatter markers ('m') or a connected line ('l')."""
    # -X and Y data are switched for plotting purposes
    for k in range(len(self.points)):
        num = len(self.points[k][0])
        # Distance of each point from the origin.
        dist = [sqrt(self.points[k][0][j] ** 2 +
                     self.points[k][1][j] ** 2 +
                     self.points[k][2][j] ** 2) for j in range(num)]
        # NOTE(review): `dist - dist[0]` only works because sqrt here is
        # numpy's, so the list elements are numpy scalars and the
        # subtraction broadcasts the list to an array — confirm before
        # changing the import style.
        if any(abs(dist - dist[0]) / dist[0] > 1e-12):
            # combine arrays so that they can be sorted together; points
            # are re-ordered by radius so closer points draw first
            zipped = list(zip(dist, range(num)))
            zipped.sort()  # sort from lowest to highest radius
            dist, indperm = zip(*zipped)
            indperm = array(indperm)
        else:
            # all radii (numerically) equal: keep the original order
            indperm = arange(num)
        if self.point_style[k] == 's':
            self.axes.scatter(
                real(self.points[k][1][indperm]),
                - real(self.points[k][0][indperm]),
                real(self.points[k][2][indperm]),
                s=self.point_size[mod(k, len(self.point_size))],
                alpha=1,
                edgecolor=None,
                zdir='z',
                color=self.point_color[mod(k, len(self.point_color))],
                marker=self.point_marker[mod(k, len(self.point_marker))])

        elif self.point_style[k] == 'm':
            # multi-colored: tile the color list to cover all points,
            # then permute to match the radius ordering
            pnt_colors = array(self.point_color *
                               int(ceil(num / float(len(self.point_color)))))

            pnt_colors = pnt_colors[0:num]
            pnt_colors = list(pnt_colors[indperm])
            marker = self.point_marker[mod(k, len(self.point_marker))]
            s = self.point_size[mod(k, len(self.point_size))]
            self.axes.scatter(real(self.points[k][1][indperm]),
                              -real(self.points[k][0][indperm]),
                              real(self.points[k][2][indperm]),
                              s=s, alpha=1, edgecolor=None,
                              zdir='z', color=pnt_colors,
                              marker=marker)

        elif self.point_style[k] == 'l':
            color = self.point_color[mod(k, len(self.point_color))]
            self.axes.plot(real(self.points[k][1]),
                           -real(self.points[k][0]),
                           real(self.points[k][2]),
                           alpha=0.75, zdir='z',
                           color=color)
def plot_annotations(self):
    """Render every queued annotation as axes text, applying the default
    font options overridden by each annotation's own options."""
    # -X and Y data are switched for plotting purposes
    defaults = {'fontsize': self.font_size,
                'color': self.font_color,
                'horizontalalignment': 'center',
                'verticalalignment': 'center'}
    for note in self.annotations:
        pos = note['position']
        opts = dict(defaults, **note['opts'])
        self.axes.text(pos[1], -pos[0], pos[2], note['text'], **opts)
def plot_lines(self):
    """Draw every line segment queued via add_line onto the axes."""
    for pts, fmt, opts in self._lines:
        xs, ys, zs = pts
        self.axes.plot(xs, ys, zs, fmt, **opts)
def plot_arcs(self):
    """Draw every arc queued via add_arc, applying the same -x/y axis
    swap used elsewhere when plotting."""
    for arc, fmt, opts in self._arcs:
        self.axes.plot(arc[1, :], -arc[0, :], arc[2, :], fmt, **opts)
def show(self):
    """
    Display Bloch sphere and corresponding data sets.

    Notes
    -----
    When using inline plotting in Jupyter notebooks, any figure created
    in a notebook cell is displayed after the cell executes. Thus if you
    create a figure yourself and use it to create a Bloch sphere with
    ``b = Bloch(..., fig=fig)`` and then call ``b.show()`` in the same
    cell, then the figure will be displayed twice. If you do create your
    own figure, the simplest solution to this is to not call ``.show()``
    in the cell you create the figure in.
    """
    self.render()
    # Outside IPython, fall back to matplotlib's own figure display.
    if not self.run_from_ipython():
        self.fig.show()
        return
    display(self.fig)
def save(self, name=None, format='png', dirc=None, dpin=None):
    """Saves Bloch sphere to file of type ``format`` in directory ``dirc``.

    Parameters
    ----------
    name : str
        Name of saved image. Must include path and format as well.
        i.e. '/Users/Paul/Desktop/bloch.png'
        This overrides the 'format' and 'dirc' arguments.
    format : str
        Format of output image.
    dirc : str
        Directory for output images. Defaults to current working directory.
    dpin : int
        Resolution in dots per inch.

    Returns
    -------
    File containing plot of Bloch sphere.
    """
    self.render()
    # Ensure the requested output directory exists (relative to the cwd),
    # matching the historical behavior of always creating it when given.
    if dirc:
        target_dir = os.path.join(os.getcwd(), str(dirc))
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
    if name is None:
        # Auto-generate 'bloch_<n>.<format>' from the running counter.
        filename = 'bloch_' + str(self.savenum) + '.' + format
        if dirc:
            complete_path = os.path.join(os.getcwd(), str(dirc), filename)
        else:
            complete_path = os.path.join(os.getcwd(), filename)
    else:
        complete_path = name
    # Pass dpi only when explicitly requested so matplotlib's default
    # resolution applies otherwise.
    if dpin:
        self.fig.savefig(complete_path, dpi=dpin)
    else:
        self.fig.savefig(complete_path)
    self.savenum += 1
    if self.fig:
        plt.close(self.fig)
def _hide_tick_lines_and_labels(axis):
'''
Set visible property of ticklines and ticklabels of an axis to False
'''
for a in axis.get_ticklines() + axis.get_ticklabels():
a.set_visible(False)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for kws_streaming.layers.residual."""
import itertools
from absl.testing import parameterized
import numpy as np
from kws_streaming.layers import delay
from kws_streaming.layers import modes
from kws_streaming.layers import stream
from kws_streaming.layers import temporal_padding
from kws_streaming.layers import test_utils
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
from kws_streaming.models import utils
from kws_streaming.train import inference
def delay_model(flags, time_delay, also_in_non_streaming):
    """Model with delay for streaming mode.

    Builds a minimal Keras model: audio input -> expand_dims -> Delay.

    Args:
      flags: model and data settings
      time_delay: delay in time dim
      also_in_non_streaming: Apply delay also in non-streaming mode.

    Returns:
      Keras model
    """
    input_audio = tf.keras.layers.Input(
        shape=(flags.desired_samples,), batch_size=flags.batch_size)
    net = input_audio
    # Add a trailing channel dimension so Delay sees a 3D tensor
    # (presumably (batch, time, 1) -- confirm against delay.Delay).
    net = tf.keras.backend.expand_dims(net)
    net = delay.Delay(
        delay=time_delay, also_in_non_streaming=also_in_non_streaming)(
            net)
    return tf.keras.Model(input_audio, net)
def residual_model(flags,
                   cnn_filters,
                   cnn_kernel_size,
                   cnn_act,
                   cnn_use_bias,
                   cnn_padding,
                   delay_also_in_non_streaming,
                   dilation=1):
    """Toy deep convolutional model with residual connections.

    It can be used for speech enhancement.

    Args:
      flags: model and data settings
      cnn_filters: list of filters in conv layer
      cnn_kernel_size: list of kernel_size in conv layer
      cnn_act: list of activation functions in conv layer
      cnn_use_bias: list of use_bias in conv layer
      cnn_padding: list of padding in conv layer
      delay_also_in_non_streaming: Whether to apply delay also in
        non-streaming.
      dilation: dilation applied on all conv layers

    Returns:
      Keras model and sum delay

    Raises:
      ValueError: if any of input list has different length from any other
        or padding is not in [same, causal]
    """
    if not all(
        len(cnn_filters) == len(l) for l in [
            cnn_filters, cnn_kernel_size, cnn_act, cnn_use_bias, cnn_padding]):
        raise ValueError('all input lists have to be the same length')

    # it is an example of deep conv model for speech enhancement
    # which can be trained in non streaming mode and converted to streaming mode
    input_audio = tf.keras.layers.Input(
        shape=(flags.desired_samples,), batch_size=flags.batch_size)
    net = input_audio

    # Accumulated delay introduced by the 'same'-padding branches.
    sum_delay = 0

    net = tf.keras.backend.expand_dims(net)
    for filters, kernel_size, activation, use_bias, padding in zip(
        cnn_filters, cnn_kernel_size,
        cnn_act, cnn_use_bias, cnn_padding):
        # Receptive-field extent of this conv layer along time.
        time_buffer_size = dilation * (kernel_size - 1)

        if padding == 'causal':
            # residual connection is simple with 'causal' padding
            net_residual = net

        elif padding == 'same':
            # residual connection in streaming mode needs delay with 'same'
            # padding, by half the receptive field
            delay_val = time_buffer_size // 2
            net_residual = delay.Delay(
                delay=delay_val, also_in_non_streaming=delay_also_in_non_streaming)(
                    net)
            sum_delay += delay_val
        else:
            raise ValueError('wrong padding mode ', padding)

        # it is easier to convert model to streaming mode when padding function
        # is decoupled from conv layer
        net = temporal_padding.TemporalPadding(
            padding='causal' if delay_also_in_non_streaming else padding,
            padding_size=time_buffer_size)(
                net)
        # it is a ring buffer in streaming mode and lambda x during training
        net = stream.Stream(
            cell=tf.identity,
            ring_buffer_size_in_time_dim=time_buffer_size,
            use_one_step=False,
            pad_time_dim=None)(net)
        net = tf.keras.layers.Conv1D(
            filters=filters,
            kernel_size=kernel_size,
            activation=activation,
            use_bias=use_bias,
            padding='valid')(net)  # padding has to be valid!

        net = tf.keras.layers.Add()([net, net_residual])

    return tf.keras.Model(input_audio, net), sum_delay
def conv_model(flags,
               cnn_filters,
               cnn_kernel_size,
               cnn_act,
               cnn_use_bias,
               cnn_padding,
               dilation=1):
    """Toy convolutional model with sequence of convs with different paddings.

    It can be used for speech enhancement.

    Args:
      flags: model and data settings
      cnn_filters: list of filters in conv layer
      cnn_kernel_size: list of kernel_size in conv layer
      cnn_act: list of activation functions in conv layer
      cnn_use_bias: list of use_bias in conv layer
      cnn_padding: list of padding in conv layer
      dilation: dilation applied on all conv layers

    Returns:
      Keras model, sum delay and sum shift

    Raises:
      ValueError: if any of input list has different length from any other
        or padding is not in [same, causal]
    """
    if not all(
        len(cnn_filters) == len(l) for l in [
            cnn_filters, cnn_kernel_size, cnn_act, cnn_use_bias, cnn_padding]):
        raise ValueError('all input lists have to be the same length')

    # it is an example of deep conv model for speech enhancement
    # which can be trained in non streaming mode and converted to streaming mode
    input_audio = tf.keras.layers.Input(
        shape=(flags.desired_samples,), batch_size=flags.batch_size)
    net = input_audio

    # Accumulated delay from 'same' paddings and shift from 'causal' ones.
    sum_delay = 0
    sum_shift = 0

    net = tf.keras.backend.expand_dims(net)
    for filters, kernel_size, activation, use_bias, padding in zip(
        cnn_filters, cnn_kernel_size,
        cnn_act, cnn_use_bias, cnn_padding):
        # Receptive-field extent of this conv layer along time.
        time_buffer_size = dilation * (kernel_size - 1)

        if padding == 'same':
            # need a delay with 'same' padding in streaming mode
            delay_val = time_buffer_size // 2
            net = delay.Delay(delay=delay_val)(net)
            sum_delay += delay_val * 2
        elif padding == 'causal':
            sum_shift += kernel_size
        else:
            raise ValueError('wrong padding mode ', padding)

        # it is a ring buffer in streaming mode and lambda x during training
        net = stream.Stream(
            cell=tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=kernel_size,
                activation=activation,
                use_bias=use_bias,
                padding='valid'),
            use_one_step=False,
            pad_time_dim=padding)(net)

    return tf.keras.Model(input_audio, net), sum_delay, sum_shift
class DelayStreamTest(tf.test.TestCase, parameterized.TestCase):
    """Test delay layer: streaming outputs must match non-streaming ones."""

    def setUp(self):
        super(DelayStreamTest, self).setUp()
        # Fixed seed so conv weight initialization is reproducible.
        test_utils.set_seed(123)

    @parameterized.parameters(
        itertools.product([1, 4], ['causal', 'same'], [False, True]))
    def test_residual(self, step, padding, delay_also_in_non_streaming):
        """Test residual connection in streaming mode with conv layer."""

        # model and data parameters
        cnn_filters = [1, 1]
        cnn_kernel_size = [5, 3]
        cnn_act = ['elu', 'elu']
        cnn_use_bias = [False, False]
        cnn_padding = [padding, padding]
        params = test_utils.Params([step], clip_duration_ms=2)

        # prepare input data: a simple ramp signal with a batch dim
        x = np.arange(params.desired_samples)
        inp_audio = x
        inp_audio = np.expand_dims(inp_audio, 0)

        # prepare non stream model
        model, sum_delay = residual_model(params, cnn_filters, cnn_kernel_size,
                                          cnn_act, cnn_use_bias, cnn_padding,
                                          delay_also_in_non_streaming)
        model.summary()

        # prepare streaming model
        model_stream = utils.to_streaming_inference(
            model, params, modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
        model_stream.summary()

        # run inference
        non_stream_out = model.predict(inp_audio)
        stream_out = inference.run_stream_inference(params, model_stream, inp_audio)

        # normalize output data and compare them
        channel = 0
        non_stream_out = non_stream_out[0, :, channel]
        stream_out = stream_out[0, :, channel]

        min_len = min(stream_out.shape[0], non_stream_out.shape[0])
        stream_out = stream_out[0:min_len]
        non_stream_out = non_stream_out[0:min_len]
        # Align the two outputs before comparing: streaming output lags the
        # non-streaming one by the accumulated delay plus a one-sample shift.
        shift = 1
        if delay_also_in_non_streaming:
            # Delay was also applied in non-streaming, as well as streaming mode.
            non_stream_out = non_stream_out[shift + sum_delay:min_len]
        else:
            non_stream_out = non_stream_out[shift:min_len - sum_delay]
        stream_out = stream_out[sum_delay + shift:]

        self.assertAllEqual(non_stream_out.shape, (31-sum_delay,))
        self.assertAllClose(stream_out, non_stream_out)

    @parameterized.parameters(False, True)
    def test_delay_internal_state(self, delay_also_in_non_streaming):
        """Test delay layer with internal state."""
        # model and data parameters
        params = test_utils.Params([1], clip_duration_ms=1)

        # prepare non stream model
        time_delay = 3
        model = delay_model(params, time_delay, delay_also_in_non_streaming)
        model.summary()

        # prepare streaming model
        model_stream = utils.to_streaming_inference(
            model, params, modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
        # NOTE(review): this prints the non-streaming model again;
        # model_stream.summary() was likely intended -- confirm.
        model.summary()

        # fill the buffer: first `time_delay` outputs are padding zeros
        for i in range(time_delay):
            output = model_stream.predict([i + 1])
            self.assertAllEqual(output[0, 0, 0], 0)

        # now get the data with delay: inputs re-emerge in order
        for i in range(time_delay):
            output = model_stream.predict([0])
            self.assertAllEqual(output[0, 0, 0], i + 1)

    def test_conv(self):
        """Test conv model with 'same' padding."""

        # model and data parameters
        cnn_filters = [1, 1, 1]
        cnn_kernel_size = [5, 3, 5]
        cnn_act = ['elu', 'elu', 'elu']
        cnn_use_bias = [False, False, False]
        cnn_padding = ['same', 'causal', 'same']
        params = test_utils.Params([1], clip_duration_ms=2)

        # prepare input data: a simple ramp signal with a batch dim
        x = np.arange(params.desired_samples)
        inp_audio = x
        inp_audio = np.expand_dims(inp_audio, 0)

        # prepare non stream model
        model, sum_delay, sum_shift = conv_model(params, cnn_filters,
                                                 cnn_kernel_size, cnn_act,
                                                 cnn_use_bias, cnn_padding)
        model.summary()
        non_stream_out = model.predict(inp_audio)

        # prepare streaming model
        model_stream = utils.to_streaming_inference(
            model, params, modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
        model_stream.summary()
        stream_out = inference.run_stream_inference(params, model_stream, inp_audio)

        # Align outputs: causal shifts plus one sample, and trim the delay.
        shift = sum_shift + 1
        # normalize output data and compare them
        non_stream_out = non_stream_out[0, shift:-(sum_delay),]
        stream_out = stream_out[0, sum_delay+shift:,]

        self.assertAllClose(stream_out, non_stream_out)
if __name__ == '__main__':
    # Streaming conversion in these tests relies on TF1-style graph mode.
    tf1.disable_eager_execution()
    tf.test.main()
|
|
"""Green wave training experiment."""
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.envs.gym_env import GymEnv
from flow.core.params import VehicleParams
from flow.core.params import TrafficLightParams
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \
InFlows
from flow.core.params import SumoCarFollowingParams
from flow.controllers import SimCarFollowingController, GridRouter
from flow.scenarios.grid import SimpleGridScenario
# set to true if you would like to run the experiment with inflows of vehicles
# from the edges, and false otherwise
USE_INFLOWS = False
# inflow rate of vehicles at every edge (only if USE_INFLOWS is set to True)
EDGE_INFLOW = 300
def gen_edges(col_num, row_num):
    """Define the names of all edges in the network.

    Parameters
    ----------
    col_num : int
        number of columns of edges in the grid
    row_num : int
        number of rows of edges in the grid

    Returns
    -------
    list of str
        names of every edge to be generated.
    """
    edges = []

    # vertical boundary edges: a "left"/"right" pair per column
    for col in range(col_num):
        edges.append('left{}_{}'.format(row_num, col))
        edges.append('right0_{}'.format(col))

    # horizontal boundary edges: a "bot"/"top" pair per row
    for row in range(row_num):
        edges.append('bot{}_0'.format(row))
        edges.append('top{}_{}'.format(row, col_num))

    return edges
def get_flow_params(v_enter, vehs_per_hour, col_num, row_num, add_net_params):
    """Define the network and initial params in the presence of inflows.

    Parameters
    ----------
    v_enter : float
        entering speed of inflow vehicles
    vehs_per_hour : float
        vehicle inflow rate (in veh/hr)
    col_num : int
        number of columns of edges in the grid
    row_num : int
        number of rows of edges in the grid
    add_net_params : dict
        additional network-specific parameters (unique to the grid)

    Returns
    -------
    flow.core.params.InitialConfig
        parameters specifying the initial configuration of vehicles in the
        network
    flow.core.params.NetParams
        network-specific parameters used to generate the scenario
    """
    initial_config = InitialConfig(
        spacing="custom", lanes_distribution=float("inf"), shuffle=True)

    # Attach an identical "idm" inflow to every outer edge of the grid.
    inflow = InFlows()
    for edge in gen_edges(col_num, row_num):
        inflow.add(
            veh_type="idm",
            edge=edge,
            vehs_per_hour=vehs_per_hour,
            departLane="free",
            departSpeed=v_enter)

    net_params = NetParams(
        inflows=inflow,
        additional_params=add_net_params)

    return initial_config, net_params
def get_non_flow_params(enter_speed, add_net_params):
    """Define the network and initial params in the absence of inflows.

    Note that when a vehicle leaves a network in this case, it is
    immediately returned to the start of the row/column it was traversing,
    and in the same direction as it was before.

    Parameters
    ----------
    enter_speed : float
        initial speed of vehicles as they enter the network.
    add_net_params : dict
        additional network-specific parameters (unique to the grid)

    Returns
    -------
    flow.core.params.InitialConfig
        parameters specifying the initial configuration of vehicles in the
        network
    flow.core.params.NetParams
        network-specific parameters used to generate the scenario
    """
    # The entering speed is passed to the scenario through the initial
    # configuration's additional params.
    additional_init_params = {'enter_speed': enter_speed}
    initial_config = InitialConfig(
        spacing='custom', additional_params=additional_init_params)
    net_params = NetParams(additional_params=add_net_params)

    return initial_config, net_params
def run_task(*_):
    """Implement the run_task method needed to run experiments with rllab.

    Builds a 3x3 traffic-light grid scenario, wraps it in a gym
    environment, and trains a Gaussian MLP policy with TRPO.
    """
    # network geometry and vehicle-count settings
    V_ENTER = 30
    INNER_LENGTH = 300
    LONG_LENGTH = 100
    SHORT_LENGTH = 300
    N_ROWS = 3
    N_COLUMNS = 3
    NUM_CARS_LEFT = 1
    NUM_CARS_RIGHT = 1
    NUM_CARS_TOP = 1
    NUM_CARS_BOT = 1
    # total vehicles: one per configured direction on every row/column
    tot_cars = (NUM_CARS_LEFT + NUM_CARS_RIGHT) * N_COLUMNS \
        + (NUM_CARS_BOT + NUM_CARS_TOP) * N_ROWS

    grid_array = {
        "short_length": SHORT_LENGTH,
        "inner_length": INNER_LENGTH,
        "long_length": LONG_LENGTH,
        "row_num": N_ROWS,
        "col_num": N_COLUMNS,
        "cars_left": NUM_CARS_LEFT,
        "cars_right": NUM_CARS_RIGHT,
        "cars_top": NUM_CARS_TOP,
        "cars_bot": NUM_CARS_BOT
    }

    sim_params = SumoParams(sim_step=1, render=True)

    vehicles = VehicleParams()
    vehicles.add(
        veh_id="idm",
        acceleration_controller=(SimCarFollowingController, {}),
        car_following_params=SumoCarFollowingParams(
            min_gap=2.5,
            tau=1.1,
            max_speed=V_ENTER,
            decel=7.5,  # avoid collisions at emergency stops
            speed_mode="all_checks"
        ),
        routing_controller=(GridRouter, {}),
        num_vehicles=tot_cars)

    tl_logic = TrafficLightParams(baseline=False)

    additional_env_params = {
        "target_velocity": 50,
        "switch_time": 3.0,
        "num_observed": 2,
        "discrete": False,
        "tl_type": "controlled"
    }
    env_params = EnvParams(additional_params=additional_env_params)

    additional_net_params = {
        "speed_limit": 35,
        "grid_array": grid_array,
        "horizontal_lanes": 1,
        "vertical_lanes": 1
    }

    # choose between inflow-driven and closed-network configurations
    if USE_INFLOWS:
        initial_config, net_params = get_flow_params(
            v_enter=V_ENTER,
            vehs_per_hour=EDGE_INFLOW,
            col_num=N_COLUMNS,
            row_num=N_ROWS,
            add_net_params=additional_net_params)
    else:
        initial_config, net_params = get_non_flow_params(
            V_ENTER, additional_net_params)

    scenario = SimpleGridScenario(
        name="grid-intersection",
        vehicles=vehicles,
        net_params=net_params,
        initial_config=initial_config,
        traffic_lights=tl_logic)

    # register the environment with gym before wrapping it for rllab
    env_name = "PO_TrafficLightGridEnv"
    pass_params = (env_name, sim_params, vehicles, env_params, net_params,
                   initial_config, scenario)

    env = GymEnv(env_name, record_video=False, register_params=pass_params)
    horizon = env.horizon
    env = normalize(env)

    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=40000,
        max_path_length=horizon,
        # whole_paths=True,
        n_itr=800,
        discount=0.999,
        # step_size=0.01,
    )
    algo.train()
# Launch one rllab experiment per seed (extra seeds left commented out).
for seed in [6]:  # , 7, 8]:
    run_experiment_lite(
        run_task,
        # Number of parallel workers for sampling
        n_parallel=1,
        # n_parallel=1,
        # NOTE(review): comment said "last iteration" but "all" keeps a
        # snapshot for every iteration -- confirm which is intended.
        snapshot_mode="all",
        # Specifies the seed for the experiment. If this is not provided, a
        # random seed will be used
        seed=seed,
        mode="local",  # "local_docker", "ec2"
        exp_prefix="green-wave",
        # plot=True,
    )
|
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2017 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
VGG19 on LUNA16 data.
Command:
python LUNA16_VGG_no_batch_Sigmoid.py -z 128 -e 200 -b gpu -i 0
"""
from neon import logger as neon_logger
from neon.initializers import Gaussian, GlorotUniform, Xavier, Constant
from neon.optimizers import Adam, Adadelta
from neon.optimizers import GradientDescentMomentum, Schedule, MultiOptimizer
from neon.layers import Conv, Dropout, Activation, Pooling, GeneralizedCost, Affine
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, Logistic, CrossEntropyBinary, Misclassification, PrecisionRecall
from neon.models import Model
from aeon import DataLoader
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.backends import gen_backend
from neon.data.dataloader_transformers import BGRMeanSubtract, TypeCast, OneHot
import numpy as np
from neon.data.datasets import Dataset
from neon.util.persist import load_obj
import os
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument("--learning_rate", default=0.05,
                    help="initial learning rate")
parser.add_argument("--weight_decay", default=0.001, help="weight decay")
parser.add_argument('--deconv', action='store_true',
                    help='save visualization data from deconvolution')
args = parser.parse_args()

# hyperparameters
num_epochs = args.epochs

# Next line gets rid of the deterministic warning
args.deterministic = None

# default the RNG seed when the user did not supply one
if (args.rng_seed is None):
    args.rng_seed = 16
print('Batch size = {}'.format(args.batch_size))

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
#be.enable_winograd = 4  # default to winograd 4 for fast autotune

# Set up the training set to load via aeon
# Augmenting the data via flipping, rotating, changing contrast/brightness
image_config = dict(height=64, width=64, flip_enable=True, channels=3,
                    contrast=(0.9,1.1), brightness=(0.9,1.1),
                    scale=(0.75,0.75), fixed_aspect_ratio=True)
label_config = dict(binary=False)
config = dict(type="image,label",
              image=image_config,
              label=label_config,
              manifest_filename='manifest_all_but_9.csv',
              minibatch_size=args.batch_size,
              macrobatch_size=128,
              cache_directory='cache_dir',
              shuffle_manifest=True)
              #shuffle_every_epoch = True)
train_set = DataLoader(config, be)
train_set = TypeCast(train_set, index=0, dtype=np.float32)  # cast image to float

# Set up the validation set to load via aeon (no augmentation)
image_config = dict(height=64, width=64, channels=3)
label_config = dict(binary=False)
config = dict(type="image,label",
              image=image_config,
              label=label_config,
              manifest_filename='manifest_subset9_augmented.csv',
              minibatch_size=args.batch_size)
valid_set = DataLoader(config, be)
valid_set = TypeCast(valid_set, index=0, dtype=np.float32)  # cast image to float

# Set up the testset to load via aeon
# NOTE(review): test set reads the same manifest as the validation set --
# confirm this is intentional.
image_config = dict(height=64, width=64, channels=3)
label_config = dict(binary=False)
config = dict(type="image,label",
              image=image_config,
              label=label_config,
              manifest_filename='manifest_subset9_augmented.csv',
              minibatch_size=args.batch_size,
              subset_fraction=1.0)
test_set = DataLoader(config, be)
test_set = TypeCast(test_set, index=0, dtype=np.float32)  # cast image to float

# weight initialization (Gaussian alternative left commented out)
#init_uni = Gaussian(scale=0.05)
init_uni = GlorotUniform()

# optimizer (Adam alternative left commented out)
#opt_gdm = Adam(learning_rate=args.learning_rate, beta_1=0.9, beta_2=0.999)
opt_gdm = Adadelta(decay=0.95, epsilon=1e-6)

relu = Rectlin()
# shared keyword arguments for every VGG conv layer below
conv_params = {'strides': 1,
               'padding': 1,
               'init': Xavier(local=True),
               'bias': Constant(0),
               'activation': relu,
               'batch_norm': False}
# Set up the model layers
vgg_layers = []
# set up 3x3 conv stacks with different number of filters
vgg_layers.append(Conv((3, 3, 64), **conv_params))
vgg_layers.append(Conv((3, 3, 64), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 128), **conv_params))
vgg_layers.append(Conv((3, 3, 128), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Affine(nout=4096, init=GlorotUniform(), bias=Constant(0), activation=relu, name='class_layer'))
vgg_layers.append(Dropout(keep=0.5))
vgg_layers.append(Affine(nout=4096, init=GlorotUniform(), bias=Constant(0), activation=relu))
vgg_layers.append(Dropout(keep=0.5))
vgg_layers.append(Affine(nout=512, init=GlorotUniform(), bias=Constant(0), activation=relu))
vgg_layers.append(Dropout(keep=0.5))
vgg_layers.append(Affine(nout=1, init=GlorotUniform(), bias=Constant(0), activation=Logistic()))
# define different optimizers for the class_layer and the rest of the network
# we use a momentum coefficient of 0.9 and weight decay of 0.0005.
opt_vgg = GradientDescentMomentum(0.001, 0.9, wdecay=0.0005)
opt_class_layer = GradientDescentMomentum(0.01, 0.9, wdecay=0.0005)
# also define optimizers for the bias layers, which have a different learning rate
# and not weight decay.
opt_bias = GradientDescentMomentum(0.002, 0.9)
opt_bias_class = GradientDescentMomentum(0.02, 0.9)
# set up the mapping of layers to optimizers
opt = MultiOptimizer({'default': opt_vgg, 'Bias': opt_bias,
'class_layer': opt_class_layer, 'class_layer_bias': opt_bias_class})
# use cross-entropy cost to train the network
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
lunaModel = Model(layers=vgg_layers)
# location and size of the VGG weights file
url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/'
filename = 'VGG_E.p' # VGG_E.p is VGG19; VGG_D.p is VGG16
size = 554227541
# edit filepath below if you have the file elsewhere
_, filepath = Dataset._valid_path_append('data', '', filename)
if not os.path.exists(filepath):
print('Need to fetch VGG pre-trained weights from cloud. Please wait...')
Dataset.fetch_dataset(url, filename, filepath, size)
# load the weights param file
print("Loading VGG weights from {}...".format(filepath))
trained_vgg = load_obj(filepath)
print("Done!")
param_layers = [l for l in lunaModel.layers.layers]
param_dict_list = trained_vgg['model']['config']['layers']
for layer, params in zip(param_layers, param_dict_list):
if(layer.name == 'class_layer'):
break
# To be sure, we print the name of the layer in our model
# and the name in the vgg model.
#print(layer.name + ", " + params['config']['name'])
layer.load_weights(params, load_states=True)
if args.model_file:
import os
assert os.path.exists(args.model_file), '%s not found' % args.model_file
lunaModel.load_params(args.model_file)
# configure callbacks
if args.callback_args['eval_freq'] is None:
args.callback_args['eval_freq'] = 1
# configure callbacks
callbacks = Callbacks(lunaModel, eval_set=valid_set, **args.callback_args)
# add a callback that saves the best model state
callbacks.add_save_best_state_callback('LUNA16_VGG_model_no_batch_sigmoid_pretrained.prm')
if args.deconv:
callbacks.add_deconv_callback(train_set, valid_set)
lunaModel.fit(train_set, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
lunaModel.save_params('LUNA16_VGG_model_no_batch_sigmoid_pretrained.prm')
neon_logger.display('Calculating metrics on the test set. This could take a while...')
neon_logger.display('Misclassification error (test) = {:.2f}%'.format(lunaModel.eval(test_set, metric=Misclassification())[0] * 100))
neon_logger.display('Precision/recall (test) = {}'.format(lunaModel.eval(test_set, metric=PrecisionRecall(num_classes=2))))
|
|
"""
Katz centrality.
"""
# Copyright (C) 2004-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = "\n".join(['Aric Hagberg (aric.hagberg@gmail.com)',
'Pieter Swart (swart@lanl.gov)',
'Sasha Gutfraind (ag362@cornell.edu)',
'Vincent Gauthier (vgauthier@luxbulb.org)'])
__all__ = ['katz_centrality',
'katz_centrality_numpy']
@not_implemented_for('multigraph')
def katz_centrality(G, alpha=0.1, beta=1.0,
                    max_iter=1000, tol=1.0e-6, nstart=None, normalized=True,
                    weight='weight'):
    r"""Compute the Katz centrality for the nodes of the graph G.

    Katz centrality solves

    .. math::

        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,

    by power iteration, where `A` is the adjacency matrix of G. The
    attenuation factor `\alpha` must be strictly smaller than the inverse
    of the largest eigenvalue of `A` for the iteration to converge. With
    `\alpha = 1/\lambda_{max}` and `\beta = 1` this reduces to eigenvector
    centrality. For directed graphs the "left" eigenvector (in-edges) is
    computed; reverse the graph first for the out-edge variant. More
    background is given in [1]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph
    alpha : float
        Attenuation factor
    beta : scalar or dictionary, optional (default=1.0)
        Weight attributed to the immediate neighborhood. If not a scalar,
        the dictionary must have a value for every node.
    max_iter : integer, optional (default=1000)
        Maximum number of iterations in power method.
    tol : float, optional (default=1.0e-6)
        Error tolerance used to check convergence in power method iteration.
    nstart : dictionary, optional
        Starting value of Katz iteration for each node.
    normalized : bool, optional (default=True)
        If True normalize the resulting values.
    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with Katz centrality as the value.

    Raises
    ------
    NetworkXError
        If ``beta`` is a dictionary that is missing a node, or if the power
        iteration does not converge within ``max_iter`` iterations.

    References
    ----------
    .. [1] M. Newman, Networks: An Introduction. Oxford University Press,
       USA, 2010, p. 720.

    See Also
    --------
    katz_centrality_numpy
    eigenvector_centrality
    eigenvector_centrality_numpy
    pagerank
    hits
    """
    from math import sqrt

    if len(G) == 0:
        return {}
    nnodes = G.number_of_nodes()

    # Starting vector: all zeros unless the caller supplied one.
    centr = dict.fromkeys(G, 0) if nstart is None else nstart

    try:
        bias = dict.fromkeys(G, float(beta))
    except (TypeError, ValueError):
        # beta is not a scalar -- treat it as a per-node dictionary.
        bias = beta
        if set(beta) != set(G):
            raise nx.NetworkXError('beta dictionary '
                                   'must have a value for every node')

    for _ in range(max_iter):
        prev = centr
        centr = dict.fromkeys(prev, 0)
        # One multiplication step of the power method:
        # centr = alpha * A^T prev + beta (left eigenvector for digraphs).
        for node in centr:
            contribution = prev[node]
            for nbr in G[node]:
                centr[nbr] += contribution * G[node][nbr].get(weight, 1)
        for node in centr:
            centr[node] = alpha * centr[node] + bias[node]

        # Converged once the total change drops below nnodes * tol.
        if sum(abs(centr[node] - prev[node]) for node in centr) < nnodes * tol:
            if normalized:
                # Rescale to unit Euclidean norm.
                try:
                    scale = 1.0 / sqrt(sum(v ** 2 for v in centr.values()))
                # this should never be zero?
                except ZeroDivisionError:
                    scale = 1.0
            else:
                scale = 1
            for node in centr:
                centr[node] *= scale
            return centr

    raise nx.NetworkXError('Power iteration failed to converge in '
                           '%d iterations.' % max_iter)
@not_implemented_for('multigraph')
def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True,
                          weight = 'weight'):
    r"""Compute the Katz centrality for the graph G.
    Katz centrality is related to eigenvalue centrality and PageRank.
    The Katz centrality for node `i` is
    .. math::
        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
    where `A` is the adjacency matrix of the graph G with eigenvalues `\lambda`.
    The parameter `\beta` controls the initial centrality and
    .. math::
        \alpha < \frac{1}{\lambda_{max}}.
    Katz centrality computes the relative influence of a node within a
    network by measuring the number of the immediate neighbors (first
    degree nodes) and also all other nodes in the network that connect
    to the node under consideration through these immediate neighbors.
    Extra weight can be provided to immediate neighbors through the
    parameter :math:`\beta`. Connections made with distant neighbors
    are, however, penalized by an attenuation factor `\alpha` which
    should be strictly less than the inverse largest eigenvalue of the
    adjacency matrix in order for the Katz centrality to be computed
    correctly. More information is provided in [1]_ .
    Parameters
    ----------
    G : graph
        A NetworkX graph
    alpha : float
        Attenuation factor
    beta : scalar or dictionary, optional (default=1.0)
        Weight attributed to the immediate neighborhood. If not a scalar the
        dictionary must have a value for every node.
    normalized : bool
        If True normalize the resulting values.
    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with Katz centrality as the value.
    Examples
    --------
    >>> import math
    >>> G = nx.path_graph(4)
    >>> phi = (1+math.sqrt(5))/2.0 # largest eigenvalue of adj matrix
    >>> centrality = nx.katz_centrality_numpy(G,1/phi)
    >>> for n,c in sorted(centrality.items()):
    ...    print("%d %0.2f"%(n,c))
    0 0.37
    1 0.60
    2 0.60
    3 0.37
    Notes
    -----
    This algorithm uses a direct linear solver to solve the above equation.
    The constant alpha should be strictly less than the inverse of largest
    eigenvalue of the adjacency matrix for there to be a solution. When
    `\alpha = 1/\lambda_{max}` and `\beta=1` Katz centrality is the same as
    eigenvector centrality.
    For directed graphs this finds "left" eigenvectors which corresponds
    to the in-edges in the graph. For out-edges Katz centrality
    first reverse the graph with G.reverse().
    References
    ----------
    .. [1] M. Newman, Networks: An Introduction. Oxford University Press,
           USA, 2010, p. 720.
    See Also
    --------
    katz_centrality
    eigenvector_centrality_numpy
    eigenvector_centrality
    pagerank
    hits
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError('Requires NumPy: http://scipy.org/')
    if len(G) == 0:
        return {}
    try:
        # beta supplied as a dictionary: it must cover every node, and the
        # node ordering of the solution follows the dictionary's keys.
        nodelist = beta.keys()
        if set(nodelist) != set(G):
            raise nx.NetworkXError('beta dictionary '
                                   'must have a value for every node')
        b = np.array(list(beta.values()), dtype=float)
    except AttributeError:
        # beta is a scalar (no .keys()): replicate it for every node.
        # NOTE(review): this branch builds b with shape (n, 1) while the
        # dict branch builds shape (n,) -- confirm both solve paths agree.
        nodelist = G.nodes()
        try:
            b = np.ones((len(nodelist),1))*float(beta)
        except (TypeError,ValueError):
            raise nx.NetworkXError('beta must be a number')
    # Transpose so that, for directed graphs, the solution corresponds to
    # the "left" (in-edge based) eigenvector, matching katz_centrality.
    A = nx.adj_matrix(G, nodelist=nodelist, weight=weight).todense().T
    n = np.array(A).shape[0]
    # Direct solve of (I - alpha*A) x = b.
    centrality = np.linalg.solve( np.eye(n,n) - (alpha * A) , b)
    if normalized:
        # Scale by the Euclidean norm, signed by the sum of the entries.
        norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)
    else:
        norm = 1.0
    centrality = dict(zip(nodelist, map(float,centrality/norm)))
    return centrality
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when NumPy/SciPy are missing.

    Parameters
    ----------
    module : module
        The test module being set up (required by the nose fixture
        protocol; unused here).

    Raises
    ------
    SkipTest
        If NumPy or SciPy cannot be imported.
    """
    from nose import SkipTest
    try:
        import numpy
        import scipy
    except ImportError:
        # Catch only ImportError: the previous bare `except:` also swallowed
        # unrelated failures (e.g. KeyboardInterrupt) and masked real bugs.
        raise SkipTest("SciPy not available")
|
|
""" Classes and functions for fitting tensors without free water
contamination """
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import scipy.optimize as opt
from dipy.reconst.base import ReconstModel
from dipy.reconst.dti import (TensorFit, design_matrix, decompose_tensor,
_decompose_tensor_nan, from_lower_triangular,
lower_triangular)
from dipy.reconst.dki import _positive_evals
from dipy.reconst.vec_val_sum import vec_val_vect
from dipy.core.ndindex import ndindex
from dipy.reconst.multi_voxel import multi_voxel_fit
def fwdti_prediction(params, gtab, S0=1, Diso=3.0e-3):
    r""" Signal prediction given the free water DTI model parameters.
    Parameters
    ----------
    params : (..., 13) ndarray
        Model parameters. The last dimension should have the 12 tensor
        parameters (3 eigenvalues, followed by the 3 corresponding
        eigenvectors) and the volume fraction of the free water compartment.
    gtab : a GradientTable class instance
        The gradient table for this prediction
    S0 : float or ndarray
        The non diffusion-weighted signal in every voxel, or across all
        voxels. Default: 1
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    Returns
    --------
    S : (..., N) ndarray
        Simulated signal based on the free water DTI model
    Notes
    -----
    The predicted signal is given by:
    $S(\theta, b) = S_0 * [(1-f) * e^{-b ADC} + f * e^{-b D_{iso}]$, where
    $ADC = \theta Q \theta^T$, $\theta$ is a unit vector pointing at any
    direction on the sphere for which a signal is to be predicted, $b$ is the b
    value provided in the GradientTable input for that direction, $Q$ is the
    quadratic form of the tensor determined by the input parameters, $f$ is the
    free water diffusion compartment, $D_{iso}$ is the free water diffusivity
    which is equal to $3 * 10^{-3} mm^{2}s^{-1} [1]_.
    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    # Unpack the parameter vector: eigenvalues, eigenvectors, then f.
    evals = params[..., :3]
    evecs = params[..., 3:-1].reshape(params.shape[:-1] + (3, 3))
    f = params[..., 12]
    # Rebuild the tissue tensor quadratic form and express it in lower
    # triangular order (the 7-element form used with the design matrix,
    # which presumably carries the -log(S0) term -- see the 8-element
    # convention documented in _nls_err_func).
    qform = vec_val_vect(evecs, evals)
    lower_dt = lower_triangular(qform, S0)
    # Isotropic free water tensor: Diso on the diagonal, zeros off-diagonal.
    lower_diso = lower_dt.copy()
    lower_diso[..., 0] = lower_diso[..., 2] = lower_diso[..., 5] = Diso
    lower_diso[..., 1] = lower_diso[..., 3] = lower_diso[..., 4] = 0
    D = design_matrix(gtab)
    pred_sig = np.zeros(f.shape + (gtab.bvals.shape[0],))
    # Only voxels with three strictly positive eigenvalues are predicted;
    # all other voxels keep a zero signal.
    mask = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2])
    index = ndindex(f.shape)
    for v in index:
        if mask[v]:
            # Two-compartment mixture: tissue tensor weighted by (1 - f)
            # plus isotropic free water weighted by f.
            pred_sig[v] = (1 - f[v]) * np.exp(np.dot(lower_dt[v], D.T)) + \
                          f[v] * np.exp(np.dot(lower_diso[v], D.T))
    return pred_sig
class FreeWaterTensorModel(ReconstModel):
    """ Class for the Free Water Elimination Diffusion Tensor Model """
    def __init__(self, gtab, fit_method="NLS", *args, **kwargs):
        """ Free Water Diffusion Tensor Model [1]_.
        Parameters
        ----------
        gtab : GradientTable class instance
        fit_method : str or callable
            str can be one of the following:
            'WLS' for weighted linear least square fit according to [1]_
                :func:`fwdti.wls_iter`
            'NLS' for non-linear least square fit according to [1]_
                :func:`fwdti.nls_iter`
            callable has to have the signature:
                fit_method(design_matrix, data, *args, **kwargs)
        args, kwargs : arguments and key-word arguments passed to the
           fit_method. See fwdti.wls_iter, fwdti.nls_iter for
           details
        Raises
        ------
        ValueError
            If ``fit_method`` is an unknown name, or if the gradient table
            does not contain at least two non-zero b-values.
        References
        ----------
        .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
               Optimization of a free water elimination two-compartmental model
               for diffusion tensor imaging. NeuroImage 103, 323-333.
               doi: 10.1016/j.neuroimage.2014.09.053
        """
        ReconstModel.__init__(self, gtab)
        if not callable(fit_method):
            try:
                # common_fit_methods maps method names ('WLS', 'NLS', ...)
                # to fit functions; it is defined elsewhere in this module.
                fit_method = common_fit_methods[fit_method]
            except KeyError:
                e_s = '"' + str(fit_method) + '" is not a known fit '
                e_s += 'method, the fit method should either be a '
                e_s += 'function or one of the common fit methods'
                raise ValueError(e_s)
        self.fit_method = fit_method
        self.design_matrix = design_matrix(self.gtab)
        self.args = args
        self.kwargs = kwargs
        # Check if at least three b-values are given
        # b-values are rescaled by their order of magnitude and rounded so
        # that nominally equal shells compare equal.
        bmag = int(np.log10(self.gtab.bvals.max()))
        b = self.gtab.bvals.copy() / (10 ** (bmag-1))  # normalize b units
        b = b.round()
        uniqueb = np.unique(b)
        # Three unique rounded b-values are required: b=0 plus at least two
        # distinct non-zero shells.
        if len(uniqueb) < 3:
            mes = "fwdti fit requires data for at least 2 non zero b-values"
            raise ValueError(mes)
    @multi_voxel_fit
    def fit(self, data, mask=None):
        """ Fit method of the free water elimination DTI model class
        Parameters
        ----------
        data : array
            The measured signal from one voxel.
        mask : array
            A boolean array used to mark the coordinates in the data that
            should be analyzed that has the shape data.shape[:-1]
        Returns
        -------
        FreeWaterTensorFit
            Fit object wrapping the estimated 13 model parameters.
        """
        # Mean b=0 signal for this voxel (the multi_voxel_fit decorator
        # feeds one voxel's signal at a time).
        S0 = np.mean(data[self.gtab.b0s_mask])
        fwdti_params = self.fit_method(self.design_matrix, data, S0,
                                       *self.args, **self.kwargs)
        return FreeWaterTensorFit(self, fwdti_params)
    def predict(self, fwdti_params, S0=1):
        """ Predict a signal for this TensorModel class instance given
        parameters.
        Parameters
        ----------
        fwdti_params : (..., 13) ndarray
            The last dimension should have 13 parameters: the 12 tensor
            parameters (3 eigenvalues, followed by the 3 corresponding
            eigenvectors) and the free water volume fraction.
        S0 : float or ndarray
            The non diffusion-weighted signal in every voxel, or across all
            voxels. Default: 1
        Returns
        --------
        S : (..., N) ndarray
            Simulated signal based on the free water DTI model
        """
        return fwdti_prediction(fwdti_params, self.gtab, S0=S0)
class FreeWaterTensorFit(TensorFit):
    """ Fit result of the free water elimination diffusion tensor model.

    Since the free water tensor model is an extension of DTI, this class
    is defined as a subclass of :class:`TensorFit` from dti.py, adding
    access to the free water volume fraction and free-water-aware signal
    prediction.
    """
    def __init__(self, model, model_params):
        """ Build a fit object from a model and its estimated parameters.

        Parameters
        ----------
        model : FreeWaterTensorModel Class instance
            Class instance containing the free water tensor model used to
            produce this fit.
        model_params : ndarray (x, y, z, 13) or (n, 13)
            All parameters estimated from the free water tensor model,
            ordered as: the three diffusion tensor eigenvalues, the three
            rows of the eigenvector matrix, and finally the volume
            fraction of the free water compartment.
        """
        super(FreeWaterTensorFit, self).__init__(model, model_params)

    @property
    def f(self):
        """ The free water diffusion volume fraction (last parameter). """
        return self.model_params[..., 12]

    def predict(self, gtab, S0=1):
        r""" Given this free water tensor model fit, predict the signal on
        the vertices of a gradient table.

        Parameters
        ----------
        gtab : a GradientTable class instance
            The gradient table for this prediction.
        S0 : float array
            The mean non-diffusion weighted signal in each voxel.
            Default: 1 in all voxels.

        Returns
        -------
        S : (..., N) ndarray
            Simulated signal based on the free water DTI model.
        """
        return fwdti_prediction(self.model_params, gtab, S0=S0)
def wls_iter(design_matrix, sig, S0, Diso=3e-3, mdreg=2.7e-3,
             min_signal=1.0e-6, piterations=3):
    """ Applies weighted linear least squares fit of the water free elimination
    model to single voxel signals.
    Parameters
    ----------
    design_matrix : array (g, 7)
        Design matrix holding the covariants used to solve for the regression
        coefficients.
    sig : array (g, )
        Diffusion-weighted signal for a single voxel data.
    S0 : float
        Non diffusion weighted signal (i.e. signal for b-value=0).
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default md_reg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: minimal signal in the data provided to `fit`.
    piterations : int, optional
        Number of iterations used to refine the precision of f. Default is set
        to 3 corresponding to a precision of 0.01.
    Returns
    -------
    All parameters estimated from the free water tensor model.
    Parameters are ordered as follows:
        1) Three diffusion tensor's eigenvalues
        2) Three lines of the eigenvector matrix each containing the
           first, second and third coordinates of the eigenvector
        3) The volume fraction of the free water compartment
    """
    W = design_matrix
    # DTI ordinary linear least square solution
    log_s = np.log(np.maximum(sig, min_signal))
    # Define weights
    S2 = np.diag(sig**2)
    # DTI weighted linear least square solution
    WTS2 = np.dot(W.T, S2)
    inv_WT_S2_W = np.linalg.pinv(np.dot(WTS2, W))
    invWTS2W_WTS2 = np.dot(inv_WT_S2_W, WTS2)
    params = np.dot(invWTS2W_WTS2, log_s)
    # Mean diffusivity from the tensor's diagonal elements (lower
    # triangular order: indices 0, 2 and 5 are Dxx, Dyy, Dzz).
    md = (params[0] + params[2] + params[5]) / 3
    # Process voxel if it has significant signal from tissue
    if md < mdreg and np.mean(sig) > min_signal and S0 > min_signal:
        # General free-water signal contribution
        fwsig = np.exp(np.dot(design_matrix,
                              np.array([Diso, 0, Diso, 0, 0, Diso, 0])))
        df = 1  # initialize precision
        flow = 0  # lower f evaluated
        fhig = 1  # higher f evaluated
        ns = 9  # initial number of samples per iteration
        # Grid search over f, refined by a factor of 10 per iteration.
        for p in range(piterations):
            df = df * 0.1
            fs = np.linspace(flow+df, fhig-df, num=ns)  # sampling f
            SFW = np.array([fwsig, ]*ns)  # repeat contributions for all values
            FS, SI = np.meshgrid(fs, sig)
            # Tissue-only signal: remove the candidate free water component.
            SA = SI - FS*S0*SFW.T
            # SA < 0 means that the free water component's signal is larger
            # than the total signal. These cases occur for inappropriately
            # large volume fractions (given the current S0 value estimated).
            # To overcome this issue negative SA are replaced by the data's
            # minimum positive signal.
            SA[SA <= 0] = min_signal
            y = np.log(SA / (1-FS))
            all_new_params = np.dot(invWTS2W_WTS2, y)
            # Select params for lower F2 (sum of squared prediction errors)
            SIpred = (1-FS)*np.exp(np.dot(W, all_new_params)) + FS*S0*SFW.T
            F2 = np.sum(np.square(SI - SIpred), axis=0)
            Mind = np.argmin(F2)
            params = all_new_params[:, Mind]
            f = fs[Mind]  # Updated f
            flow = f - df  # refining precision
            fhig = f + df
            ns = 19
        evals, evecs = decompose_tensor(from_lower_triangular(params))
        fw_params = np.concatenate((evals, evecs[0], evecs[1], evecs[2],
                                    np.array([f])), axis=0)
    else:
        # Voxel dominated by free water or with negligible signal: return
        # zeros, flagging f = 1 when the mean diffusivity is too high.
        fw_params = np.zeros(13)
        if md > mdreg:
            fw_params[12] = 1.0
    return fw_params
def wls_fit_tensor(gtab, data, Diso=3e-3, mask=None, min_signal=1.0e-6,
                   piterations=3, mdreg=2.7e-3):
    r""" Computes weighted least squares (WLS) fit to calculate self-diffusion
    tensor using a linear regression model [1]_.
    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    piterations : int, optional
        Number of iterations used to refine the precision of f. Default is set
        to 3 corresponding to a precision of 0.01.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default md_reg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).
    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters
        in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment.
    Raises
    ------
    ValueError
        If ``mask`` does not have the shape ``data.shape[:-1]``.
    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)
    # Prepare mask: default to fitting every voxel.
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)
    # Prepare S0: mean of the b=0 volumes in each voxel. Ellipsis indexing
    # supports any number of spatial dimensions (the previous
    # data[:, :, :, b0s_mask] indexing silently assumed exactly three,
    # contradicting the documented ([X, Y, Z, ...], g) shape).
    S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
    # Fit each masked voxel independently.
    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            # Bug fix: forward the caller-supplied Diso. Previously a
            # hard-coded Diso=3e-3 silently overrode the function's Diso
            # argument, making the parameter a no-op.
            params = wls_iter(W, data[v], S0[v], min_signal=min_signal,
                              Diso=Diso, piterations=piterations,
                              mdreg=mdreg)
            fw_params[v] = params
    return fw_params
def _nls_err_func(tensor_elements, design_matrix, data, Diso=3e-3,
                  weighting=None, sigma=None, cholesky=False,
                  f_transform=False):
    """ Error function for the non-linear least-squares fit of the tensor water
    elimination model.
    Parameters
    ----------
    tensor_elements : array (8, )
        The six independent elements of the diffusion tensor followed by
        -log(S0) and the volume fraction f of the water elimination
        compartment. Note that if cholesky is set to true, tensor elements are
        assumed to be written as Cholesky's decomposition elements. If
        f_transform is true, volume fraction f has to be converted to
        ft = arcsin(2*f - 1) + pi/2
    design_matrix : array
        The design matrix
    data : array
        The voxel signal in all gradient directions
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    weighting : str (optional).
        Whether to use the Geman McClure weighting criterion (see [1]_
        for details)
    sigma : float or float array (optional)
        If 'sigma' weighting is used, we will weight the error function
        according to the background noise estimated either in aggregate over
        all directions (when a float is provided), or to an estimate of the
        noise in each diffusion-weighting direction (if an array is
        provided). If 'gmm', the Geman-Mclure M-estimator is used for
        weighting.
    cholesky : bool, optional
        If true, the diffusion tensor elements were decomposed using cholesky
        decomposition. See fwdti.nls_fit_tensor
        Default: False
    f_transform : bool, optional
        If true, the water volume fraction was converted to
        ft = arcsin(2*f - 1) + pi/2, insuring f estimates between 0 and 1.
        See fwdti.nls_fit_tensor
        Default: True
    Returns
    -------
    array
        The (possibly weighted) residuals between the measured signal and
        the signal predicted by the current parameter estimate.
    """
    tensor = np.copy(tensor_elements)
    if cholesky:
        # cholesky_to_lower_triangular is defined elsewhere in this module.
        tensor[:6] = cholesky_to_lower_triangular(tensor[:6])
    if f_transform:
        # Invert the arcsin transform to recover f in [0, 1].
        f = 0.5 * (1 + np.sin(tensor[7] - np.pi/2))
    else:
        f = tensor[7]
    # This is the predicted signal given the params:
    y = (1-f) * np.exp(np.dot(design_matrix, tensor[:7])) + \
        f * np.exp(np.dot(design_matrix,
                          np.array([Diso, 0, Diso, 0, 0, Diso, tensor[6]])))
    # Compute the residuals
    residuals = data - y
    # If we don't want to weight the residuals, we are basically done:
    if weighting is None:
        # And we return the residuals (not the SSE; the caller presumably
        # squares and sums them):
        return residuals
    se = residuals ** 2
    # If the user provided a sigma (e.g 1.5267 * std(background_noise), as
    # suggested by Chang et al.) we will use it:
    if weighting == 'sigma':
        if sigma is None:
            e_s = "Must provide sigma value as input to use this weighting"
            e_s += " method"
            raise ValueError(e_s)
        w = 1/(sigma**2)
    elif weighting == 'gmm':
        # We use the Geman McClure M-estimator to compute the weights on the
        # residuals:
        C = 1.4826 * np.median(np.abs(residuals - np.median(residuals)))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            w = 1/(se + C**2)
        # The weights are normalized to the mean weight (see p. 1089):
        w = w/np.mean(w)
    # Return the weighted residuals:
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return np.sqrt(w * se)
def _nls_jacobian_func(tensor_elements, design_matrix, data, Diso=3e-3,
                       weighting=None, sigma=None, cholesky=False,
                       f_transform=False):
    """The Jacobian is the first derivative of the least squares error
    function.
    Parameters
    ----------
    tensor_elements : array (8, )
        The six independent elements of the diffusion tensor followed by
        -log(S0) and the volume fraction f of the water elimination
        compartment. Note that if f_transform is true, volume fraction f is
        converted to ft = arcsin(2*f - 1) + pi/2
    design_matrix : array
        The design matrix
    data : array
        The voxel signal in all gradient directions. Unused here --
        presumably accepted so this function's signature mirrors
        _nls_err_func's (TODO confirm against the caller).
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    weighting, sigma, cholesky : optional
        Unused here; accepted only so the signature matches _nls_err_func.
    f_transform : bool, optional
        If true, the water volume fraction was converted to
        ft = arcsin(2*f - 1) + pi/2, insuring f estimates between 0 and 1.
        See fwdti.nls_fit_tensor
        Default: True
    Returns
    -------
    array (g, 8)
        Partial derivatives of the predicted signal with respect to the
        seven tensor/S0 elements and (last column) the volume fraction.
    """
    tensor = np.copy(tensor_elements)
    if f_transform:
        # Invert the arcsin transform to recover f in [0, 1].
        f = 0.5 * (1 + np.sin(tensor[7] - np.pi/2))
    else:
        f = tensor[7]
    # t: tissue compartment signal; s: isotropic free water signal.
    t = np.exp(np.dot(design_matrix, tensor[:7]))
    s = np.exp(np.dot(design_matrix,
                      np.array([Diso, 0, Diso, 0, 0, Diso, tensor[6]])))
    # Derivatives w.r.t. the seven tensor/S0 elements.
    T = (f-1.0) * t[:, None] * design_matrix
    S = np.zeros(design_matrix.shape)
    S[:, 6] = f * s
    # Derivative w.r.t. the volume fraction (chain rule when transformed).
    if f_transform:
        df = (t-s) * (0.5*np.cos(tensor[7]-np.pi/2))
    else:
        df = (t-s)
    return np.concatenate((T - S, df[:, None]), axis=1)
def nls_iter(design_matrix, sig, S0, Diso=3e-3, mdreg=2.7e-3,
             min_signal=1.0e-6, cholesky=False, f_transform=True, jac=False,
             weighting=None, sigma=None):
    """ Applies non linear least squares fit of the water free elimination
    model to single voxel signals.
    Parameters
    ----------
    design_matrix : array (g, 7)
        Design matrix holding the covariants used to solve for the regression
        coefficients.
    sig : array (g, )
        Diffusion-weighted signal for a single voxel data.
    S0 : float
        Non diffusion weighted signal (i.e. signal for b-value=0).
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default md_reg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number.
    cholesky : bool, optional
        If true it uses cholesky decomposition to insure that diffusion tensor
        is positive define.
        Default: False
    f_transform : bool, optional
        If true, the water volume fractions is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, insuring f estimates between
        0 and 1.
        Default: True
    jac : bool
        Use the Jacobian? Default: False
    weighting: str, optional
        the weighting scheme to use in considering the
        squared-error. Default behavior is to use uniform weighting. Other
        options: 'sigma' 'gmm'
    sigma: float, optional
        If the 'sigma' weighting scheme is used, a value of sigma needs to be
        provided here. According to [Chang2005]_, a good value to use is
        1.5267 * std(background_noise), where background_noise is estimated
        from some part of the image known to contain no signal (only noise).
    Returns
    -------
    All parameters estimated from the free water tensor model.
    Parameters are ordered as follows:
        1) Three diffusion tensor's eigenvalues
        2) Three lines of the eigenvector matrix each containing the
           first, second and third coordinates of the eigenvector
        3) The volume fraction of the free water compartment.
    """
    # Initial guess: a weighted linear least-squares fit seeds the
    # non-linear optimization.
    params = wls_iter(design_matrix, sig, S0,
                      min_signal=min_signal, Diso=Diso, mdreg=mdreg)
    # Process voxel if it has significant signal from tissue. params[12] is
    # the WLS estimate of the water volume fraction f; a value near 1 means
    # the voxel is essentially free water and the WLS result is kept as-is.
    if params[12] < 0.99 and np.mean(sig) > min_signal and S0 > min_signal:
        # converting evals and evecs to diffusion tensor elements
        evals = params[:3]
        evecs = params[3:12].reshape((3, 3))
        dt = lower_triangular(vec_val_vect(evecs, evals))
        # Cholesky decomposition if requested (keeps the tensor positive
        # definite during the optimization)
        if cholesky:
            dt = lower_triangular_to_cholesky(dt)
        # f transformation if requested (keeps f within [0, 1] during the
        # optimization)
        if f_transform:
            f = np.arcsin(2*params[12] - 1) + np.pi/2
        else:
            f = params[12]
        # Use the Levenberg-Marquardt algorithm wrapped in opt.leastsq.
        # The optimized vector is [6 tensor elements, -log(S0), f].
        start_params = np.concatenate((dt, [-np.log(S0), f]), axis=0)
        if jac:
            this_tensor, status = opt.leastsq(_nls_err_func, start_params[:8],
                                              args=(design_matrix, sig, Diso,
                                                    weighting, sigma, cholesky,
                                                    f_transform),
                                              Dfun=_nls_jacobian_func)
        else:
            this_tensor, status = opt.leastsq(_nls_err_func, start_params[:8],
                                              args=(design_matrix, sig, Diso,
                                                    weighting, sigma, cholesky,
                                                    f_transform))
        # Process tissue diffusion tensor: undo the Cholesky parameterization
        # before eigen-decomposition.
        if cholesky:
            this_tensor[:6] = cholesky_to_lower_triangular(this_tensor[:6])
        # The WLS tensor (start_params) is used as a fallback for NaNs.
        evals, evecs = _decompose_tensor_nan(
            from_lower_triangular(this_tensor[:6]),
            from_lower_triangular(start_params[:6]))
        # Process water volume fraction f: undo the arcsin transform.
        f = this_tensor[7]
        if f_transform:
            f = 0.5 * (1 + np.sin(f - np.pi/2))
        params = np.concatenate((evals, evecs[0], evecs[1], evecs[2],
                                 np.array([f])), axis=0)
    return params
def nls_fit_tensor(gtab, data, mask=None, Diso=3e-3, mdreg=2.7e-3,
                   min_signal=1.0e-6, f_transform=True, cholesky=False,
                   jac=False, weighting=None, sigma=None):
    """
    Fit the water elimination tensor model using the non-linear least-squares.
    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming
        different units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default md_reg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    f_transform : bool, optional
        If true, the water volume fractions is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, insuring f estimates between
        0 and 1.
        Default: True
    cholesky : bool, optional
        If true it uses cholesky decomposition to insure that diffusion tensor
        is positive define.
        Default: False
    jac : bool
        Use the Jacobian? Default: False
    weighting: str, optional
        the weighting scheme to use in considering the
        squared-error. Default behavior is to use uniform weighting. Other
        options: 'sigma' 'gmm'
    sigma: float, optional
        If the 'sigma' weighting scheme is used, a value of sigma needs to be
        provided here. According to [Chang2005]_, a good value to use is
        1.5267 * std(background_noise), where background_noise is estimated
        from some part of the image known to contain no signal (only noise).
    Returns
    -------
    fw_params : ndarray ([X, Y, Z, ...], 13)
        Matrix containing in the last dimension the free water model
        parameters in the following order:
        1) Three diffusion tensor's eigenvalues
        2) Three lines of the eigenvector matrix each containing the
           first, second and third coordinates of the eigenvector
        3) The volume fraction of the free water compartment
    """
    # 13 parameters per voxel: 3 evals + 9 evec components + 1 fraction.
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0: mean of the b=0 volumes per voxel. BUG FIX: the previous
    # hard-coded index data[:, :, :, gtab.b0s_mask] only worked for 4D data,
    # although the docstring promises support for any number of leading
    # dimensions; an Ellipsis index handles all cases.
    S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)

    # Fit each masked voxel independently.
    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter(W, data[v], S0[v], Diso=Diso, mdreg=mdreg,
                              min_signal=min_signal, f_transform=f_transform,
                              cholesky=cholesky, jac=jac, weighting=weighting,
                              sigma=sigma)
            fw_params[v] = params
    return fw_params
def lower_triangular_to_cholesky(tensor_elements):
    """ Perfoms Cholesky decomposition of the diffusion tensor
    Parameters
    ----------
    tensor_elements : array (6,)
        Array containing the six elements of diffusion tensor's lower
        triangular.
    Returns
    -------
    cholesky_elements : array (6,)
        Array containing the six Cholesky's decomposition elements
        (R0, R1, R2, R3, R4, R5) [1]_.
    References
    ----------
    .. [1] Koay, C.G., Carew, J.D., Alexander, A.L., Basser, P.J.,
           Meyerand, M.E., 2006. Investigation of anomalous estimates of
           tensor-derived quantities in diffusion tensor imaging. Magnetic
           Resonance in Medicine, 55(4), 930-936. doi:10.1002/mrm.20832
    """
    # Lower-triangular order is (Dxx, Dxy, Dyy, Dxz, Dyz, Dzz).
    Dxx, Dxy, Dyy, Dxz, Dyz, Dzz = (tensor_elements[0], tensor_elements[1],
                                    tensor_elements[2], tensor_elements[3],
                                    tensor_elements[4], tensor_elements[5])
    R0 = np.sqrt(Dxx)
    R3 = Dxy / R0
    R1 = np.sqrt(Dyy - R3 ** 2)
    R5 = Dxz / R0
    R4 = (Dyz - R3 * R5) / R1
    R2 = np.sqrt(Dzz - R4 ** 2 - R5 ** 2)
    return np.array([R0, R1, R2, R3, R4, R5])
def cholesky_to_lower_triangular(R):
    """ Convert Cholesky decompostion elements to the diffusion tensor elements
    Parameters
    ----------
    R : array (6,)
        Array containing the six Cholesky's decomposition elements
        (R0, R1, R2, R3, R4, R5) [1]_.
    Returns
    -------
    tensor_elements : array (6,)
        Array containing the six elements of diffusion tensor's lower
        triangular.
    References
    ----------
    .. [1] Koay, C.G., Carew, J.D., Alexander, A.L., Basser, P.J.,
           Meyerand, M.E., 2006. Investigation of anomalous estimates of
           tensor-derived quantities in diffusion tensor imaging. Magnetic
           Resonance in Medicine, 55(4), 930-936. doi:10.1002/mrm.20832
    """
    # Reconstruct D = R R^T element-wise; output order is
    # (Dxx, Dxy, Dyy, Dxz, Dyz, Dzz).
    return np.array([
        R[0] * R[0],
        R[0] * R[3],
        R[1] * R[1] + R[3] * R[3],
        R[0] * R[5],
        R[1] * R[4] + R[3] * R[5],
        R[2] * R[2] + R[4] * R[4] + R[5] * R[5],
    ])
# Dispatch table mapping fit-method names to the per-voxel iteration
# functions; both spellings of each method name are accepted.
common_fit_methods = {'WLLS': wls_iter,
                      'WLS': wls_iter,
                      'NLLS': nls_iter,
                      'NLS': nls_iter,
                      }
|
|
from django.apps import AppConfig
from django.apps.registry import Apps
from django.db import models
from django.db.models.options import DEFAULT_NAMES, normalize_unique_together
from django.utils import six
from django.utils.module_loading import import_by_path
class InvalidBasesError(ValueError):
    """Raised when a ModelState's bases cannot (yet) be resolved during
    rendering; ProjectState.render catches it to retry deferred models."""
    pass
class ProjectState(object):
    """
    Represents the entire project's overall state.
    This is the item that is passed around - we do it here rather than at the
    app level so that cross-app FKs/etc. resolve properly.
    """
    def __init__(self, models=None):
        # Mapping of (app_label, lowercased model name) -> ModelState.
        self.models = models or {}
        # Lazily-built Apps registry; populated on first render().
        self.apps = None

    def add_model_state(self, model_state):
        key = (model_state.app_label, model_state.name.lower())
        self.models[key] = model_state

    def clone(self):
        "Returns an exact copy of this ProjectState"
        cloned = dict((key, state.clone())
                      for key, state in self.models.items())
        return ProjectState(models=cloned)

    def render(self):
        "Turns the project state into actual models in a new Apps"
        if self.apps is None:
            # Populate the app registry with a stub for each application.
            labels = set(state.app_label for state in self.models.values())
            self.apps = Apps([AppConfigStub(label)
                              for label in sorted(labels)])
            # Render repeatedly, deferring models whose bases are not
            # resolvable yet. Stop as soon as a full pass makes no
            # progress - that means a base dependency loop or a genuinely
            # missing base.
            pending = list(self.models.values())
            while pending:
                deferred = []
                for model_state in pending:
                    try:
                        model_state.render(self.apps)
                    except InvalidBasesError:
                        deferred.append(model_state)
                if len(deferred) == len(pending):
                    raise InvalidBasesError("Cannot resolve bases for %r" % deferred)
                pending = deferred
        return self.apps

    @classmethod
    def from_apps(cls, apps):
        "Takes in an Apps and returns a ProjectState matching it"
        states = {}
        for model in apps.get_models():
            state = ModelState.from_model(model)
            states[(state.app_label, state.name.lower())] = state
        return cls(states)

    def __eq__(self, other):
        # Key sets must match, then every state must compare equal.
        if set(self.models) != set(other.models):
            return False
        return all(state == other.models[key]
                   for key, state in self.models.items())

    def __ne__(self, other):
        return not (self == other)
class AppConfigStub(AppConfig):
    """
    Stubs a Django AppConfig. Only provides a label, and a dict of models.
    """
    # Not used, but required by AppConfig.__init__
    path = ''
    def __init__(self, label):
        # No importable app module exists for a stub, hence None.
        super(AppConfigStub, self).__init__(label, None)
    def import_models(self, all_models):
        # Bypass filesystem imports entirely; models are injected directly
        # by the rendering machinery.
        self.models = all_models
class ModelState(object):
    """
    Represents a Django Model. We don't use the actual Model class
    as it's not designed to have its options changed - instead, we
    mutate this one and then render it into a Model as required.
    Note that while you are allowed to mutate .fields, you are not allowed
    to mutate the Field instances inside there themselves - you must instead
    assign new ones, as these are not detached during a clone.
    """
    def __init__(self, app_label, name, fields, options=None, bases=None):
        self.app_label = app_label
        self.name = name
        # Ordered list of (field_name, field_instance) 2-tuples.
        self.fields = fields
        self.options = options or {}
        # Bases may be model classes or "app_label.modelname" strings that
        # get resolved at render() time.
        self.bases = bases or (models.Model, )
        # Sanity-check that fields is NOT a dict. It must be ordered.
        if isinstance(self.fields, dict):
            raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
    @classmethod
    def from_model(cls, model):
        """
        Feed me a model, get a ModelState representing it out.
        """
        # Deconstruct the fields. deconstruct() yields a dotted import path
        # plus the args/kwargs needed to build an equivalent Field instance,
        # so the state holds fresh copies detached from the original model.
        fields = []
        for field in model._meta.local_fields:
            name, path, args, kwargs = field.deconstruct()
            field_class = import_by_path(path)
            try:
                fields.append((name, field_class(*args, **kwargs)))
            except TypeError as e:
                raise TypeError("Couldn't reconstruct field %s on %s.%s: %s" % (
                    name,
                    model._meta.app_label,
                    model._meta.object_name,
                    e,
                ))
        for field in model._meta.local_many_to_many:
            name, path, args, kwargs = field.deconstruct()
            field_class = import_by_path(path)
            try:
                fields.append((name, field_class(*args, **kwargs)))
            except TypeError as e:
                raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
                    name,
                    model._meta.object_name,
                    e,
                ))
        # Extract the options. Only options the user set explicitly
        # (original_attrs) are recorded.
        options = {}
        for name in DEFAULT_NAMES:
            # Ignore some special options
            if name in ["apps", "app_label"]:
                continue
            elif name in model._meta.original_attrs:
                if name == "unique_together":
                    # Normalize to the canonical tuple-of-tuples form.
                    ut = model._meta.original_attrs["unique_together"]
                    options[name] = set(normalize_unique_together(ut))
                else:
                    options[name] = model._meta.original_attrs[name]
        # Make our record. Concrete model bases are stored as
        # "app_label.modelname" strings, abstract bases dropped, and any
        # other base classes kept as-is.
        bases = tuple(
            (
                "%s.%s" % (base._meta.app_label, base._meta.model_name)
                if hasattr(base, "_meta") else
                base
            )
            for base in model.__bases__
            if (not hasattr(base, "_meta") or not base._meta.abstract)
        )
        # Ensure at least one base inherits from models.Model
        if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
            bases = (models.Model, )
        return cls(
            model._meta.app_label,
            model._meta.object_name,
            fields,
            options,
            bases,
        )
    def clone(self):
        "Returns an exact copy of this ModelState"
        # We deep-clone the fields using deconstruction, so mutations on the
        # clone's fields never leak back into this instance.
        fields = []
        for name, field in self.fields:
            _, path, args, kwargs = field.deconstruct()
            field_class = import_by_path(path)
            fields.append((name, field_class(*args, **kwargs)))
        # Now make a copy
        return self.__class__(
            app_label=self.app_label,
            name=self.name,
            fields=fields,
            options=dict(self.options),
            bases=self.bases,
        )
    def render(self, apps):
        "Creates a Model object from our current state into the given apps"
        # First, make a Meta object
        meta_contents = {'app_label': self.app_label, "apps": apps}
        meta_contents.update(self.options)
        if "unique_together" in meta_contents:
            # Meta expects a list/tuple; the state stores a set.
            meta_contents["unique_together"] = list(meta_contents["unique_together"])
        meta = type("Meta", tuple(), meta_contents)
        # Then, work out our bases. String bases may not be registered in
        # `apps` yet; signal that with InvalidBasesError so the caller can
        # retry after other models have rendered.
        try:
            bases = tuple(
                (apps.get_model(base) if isinstance(base, six.string_types) else base)
                for base in self.bases
            )
        except LookupError:
            raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
        # Turn fields into a dict for the body, add other bits
        body = dict(self.fields)
        body['Meta'] = meta
        body['__module__'] = "__fake__"
        # Then, make a Model object
        return type(
            self.name,
            bases,
            body,
        )
    def get_field_by_name(self, name):
        # Linear scan keeps the ordered fields list authoritative.
        for fname, field in self.fields:
            if fname == name:
                return field
        raise ValueError("No field called %s on model %s" % (name, self.name))
    def __eq__(self, other):
        # Fields compare by their deconstructed (path, args, kwargs)
        # signatures rather than by instance identity.
        return (
            (self.app_label == other.app_label) and
            (self.name == other.name) and
            (len(self.fields) == len(other.fields)) and
            all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:])) for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
            (self.options == other.options) and
            (self.bases == other.bases)
        )
    def __ne__(self, other):
        return not (self == other)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import inspect
import logging
from django.core import urlresolvers
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django import template
from django.template.defaultfilters import linebreaks
from django.template.defaultfilters import safe
from django.template.defaultfilters import slugify
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
# FIXME: TableStep
from django.utils.datastructures import SortedDict
from horizon import base
from horizon import exceptions
from horizon.templatetags.horizon import has_permissions
from horizon.utils import html
# Module-level logger for the workflows framework.
LOG = logging.getLogger(__name__)
class WorkflowContext(dict):
    """A ``dict`` subclass bound to a workflow.

    Every assignment notifies the owning workflow so that connection
    handlers watching the changed key are triggered. Keys are never truly
    removed: "deleting" a key stores ``None`` instead, which still fires
    the relevant handlers.
    """
    def __init__(self, workflow, *args, **kwargs):
        super(WorkflowContext, self).__init__(*args, **kwargs)
        self._workflow = workflow

    def __setitem__(self, key, val):
        super(WorkflowContext, self).__setitem__(key, val)
        # Let the workflow dispatch any handlers connected to this key.
        return self._workflow._trigger_handlers(key)

    def __delitem__(self, key):
        # Deletion is modeled as setting the key to None so that
        # connected handlers still fire.
        return self.__setitem__(key, None)

    def set(self, key, val):
        # Method-call alias for item assignment.
        return self.__setitem__(key, val)

    def unset(self, key):
        # Method-call alias for item "deletion".
        return self.__delitem__(key)
class ActionMetaclass(forms.forms.DeclarativeFieldsMetaclass):
    """Metaclass that copies ``Meta`` options onto each ``Action`` class."""
    def __new__(mcs, name, bases, attrs):
        # Remove Meta before the form machinery processes the attributes.
        meta = attrs.pop("Meta", None)
        # Create our new class
        cls = super(ActionMetaclass, mcs).__new__(mcs, name, bases, attrs)
        # Lift every recognized Meta option onto the class, falling back
        # to defaults derived from the class name.
        option_defaults = (("name", name),
                           ("slug", slugify(name)),
                           ("permissions", ()),
                           ("progress_message", _("Processing...")),
                           ("help_text", ""),
                           ("help_text_template", None))
        for option, default in option_defaults:
            setattr(cls, option, getattr(meta, option, default))
        return cls
class Action(forms.Form):
    """
    An ``Action`` represents an atomic logical interaction you can have with
    the system. Conceptually, in a "launch instance" workflow the actions
    would include "naming the instance", "selecting an image", and finally
    "launching the instance".

    Actions are always interactive, so they provide form controls and
    inherit from Django's ``Form`` class, with extra intelligence layered
    on top:

    * ``Actions`` know the permissions required to complete them.
    * ``Actions`` carry a meta-level notion of "help text" designed to give
      context to the action wherever it appears in a site or workflow.
    * ``Actions`` know how to process their inputs and produce outputs,
      much like :class:`~horizon.forms.SelfHandlingForm` does now.

    ``Action`` classes may define the following attributes in a ``Meta``
    class within them:

    .. attribute:: name
        The verbose name for this action. Defaults to the name of the class.
    .. attribute:: slug
        A semi-unique slug for this action. Defaults to the "slugified" name
        of the class.
    .. attribute:: permissions
        A list of permission names which this action requires in order to be
        completed. Defaults to an empty list (``[]``).
    .. attribute:: help_text
        A string of simple help text to be displayed alongside the Action's
        fields.
    .. attribute:: help_text_template
        A path to a template which contains more complex help text to be
        displayed alongside the Action's fields. In conjunction with
        :meth:`~horizon.workflows.Action.get_help_text` method you can
        customize your help text template to display practically anything.
    """
    __metaclass__ = ActionMetaclass

    def __init__(self, request, context, *args, **kwargs):
        # Bind POSTed data when handling a submission; otherwise the
        # context only seeds the form's initial values.
        if request.method == "POST":
            super(Action, self).__init__(request.POST, initial=context)
        else:
            super(Action, self).__init__(initial=context)
        if not hasattr(self, "handle"):
            raise AttributeError("The action %s must define a handle method."
                                 % self.__class__.__name__)
        self.request = request
        self._populate_choices(request, context)

    def __unicode__(self):
        return force_unicode(self.name)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)

    def _populate_choices(self, request, context):
        # A "populate_<field>_choices" method, if defined, supplies the
        # choices for the matching form field dynamically.
        for field_name in self.fields:
            populator = getattr(self, "populate_%s_choices" % field_name,
                                None)
            if callable(populator):
                self.fields[field_name].choices = populator(request, context)

    def get_help_text(self, extra_context=None):
        """ Returns the help text for this step. """
        context_data = extra_context or {}
        if self.help_text_template:
            tmpl = template.loader.get_template(self.help_text_template)
            request_context = template.RequestContext(self.request,
                                                      context_data)
            rendered = tmpl.render(request_context)
        else:
            rendered = linebreaks(force_unicode(self.help_text))
        return safe(rendered)

    def add_error(self, message):
        """
        Adds an error to the Action's Step based on API issues.
        """
        self._get_errors()[NON_FIELD_ERRORS] = self.error_class([message])

    def handle(self, request, context):
        """
        Handles any requisite processing for this action. The method should
        return either ``None`` or a dictionary of data to be passed to
        :meth:`~horizon.workflows.Step.contribute`.
        Returns ``None`` by default, effectively making it a no-op.
        """
        return None
class Step(object):
    """
    A step is a wrapper around an action which defines it's context in a
    workflow. It knows about details such as:
    * The workflow's context data (data passed from step to step).
    * The data which must be present in the context to begin this step (the
      step's dependencies).
    * The keys which will be added to the context data upon completion of the
      step.
    * The connections between this step's fields and changes in the context
      data (e.g. if that piece of data changes, what needs to be updated in
      this step).
    A ``Step`` class has the following attributes:
    .. attribute:: action
        The :class:`~horizon.workflows.Action` class which this step wraps.
    .. attribute:: depends_on
        A list of context data keys which this step requires in order to
        begin interaction.
    .. attribute:: contributes
        A list of keys which this step will contribute to the workflow's
        context data. Optional keys should still be listed, even if their
        values may be set to ``None``.
    .. attribute:: connections
        A dictionary which maps context data key names to lists of callbacks.
        The callbacks may be functions, dotted python paths to functions
        which may be imported, or dotted strings beginning with ``"self"``
        to indicate methods on the current ``Step`` instance.
    .. attribute:: before
        Another ``Step`` class. This optional attribute is used to provide
        control over workflow ordering when steps are dynamically added to
        workflows. The workflow mechanism will attempt to place the current
        step before the step specified in the attribute.
    .. attribute:: after
        Another ``Step`` class. This attribute has the same purpose as
        :meth:`~horizon.workflows.Step.before` except that it will instead
        attempt to place the current step after the given step.
    .. attribute:: help_text
        A string of simple help text which will be prepended to the ``Action``
        class' help text if desired.
    .. attribute:: template_name
        A path to a template which will be used to render this step. In
        general the default common template should be used. Default:
        ``"horizon/common/_workflow_step.html"``.
    .. attribute:: has_errors
        A boolean value which indicates whether or not this step has any
        errors on the action within it or in the scope of the workflow. This
        attribute will only accurately reflect this status after validation
        has occurred.
    .. attribute:: slug
        Inherited from the ``Action`` class.
    .. attribute:: name
        Inherited from the ``Action`` class.
    .. attribute:: permissions
        Inherited from the ``Action`` class.
    """
    action_class = None
    depends_on = ()
    contributes = ()
    connections = None
    before = None
    after = None
    help_text = ""
    template_name = "horizon/common/_workflow_step.html"

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)

    def __unicode__(self):
        return force_unicode(self.name)

    def __init__(self, workflow):
        super(Step, self).__init__()
        self.workflow = workflow
        cls = self.__class__.__name__
        if not (self.action_class and issubclass(self.action_class, Action)):
            raise AttributeError("You must specify an action for %s." % cls)

        self.slug = self.action_class.slug
        self.name = self.action_class.name
        self.permissions = self.action_class.permissions
        self.has_errors = False
        self._handlers = {}

        if self.connections is None:
            # We want a dict, but don't want to declare a mutable type on the
            # class directly.
            self.connections = {}

        # Gather our connection handlers and make sure they exist.
        for key, handlers in self.connections.items():
            self._handlers[key] = []
            # TODO(gabriel): This is a poor substitute for broader handling
            if not isinstance(handlers, (list, tuple)):
                raise TypeError("The connection handlers for %s must be a "
                                "list or tuple." % cls)
            for possible_handler in handlers:
                if callable(possible_handler):
                    # If it's callable we know the function exists and is valid
                    self._handlers[key].append(possible_handler)
                elif not isinstance(possible_handler, basestring):
                    # BUG FIX: this was ``return TypeError(...)``, which made
                    # __init__ fail with Python's generic "__init__() should
                    # return None" error instead of raising the intended
                    # message.
                    raise TypeError("Connection handlers must be either "
                                    "callables or strings.")
                else:
                    handler = self._resolve_string_handler(possible_handler,
                                                           cls)
                    self._handlers[key].append(handler)

    def _resolve_string_handler(self, possible_handler, cls):
        """Resolves a dotted-string connection handler to a callable.

        Strings beginning with ``"self"`` are looked up as attributes on
        this step instance; other dotted paths are imported from their
        module. Raises an appropriate error if resolution fails.
        """
        bits = possible_handler.split(".")
        if bits[0] == "self":
            root = self
            for bit in bits[1:]:
                try:
                    root = getattr(root, bit)
                except AttributeError:
                    raise AttributeError("The connection handler %s "
                                         "could not be found on %s."
                                         % (possible_handler, cls))
            return root
        elif len(bits) == 1:
            # Import by name from local module not supported
            raise ValueError("Importing a local function as a string "
                             "is not supported for the connection "
                             "handler %s on %s."
                             % (possible_handler, cls))
        else:
            # Try a general import
            module_name = ".".join(bits[:-1])
            try:
                mod = import_module(module_name)
                return getattr(mod, bits[-1])
            except ImportError:
                raise ImportError("Could not import %s from the "
                                  "module %s as a connection "
                                  "handler on %s."
                                  % (bits[-1], module_name, cls))
            except AttributeError:
                raise AttributeError("Could not import %s from the "
                                     "module %s as a connection "
                                     "handler on %s."
                                     % (bits[-1], module_name, cls))

    @property
    def action(self):
        # Lazily instantiate (and cache) the wrapped Action.
        if not getattr(self, "_action", None):
            try:
                # Hook in the action context customization.
                workflow_context = dict(self.workflow.context)
                context = self.prepare_action_context(self.workflow.request,
                                                      workflow_context)
                self._action = self.action_class(self.workflow.request,
                                                 context)
            except:
                # Log for diagnosis, then let the original error propagate.
                LOG.exception("Problem instantiating action class.")
                raise
        return self._action

    def prepare_action_context(self, request, context):
        """
        Allows for customization of how the workflow context is passed to the
        action; this is the reverse of what "contribute" does to make the
        action outputs sane for the workflow. Changes to the context are not
        saved globally here. They are localized to the action.
        Simply returns the unaltered context by default.
        """
        return context

    def get_id(self):
        """ Returns the ID for this step. Suitable for use in HTML markup. """
        return "%s__%s" % (self.workflow.slug, self.slug)

    def _verify_contributions(self, context):
        for key in self.contributes:
            # Make sure we don't skip steps based on weird behavior of
            # POST query dicts.
            field = self.action.fields.get(key, None)
            if field and field.required and not context.get(key):
                context.pop(key, None)
        failed_to_contribute = set(self.contributes)
        failed_to_contribute -= set(context.keys())
        if failed_to_contribute:
            raise exceptions.WorkflowError("The following expected data was "
                                           "not added to the workflow context "
                                           "by the step %s: %s."
                                           % (self.__class__,
                                              failed_to_contribute))
        return True

    def contribute(self, data, context):
        """
        Adds the data listed in ``contributes`` to the workflow's shared
        context. By default, the context is simply updated with all the data
        returned by the action.
        Note that even if the value of one of the ``contributes`` keys is
        not present (e.g. optional) the key should still be added to the
        context with a value of ``None``.
        """
        if data:
            for key in self.contributes:
                context[key] = data.get(key, None)
        return context

    def render(self):
        """ Renders the step. """
        step_template = template.loader.get_template(self.template_name)
        extra_context = {"form": self.action,
                         "step": self}
        # FIXME: TableStep:
        if issubclass(self.__class__, TableStep):
            extra_context.update(self.get_context_data(self.workflow.request))
        context = template.RequestContext(self.workflow.request, extra_context)
        return step_template.render(context)

    def get_help_text(self):
        """ Returns the help text for this step. """
        text = linebreaks(force_unicode(self.help_text))
        text += self.action.get_help_text()
        return safe(text)

    def add_error(self, message):
        """
        Adds an error to the Step based on API issues.
        """
        self.action.add_error(message)
# FIXME: TableStep
class TableStep(Step):
    """
    A :class:`~horizon.workflows.Step` class which knows how to deal with
    :class:`~horizon.tables.DataTable` classes rendered inside of it.
    This distinct class is required due to the complexity involved in handling
    both dynamic tab loading, dynamic table updating and table actions all
    within one view.
    .. attribute:: table_classes
        An iterable containing the :class:`~horizon.tables.DataTable` classes
        which this tab will contain. Equivalent to the
        :attr:`~horizon.tables.MultiTableView.table_classes` attribute on
        :class:`~horizon.tables.MultiTableView`. For each table class you
        need to define a corresponding ``get_{{ table_name }}_data`` method
        as with :class:`~horizon.tables.MultiTableView`.
    """
    table_classes = None

    def __init__(self, workflow):
        super(TableStep, self).__init__(workflow)
        if not self.table_classes:
            class_name = self.__class__.__name__
            # BUG FIX: the message previously referred to "table_class";
            # the attribute is actually named ``table_classes``.
            raise NotImplementedError("You must define a table_classes "
                                      "attribute on %s" % class_name)
        # Instantiate our table classes but don't assign data yet
        table_instances = [(table._meta.name,
                            table(workflow.request, needs_form_wrapper=False))
                           for table in self.table_classes]
        self._tables = SortedDict(table_instances)
        self._table_data_loaded = False

    def load_table_data(self):
        """
        Calls the ``get_{{ table_name }}_data`` methods for each table class
        and sets the data on the tables.
        """
        # We only want the data to be loaded once, so we track if we have...
        if not self._table_data_loaded:
            for table_name, table in self._tables.items():
                # Fetch the data function.
                func_name = "get_%s_data" % table_name
                data_func = getattr(self, func_name, None)
                if data_func is None:
                    cls_name = self.__class__.__name__
                    raise NotImplementedError("You must define a %s method "
                                              "on %s." % (func_name, cls_name))
                # Load the data.
                table.data = data_func()
                table._meta.has_more_data = self.has_more_data(table)
            # Mark our data as loaded so we don't run the loaders again.
            self._table_data_loaded = True

    def get_context_data(self, request):
        """
        Adds a ``{{ table_name }}_table`` item to the context for each table
        in the :attr:`~horizon.tabs.TableTab.table_classes` attribute.
        If only one table class is provided, a shortcut ``table`` context
        variable is also added containing the single table.
        """
        context = {}
        # If the data hasn't been manually loaded before now,
        # make certain it's loaded before setting the context.
        self.load_table_data()
        for table_name, table in self._tables.items():
            # If there's only one table class, add a shortcut name as well.
            if len(self.table_classes) == 1:
                context["table"] = table
            context["%s_table" % table_name] = table
        return context

    def has_more_data(self, table):
        # Pagination hook; subclasses may override to report more rows.
        return False
class WorkflowMetaclass(type):
    """Metaclass that gives every Workflow class its own step registry."""
    def __new__(mcs, name, bases, attrs):
        # Each concrete class gets a fresh registry set so subclasses never
        # share registration state through a class-level mutable default.
        # NOTE: the previous implementation also called
        # super().__new__(mcs, name, bases, attrs) first and discarded the
        # result, constructing a throwaway class for no effect; that dead
        # call has been removed.
        attrs["_cls_registry"] = set([])
        return type.__new__(mcs, name, bases, attrs)
class UpdateMembersStep(Step):
    """A step that allows a user to add/remove members from a group.

    .. attribute:: show_roles

       Set to False to disable the display of the roles dropdown.

    .. attribute:: available_list_title

       The title used for the available list column.

    .. attribute:: members_list_title

       The title used for the members list column.

    .. attribute:: no_available_text

       The placeholder text used when the available list is empty.

    .. attribute:: no_members_text

       The placeholder text used when the members list is empty.
    """
    # Template implementing the two-column add/remove member widget.
    template_name = "horizon/common/_workflow_step_update_members.html"
    show_roles = True
    available_list_title = _("All available")
    members_list_title = _("Members")
    no_available_text = _("None available.")
    no_members_text = _("No members.")
class Workflow(html.HTMLElement):
    """
    A Workflow is a collection of Steps. It's interface is very
    straightforward, but it is responsible for handling some very
    important tasks such as:

    * Handling the injection, removal, and ordering of arbitrary steps.

    * Determining if the workflow can be completed by a given user at runtime
      based on all available information.

    * Dispatching connections between steps to ensure that when context data
      changes all the applicable callback functions are executed.

    * Verifying/validating the overall data integrity and subsequently
      triggering the final method to complete the workflow.

    The ``Workflow`` class has the following attributes:

    .. attribute:: name

       The verbose name for this workflow which will be displayed to the user.
       Defaults to the class name.

    .. attribute:: slug

       The unique slug for this workflow. Required.

    .. attribute:: steps

       Read-only access to the final ordered set of step instances for
       this workflow.

    .. attribute:: default_steps

       A list of :class:`~horizon.workflows.Step` classes which serve as the
       starting point for this workflow's ordered steps. Defaults to an empty
       list (``[]``).

    .. attribute:: finalize_button_name

       The name which will appear on the submit button for the workflow's
       form. Defaults to ``"Save"``.

    .. attribute:: success_message

       A string which will be displayed to the user upon successful completion
       of the workflow. Defaults to
       ``"{{ workflow.name }} completed successfully."``

    .. attribute:: failure_message

       A string which will be displayed to the user upon failure to complete
       the workflow. Defaults to ``"{{ workflow.name }} did not complete."``

    .. attribute:: depends_on

       A roll-up list of all the ``depends_on`` values compiled from the
       workflow's steps.

    .. attribute:: contributions

       A roll-up list of all the ``contributes`` values compiled from the
       workflow's steps.

    .. attribute:: template_name

       Path to the template which should be used to render this workflow.
       In general the default common template should be used. Default:
       ``"horizon/common/_workflow.html"``.

    .. attribute:: entry_point

       The slug of the step which should initially be active when the
       workflow is rendered. This can be passed in upon initialization of
       the workflow, or set anytime after initialization but before calling
       either ``get_entry_point`` or ``render``.

    .. attribute:: redirect_param_name

       The name of a parameter used for tracking the URL to redirect to upon
       completion of the workflow. Defaults to ``"next"``.

    .. attribute:: object

       The object (if any) which this workflow relates to. In the case of
       a workflow which creates a new resource the object would be the created
       resource after the relevant creation steps have been undertaken. In
       the case of a workflow which updates a resource it would be the
       resource being updated after it has been retrieved.
    """
    __metaclass__ = WorkflowMetaclass
    slug = None
    default_steps = ()
    template_name = "horizon/common/_workflow.html"
    finalize_button_name = _("Save")
    success_message = _("%s completed successfully.")
    failure_message = _("%s did not complete.")
    redirect_param_name = "next"
    multipart = False
    _registerable_class = Step

    def __unicode__(self):
        return self.name

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)

    def __init__(self, request=None, context_seed=None, entry_point=None,
                 *args, **kwargs):
        super(Workflow, self).__init__(*args, **kwargs)
        if self.slug is None:
            raise AttributeError("The workflow %s must have a slug."
                                 % self.__class__.__name__)
        self.name = getattr(self, "name", self.__class__.__name__)
        self.request = request
        self.depends_on = set([])
        self.contributions = set([])
        self.entry_point = entry_point
        self.object = None

        # Put together our steps in order. Note that we pre-register
        # non-default steps so that we can identify them and subsequently
        # insert them in order correctly.
        self._registry = dict([(step_class, step_class(self)) for step_class
                               in self.__class__._cls_registry
                               if step_class not in self.default_steps])
        self._gather_steps()

        # Determine all the context data we need to end up with.
        for step in self.steps:
            self.depends_on = self.depends_on | set(step.depends_on)
            self.contributions = self.contributions | set(step.contributes)

        # Initialize our context. For ease we can preseed it with a
        # regular dictionary. This should happen after steps have been
        # registered and ordered.
        self.context = WorkflowContext(self)
        context_seed = context_seed or {}
        clean_seed = dict([(key, val)
                           for key, val in context_seed.items()
                           if key in self.contributions | self.depends_on])
        self.context_seed = clean_seed
        self.context.update(clean_seed)

        if request and request.method == "POST":
            for step in self.steps:
                valid = step.action.is_valid()
                # Be sure to use the CLEANED data if the workflow is valid.
                if valid:
                    data = step.action.cleaned_data
                else:
                    data = request.POST
                self.context = step.contribute(data, self.context)

    @property
    def steps(self):
        # Lazily gather and order the step instances on first access.
        if getattr(self, "_ordered_steps", None) is None:
            self._gather_steps()
        return self._ordered_steps

    def get_step(self, slug):
        """ Returns the instantiated step matching the given slug. """
        for step in self.steps:
            if step.slug == slug:
                return step

    def _gather_steps(self):
        # Instantiate default steps, then filter the full ordered list by
        # the requesting user's permissions.
        ordered_step_classes = self._order_steps()
        for default_step in self.default_steps:
            self.register(default_step)
            self._registry[default_step] = default_step(self)
        self._ordered_steps = [self._registry[step_class]
                               for step_class in ordered_step_classes
                               if has_permissions(self.request.user,
                                                  self._registry[step_class])]

    def _order_steps(self):
        # Insert each non-default step between its declared ``after`` and
        # ``before`` anchors (missing anchors fall back to the list edges).
        steps = list(copy.copy(self.default_steps))
        additional = self._registry.keys()
        for step in additional:
            try:
                min_pos = steps.index(step.after)
            except ValueError:
                min_pos = 0
            try:
                max_pos = steps.index(step.before)
            except ValueError:
                max_pos = len(steps)
            if min_pos > max_pos:
                # Fix: report the offending step itself rather than the
                # whole list of additional step classes.
                raise exceptions.WorkflowError("The step %(new)s can't be "
                                               "placed between the steps "
                                               "%(after)s and %(before)s; the "
                                               "step %(before)s comes before "
                                               "%(after)s."
                                               % {"new": step,
                                                  "after": step.after,
                                                  "before": step.before})
            steps.insert(max_pos, step)
        return steps

    def get_entry_point(self):
        """
        Returns the slug of the step which the workflow should begin on.

        This method takes into account both already-available data and errors
        within the steps.
        """
        # If we have a valid specified entry point, use it.
        if self.entry_point:
            if self.get_step(self.entry_point):
                return self.entry_point
        # Otherwise fall back to calculating the appropriate entry point.
        for step in self.steps:
            if step.has_errors:
                return step.slug
            try:
                step._verify_contributions(self.context)
            except exceptions.WorkflowError:
                return step.slug
        # If nothing else, just return the first step.
        return self.steps[0].slug

    def _trigger_handlers(self, key):
        # Run every step handler registered for ``key`` and collect the
        # (slug, response) pairs.
        responses = []
        handlers = [(step.slug, f) for step in self.steps
                    for f in step._handlers.get(key, [])]
        for slug, handler in handlers:
            responses.append((slug, handler(self.request, self.context)))
        return responses

    @classmethod
    def register(cls, step_class):
        """ Registers a :class:`~horizon.workflows.Step` with the workflow. """
        if not inspect.isclass(step_class):
            raise ValueError('Only classes may be registered.')
        elif not issubclass(step_class, cls._registerable_class):
            raise ValueError('Only %s classes or subclasses may be registered.'
                             % cls._registerable_class.__name__)
        if step_class in cls._cls_registry:
            return False
        else:
            cls._cls_registry.add(step_class)
            return True

    @classmethod
    def unregister(cls, step_class):
        """
        Unregisters a :class:`~horizon.workflows.Step` from the workflow.
        """
        try:
            cls._cls_registry.remove(step_class)
        except KeyError:
            raise base.NotRegistered('%s is not registered' % cls)
        return cls._unregister(step_class)

    def validate(self, context):
        """
        Hook for custom context data validation. Should return a boolean
        value or raise :class:`~horizon.exceptions.WorkflowValidationError`.
        """
        return True

    def is_valid(self):
        """
        Verified that all required data is present in the context and
        calls the ``validate`` method to allow for finer-grained checks
        on the context data.
        """
        missing = self.depends_on - set(self.context.keys())
        if missing:
            raise exceptions.WorkflowValidationError(
                "Unable to complete the workflow. The values %s are "
                "required but not present." % ", ".join(missing))

        # Validate each step. Cycle through all of them to catch all errors
        # in one pass before returning.
        steps_valid = True
        for step in self.steps:
            if not step.action.is_valid():
                steps_valid = False
                step.has_errors = True
        if not steps_valid:
            return steps_valid
        return self.validate(self.context)

    def finalize(self):
        """
        Finalizes a workflow by running through all the actions in order
        and calling their ``handle`` methods. Returns ``True`` on full success,
        or ``False`` for a partial success, e.g. there were non-critical
        errors. (If it failed completely the function wouldn't return.)
        """
        partial = False
        for step in self.steps:
            try:
                data = step.action.handle(self.request, self.context)
                if data is True or data is None:
                    continue
                elif data is False:
                    partial = True
                else:
                    self.context = step.contribute(data or {}, self.context)
            except Exception:
                # Step failures are non-fatal: record partial success and
                # let the exception framework report them.
                partial = True
                exceptions.handle(self.request)
        if not self.handle(self.request, self.context):
            partial = True
        return not partial

    def handle(self, request, context):
        """
        Handles any final processing for this workflow. Should return a boolean
        value indicating success.
        """
        return True

    def get_success_url(self):
        """
        Returns a URL to redirect the user to upon completion. By default it
        will attempt to parse a ``success_url`` attribute on the workflow,
        which can take the form of a reversible URL pattern name, or a
        standard HTTP URL.
        """
        try:
            return urlresolvers.reverse(self.success_url)
        except urlresolvers.NoReverseMatch:
            return self.success_url

    def format_status_message(self, message):
        """
        Hook to allow customization of the message returned to the user
        upon successful or unsuccessful completion of the workflow.

        By default it simply inserts the workflow's name into the message
        string.
        """
        if "%s" in message:
            return message % self.name
        else:
            return message

    def render(self):
        """ Renders the workflow. """
        workflow_template = template.loader.get_template(self.template_name)
        extra_context = {"workflow": self}
        if self.request.is_ajax():
            extra_context['modal'] = True
        context = template.RequestContext(self.request, extra_context)
        return workflow_template.render(context)

    def get_absolute_url(self):
        """ Returns the canonical URL for this workflow.

        This is used for the POST action attribute on the form element
        wrapping the workflow.

        For convenience it defaults to the value of
        ``request.get_full_path()`` with any query string stripped off,
        e.g. the path at which the workflow was requested.
        """
        return self.request.get_full_path().partition('?')[0]

    def add_error_to_step(self, message, slug):
        """
        Adds an error to the workflow's Step with the
        specifed slug based on API issues. This is useful
        when you wish for API errors to appear as errors on
        the form rather than using the messages framework.
        """
        step = self.get_step(slug)
        if step:
            step.add_error(message)
|
|
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
RangeIndex,
Series,
date_range,
)
import pandas._testing as tm
class TestResetIndex:
    """Tests for ``Series.reset_index`` behavior across index types."""

    def test_reset_index_dti_round_trip(self):
        # A DatetimeIndex should round-trip through reset_index/set_index.
        dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")._with_freq(None)
        d1 = DataFrame({"v": np.random.rand(len(dti))}, index=dti)
        d2 = d1.reset_index()
        assert d2.dtypes[0] == np.dtype("M8[ns]")
        d3 = d2.set_index("index")
        tm.assert_frame_equal(d1, d3, check_names=False)

        # GH#2329
        stamp = datetime(2012, 11, 22)
        df = DataFrame([[stamp, 12.1]], columns=["Date", "Value"])
        df = df.set_index("Date")
        assert df.index[0] == stamp
        assert df.reset_index()["Date"][0] == stamp

    def test_reset_index(self):
        df = tm.makeDataFrame()[:5]
        ser = df.stack()
        ser.index.names = ["hash", "category"]

        ser.name = "value"
        df = ser.reset_index()
        assert "value" in df

        # A custom column name may be supplied via ``name``.
        df = ser.reset_index(name="value2")
        assert "value2" in df

        # check inplace
        s = ser.reset_index(drop=True)
        s2 = ser
        return_value = s2.reset_index(drop=True, inplace=True)
        assert return_value is None
        tm.assert_series_equal(s, s2)

        # level
        index = MultiIndex(
            levels=[["bar"], ["one", "two", "three"], [0, 1]],
            codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
        )
        s = Series(np.random.randn(6), index=index)
        rs = s.reset_index(level=1)
        assert len(rs.columns) == 2

        rs = s.reset_index(level=[0, 2], drop=True)
        tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
        assert isinstance(rs, Series)

    def test_reset_index_name(self):
        # The resulting RangeIndex must not inherit the old index name.
        s = Series([1, 2, 3], index=Index(range(3), name="x"))
        assert s.reset_index().index.name is None
        assert s.reset_index(drop=True).index.name is None

    def test_reset_index_level(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])

        # Levels may be referenced by name or by position.
        for levels in ["A", "B"], [0, 1]:
            # With MultiIndex
            s = df.set_index(["A", "B"])["C"]

            result = s.reset_index(level=levels[0])
            tm.assert_frame_equal(result, df.set_index("B"))

            result = s.reset_index(level=levels[:1])
            tm.assert_frame_equal(result, df.set_index("B"))

            result = s.reset_index(level=levels)
            tm.assert_frame_equal(result, df)

            result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
            tm.assert_frame_equal(result, df[["C"]])

            with pytest.raises(KeyError, match="Level E "):
                s.reset_index(level=["A", "E"])

            # With single-level Index
            s = df.set_index("A")["B"]

            result = s.reset_index(level=levels[0])
            tm.assert_frame_equal(result, df[["A", "B"]])

            result = s.reset_index(level=levels[:1])
            tm.assert_frame_equal(result, df[["A", "B"]])

            result = s.reset_index(level=levels[0], drop=True)
            tm.assert_series_equal(result, df["B"])

            with pytest.raises(IndexError, match="Too many levels"):
                s.reset_index(level=[0, 1, 2])

        # Check that .reset_index([],drop=True) doesn't fail
        result = Series(range(4)).reset_index([], drop=True)
        expected = Series(range(4))
        tm.assert_series_equal(result, expected)

    def test_reset_index_range(self):
        # GH 12071
        s = Series(range(2), name="A", dtype="int64")
        series_result = s.reset_index()
        assert isinstance(series_result.index, RangeIndex)
        series_expected = DataFrame(
            [[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2)
        )
        tm.assert_frame_equal(series_result, series_expected)

    def test_reset_index_drop_errors(self):
        # GH 20925

        # KeyError raised for series index when passed level name is missing
        s = Series(range(4))
        with pytest.raises(KeyError, match="does not match index name"):
            s.reset_index("wrong", drop=True)
        with pytest.raises(KeyError, match="does not match index name"):
            s.reset_index("wrong")

        # KeyError raised for series when level to be dropped is missing
        s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2))
        with pytest.raises(KeyError, match="not found"):
            s.reset_index("wrong", drop=True)

    def test_reset_index_with_drop(self, series_with_multilevel_index):
        ser = series_with_multilevel_index

        deleveled = ser.reset_index()
        assert isinstance(deleveled, DataFrame)
        assert len(deleveled.columns) == len(ser.index.levels) + 1
        assert deleveled.index.name == ser.index.name

        deleveled = ser.reset_index(drop=True)
        assert isinstance(deleveled, Series)
        assert deleveled.index.name == ser.index.name

    def test_drop_pos_args_deprecation(self):
        # https://github.com/pandas-dev/pandas/issues/41485
        ser = Series([1, 2, 3], index=Index([1, 2, 3], name="a"))
        msg = (
            r"In a future version of pandas all arguments of Series\.reset_index "
            r"except for the argument 'level' will be keyword-only"
        )
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.reset_index("a", False)
        expected = DataFrame({"a": [1, 2, 3], 0: [1, 2, 3]})
        tm.assert_frame_equal(result, expected)

    def test_reset_index_inplace_and_drop_ignore_name(self):
        # GH#44575
        ser = Series(range(2), name="old")
        ser.reset_index(name="new", drop=True, inplace=True)
        expected = Series(range(2), name="old")
        tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize(
    "array, dtype",
    [
        (["a", "b"], object),
        (
            pd.period_range("12-1-2000", periods=2, freq="Q-DEC"),
            pd.PeriodDtype(freq="Q-DEC"),
        ),
    ],
)
def test_reset_index_dtypes_on_empty_series_with_multiindex(array, dtype):
    # GH 19602 - Preserve dtype on empty Series with MultiIndex
    # Each MultiIndex level must keep its original dtype after reset_index,
    # even when the Series itself is empty.
    idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], array])
    result = Series(dtype=object, index=idx)[:0].reset_index().dtypes
    expected = Series(
        {"level_0": np.int64, "level_1": np.float64, "level_2": dtype, 0: object}
    )
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "names, expected_names",
    [
        (["A", "A"], ["A", "A"]),
        (["level_1", None], ["level_1", "level_1"]),
    ],
)
@pytest.mark.parametrize("allow_duplicates", [False, True])
def test_column_name_duplicates(names, expected_names, allow_duplicates):
    # GH#44755 reset_index with duplicate column labels
    # Duplicate labels are only permitted when allow_duplicates=True;
    # otherwise reset_index must raise.
    s = Series([1], index=MultiIndex.from_arrays([[1], [1]], names=names))
    if allow_duplicates:
        result = s.reset_index(allow_duplicates=True)
        expected = DataFrame([[1, 1, 1]], columns=expected_names + [0])
        tm.assert_frame_equal(result, expected)
    else:
        with pytest.raises(ValueError, match="cannot insert"):
            s.reset_index()
|
|
"""
The MIT License (MIT)
Copyright (c) 2014 - 2015 Jos "Zarthus" Ahrens and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Tell by Zarthus
Licensed under MIT
Tell the bot to remember something. When target is active, return message.
"""
from core import moduletemplate
from tools import duration
from tools import validator
import sqlite3
import os
import time
class Tell(moduletemplate.BotModule):
    """IRC "tell" module: store messages for offline users and deliver them
    when the recipient next speaks or joins a shared channel.

    Messages are persisted in a per-network SQLite database; ``self.hastells``
    is an in-memory cache (lowercased nick -> bool) that avoids a database
    query on every message.
    """

    def on_module_load(self):
        # One database per network, created on demand below.
        self.db_file = os.path.join(self.db_dir, "{}_tell.db".format(self.network_name))

        self.register_command("tell", "<nickname> <message>",
                              "Once <nickname> appears online (by sending a message to a channel I am in), "
                              "message them with <message>", self.PRIV_NONE, ["note"])
        self.register_command("telldel", "<nickname | *>",
                              "Delete all pending messages for <nickname>, or all if * is specified.",
                              self.PRIV_NONE, ["deltell", "delnote"])
        self.register_command("tellslist", None,
                              "Show if you have any pending messages or not, if you do, they are sent to your query.",
                              self.PRIV_NONE, ["listtells", "listtell", "notelist", "noteslist", "listnotes"])
        self.register_command("tellhas", "[nickname]", "Check if [nickname] has any pending tells.",
                              self.PRIV_NONE, ["hastell", "hastells", "hasnote"])
        self.register_command("tells", None, "Count the number of active tells in the database.",
                              self.PRIV_MOD, ["notes"])

        # Per-user cap on pending tells (both sent and received).
        if "max_tells" not in self.module_data:
            self.module_data["max_tells"] = 5

        self.hastells = {}
        self.tell_make_db()
        self.validator = validator.Validator()

    def on_privmsg(self, target, nick, message):
        # Deliver pending tells the moment the recipient is seen talking.
        if self.has_tells(nick):
            self.show_tells(nick)

    def on_join(self, nick, channel):
        # Only notify once per nick until the cache entry is refreshed.
        if nick.lower() not in self.hastells:
            if self.has_tells(nick):
                count = self.tell_count(nick)
                self.message(nick, None, "Hello {}! You seem to have $(bold){}$(bold) pending message{}. "
                                         "To view these messages, reply to my query with 'listtells' or speak in a "
                                         "channel we share.".format(nick, count, "s" if count != 1 else ""), True)

    def on_command(self, target, nick, command, commandtext, mod, admin):
        if command in ["listtells", "listtell", "tellslist", "notelist", "noteslist", "listnotes"]:
            if self.has_tells(nick):
                return self.show_tells(nick)
            else:
                return self.notice(nick, "You do not have any pending messages.")

        if command in ["hastell", "hastells", "tellhas", "hasnote", "hasnotes", "notehas"]:
            targ = commandtext
            if not targ:
                targ = nick

            if not self.validator.nickname(targ):
                return self.notice(nick, "'{}' is not a valid IRC nickname.".format(targ))

            if self.has_tells(targ):
                return self.message(target, nick, "{} has pending messages.".format(targ))
            return self.message(target, nick, "{} has no pending messages.".format(targ))

        if command in ["tell", "note"]:
            ct = commandtext.split()

            if not commandtext or len(ct) < 2:
                return self.notice(nick, "Usage: tell <nickname> <message>")

            to = ct[0]
            msg = " ".join(ct[1:])

            if not self.validator.nickname(to):
                return self.notice(nick, "'{}' is an invalid nickname.".format(to))

            if to == self._conn.currentnick:
                return self.notice(nick, "I cannot set tells for myself.")

            if self.tell_passes_limit(nick):
                return self.notice(nick, "You cannot set anymore tells. "
                                         "Use deltell <nickname | *> to delete existing tells")

            if self.tell_passes_limit(to, False):
                return self.notice(nick, "{} cannot receive anymore tells.".format(to))

            if self.tell_exists(nick, to):
                return self.notice(nick, "You are already sending a message to {}. "
                                         "Use 'deltell {}' to be able send a new message.".format(to, to))

            success = self.tell_store(nick, to, msg)
            if success:
                return self.message(target, nick, "I will let {} know they have a pending message.".format(to))
            return self.message(target, nick, "I was unable to add a message for {}.".format(to))

        if command in ["deltell", "telldel", "delnote", "notedel"]:
            if not commandtext:
                return self.notice(nick, "Usage: deltell <nickname | *>")

            if commandtext == "*":
                self.tell_delete_all(nick)
                return self.message(target, nick, "Deleted all pending messages sent by you.")

            if not self.validator.nickname(commandtext):
                return self.notice(nick, "'{}' is an invalid nickname.".format(commandtext))

            if not self.has_tells(commandtext):
                return self.notice(nick, "{} has no pending messages.".format(commandtext))

            if not self.tell_count(nick, commandtext):
                return self.notice(nick, "{} has no pending messages from you.".format(commandtext))

            success = self.tell_delete(nick, commandtext)
            if success:
                return self.message(target, nick, "Deleted all messages to {} from you.".format(commandtext))
            return self.message(target, nick, "Was unable to delete pending messages sent by you.")

        if mod:
            if command in ["notes", "tells"]:
                count = self.tell_count("*")
                return self.message(target, nick, "There are {} pending message{} in my database."
                                                  .format(count, "s" if count != 1 else ""))

        return False

    def show_tells(self, nick):
        """Deliver every stored message for *nick*, notify senders, and purge
        the delivered rows. Returns True on success, False on database error.
        """
        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()

            result = c.execute("SELECT sender, message, timestamp, unix_timestamp FROM tell WHERE "
                               "lower(recipient) = ?", [nick.lower()]).fetchall()
            for msg in result:
                self.message(nick, None, "$(bold){}$(clear) left you a message: {} - Sent on {} ({} ago)"
                                         .format(msg[0], msg[1], msg[2],
                                                 duration.timesincetimestamp(int(msg[3]))), True)

                if msg[0] == "$(lime)TellModule":  # We don't want this kind of recursion.
                    continue

                if nick.lower() != msg[0].lower():
                    udata = self.getUserData(msg[0])  # Let the sender know message has been sent if they're online.
                    if udata:
                        self.message(msg[0], None, "Delivered message '{}' to {} sent on {}."
                                                   .format(msg[1], nick, msg[2]))
                    else:
                        # Manually add a new entry to the database to let the user know their message was delivered.
                        sender = "$(lime)TellModule"
                        rcvmsg = ("Your message '{}' to {} has been delivered (Sent by you on: {})."
                                  .format(msg[1], nick, msg[2]))
                        timestamp = time.strftime("%Y-%m-%d %H:%M:%S %Z")
                        unix_timestamp = int(time.time())

                        c.execute("INSERT INTO tell (sender, recipient, message, timestamp, unix_timestamp) "
                                  "VALUES (?, ?, ?, ?, ?)", [sender, msg[0], rcvmsg, timestamp, unix_timestamp])
                        self.hastells[msg[0].lower()] = True

            c.execute("DELETE FROM tell WHERE lower(recipient) = ?", [nick.lower()])
            self.hastells[nick.lower()] = False

            conn.commit()
            conn.close()
        except sqlite3.Error as e:
            self.error("show_tells({}) error: {}".format(nick, str(e)))
            return False
        return True

    def has_tells(self, to):
        """Return True if *to* has pending messages, consulting the cache first."""
        to = to.lower()

        if to in self.hastells:
            return self.hastells[to]

        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()

            result = c.execute("SELECT timestamp FROM tell WHERE lower(recipient) = ?",
                               [to]).fetchone()
            if result and len(result) >= 1:
                self.hastells[to] = True
            else:
                self.hastells[to] = False
            conn.close()
        except sqlite3.Error as e:
            self.error("has_tells({}) error: {}".format(to, str(e)))
            return False

        if to in self.hastells:  # This should always be set, but the extra security if doesn't hurt.
            return self.hastells[to]
        return False

    def tell_exists(self, sender, to):
        """Return True if *sender* already has a pending tell for *to*."""
        exists = False
        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()

            result = c.execute("SELECT timestamp FROM tell WHERE lower(sender) = ? and lower(recipient) = ?",
                               [sender.lower(), to.lower()]).fetchone()
            if result and len(result) >= 1:
                exists = True
            conn.close()
        except sqlite3.Error as e:
            self.error("tell_exists({}, {}) error: {}".format(sender, to, str(e)))
            return False
        return exists

    def tell_passes_limit(self, sender, send=True):
        """
        Check if somebody is sending too many tells.

        Sender: string, sender or receiver nick.
        Send: Is it a sender, or a receiver.

        Send should be set to false if you want to check if they have too many pending messages,
        otherwise if they are sending too many.
        """
        passedlimit = False
        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()

            result = None
            if send:
                result = c.execute("SELECT count(timestamp) FROM tell WHERE lower(sender) = ?",
                                   [sender.lower()]).fetchone()
            else:
                result = c.execute("SELECT count(timestamp) FROM tell WHERE lower(recipient) = ?",
                                   [sender.lower()]).fetchone()

            if result[0] >= self.module_data["max_tells"]:
                passedlimit = True
            conn.close()
        except sqlite3.Error as e:
            self.error("tell_passes_limit({}, {}) error: {}".format(sender, str(send), str(e)))
            return False
        return passedlimit

    def tell_count(self, sender, to=None):
        """Count pending tells: all of them for "*", from *sender* to *to*,
        or everything sent by *sender*.
        """
        count = 0
        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()

            result = None
            if sender == "*":
                result = c.execute("SELECT count(timestamp) FROM tell WHERE 1").fetchone()
            elif to:
                result = c.execute("SELECT count(timestamp) FROM tell WHERE lower(sender) = ? and "
                                   "lower(recipient) = ?", [sender.lower(), to.lower()]).fetchone()
            else:
                result = c.execute("SELECT count(timestamp) FROM tell WHERE lower(sender) = ?",
                                   [sender.lower()]).fetchone()

            count = result[0]
            conn.close()
        except sqlite3.Error as e:
            # Fix: the log line previously mislabeled this method "tell_list",
            # sending anyone debugging a failure to the wrong place.
            self.error("tell_count({}) error: {}".format(sender, str(e)))
            return False
        return count

    def tell_store(self, sender, to, message):
        """Persist a new tell and mark the recipient as having pending mail."""
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S %Z")
        unix_timestamp = int(time.time())

        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()
            c.execute("INSERT INTO tell (sender, recipient, message, timestamp, unix_timestamp) "
                      "VALUES (?, ?, ?, ?, ?)", [sender, to, message, timestamp, unix_timestamp])
            conn.commit()
            conn.close()
        except sqlite3.Error as e:
            self.error("tell_store({}, {}, {}) error: {}".format(sender, to, message, str(e)))
            return False

        self.hastells[to.lower()] = True
        return True

    def tell_delete(self, sender, to):
        """Delete all tells from *sender* to *to*."""
        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()
            c.execute("DELETE FROM tell WHERE lower(sender) = ? and lower(recipient) = ?",
                      [sender.lower(), to.lower()])
            conn.commit()
            conn.close()
        except sqlite3.Error as e:
            self.error("tell_delete({}, {}) error: {}".format(sender, to, str(e)))
            return False

        self.hastells[to.lower()] = False
        return True

    def tell_delete_all(self, sender):
        """Deletes all pending tells from user."""
        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()
            c.execute("DELETE FROM tell WHERE lower(sender) = ?", [sender.lower()])
            conn.commit()
            conn.close()
        except sqlite3.Error as e:
            self.error("tell_delete_all({}) error: {}".format(sender, str(e)))
            return False

        self.hastells[sender.lower()] = False
        return True

    def tell_make_db(self):
        """Create the tell table if it does not exist yet."""
        try:
            conn = sqlite3.connect(self.db_file)
            c = conn.cursor()
            c.execute("CREATE TABLE IF NOT EXISTS tell "
                      "(sender TEXT, recipient TEXT, message TEXT, timestamp TEXT, unix_timestamp TEXT)")
            conn.commit()
            conn.close()
        except sqlite3.Error as e:
            self.error("tell_make_db() error: Failed to create database tell.db: {}".format(str(e)))
|
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, timedelta_range
import pandas._testing as tm
class TestDataFrameAppend:
def test_append_multiindex(self, multiindex_dataframe_random_data, frame_or_series):
    # Splitting a MultiIndex object and appending the halves must
    # reconstruct the original (both DataFrame and Series variants).
    obj = multiindex_dataframe_random_data
    if frame_or_series is Series:
        obj = obj["A"]

    a = obj[:5]
    b = obj[5:]

    result = a.append(b)
    tm.assert_equal(result, obj)
def test_append_empty_list(self):
    # GH 28769
    # Appending an empty list returns an equal but distinct object.
    df = DataFrame()
    result = df.append([])
    expected = df
    tm.assert_frame_equal(result, expected)
    assert result is not df

    df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
    result = df.append([])
    expected = df
    tm.assert_frame_equal(result, expected)
    assert result is not df  # .append() should return a new object
def test_append_series_dict(self):
    # Appending a Series or dict requires ignore_index=True unless the
    # Series has a name usable as the new row label.
    df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])

    series = df.loc[4]
    msg = "Indexes have overlapping values"
    with pytest.raises(ValueError, match=msg):
        df.append(series, verify_integrity=True)

    series.name = None
    msg = "Can only append a Series if ignore_index=True"
    with pytest.raises(TypeError, match=msg):
        df.append(series, verify_integrity=True)

    result = df.append(series[::-1], ignore_index=True)
    expected = df.append(
        DataFrame({0: series[::-1]}, index=df.columns).T, ignore_index=True
    )
    tm.assert_frame_equal(result, expected)

    # dict
    result = df.append(series.to_dict(), ignore_index=True)
    tm.assert_frame_equal(result, expected)

    result = df.append(series[::-1][:3], ignore_index=True)
    expected = df.append(
        DataFrame({0: series[::-1][:3]}).T, ignore_index=True, sort=True
    )
    tm.assert_frame_equal(result, expected.loc[:, result.columns])

    msg = "Can only append a dict if ignore_index=True"
    with pytest.raises(TypeError, match=msg):
        df.append(series.to_dict())

    # can append when name set
    row = df.loc[4]
    row.name = 5
    result = df.append(row)
    expected = df.append(df[-1:], ignore_index=True)
    tm.assert_frame_equal(result, expected)
def test_append_list_of_series_dicts(self):
    # A list of row dicts appends like appending the equivalent frame.
    df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])

    dicts = [x.to_dict() for idx, x in df.iterrows()]

    result = df.append(dicts, ignore_index=True)
    expected = df.append(df, ignore_index=True)
    tm.assert_frame_equal(result, expected)

    # different columns
    dicts = [
        {"foo": 1, "bar": 2, "baz": 3, "peekaboo": 4},
        {"foo": 5, "bar": 6, "baz": 7, "peekaboo": 8},
    ]
    result = df.append(dicts, ignore_index=True, sort=True)
    expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)
    tm.assert_frame_equal(result, expected)
def test_append_missing_cols(self):
    # GH22252
    # exercise the conditional branch in append method where the data
    # to be appended is a list and does not contain all columns that are in
    # the target DataFrame
    frame = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
    partial_rows = [{"foo": 9}, {"bar": 10}]

    # Must not emit any warning while appending partial rows.
    with tm.assert_produces_warning(None):
        outcome = frame.append(partial_rows, ignore_index=True, sort=True)

    tm.assert_frame_equal(
        outcome,
        frame.append(DataFrame(partial_rows), ignore_index=True, sort=True),
    )
def test_append_empty_dataframe(self):
    """Appending an empty frame is a no-op (modulo copying) for any base."""
    empty = DataFrame()

    base_frames = [
        DataFrame(),                                                # empty onto empty
        DataFrame(np.random.randn(5, 2)),                           # data, default cols
        DataFrame(columns=["bar", "foo"]),                          # columns only
        DataFrame(np.random.randn(5, 2), columns=["bar", "foo"]),   # data + columns
    ]

    for base in base_frames:
        tm.assert_frame_equal(base.append(empty), base.copy())
def test_append_dtypes(self):
    # GH 5754
    # row appends of different dtypes (so need to do by-item)
    # can sometimes infer the correct type

    # Appending an empty frame leaves the datetime64 column untouched.
    df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5))
    df2 = DataFrame()
    result = df1.append(df2)
    expected = df1.copy()
    tm.assert_frame_equal(result, expected)

    # datetime64 + str: the result column holds both values (object).
    df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
    df2 = DataFrame({"bar": "foo"}, index=range(1, 2))
    result = df1.append(df2)
    expected = DataFrame({"bar": [Timestamp("20130101"), "foo"]})
    tm.assert_frame_equal(result, expected)

    # datetime64 + NaN: NaN becomes NaT and the dtype stays M8[ns].
    df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
    df2 = DataFrame({"bar": np.nan}, index=range(1, 2))
    result = df1.append(df2)
    expected = DataFrame(
        {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
    )
    tm.assert_frame_equal(result, expected)

    # Same, but the NaN column is explicitly object dtype; still M8[ns].
    df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
    df2 = DataFrame({"bar": np.nan}, index=range(1, 2), dtype=object)
    result = df1.append(df2)
    expected = DataFrame(
        {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
    )
    tm.assert_frame_equal(result, expected)

    # NaN first, datetime64 second: the dtype is still inferred as M8[ns].
    df1 = DataFrame({"bar": np.nan}, index=range(1))
    df2 = DataFrame({"bar": Timestamp("20130101")}, index=range(1, 2))
    result = df1.append(df2)
    expected = DataFrame(
        {"bar": Series([np.nan, Timestamp("20130101")], dtype="M8[ns]")}
    )
    tm.assert_frame_equal(result, expected)

    # datetime64 + int held as object: both values preserved as-is.
    df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
    df2 = DataFrame({"bar": 1}, index=range(1, 2), dtype=object)
    result = df1.append(df2)
    expected = DataFrame({"bar": Series([Timestamp("20130101"), 1])})
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "timestamp", ["2019-07-19 07:04:57+0100", "2019-07-19 07:04:57"]
)
def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp):
    # GH 30238: appending a row must preserve tz-aware/naive timestamps.
    tz = tz_naive_fixture
    frame = DataFrame([Timestamp(timestamp, tz=tz)])

    appended_row = frame.append(frame.iloc[0]).iloc[-1]

    tm.assert_series_equal(
        appended_row, Series(Timestamp(timestamp, tz=tz), name=0)
    )
@pytest.mark.parametrize(
    "data, dtype",
    [
        ([1], pd.Int64Dtype()),
        ([1], pd.CategoricalDtype()),
        ([pd.Interval(left=0, right=5)], pd.IntervalDtype()),
        ([pd.Period("2000-03", freq="M")], pd.PeriodDtype("M")),
        ([1], pd.SparseDtype()),
    ],
)
def test_other_dtypes(self, data, dtype):
    # Appending a row of an extension dtype must preserve value and dtype.
    frame = DataFrame(data, dtype=dtype)

    appended_row = frame.append(frame.iloc[0]).iloc[-1]

    tm.assert_series_equal(appended_row, Series(data, name=0, dtype=dtype))
@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
def test_append_numpy_bug_1681(self, dtype):
    # another datetime64 bug
    if dtype == "datetime64[ns]":
        idx = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
    else:
        idx = timedelta_range("1 days", "10 days", freq="2D")

    other = DataFrame({"A": "foo", "B": idx}, index=idx)
    combined = DataFrame().append(other)

    assert (combined["B"] == idx).all()
|
|
# Copyright (c) 2013, Nathan Dunsworth - NFXPlugins
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NFXPlugins nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NFXPLUGINS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Public API of this module.
__all__ = [
    'SgQueryEngine'
]
# Python imports
import threading
import weakref
# This module imports
import ShotgunORM
class SgQueryJob(object):
    '''
    A single batched field-pull request: a set of Entities (held as weakrefs)
    of a common Shotgun type plus the set of field names to fetch for them.

    Jobs compare by their field sets so the engine can sort the largest
    queries first (eq.sort(reverse=True)).
    '''

    def __repr__(self):
        eIds = []

        for entity in self.entities():
            e = entity()

            # Skip weakrefs whose Entity has already been garbage collected.
            if e == None:
                continue

            eIds.append(e.id)

        return '<%s(type:%s, fields:%s, entities:%s)>' % (
            type(self).__name__,
            self.entityType(),
            self.fields(),
            eIds
        )

    def __lt__(self, item):
        # Set comparison: True when our field set is a strict subset.
        return self.fields() < item.fields()

    def __gt__(self, item):
        return self.fields() > item.fields()

    def __eq__(self, item):
        return self.fields() == item.fields()

    def __ne__(self, item):
        # Bug fix: under Python 2 defining __eq__ does NOT provide __ne__,
        # so "a != b" previously fell back to identity comparison and could
        # disagree with "a == b".
        return not self.__eq__(item)

    def __init__(self, sgEntityType, sgEntities, sgFields):
        self._entityType = sgEntityType
        self._entities = set(sgEntities)
        self._fields = set(sgFields)

    def fields(self):
        '''Set of field names this job will pull.'''
        return self._fields

    def entities(self):
        '''Set of weakref.ref objects pointing at the Entities in this job.'''
        return self._entities

    def entityType(self):
        '''Shotgun Entity type string shared by all Entities in this job.'''
        return self._entityType
class SgQueryEngine(object):
    '''
    Class that represents an asynchronous Entity field value pulling engine.

    Jobs are queued with addQueue() and processed by a daemon background
    thread (SgQueryEngineWorker) started via start().
    '''

    def __del__(self):
        try:
            self.shutdown()
        except:
            pass

    def __enter__(self):
        self.__lock.acquire()
        # Bug fix: __enter__ should return the context object so
        # "with engine as e:" binds the engine rather than None.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.__lock.release()

        return False

    def __repr__(self):
        connection = self.connection()

        if connection == None:
            return '<SgQueryEngine>'

        # Bug fix: the format string was missing the closing paren before ">".
        return '<SgQueryEngine(url:"%(url)s", script:"%(script)s")>' % {
            'url': connection.url(),
            'script': connection.scriptName()
        }

    def __init__(self, sgConnection):
        self.__lock = threading.Lock()
        self.__block = threading.RLock()

        self._qEvent = threading.Event()
        self._qShutdownEvent = threading.Event()

        self._qEvent.clear()

        # Weakref so the engine does not keep the connection alive.
        self.__connection = weakref.ref(sgConnection)

        self._pendingQueries = []
        self._entityQueue = {}

        self.__engineThread = threading.Thread(
            name=self.__repr__(),
            target=SgQueryEngineWorker,
            args=[
                self.__connection,
                self.__lock,
                self.__block,
                self._qEvent,
                self._qShutdownEvent,
                self._entityQueue,
                self._pendingQueries
            ]
        )

        # Daemon thread so a live engine never blocks interpreter exit.
        # (".daemon = True" works on Python 2.6+ and 3; setDaemon() is the
        # legacy alias.)
        self.__engineThread.daemon = True

    def addQueue(self, sgEntity, sgFields):
        '''
        Adds the passed Entity and the specified fields to the queue.
        '''

        # The field pull queue works by taking the fields that each Entity is asking
        # to pull and batches them in alike groups minimizing the amount of passes
        # the Shotgun database will need to return each Entity by type.
        #
        # Each time a new batch is added the currently pending pulls are locked and
        # checked to see if the new batch items can be added to them.  This means a
        # late addition may return quicker than another item that was added to the
        # queue earlier simply because it is requesting a set of fields that are
        # lower in the queue.
        #
        # Example 1:
        #
        # * ENTITY: FIELDS PULLING *
        # Entity A: ['firstname', 'lastname']
        # Entity B: ['firstname', 'lastname', 'created_by']
        # Entity C: ['firstname', 'lastname', 'created_by', 'created_at']
        #
        # * BATCH: ENTITIES, FIELDS PULLING *
        # Batch1: [A, B, C], ['firstname', 'lastname']
        # Batch2: [B, C], ['created_by']
        # Batch3: [C], ['created_at']

        if not isinstance(sgEntity, ShotgunORM.SgEntity):
            raise TypeError('expected an SgEntity got %s' % sgEntity)

        # is_alive() exists on Python 2.6+ and 3; the isAlive() alias was
        # removed in Python 3.9.
        if not self.__engineThread.is_alive():
            raise RuntimeError('engine thread is not running')

        try:
            undoFields = []
            pullFields = []

            sgFields = set(sgFields)

            for name, field in sgEntity.fields(sgFields).items():
                pullFields.append(name)

                # Mark the field that it is updating.
                field._SgField__isUpdatingEvent.clear()

                undoFields.append(field)

            if len(pullFields) <= 0:
                return

            ShotgunORM.LoggerQueryEngine.debug('%(qEng)s.addQueue(...)', {'qEng': self})
            ShotgunORM.LoggerQueryEngine.debug('    * sgEntity: %(sgEntity)s', {'sgEntity': sgEntity})
            ShotgunORM.LoggerQueryEngine.debug('    * sgFields: %(sgFields)s', {'sgFields': pullFields})

            with self:
                pullFields = set(pullFields)

                eq = None

                t = sgEntity.type

                # Bug fix: dict.has_key() was removed in Python 3 and is
                # deprecated in Python 2; use the "in" operator.
                if t in self._entityQueue:
                    eq = self._entityQueue[t]
                else:
                    eq = []

                    self._entityQueue[t] = eq

                valid = False

                eqLen = len(eq)

                if eqLen <= 0:
                    # Weakref the Entity, this allows the Engine to not keep Entities
                    # around.
                    entities = [weakref.ref(sgEntity)]

                    q = SgQueryJob(t, entities, pullFields)

                    eq.append(q)
                    self._pendingQueries.append(q)

                    valid = True
                elif eqLen == 1:
                    # This check sees if the q for this Entity type contains only a
                    # single Entity and if so it sees if that Entity is the currently
                    # processing one.  If so it merges the q's fields with the current
                    # list of fields for the Entity.
                    q = eq[0]

                    qEntities = q.entities()

                    if len(qEntities) == 1:
                        qEntity = list(qEntities)[0]()

                        if qEntity == sgEntity:
                            q.fields().update(pullFields)

                            valid = True

                if not valid:
                    for q in eq:
                        qFields = q.fields()

                        # Skip when the current batch has more fields to query than
                        # the Entity is asking for.
                        if len(pullFields) < len(qFields):
                            continue

                        sharedFields = pullFields & qFields

                        if len(sharedFields) >= 1:
                            q.entities().add(
                                weakref.ref(sgEntity)
                            )

                            pullFields -= sharedFields

                            # Halt if all fields have been queued up!
                            if len(pullFields) <= 0:
                                break

                    # Any fields that no existing batch covered get a new job.
                    if len(pullFields) >= 1:
                        entities = [weakref.ref(sgEntity)]

                        q = SgQueryJob(t, entities, pullFields)

                        eq.append(q)
                        self._pendingQueries.append(q)

                # Un-lock the engine if the q was empty.
                # if not self._qEvent.isSet():
                self._qEvent.set()

                # Sort the field q list so that the largest queries are first.
                eq.sort(reverse=True)
        except Exception as e:
            # "except Exception as e" works on Python 2.6+ and 3; the old
            # "except Exception, e" form is Python 2-only syntax.
            ShotgunORM.LoggerQueryEngine.error(e)

            # Undo: wake anything waiting on the fields we flagged above.
            for field in undoFields:
                field._SgField__isUpdatingEvent.set()

            raise

    def block(self):
        '''
        Blocks the query engine.

        This allows multiple Entities to be batch added and prevents engine from
        prematurely processing results.

        Note:
        You must always make sure to call unblock() after you are finished adding
        items to the queue.  Even if your code raises and exception you must not
        forget to unblock the engine.
        '''

        self.__block.acquire()

    def connection(self):
        '''
        Returns the connection the engine belongs to.

        May return None once the connection has been garbage collected, since
        only a weakref is held.
        '''

        return self.__connection()

    def isBlocking(self):
        '''
        Returns True if the engine is currently blocking.
        '''

        return self.__block._is_owned()

    def pending(self):
        '''
        Returns the number of pending queries.
        '''

        return len(self._pendingQueries)

    def shutdown(self):
        '''
        Shutdown the engine.

        Wakes the worker thread and waits until it signals it has stopped.
        '''

        if self.__engineThread.is_alive():
            self._qEvent.set()

            self._qShutdownEvent.wait()

    def start(self):
        '''
        Starts the engines background thread.
        '''

        self.__engineThread.start()

    def unblock(self):
        '''
        Un-blocks the query engine.

        Note:
        This must always be called after blocking the engine.
        '''

        self.__block.release()
def SgQueryEngineWorker(
    connection,
    lock,
    block,
    event,
    eventShutdown,
    entityQueue,
    pendingQueries
):
    '''
    Background thread body for SgQueryEngine: waits for queued jobs, performs
    one batched Shotgun find() per job and pushes the results into each field.
    '''

    ##############################################################################
    #
    # IMPORTANT!!!!!
    #
    # You must make sure to delete any var that is created which points to an
    # Entity object.  Otherwise the worker wont let it fall out of scope and this
    # will prevent the Entity from being gc'd.
    #
    ##############################################################################

    while True:
        entityType = None
        entityFields = None
        entities = None

        event.wait()

        # Waking up with nothing pending is the shutdown signal.
        if len(pendingQueries) <= 0:
            try:
                ShotgunORM.LoggerQueryEngine.debug(
                    'Stopping because engine set event and pendingQueries size is zero'
                )
            except:
                pass

            eventShutdown.set()

            return

        with block:
            q = pendingQueries.pop(0)

            qSize = len(pendingQueries) + 1

            ShotgunORM.LoggerQueryEngine.debug('Queue: job 1 of %(size)d', {'size': qSize})

            with lock:
                if len(pendingQueries) <= 0:
                    event.clear()

                entityType = q.entityType()
                entityFields = list(q.fields())
                entities = list(q.entities())

                entityQueue[entityType].remove(q)

        ShotgunORM.LoggerQueryEngine.debug('Preparing to process job %(q)s', {'q': q})

        entityList = {}
        entityIds = []

        for i in entities:
            entity = i()

            # Check it was gc'd!
            if entity == None:
                continue

            try:
                entityList[entity['id']] = entity

                entityIds.append(entity['id'])
            finally:
                del entity

        # Bail if all the Entities were gc'd!
        if len(entityList) <= 0:
            ShotgunORM.LoggerQueryEngine.debug('Skipping job all Entities no longer exist')

            continue

        ShotgunORM.LoggerQueryEngine.debug(' * Processing')

        con = connection()

        # The engine only holds a weakref to the connection; stop when gone.
        if con == None:
            try:
                ShotgunORM.LoggerQueryEngine.debug(
                    ' * Stopping because connection not found'
                )
            except:
                pass

            return

        try:
            ShotgunORM.LoggerQueryEngine.debug(' * Searching')

            sgSearch = None

            if len(entityIds) == 1:
                sgSearch = con._sg_find(entityType, [['id', 'is', entityIds[0]]], entityFields)
            else:
                sgSearch = con._sg_find(entityType, [['id', 'in', entityIds]], entityFields)

            ShotgunORM.LoggerQueryEngine.debug(' * Searching complete!')
        except Exception as e:
            # Fix: "except Exception, e" is Python 2-only syntax; "as" works
            # on Python 2.6+ and 3.
            ShotgunORM.LoggerQueryEngine.error(e)

            # Wake anything blocked on the fields of every Entity in this job.
            for entity in entityList.values():
                for field in entity.fields(entityFields).values():
                    field._SgField__isUpdatingEvent.set()

                del entity

            del entityList

            continue
        finally:
            del con

        for result in sgSearch:
            entity = entityList[result['id']]

            del result['type']

            try:
                for fieldName, field in entity.fields(entityFields).items():
                    field.setSyncUpdate(result[fieldName])

                    field._SgField__isUpdatingEvent.set()
            finally:
                del entity

        del entityList

        # NOTE(review): this sets the shutdown event after every processed job,
        # which can wake a concurrent shutdown() call before the worker has
        # actually exited — confirm this is intended.
        eventShutdown.set()

        try:
            ShotgunORM.LoggerQueryEngine.debug(' * Processing complete!')
        except:
            pass
|
|
from typing import Any, List, Dict, Optional, Text
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest
from django.shortcuts import redirect, render
from django.utils import translation
from django.utils.cache import patch_cache_control
from six.moves import zip_longest, zip, range
from zerver.decorator import zulip_login_required, process_client
from zerver.forms import ToSForm
from zerver.lib.realm_icon import realm_icon_url
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmDomain, \
RealmFilter, PreregistrationUser, UserActivity, \
UserPresence, get_stream_recipient, name_changes_disabled, email_to_username, \
get_realm_domains
from zerver.lib.events import do_events_register
from zerver.lib.actions import update_user_presence, do_change_tos_version, \
do_update_pointer, realm_user_count
from zerver.lib.avatar import avatar_url
from zerver.lib.i18n import get_language_list, get_language_name, \
get_language_list_for_templates
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.lib.streams import access_stream_by_name
from zerver.lib.subdomains import get_subdomain
from zerver.lib.utils import statsd
import calendar
import datetime
import logging
import os
import re
import simplejson
import time
@zulip_login_required
def accounts_accept_terms(request):
    # type: (HttpRequest) -> HttpResponse
    """Show (GET) or process (POST) the Terms-of-Service acceptance form."""
    if request.method == "POST":
        form = ToSForm(request.POST)
        if form.is_valid():
            do_change_tos_version(request.user, settings.TOS_VERSION)
            return redirect(home)
    else:
        form = ToSForm()

    # First-time users may get an extra message block above the form.
    special_message_template = None
    if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
        special_message_template = 'zerver/' + settings.FIRST_TIME_TOS_TEMPLATE

    return render(
        request,
        'zerver/accounts_accept_terms.html',
        context={'form': form,
                 'email': request.user.email,
                 'special_message_template': special_message_template},
    )
def sent_time_in_epoch_seconds(user_message):
    # type: (Optional[UserMessage]) -> Optional[float]
    """Return the message's send time as UTC epoch seconds, or None."""
    if user_message is None:
        return None
    pub_date = user_message.message.pub_date
    # We have USE_TZ = True, so our datetime objects are timezone-aware;
    # timegm() interprets the UTC time tuple as seconds since the epoch.
    return calendar.timegm(pub_date.utctimetuple())
def home(request):
    # type: (HttpRequest) -> HttpResponse
    """Route '/' to the webapp, the landing page, or a compile-error page."""
    # Surface handlebars template compilation failures loudly in development.
    if settings.DEVELOPMENT and os.path.exists('var/handlebars-templates/compile.error'):
        failure = render(request, 'zerver/handlebars_compilation_failed.html')
        failure.status_code = 500
        return failure

    if not settings.ROOT_DOMAIN_LANDING_PAGE:
        return home_real(request)

    # If settings.ROOT_DOMAIN_LANDING_PAGE, sends the user the landing
    # page, not the login form, on the root domain
    if get_subdomain(request) != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
        return home_real(request)
    return render(request, 'zerver/hello.html')
@zulip_login_required
def home_real(request):
    # type: (HttpRequest) -> HttpResponse
    # Main webapp view: registers an event queue for the user and renders the
    # single-page app shell, embedding the initial state that the client-side
    # JavaScript reads from the global `page_params` object.

    # We need to modify the session object every two weeks or it will expire.
    # This line makes reloading the page a sufficient action to keep the
    # session alive.
    request.session.modified = True

    user_profile = request.user

    # If a user hasn't signed the current Terms of Service, send them there
    if settings.TERMS_OF_SERVICE is not None and settings.TOS_VERSION is not None and \
            int(settings.TOS_VERSION.split('.')[0]) > user_profile.major_tos_version():
        return accounts_accept_terms(request)

    # Optional ?stream= / ?topic= parameters narrow the initial view.
    narrow = []  # type: List[List[Text]]
    narrow_stream = None
    narrow_topic = request.GET.get("topic")
    if request.GET.get("stream"):
        try:
            narrow_stream_name = request.GET.get("stream")
            (narrow_stream, ignored_rec, ignored_sub) = access_stream_by_name(
                user_profile, narrow_stream_name)
            narrow = [["stream", narrow_stream.name]]
        except Exception:
            # An invalid or inaccessible stream falls back to the full view.
            logging.exception("Narrow parsing")
    if narrow_stream is not None and narrow_topic is not None:
        narrow.append(["topic", narrow_topic])

    register_ret = do_events_register(user_profile, request.client,
                                      apply_markdown=True, narrow=narrow)
    # max_message_id == -1 means the user has no messages at all.
    user_has_messages = (register_ret['max_message_id'] != -1)

    # Reset our don't-spam-users-with-email counter since the
    # user has since logged in
    if user_profile.last_reminder is not None:
        user_profile.last_reminder = None
        user_profile.save(update_fields=["last_reminder"])

    # Brand new users get narrowed to PM with welcome-bot
    needs_tutorial = user_profile.tutorial_status == UserProfile.TUTORIAL_WAITING

    first_in_realm = realm_user_count(user_profile.realm) == 1
    # If you are the only person in the realm and you didn't invite
    # anyone, we'll continue to encourage you to do so on the frontend.
    prompt_for_invites = first_in_realm and \
        not PreregistrationUser.objects.filter(referred_by=user_profile).count()

    if user_profile.pointer == -1 and user_has_messages:
        # Put the new user's pointer at the bottom
        #
        # This improves performance, because we limit backfilling of messages
        # before the pointer.  It's also likely that someone joining an
        # organization is interested in recent messages more than the very
        # first messages on the system.
        register_ret['pointer'] = register_ret['max_message_id']
        user_profile.last_pointer_updater = request.session.session_key

    if user_profile.pointer == -1:
        latest_read = None
    else:
        try:
            latest_read = UserMessage.objects.get(user_profile=user_profile,
                                                  message__id=user_profile.pointer)
        except UserMessage.DoesNotExist:
            # Don't completely fail if your saved pointer ID is invalid
            logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
            latest_read = None

    # Set default language and make it persist
    default_language = register_ret['default_language']
    url_lang = '/{}'.format(request.LANGUAGE_CODE)
    if not request.path.startswith(url_lang):
        translation.activate(default_language)
    request.session[translation.LANGUAGE_SESSION_KEY] = translation.get_language()

    # Pass parameters to the client-side JavaScript code.
    # These end up in a global JavaScript Object named 'page_params'.
    page_params = dict(
        # Server settings.
        development_environment = settings.DEVELOPMENT,
        debug_mode            = settings.DEBUG,
        test_suite            = settings.TEST_SUITE,
        poll_timeout          = settings.POLL_TIMEOUT,
        login_page            = settings.HOME_NOT_LOGGED_IN,
        root_domain_uri       = settings.ROOT_DOMAIN_URI,
        maxfilesize           = settings.MAX_FILE_UPLOAD_SIZE,
        max_avatar_file_size  = settings.MAX_AVATAR_FILE_SIZE,
        server_generation     = settings.SERVER_GENERATION,
        use_websockets        = settings.USE_WEBSOCKETS,
        save_stacktraces      = settings.SAVE_FRONTEND_STACKTRACES,
        server_inline_image_preview = settings.INLINE_IMAGE_PREVIEW,
        server_inline_url_embed_preview = settings.INLINE_URL_EMBED_PREVIEW,
        password_min_length = settings.PASSWORD_MIN_LENGTH,
        password_min_guesses = settings.PASSWORD_MIN_GUESSES,

        # Misc. extra data.
        have_initial_messages = user_has_messages,
        initial_servertime    = time.time(),  # Used for calculating relative presence age
        default_language_name = get_language_name(register_ret['default_language']),
        language_list_dbl_col = get_language_list_for_templates(register_ret['default_language']),
        language_list         = get_language_list(),
        needs_tutorial        = needs_tutorial,
        first_in_realm        = first_in_realm,
        prompt_for_invites    = prompt_for_invites,
        furthest_read_time    = sent_time_in_epoch_seconds(latest_read),
        has_mobile_devices    = num_push_devices_for_user(user_profile) > 0,
    )

    undesired_register_ret_fields = [
        'streams',
    ]
    # Merge everything do_events_register returned (minus the fields above)
    # into page_params.
    for field_name in set(register_ret.keys()) - set(undesired_register_ret_fields):
        page_params[field_name] = register_ret[field_name]

    if narrow_stream is not None:
        # In narrow_stream context, initial pointer is just latest message
        recipient = get_stream_recipient(narrow_stream.id)
        try:
            initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
        except IndexError:
            initial_pointer = -1
        page_params["narrow_stream"] = narrow_stream.name
        if narrow_topic is not None:
            page_params["narrow_topic"] = narrow_topic
        page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
        page_params["max_message_id"] = initial_pointer
        page_params["pointer"] = initial_pointer
        page_params["have_initial_messages"] = (initial_pointer != -1)
        page_params["enable_desktop_notifications"] = False

    statsd.incr('views.home')
    show_invites = True

    # Some realms only allow admins to invite users
    if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
        show_invites = False

    request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
    response = render(request, 'zerver/index.html',
                      context={'user_profile': user_profile,
                               'page_params': simplejson.encoder.JSONEncoderForHTML().encode(page_params),
                               'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
                               'avatar_url': avatar_url(user_profile),
                               'show_debug':
                                   settings.DEBUG and ('show_debug' in request.GET),
                               'pipeline': settings.PIPELINE_ENABLED,
                               'show_invites': show_invites,
                               'is_admin': user_profile.is_realm_admin,
                               'show_webathena': user_profile.realm.webathena_enabled,
                               'enable_feedback': settings.ENABLE_FEEDBACK,
                               'embedded': narrow_stream is not None,
                               },)
    # Never cache the app shell: it embeds per-user state.
    patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
    return response
@zulip_login_required
def desktop_home(request):
    # type: (HttpRequest) -> HttpResponse
    """Redirect the desktop app's entry URL to the main webapp home view."""
    home_url = reverse('zerver.views.home.home')
    return HttpResponseRedirect(home_url)
def apps_view(request, _):
    # type: (HttpRequest, Text) -> HttpResponse
    """Serve /apps locally when ZILENCER is enabled, else redirect upstream."""
    if not settings.ZILENCER_ENABLED:
        return HttpResponseRedirect('https://zulipchat.com/apps/', status=301)
    return render(request, 'zerver/apps.html')
def is_buggy_ua(agent):
    # type: (str) -> bool
    """Discrimiate CSS served to clients based on User Agent

    Due to QTBUG-3467, @font-face is not supported in QtWebKit.
    This may get fixed in the future, but for right now we can
    just serve the more conservative CSS to all our desktop apps.
    """
    is_desktop_app = any(
        marker in agent for marker in ("Zulip Desktop/", "ZulipDesktop/")
    )
    return is_desktop_app and "Mac" not in agent
|
|
from __future__ import print_function
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
import logging
import hashlib
import keyword
from collections import OrderedDict
from pyang import types as ptypes
from ydkgen import api_model as atypes
"""
common.py
YANG model driven API, common definitions.
"""
# ----------------------------------------------------------------
# Generic lookups
# -----------------------------------------------------------------
# YANG built-in integer type names.
yang_int = {
    'int8',
    'int16',
    'int32',
    'int64',
    'uint8',
    'uint16',
    'uint32',
    'uint64',
}

# Inclusive (min, max) value range for each YANG integer type.
yang_int_ranges = {
    'int8': (-128, 127),
    'int16': (-32768, 32767),
    'int32': (-2147483648, 2147483647),
    'int64': (-9223372036854775808, 9223372036854775807),
    'uint8': (0, 255),
    'uint16': (0, 65535),
    'uint32': (0, 4294967295),
    'uint64': (0, 18446744073709551615),
}

# YANG built-in types that are handled generically by the generator
# (union and enumeration get special handling, see below).
yang_base_types = {
    'binary',
    'bits',
    'boolean',
    'decimal64',
    'empty',
    'identityref',
    'instance-identifier',
    'int8',
    'int16',
    'int32',
    'int64',
    'leafref',
    'string',
    'uint8',
    'uint16',
    'uint32',
    'uint64',
    # union, separate handling
    # enumeration, separate handling
}

# YANG statement keywords whose statements may contain child data nodes.
container_nodes = {
    'module',
    'container',
    'choice',
    'case',
    'list',
    'augment',
    # 'grouping',
    'uses',
    'rpc',
    'input',
    'output',
}
class YdkGenException(Exception):
    """Exception raised when there is a problem in the generation.

    .. attribute:: msg

        The message describing the error.
    """

    def __init__(self, msg):
        # Bug fix: call Exception.__init__ so str(exc) and exc.args carry
        # the message (previously str() of this exception was empty).
        super(YdkGenException, self).__init__(msg)
        self.msg = msg
        logger = logging.getLogger('ydkgen')
        # Echo the message when only a single handler is attached, so the
        # failure is still visible on the console.
        if len(logger.handlers) == 1:
            print(msg)
def yang_id(stmt):
    """Return the statement's arg with ':' mapped to '_', or None."""
    arg = getattr(stmt, 'arg', None)
    if arg is None:
        return None
    return arg.replace(':', '_')
def merge_file_path_segments(segs):
    '''Merge the segs to form a path.

    Bug fix: the original tested ``seg.length()``, but Python strings have
    no ``.length()`` method, so any non-empty segment raised AttributeError.
    '''
    return_seg = ''
    for seg in segs:
        # Insert a '/' separator before each non-empty segment unless the
        # accumulated path already ends with one.
        if len(seg) != 0 and not return_seg.endswith('/'):
            return_seg = '%s/' % (return_seg)
        return_seg = '%s%s' % (return_seg, seg)
    return return_seg
def ispythonkeyword(word):
    """True if *word* is a Python keyword or a name reserved by the bindings."""
    reserved = ('None', 'parent', 'children', 'operation', 'exec', 'entity')
    return keyword.iskeyword(word) or word in reserved
def iscppkeyword(word):
    """Return True if *word* must not be used as an identifier in generated C++.

    Covers C++ language keywords plus names reserved by the YDK C++ runtime.
    """
    # Bug fix: a missing comma after 'signed' made Python concatenate the
    # adjacent literals into the single string 'signedfinal', so neither
    # 'signed' nor 'final' was ever treated as reserved.
    return word in ('parent', 'operator', 'inline', 'default', 'virtual',
                    'children', 'value', 'auto', 'entity', 'int', 'signed',
                    'final', 'template', 'index', 'protected', 'true', 'false',
                    'default', 'auto', 'static', 'or', 'do', 'new', 'delete',
                    'private', 'public', 'export', 'virtual', 'for', 'and',
                    'break', 'case', 'catch', 'float', 'long', 'return',
                    'explicit', 'class', 'if', 'try', 'while', 'and', 'or',
                    'const', 'continue', 'double', 'else', 'value', 'namespace',
                    'operation', 'volatile', 'register', 'short', 'extern',
                    'mutable', 'unsigned', 'struct', 'switch', 'void', 'typedef',
                    'typeid', 'using', 'char', 'goto', 'not', 'clock')
def get_sphinx_ref_label(named_element):
    """Sphinx cross-reference label: the fully-qualified name, '.' -> '_'."""
    fqn = named_element.fqn()
    return fqn.replace('.', '_')
def split_to_words(input_text):
    '''Split camel-case text into its component words.

    A word boundary starts if the current character is
    in Caps and the previous character is in lowercase
    for example NetworkElement , at Element the E is in Caps
    and the previous character is k in lower
    or if the current character is in Caps and the next character
    is in lower case ApplicationCLIEvent for Event while reaching E
    the next character is v.

    Bug fix: the original unconditionally appended the accumulator, so an
    empty input returned ``[None]``; it now returns ``[]``.
    '''
    words = []
    word = None
    previous_caps = False
    for index, ch in enumerate(input_text):
        if ch.isupper():
            if not previous_caps:
                # lower -> upper transition: start of a new word.
                if word is not None:
                    words.append(word)
                word = ch
            else:
                # Previous character was also upper-case (inside an acronym).
                if index != len(input_text) - 1 and input_text[index + 1].islower():
                    # Upper followed by lower: the acronym ends here.
                    if word is not None:
                        words.append(word)
                    word = ch
                else:
                    # Still inside the acronym (or at end of text).
                    word = '%s%s' % (word, ch)
            previous_caps = True
        else:
            word = ch if word is None else '%s%s' % (word, ch)
            previous_caps = False
    # Only append the trailing word when one was actually started.
    if word is not None:
        words.append(word)
    return words
def convert_to_reStructuredText(yang_text):
    """Escape reST markup characters in YANG description text."""
    if isinstance(yang_text, bytes):
        yang_text = yang_text.decode('utf-8')
    # None and the empty string pass through unchanged.
    if not yang_text:
        return yang_text
    # Backslashes first so later escapes are not double-escaped.
    escaped = yang_text.replace('\\', '\\\\')
    for ch in (':', '_', '-', '*', '|'):
        escaped = escaped.replace(ch, '\\' + ch)
    return escaped
def is_config_stmt(stmt):
    """Return the nearest explicit i_config flag walking up from *stmt*.

    Defaults to True (YANG's config default) when no ancestor carries one.
    """
    explicit = getattr(stmt, 'i_config', None)
    if explicit is not None:
        return explicit
    parent = stmt.parent
    return True if parent is None else is_config_stmt(parent)
def get_module_name(stmt):
    """Name of the module owning *stmt* (the including module for submodules)."""
    if stmt.keyword == 'module':
        return stmt.arg
    module_stmt = stmt.i_module
    if module_stmt is None:
        return None
    including = module_stmt.i_including_modulename
    return including if including is not None else module_stmt.arg
def sort_classes_at_same_level(classes):
    '''Return the classes ordered so each one follows its dependent siblings.'''
    if len(classes) <= 1:
        return classes

    ordered = []
    # Classes still waiting on siblings, mapped to their dependency lists;
    # OrderedDict keeps the original relative order stable.
    blocked = OrderedDict()

    for cls in classes:
        siblings = cls.get_dependent_siblings()
        if siblings:
            blocked[cls] = siblings
        else:
            ordered.append(cls)

    # Repeatedly release every class whose dependencies are all ordered.
    # (A dependency cycle would loop forever, matching prior behavior.)
    while blocked:
        for cls in list(blocked.keys()):
            if all(dep in ordered for dep in blocked[cls]):
                ordered.append(cls)
                del blocked[cls]

    return ordered
def get_rst_file_name(named_element):
    """Stable, filesystem-safe doc page name derived from bundle + FQN."""
    if hasattr(named_element, 'get_package'):
        package = named_element.get_package()
    else:
        package = named_element
    digest_input = (package.bundle_name + named_element.fqn()).encode('utf-8')
    return 'gen_doc_%s' % hashlib.sha1(digest_input).hexdigest()
def has_terminal_nodes(element):
    """True if the element (or its property_type) owns a leaf/leaf-list prop."""
    if isinstance(element, atypes.Property):
        ptype = element.property_type
    else:
        ptype = element
    return any(is_terminal_prop(p) for p in ptype.properties())
def is_config_prop(prop):
    """Return the statement's i_config flag; defaults to True when absent."""
    return getattr(prop.stmt, 'i_config', True)
def snake_case(input_text):
    """Lower-case *input_text*, mapping '-' and '.' to '_'."""
    translated = input_text.replace('-', '_').replace('.', '_')
    return translated.lower()
def get_include_guard_name(name, file_index=-1):
    """C/C++ include-guard macro for *name*, optionally suffixed by index."""
    upper = name.upper()
    if file_index > -1:
        return '_{0}_{1}_'.format(upper, file_index)
    return '_{0}_'.format(upper)
def is_nonid_class_element(element):
    """True if element is a Class that is not a YANG identity."""
    return isinstance(element, atypes.Class) and not element.is_identity()

def is_class_element(element):
    """True if element is an api_model Class."""
    return isinstance(element, atypes.Class)

def is_identity_element(element):
    """True if element is a Class generated from a YANG identity."""
    return isinstance(element, atypes.Class) and element.is_identity()

def is_list_element(element):
    """True if the element's YANG statement keyword is 'list'."""
    return element.stmt.keyword == 'list'

def is_mandatory_element(element):
    """True if the element's statement carries 'mandatory true'."""
    mandatory = element.stmt.search_one('mandatory')
    return mandatory is not None and mandatory.arg == 'true'

def is_pkg_element(element):
    """True if element is an api_model Package."""
    return isinstance(element, atypes.Package)

def is_presence_element(element):
    """True if the element's statement has a 'presence' substatement."""
    return element.stmt.search_one('presence') is not None

def is_prop_element(element):
    """True if element is an api_model Property."""
    return isinstance(element, atypes.Property)

def is_class_prop(prop):
    """True if the property's type is a Class."""
    return is_class_element(prop.property_type)

def is_decimal64_prop(prop):
    """True if the property's type is YANG decimal64."""
    return isinstance(prop.property_type, ptypes.Decimal64TypeSpec)

def is_empty_prop(prop):
    """True if the property's type is YANG empty."""
    return isinstance(prop.property_type, ptypes.EmptyTypeSpec)

def is_identity_prop(prop):
    """True if the property's type is an identity Class."""
    return is_identity_element(prop.property_type)

def is_identityref_prop(prop):
    """True if the property is an identityref (identity Class + leafref ptr)."""
    return (isinstance(prop.property_type, atypes.Class) and
            prop.property_type.is_identity() and
            prop.stmt.i_leafref_ptr is not None)

def is_leaflist_prop(prop):
    """True if the property's YANG statement keyword is 'leaf-list'."""
    return prop.stmt.keyword == 'leaf-list'

def is_leafref_prop(prop):
    """True if the property is a resolved leafref (path type + target ptr)."""
    return (isinstance(prop.property_type, ptypes.PathTypeSpec) and
            prop.stmt.i_leafref_ptr is not None)

def is_path_prop(prop):
    """True if the property's type is a pyang path (leafref) type spec."""
    return isinstance(prop.property_type, ptypes.PathTypeSpec)

def is_reference_prop(prop):
    """True if the property references another node (leafref or identityref)."""
    return (is_leafref_prop(prop) or is_identityref_prop(prop))
def is_terminal_prop(prop):
    """Return True if *prop* is backed by a terminal YANG statement
    (a leaf or a leaf-list).

    Bug fix: pyang spells the leaf-list keyword 'leaf-list' (exactly as
    the sibling is_leaflist_prop checks); the old tuple only contained
    'leaflist', so leaf-lists were never recognized as terminal.  Both
    spellings are accepted to stay backward-compatible.
    """
    return prop.stmt.keyword in ('leaf', 'leaf-list', 'leaflist')
# True if the property's type is a YANG union.
def is_union_prop(prop):
    return is_union_type_spec(prop.property_type)
# True if *type_spec* is a pyang union type spec.
def is_union_type_spec(type_spec):
    return isinstance(type_spec, ptypes.UnionTypeSpec)
# True if *type_spec* is a pyang identityref type spec.
def is_identityref_type_spec(type_spec):
    return isinstance(type_spec, ptypes.IdentityrefTypeSpec)
def is_match_all(pattern):
    """Return True for the two regex patterns treated as match-everything."""
    wildcard_patterns = ('[^\*].*', '\*')
    return pattern in wildcard_patterns
def get_typedef_stmt(type_stmt):
    """Resolve *type_stmt* through any chain of typedefs.

    Follows ``i_typedef`` links until a statement with no typedef remains
    and returns that underlying 'type' statement.
    """
    while getattr(type_stmt, 'i_typedef', None) is not None:
        type_stmt = type_stmt.i_typedef.search_one('type')
    return type_stmt
def get_top_class(clazz):
    """Walk the ownership chain upward and return the outermost class,
    i.e. the class whose owner is the containing Package."""
    while not isinstance(clazz.owner, atypes.Package):
        clazz = clazz.owner
    return clazz
def get_obj_name(clazz):
    """Build a flat, lower-case object name from the ownership chain of
    *clazz* ('outer_inner_leaf'), outermost class first."""
    obj_names = []
    while not isinstance(clazz, atypes.Package):
        obj_name = clazz.name.lower()
        obj_names.append(obj_name)
        clazz = clazz.owner
    # Names were collected innermost-first; reverse before joining.
    return '_'.join(reversed(obj_names))
def get_qn(lang, element):
    """Return *element*'s qualified name in the target language's syntax.

    Unknown languages yield the empty string.
    """
    if lang == 'py':
        return element.qn()
    if lang == 'cpp':
        return element.fully_qualified_cpp_name()
    return ''
def get_element_path(lang, element, length=None):
    """Return the generated-code access path for *element*, joined with the
    language's member separator ('.' for py, '->' for cpp).

    :param length: optional; truncate to the first *length* segments.
    """
    # the path is built from path segments (seg)
    path = []
    sep = get_path_sep(lang)
    while not is_pkg_element(element):
        seg = _get_element_seg(element)
        if all((is_list_element(element),
                not is_pkg_element(element.owner),
                path)):
            # list/leaf-list contains one element
            seg += '[0]'
        path.append(seg)
        element = element.owner
    if length is None:
        return sep.join(reversed(path))
    else:
        # ever used? -- keeps only the first *length* segments
        path = list(reversed(path))[:length]
        return sep.join(path)
def _get_element_seg(element):
    """Return the single lower-cased path segment for *element*.

    Package children and properties use their own name; nested classes are
    matched against the owning class's properties so the generated property
    name is used instead.
    """
    seg = ''
    if any((is_pkg_element(element.owner),
            is_prop_element(element))):
        seg = element.name
    else:
        # Find the owner's property generated from the same YANG statement.
        for prop in element.owner.properties():
            if prop.stmt == element.stmt:
                seg = prop.name
    return seg.lower()
def get_path_sep(lang):
    """Return the member-access separator used by generated *lang* code."""
    separators = {'py': '.', 'cpp': '->'}
    return separators.get(lang, '')
def has_list_ancestor(clazz):
    """Return True if any ancestor class of *clazz* (up to, but excluding,
    the containing Package) is a keyed YANG list."""
    c = clazz.owner
    parents = []
    while c is not None and not isinstance(c,atypes.Package):
        parents.append(c)
        c = c.owner
    for p in parents:
        # A keyed list has a non-empty list of key properties.
        key_props = p.get_key_props()
        if key_props is not None and len(key_props) > 0:
            return True
    return False
# True if the class sits directly under its module's Package.
def is_top_level_class(clazz):
    return clazz.owner is not None and isinstance(clazz.owner, atypes.Package)
|
|
#!/usr/bin/python
# decrypt_multibit_classic_walletkeys.py
# Extract Private Keys from a MultiBit Classic wallet
# Copyright (C) 2017, HCP
# All rights reserved.
#
# Based on decrypt_bitcoinj_seed.pyw
# Copyright (C) 2014, 2016 Christopher Gurnee
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# If you find this program helpful, please consider a small
# donation to the developer at the following Bitcoin address:
#
# 1NyVDDmhZPcyKhyrkiUFZbqPPuiYxwTujb
#
# Thank You!
from __future__ import print_function
__version__ = '0.4.0'
import hashlib, sys, os, getpass
import aespython.key_expander, aespython.aes_cipher, aespython.cbc_mode
import wallet_pb2, binascii, bitcoin
import pylibscrypt
sha256 = hashlib.sha256
md5 = hashlib.md5
key_expander = aespython.key_expander.KeyExpander(256)
def wait_key():
    ''' Wait for a key press on the console and return it. '''
    # Returns the character read, or None if nothing could be read.
    result = None
    if os.name == 'nt':
        # Windows: msvcrt delivers a raw keypress without waiting for Enter.
        import msvcrt
        result = msvcrt.getch()
    else:
        # POSIX: temporarily put the tty into non-canonical, no-echo mode so
        # a single keypress is returned immediately.
        import termios
        fd = sys.stdin.fileno()
        oldterm = termios.tcgetattr(fd)
        newattr = termios.tcgetattr(fd)
        newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, newattr)
        try:
            result = sys.stdin.read(1)
        except IOError:
            pass
        finally:
            # Always restore the saved terminal attributes.
            termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
    return result
def aes256_cbc_decrypt(ciphertext, key, iv):
    """decrypts the ciphertext using AES256 in CBC mode
    :param ciphertext: the encrypted ciphertext
    :type ciphertext: str
    :param key: the 256-bit key
    :type key: str
    :param iv: the 128-bit initialization vector
    :type iv: str
    :return: the decrypted ciphertext, or raises a ValueError if the key was wrong
    :rtype: str
    """
    # NOTE(review): Python 2 only -- relies on xrange and on treating str as
    # a byte sequence via map(ord, ...) and chr(); a py3 port needs bytes.
    block_cipher  = aespython.aes_cipher.AESCipher( key_expander.expand(map(ord, key)) )
    stream_cipher = aespython.cbc_mode.CBCMode(block_cipher, 16)
    stream_cipher.set_iv(bytearray(iv))
    plaintext = bytearray()
    # Decrypt block by block (AES block size is 16 bytes).
    for i in xrange(0, len(ciphertext), 16):
        plaintext.extend( stream_cipher.decrypt_block(map(ord, ciphertext[i:i+16])) )
    padding_len = plaintext[-1]
    # check for PKCS7 padding: the last byte gives the pad length, and the
    # pad must consist of that many copies of itself; a mismatch almost
    # always means the derived key (i.e. the password) was wrong.
    if not (1 <= padding_len <= 16 and plaintext.endswith(chr(padding_len) * padding_len)):
        raise ValueError('incorrect password')
    return str(plaintext[:-padding_len])
multibit_hd_password = None
def load_wallet(wallet_file, get_password_fn):
    """load and if necessary decrypt a bitcoinj wallet file
    :param wallet_file: an open bitcoinj wallet file
    :type wallet_file: file
    :param get_password_fn: a callback returning a password that's called iff one is required
    :type get_password_fn: function
    :return: the Wallet protobuf message or None if no password was entered when required
    :rtype: wallet_pb2.Wallet
    """
    #// The format of the encrypted ".cipher" file is:
    #// 7 magic bytes 'mendoza' in ASCII.
    #// 1 byte version number of format - initially set to 0 (actually 0x00!)
    #// 8 bytes salt
    #// 16 bytes iv
    #// rest of file is the encrypted byte data
    wallet_file.seek(0)
    magic_bytes = wallet_file.read(7)
    version = wallet_file.read(1)
    wallet_file.seek(0, os.SEEK_END)
    wallet_size = wallet_file.tell()
    wallet_file.seek(0)
    # A cipher file must carry the magic/version header and, being AES-CBC
    # output, have a length that is a multiple of the 16-byte block size.
    if magic_bytes[0:7] == b"mendoza" and version == "\x00" and wallet_size % 16 == 0:
        print("")
        print("MultiBit Classic Cipher file found")
        print("")
        takes_long = not pylibscrypt._done # if a binary library wasn't found, this'll take a while
        ciphertext = wallet_file.read()
        assert len(ciphertext) % 16 == 0
        password = get_password_fn(takes_long)
        if not password:
            return None
        # Derive the encryption key
        salt = ciphertext[8:16]
        key = pylibscrypt.scrypt(password.encode('utf_16_be'), salt, olen=32)
        iv = ciphertext[16:32]
        # Decrypt the wallet ( v0.5.0+ )
        plaintext = aes256_cbc_decrypt(ciphertext[32:], key, iv)
        # Sanity check: a valid decrypted wallet contains "org." at offset 2.
        if plaintext[2:6] != b"org.":
            raise ValueError('incorrect password')
    # Else it's not a cipher file encrypted
    else:
        print("File is NOT a Multibit Classic Cipher File")
        return
    # Parse the wallet protobuf
    pb_wallet = wallet_pb2.Wallet()
    try:
        pb_wallet.ParseFromString(plaintext)
    except Exception as e:
        msg = 'not a wallet file: ' + str(e)
        if password:
            msg = "incorrect password (or " + msg + ")"
        raise ValueError(msg)
    # Dump the parsed wallet to a text file for inspection.
    f = open('parsed_cipher_wallet.txt','w')
    f.write(pb_wallet.__str__())
    f.close()
    print("--------------------------------------------------------------------------------")
    # encryption_type == 2: private keys are individually scrypt/AES
    # encrypted -- ask for the password again and decrypt each key.
    if pb_wallet.encryption_type == 2:
        print("Keys are encrypted")
        takes_long = not pylibscrypt._done # if a binary library wasn't found, this'll take a while
        password = get_password_fn(takes_long)
        if not password:
            return None
        salt = pb_wallet.encryption_parameters.salt
        dkey = pylibscrypt.scrypt(password.encode('utf_16_be'), salt, olen=32)
        for enckeys in pb_wallet.key:
            ciphertext = enckeys.encrypted_data.encrypted_private_key
            iv = enckeys.encrypted_data.initialisation_vector
            privkey = aes256_cbc_decrypt(ciphertext, dkey, iv)
            print("")
            print("Pubkey: " + bitcoin.pubtoaddr(enckeys.public_key))
            print("Privkey: " + bitcoin.encode_privkey(privkey, 'wif_compressed'))
    # encryption_type == 1: secret bytes are stored in the clear.
    elif pb_wallet.encryption_type == 1:
        print("Keys NOT encrypted")
        for enckeys in pb_wallet.key:
            print("")
            print("Pubkey: " + bitcoin.pubtoaddr(enckeys.public_key))
            print("Privkey: " + bitcoin.encode_privkey(enckeys.secret_bytes, 'wif_compressed'))
    print("")
    print("--------------------------------------------------------------------------------")
    return pb_wallet
if __name__ == '__main__':
    # NOTE(review): Python 2 script (str.decode below); run with python2.
    if len(sys.argv) != 2 or sys.argv[1].startswith('-'):
        sys.exit('usage: decrypt_multibit_classic_wallet_cipher.py multibit-wallet-.cipher-file')
    wallet_file = open(sys.argv[1], 'rb')
    def get_password_factory(prompt):
        # Build a prompt-specific password callback for load_wallet().
        def get_password(takes_long_arg_ignored): # must return unicode
            encoding = sys.stdin.encoding or 'ASCII'
            if 'utf' not in encoding.lower():
                print('terminal does not support UTF; passwords with non-ASCII chars might not work', file=sys.stderr)
            password = getpass.getpass(prompt + ' ')
            if isinstance(password, str):
                password = password.decode(encoding) # convert from terminal's encoding to unicode
            return password
        return get_password
    # These functions differ between command-line and GUI runs
    get_password = get_password_factory('This wallet file is encrypted, please enter its password:')
    get_pin = get_password_factory("This wallet's seed is encrypted with a PIN or password, please enter it:")
    display_error = lambda msg: print(msg, file=sys.stderr)
    # Load (and possibly decrypt) the wallet, retrying on bad passwords
    while True:
        try:
            wallet = load_wallet(wallet_file, get_password)
            if not wallet: # if no password was entered
                sys.exit('canceled')
            break
        except ValueError as e:
            display_error(str(e))
            # Only bad-password errors are retried; anything else is fatal.
            if not e.args[0].startswith('incorrect password'):
                raise
    print("")
    print("Press any key to continue...")
    wait_key()
|
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PFIF 1.1 - 1.4 parsing and serialization (see http://zesty.ca/pfif/).
This module converts between PFIF XML documents (PFIF 1.1, 1.2, 1.3, or 1.4) and
plain Python dictionaries that have PFIF 1.4 field names as keys (always 1.4)
and Unicode strings as values. Some useful constants are also defined here
according to the PFIF specification. Use parse_file() to parse PFIF 1.1, 1.2,
1.3, or 1.4 files; use PFIF_1_1, PFIF_1_2, PFIF_1_3, or PFIF_1_4 to serialize to
the desired version."""
__author__ = 'kpy@google.com (Ka-Ping Yee) and many other Googlers'
import StringIO
import logging
import os
import re
import defusedxml.sax
import xml.sax.handler
# Possible values for the 'sex' field on a person record.
PERSON_SEX_VALUES = [
'', # unspecified
'female',
'male',
'other'
]
# Possible values for the 'status' field on a note record.
NOTE_STATUS_VALUES = [
'', # unspecified
'information_sought',
'is_note_author',
'believed_alive',
'believed_missing',
'believed_dead',
]
# Fields to preserve in a placeholder for an expired record.
PLACEHOLDER_FIELDS = [
'person_record_id',
'source_date',
'entry_date',
'expiry_date'
]
# A dict mapping old field names to the new field names in PFIF 1.4,
# for backward compatibility with older PFIF versions.
RENAMED_FIELDS = {
'home_zip': 'home_postal_code', # Renamed in PFIF 1.2
'first_name': 'given_name', # Renamed in PFIF 1.4
'last_name': 'family_name', # Renamed in PFIF 1.4
'found': 'author_made_contact', # Renamed in PFIF 1.4
'other': 'description', # Renamed in PFIF 1.4
}
DESCRIPTION_FIELD_LABEL = 'description:'
def xml_escape(s):
    """Strip characters that are illegal in XML and escape '&', '<', '>'."""
    # XML may only contain the following characters (even after entity
    # references are expanded). See: https://www.w3.org/TR/REC-xml/#charsets
    # (non-raw u'' literal: the \x/\u escapes resolve to the same characters
    # the regex character class needs, and it works under py2 and py3)
    s = re.sub(u'[^\x09\x0a\x0d\x20-\ud7ff\ue000-\ufffd]', '', s)
    # BUG FIX: the replacements had been reduced to identity no-ops
    # (e.g. replace('&','&')); restore the XML predefined entities.
    # '&' must be escaped first so entity text is not double-escaped.
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def convert_description_to_other(desc):
    """Converts 'description' in PFIF 1.4 to 'other' in older versions."""
    INDENT_DEPTH = 4
    # Do not add description label if it's already there, so when exporting and
    # importing the same person record, we don't duplicate the label.
    if not desc.strip() or desc.startswith(DESCRIPTION_FIELD_LABEL):
        return desc
    # Indent the text and prepend the description label; trailing indent
    # spaces left by the join are stripped.
    return DESCRIPTION_FIELD_LABEL + '\n' + ' ' * INDENT_DEPTH + \
        ('\n' + ' ' * INDENT_DEPTH).join(desc.split('\n')).rstrip(' ')
def maybe_convert_other_to_description(other):
    """Converts 'other' in PFIF 1.3 and earlier to 'description' in PFIF 1.4 if
    'other' has only 'description' field. Otherwise it returns 'other' without
    modifying it, so we don't lose any information."""
    description_lines = []
    has_description_field = False
    for line in other.splitlines(True):
        if line.startswith(DESCRIPTION_FIELD_LABEL):
            has_description_field = True
            line = line[len(DESCRIPTION_FIELD_LABEL):]
        if not line.strip():
            # Blank lines carry no field information; skip them.
            continue
        elif re.match(r'\S+:', line):
            # Some other labeled field exists -- keep 'other' untouched.
            return other
        description_lines.append(line.strip(' \t'))
    if not has_description_field:
        return other
    return ''.join(description_lines)
class PfifVersion:
    """Serialization/parsing rules for one PFIF schema version."""
    def __init__(self, version, ns, fields, mandatory_fields, serializers):
        self.version = version
        self.ns = ns
        # A dict mapping each record type to a list of its fields in order.
        self.fields = fields
        # A dict mapping each record type to a list of its mandatory fields.
        # TODO(ryok): we should validate that imported records have these
        # mandatory fields populated.
        self.mandatory_fields = mandatory_fields
        # A dict mapping field names to serializer functions.
        self.serializers = serializers
    def check_tag(self, (ns, local), parent=None):
        """Given a namespace-qualified tag and its parent, returns the PFIF
        type or field name if the tag is valid, or None if the tag is not
        recognized."""
        # NOTE(review): Python 2 tuple-parameter syntax; invalid in py3.
        if ns == self.ns:
            if not parent or local in self.fields[parent]:
                return local
    def write_fields(self, file, type, record, indent=''):
        """Writes PFIF tags for a record's fields."""
        # Mandatory fields are emitted even when their value is empty.
        for field in self.fields[type]:
            if record.get(field) or field in self.mandatory_fields[type]:
                escaped_value = xml_escape(record.get(field, ''))
                file.write(indent + '<pfif:%s>%s</pfif:%s>\n' %
                           (field, escaped_value.encode('utf-8'), field))
    def write_person(self, file, person, notes=[], indent=''):
        """Writes PFIF for a person record and a list of its note records."""
        # NOTE: the default [] is only iterated, never mutated, so the
        # shared-mutable-default pitfall does not apply here.
        file.write(indent + '<pfif:person>\n')
        self.write_fields(file, 'person', person, indent + ' ')
        for note in notes:
            self.write_note(file, note, indent + ' ')
        file.write(indent + '</pfif:person>\n')
    def write_note(self, file, note, indent=''):
        """Writes PFIF for a note record."""
        file.write(indent + '<pfif:note>\n')
        self.write_fields(file, 'note', note, indent + ' ')
        file.write(indent + '</pfif:note>\n')
    def write_file(self, file, persons, get_notes_for_person=lambda p: []):
        """Takes a list of person records and a function that gets the list
        of note records for each person, and writes PFIF to the given file
        object. Each record is a plain dictionary of strings."""
        file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        file.write('<pfif:pfif xmlns:pfif="%s">\n' % self.ns)
        for person in persons:
            self.write_person(file, person, get_notes_for_person(person), ' ')
        file.write('</pfif:pfif>\n')
    def entity_to_dict(self, entity, fields):
        """Converts a person or note record from a Python object (with PFIF 1.4
        field names as attributes) to a Python dictionary (with the given field
        names as keys, and Unicode strings as values)."""
        record = {}
        for field in fields:
            maybe_renamed_field = field
            # Fall back to the PFIF 1.4 name when the entity lacks the
            # version-specific field name.
            if not hasattr(entity, field):
                maybe_renamed_field = RENAMED_FIELDS.get(field, field)
            value = getattr(entity, maybe_renamed_field, None)
            if value:
                # For backward compatibility with PFIF 1.3 and earlier.
                if field == 'other' and maybe_renamed_field == 'description':
                    value = convert_description_to_other(value)
                record[field] = self.serializers.get(field, nop)(value)
        return record
    def person_to_dict(self, entity, expired=False):
        # NOTE(review): local name 'dict' shadows the builtin.
        dict = self.entity_to_dict(entity, self.fields['person'])
        if expired: # Clear all fields except those needed for the placeholder.
            for field in set(dict.keys()) - set(PLACEHOLDER_FIELDS):
                del dict[field]
        return dict
    def note_to_dict(self, entity):
        return self.entity_to_dict(entity, self.fields['note'])
# Serializers that convert Python values to PFIF strings.
def nop(value):
    """Identity serializer for fields that are already strings."""
    return value
def format_boolean(value):
    """Serializes a truthy value to 'true', anything else to 'false'."""
    return value and 'true' or 'false'
def format_utc_datetime(dt):
    """Serializes a datetime to ISO-8601 with a 'Z' suffix ('' if falsy)."""
    return dt and dt.replace(microsecond=0).isoformat() + 'Z' or ''
SERIALIZERS = { # Serialization functions (for fields that need conversion).
'found': format_boolean,
'author_made_contact': format_boolean,
'source_date': format_utc_datetime,
'entry_date': format_utc_datetime,
'expiry_date': format_utc_datetime
}
PFIF_1_1 = PfifVersion(
'1.1',
'http://zesty.ca/pfif/1.1',
{
'person': [ # Fields of a <person> element, in PFIF 1.1 standard order.
'person_record_id',
'entry_date',
'author_name',
'author_email',
'author_phone',
'source_name',
'source_date',
'source_url',
'first_name',
'last_name',
'home_city',
'home_state',
'home_neighborhood',
'home_street',
'home_zip',
'photo_url',
'other',
],
'note': [ # Fields of a <note> element, in PFIF 1.1 standard order.
'note_record_id',
'entry_date',
'author_name',
'author_email',
'author_phone',
'source_date',
'found',
'email_of_found_person',
'phone_of_found_person',
'last_known_location',
'text',
]
},
{ # Mandatory fields in <person> and <note> elements.
'person': ['person_record_id', 'first_name', 'last_name'],
'note': ['note_record_id', 'author_name', 'source_date', 'text'],
},
SERIALIZERS)
PFIF_1_2 = PfifVersion(
'1.2',
'http://zesty.ca/pfif/1.2',
{
'person': [ # Fields of a <person> element in PFIF 1.2.
'person_record_id',
'entry_date',
'author_name',
'author_email',
'author_phone',
'source_name',
'source_date',
'source_url',
'first_name',
'last_name',
'sex',
'date_of_birth',
'age',
'home_street',
'home_neighborhood',
'home_city',
'home_state',
'home_postal_code',
'home_country',
'photo_url',
'other',
],
'note': [ # Fields of a <note> element in PFIF 1.2.
'note_record_id',
'person_record_id',
'linked_person_record_id',
'entry_date',
'author_name',
'author_email',
'author_phone',
'source_date',
'found',
'status',
'email_of_found_person',
'phone_of_found_person',
'last_known_location',
'text',
]
},
{ # Mandatory fields in <person> and <note> elements.
'person': ['person_record_id', 'first_name', 'last_name'],
'note': ['note_record_id', 'author_name', 'source_date', 'text'],
},
SERIALIZERS)
PFIF_1_3 = PfifVersion(
'1.3',
'http://zesty.ca/pfif/1.3',
{
'person': [ # Fields of a <person> element in PFIF 1.3.
'person_record_id',
'entry_date',
'expiry_date',
'author_name',
'author_email',
'author_phone',
'source_name',
'source_date',
'source_url',
'full_name',
'first_name',
'last_name',
'sex',
'date_of_birth',
'age',
'home_street',
'home_neighborhood',
'home_city',
'home_state',
'home_postal_code',
'home_country',
'photo_url',
'other',
],
'note': [ # Fields of a <note> element in PFIF 1.3.
'note_record_id',
'person_record_id',
'linked_person_record_id',
'entry_date',
'author_name',
'author_email',
'author_phone',
'source_date',
'found',
'status',
'email_of_found_person',
'phone_of_found_person',
'last_known_location',
'text',
]
},
{ # Mandatory fields in <person> and <note> elements.
'person': ['person_record_id', 'source_date', 'full_name'],
'note': ['note_record_id', 'author_name', 'source_date', 'text'],
},
SERIALIZERS)
PFIF_1_4 = PfifVersion(
'1.4',
'http://zesty.ca/pfif/1.4',
{
'person': [ # Fields of a <person> element in PFIF 1.4.
'person_record_id',
'entry_date',
'expiry_date',
'author_name',
'author_email',
'author_phone',
'source_name',
'source_date',
'source_url',
'full_name',
'given_name',
'family_name',
'alternate_names',
'description',
'sex',
'date_of_birth',
'age',
'home_street',
'home_neighborhood',
'home_city',
'home_state',
'home_postal_code',
'home_country',
'photo_url',
'profile_urls',
],
'note': [ # Fields of a <note> element in PFIF 1.4.
'note_record_id',
'person_record_id',
'linked_person_record_id',
'entry_date',
'author_name',
'author_email',
'author_phone',
'source_date',
'author_made_contact',
'status',
'email_of_found_person',
'phone_of_found_person',
'last_known_location',
'text',
'photo_url',
]
},
{ # Mandatory fields in <person> and <note> elements.
'person': ['person_record_id', 'source_date', 'full_name'],
'note': ['note_record_id', 'author_name', 'source_date', 'text'],
},
SERIALIZERS)
PFIF_VERSIONS = {
'1.1': PFIF_1_1,
'1.2': PFIF_1_2,
'1.3': PFIF_1_3,
'1.4': PFIF_1_4
}
PFIF_DEFAULT_VERSION = '1.4'
assert PFIF_DEFAULT_VERSION in PFIF_VERSIONS
def check_pfif_tag(name, parent=None):
    """Recognizes a PFIF XML tag from any version of PFIF."""
    # Try the newest namespace first; each check returns the local
    # type/field name on a match, or None.
    return PFIF_1_4.check_tag(name, parent) or \
           PFIF_1_3.check_tag(name, parent) or \
           PFIF_1_2.check_tag(name, parent) or \
           PFIF_1_1.check_tag(name, parent)
class Handler(xml.sax.handler.ContentHandler):
    """SAX event handler for parsing PFIF documents."""
    def __init__(self, rename_fields=True):
        # Whether to attempt to rename fields based on RENAMED_FIELDS.
        self.rename_fields = rename_fields
        self.tags = []
        self.person = {}
        self.note = {}
        self.enclosed_notes = [] # Notes enclosed by the current <person>.
        self.person_records = []
        self.note_records = []
    def startElementNS(self, tag, qname, attrs):
        # Track the open-tag stack; start a fresh record dict on
        # <person>/<note>.
        self.tags.append(tag)
        if check_pfif_tag(tag) == 'person':
            self.person = {}
            self.enclosed_notes = []
        elif check_pfif_tag(tag) == 'note':
            self.note = {}
    def endElementNS(self, tag, qname):
        assert self.tags.pop() == tag
        if check_pfif_tag(tag) == 'person':
            self.person_records.append(self.person)
            if 'person_record_id' in self.person:
                # Copy the person's person_record_id to any enclosed notes.
                for note in self.enclosed_notes:
                    note['person_record_id'] = self.person['person_record_id']
        elif check_pfif_tag(tag) == 'note':
            # Save all parsed notes (whether or not enclosed in <person>).
            self.note_records.append(self.note)
            self.enclosed_notes.append(self.note)
    def append_to_field(self, record, tag, parent, content):
        # Accumulate character data into the named field; SAX may deliver
        # the text of one element in several chunks.
        field = check_pfif_tag(tag, parent)
        if field:
            record[field] = record.get(field, u'') + content
        elif content.strip():
            logging.warn('ignored tag %r with content %r', tag, content)
    def characters(self, content):
        if content and len(self.tags) >= 2:
            parent, tag = self.tags[-2], self.tags[-1]
            if check_pfif_tag(parent) == 'person':
                self.append_to_field(self.person, tag, 'person', content)
            elif check_pfif_tag(parent) == 'note':
                self.append_to_field(self.note, tag, 'note', content)
def rename_fields_to_latest(record):
    """Renames fields in PFIF 1.3 and earlier to PFIF 1.4, and also does a
    special conversion for other -> description."""
    # NOTE(review): Python 2 only (dict.iteritems). Renaming overwrites any
    # value already present under the new name -- presumably records never
    # contain both spellings; verify against callers.
    for old, new in RENAMED_FIELDS.iteritems():
        if old in record:
            record[new] = record[old]
            # For backward-compatibility with PFIF 1.3 and earlier.
            if old == 'other' and new =='description':
                record[new] = maybe_convert_other_to_description(record[old])
            del record[old]
def parse_file(pfif_utf8_file, rename_fields=True):
    """Reads a UTF-8-encoded PFIF file to give a list of person records and a
    list of note records. Each record is a plain dictionary of strings,
    with PFIF 1.4 field names as keys if rename_fields is True; otherwise,
    the field names are kept as is in the input XML file."""
    handler = Handler(rename_fields)
    # defusedxml hardens the stdlib SAX parser against malicious input.
    parser = defusedxml.sax.make_parser()
    parser.setFeature(xml.sax.handler.feature_namespaces, True)
    # Below two are to avoid XML External Entity attacks:
    # https://www.owasp.org/index.php/XML_External_Entity_(XXE)_Processing
    parser.setFeature(xml.sax.handler.feature_external_pes, False)
    parser.setFeature(xml.sax.handler.feature_external_ges, False)
    parser.setContentHandler(handler)
    parser.parse(pfif_utf8_file)
    if rename_fields:
        for record in handler.person_records + handler.note_records:
            rename_fields_to_latest(record)
    return handler.person_records, handler.note_records
|
|
import sys
from qt import *
from kuralib import kuraapp
from kuralib.lng_lex import *
from kuragui.guitabdialog import guiTabDialog
from kuragui.guilistview import guiListView
from kuragui.guidetaillist import guiDetailList
from kuragui.guicombobox import guiComboBox
from kuragui.constants import *
from kuragui.guilineedit import guiLineEdit
from kuragui.guiconfig import guiConf
from formtags import TagsTab
from wdglexeme import tabLexeme
from dlglexeme import dlgLexeme
from dlglexchooser import dlgLexChooser
from resource import *
from dbobj.dbexceptions import dbError
class tabElementSplit(QWidget):
    """Qt tab that splits a lng_element into morpheme sub-elements.

    The user types the element text with '.' between morphemes; pressing
    'Create morphemes' deletes any existing child elements and inserts one
    lng_element per morpheme.  Existing children are shown in a tree view
    and can be opened for editing.
    """
    def __init__(self, parent, parentRecord):
        QWidget.__init__(self, parent)
        self.parentRecord=parentRecord
        wdgElementLayout = QGridLayout(self)
        wdgElementLayout.setSpacing(6)
        wdgElementLayout.setMargin(11)
        self.lblElement = QLabel(self,'lblElement')
        self.lblElement.setText("Separate the morphemes with a dot.")
        wdgElementLayout.addWidget(self.lblElement,0,0)
        self.txtElement = guiLineEdit(self)
        self.txtElement.setFont(guiConf.widgetfont)
        self.txtElement.setText(parentRecord.getFieldValue("text"))
        wdgElementLayout.addWidget(self.txtElement,0,1)
        self.lblSubType=QLabel(self)
        self.lblSubType.setText("Type")
        wdgElementLayout.addWidget(self.lblSubType, 1, 0)
        self.cmbSubType=guiComboBox(self)
        wdgElementLayout.addWidget(self.cmbSubType, 1, 1)
        self.cmbSubType.fillComboBox(self.parentRecord, "elementtypecode", INSERT)
        self.lblSeparate = QLabel(self,'lblSeparate')
        self.lblSeparate.setText(
            'Nota Bene: all current morpheme data for this element will be deleted.')
        self.lblSeparate.setAlignment(QLabel.WordBreak |
                                      QLabel.AlignVCenter |
                                      QLabel.AlignLeft)
        wdgElementLayout.addWidget(self.lblSeparate,2,0)
        self.bnSeparate = QPushButton(self,'bnSeparate')
        self.bnSeparate.setText('Create morphemes')
        self.connect(self.bnSeparate, SIGNAL("clicked()")
                     , self.slotSplitElement)
        wdgElementLayout.addWidget(self.bnSeparate,2,1)
        self.lsvChildElements = QListView(self,'lsvChildElements')
        self.lsvChildElements.setFont(guiConf.widgetfont)
        self.lsvChildElements.setSorting(-1, FALSE)
        self.lsvChildElements.setRootIsDecorated(TRUE)
        self.lsvChildElements.setShowSortIndicator(FALSE)
        self.lsvChildElements.setTreeStepSize(40)
        self.lsvChildElements.addColumn("Element")
        self.lsvChildElements.setColumnWidthMode(0, QListView.Maximum)
        self.connect(self.lsvChildElements, SIGNAL("returnPressed(QListViewItem *)")
                     , self.slotItemSelected)
        self.connect(self.lsvChildElements, SIGNAL("doubleClicked(QListViewItem *)")
                     , self.slotItemSelected)
        wdgElementLayout.addMultiCellWidget(self.lsvChildElements,3,3,0,1)
        self.refresh()
    def refresh(self):
        # Rebuild the tree view and the item<->record maps from the database.
        self.item_to_record={}
        self.record_to_item={}
        self.lsvChildElements.clear()
        self.addChildren(self.lsvChildElements, self.parentRecord.elementnr)
    def addChildren(self, parent, elementnr):
        # Recursively insert list-view items for all child elements of
        # *elementnr*, preserving the order returned by the database.
        if elementnr == None:
            return
        previous=None
        for elmt in kuraapp.app.getObjects("lng_element"
                                           , parent_elementnr = elementnr):
            if previous==None:
                item=QListViewItem(parent)
            else:
                item=QListViewItem(parent, previous)
            item.setText(0, elmt.getFieldValue("text"))
            previous=item
            self.item_to_record[item]=elmt
            self.record_to_item[elmt]=item
            self.addChildren(item, elmt.elementnr)
    def slotSplitElement(self):
        # Confirm before destroying existing sub-elements.
        if self.lsvChildElements.childCount() > 0:
            if QMessageBox.warning(self, "Kura"
                                   , "This element already has sub-elements. Do you want to delete them?"
                                   , "Yes", "No"
                                   ) == 1:
                return
            try:
                self.parentRecord.deleteChildren("lng_element")
            except dbError, dberr:
                QMessageBox.information(self
                                        , "Error while deleting"
                                        , dberr.errorMessage)
        # Split on '.' and insert one child lng_element per morpheme.
        els=unicode(self.txtElement.text()).split(".")
        if len(els) > 1:
            seqnr=0
            for element in els:
                record=kuraapp.app.createObject("lng_element"
                                                , streamnr=self.parentRecord.streamnr
                                                , seqnr=seqnr
                                                , parent_elementnr=self.parentRecord.elementnr
                                                , text=element
                                                , languagenr=self.parentRecord.languagenr
                                                , elementtypecode=self.cmbSubType.currentKey()
                                                , usernr=guiConf.usernr
                                                )
                record.insert()
                seqnr+=1
            self.refresh()
    def slotItemSelected(self, item):
        # Open the clicked morpheme in a full element-editor dialog.
        self.dlgOpen = dlgElement(kuraapp.app,
                                  self,
                                  'Edit Morpheme', "Morpheme",
                                  self.item_to_record[item],
                                  UPDATE,
                                  kuraapp.app.tables["lng_element"])
        self.connect(self.dlgOpen, PYSIGNAL("sigAcceptData")
                     , self.slotOpenAccept)
        self.dlgOpen.show()
    def slotOpenAccept(self):
        # Reflect an accepted edit back into the tree view.
        rec = self.dlgOpen.getMasterRecord()
        item = self.record_to_item[rec]
        item.setText(0, rec.text)
class tabElementLexeme(tabLexeme):
    """Tab linking a lng_element to a lexicon entry (lng_lex).

    Shows the linked lexeme's fields and offers Zoom (edit), Pick (choose
    an existing lexeme) and Add (create a new lexicon entry from this
    element's data).
    """
    def __init__(self, parent, parentRecord):
        tabLexeme.__init__(self, parent)
        self.parentRecord=parentRecord
        if self.parentRecord.lexnr != None:
            self.lexeme=kuraapp.app.getObject( "lng_lex"
                                               , lexnr=self.parentRecord.lexnr
                                               )
            self.__setValues(self.lexeme)
        else:
            self.lexeme = None
        self.connect(self.bnZoom, SIGNAL("clicked()"), self.__zoom)
        self.connect(self.bnPick, SIGNAL("clicked()"), self.__pick)
        self.connect(self.bnAdd, SIGNAL("clicked()"), self.__add)
    def __setValues(self, lexeme):
        # Copy the lexeme's fields into the display widgets.
        self.txtForm.setFont(guiConf.widgetfont)
        self.txtPhoneticForm.setFont(guiConf.widgetfont)
        self.txtGlosse.setFont(guiConf.widgetfont)
        self.txtDescription.setFont(guiConf.widgetfont)
        self.txtLanguage.setFont(guiConf.widgetfont)
        self.txtForm.setText(lexeme.getFieldValue("form"))
        self.txtPhoneticForm.setText(lexeme.getFieldValue("phonetic_form"))
        self.txtGlosse.setText(lexeme.getFieldValue("glosse"))
        self.txtDescription.setText(lexeme.getFieldValue("description"))
        self.txtLanguage.setText(lexeme.getFieldValue("language"))
    def __zoom(self):
        # Open the full lexeme editor for the linked lexeme, if any.
        if self.lexeme:
            self.dlgZoom=dlgLexeme(kuraapp.app, self, 'Edit lexical item', "Lexeme",
                                   self.lexeme, UPDATE, self.lexeme.tableDef)
            self.connect(self.dlgZoom, PYSIGNAL("sigAcceptData")
                         , self.slotZoomAccept)
            self.dlgZoom.show()
    def slotZoomAccept(self):
        # Re-link and re-display after the lexeme editor is accepted.
        self.lexeme=self.dlgZoom.getMasterRecord()
        self.parentRecord.lexnr=self.lexeme.lexnr
        self.__setValues(self.lexeme)
    def __pick(self):
        #
        # This is a modal dialog
        #
        dlgPick=dlgLexChooser(self, self.parentRecord)
        dlgPick.txtForm.setText(self.parentRecord.getFieldValue("text"))
        dlgPick.refreshSource()
        if dlgPick.exec_loop()==1:
            self.lexeme=dlgPick.masterRecord
            self.parentRecord.lexnr=self.lexeme.lexnr
            self.parentRecord.lexeme=self.lexeme.glosse
            self.__setValues(self.lexeme)
    def __add(self):
        # Offer to create a new lexicon entry seeded from this element.
        if QMessageBox.warning(self, "Kura"
                               , "Do you want to add this element to the lexicon?"
                               , "Yes", "No", "Cancel"
                               , 2, 3
                               ) == 0:
            self.lexeme=kuraapp.app.createObject("lng_lex"
                                                 , languagenr=self.parentRecord.languagenr
                                                 , form=self.parentRecord.text
                                                 , usernr=guiConf.usernr
                                                 , phonetic_form=self.parentRecord.getPhoneticTranscription()
                                                 , glosse=self.parentRecord.getGlosse()
                                                 , description=self.parentRecord.getDescription()
                                                 )
            if self.lexeme.glosse is None:
                self.lexeme.glosse="<empty>"
            self.lexeme.insert()
            self.parentRecord.lexnr=self.lexeme.lexnr
            self.__setValues(self.lexeme)
class dlgElement(guiTabDialog):
    """Tabbed editor dialog for a text element: element fields plus child
    tabs for tags, the related lexeme, and the morpheme split."""

    def __init__(self, app, parent, title, firstTabTitle,
                 record, mode, tableDef, showChildren=FALSE):
        # NOTE(review): the firstTabTitle argument is ignored -- "&Elements"
        # is always passed to the base class; confirm no caller relies on it.
        guiTabDialog.__init__( self
                             , app=app
                             , parent=parent
                             , title=title
                             , firstTabTitle="&Elements"
                             , record=record
                             , mode=mode
                             , tableDef=tableDef
                             , showChildren=FALSE
                             , addBottomSpring=TRUE)
        self.tagstab=TagsTab(self, app, record, "lng_element_tag")
        guiTabDialog.addChildTab( self
                                , "&Tags"
                                , self.tagstab
                                , record
                                , DETAIL)
        guiTabDialog.addChildTab(self
                                , "&Related Lexeme"
                                , tabElementLexeme(self, record)
                                , record
                                , DETAIL)
        guiTabDialog.addChildTab( self
                                , "&Morphemes"
                                , tabElementSplit(self, record)
                                , record
                                , DETAIL)

    def accept(self):
        # Save the tags first; a failure is reported to the user but the
        # dialog is still accepted (NOTE(review): confirm that accepting
        # despite a failed tag save is intentional).
        try:
            self.tagstab.formTags.save()
        except Exception, e:
            QMessageBox.critical(self, "Error saving tags", unicode(e))
        guiTabDialog.accept(self)
__copyright__="""
copyright : (C) 2002 by Boudewijn Rempt
see copyright notice for license
email : boud@valdyas.org
"""
__revision__="""$Revision: 1.10 $"""[11:-2]
|
|
# # header
# coding: utf-8
class Token(object):
    """Base class for all YAML tokens.

    Tracks source positions (``start_mark``/``end_mark``) and an optional
    comment pair stored lazily in ``_comment`` as ``[post, pre]``.
    """

    __slots__ = 'start_mark', 'end_mark', '_comment',

    def __init__(self, start_mark, end_mark):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # BUGFIX: the original called hasattr('self', key) -- probing the
        # *string* 'self' instead of the instance -- so set attributes such
        # as _comment never appeared in the repr.
        attributes = [key for key in self.__slots__
                      if not key.endswith('_mark') and hasattr(self, key)]
        attributes.sort()
        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
                               for key in attributes])
        return '%s(%s)' % (self.__class__.__name__, arguments)

    def add_post_comment(self, comment):
        """Attach a comment that follows this token (slot 0)."""
        if not hasattr(self, '_comment'):
            self._comment = [None, None]
        self._comment[0] = comment

    def add_pre_comments(self, comments):
        """Attach the list of comments preceding this token (slot 1)."""
        if not hasattr(self, '_comment'):
            self._comment = [None, None]
        assert self._comment[1] is None
        self._comment[1] = comments

    def get_comment(self):
        """Return the [post, pre] comment pair, or None if none attached."""
        return getattr(self, '_comment', None)

    @property
    def comment(self):
        # Property form of get_comment(); both are kept for compatibility.
        return getattr(self, '_comment', None)

    def move_comment(self, target, empty=False):
        """move a comment from this token to target (normally next token)
        used to combine e.g. comments before a BlockEntryToken to the
        ScalarToken that follows it
        empty is a special for empty values -> comment after key
        """
        c = self.comment
        if c is None:
            return
        # don't push beyond last element
        if isinstance(target, StreamEndToken):
            return
        if isinstance(self, ValueToken) and isinstance(target, BlockEntryToken):
            return
        delattr(self, '_comment')
        tc = target.comment
        if not tc:  # target comment, just insert
            # special for empty value in key: value issue 25
            if empty:
                c = [c[0], c[1], None, None, c[0]]
            target._comment = c
            return self
        if c[0] and tc[0] or c[1] and tc[1]:
            # BUGFIX: the original wrote "% c, tc", binding '%' only to c
            # (a TypeError / malformed message) and passing tc as a second
            # exception argument. Both values belong in the format tuple.
            raise NotImplementedError('overlap in comment %r %r' % (c, tc))
        if c[0]:
            tc[0] = c[0]
        if c[1]:
            tc[1] = c[1]
        return self

    def split_comment(self):
        """ split the post part of a comment, and return it
        as comment to be added. Delete second part if [None, None]
        abc:  # this goes to sequence
          # this goes to first element
        - first element
        """
        comment = self.comment
        if comment is None or comment[0] is None:
            return None  # nothing to do
        ret_val = [comment[0], None]
        if comment[1] is None:
            # Nothing left to keep on this token; drop the attribute.
            delattr(self, '_comment')
        return ret_val


# class BOMToken(Token):
#     id = '<byte order mark>'


class DirectiveToken(Token):
    """A %YAML / %TAG directive; carries the directive name and value."""
    __slots__ = 'name', 'value',
    id = '<directive>'

    def __init__(self, name, value, start_mark, end_mark):
        Token.__init__(self, start_mark, end_mark)
        self.name = name
        self.value = value


class DocumentStartToken(Token):
    __slots__ = ()
    id = '<document start>'


class DocumentEndToken(Token):
    __slots__ = ()
    id = '<document end>'


class StreamStartToken(Token):
    """Start of the token stream; remembers the input encoding, if any."""
    __slots__ = 'encoding',
    id = '<stream start>'

    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        Token.__init__(self, start_mark, end_mark)
        self.encoding = encoding


class StreamEndToken(Token):
    __slots__ = ()
    id = '<stream end>'


class BlockSequenceStartToken(Token):
    __slots__ = ()
    id = '<block sequence start>'


class BlockMappingStartToken(Token):
    __slots__ = ()
    id = '<block mapping start>'


class BlockEndToken(Token):
    __slots__ = ()
    id = '<block end>'


class FlowSequenceStartToken(Token):
    __slots__ = ()
    id = '['


class FlowMappingStartToken(Token):
    __slots__ = ()
    id = '{'


class FlowSequenceEndToken(Token):
    __slots__ = ()
    id = ']'


class FlowMappingEndToken(Token):
    __slots__ = ()
    id = '}'


class KeyToken(Token):
    __slots__ = ()
    id = '?'


class ValueToken(Token):
    __slots__ = ()
    id = ':'


class BlockEntryToken(Token):
    __slots__ = ()
    id = '-'


class FlowEntryToken(Token):
    __slots__ = ()
    id = ','


class AliasToken(Token):
    """An *alias reference; value is the anchor name."""
    __slots__ = 'value',
    id = '<alias>'

    def __init__(self, value, start_mark, end_mark):
        Token.__init__(self, start_mark, end_mark)
        self.value = value


class AnchorToken(Token):
    """An &anchor definition; value is the anchor name."""
    __slots__ = 'value',
    id = '<anchor>'

    def __init__(self, value, start_mark, end_mark):
        Token.__init__(self, start_mark, end_mark)
        self.value = value


class TagToken(Token):
    __slots__ = 'value',
    id = '<tag>'

    def __init__(self, value, start_mark, end_mark):
        Token.__init__(self, start_mark, end_mark)
        self.value = value


class ScalarToken(Token):
    """A scalar value; ``plain`` is True for unquoted scalars and ``style``
    records the quoting/block style character, if any."""
    __slots__ = 'value', 'plain', 'style',
    id = '<scalar>'

    def __init__(self, value, plain, start_mark, end_mark, style=None):
        Token.__init__(self, start_mark, end_mark)
        self.value = value
        self.plain = plain
        self.style = style


class CommentToken(Token):
    __slots__ = 'value', 'pre_done',
    id = '<comment>'

    def __init__(self, value, start_mark, end_mark):
        Token.__init__(self, start_mark, end_mark)
        self.value = value

    def reset(self):
        # Clear the "pre comment handled" flag, if set.
        if hasattr(self, 'pre_done'):
            delattr(self, 'pre_done')
|
|
from __future__ import unicode_literals
import re
import warnings
from django.template.base import (
TOKEN_BLOCK, TOKEN_COMMENT, TOKEN_TEXT, TOKEN_VAR, TRANSLATOR_COMMENT_MARK,
Lexer,
)
from django.utils import six
from django.utils.encoding import force_text
from django.utils.six import StringIO
from . import TranslatorCommentWarning, trim_whitespace
# Matches any single non-whitespace character.
dot_re = re.compile(r'\S')


def blankout(src, char):
    """
    Replace every non-whitespace character in ``src`` with ``char``.

    Whitespace (including newlines) is preserved, so the result has the
    same layout as the input. Used in the templatize function.
    """
    masked = dot_re.sub(char, src)
    return masked
# Extracts the quoted string following ``context`` inside a tag's contents.
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(
    # Match the trans 'some text' part
    r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))"""
    # Match and ignore optional filters
    r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
    # Match the optional context part
    r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
)
# Opening {% blocktrans %} tag, with an optional quoted context argument.
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
# Matches _("...") / _('...') constant-string calls inside tag contents.
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None, charset='utf-8'):
    """
    Turn a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.

    The template's token stream is walked with a small state machine:
    ``incomment`` while inside {% comment %} blocks, ``intrans``/``inplural``
    while inside {% blocktrans %} blocks. Non-translatable text is blanked
    out (see ``blankout``) so that line numbers reported by xgettext still
    match the original template.
    """
    src = force_text(src, charset)
    out = StringIO('')
    message_context = None
    intrans = False
    inplural = False
    trimmed = False
    singular = []
    plural = []
    incomment = False
    comment = []
    # Maps a line number to the translator comments collected on that line.
    lineno_comment_map = {}
    comment_lineno_cache = None
    # Adding the u prefix allows gettext to recognize the Unicode string
    # (#26093).
    raw_prefix = 'u' if six.PY3 else ''

    def join_tokens(tokens, trim=False):
        message = ''.join(tokens)
        if trim:
            message = trim_whitespace(message)
        return message
    for t in Lexer(src).tokenize():
        if incomment:
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                content = ''.join(comment)
                translators_comment_start = None
                # Find the last line that starts a translator comment; only
                # lines from there onwards are emitted as '#' comments.
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(' # %s' % line)
                    else:
                        out.write(' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    # End of {% blocktrans %}: emit the collected message(s).
                    if inplural:
                        if message_context:
                            out.write(' npgettext({p}{!r}, {p}{!r}, {p}{!r},count) '.format(
                                message_context,
                                join_tokens(singular, trimmed),
                                join_tokens(plural, trimmed),
                                p=raw_prefix,
                            ))
                        else:
                            out.write(' ngettext({p}{!r}, {p}{!r}, count) '.format(
                                join_tokens(singular, trimmed),
                                join_tokens(plural, trimmed),
                                p=raw_prefix,
                            ))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        if message_context:
                            out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
                                message_context,
                                join_tokens(singular, trimmed),
                                p=raw_prefix,
                            ))
                        else:
                            out.write(' gettext({p}{!r}) '.format(
                                join_tokens(singular, trimmed),
                                p=raw_prefix,
                            ))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError(
                        "Translation blocks must not include other block tags: "
                        "%s (%sline %d)" % (t.contents, filemsg, t.lineno)
                    )
            elif t.token_type == TOKEN_VAR:
                # Template variables become named placeholders in the message.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                contents = t.contents.replace('%', '%%')
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            # Handle comment tokens (`{# ... #}`) plus other constructs on
            # the same line:
            if comment_lineno_cache is not None:
                cur_lineno = t.lineno + t.contents.count('\n')
                if comment_lineno_cache == cur_lineno:
                    if t.token_type != TOKEN_COMMENT:
                        # A translator comment must be the last item on its
                        # line; otherwise warn and discard it.
                        for c in lineno_comment_map[comment_lineno_cache]:
                            filemsg = ''
                            if origin:
                                filemsg = 'file %s, ' % origin
                            warn_msg = (
                                "The translator-targeted comment '%s' "
                                "(%sline %d) was ignored, because it wasn't "
                                "the last item on the line."
                            ) % (c, filemsg, comment_lineno_cache)
                            warnings.warn(warn_msg, TranslatorCommentWarning)
                        lineno_comment_map[comment_lineno_cache] = []
                else:
                    out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
                comment_lineno_cache = None
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    # {% trans "..." %}: strip the quotes around the literal.
                    g = imatch.group(1)
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = g.replace('%', '%%')
                    if imatch.group(2):
                        # A context is provided
                        context_match = context_re.match(imatch.group(2))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
                            message_context, g, p=raw_prefix
                        ))
                        message_context = None
                    else:
                        out.write(' gettext({p}{!r}) '.format(g, p=raw_prefix))
                elif bmatch:
                    # {% blocktrans %}: enter translation-collection mode.
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    if bmatch.group(1):
                        # A context is provided
                        context_match = context_re.match(bmatch.group(1))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    trimmed = 'trimmed' in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                # A variable node: only _( ... ) constants and filter
                # arguments containing :_(...) are translatable.
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':', 1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                    lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
                    comment_lineno_cache = t.lineno
            else:
                out.write(blankout(t.contents, 'X'))
    return out.getvalue()
|
|
"""Numeric integration of data coming from a source sensor over time."""
from decimal import Decimal, DecimalException
import logging
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
PLATFORM_SCHEMA,
STATE_CLASS_TOTAL,
SensorEntity,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_METHOD,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TIME_DAYS,
TIME_HOURS,
TIME_MINUTES,
TIME_SECONDS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)

ATTR_SOURCE_ID = "source"

# Configuration keys for the platform schema below.
CONF_SOURCE_SENSOR = "source"
CONF_ROUND_DIGITS = "round"
CONF_UNIT_PREFIX = "unit_prefix"
CONF_UNIT_TIME = "unit_time"
CONF_UNIT_OF_MEASUREMENT = "unit"

# Supported Riemann-sum methods.
TRAPEZOIDAL_METHOD = "trapezoidal"
LEFT_METHOD = "left"
RIGHT_METHOD = "right"
INTEGRATION_METHOD = [TRAPEZOIDAL_METHOD, LEFT_METHOD, RIGHT_METHOD]

# SI Metric prefixes
UNIT_PREFIXES = {None: 1, "k": 10 ** 3, "M": 10 ** 6, "G": 10 ** 9, "T": 10 ** 12}

# SI Time prefixes
UNIT_TIME = {
    TIME_SECONDS: 1,
    TIME_MINUTES: 60,
    TIME_HOURS: 60 * 60,
    TIME_DAYS: 24 * 60 * 60,
}

ICON = "mdi:chart-histogram"

DEFAULT_ROUND = 3

PLATFORM_SCHEMA = vol.All(
    # The legacy "unit" option is accepted but flagged as deprecated.
    cv.deprecated(CONF_UNIT_OF_MEASUREMENT),
    PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_NAME): cv.string,
            vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
            vol.Optional(CONF_ROUND_DIGITS, default=DEFAULT_ROUND): vol.Coerce(int),
            vol.Optional(CONF_UNIT_PREFIX, default=None): vol.In(UNIT_PREFIXES),
            vol.Optional(CONF_UNIT_TIME, default=TIME_HOURS): vol.In(UNIT_TIME),
            vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
            vol.Optional(CONF_METHOD, default=TRAPEZOIDAL_METHOD): vol.In(
                INTEGRATION_METHOD
            ),
        }
    ),
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the integration sensor."""
    # Build a single IntegrationSensor from the validated platform config.
    integral = IntegrationSensor(
        config[CONF_SOURCE_SENSOR],
        config.get(CONF_NAME),  # optional; the entity derives a default name
        config[CONF_ROUND_DIGITS],
        config[CONF_UNIT_PREFIX],
        config[CONF_UNIT_TIME],
        config.get(CONF_UNIT_OF_MEASUREMENT),  # deprecated "unit" option
        config[CONF_METHOD],
    )

    async_add_entities([integral])
class IntegrationSensor(RestoreEntity, SensorEntity):
    """Representation of an integration sensor.

    Accumulates the Riemann integral over time of a numeric source sensor's
    state, scaled by the configured unit prefix and time unit.
    """

    def __init__(
        self,
        source_entity,
        name,
        round_digits,
        unit_prefix,
        unit_time,
        unit_of_measurement,
        integration_method,
    ):
        """Initialize the integration sensor."""
        self._sensor_source_id = source_entity
        self._round_digits = round_digits
        self._state = None  # running integral; becomes a Decimal once computed
        self._method = integration_method

        self._name = name if name is not None else f"{source_entity} integral"
        # Template for deriving the unit from the source's unit, e.g. "k{}h".
        self._unit_template = (
            f"{'' if unit_prefix is None else unit_prefix}{{}}{unit_time}"
        )
        self._unit_of_measurement = unit_of_measurement
        self._unit_prefix = UNIT_PREFIXES[unit_prefix]
        self._unit_time = UNIT_TIME[unit_time]
        self._attr_state_class = STATE_CLASS_TOTAL

    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        await super().async_added_to_hass()
        # Restore the previous integral (and unit/device class) on restart.
        state = await self.async_get_last_state()
        if state:
            try:
                self._state = Decimal(state.state)
            except (DecimalException, ValueError) as err:
                _LOGGER.warning("Could not restore last state: %s", err)
            else:
                self._attr_device_class = state.attributes.get(ATTR_DEVICE_CLASS)
                if self._unit_of_measurement is None:
                    self._unit_of_measurement = state.attributes.get(
                        ATTR_UNIT_OF_MEASUREMENT
                    )

        @callback
        def calc_integration(event):
            """Handle the sensor state changes."""
            old_state = event.data.get("old_state")
            new_state = event.data.get("new_state")

            # BUGFIX: new_state can be None (e.g. the source entity was
            # removed); the original dereferenced new_state.attributes
            # unconditionally, raising AttributeError.
            if new_state is None:
                return

            if self._unit_of_measurement is None:
                unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
                self._unit_of_measurement = self._unit_template.format(
                    "" if unit is None else unit
                )

            # Integrating power yields energy; propagate the device class.
            if (
                self.device_class is None
                and new_state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_POWER
            ):
                self._attr_device_class = DEVICE_CLASS_ENERGY

            if (
                old_state is None
                or old_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE)
                or new_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE)
            ):
                return

            try:
                # integration as the Riemann integral of previous measures.
                area = 0
                elapsed_time = (
                    new_state.last_updated - old_state.last_updated
                ).total_seconds()

                if self._method == TRAPEZOIDAL_METHOD:
                    area = (
                        (Decimal(new_state.state) + Decimal(old_state.state))
                        * Decimal(elapsed_time)
                        / 2
                    )
                elif self._method == LEFT_METHOD:
                    area = Decimal(old_state.state) * Decimal(elapsed_time)
                elif self._method == RIGHT_METHOD:
                    area = Decimal(new_state.state) * Decimal(elapsed_time)

                # Scale from the source unit into the configured prefix/time.
                integral = area / (self._unit_prefix * self._unit_time)
                assert isinstance(integral, Decimal)
            except ValueError as err:
                _LOGGER.warning("While calculating integration: %s", err)
            except DecimalException as err:
                _LOGGER.warning(
                    "Invalid state (%s > %s): %s", old_state.state, new_state.state, err
                )
            except AssertionError as err:
                _LOGGER.error("Could not calculate integral: %s", err)
            else:
                if isinstance(self._state, Decimal):
                    self._state += integral
                else:
                    self._state = integral
                self.async_write_ha_state()

        async_track_state_change_event(
            self.hass, [self._sensor_source_id], calc_integration
        )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def native_value(self):
        """Return the state of the sensor."""
        if isinstance(self._state, Decimal):
            return round(self._state, self._round_digits)
        return self._state

    @property
    def native_unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def extra_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {ATTR_SOURCE_ID: self._sensor_source_id}

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return ICON
|
|
from __future__ import absolute_import
import mock
from bokeh.core.properties import field, value
from bokeh.core.validation import check_integrity
from bokeh.models.annotations import (
Legend, LegendItem, ColorBar, Arrow, BoxAnnotation, Span, LabelSet, Label,
Title, Band, Whisker
)
from bokeh.models import (
ColumnDataSource, ArrowHead, BasicTicker, BasicTickFormatter, GlyphRenderer
)
from .utils.property_utils import (
FILL, LINE, TEXT, ANGLE, prefix,
check_properties_existence, check_fill_properties,
check_line_properties, check_text_properties
)
def test_Legend():
    # Verifies Legend default property values and its full property set.
    legend = Legend()
    assert legend.plot is None
    assert legend.location == 'top_right'
    assert legend.label_standoff == 5
    assert legend.label_height == 20
    assert legend.label_width == 20
    assert legend.glyph_height == 20
    assert legend.glyph_width == 20
    assert legend.padding == 10
    assert legend.spacing == 3
    assert legend.margin == 10
    assert legend.items == []
    check_line_properties(legend, "border_", "#e5e5e5", 1.0, 0.5)
    check_text_properties(legend, "label_", "10pt", "middle")
    check_fill_properties(legend, "background_", "#ffffff", 0.95)
    check_properties_existence(legend, [
        "plot",
        "visible",
        "location",
        "orientation",
        "label_standoff",
        "label_height",
        "label_width",
        "glyph_height",
        "glyph_width",
        "margin",
        "padding",
        "spacing",
        "items",
        "level",
        "click_policy"],
        prefix('label_', TEXT),
        prefix('border_', LINE),
        prefix('background_', FILL),
        prefix('inactive_', FILL))


def test_ColorBar():
    # Verifies ColorBar default property values and its full property set.
    color_bar = ColorBar()
    assert color_bar.plot is None
    assert color_bar.location == 'top_right'
    assert color_bar.orientation == 'vertical'
    assert color_bar.height == 'auto'
    assert color_bar.width == 'auto'
    assert color_bar.scale_alpha == 1.0
    assert color_bar.title is None
    assert color_bar.title_standoff == 2
    assert isinstance(color_bar.ticker, BasicTicker)
    assert isinstance(color_bar.formatter, BasicTickFormatter)
    assert color_bar.color_mapper is None
    assert color_bar.margin == 30
    assert color_bar.padding == 10
    assert color_bar.label_standoff == 5
    assert color_bar.major_tick_in == 5
    assert color_bar.major_tick_out == 0
    assert color_bar.minor_tick_in == 0
    assert color_bar.minor_tick_out == 0
    check_text_properties(color_bar, "title_", "10pt", "bottom", "italic")
    check_text_properties(color_bar, "major_label_", "8pt", "middle", "normal", "center")
    check_line_properties(color_bar, "major_tick_", "#ffffff")
    check_line_properties(color_bar, "minor_tick_", None)
    check_line_properties(color_bar, "bar_", None)
    check_line_properties(color_bar, "border_", None)
    check_fill_properties(color_bar, "background_", "#ffffff", 0.95)
    check_properties_existence(color_bar, [
        "plot",
        "level",
        "visible",
        "location",
        "orientation",
        "height",
        "width",
        "scale_alpha",
        "title",
        "title_standoff",
        "ticker",
        "formatter",
        "color_mapper",
        "margin",
        "padding",
        "label_standoff",
        "major_tick_in",
        "major_tick_out",
        "minor_tick_in",
        "minor_tick_out",
        "major_label_overrides"],
        prefix('title_', TEXT),
        prefix('major_label_', TEXT),
        prefix('major_tick_', LINE),
        prefix('minor_tick_', LINE),
        prefix('bar_', LINE),
        prefix('border_', LINE),
        prefix('background_', FILL)
    )
def test_Arrow():
    # Verifies Arrow default property values and its full property set.
    arrow = Arrow()
    assert arrow.plot is None
    assert arrow.x_start is None
    assert arrow.y_start is None
    assert arrow.start_units == 'data'
    assert arrow.start is None
    assert arrow.x_end is None
    assert arrow.y_end is None
    assert arrow.end_units == 'data'
    assert isinstance(arrow.end, ArrowHead)
    assert arrow.source is None
    assert arrow.x_range_name == "default"
    assert arrow.y_range_name == "default"
    check_line_properties(arrow)
    check_properties_existence(arrow, [
        "plot",
        "level",
        "visible",
        "x_start",
        "y_start",
        "start_units",
        "start",
        "x_end",
        "y_end",
        "end_units",
        "end",
        "source",
        "x_range_name",
        "y_range_name"],
        LINE)


def test_BoxAnnotation():
    # Verifies BoxAnnotation default property values and its property set.
    box = BoxAnnotation()
    assert box.plot is None
    assert box.left is None
    assert box.left_units == 'data'
    assert box.right is None
    assert box.right_units == 'data'
    assert box.bottom is None
    assert box.bottom_units == 'data'
    assert box.top is None
    assert box.top_units == 'data'
    assert box.x_range_name == 'default'
    assert box.y_range_name == 'default'
    assert box.level == 'annotation'
    check_line_properties(box, "", '#cccccc', 1, 0.3)
    check_fill_properties(box, "", "#fff9ba", 0.4)
    check_properties_existence(box, [
        "render_mode",
        "plot",
        "visible",
        "left",
        "left_units",
        "right",
        "right_units",
        "bottom",
        "bottom_units",
        "top",
        "top_units",
        "x_range_name",
        "y_range_name",
        "level",
    ], LINE, FILL)


def test_Band():
    # Verifies Band default property values and its full property set.
    band = Band()
    assert band.plot is None
    assert band.level == 'annotation'
    assert band.lower is None
    assert band.lower_units == 'data'
    assert band.upper is None
    assert band.upper_units == 'data'
    assert band.base is None
    assert band.dimension == 'height'
    assert isinstance(band.source, ColumnDataSource)
    assert band.x_range_name == 'default'
    assert band.y_range_name == 'default'
    check_line_properties(band, "", "#cccccc", 1.0, 0.3)
    check_fill_properties(band, "", "#fff9ba", 0.4)
    check_properties_existence(band, [
        "plot",
        "visible",
        "level",
        "lower",
        "lower_units",
        "upper",
        "upper_units",
        "base",
        "base_units",
        "dimension",
        "source",
        "x_range_name",
        "y_range_name",
    ], LINE, FILL)
def test_Label():
    # Verifies Label default property values and its full property set.
    label = Label()
    assert label.plot is None
    assert label.level == 'annotation'
    assert label.x is None
    assert label.y is None
    assert label.x_units == 'data'
    assert label.y_units == 'data'
    assert label.text is None
    assert label.angle == 0
    assert label.angle_units == 'rad'
    assert label.x_offset == 0
    assert label.y_offset == 0
    assert label.render_mode == 'canvas'
    assert label.x_range_name == 'default'
    assert label.y_range_name == 'default'
    check_text_properties(label)
    check_fill_properties(label, "background_", None, 1.0)
    check_line_properties(label, "border_", None, 1.0, 1.0)
    check_properties_existence(label, [
        "plot",
        "level",
        "visible",
        "x",
        "y",
        "x_units",
        "y_units",
        "text",
        "angle",
        "angle_units",
        "x_offset",
        "y_offset",
        "render_mode",
        "x_range_name",
        "y_range_name"],
        TEXT,
        prefix('border_', LINE),
        prefix('background_', FILL))


def test_LabelSet():
    # Verifies LabelSet default property values and its full property set.
    label_set = LabelSet()
    assert label_set.plot is None
    assert label_set.level == 'annotation'
    assert label_set.x is None
    assert label_set.y is None
    assert label_set.x_units == 'data'
    assert label_set.y_units == 'data'
    assert label_set.text == 'text'
    assert label_set.angle == 0
    assert label_set.angle_units == 'rad'
    assert label_set.x_offset == 0
    assert label_set.y_offset == 0
    assert label_set.render_mode == 'canvas'
    assert label_set.x_range_name == 'default'
    assert label_set.y_range_name == 'default'
    assert isinstance(label_set.source, ColumnDataSource)
    assert label_set.source.data == {}
    check_text_properties(label_set)
    check_fill_properties(label_set, "background_", None, 1.0)
    check_line_properties(label_set, "border_", None, 1.0, 1.0)
    check_properties_existence(label_set, [
        "plot",
        "visible",
        "level",
        "x",
        "y",
        "x_units",
        "y_units",
        "text",
        "angle",
        "angle_units",
        "x_offset",
        "y_offset",
        "render_mode",
        "x_range_name",
        "y_range_name",
        "source"],
        TEXT,
        ANGLE,
        prefix('border_', LINE),
        prefix('background_', FILL))


def test_Span():
    # Verifies Span default property values and its full property set.
    line = Span()
    assert line.plot is None
    assert line.location is None
    assert line.location_units == 'data'
    assert line.dimension == 'width'
    assert line.x_range_name == 'default'
    assert line.y_range_name == 'default'
    assert line.level == 'annotation'
    assert line.render_mode == 'canvas'
    check_line_properties(line, "", 'black', 1.0)
    check_properties_existence(line, [
        "plot",
        "visible",
        "location",
        "location_units",
        "dimension",
        "x_range_name",
        "y_range_name",
        "level",
        "render_mode"
    ], LINE)
def test_Title():
    # Verifies Title default property values and its full property set.
    title = Title()
    assert title.plot is None
    assert title.level == 'annotation'
    assert title.text is None
    assert title.vertical_align == 'bottom'
    assert title.align == 'left'
    assert title.offset == 0
    assert title.text_font == 'helvetica'
    assert title.text_font_size == {'value': '10pt'}
    assert title.text_font_style == 'bold'
    assert title.text_color == '#444444'
    assert title.text_alpha == 1.0
    check_fill_properties(title, "background_", None, 1.0)
    check_line_properties(title, "border_", None, 1.0, 1.0)
    check_properties_existence(title, [
        "plot",
        "visible",
        "level",
        "text",
        "vertical_align",
        "align",
        "offset",
        "text_font",
        "text_font_size",
        "text_font_style",
        "text_color",
        "text_alpha",
        "render_mode"],
        prefix('border_', LINE),
        prefix('background_', FILL))


def test_Whisker():
    # Verifies Whisker default property values and its full property set.
    whisker = Whisker()
    assert whisker.plot is None
    assert whisker.level == 'underlay'
    assert whisker.lower is None
    assert whisker.lower_units == 'data'
    assert isinstance(whisker.lower_head, ArrowHead)
    assert whisker.lower_head.size == 10
    assert whisker.lower_head.level == 'underlay'
    assert whisker.upper is None
    assert whisker.upper_units == 'data'
    assert isinstance(whisker.upper_head, ArrowHead)
    assert whisker.upper_head.size == 10
    assert whisker.upper_head.level == 'underlay'
    assert whisker.base is None
    assert whisker.dimension == 'height'
    assert isinstance(whisker.source, ColumnDataSource)
    assert whisker.x_range_name == 'default'
    assert whisker.y_range_name == 'default'
    check_line_properties(whisker, "")
    check_properties_existence(whisker, [
        "plot",
        "visible",
        "level",
        "lower",
        "lower_units",
        "lower_head",
        "upper",
        "upper_units",
        "upper_head",
        "base",
        "base_units",
        "dimension",
        "source",
        "x_range_name",
        "y_range_name"],
        LINE)


def test_can_add_multiple_glyph_renderers_to_legend_item():
    # Multiple renderers on one LegendItem should pass integrity checks.
    legend_item = LegendItem()
    gr_1 = GlyphRenderer()
    gr_2 = GlyphRenderer()
    legend_item.renderers = [gr_1, gr_2]
    with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
        check_integrity([legend_item])
        assert mock_logger.error.call_count == 0


def test_legend_item_with_field_label_and_different_data_sources_raises_a_validation_error():
    # A field label requires all renderers to share one data source.
    legend_item = LegendItem()
    gr_1 = GlyphRenderer(data_source=ColumnDataSource(data={'label': [1]}))
    gr_2 = GlyphRenderer(data_source=ColumnDataSource(data={'label': [1]}))
    legend_item.label = field('label')
    legend_item.renderers = [gr_1, gr_2]
    with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
        check_integrity([legend_item])
        assert mock_logger.error.call_count == 1


def test_legend_item_with_value_label_and_different_data_sources_does_not_raise_a_validation_error():
    # A value label does not depend on the renderers' data sources.
    legend_item = LegendItem()
    gr_1 = GlyphRenderer(data_source=ColumnDataSource())
    gr_2 = GlyphRenderer(data_source=ColumnDataSource())
    legend_item.label = value('label')
    legend_item.renderers = [gr_1, gr_2]
    with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
        check_integrity([legend_item])
        assert mock_logger.error.call_count == 0


def test_legend_item_with_field_label_raises_error_if_field_not_in_cds():
    # A field label must name a column that exists in the data source.
    legend_item = LegendItem()
    gr_1 = GlyphRenderer(data_source=ColumnDataSource())
    legend_item.label = field('label')
    legend_item.renderers = [gr_1]
    with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
        check_integrity([legend_item])
        assert mock_logger.error.call_count == 1
|
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional
from pants.backend.python.goals.coverage_py import (
CoverageConfig,
CoverageSubsystem,
PytestCoverageData,
)
from pants.backend.python.subsystems.pytest import PyTest, PythonTestFieldSet
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexProcess
from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.test import (
BuildPackageDependenciesRequest,
BuiltPackageDependencies,
JunitXMLDir,
JunitXMLDirSource,
RuntimePackageDependenciesField,
TestDebugRequest,
TestExtraEnv,
TestFieldSet,
TestResult,
TestSubsystem,
)
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Address
from pants.engine.collection import Collection
from pants.engine.environment import CompleteEnvironment
from pants.engine.fs import (
EMPTY_DIGEST,
CreateDigest,
Digest,
DigestSubset,
Directory,
MergeDigests,
PathGlobs,
RemovePrefix,
Snapshot,
)
from pants.engine.process import (
FallibleProcessResult,
InteractiveProcess,
Process,
ProcessCacheScope,
)
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Target, TransitiveTargets, TransitiveTargetsRequest, WrappedTarget
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.option.global_options import GlobalOptions
from pants.python.python_setup import PythonSetup
from pants.util.logging import LogLevel
# Use a module-scoped logger (not the root logger) so records are
# attributable to this module and can be filtered per-logger.
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------------------
# Plugin hook
# -----------------------------------------------------------------------------------------
@dataclass(frozen=True)
class PytestPluginSetup:
    """The result of custom set up logic before Pytest runs.

    Please reach out if you would like certain functionality, such as allowing your plugin to set
    environment variables.
    """

    # Extra files produced by the setup; presumably merged into the test
    # sandbox by the consuming rule -- confirm at the use site.
    digest: Digest = EMPTY_DIGEST
@union
@dataclass(frozen=True)  # type: ignore[misc]
class PytestPluginSetupRequest(ABC):
    """A request to set up the test environment before Pytest runs, e.g. to set up databases.

    To use, subclass PytestPluginSetupRequest, register the rule
    `UnionRule(PytestPluginSetupRequest, MyCustomPytestPluginSetupRequest)`, and add a rule that
    takes your subclass as a parameter and returns `PytestPluginSetup`.
    """

    # The test target the setup would run for.
    target: Target

    @classmethod
    @abstractmethod
    def is_applicable(cls, target: Target) -> bool:
        """Whether the setup implementation should be used for this target or not."""
class AllPytestPluginSetups(Collection[PytestPluginSetup]):
    """All `PytestPluginSetup` results produced for a single test target."""
    pass
# TODO: Why is this necessary? We should be able to use `PythonTestFieldSet` as the rule param.
@dataclass(frozen=True)
class AllPytestPluginSetupsRequest:
    """Request to run every applicable plugin setup for the target at `address`."""

    # Address of the test target to run setups for.
    address: Address
@rule
async def run_all_setup_plugins(
    request: AllPytestPluginSetupsRequest, union_membership: UnionMembership
) -> AllPytestPluginSetups:
    """Run every registered `PytestPluginSetupRequest` that applies to the target.

    Resolves the target behind `request.address`, keeps only the union members
    whose `is_applicable` accepts it, and runs their setup rules concurrently.
    """
    target = (await Get(WrappedTarget, Address, request.address)).target
    relevant_request_types = [
        setup_request_type
        for setup_request_type in union_membership.get(PytestPluginSetupRequest)
        if setup_request_type.is_applicable(target)
    ]
    plugin_setups = await MultiGet(
        Get(PytestPluginSetup, PytestPluginSetupRequest, setup_request_type(target))  # type: ignore[misc, abstract]
        for setup_request_type in relevant_request_types
    )
    return AllPytestPluginSetups(plugin_setups)
# -----------------------------------------------------------------------------------------
# Core logic
# -----------------------------------------------------------------------------------------
# If a user wants extra pytest output (e.g., plugin output) to show up in dist/
# they must ensure that output goes under this directory. E.g.,
# ./pants test <target> -- --html=extra-output/report.html
# This directory is captured from the chroot after the run (see
# `output_directories` below) and its prefix is stripped before publishing.
_EXTRA_OUTPUT_DIR = "extra-output"
@dataclass(frozen=True)
class TestSetupRequest:
    """Request to build the Pytest `Process` for one test target."""

    field_set: PythonTestFieldSet
    # When True, build for interactive debugging: no JUnit XML or coverage
    # flags are added to the command line.
    is_debug: bool
@dataclass(frozen=True)
class TestSetup:
    """A ready-to-run Pytest process plus the name of its JUnit XML results file."""

    process: Process
    # None for debug runs, which produce no XML report.
    results_file_name: Optional[str]

    # Prevent this class from being detected by pytest as a test class.
    __test__ = False
@rule(level=LogLevel.DEBUG)
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
    complete_env: CompleteEnvironment,
) -> TestSetup:
    """Assemble the `Process` that will run Pytest for a single test target.

    Builds the requirements / pytest / runner PEXes, gathers sources, config
    files, the coverage config and plugin-setup digests into one input digest,
    and computes the Pytest command line and environment.
    """
    # The transitive closure and the plugin setups are independent; fetch both
    # concurrently.
    transitive_targets, plugin_setups = await MultiGet(
        Get(TransitiveTargets, TransitiveTargetsRequest([request.field_set.address])),
        Get(AllPytestPluginSetups, AllPytestPluginSetupsRequest(request.field_set.address)),
    )
    all_targets = transitive_targets.closure

    interpreter_constraints = InterpreterConstraints.create_from_targets(all_targets, python_setup)

    requirements_pex_get = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements(
            [request.field_set.address],
            internal_only=True,
            resolve_and_lockfile=request.field_set.resolve.resolve_and_lockfile(python_setup),
        ),
    )
    pytest_pex_get = Get(
        Pex,
        PexRequest(
            output_filename="pytest.pex",
            requirements=pytest.pex_requirements(),
            interpreter_constraints=interpreter_constraints,
            internal_only=True,
        ),
    )

    # Ensure that the empty extra output dir exists.
    extra_output_directory_digest_get = Get(Digest, CreateDigest([Directory(_EXTRA_OUTPUT_DIR)]))

    prepared_sources_get = Get(
        PythonSourceFiles, PythonSourceFilesRequest(all_targets, include_files=True)
    )

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_get = Get(SourceFiles, SourceFilesRequest([request.field_set.sources]))

    (
        pytest_pex,
        requirements_pex,
        prepared_sources,
        field_set_source_files,
        extra_output_directory_digest,
    ) = await MultiGet(
        pytest_pex_get,
        requirements_pex_get,
        prepared_sources_get,
        field_set_source_files_get,
        extra_output_directory_digest_get,
    )

    # Build any first-party distributions the test needs at runtime.
    local_dists = await Get(
        LocalDistsPex,
        LocalDistsPexRequest(
            [request.field_set.address],
            interpreter_constraints=interpreter_constraints,
            sources=prepared_sources,
        ),
    )

    pytest_runner_pex_get = Get(
        VenvPex,
        PexRequest(
            output_filename="pytest_runner.pex",
            interpreter_constraints=interpreter_constraints,
            main=pytest.main,
            internal_only=True,
            pex_path=[pytest_pex, requirements_pex, local_dists.pex],
        ),
    )
    config_files_get = Get(
        ConfigFiles,
        ConfigFilesRequest,
        pytest.config_request(field_set_source_files.snapshot.dirs),
    )
    pytest_runner_pex, config_files = await MultiGet(pytest_runner_pex_get, config_files_get)

    # Merge everything the process needs into a single input digest.
    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                coverage_config.digest,
                local_dists.remaining_sources.source_files.snapshot.digest,
                config_files.snapshot.digest,
                extra_output_directory_digest,
                *(plugin_setup.digest for plugin_setup in plugin_setups),
            )
        ),
    )

    add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
    output_files = []

    results_file_name = None
    if not request.is_debug:
        # Non-debug runs emit a JUnit XML report named after the target.
        results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
        add_opts.extend(
            (f"--junitxml={results_file_name}", "-o", f"junit_family={pytest.options.junit_family}")
        )
        output_files.append(results_file_name)

    coverage_args = []
    if test_subsystem.use_coverage and not request.is_debug:
        pytest.validate_pytest_cov_included()
        output_files.append(".coverage")

        if coverage_subsystem.filter:
            cov_args = [f"--cov={morf}" for morf in coverage_subsystem.filter]
        else:
            # N.B.: Passing `--cov=` or `--cov=.` to communicate "record coverage for all sources"
            # fails in certain contexts as detailed in:
            #   https://github.com/pantsbuild/pants/issues/12390
            # Instead we focus coverage on just the directories containing python source files
            # materialized to the Process chroot.
            cov_args = [f"--cov={source_root}" for source_root in prepared_sources.source_roots]

        coverage_args = [
            "--cov-report=",  # Turn off output.
            f"--cov-config={coverage_config.path}",
            *cov_args,
        ]

    extra_env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
        **test_extra_env.env,
        # NOTE: `complete_env` intentionally after `test_extra_env` to allow overriding within
        # `python_tests`
        **complete_env.get_subset(request.field_set.extra_env_vars.value or ()),
    }

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = (
        ProcessCacheScope.PER_SESSION if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    )

    process = await Get(
        Process,
        VenvPexProcess(
            pytest_runner_pex,
            argv=(*pytest.options.args, *coverage_args, *field_set_source_files.files),
            extra_env=extra_env,
            input_digest=input_digest,
            output_directories=(_EXTRA_OUTPUT_DIR,),
            output_files=output_files,
            timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
            execution_slot_variable=pytest.options.execution_slot_var,
            description=f"Run Pytest for {request.field_set.address}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
    )
    return TestSetup(process, results_file_name=results_file_name)
@rule(desc="Run Pytest", level=LogLevel.DEBUG)
async def run_python_test(
field_set: PythonTestFieldSet, test_subsystem: TestSubsystem, pytest: PyTest
) -> TestResult:
setup = await Get(TestSetup, TestSetupRequest(field_set, is_debug=False))
result = await Get(FallibleProcessResult, Process, setup.process)
coverage_data = None
if test_subsystem.use_coverage:
coverage_snapshot = await Get(
Snapshot, DigestSubset(result.output_digest, PathGlobs([".coverage"]))
)
if coverage_snapshot.files == (".coverage",):
coverage_data = PytestCoverageData(field_set.address, coverage_snapshot.digest)
else:
logger.warning(f"Failed to generate coverage data for {field_set.address}.")
xml_results_snapshot = None
if setup.results_file_name:
xml_results_snapshot = await Get(
Snapshot, DigestSubset(result.output_digest, PathGlobs([setup.results_file_name]))
)
if xml_results_snapshot.files != (setup.results_file_name,):
logger.warning(f"Failed to generate JUnit XML data for {field_set.address}.")
extra_output_snapshot = await Get(
Snapshot, DigestSubset(result.output_digest, PathGlobs([f"{_EXTRA_OUTPUT_DIR}/**"]))
)
extra_output_snapshot = await Get(
Snapshot, RemovePrefix(extra_output_snapshot.digest, _EXTRA_OUTPUT_DIR)
)
return TestResult.from_fallible_process_result(
result,
address=field_set.address,
output_setting=test_subsystem.output,
coverage_data=coverage_data,
xml_results=xml_results_snapshot,
extra_output=extra_output_snapshot,
)
@rule(desc="Set up Pytest to run interactively", level=LogLevel.DEBUG)
async def debug_python_test(field_set: PythonTestFieldSet) -> TestDebugRequest:
setup = await Get(TestSetup, TestSetupRequest(field_set, is_debug=True))
return TestDebugRequest(
InteractiveProcess.from_process(
setup.process, forward_signals_to_process=False, restartable=True
)
)
@union
class PytestJunitXMLDirSource:
    """Union marker: sources the JUnit XML output dir from pytest's options."""
    pass
@rule
def builtin_xml_dir_source(_: PytestJunitXMLDirSource, pytest: PyTest) -> JunitXMLDir:
    """Provide `[pytest].junit_xml_dir` as the JUnit XML output directory."""
    # TODO: When this field access is removed, the entire union should be as well.
    return JunitXMLDir(pytest.options.junit_xml_dir)
# -----------------------------------------------------------------------------------------
# `runtime_package_dependencies` plugin
# -----------------------------------------------------------------------------------------
@dataclass(frozen=True)
class RuntimePackagesPluginRequest(PytestPluginSetupRequest):
    """Plugin hook for targets that declare `runtime_package_dependencies`."""

    @classmethod
    def is_applicable(cls, target: Target) -> bool:
        # Only engage when the field is actually populated.
        return bool(target.get(RuntimePackageDependenciesField).value)
@rule
async def setup_runtime_packages(request: RuntimePackagesPluginRequest) -> PytestPluginSetup:
    """Build the target's runtime package dependencies and expose their
    artifacts (as one merged digest) in the test chroot."""
    built_packages = await Get(
        BuiltPackageDependencies,
        BuildPackageDependenciesRequest(request.target.get(RuntimePackageDependenciesField)),
    )
    digest = await Get(Digest, MergeDigests(pkg.digest for pkg in built_packages))
    return PytestPluginSetup(digest)
def rules():
    """Return all rules and union registrations provided by this module."""
    return [
        *collect_rules(),
        UnionRule(TestFieldSet, PythonTestFieldSet),
        UnionRule(PytestPluginSetupRequest, RuntimePackagesPluginRequest),
        UnionRule(JunitXMLDirSource, PytestJunitXMLDirSource),
    ]
|
|
import os
import re
import sublime
import sublime_plugin
from statusprocess import *
from asyncprocess import *
from output_panel import *
PROMPT = "jshint:\n"
SUPPORTED_SELECTORS = ('source.js', 'source.json', 'source.coffee')
###############################################
# Support Functions
#
def is_supported_view(view):
    """Return True when *view* is a real (non-scratch) buffer whose scope at
    position 0 matches one of SUPPORTED_SELECTORS."""
    if view.is_scratch():
        return False
    return any(view.score_selector(0, selector) for selector in SUPPORTED_SELECTORS)
def is_autolinting(view):
    """Return True when *view* should be linted automatically on save.

    Bug fix: the original evaluated the function object `is_supported_view`
    (always truthy) instead of calling it, so the supported-view check was a
    no-op and any view with `js_autolint` enabled would be autolinted.
    """
    return is_supported_view(view) and view.settings().get("js_autolint", True)
###############################################
# Commands
#
class ToggleJsAutolintCommand(sublime_plugin.WindowCommand):
    """Toggle autolinting on supported files"""

    def is_enabled(self):
        # Only offer the command for saved JS/JSON/CoffeeScript views.
        return is_supported_view(self.window.active_view())

    def run(self):
        # NOTE(review): `toggle_setting` is expected to come from one of the
        # star-imports at the top of the file -- confirm it exists there.
        toggle_setting(self.window.active_view().settings(), 'js_autolint', True)
class JslintCommand(sublime_plugin.WindowCommand):
    """Run the configured jshint/bolt-lint binary over the active view and
    stream its output into a read-only output panel.

    NOTE(review): `SETTINGS_FILE`, `RESULT_VIEW_NAME`, `show_output_panel` and
    `hide_output_panel` come from the star-imports at the top of the file --
    confirm against those modules.
    """

    def is_enabled(self):
        # Only enabled for saved JS/JSON/CoffeeScript views.
        return is_supported_view(self.window.active_view())

    def run(self):
        """Kick off an asynchronous lint of the active file's buffer."""
        s = sublime.load_settings(SETTINGS_FILE)
        view = self.window.active_view()
        file_path = view.file_name()
        file_name = os.path.basename(file_path)

        self.debug = s.get('debug', False)
        self.buffered_data = ''  # partial line carried over between async chunks
        self.file_path = file_path
        self.file_name = file_name
        self.is_running = True
        self.output_panel_showed = False
        self.ignored_error_count = 0
        self.ignore_errors = s.get('ignore_errors', [])  # regexes that suppress result lines

        self.init_output_panel()

        # Fall back to the bundled config / binary when none is configured.
        if len(s.get('jshint_config', '')) > 0:
            config_path = s.get('jshint_config')
        else:
            config_path = sublime.packages_path()+'/SublimeNodeTools/jshint-config.json'
        if len(s.get('jshint_bin', '')) > 0:
            jshint_bin = s.get('jshint_bin')
        else:
            jshint_bin = sublime.packages_path() + '/SublimeNodeTools/bin/bolt-lint'

        # TODO: lookup all CoffeeScript and JavaScript ranges, not just the first
        if view.score_selector(0, 'source.coffee'):
            coffee_flag = ' --coffee '
        else:
            coffee_flag = ''

        # The buffer contents are handed to the linter process (as stdin via
        # AsyncProcess), so unsaved edits are linted too.
        script = view.substr(sublime.Region(0, view.size()))
        cmd = 'source ~/.profile; "'+jshint_bin + '" --config "' + config_path +'" --file "' + file_name + '" ' + coffee_flag + s.get('jshint_options', '')
        if self.debug:
            print "DEBUG: " + str(cmd)
        AsyncProcess(cmd, self, script)
        StatusProcess('Starting JSHint for file ' + file_name, self)
        # Suppress result-panel selection handling while the lint is running.
        JsLintEventListener.disabled = True

    def init_output_panel(self):
        """Create (once) and reset the shared output panel."""
        if not hasattr(self, 'output_view'):
            self.output_view = self.window.get_output_panel(RESULT_VIEW_NAME)
        self.output_view.set_name(RESULT_VIEW_NAME)
        self.clear_test_view()
        # Remember which file the results belong to; read back by the
        # event listener to jump to the source line.
        self.output_view.settings().set("file_path", self.file_path)

    def clear_test_view(self):
        """Erase the panel and re-write the PROMPT header."""
        self.output_view.set_read_only(False)
        edit = self.output_view.begin_edit()
        self.output_view.erase(edit, sublime.Region(0, self.output_view.size()))
        self.output_view.insert(edit, self.output_view.size(), PROMPT)
        self.output_view.end_edit(edit)
        self.output_view.set_read_only(True)

    def append_data(self, proc, data, end=False):
        """Append a chunk of linter output to the panel.

        Buffers partial lines between calls, rewrites the absolute path to the
        bare file name, normalizes newlines, drops lines matching any
        `ignore_errors` regex, and appends what survives to the panel.
        """
        self.buffered_data = self.buffered_data + data.decode("utf-8")
        data = self.buffered_data.replace(self.file_path, self.file_name).replace('\r\n', '\n').replace('\r', '\n')

        if end == False:
            rsep_pos = data.rfind('\n')
            if rsep_pos == -1:
                # not found full line.
                return
            # Keep the trailing partial line for the next chunk.
            self.buffered_data = data[rsep_pos+1:]
            data = data[:rsep_pos+1]

        # ignore error.
        text = data
        if len(self.ignore_errors) > 0:
            text = ''
            for line in data.split('\n'):
                if len(line) == 0:
                    continue
                ignored = False
                for rule in self.ignore_errors:
                    if re.search(rule, line):
                        ignored = True
                        self.ignored_error_count += 1
                        if self.debug:
                            print "text match line "
                            print "rule = " + rule
                            print "line = " + line
                            print "---------"
                        break
                if ignored == False:
                    text += line + '\n'

        show_output_panel(self.window)
        selection_was_at_end = (len(self.output_view.sel()) == 1 and self.output_view.sel()[0] == sublime.Region(self.output_view.size()))
        self.output_view.set_read_only(False)
        edit = self.output_view.begin_edit()
        self.output_view.insert(edit, self.output_view.size(), text)
        if end and self.ignored_error_count > 0:
            # Summarize how many lines were suppressed by ignore_errors.
            text = '\nignored ' + str(self.ignored_error_count) + ' errors.\n'
            self.output_view.insert(edit, self.output_view.size(), text)
        # if selection_was_at_end:
        #     self.output_view.show(self.output_view.size())
        self.output_view.end_edit(edit)
        self.output_view.set_read_only(True)
        # if end:
        #     self.output_view.run_command("goto_line", {"line": 1})

    def update_status(self, msg, progress):
        """StatusProcess callback: show lint progress in the status bar."""
        sublime.status_message(msg + " " + progress)

    def proc_terminated(self, proc):
        """AsyncProcess callback: the linter exited."""
        if proc.returncode == 0:
            sublime.status_message("jshint: " + self.file_name + ' has no errors')
            hide_output_panel(self.window)
        else:
            # Flush any buffered partial output into the panel.
            msg = ''
            self.append_data(proc, msg, True)
        # Re-enable result-panel navigation now that linting is done.
        JsLintEventListener.disabled = False
class JsLintEventListener(sublime_plugin.EventListener):
    """Autolints supported views on save and lets the user click a result line
    in the lint panel to jump to the corresponding source line."""

    # When True, on_selection_modified is a no-op (set while a lint runs).
    disabled = False

    def __init__(self):
        # NOTE(review): "resion" is a typo for "region"; kept as-is since it is
        # a behavior-neutral internal attribute name.
        self.previous_resion = None
        self.file_view = None

    def on_post_save(self, view):
        """Run the lint command on save for supported, autolint-enabled views."""
        if is_supported_view(view):
            if is_autolinting(view):
                print "AUTOLINTING " + view.file_name()
                view.window().run_command("jslint")
            else:
                sublime.status_message('Autolint disabled for this file')

    def on_deactivated(self, view):
        """Clear highlights when the result panel loses focus."""
        if view.name() != RESULT_VIEW_NAME:
            return
        self.previous_resion = None
        if self.file_view:
            self.file_view.erase_regions(RESULT_VIEW_NAME)

    def on_selection_modified(self, view):
        """When the caret moves in the result panel, parse the selected
        `jslint:<file>:<line>:<col>` line and jump the source view to it."""
        if JsLintEventListener.disabled:
            return
        if view.name() != RESULT_VIEW_NAME:
            return
        region = view.line(view.sel()[0])
        # make sure call once.
        if self.previous_resion == region:
            return
        self.previous_resion = region
        # extract line from jslint result.
        text = view.substr(region).split(':')
        if len(text) < 4 or text[0] != 'jslint' or re.match('\d+', text[2]) == None or re.match('\d+', text[3]) == None:
            return
        line = int(text[2])
        col = int(text[3])
        # hightligh view line.
        view.add_regions(RESULT_VIEW_NAME, [region], "comment")
        # find the file view.
        file_path = view.settings().get('file_path')
        window = sublime.active_window()
        file_view = None
        for v in window.views():
            if v.file_name() == file_path:
                file_view = v
                break
        if file_view == None:
            return
        self.file_view = file_view
        window.focus_view(file_view)
        file_view.run_command("goto_line", {"line": line})
        file_region = file_view.line(file_view.sel()[0])
        # highlight file_view line
        file_view.add_regions(RESULT_VIEW_NAME, [file_region], "string")
|
|
#!/usr/bin/env python
"""Semantic protocol buffers can be created from proto2 .proto files.
For maintaining inter-operatibility with primitive protocol buffer
implementations, we can parse the field descriptors created by the standard
Google proto implementation, and generate Semantic proto descriptors.
This file contains interoperability code with the Google protocol buffer
library.
"""
import logging
from grr.lib import rdfvalue
from grr.lib import type_info
from grr.proto import semantic_pb2
# Field types present in the proto2 field descriptors. These mirror the
# FieldDescriptor.TYPE_* values of the Google protobuf library.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18

# These are labels in the descriptor. Semantic protobufs only distinguish
# between optional and repeated labels. Required is not enforced by the library
# - it should be done by the user in their Validate() method.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3

# Semantic Value data store type specifies how they prefer to be encoded. This
# maps to a proto2 primitive field type. When parsing the .proto file we must
# ensure that the semantic value is getting encoded into the correct primitive
# field type.
_SEMANTIC_PRIMITIVE_TO_FIELD_TYPE = dict(
    bytes=TYPE_BYTES,
    string=TYPE_STRING,
    integer=TYPE_INT64,
    unsigned_integer=TYPE_UINT64,
)
def DefineFromProtobuf(cls, protobuf):
  """Add type info definitions from an existing protobuf.

  We support building this class by copying definitions from an annotated
  protobuf using the semantic protobuf. This is ideal for interoperability
  with other languages and non-semantic protobuf implementations. In that case
  it might be easier to simply annotate the .proto file with the relevant
  semantic information.

  Args:
    cls: The class to add fields descriptors to (i.e. the new semantic class).
    protobuf: A generated proto2 protocol buffer class as produced by the
      standard Google protobuf compiler.

  Raises:
    rdfvalue.InitializeError: If a field's declared primitive type conflicts
      with the semantic type's preferred encoding, or if semantic information
      is specified ambiguously.
  """
  # Parse message level options.
  message_options = protobuf.DESCRIPTOR.GetOptions()
  semantic_options = message_options.Extensions[semantic_pb2.semantic]

  # Support message descriptions.
  if semantic_options.description and not cls.__doc__:
    cls.__doc__ = semantic_options.description

  # We search through all the field descriptors and build type info
  # descriptors from them.
  for field in protobuf.DESCRIPTOR.fields:
    type_descriptor = None

    # Does this field have semantic options?
    options = field.GetOptions().Extensions[semantic_pb2.sem_type]

    kwargs = dict(description=options.description, name=field.name,
                  friendly_name=options.friendly_name,
                  field_number=field.number, labels=list(options.label))

    if field.has_default_value:
      kwargs["default"] = field.default_value

    # This field is a non-protobuf semantic value.
    if options.type and field.type != TYPE_MESSAGE:
      rdf_type = getattr(rdfvalue, options.type, None)
      if rdf_type:
        # Make sure that the field type is the same as what is required by the
        # semantic type.
        required_field_type = _SEMANTIC_PRIMITIVE_TO_FIELD_TYPE[
            rdf_type.data_store_type]
        if required_field_type != field.type:
          raise rdfvalue.InitializeError(
              ("%s: .proto file uses incorrect field to store Semantic Value "
               "%s: Should be %s") % (
                   cls.__name__, field.name, rdf_type.data_store_type))

      type_descriptor = type_info.ProtoRDFValue(rdf_type=options.type, **kwargs)

    # A semantic protobuf is already a semantic value so it is an error to
    # specify it in two places.
    elif options.type and field.type == TYPE_MESSAGE:
      raise rdfvalue.InitializeError(
          ("%s: .proto file specified both Semantic Value type %s and "
           "Semantic protobuf %s") % (
               cls.__name__, options.type, field.message_type.name))

    # Try to figure out what this field actually is from the descriptor.
    elif field.type == TYPE_DOUBLE:
      type_descriptor = type_info.ProtoDouble(**kwargs)

    elif field.type == TYPE_FLOAT:
      type_descriptor = type_info.ProtoFloat(**kwargs)

    elif field.type == TYPE_BOOL:
      type_descriptor = type_info.ProtoBoolean(**kwargs)

    elif field.type == TYPE_STRING:
      type_descriptor = type_info.ProtoString(**kwargs)

    elif field.type == TYPE_BYTES:
      type_descriptor = type_info.ProtoBinary(**kwargs)

      if options.dynamic_type:
        # This may be a dynamic type. In this case the dynamic_type option
        # names a method (which must exist) which should return the class of
        # the embedded semantic value.
        dynamic_cb = getattr(cls, options.dynamic_type, None)
        if dynamic_cb is not None:
          type_descriptor = type_info.ProtoDynamicEmbedded(
              dynamic_cb=dynamic_cb, **kwargs)
        else:
          logging.warning("Dynamic type specifies a nonexistent callback %s",
                          options.dynamic_type)

    elif field.type == TYPE_INT64 or field.type == TYPE_INT32:
      type_descriptor = type_info.ProtoSignedInteger(**kwargs)

    elif field.type == TYPE_UINT32 or field.type == TYPE_UINT64:
      type_descriptor = type_info.ProtoUnsignedInteger(**kwargs)

    # An embedded protocol buffer.
    elif field.type == TYPE_MESSAGE and field.message_type:
      # Refer to another protobuf. Note that the target does not need to be
      # known at this time. It will be resolved using the late binding algorithm
      # when it is known. Therefore this can actually also refer to this current
      # protobuf (i.e. nested proto).
      type_descriptor = type_info.ProtoEmbedded(
          nested=field.message_type.name, **kwargs)

      # TODO(user): support late binding here.
      if type_descriptor.type:
        # This traps the following problem:
        # class Certificate(rdfvalue.RDFValueArray):
        #   protobuf = jobs_pb2.BlobArray
        #
        # A primitive Protobuf definition like:
        # message Certificate {
        #   ....
        # };
        # And a field like:
        # optional Certificate csr = 1 [(sem_type) = {
        #     description: "A Certificate RDFValue with the CSR in it.",
        #   }];
        # If we blindly allowed the Certificate RDFValue to be used, the
        # semantic library will end up embedding a BlobArray protobuf, but the
        # primitive library will still use Certificate.

        # The name of the primitive protobuf the semantic type implements.
        semantic_protobuf_primitive = type_descriptor.type.protobuf.__name__

        # This is an error because the primitive library will use the protobuf
        # named in the field, but the semantic library will implement a
        # different protobuf.
        if semantic_protobuf_primitive != field.message_type.name:
          raise rdfvalue.InitializeError(
              ("%s.%s: Conflicting primitive (%s) and semantic protobuf %s "
               "which implements primitive protobuf (%s)") % (
                   cls.__name__, field.name, field.message_type.name,
                   type_descriptor.type.__name__, semantic_protobuf_primitive))

    elif field.enum_type:  # It is an enum.
      enum_desc = field.enum_type
      enum_dict = {}
      enum_descriptions = {}

      # A single pass collects both the name -> number mapping and the
      # per-value descriptions. (The original code rebuilt enum_dict a second
      # time with an identical dict comprehension; that dead code is removed.)
      for enum_value in enum_desc.values:
        enum_dict[enum_value.name] = enum_value.number
        description = enum_value.GetOptions().Extensions[
            semantic_pb2.description]
        enum_descriptions[enum_value.name] = description

      type_descriptor = type_info.ProtoEnum(
          enum_name=enum_desc.name, enum=enum_dict,
          enum_descriptions=enum_descriptions, **kwargs)

      # Attach the enum container to the class for easy reference:
      setattr(cls, enum_desc.name, type_descriptor.enum_container)

    # If we do not recognize the type descriptor we ignore this field.
    if type_descriptor is not None:
      # If the field is repeated, wrap it in a ProtoList.
      if field.label == LABEL_REPEATED:
        type_descriptor = type_info.ProtoList(type_descriptor)

      try:
        cls.AddDescriptor(type_descriptor)
      except Exception:
        logging.error("Failed to parse protobuf %s", cls)
        raise
    else:
      logging.error("Unknown field type for %s - Ignoring.", field.name)
|
|
from etk.timeseries.annotation import utility, block_detector
from etk.timeseries.annotation.table_processor import parsed_table
from etk.timeseries.annotation import cell_classifier
import logging
from etk.timeseries.annotation import multi_table_processor
class parsed_sheet:
    """One worksheet of a spreadsheet, annotated for time-series extraction.

    Holds the raw cell values, the per-cell classification tags, the sheet's
    merged-cell blocks, and the list of detected time-series tables.
    """

    def __init__(self, name, index, real_values, merged_cells):
        self.sheet_name = name
        self.sheet_index = index
        # 2-D structure of raw cell values as read from the workbook.
        self.raw_values = real_values
        self.classifier = cell_classifier.simple_tag_classifier()
        # Tag every cell up front; results stored in self.classified_tags.
        self.add_tags()
        self.table_list = []
        # NOTE(review): each merged block appears to be indexable as
        # (upper_row, lower_row, left_col, right_col) with half-open bounds
        # (see get_merged_block) -- confirm against the workbook reader.
        self.merged_cells = merged_cells
        self.in_complex = False

    # Returns the merged block if the given cell is located in one.
    def get_merged_block(self, row, col):
        for block in self.merged_cells:
            if row >= block[0] and row < block[1] and col >= block[2] and col < block[3]:
                return block
        return None

    # looks for the neighbor cells of the given date_cell and returns the the smallest
    # rectangular block that this date is located in.
    def find_date_blocks(self, date_cell):
        """Yield the minimum bounding rectangle of each connected region of
        cells tagged with *date_cell*."""
        blocks = []
        for i in range(len(self.classified_tags)):
            for j in range(len(self.classified_tags[i])):
                if date_cell not in self.classified_tags[i][j]:
                    continue
                # Skip cells already swallowed by a previously grown block.
                if block_detector.check_cell_in_block(blocks, i, j):
                    continue
                blocks.append(block_detector.BFS_date_block(i, j, self.classified_tags, date_cell))
        for block in blocks:
            logging.debug("Date block: " + str(block))
            yield block_detector.find_minimum_boundry_rectangle(block)

    # finding the tables with time series in the given sheet
    def find_tables(self, out_file):
        """Detect the time-series tables in this sheet and register them via
        add_table(); label-block spans are also written to *out_file*."""
        step = 1
        sheet_label_blocks = []
        logging.info("------------------------------------")
        for rb in self.find_date_blocks(self.classifier.get_date_tag()):
            # check if the header is only a single time cell (which can be an event in the best case)
            if not parsed_table.is_valid_time_header(rb):
                print("Pre validation of time header failed. " + str(rb))
                continue
            detected_table = parsed_table(rb, self)
            if not detected_table.is_valid_time_header_post(self.table_list):
                print("Post validation of time header failed. " + str(rb))
                continue
            sheet_label_blocks = []
            step += 1
            orientation = detected_table.get_orientation()
            print("Date block detected : " + str(rb))
            logging.info("Orientation is " + orientation)
            # if they are merged time cells
            for i in range(rb.upper_row, rb.lower_row):
                for j in range(rb.left_col, rb.right_col):
                    if self.get_merged_block(i, j) != None:
                        detected_table.check_offset()
            # check for overlapping tables caused by overlapping times
            has_overlap, break_points = multi_table_processor.complex_table.check_for_overlapping_time(detected_table.time_block, orientation, self.raw_values)
            detected_table.break_points = break_points
            if has_overlap:
                detected_table.is_complex = True
            # Labels to the left of (row orientation) or above (column
            # orientation) the detected date block.
            if orientation == utility.row_orientation:
                for x in block_detector.find_label_block(self.get_tags(), detected_table.data_start, rb.lower_row, 0, rb.left_col, utility.column_key, sheet_label_blocks):
                    detected_table.add_label(x)
            else:
                for x in block_detector.find_label_block(self.get_tags(), detected_table.data_start, rb.right_col, 0, rb.upper_row, utility.row_key, sheet_label_blocks):
                    out_file.write('rows[' + str(x.upper_row + 1) + "-" + str(x.lower_row) + "] columns[ " + str( x.left_col + 1) + "-" + str(x.right_col) + "]\n")
                    detected_table.add_label(x)
            detected_table.find_table_borders(self.get_tags())
            logging.info("Table borders: " + str(detected_table.borders))
            # check for merged cells after finding the table borders
            # check for the remaining cells that are not present in any block. They may be labels?
            # check for unassigned_cells
            dtb = detected_table.borders
            # Find more labels on the right/bottom side of date blocks
            if orientation == utility.row_orientation:
                for x in block_detector.find_label_block(self.get_tags(), detected_table.data_start, rb.lower_row, rb.right_col, dtb.right_col, utility.column_key, sheet_label_blocks):
                    detected_table.add_label(x)
                    out_file.write('rows[' + str(x.upper_row + 1) + "-" + str(x.lower_row) + "] columns[ " + str(x.left_col + 1) + "-" + str(x.right_col) + "]\n")
            else:
                for x in block_detector.find_label_block(self.get_tags(), detected_table.data_start, rb.right_col, rb.lower_row, dtb.lower_row, utility.row_key, sheet_label_blocks):
                    out_file.write('rows[' + str(x.upper_row + 1) + "-" + str(x.lower_row) + "] columns[ " + str( x.left_col + 1) + "-" + str(x.right_col) + "]\n")
                    detected_table.add_label(x)
            detected_table.find_label_names(self.get_tags())
            self.add_table(detected_table)

    def add_table(self, table):
        # Register a detected table for this sheet.
        self.table_list.append(table)

    def add_tags(self):
        # Classify every raw cell; produces a parallel 2-D structure of tags.
        self.classified_tags = self.classifier.tag_sheet(self.raw_values)

    # get class tags of the sheet cells
    def get_tags(self):
        return self.classified_tags

    # creates the output json for the sheet based on the tables found
    def get_output(self):
        """Return the annotation JSON for this sheet, or None if no tables
        were detected."""
        if len(self.table_list) == 0:
            return None
        sheet_json = dict()
        # NOTE(review): `property` shadows the builtin of the same name;
        # harmless here but worth renaming.
        property = {"sheet_indices": str([self.sheet_index])}
        sheet_json['Properties'] = property
        sheet_json['TimeSeriesRegions'] = []
        for table in self.table_list:
            for t_json in table.create_json():
                sheet_json['TimeSeriesRegions'].append(t_json)
        sheet_json['GlobalMetadata'] = [{"name": "title", "source": "sheet_name"}]
        return sheet_json

    @classmethod
    def merge_sheet_indices(cls, idx1, idx2):
        """Merge two stringified sheet-index specs (e.g. "[2]" and "[3]"
        become "[2:3]"); raises when idx2 does not directly extend idx1."""
        idx1_stripped = idx1[1:-1].split(":")
        idx2_stripped = str(idx2[1:-1])
        if idx1 == idx2:
            return idx1
        elif len(idx1_stripped) == 2 and int(idx1_stripped[1]) + 1 == int(idx2_stripped):
            return "[" + idx1_stripped[0] + ":" + idx2_stripped + "]"
        elif len(idx1_stripped) == 1 and int(idx1_stripped[0]) + 1 == int(idx2_stripped):
            return "[" + idx1_stripped[0] + ":" + idx2_stripped + "]"
        else:
            raise Exception("Sheet indices cannot be merged.")

    @classmethod
    def merge_row_indices(cls, idx1, idx2):
        """Row specs merge only when they are identical."""
        if idx1 == idx2:
            return idx1
        else:
            raise Exception("Row indices cannot be merged.")

    @classmethod
    # Merge json for 2 sheets if they are similar
    def merge_json(cls, json1, json2):
        """Recursively merge two per-sheet JSON structures.

        NOTE(review): when json1 and json2 are unequal scalars, execution
        falls off the end of the `try` and the method implicitly returns
        None instead of raising -- this looks like a bug; confirm the
        intended behavior before relying on it.
        """
        try:
            if type(json1) != type(json2):
                raise Exception("Json type is not equal")
            if isinstance(json1, list):
                ret = []
                if len(json1) != len(json2):
                    raise Exception("List length not equal")
                for item1, item2 in zip(json1, json2):
                    merged = cls.merge_json(item1, item2)
                    ret.append(merged)
                return ret
            elif isinstance(json1, dict):
                ret = {}
                if len(json1) != len(json2):
                    raise Exception("Dict length not equal.")
                for key in json1:
                    if key in json2:
                        if type(json1[key]) == type(json2[key]):
                            if isinstance(json1[key], list) or isinstance(json1[key], dict):
                                merged = cls.merge_json(json1[key], json2[key])
                                ret[key] = merged
                            elif key == "sheet_indices":
                                ret[key] = cls.merge_sheet_indices(json1[key], json2[key])
                            elif key == "rows":
                                ret[key] = cls.merge_row_indices(json1[key], json2[key])
                            elif json1[key] == json2[key]:
                                ret[key] = json1[key]
                            else:
                                raise Exception("Key values are not equal.")
                        else:
                            raise Exception("Dict key types do not match")
                    else:
                        raise Exception("Key not found in dict")
                return ret
            elif json1 == json2:
                return json1
        # NOTE(review): re-wrapping loses the original traceback; a bare
        # `raise` would preserve it.
        except Exception as exc:
            raise Exception(exc)

    @classmethod
    def merge_json_list(cls, final_json):
        """Collapse consecutive mergeable sheet JSONs in *final_json* into a
        single entry each; unmergeable neighbors start a new entry."""
        return_json = []
        # NOTE(review): `json` shadows the stdlib module name (not imported
        # in this file, so harmless, but confusing).
        json = final_json[0]
        for i in range(len(final_json)):
            try:
                merged_json = cls.merge_json(json, final_json[i])
                json = merged_json
            # NOTE(review): bare except treats ANY error as "not mergeable".
            except:
                return_json.append(json)
                json = final_json[i]
        return_json.append(json)
        return return_json
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.