# (dataset-export residue, neutralized as comments so the file stays parseable)
# gt stringclasses 1
# value | context stringlengths 2.49k 119k
# |---|---|
import datetime
from django.forms import *
from django.utils import formats
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.forms.util import flatatt
from django.utils import datetime_safe
from dojango.util import json_encode
from dojango.util.config import Config
from dojango.util import dojo_collector
# Public API of this module: django's original media classes plus every
# dojo-enabled widget defined below.
__all__ = (
    'Media', 'MediaDefiningClass', # original django classes
    'DojoWidgetMixin', 'Input', 'Widget', 'TextInput', 'PasswordInput',
    'HiddenInput', 'MultipleHiddenInput', 'FileInput', 'Textarea',
    'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
    'NullBooleanSelect', 'SelectMultiple', 'RadioInput', 'RadioFieldRenderer',
    'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
    'SplitHiddenDateTimeWidget', 'SimpleTextarea', 'EditorInput', 'HorizontalSliderInput',
    'VerticalSliderInput', 'ValidationTextInput', 'ValidationPasswordInput',
    'EmailTextInput', 'IPAddressTextInput', 'URLTextInput', 'NumberTextInput',
    'RangeBoundTextInput', 'NumberSpinnerInput', 'RatingInput', 'DateInputAnim',
    'DropDownSelect', 'CheckedMultiSelect', 'FilteringSelect', 'ComboBox',
    'ComboBoxStore', 'FilteringSelectStore', 'ListInput',
)

dojo_config = Config() # initialize the configuration
class DojoWidgetMixin:
    """A helper mixin, that is used by every custom dojo widget.

    Some dojo widgets can utilize the validation information of a field and here
    we mixin those attributes into the widget. Field attributes that are listed
    in the 'valid_extra_attrs' will be mixed into the attributes of a widget.
    The 'default_field_attr_map' property contains the default mapping of field
    attributes to dojo widget attributes.

    This mixin also takes care passing the required dojo modules to the collector.
    'dojo_type' defines the used dojo module type of this widget and adds this
    module to the collector, if no 'alt_require' property is defined. When
    'alt_require' is set, this module will be passed to the collector. By using
    'extra_dojo_require' it is possible to pass additional dojo modules to the
    collector.
    """
    dojo_type = None # this is the dojoType definition of the widget. also used for generating the dojo.require call
    alt_require = None # alternative dojo.require call (not using the dojo_type)
    extra_dojo_require = [] # these dojo modules also needs to be loaded for this widget
    default_field_attr_map = { # the default map for mapping field attributes to dojo attributes
        'required':'required',
        'help_text':'promptMessage',
        'min_value':'constraints.min',
        'max_value':'constraints.max',
        'max_length':'maxLength',
        'max_digits':'maxLength',
        'decimal_places':'constraints.places',
        'js_regex':'regExp',
        'multiple':'multiple',
    }
    field_attr_map = {} # used for overwriting the default attr-map
    valid_extra_attrs = [] # these field_attributes are valid for the current widget

    def _mixin_attr(self, attrs, key, value):
        """Mixes in the passed key/value into the passed attrs and returns that
        extended attrs dictionary.

        A 'key', that is separated by a dot, e.g. 'constraints.min', will be
        added as:

            {'constraints':{'min':value}}

        Existing (non-None) leaf values are never overwritten.
        """
        dojo_field_attr = key.split(".")
        inner_dict = attrs
        len_fields = len(dojo_field_attr)
        for pos, part in enumerate(dojo_field_attr, 1):
            if pos == len_fields and inner_dict.get(part, None) is None:
                # leaf of the dotted path: serialize date/time values the way
                # dojo.date.stamp expects them before storing
                if isinstance(value, datetime.datetime):
                    if isinstance(self, TimeInput):
                        value = value.strftime('T%H:%M:%S')
                    if isinstance(self, DateInput):
                        value = value.strftime('%Y-%m-%d')
                    value = str(value).replace(' ', 'T') # see dojo.date.stamp
                if isinstance(value, datetime.date):
                    value = str(value)
                if isinstance(value, datetime.time):
                    value = "T" + str(value) # see dojo.date.stamp
                inner_dict[part] = value
            elif part not in inner_dict:
                # was inner_dict.has_key(part) -- removed in Python 3
                inner_dict[part] = {}
            inner_dict = inner_dict[part]
        return attrs

    def build_attrs(self, extra_attrs=None, **kwargs):
        """Overwritten helper function for building an attribute dictionary.
        This helper also takes care passing the used dojo modules to the
        collector. Furthermore it mixes in the used field attributes into the
        attributes of this widget.
        """
        # gathering all widget attributes
        attrs = dict(self.attrs, **kwargs)
        field_attr = self.default_field_attr_map.copy() # use a copy of that object. otherwise changed field_attr_map would overwrite the default-map for all widgets!
        field_attr.update(self.field_attr_map) # the field-attribute-mapping can be customzied
        if extra_attrs:
            attrs.update(extra_attrs)
        # assigning dojoType to our widget
        dojo_type = getattr(self, "dojo_type", False)
        if dojo_type:
            attrs["dojoType"] = dojo_type # add the dojoType attribute
        # fill the global collector object
        if getattr(self, "alt_require", False):
            dojo_collector.add_module(self.alt_require)
        elif dojo_type:
            dojo_collector.add_module(self.dojo_type)
        extra_requires = getattr(self, "extra_dojo_require", [])
        for module in extra_requires:
            dojo_collector.add_module(module)
        # mixin those additional field attrs, that are valid for this widget
        extra_field_attrs = attrs.get("extra_field_attrs", False)
        if extra_field_attrs:
            for field_name in self.valid_extra_attrs:
                field_val = extra_field_attrs.get(field_name, None)
                new_attr_name = field_attr.get(field_name, None)
                if field_val is not None and new_attr_name is not None:
                    attrs = self._mixin_attr(attrs, new_attr_name, field_val)
            del attrs["extra_field_attrs"]
        # now encode several attributes, e.g. False = false, True = true
        for attr_name in attrs:
            if isinstance(attrs[attr_name], bool):
                attrs[attr_name] = json_encode(attrs[attr_name])
        return attrs
#############################################
# ALL OVERWRITTEN DEFAULT DJANGO WIDGETS
#############################################
class Widget(DojoWidgetMixin, widgets.Widget):
    # generic widget: dijit._Widget is the base class of all dijits
    dojo_type = 'dijit._Widget'

class Input(DojoWidgetMixin, widgets.Input):
    # plain <input>; defines no dojoType, so no dojo.require is emitted
    pass

class TextInput(DojoWidgetMixin, widgets.TextInput):
    dojo_type = 'dijit.form.TextBox'
    # forward the field's max_length (mapped to maxLength, see mixin map)
    valid_extra_attrs = [
        'max_length',
    ]

class PasswordInput(DojoWidgetMixin, widgets.PasswordInput):
    dojo_type = 'dijit.form.TextBox'
    valid_extra_attrs = [
        'max_length',
    ]

class HiddenInput(DojoWidgetMixin, widgets.HiddenInput):
    dojo_type = 'dijit.form.TextBox' # otherwise dijit.form.Form can't get its values

class MultipleHiddenInput(DojoWidgetMixin, widgets.MultipleHiddenInput):
    dojo_type = 'dijit.form.TextBox' # otherwise dijit.form.Form can't get its values

class FileInput(DojoWidgetMixin, widgets.FileInput):
    dojo_type = 'dojox.form.FileInput'
    class Media:
        # widget stylesheet, resolved against the configured dojo base url
        css = {
            'all': ('%(base_url)s/dojox/form/resources/FileInput.css' % {
                'base_url':dojo_config.dojo_base_url
            },)
        }

class Textarea(DojoWidgetMixin, widgets.Textarea):
    """Auto resizing textarea"""
    dojo_type = 'dijit.form.Textarea'
    valid_extra_attrs = [
        'max_length'
    ]
# NOTE(review): 'DateInput' here comes from the star import of django.forms;
# on django versions without it this line raises NameError rather than taking
# the else-branch -- confirm which versions this fallback actually serves.
if DateInput:
    class DateInput(DojoWidgetMixin, widgets.DateInput):
        dojo_type = 'dijit.form.DateTextBox'
        # field attributes forwarded to the dijit (see DojoWidgetMixin)
        valid_extra_attrs = [
            'required',
            'help_text',
            'min_value',
            'max_value',
        ]
else: # fallback for older django versions
    class DateInput(TextInput):
        """Copy of the implementation in Django 1.1. Before this widget did not exists."""
        dojo_type = 'dijit.form.DateTextBox'
        valid_extra_attrs = [
            'required',
            'help_text',
            'min_value',
            'max_value',
        ]
        format = '%Y-%m-%d' # '2006-10-25'
        def __init__(self, attrs=None, format=None):
            super(DateInput, self).__init__(attrs)
            if format:
                self.format = format
        def render(self, name, value, attrs=None):
            if value is None:
                value = ''
            elif hasattr(value, 'strftime'):
                # datetime_safe handles dates before 1900 that strftime rejects
                value = datetime_safe.new_date(value)
                value = value.strftime(self.format)
            return super(DateInput, self).render(name, value, attrs)
# NOTE(review): 'TimeInput' comes from the star import of django.forms; on
# versions without it this raises NameError instead of reaching the
# else-branch -- mirrors the DateInput pattern above.
if TimeInput:
    class TimeInput(DojoWidgetMixin, widgets.TimeInput):
        """dijit time text box on top of django's TimeInput."""
        dojo_type = 'dijit.form.TimeTextBox'
        valid_extra_attrs = [
            'required',
            'help_text',
            'min_value',
            'max_value',
        ]
        format = "T%H:%M:%S" # special for dojo: 'T12:12:33'
        def __init__(self, attrs=None, format=None):
            # always passing the dojo time format
            # NOTE(review): the 'format' argument is accepted but ignored;
            # dojo needs its own stamp format -- confirm this is intended.
            super(TimeInput, self).__init__(attrs, format=self.format)
        def _has_changed(self, initial, data):
            """Compare against the dojo-formatted initial value."""
            try:
                input_format = self.format
                # FIX: the original called time.strptime(), but the 'time'
                # module is never imported -- that raised a NameError which
                # the except clause below did not catch.
                initial = datetime.datetime.strptime(initial, input_format).time()
            except (TypeError, ValueError):
                pass
            return super(TimeInput, self)._has_changed(self._format_value(initial), data)
else: # fallback for older django versions
    class TimeInput(TextInput):
        """Copy of the implementation in Django 1.1. Before this widget did not exists."""
        dojo_type = 'dijit.form.TimeTextBox'
        valid_extra_attrs = [
            'required',
            'help_text',
            'min_value',
            'max_value',
        ]
        format = "T%H:%M:%S" # special for dojo: 'T12:12:33'
        def __init__(self, attrs=None, format=None):
            super(TimeInput, self).__init__(attrs)
            if format:
                self.format = format
        def render(self, name, value, attrs=None):
            if value is None:
                value = ''
            elif hasattr(value, 'strftime'):
                value = value.strftime(self.format)
            return super(TimeInput, self).render(name, value, attrs)
class CheckboxInput(DojoWidgetMixin, widgets.CheckboxInput):
    dojo_type = 'dijit.form.CheckBox'

class Select(DojoWidgetMixin, widgets.Select):
    # dijit.form.Select only exists from dojo 1.4 on; older releases fall
    # back to FilteringSelect (which also supports promptMessage/help_text)
    dojo_type = dojo_config.version < '1.4' and 'dijit.form.FilteringSelect' or 'dijit.form.Select'
    valid_extra_attrs = dojo_config.version < '1.4' and \
        ['required', 'help_text',] or \
        ['required',]

class NullBooleanSelect(DojoWidgetMixin, widgets.NullBooleanSelect):
    dojo_type = dojo_config.version < '1.4' and 'dijit.form.FilteringSelect' or 'dijit.form.Select'
    valid_extra_attrs = dojo_config.version < '1.4' and \
        ['required', 'help_text',] or \
        ['required',]

class SelectMultiple(DojoWidgetMixin, widgets.SelectMultiple):
    dojo_type = 'dijit.form.MultiSelect'

# no dojo counterparts exist; reuse django's classes unchanged
RadioInput = widgets.RadioInput
RadioFieldRenderer = widgets.RadioFieldRenderer

class RadioSelect(DojoWidgetMixin, widgets.RadioSelect):
    dojo_type = 'dijit.form.RadioButton'
    def __init__(self, *args, **kwargs):
        # before dojo 1.3 the radio button was shipped in the CheckBox module
        if dojo_config.version < '1.3':
            self.alt_require = 'dijit.form.CheckBox'
        super(RadioSelect, self).__init__(*args, **kwargs)

class CheckboxSelectMultiple(DojoWidgetMixin, widgets.CheckboxSelectMultiple):
    dojo_type = 'dijit.form.CheckBox'

class MultiWidget(DojoWidgetMixin, widgets.MultiWidget):
    # container widget: no dojoType / dojo.require of its own
    dojo_type = None
class SplitDateTimeWidget(widgets.SplitDateTimeWidget):
    "DateTimeInput is using two input fields."
    try:
        # for older django versions
        date_format = DateInput.format
        time_format = TimeInput.format
    except AttributeError:
        # the new-django DateInput/TimeInput classes above define no
        # 'format' class attribute, so django's defaults apply
        date_format = None
        time_format = None
    def __init__(self, attrs=None, date_format=None, time_format=None):
        if date_format:
            self.date_format = date_format
        if time_format:
            self.time_format = time_format
        split_widgets = (DateInput(attrs=attrs, format=self.date_format),
            TimeInput(attrs=attrs, format=self.time_format))
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        widgets.MultiWidget.__init__(self, split_widgets, attrs)

class SplitHiddenDateTimeWidget(DojoWidgetMixin, widgets.SplitHiddenDateTimeWidget):
    dojo_type = "dijit.form.TextBox"

# datetime values are edited through the split date/time widget pair
DateTimeInput = SplitDateTimeWidget
#############################################
# MORE ENHANCED DJANGO/DOJO WIDGETS
#############################################
class SimpleTextarea(Textarea):
    """No autoexpanding textarea"""
    dojo_type = "dijit.form.SimpleTextarea"

class EditorInput(Textarea):
    dojo_type = 'dijit.Editor'
    def render(self, name, value, attrs=None):
        """Render the rich-text editor as a <div> wrapping the raw value."""
        if value is None: value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        # dijit.Editor must be rendered in a div (see dijit/_editor/RichText.js)
        return mark_safe(u'<div%s>%s</div>' % (flatatt(final_attrs),
            force_unicode(value))) # we don't escape the value for the editor
class HorizontalSliderInput(TextInput):
    dojo_type = 'dijit.form.HorizontalSlider'
    valid_extra_attrs = [
        'max_value',
        'min_value',
    ]
    # sliders name their bounds maximum/minimum instead of the
    # constraints.* names in the mixin's default map
    field_attr_map = {
        'max_value': 'maximum',
        'min_value': 'minimum',
    }
    def __init__(self, attrs=None):
        # before dojo 1.3 both sliders lived in a combined Slider module
        if dojo_config.version < '1.3':
            self.alt_require = 'dijit.form.Slider'
        super(HorizontalSliderInput, self).__init__(attrs)

class VerticalSliderInput(HorizontalSliderInput):
    dojo_type = 'dijit.form.VerticalSlider'
class ValidationTextInput(TextInput):
    dojo_type = 'dijit.form.ValidationTextBox'
    valid_extra_attrs = [
        'required',
        'help_text',
        'js_regex',
        'max_length',
    ]
    # optional client-side function name that generates the regexp
    js_regex_func = None
    def render(self, name, value, attrs=None):
        if self.js_regex_func:
            # hand the generator function to the dijit as regExpGen
            attrs = self.build_attrs(attrs, regExpGen=self.js_regex_func)
        return super(ValidationTextInput, self).render(name, value, attrs)

class ValidationPasswordInput(PasswordInput):
    dojo_type = 'dijit.form.ValidationTextBox'
    valid_extra_attrs = [
        'required',
        'help_text',
        'js_regex',
        'max_length',
    ]
class EmailTextInput(ValidationTextInput):
    # dojox.validate.regexp ships the js regexp generators used below
    extra_dojo_require = [
        'dojox.validate.regexp'
    ]
    js_regex_func = "dojox.validate.regexp.emailAddress"
    def __init__(self, attrs=None):
        # the generator lived in dojox.regexp before dojo 1.3
        if dojo_config.version < '1.3':
            self.js_regex_func = 'dojox.regexp.emailAddress'
        super(EmailTextInput, self).__init__(attrs)

class IPAddressTextInput(ValidationTextInput):
    extra_dojo_require = [
        'dojox.validate.regexp'
    ]
    js_regex_func = "dojox.validate.regexp.ipAddress"
    def __init__(self, attrs=None):
        # the generator lived in dojox.regexp before dojo 1.3
        if dojo_config.version < '1.3':
            self.js_regex_func = 'dojox.regexp.ipAddress'
        super(IPAddressTextInput, self).__init__(attrs)

class URLTextInput(ValidationTextInput):
    extra_dojo_require = [
        'dojox.validate.regexp'
    ]
    js_regex_func = "dojox.validate.regexp.url"
    def __init__(self, attrs=None):
        # the generator lived in dojox.regexp before dojo 1.3
        if dojo_config.version < '1.3':
            self.js_regex_func = 'dojox.regexp.url'
        super(URLTextInput, self).__init__(attrs)
class NumberTextInput(TextInput):
    dojo_type = 'dijit.form.NumberTextBox'
    # numeric fields forward their bounds and precision to the dijit
    valid_extra_attrs = [
        'min_value',
        'max_value',
        'required',
        'help_text',
        'decimal_places',
        'max_digits',
    ]

class RangeBoundTextInput(NumberTextInput):
    dojo_type = 'dijit.form.RangeBoundTextBox'

class NumberSpinnerInput(NumberTextInput):
    dojo_type = 'dijit.form.NumberSpinner'
class RatingInput(TextInput):
    dojo_type = 'dojox.form.Rating'
    valid_extra_attrs = [
        'max_value',
    ]
    # the rating widget calls its maximum 'numStars'
    field_attr_map = {
        'max_value': 'numStars',
    }
    class Media:
        css = {
            'all': ('%(base_url)s/dojox/form/resources/Rating.css' % {
                'base_url':dojo_config.dojo_base_url
            },)
        }

class DateInputAnim(DateInput):
    # animated calendar variant of the date text box
    dojo_type = 'dojox.form.DateTextBox'
    class Media:
        css = {
            'all': ('%(base_url)s/dojox/widget/Calendar/Calendar.css' % {
                'base_url':dojo_config.dojo_base_url
            },)
        }

class DropDownSelect(Select):
    dojo_type = 'dojox.form.DropDownSelect'
    valid_extra_attrs = []
    class Media:
        css = {
            'all': ('%(base_url)s/dojox/form/resources/DropDownSelect.css' % {
                'base_url':dojo_config.dojo_base_url
            },)
        }
class CheckedMultiSelect(SelectMultiple):
    dojo_type = 'dojox.form.CheckedMultiSelect'
    valid_extra_attrs = []
    # TODO: fix attribute multiple=multiple
    # seems there is a dependency in dojox.form.CheckedMultiSelect for dijit.form.MultiSelect,
    # but CheckedMultiSelect is not extending that
    class Media:
        css = {
            'all': ('%(base_url)s/dojox/form/resources/CheckedMultiSelect.css' % {
                'base_url':dojo_config.dojo_base_url
            },)
        }

class ComboBox(DojoWidgetMixin, widgets.Select):
    """Nearly the same as FilteringSelect, but ignoring the option value."""
    dojo_type = 'dijit.form.ComboBox'
    valid_extra_attrs = [
        'required',
        'help_text',
    ]

class FilteringSelect(ComboBox):
    dojo_type = 'dijit.form.FilteringSelect'
class ComboBoxStore(TextInput):
    """A combobox that is receiving data from a given dojo data url.

    As default dojo.data.ItemFileReadStore is used. You can overwrite
    that behaviour by passing a different store name
    (e.g. dojox.data.QueryReadStore).

    Usage:

        ComboBoxStore("/dojo-data-store-url/")
    """
    dojo_type = 'dijit.form.ComboBox'
    valid_extra_attrs = [
        'required',
        'help_text',
    ]
    store = 'dojo.data.ItemFileReadStore'
    store_attrs = {}
    url = None

    def __init__(self, url, attrs=None, store=None, store_attrs=None):
        # 'store_attrs' previously defaulted to a shared mutable dict ({} in
        # the signature); each instance now gets its own copy.
        self.url = url
        if store:
            self.store = store
        self.store_attrs = dict(store_attrs) if store_attrs else {}
        # Build a per-instance list instead of appending to the class-level
        # 'extra_dojo_require' (that list is defined on DojoWidgetMixin and is
        # shared by ALL widgets, so it grew on every instantiation).
        self.extra_dojo_require = list(self.extra_dojo_require) + [self.store]
        super(ComboBoxStore, self).__init__(attrs)

    def render(self, name, value, attrs=None):
        if value is None: value = ''
        # 'attrs' is a dict, so the original getattr(attrs, "id", None) could
        # never find an id; use a dict lookup so a passed id is honoured.
        store_id = self.get_store_id((attrs or {}).get("id", None), name)
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name, store=store_id)
        if value != '':
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_unicode(self._format_value(value))
        self.store_attrs.update({
            'dojoType': self.store,
            'url': self.url,
            'jsId': store_id
        })
        # TODO: convert store attributes to valid js-format (False => false, dict => {}, array = [])
        store_node = '<div%s></div>' % flatatt(self.store_attrs)
        return mark_safe(u'%s<input%s />' % (store_node, flatatt(final_attrs)))

    def get_store_id(self, id, name):
        """Derive the js id of the data store from the field id or its name."""
        return "_store_" + (id and id or name)
class FilteringSelectStore(ComboBoxStore):
    # store-backed variant that enforces a valid option value
    dojo_type = 'dijit.form.FilteringSelect'

class ListInput(DojoWidgetMixin, widgets.TextInput):
    dojo_type = 'dojox.form.ListInput'
    class Media:
        css = {
            'all': ('%(base_url)s/dojox/form/resources/ListInput.css' % {
                'base_url':dojo_config.dojo_base_url
            },)
        }
# THE RANGE SLIDER NEEDS A DIFFERENT REPRESENTATION WITHIN HTML
# SOMETHING LIKE:
# <div dojoType="dojox.form.RangeSlider"><input value="5"/><input value="10"/></div>
'''class HorizontalRangeSlider(HorizontalSliderInput):
"""This just can be used with a comma-separated-value like: 20,40"""
dojo_type = 'dojox.form.HorizontalRangeSlider'
alt_require = 'dojox.form.RangeSlider'
class Media:
css = {
'all': ('%(base_url)s/dojox/form/resources/RangeSlider.css' % {
'base_url':dojo_config.dojo_base_url
},)
}
'''
# TODO: implement
# dojox.form.RangeSlider
# dojox.form.MultiComboBox
# dojox.form.FileUploader
# ---- end of file 1 (dojango widgets); dataset-export row separator ----
# -*- coding: utf-8 -*-
"""
drawing ERD(table relationships) by graphviz dot
"""
import os
import re
import json
from time import strftime, localtime
import logging
import subprocess
from mako.template import Template
from random import randint
from mabozen.config import get_db_config
from mabozen.lib.pg import parse_url
from mabozen.lib.pg import Pg
LOGGER = logging.getLogger("tools")
# edge colors, one picked at random per relationship ('black' appears twice)
COLORS = ['darkgreen', 'red', 'navy', 'green', 'yellow', 'blue',
    'violet', 'darksalmon', 'black', 'orangered', 'cyan', 'darkorange',
    'yellowgreen', 'black']
class RelationMap(object):
    """Query PostgreSQL schema information and emit a graphviz ERD.

    rtype: 'c' follows child tables, 'p' follows parent tables.
    etype: edge style -- 1 = label + color, 2 = label only, 3 = color only.
    table_list: restrict the graph to these tables (empty list = no filter).
    """
    def __init__(self, table_list, rtype, etype):
        """Validate arguments and open the database connection."""
        # raise a meaningful error instead of the original bare `raise`,
        # which outside an except block is itself a runtime error
        if rtype not in ['c', 'p']:
            raise ValueError("rtype must be 'c' or 'p', got %r" % (rtype,))
        self.rtype = rtype
        if etype not in [1, 2, 3]:
            raise ValueError("etype must be 1, 2 or 3, got %r" % (etype,))
        self.etype = etype
        self.table_list = table_list
        self.max_level = 10  # recursion depth limit for one()
        cfg = get_db_config()
        url = cfg['DB_URL']
        #catalog == dbname
        #schema == username
        components = parse_url(url)
        self.catalog = components["database"]
        self.schema = components["username"]
        self.dbi = Pg(components)
        self.edges = []            # dot edge lines (plus comment lines)
        self.nodes_single = set()  # tables without any relationship
        self.tables = set()        # tables already visited
        self.edge_list = set()     # "a -> b" pairs already emitted (dedup)
    @classmethod
    def get_keys(cls, line):
        """Parse a FOREIGN KEY clause; return (fkey, referenced_table, pkey)."""
        rawstr = r"""FOREIGN KEY \((.*)\) REFERENCES (.*)\((.*)\)"""
        compile_obj = re.compile(rawstr)
        match_obj = compile_obj.search(line)
        # NOTE(review): raises AttributeError if the clause does not match;
        # callers currently always pass well-formed constraint text
        fkey = match_obj.group(1)
        tab = match_obj.group(2)
        pkey = match_obj.group(3)
        return (fkey, tab, pkey)
    def dot(self):
        """Render the collected nodes/edges via the mako template and run dot."""
        dot_template = Template(filename="graphviz_mako.dot", \
            disable_unicode=True, input_encoding='utf-8')
        dotstr = dot_template.render(nodes = self.nodes_single, \
            edges = self.edges)
        stamp = (strftime("%Y%m%d%H%M%S", localtime()))
        # NOTE(review): output paths and the Graphviz binary location are
        # Windows specific
        dot_file = os.sep.join(["..\\..\\working", "graph", \
            "maboss%s_%s.dot" % (self.etype, stamp)])
        with open(dot_file,'w') as fileh:
            fileh.write(dotstr.replace("\r\n","\n"))
        out_file = os.sep.join(["..\\..\\working", "graph", \
            "maboss%s_%s.svg" %(self.etype, stamp)])
        cmd = r'''C:\Tools\Graphviz2.30\bin\dot -Tsvg -o %s %s''' \
            % (out_file, dot_file )
        subprocess.Popen(cmd, shell=True)
    def _make_edge(self, rel):
        """Build one dot edge line (child -> parent) in the selected style."""
        color = COLORS[randint(0, len(COLORS)-1)]
        if self.etype == 1:
            edge = """%s -> %s [label="%s", color="%s"]""" \
                % (rel["ctable"], rel["ptable"], \
                self.get_keys(rel["ppkey"])[2] , color)
        elif self.etype == 2:
            edge = """%s -> %s [label="%s"]""" % (rel["ctable"], \
                rel["ptable"], self.get_keys(rel["ppkey"])[2] )
        elif self.etype == 3:
            edge = """%s -> %s[color="%s"]""" \
                % (rel["ctable"], rel["ptable"], color)
        else:
            # unreachable: etype is validated in __init__
            raise ValueError("unsupported etype %r" % (self.etype,))
        return edge
    def one(self, table_name, level):
        """Recursively collect the relationships of one table, depth-limited."""
        if len(self.table_list) != 0:
            if table_name not in self.table_list:
                return
        if table_name in self.tables:
            return
        else:
            self.tables.add(table_name)
        if level > self.max_level:
            return
        i_json = {"schema":self.schema, "table_name":table_name, "type":"p"}
        func_map = {'c':"mtp_get_ctable_pg1", 'p':"mtp_get_ptable_pg2"}
        self.dbi.execute("select * from %s ('%s')" \
            % (func_map[self.rtype], json.dumps(i_json)))
        result = self.dbi.fetchone()
        if result[0] is None:  # was `== None`
            # isolated table: no relationships found
            self.nodes_single.add(table_name)
            return
        comment = "\n/* %s [%d] */" % (table_name, len(result[0]))
        self.edges.append(comment)
        for rel in result[0]:
            if len(self.table_list) != 0:
                if rel["ptable"] not in self.table_list:
                    continue
            edge = self._make_edge(rel)
            edge_s = """%s -> %s""" % (rel["ctable"], rel["ptable"])
            if edge_s in self.edge_list:
                continue
            else:
                self.edge_list.add(edge_s)
                self.edges.append(edge)
            self.one(rel["ctable"], level+1)
    def prepare_all(self):
        """Walk every base table of the configured catalog/schema."""
        sql = """select table_name from information_schema.tables
            where table_catalog = '%s'
            and table_schema = '%s'
            and table_type = 'BASE TABLE'
            order by table_name
            """ % (self.catalog, self.schema)
        self.dbi.execute(sql)
        tabs = self.dbi.fetchall()
        for tab in tabs:
            self.one(tab[0], 1)
    def prepare_root(self, table_name, max_level):
        """Walk the relation tree starting from table_name, depth-limited."""
        self.max_level = max_level
        self.one(table_name, 1)
def process(table_list):
    """ main """
    # 'p' = follow parent relations; 3 = colored, unlabeled edges
    rmap = RelationMap(table_list, 'p', 3)
    table_name = None#"wip_order"
    rmap.prepare_all()
    max_level = 3
    # NOTE(review): prepare_root(None, ...) after prepare_all() looks like a
    # leftover of the commented "wip_order" experiment above -- confirm
    # before removing.
    rmap.prepare_root(table_name, max_level)
    rmap.dot()
def main():
    """Read table groups from relation_group.json and draw one ERD per group."""
    with open("relation_group.json",'r') as fileh:
        groups = json.loads(fileh.read())
    for table_group in groups:
        # parenthesized call form: the original used the Python-2-only
        # `print x` statement; with a single argument this prints identically
        print(table_group["tables"])
        process(table_group["tables"])

if __name__ == '__main__':
    main()
# ---- end of file 2 (ERD tool); dataset-export row separator ----
# importing necessary utilities and frameworks
import os
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sklearn
# helper functions
def cropImage(image):
    """Cut away sky and hood (rows 65..140) and return a 75x320x3 image."""
    cropped_image = image[65:140, 0:320]
    # NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
    #np.reshape(img, (height, width, nchannels)) // height = 140-65=75; width = 320-0=320; channels=3 for RGB
    resized_cropped_image = cv2.resize(cropped_image, (320,75), interpolation=cv2.INTER_AREA)
    reshaped_cropped_image = np.reshape(resized_cropped_image,(75,320,3))
    return reshaped_cropped_image
# flips the image on its horizontal axis (left/right)
def flipImage(image):
    """Mirror *image* left-to-right (flip about its vertical center line)."""
    return np.fliplr(image)
# reverses the steering angle
def flipSteeringAngle(angle):
    """Return the steering angle for the mirrored image (sign reversed)."""
    return -angle
# flips the image on its vertical axis (up/down)
def flipImageUpDown(image):
    """Mirror *image* top-to-bottom (flip about its horizontal center line)."""
    return np.flipud(image)
# converts color image to gray
def grayscale(image):
    """Collapse a BGR color image to a single-channel grayscale image."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
def normalizeGrayscaleImage(image):
    """Min-max scale 8-bit intensity data from [0, 255] into [0.1, 0.9].

    Works element-wise, so scalars and numpy arrays are both accepted.
    (The original carried an empty docstring and commented-out debug
    plotting, both removed.)
    """
    a = 0.1            # lower bound of the target range
    b = 0.9            # upper bound of the target range
    grayscale_min = 0
    grayscale_max = 255
    return a + (((image - grayscale_min) * (b - a)) / (grayscale_max - grayscale_min))
# returns the correction factor to dynamically set the steering angle
def getCorrectionFactor():
    """Steering-angle offset applied to the left/right camera frames."""
    return 0.14
# generator function to train the model using mini-batches
def generator(samples, batch_size):
    """Endless mini-batch generator over driving-log rows.

    Each csv row yields SIX training images (center/left/right camera plus a
    horizontal flip of each), so a yielded batch holds up to 6*batch_size
    samples. Image and angle lists are appended in lock-step so index i of
    X_train always matches index i of y_train.
    """
    num_samples = len(samples)
    while 1: # Loop forever so the generator never terminates
        sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            #local arrays to hold car images and steering angles read from the training dataset
            images = []
            angles = []
            #get steering correction factor
            angle_correction_amt = getCorrectionFactor()
            for batch_sample in batch_samples:
                #first column: image from car center camera
                center_name = 'data/IMG/'+batch_sample[0].split('/')[-1]
                center_image = cv2.imread(center_name)
                images.append(center_image)
                #augment the center image by flipping the image on its horizontal axis
                images.append(flipImage(center_image))
                #second column: image from car left camera
                left_name = 'data/IMG/'+batch_sample[1].split('/')[-1]
                left_image = cv2.imread(left_name)
                images.append(left_image)
                #augment the left image by flipping the image on its horizontal axis
                images.append(flipImage(left_image))
                #third column: image from car right camera
                right_name = 'data/IMG/'+batch_sample[2].split('/')[-1]
                right_image = cv2.imread(right_name)
                images.append(right_image)
                #augment the right image by flipping the image on its horizontal axis
                images.append(flipImage(right_image))
                #fourth column: steering angle
                steering_angle = float(batch_sample[3])
                angles.append(steering_angle)
                #augment steering angles by flipping the image on its horizontal axis
                angles.append(flipSteeringAngle(steering_angle))
                #adjust left steering angle with correction factor
                left_angle = steering_angle + angle_correction_amt
                angles.append(left_angle)
                #augment steering angles by flipping the image on its horizontal axis
                angles.append(flipSteeringAngle(left_angle))
                #adjust right steering angle with correction factor
                right_angle = steering_angle - angle_correction_amt
                angles.append(right_angle)
                #augment steering angles by flipping the image on its horizontal axis
                angles.append(flipSteeringAngle(right_angle))
            X_train = np.array(images)
            y_train = np.array(angles)
            #shuffle dataset before returning
            yield sklearn.utils.shuffle(X_train, y_train)
# read car information
def readCarData():
    """Read data/driving_log.csv and return its rows as a list of lists.

    Collects into a local list instead of appending to the module-level
    ``carData`` global, so repeated calls no longer accumulate duplicate
    rows. The return value is unchanged for existing callers.
    """
    print("Reading Car Data")
    rows = []
    with open('data/driving_log.csv') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            rows.append(row)
    print("Finished reading Car Data")
    return rows
# split the training dataset to get a validation sample (20% of the entire training dataset)
def splitDataToTrainingAndValidationSamples(carData):
    """Shuffle and split carData into (train_samples, validation_samples).

    test_size=0.2 reserves 20% of the rows for validation. (Removed a
    commented-out duplicate of the call that served no purpose.)
    """
    from sklearn.model_selection import train_test_split
    return train_test_split(carData, test_size=0.2)
# train the model (Keras model_version)
def trainModel(train_generator, validation_generator):
    """Build the NVIDIA-style CNN, train it on the generators, then save it.

    Relies on the module-level globals row/col/ch, keep_prob,
    mini_batch_size, train_samples and validation_samples.
    Returns the Keras History object produced by training.
    """
    #import Keras utilities (kept function-local as in the original)
    from keras.models import Sequential
    from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Activation
    from keras.layers.convolutional import Convolution2D
    from keras.layers.pooling import MaxPooling2D
    from keras import optimizers
    model = Sequential()
    # Preprocess incoming data, centered around zero with small standard deviation
    model.add(Lambda(lambda x: (x/127.5)-1.,input_shape=(row, col, ch), output_shape=(row, col, ch)))
    # apply cropping to reduce training time
    model.add(Cropping2D(cropping=((75,20), (1,1)),input_shape=(160, 320, 3)))
    # nvidia CNN model - 5 convolutional runs with non-linear regression
    model.add(Convolution2D(24,5,5, activation="relu", subsample=(2, 2)))
    model.add(Convolution2D(36,5,5, activation="relu", subsample=(2, 2)))
    model.add(Convolution2D(48,5,5, activation="relu", subsample=(2, 2)))
    model.add(Convolution2D(64,3,3, activation="relu", subsample=(1, 1)))
    model.add(Convolution2D(64,3,3, activation="relu", subsample=(1, 1)))
    # dropout to prevent overfitting
    # NOTE(review): Keras Dropout() takes the fraction of units to DROP;
    # keep_prob=0.9 therefore drops 90% of activations. The name suggests a
    # TF-style keep probability was intended -- confirm before changing.
    model.add(Dropout(keep_prob))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dropout(keep_prob))
    model.add(Dense(50))
    model.add(Dropout(keep_prob))
    model.add(Dense(10))
    model.add(Dropout(keep_prob))
    model.add(Dense(1))
    #leveraging the adam optimizer with a mean squared error loss
    model.compile(loss='mse', optimizer='adam')
    model.summary()
    #train model
    history_object = model.fit_generator(train_generator,
        samples_per_epoch=(((len(train_samples)//mini_batch_size)*mini_batch_size)*3),
        validation_data = validation_generator,
        nb_val_samples = len(validation_samples),
        nb_epoch=2, verbose=1)
    # FIX: save AFTER training -- the original called model.save() before
    # fit_generator, so model.h5 only ever contained untrained weights.
    print("Saving Model")
    model.save('model.h5')
    print("Saved Model")
    return history_object
# function to plot the model information
def plotModel(history_object):
    """Plot training vs. validation loss per epoch from a Keras History."""
    ### print the keys contained in the history object
    print(history_object.history.keys())
    ### plot the training and validation loss for each epoch
    plt.plot(history_object.history['loss'])
    plt.plot(history_object.history['val_loss'])
    plt.title('Model Mean Squared Error Loss')
    plt.ylabel('Mean Squared Error Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training set', 'Validation set'], loc='upper right')
    plt.show()
    print("Finished Model")
# global variables
carData = []  # raw csv rows (replaced by readCarData's return value below)
ch, row, col = 3, 160, 320 # Trimmed image format
keep_prob = 0.9  # passed to keras Dropout() in trainModel
mini_batch_size = 32  # csv rows per generator batch (6 images each after augmentation)
# read Car Data
carData = readCarData()
# generate training and test data set samples
train_samples, validation_samples = splitDataToTrainingAndValidationSamples(carData)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=mini_batch_size)
validation_generator = generator(validation_samples, batch_size=mini_batch_size)
history_object = trainModel(train_generator, validation_generator)
# visualization
plotModel(history_object)
# ---- end of file 3 (training script); dataset-export row separator ----
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rnat_stats(base_resource) :
    ur""" Statistics for RNAT configured route resource.
    """
    # NOTE: the ur"..." string prefixes below are Python-2-only syntax; this
    # generated NITRO SDK module targets Python 2.
    def __init__(self) :
        # raw counter fields as delivered by the NITRO stat API
        self._clearstats = ""
        self._rnattotrxbytes = 0
        self._rnatrxbytesrate = 0
        self._rnattottxbytes = 0
        self._rnattxbytesrate = 0
        self._rnattotrxpkts = 0
        self._rnatrxpktsrate = 0
        self._rnattottxpkts = 0
        self._rnattxpktsrate = 0
        self._rnattottxsyn = 0
        self._rnattxsynrate = 0
        self._rnatcursessions = 0

    @property
    def clearstats(self) :
        ur"""Clear the statsistics / counters.<br/>Possible values = basic, full.
        """
        try :
            return self._clearstats
        except Exception as e:
            raise e

    @clearstats.setter
    def clearstats(self, clearstats) :
        ur"""Clear the statsistics / counters
        """
        try :
            self._clearstats = clearstats
        except Exception as e:
            raise e

    @property
    def rnattottxpkts(self) :
        ur"""Packets sent during RNAT sessions.
        """
        try :
            return self._rnattottxpkts
        except Exception as e:
            raise e

    @property
    def rnatrxbytesrate(self) :
        ur"""Rate (/s) counter for rnattotrxbytes.
        """
        try :
            return self._rnatrxbytesrate
        except Exception as e:
            raise e

    @property
    def rnattxsynrate(self) :
        ur"""Rate (/s) counter for rnattottxsyn.
        """
        try :
            return self._rnattxsynrate
        except Exception as e:
            raise e

    @property
    def rnattxpktsrate(self) :
        ur"""Rate (/s) counter for rnattottxpkts.
        """
        try :
            return self._rnattxpktsrate
        except Exception as e:
            raise e

    @property
    def rnattottxsyn(self) :
        ur"""Requests for connections sent during RNAT sessions.
        """
        try :
            return self._rnattottxsyn
        except Exception as e:
            raise e

    @property
    def rnattxbytesrate(self) :
        ur"""Rate (/s) counter for rnattottxbytes.
        """
        try :
            return self._rnattxbytesrate
        except Exception as e:
            raise e

    @property
    def rnatrxpktsrate(self) :
        ur"""Rate (/s) counter for rnattotrxpkts.
        """
        try :
            return self._rnatrxpktsrate
        except Exception as e:
            raise e

    @property
    def rnatcursessions(self) :
        ur"""Currently active RNAT sessions.
        """
        try :
            return self._rnatcursessions
        except Exception as e:
            raise e

    @property
    def rnattotrxpkts(self) :
        ur"""Packets received during RNAT sessions.
        """
        try :
            return self._rnattotrxpkts
        except Exception as e:
            raise e

    @property
    def rnattotrxbytes(self) :
        ur"""Bytes received during RNAT sessions.
        """
        try :
            return self._rnattotrxbytes
        except Exception as e:
            raise e

    @property
    def rnattottxbytes(self) :
        ur"""Bytes sent during RNAT sessions.
        """
        try :
            return self._rnattottxbytes
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(rnat_response, response, self.__class__.__name__.replace('_stats',''))
            if(result.errorcode != 0) :
                # errorcode 444: presumably an expired session -- the cached
                # session is dropped before the error is raised; confirm
                # against the NITRO protocol documentation
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.rnat
        except Exception as e :
            raise e

    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            # stat resources carry no identifying attribute; 0 is the
            # generated placeholder
            return 0
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, name="", option_="") :
        ur""" Use this API to fetch the statistics of all rnat_stats resources that are configured on netscaler.
        """
        try :
            obj = rnat_stats()
            # only the "all resources" form is implemented: when a non-empty
            # 'name' is supplied this method falls through and returns None
            if not name :
                response = obj.stat_resources(service, option_)
            return response
        except Exception as e:
            raise e

    class Clearstats:
        # valid values for the 'clearstats' argument
        basic = "basic"
        full = "full"
class rnat_response(base_response):
    """NITRO response envelope: error metadata plus the rnat stats records.

    ``length`` is the number of placeholder records to pre-allocate for the
    payload formatter to fill in.
    """
    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # The generated original assigned self.rnat = [] and then immediately
        # overwrote it; the dead store is removed here.
        self.rnat = [rnat_stats() for _ in range(length)]
| |
# coding: utf-8
from __future__ import absolute_import
from swagger_server.models.comment import Comment
from swagger_server.models.contact import Contact
from swagger_server.models.message import Message
from swagger_server.models.project import Project
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestDefaultController(BaseTestCase):
    """DefaultController integration test stubs.

    Every stub hits the project-tracker API with the Flask test client and
    only checks for HTTP 200.  The originals all repeated the same
    open/dumps/assert boilerplate and — misleadingly — named every request
    body ``project`` even when it was a ``Comment()`` or ``Contact()``;
    both issues are fixed by the two helpers below.
    """

    # ------------------------------------------------------------------ helpers

    def _open_json(self, url, method, body):
        """Send *body* JSON-encoded to *url* with the given HTTP *method*."""
        return self.client.open(url,
                                method=method,
                                data=json.dumps(body),
                                content_type='application/json')

    def _assert_ok(self, response):
        """Assert HTTP 200, echoing the response body on failure."""
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))

    # ------------------------------------------------------------------ /projects

    def test_projects_delete(self):
        """Test case for projects_delete: you shouldn't delete the entire list."""
        self._assert_ok(self._open_json('/project-tracker/projects', 'DELETE', Project()))

    def test_projects_get(self):
        """Test case for projects_get: list all projects."""
        self._assert_ok(self.client.open('/project-tracker/projects', method='GET'))

    def test_projects_patch(self):
        """Test case for projects_patch: you shouldn't put the entire list."""
        self._assert_ok(self._open_json('/project-tracker/projects', 'PATCH', Project()))

    def test_projects_post(self):
        """Test case for projects_post: create a new project."""
        self._assert_ok(self._open_json('/project-tracker/projects', 'POST', Project()))

    def test_projects_put(self):
        """Test case for projects_put: you shouldn't put the entire list."""
        self._assert_ok(self._open_json('/project-tracker/projects', 'PUT', Project()))

    # ------------------------------------------------- /projects/{id}/comments

    def test_projects_id_comments_delete(self):
        """Test case for projects_id_comments_delete: you can't delete the entire list."""
        url = '/project-tracker/projects/{id}/comments'.format(id=56)
        self._assert_ok(self._open_json(url, 'DELETE', Comment()))

    def test_projects_id_comments_get(self):
        """Test case for projects_id_comments_get: list all comments on this project."""
        url = '/project-tracker/projects/{id}/comments'.format(id=56)
        self._assert_ok(self.client.open(url, method='GET'))

    def test_projects_id_comments_patch(self):
        """Test case for projects_id_comments_patch: you can't put the entire list."""
        url = '/project-tracker/projects/{id}/comments'.format(id=56)
        self._assert_ok(self._open_json(url, 'PATCH', Comment()))

    def test_projects_id_comments_post(self):
        """Test case for projects_id_comments_post: create a new comment."""
        url = '/project-tracker/projects/{id}/comments'.format(id=56)
        self._assert_ok(self._open_json(url, 'POST', Comment()))

    def test_projects_id_comments_put(self):
        """Test case for projects_id_comments_put: you can't put the entire list."""
        url = '/project-tracker/projects/{id}/comments'.format(id=56)
        self._assert_ok(self._open_json(url, 'PUT', Comment()))

    # ------------------------------------------------- /projects/{id}/contacts

    def test_projects_id_contacts_delete(self):
        """Test case for projects_id_contacts_delete: you can't delete the entire list."""
        url = '/project-tracker/projects/{id}/contacts'.format(id=56)
        self._assert_ok(self._open_json(url, 'DELETE', Contact()))

    def test_projects_id_contacts_get(self):
        """Test case for projects_id_contacts_get: list all contacts for this project."""
        url = '/project-tracker/projects/{id}/contacts'.format(id=56)
        self._assert_ok(self.client.open(url, method='GET'))

    def test_projects_id_contacts_patch(self):
        """Test case for projects_id_contacts_patch: you can't put the entire list."""
        url = '/project-tracker/projects/{id}/contacts'.format(id=56)
        self._assert_ok(self._open_json(url, 'PATCH', Contact()))

    def test_projects_id_contacts_post(self):
        """Test case for projects_id_contacts_post: add a new contact."""
        url = '/project-tracker/projects/{id}/contacts'.format(id=56)
        self._assert_ok(self._open_json(url, 'POST', Contact()))

    def test_projects_id_contacts_put(self):
        """Test case for projects_id_contacts_put: you can't put the entire list."""
        url = '/project-tracker/projects/{id}/contacts'.format(id=56)
        self._assert_ok(self._open_json(url, 'PUT', Contact()))

    # ------------------------------------------------------- /projects/{id}

    def test_projects_id_delete(self):
        """Test case for projects_id_delete: delete a project."""
        # NOTE(review): float id (3.4) comes from the swagger example values.
        url = '/project-tracker/projects/{id}'.format(id=3.4)
        self._assert_ok(self.client.open(url, method='DELETE'))

    def test_projects_id_get(self):
        """Test case for projects_id_get: get a specific project by id."""
        url = '/project-tracker/projects/{id}'.format(id=56)
        self._assert_ok(self.client.open(url, method='GET'))

    def test_projects_id_post(self):
        """Test case for projects_id_post: update an existing project."""
        url = '/project-tracker/projects/{id}'.format(id=3.4)
        self._assert_ok(self._open_json(url, 'POST', Project()))

    def test_projects_id_put(self):
        """Test case for projects_id_put: update an existing project."""
        url = '/project-tracker/projects/{id}'.format(id=3.4)
        self._assert_ok(self._open_json(url, 'PUT', Project()))
# Allow running this test module directly (outside the project's test runner).
if __name__ == '__main__':
    import unittest
    unittest.main()
| |
import mock
from urlparse import urlparse
import pytest
from nose.tools import * # flake8: noqa
from framework.auth import core
from osf.models import Guid
from api.base.settings.defaults import API_BASE
from api.base.settings import osf_settings
from api_tests import utils as test_utils
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
CommentFactory,
RegistrationFactory,
PrivateLinkFactory,
)
from addons.wiki.tests.factories import NodeWikiFactory
class CommentDetailMixin(object):
    """Shared tests for the comment detail endpoint (``/comments/<id>/``).

    Concrete subclasses implement the ``_set_up_*`` hooks below to create
    the node, the comment under test, and the URL/payload attributes
    (``self.comment``, ``self.private_url``, ``self.payload``,
    ``self.public_comment``, ...) that these tests read.
    """
    def setUp(self):
        # Three personas: the commenter, a second contributor, an outsider.
        super(CommentDetailMixin, self).setUp()
        self.user = AuthUserFactory()
        self.contributor = AuthUserFactory()
        self.non_contributor = AuthUserFactory()
    def _set_up_payload(self, target_id, content='test', has_content=True):
        """Build a JSON-API update payload for comment ``target_id``.

        With ``has_content=False`` the default 'Updating this comment' text
        is left in place; the undelete tests use that form.
        """
        payload = {
            'data': {
                'id': target_id,
                'type': 'comments',
                'attributes': {
                    'content': 'Updating this comment',
                    'deleted': False
                }
            }
        }
        if has_content:
            payload['data']['attributes']['content'] = content
        return payload
    # Fixture hooks -- concrete subclasses populate the attributes used below.
    def _set_up_private_project_with_comment(self):
        raise NotImplementedError
    def _set_up_public_project_with_comment(self):
        raise NotImplementedError
    def _set_up_registration_with_comment(self):
        raise NotImplementedError
    # -- viewing comments -------------------------------------------------
    def test_private_node_logged_in_contributor_can_view_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.get(self.private_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.comment._id, res.json['data']['id'])
        assert_equal(self.comment.content, res.json['data']['attributes']['content'])
    def test_private_node_logged_in_non_contributor_cannot_view_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.get(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
    def test_private_node_logged_out_user_cannot_view_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.get(self.private_url, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
    def test_private_node_user_with_private_link_can_see_comment(self):
        self._set_up_private_project_with_comment()
        private_link = PrivateLinkFactory(anonymous=False)
        private_link.nodes.add(self.private_project)
        private_link.save()
        # NOTE(review): expect_errors=True although a 200 is asserted -- harmless, presumably defensive.
        res = self.app.get('/{}comments/{}/'.format(API_BASE, self.comment._id), {'view_only': private_link.key}, expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_equal(self.comment._id, res.json['data']['id'])
        assert_equal(self.comment.content, res.json['data']['attributes']['content'])
    def test_private_node_user_with_anonymous_link_cannot_see_commenter_info(self):
        self._set_up_private_project_with_comment()
        private_link = PrivateLinkFactory(anonymous=True)
        private_link.nodes.add(self.private_project)
        private_link.save()
        res = self.app.get('/{}comments/{}/'.format(API_BASE, self.comment._id), {'view_only': private_link.key})
        assert_equal(res.status_code, 200)
        assert_equal(self.comment._id, res.json['data']['id'])
        assert_equal(self.comment.content, res.json['data']['attributes']['content'])
        assert_not_in('user', res.json['data']['relationships'])
    def test_private_node_user_with_anonymous_link_cannot_see_mention_info(self):
        self._set_up_private_project_with_comment()
        self.comment.content = 'test with [@username](userlink) and @mention'
        self.comment.save()
        private_link = PrivateLinkFactory(anonymous=True)
        private_link.nodes.add(self.private_project)
        private_link.save()
        res = self.app.get('/{}comments/{}/'.format(API_BASE, self.comment._id), {'view_only': private_link.key})
        assert_equal(res.status_code, 200)
        assert_equal(self.comment._id, res.json['data']['id'])
        assert_equal( 'test with @A User and @mention', res.json['data']['attributes']['content'])
    def test_public_node_logged_in_contributor_can_view_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.public_comment._id, res.json['data']['id'])
        assert_equal(self.public_comment.content, res.json['data']['attributes']['content'])
    def test_public_node_logged_in_non_contributor_can_view_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.public_comment._id, res.json['data']['id'])
        assert_equal(self.public_comment.content, res.json['data']['attributes']['content'])
    def test_public_node_logged_out_user_can_view_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        assert_equal(res.status_code, 200)
        assert_equal(self.public_comment._id, res.json['data']['id'])
        assert_equal(self.public_comment.content, res.json['data']['attributes']['content'])
    def test_public_node_user_with_private_link_can_view_comment(self):
        self._set_up_public_project_with_comment()
        private_link = PrivateLinkFactory(anonymous=False)
        private_link.nodes.add(self.public_project)
        private_link.save()
        res = self.app.get('/{}comments/{}/'.format(API_BASE, self.public_comment._id), {'view_only': private_link.key}, expect_errors=True)
        assert_equal(self.public_comment._id, res.json['data']['id'])
        assert_equal(self.public_comment.content, res.json['data']['attributes']['content'])
    def test_registration_logged_in_contributor_can_view_comment(self):
        self._set_up_registration_with_comment()
        res = self.app.get(self.comment_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.registration_comment._id, res.json['data']['id'])
        assert_equal(self.registration_comment.content, res.json['data']['attributes']['content'])
    # -- relationship links ------------------------------------------------
    def test_comment_has_user_link(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['user']['links']['related']['href']
        expected_url = '/{}users/{}/'.format(API_BASE, self.user._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
    def test_comment_has_node_link(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['node']['links']['related']['href']
        expected_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
    def test_registration_comment_has_node_link(self):
        self._set_up_registration_with_comment()
        res = self.app.get(self.comment_url, auth=self.user.auth)
        url = res.json['data']['relationships']['node']['links']['related']['href']
        expected_url = '/{}registrations/{}/'.format(API_BASE, self.registration._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
    def test_comment_has_replies_link(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        assert_equal(res.status_code, 200)
        url = res.json['data']['relationships']['replies']['links']['related']['href']
        uri = test_utils.urlparse_drop_netloc(url)
        res = self.app.get(uri)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data'][0]['type'], 'comments')
    def test_comment_has_reports_link(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['reports']['links']['related']['href']
        expected_url = '/{}comments/{}/reports/'.format(API_BASE, self.public_comment._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
    # -- updating comments -------------------------------------------------
    def test_private_node_only_logged_in_contributor_commenter_can_update_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.put_json_api(self.private_url, self.payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.payload['data']['attributes']['content'], res.json['data']['attributes']['content'])
    def test_private_node_logged_in_non_contributor_cannot_update_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.put_json_api(self.private_url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
    def test_private_node_logged_out_user_cannot_update_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.put_json_api(self.private_url, self.payload, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
    def test_public_node_only_contributor_commenter_can_update_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.put_json_api(self.public_url, self.public_comment_payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.public_comment_payload['data']['attributes']['content'], res.json['data']['attributes']['content'])
    def test_public_node_contributor_cannot_update_other_users_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.put_json_api(self.public_url, self.public_comment_payload, auth=self.contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
    def test_public_node_non_contributor_cannot_update_other_users_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.put_json_api(self.public_url, self.public_comment_payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
    def test_public_node_logged_out_user_cannot_update_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.put_json_api(self.public_url, self.public_comment_payload, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
    def test_update_comment_cannot_exceed_max_length(self):
        self._set_up_private_project_with_comment()
        content = ('c' * (osf_settings.COMMENT_MAXLENGTH + 3))
        payload = self._set_up_payload(self.comment._id, content=content)
        res = self.app.put_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'],
                     'Ensure this field has no more than {} characters.'.format(str(osf_settings.COMMENT_MAXLENGTH)))
    def test_update_comment_cannot_be_empty(self):
        self._set_up_private_project_with_comment()
        payload = self._set_up_payload(self.comment._id, content='')
        res = self.app.put_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'This field may not be blank.')
    # -- deleting and undeleting comments ----------------------------------
    def test_private_node_only_logged_in_contributor_commenter_can_delete_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.delete_json_api(self.private_url, auth=self.user.auth)
        assert_equal(res.status_code, 204)
    def test_private_node_only_logged_in_contributor_commenter_can_delete_own_reply(self):
        self._set_up_private_project_with_comment()
        reply_target = Guid.load(self.comment._id)
        reply = CommentFactory(node=self.private_project, target=reply_target, user=self.user)
        reply_url = '/{}comments/{}/'.format(API_BASE, reply._id)
        res = self.app.delete_json_api(reply_url, auth=self.user.auth)
        assert_equal(res.status_code, 204)
    def test_private_node_only_logged_in_contributor_commenter_can_undelete_own_reply(self):
        self._set_up_private_project_with_comment()
        reply_target = Guid.load(self.comment._id)
        reply = CommentFactory(node=self.private_project, target=reply_target, user=self.user)
        reply_url = '/{}comments/{}/'.format(API_BASE, reply._id)
        reply.is_deleted = True
        reply.save()
        # has_content=False: PATCH with deleted=False undeletes without editing.
        payload = self._set_up_payload(reply._id, has_content=False)
        res = self.app.patch_json_api(reply_url, payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_false(res.json['data']['attributes']['deleted'])
        assert_equal(res.json['data']['attributes']['content'], reply.content)
    def test_private_node_contributor_cannot_delete_other_users_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.delete_json_api(self.private_url, auth=self.contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
    def test_private_node_non_contributor_cannot_delete_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.delete_json_api(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
    def test_private_node_logged_out_user_cannot_delete_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.delete_json_api(self.private_url, expect_errors=True)
        assert_equal(res.status_code, 401)
    def test_private_node_user_cannot_delete_already_deleted_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        res = self.app.delete_json_api(self.private_url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Comment already deleted.')
    def test_private_node_only_logged_in_contributor_commenter_can_undelete_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        payload = self._set_up_payload(self.comment._id, has_content=False)
        res = self.app.patch_json_api(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_false(res.json['data']['attributes']['deleted'])
        assert_equal(res.json['data']['attributes']['content'], self.comment.content)
    def test_private_node_contributor_cannot_undelete_other_users_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        payload = self._set_up_payload(self.comment._id, has_content=False)
        res = self.app.patch_json_api(url, payload, auth=self.contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_private_node_non_contributor_cannot_undelete_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        payload = self._set_up_payload(self.comment._id, has_content=False)
        res = self.app.patch_json_api(url, payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_private_node_logged_out_user_cannot_undelete_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        payload = self._set_up_payload(self.comment._id, has_content=False)
        res = self.app.patch_json_api(url, payload, expect_errors=True)
        assert_equal(res.status_code, 401)
    def test_public_node_only_logged_in_contributor_commenter_can_delete_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.delete_json_api(self.public_url, auth=self.user.auth)
        assert_equal(res.status_code, 204)
    def test_public_node_contributor_cannot_delete_other_users_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.delete_json_api(self.public_url, auth=self.contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
    def test_public_node_non_contributor_cannot_delete_other_users_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.delete_json_api(self.public_url, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
    def test_public_node_logged_out_user_cannot_delete_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.delete_json_api(self.public_url, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
    def test_public_node_user_cannot_delete_already_deleted_comment(self):
        self._set_up_public_project_with_comment()
        self.public_comment.is_deleted = True
        self.public_comment.save()
        res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Comment already deleted.')
    # -- visibility of deleted comments ------------------------------------
    def test_private_node_only_logged_in_commenter_can_view_deleted_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['attributes']['content'], self.comment.content)
    def test_private_node_contributor_cannot_see_other_users_deleted_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        res = self.app.get(url, auth=self.contributor.auth)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])
    def test_private_node_logged_out_user_cannot_see_deleted_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        res = self.app.get(url, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
    def test_private_node_view_only_link_user_cannot_see_deleted_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        private_link = PrivateLinkFactory(anonymous=False)
        private_link.nodes.add(self.private_project)
        private_link.save()
        res = self.app.get('/{}comments/{}/'.format(API_BASE, self.comment._id), {'view_only': private_link.key}, expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])
    def test_private_node_anonymous_view_only_link_user_cannot_see_deleted_comment(self):
        self._set_up_private_project_with_comment()
        self.comment.is_deleted = True
        self.comment.save()
        anonymous_link = PrivateLinkFactory(anonymous=True)
        anonymous_link.nodes.add(self.private_project)
        anonymous_link.save()
        res = self.app.get('/{}comments/{}/'.format(API_BASE, self.comment._id), {'view_only': anonymous_link.key}, expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])
    def test_public_node_only_logged_in_commenter_can_view_deleted_comment(self):
        self._set_up_public_project_with_comment()
        self.public_comment.is_deleted = True
        self.public_comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.public_comment._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['attributes']['content'], self.public_comment.content)
    def test_public_node_contributor_cannot_view_other_users_deleted_comment(self):
        self._set_up_public_project_with_comment()
        self.public_comment.is_deleted = True
        self.public_comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.public_comment._id)
        res = self.app.get(url, auth=self.contributor.auth)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])
    def test_public_node_non_contributor_cannot_view_other_users_deleted_comment(self):
        self._set_up_public_project_with_comment()
        self.public_comment.is_deleted = True
        self.public_comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.public_comment._id)
        res = self.app.get(url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])
    def test_public_node_logged_out_user_cannot_view_deleted_comments(self):
        self._set_up_public_project_with_comment()
        self.public_comment.is_deleted = True
        self.public_comment.save()
        url = '/{}comments/{}/'.format(API_BASE, self.public_comment._id)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])
    def test_public_node_view_only_link_user_cannot_see_deleted_comment(self):
        self._set_up_public_project_with_comment()
        self.public_comment.is_deleted = True
        self.public_comment.save()
        private_link = PrivateLinkFactory(anonymous=False)
        private_link.nodes.add(self.public_project)
        private_link.save()
        res = self.app.get('/{}comments/{}/'.format(API_BASE, self.public_comment._id), {'view_only': private_link.key}, expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])
class TestCommentDetailView(CommentDetailMixin, ApiTestCase):
    """Exercises /comments/{id}/ for comments whose target is a node.

    CommentDetailMixin supplies the shared detail-view tests; the
    ``_set_up_*`` hooks below provide node-targeted fixtures, and the
    extra tests cover node-specific behavior.
    """

    def _set_up_private_project_with_comment(self):
        # Private project with a single comment by the project creator.
        self.private_project = ProjectFactory.create(is_public=False, creator=self.user)
        self.private_project.add_contributor(self.contributor, save=True)
        self.comment = CommentFactory(node=self.private_project, user=self.user)
        self.private_url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        self.payload = self._set_up_payload(self.comment._id)

    def _set_up_public_project_with_comment(self):
        # Public project with a comment and a reply targeting that comment.
        self.public_project = ProjectFactory.create(is_public=True, creator=self.user)
        self.public_project.add_contributor(self.contributor, save=True)
        self.public_comment = CommentFactory(node=self.public_project, user=self.user)
        reply_target = Guid.load(self.public_comment._id)
        self.public_comment_reply = CommentFactory(node=self.public_project, target=reply_target, user=self.user)
        self.public_url = '/{}comments/{}/'.format(API_BASE, self.public_comment._id)
        self.public_comment_payload = self._set_up_payload(self.public_comment._id)

    def _set_up_registration_with_comment(self):
        # Registration with a comment, a reply to it, and a filtered
        # replies listing URL.
        self.registration = RegistrationFactory(creator=self.user)
        self.registration_url = '/{}registrations/{}/'.format(API_BASE, self.registration._id)
        self.registration_comment = CommentFactory(node=self.registration, user=self.user)
        self.comment_url = '/{}comments/{}/'.format(API_BASE, self.registration_comment._id)
        reply_target = Guid.load(self.registration_comment._id)
        self.registration_comment_reply = CommentFactory(node=self.registration, target=reply_target, user=self.user)
        self.replies_url = '/{}registrations/{}/comments/?filter[target]={}'.format(API_BASE, self.registration._id, self.registration_comment._id)

    def test_comment_has_target_link_with_correct_type(self):
        """Target relationship should point at the node with type 'nodes'."""
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['target']['links']['related']['href']
        expected_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
        target_type = res.json['data']['relationships']['target']['links']['related']['meta']['type']
        expected_type = 'nodes'
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
        assert_equal(target_type, expected_type)

    def test_public_node_non_contributor_commenter_can_update_comment(self):
        project = ProjectFactory(is_public=True, comment_level='public')
        comment = CommentFactory(node=project, user=self.non_contributor)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = self._set_up_payload(comment._id)
        res = self.app.put_json_api(url, payload, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_equal(payload['data']['attributes']['content'], res.json['data']['attributes']['content'])

    def test_public_node_non_contributor_commenter_cannot_update_own_comment_if_comment_level_private(self):
        project = ProjectFactory(is_public=True, comment_level='public')
        comment = CommentFactory(node=project, user=self.non_contributor)
        # Tighten commenting to contributors only after the comment exists.
        project.comment_level = 'private'
        project.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = self._set_up_payload(comment._id)
        res = self.app.put_json_api(url, payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')

    def test_public_node_non_contributor_commenter_can_delete_comment(self):
        project = ProjectFactory(is_public=True)
        comment = CommentFactory(node=project, user=self.non_contributor)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.delete_json_api(url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 204)

    def test_registration_comment_has_usable_replies_relationship_link(self):
        self._set_up_registration_with_comment()
        res = self.app.get(self.registration_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        comments_url = res.json['data']['relationships']['comments']['links']['related']['href']
        comments_uri = test_utils.urlparse_drop_netloc(comments_url)
        comments_res = self.app.get(comments_uri, auth=self.user.auth)
        assert_equal(comments_res.status_code, 200)
        replies_url = comments_res.json['data'][0]['relationships']['replies']['links']['related']['href']
        replies_uri = test_utils.urlparse_drop_netloc(replies_url)
        replies_res = self.app.get(replies_uri, auth=self.user.auth)
        # BUG FIX: the replies response was fetched but never checked;
        # assert the link actually works and returns the known reply.
        assert_equal(replies_res.status_code, 200)
        assert_in(self.registration_comment_reply._id,
                  [reply['id'] for reply in replies_res.json['data']])
        node_url = comments_res.json['data'][0]['relationships']['node']['links']['related']['href']
        node_uri = test_utils.urlparse_drop_netloc(node_url)
        assert_equal(node_uri, self.registration_url)

    def test_registration_comment_has_usable_node_relationship_link(self):
        self._set_up_registration_with_comment()
        res = self.app.get(self.registration_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        comments_url = res.json['data']['relationships']['comments']['links']['related']['href']
        comments_uri = test_utils.urlparse_drop_netloc(comments_url)
        comments_res = self.app.get(comments_uri, auth=self.user.auth)
        assert_equal(comments_res.status_code, 200)
        node_url = comments_res.json['data'][0]['relationships']['node']['links']['related']['href']
        node_uri = test_utils.urlparse_drop_netloc(node_url)
        node_res = self.app.get(node_uri, auth=self.user.auth)
        assert_in(self.registration._id, node_res.json['data']['id'])
class TestFileCommentDetailView(CommentDetailMixin, ApiTestCase):
    """Exercises /comments/{id}/ for comments whose target is a file.

    CommentDetailMixin supplies the shared detail-view tests; the
    ``_set_up_*`` hooks below build file-targeted fixtures, and the extra
    tests cover file-specific behavior.
    """

    def _set_up_private_project_with_comment(self):
        # Private project with a file and a comment targeting that file.
        self.private_project = ProjectFactory.create(is_public=False, creator=self.user, comment_level='private')
        self.private_project.add_contributor(self.contributor, save=True)
        self.file = test_utils.create_test_file(self.private_project, self.user)
        self.comment = CommentFactory(node=self.private_project, target=self.file.get_guid(), user=self.user)
        self.private_url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        self.payload = self._set_up_payload(self.comment._id)

    def _set_up_public_project_with_comment(self):
        # Public project with a file comment plus a reply to that comment.
        self.public_project = ProjectFactory.create(is_public=True, creator=self.user, comment_level='private')
        self.public_project.add_contributor(self.contributor, save=True)
        self.public_file = test_utils.create_test_file(self.public_project, self.user)
        self.public_comment = CommentFactory(node=self.public_project, target=self.public_file.get_guid(), user=self.user)
        reply_target = Guid.load(self.public_comment._id)
        self.public_comment_reply = CommentFactory(node=self.public_project, target=reply_target, user=self.user)
        self.public_url = '/{}comments/{}/'.format(API_BASE, self.public_comment._id)
        self.public_comment_payload = self._set_up_payload(self.public_comment._id)

    def _set_up_registration_with_comment(self):
        # Registration with a file comment plus a reply to that comment.
        self.registration = RegistrationFactory(creator=self.user, comment_level='private')
        self.registration_file = test_utils.create_test_file(self.registration, self.user)
        self.registration_comment = CommentFactory(node=self.registration, target=self.registration_file.get_guid(), user=self.user)
        self.comment_url = '/{}comments/{}/'.format(API_BASE, self.registration_comment._id)
        reply_target = Guid.load(self.registration_comment._id)
        self.registration_comment_reply = CommentFactory(node=self.registration, target=reply_target, user=self.user)

    def test_file_comment_has_target_link_with_correct_type(self):
        """Target relationship should point at the file with type 'files'."""
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['target']['links']['related']['href']
        expected_url = '/{}files/{}/'.format(API_BASE, self.public_file._id)
        target_type = res.json['data']['relationships']['target']['links']['related']['meta']['type']
        expected_type = 'files'
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
        assert_equal(target_type, expected_type)

    def test_public_node_non_contributor_commenter_can_update_file_comment(self):
        project = ProjectFactory(is_public=True)
        test_file = test_utils.create_test_file(project, project.creator)
        comment = CommentFactory(node=project, target=test_file.get_guid(), user=self.non_contributor)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = self._set_up_payload(comment._id)
        res = self.app.put_json_api(url, payload, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_equal(payload['data']['attributes']['content'], res.json['data']['attributes']['content'])

    def test_public_node_non_contributor_commenter_cannot_update_own_file_comment_if_comment_level_private(self):
        project = ProjectFactory(is_public=True)
        test_file = test_utils.create_test_file(project, project.creator)
        comment = CommentFactory(node=project, target=test_file.get_guid(), user=self.non_contributor)
        # Tighten commenting to contributors only after the comment exists.
        project.comment_level = 'private'
        project.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = self._set_up_payload(comment._id)
        res = self.app.put_json_api(url, payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')

    def test_public_node_non_contributor_commenter_can_delete_file_comment(self):
        project = ProjectFactory(is_public=True, comment_level='public')
        test_file = test_utils.create_test_file(project, project.creator)
        comment = CommentFactory(node=project, target=test_file.get_guid(), user=self.non_contributor)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.delete_json_api(url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 204)

    def test_comment_detail_for_deleted_file_is_not_returned(self):
        self._set_up_private_project_with_comment()
        # Delete the commented file; the comment detail should 404.
        # (Removed dead locals that fetched the osfstorage addon/root —
        # the delete only needs the file itself.)
        self.file.delete()
        res = self.app.get(self.private_url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
class TestWikiCommentDetailView(CommentDetailMixin, ApiTestCase):
    """Exercises /comments/{id}/ for comments whose target is a wiki page.

    CommentDetailMixin supplies the shared detail-view tests; the
    ``_set_up_*`` hooks below build wiki-targeted fixtures, and the extra
    tests cover wiki-specific behavior.
    """

    def _set_up_private_project_with_comment(self):
        # Private project with a wiki page and a comment targeting it.
        self.private_project = ProjectFactory.create(is_public=False, creator=self.user, comment_level='private')
        self.private_project.add_contributor(self.contributor, save=True)
        # Patch update_search so creating the wiki does not hit the
        # search backend.
        with mock.patch('osf.models.AbstractNode.update_search'):
            self.wiki = NodeWikiFactory(node=self.private_project, user=self.user)
        self.comment = CommentFactory(node=self.private_project, target=Guid.load(self.wiki._id), user=self.user)
        self.private_url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        self.payload = self._set_up_payload(self.comment._id)

    def _set_up_public_project_with_comment(self):
        # Public project with a wiki comment plus a reply to that comment.
        self.public_project = ProjectFactory.create(is_public=True, creator=self.user, comment_level='private')
        self.public_project.add_contributor(self.contributor, save=True)
        with mock.patch('osf.models.AbstractNode.update_search'):
            self.public_wiki = NodeWikiFactory(node=self.public_project, user=self.user)
        self.public_comment = CommentFactory(node=self.public_project, target=Guid.load(self.public_wiki._id), user=self.user)
        reply_target = Guid.load(self.public_comment._id)
        self.public_comment_reply = CommentFactory(node=self.public_project, target=reply_target, user=self.user)
        self.public_url = '/{}comments/{}/'.format(API_BASE, self.public_comment._id)
        self.public_comment_payload = self._set_up_payload(self.public_comment._id)

    def _set_up_registration_with_comment(self):
        # Registration with a wiki comment plus a reply to that comment.
        self.registration = RegistrationFactory(creator=self.user, comment_level='private')
        with mock.patch('osf.models.AbstractNode.update_search'):
            self.registration_wiki = NodeWikiFactory(node=self.registration, user=self.user)
        self.registration_comment = CommentFactory(node=self.registration, target=Guid.load(self.registration_wiki._id), user=self.user)
        self.comment_url = '/{}comments/{}/'.format(API_BASE, self.registration_comment._id)
        reply_target = Guid.load(self.registration_comment._id)
        self.registration_comment_reply = CommentFactory(node=self.registration, target=reply_target, user=self.user)

    def test_wiki_comment_has_target_link_with_correct_type(self):
        """Target relationship should point at the wiki with type 'wiki'."""
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['target']['links']['related']['href']
        expected_url = self.public_wiki.get_absolute_url()
        target_type = res.json['data']['relationships']['target']['links']['related']['meta']['type']
        expected_type = 'wiki'
        assert_equal(res.status_code, 200)
        assert_equal(url, expected_url)
        assert_equal(target_type, expected_type)

    def test_public_node_non_contributor_commenter_can_update_wiki_comment(self):
        project = ProjectFactory(is_public=True)
        test_wiki = NodeWikiFactory(node=project, user=self.user)
        comment = CommentFactory(node=project, target=Guid.load(test_wiki._id), user=self.non_contributor)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = self._set_up_payload(comment._id)
        res = self.app.put_json_api(url, payload, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_equal(payload['data']['attributes']['content'], res.json['data']['attributes']['content'])

    def test_public_node_non_contributor_commenter_cannot_update_own_wiki_comment_if_comment_level_private(self):
        project = ProjectFactory(is_public=True)
        test_wiki = NodeWikiFactory(node=project, user=self.user)
        comment = CommentFactory(node=project, target=Guid.load(test_wiki._id), user=self.non_contributor)
        # Tighten commenting to contributors only after the comment exists.
        project.comment_level = 'private'
        project.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = self._set_up_payload(comment._id)
        res = self.app.put_json_api(url, payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')

    def test_public_node_non_contributor_commenter_can_delete_wiki_comment(self):
        project = ProjectFactory(is_public=True, comment_level='public')
        test_wiki = NodeWikiFactory(node=project, user=self.user)
        comment = CommentFactory(node=project, target=Guid.load(test_wiki._id), user=self.non_contributor)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.delete_json_api(url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 204)

    def test_comment_detail_for_deleted_wiki_is_not_returned(self):
        self._set_up_private_project_with_comment()
        # Delete commented wiki page; the comment detail should 404.
        self.private_project.delete_node_wiki(self.wiki.page_name, core.Auth(self.user))
        res = self.app.get(self.private_url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
| |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package attention
# Module caffe2.python.attention
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew
class AttentionType:
    """Enumerates the attention mechanisms implemented in this module."""
    Regular = 0
    Recurrent = 1
    Dot = 2
    SoftCoverage = 3
def s(scope, name):
    """Return *name* qualified by *scope* as 'scope/name'.

    We have to manually scope due to our internal/external blob
    relationships.
    """
    return "/".join((str(scope), str(name)))
# c_i = \sum_j w_{ij}\textbf{s}_j
def _calc_weighted_context(
    model,
    encoder_outputs_transposed,
    encoder_output_dim,
    attention_weights_3d,
    scope,
):
    """Reduce encoder outputs to one attention context per example.

    Computes c_i = sum_j w_ij * s_j as a batched matrix multiply of the
    transposed encoder outputs with the attention weights.

    Args:
        model: model helper used to add ops to the net.
        encoder_outputs_transposed: blob of shape
            [batch_size, encoder_output_dim, encoder_length].
        encoder_output_dim: width of each encoder output vector.
        attention_weights_3d: blob of shape
            [batch_size, encoder_length, 1].
        scope: string used to namespace the created blobs (via s()).

    Returns:
        Context blob reshaped to [1, batch_size, encoder_output_dim]
        (time-major with a singleton step axis).
    """
    # [batch_size, encoder_output_dim, 1]
    attention_weighted_encoder_context = brew.batch_mat_mul(
        model,
        [encoder_outputs_transposed, attention_weights_3d],
        s(scope, 'attention_weighted_encoder_context'),
    )
    # [1, batch_size, encoder_output_dim] after the reshape below
    attention_weighted_encoder_context, _ = model.net.Reshape(
        attention_weighted_encoder_context,
        [
            attention_weighted_encoder_context,
            s(scope, 'attention_weighted_encoder_context_old_shape'),
        ],
        shape=[1, -1, encoder_output_dim],
    )
    return attention_weighted_encoder_context
# Calculate a softmax over the passed in attention energy logits
def _calc_attention_weights(
    model,
    attention_logits_transposed,
    scope,
    encoder_lengths=None,
):
    """Normalize attention logits into weights with a softmax.

    Args:
        model: model helper used to add ops to the net.
        attention_logits_transposed: blob of shape
            [batch_size, encoder_length, 1] with unnormalized energies.
        scope: string used to namespace the created blobs (via s()).
        encoder_lengths: optional blob of per-example source lengths; when
            given, positions beyond each length are masked before the
            softmax so padding receives (effectively) zero weight.

    Returns:
        [batch_size, encoder_length, 1] blob of weights normalized along
        the encoder_length axis (axis=1).
    """
    if encoder_lengths is not None:
        # NOTE(review): this output name is not wrapped in s(), so unlike
        # the other blobs here it is not namespaced per scope — confirm
        # this is intentional.
        attention_logits_transposed = model.net.SequenceMask(
            [attention_logits_transposed, encoder_lengths],
            ['masked_attention_logits'],
            mode='sequence',
        )
    # [batch_size, encoder_length, 1]
    attention_weights_3d = brew.softmax(
        model,
        attention_logits_transposed,
        s(scope, 'attention_weights_3d'),
        engine='CUDNN',
        axis=1,
    )
    return attention_weights_3d
# e_{ij} = \textbf{v}^T tanh \alpha(\textbf{h}_{i-1}, \textbf{s}_j)
def _calc_attention_logits_from_sum_match(
    model,
    decoder_hidden_encoder_outputs_sum,
    encoder_output_dim,
    scope,
):
    """Turn the summed match term into per-position attention logits.

    Implements e_ij = v^T tanh(alpha(h_{i-1}, s_j)): a tanh nonlinearity
    followed by a width-1 FC (the learned vector v, with frozen bias) and
    a transpose to batch-major layout.

    Args:
        model: model helper used to add ops to the net.
        decoder_hidden_encoder_outputs_sum: blob of shape
            [encoder_length, batch_size, encoder_output_dim].
        encoder_output_dim: width of each encoder output vector.
        scope: string used to namespace the created blobs (via s()).

    Returns:
        [batch_size, encoder_length, 1] blob of attention logits.
    """
    # [encoder_length, batch_size, encoder_output_dim]
    # Tanh is applied in place (same input and output blob).
    decoder_hidden_encoder_outputs_sum = model.net.Tanh(
        decoder_hidden_encoder_outputs_sum,
        decoder_hidden_encoder_outputs_sum,
    )
    # [encoder_length, batch_size, 1]
    attention_logits = brew.fc(
        model,
        decoder_hidden_encoder_outputs_sum,
        s(scope, 'attention_logits'),
        dim_in=encoder_output_dim,
        dim_out=1,
        axis=2,
        freeze_bias=True,
    )
    # [batch_size, encoder_length, 1]
    attention_logits_transposed = brew.transpose(
        model,
        attention_logits,
        s(scope, 'attention_logits_transposed'),
        axes=[1, 0, 2],
    )
    return attention_logits_transposed
# \textbf{W}^\alpha used in the context of \alpha_{sum}(a,b)
def _apply_fc_weight_for_sum_match(
    model,
    input,
    dim_in,
    dim_out,
    scope,
    name,
):
    """Project `input` with a learned matrix (W^alpha in alpha_sum(a, b)).

    Applies an FC along axis 2, then squeezes the leading singleton axis:
    [1, batch_size, dim_in] -> [batch_size, dim_out].

    Args:
        model: model helper used to add ops to the net.
        input: [1, batch_size, dim_in] blob.  (The parameter shadows the
            `input` builtin, but callers pass it by keyword, so the name
            is part of the API and must stay.)
        dim_in: input feature width.
        dim_out: output feature width.
        scope: string used to namespace the created blobs (via s()).
        name: blob name for the FC output within `scope`.

    Returns:
        [batch_size, dim_out] blob.
    """
    output = brew.fc(
        model,
        input,
        s(scope, name),
        dim_in=dim_in,
        dim_out=dim_out,
        axis=2,
    )
    # Squeeze is applied in place (same input and output blob).
    output = model.net.Squeeze(
        output,
        output,
        dims=[0],
    )
    return output
# Implement RecAtt due to section 4.1 in http://arxiv.org/abs/1601.03317
def apply_recurrent_attention(
    model,
    encoder_output_dim,
    encoder_outputs_transposed,
    weighted_encoder_outputs,
    decoder_hidden_state_t,
    decoder_hidden_state_dim,
    attention_weighted_encoder_context_t_prev,
    scope,
    encoder_lengths=None,
):
    """Apply recurrent attention (RecAtt) for one decoder step.

    See section 4.1 of http://arxiv.org/abs/1601.03317.  Differs from
    regular attention in that the previous step's attention context is
    fed back into the additive match function.

    Args:
        model: model helper used to add ops to the net.
        encoder_output_dim: width of each encoder output vector.
        encoder_outputs_transposed: blob of shape
            [batch_size, encoder_output_dim, encoder_length].
        weighted_encoder_outputs: precomputed projection of the encoder
            outputs, [encoder_length, batch_size, encoder_output_dim].
        decoder_hidden_state_t: current decoder hidden state,
            [1, batch_size, decoder_hidden_state_dim].
        decoder_hidden_state_dim: width of the decoder hidden state.
        attention_weighted_encoder_context_t_prev: attention context from
            the previous step, [1, batch_size, encoder_output_dim].
        scope: string used to namespace the created blobs (via s()).
        encoder_lengths: optional per-example source lengths used to mask
            padding positions before the softmax.

    Returns:
        Tuple (attention_weighted_encoder_context, attention_weights_3d,
        [decoder_hidden_encoder_outputs_sum]); the trailing list exposes
        the intermediate sum blob to the caller.
    """
    # Project the previous attention context into encoder space.
    weighted_prev_attention_context = _apply_fc_weight_for_sum_match(
        model=model,
        input=attention_weighted_encoder_context_t_prev,
        dim_in=encoder_output_dim,
        dim_out=encoder_output_dim,
        scope=scope,
        name='weighted_prev_attention_context',
    )
    # Project the decoder hidden state into encoder space.
    weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
        model=model,
        input=decoder_hidden_state_t,
        dim_in=decoder_hidden_state_dim,
        dim_out=encoder_output_dim,
        scope=scope,
        name='weighted_decoder_hidden_state',
    )
    # [1, batch_size, encoder_output_dim]
    decoder_hidden_encoder_outputs_sum_tmp = model.net.Add(
        [
            weighted_prev_attention_context,
            weighted_decoder_hidden_state,
        ],
        s(scope, 'decoder_hidden_encoder_outputs_sum_tmp'),
    )
    # [encoder_length, batch_size, encoder_output_dim]
    decoder_hidden_encoder_outputs_sum = model.net.Add(
        [
            weighted_encoder_outputs,
            decoder_hidden_encoder_outputs_sum_tmp,
        ],
        s(scope, 'decoder_hidden_encoder_outputs_sum'),
        broadcast=1,
    )
    attention_logits_transposed = _calc_attention_logits_from_sum_match(
        model=model,
        decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
        encoder_output_dim=encoder_output_dim,
        scope=scope,
    )
    # [batch_size, encoder_length, 1]
    attention_weights_3d = _calc_attention_weights(
        model=model,
        attention_logits_transposed=attention_logits_transposed,
        scope=scope,
        encoder_lengths=encoder_lengths,
    )
    # [1, batch_size, encoder_output_dim]
    attention_weighted_encoder_context = _calc_weighted_context(
        model=model,
        encoder_outputs_transposed=encoder_outputs_transposed,
        encoder_output_dim=encoder_output_dim,
        attention_weights_3d=attention_weights_3d,
        scope=scope,
    )
    return attention_weighted_encoder_context, attention_weights_3d, [
        decoder_hidden_encoder_outputs_sum,
    ]
def apply_regular_attention(
    model,
    encoder_output_dim,
    encoder_outputs_transposed,
    weighted_encoder_outputs,
    decoder_hidden_state_t,
    decoder_hidden_state_dim,
    scope,
    encoder_lengths=None,
):
    """Apply additive (sum-match) attention for one decoder step.

    The decoder hidden state is projected to encoder space, added to the
    precomputed weighted encoder outputs, scored, softmaxed, and used to
    form a weighted context over the encoder outputs.

    Args:
        model: model helper used to add ops to the net.
        encoder_output_dim: width of each encoder output vector.
        encoder_outputs_transposed: blob of shape
            [batch_size, encoder_output_dim, encoder_length].
        weighted_encoder_outputs: precomputed projection of the encoder
            outputs, [encoder_length, batch_size, encoder_output_dim].
        decoder_hidden_state_t: current decoder hidden state,
            [1, batch_size, decoder_hidden_state_dim].
        decoder_hidden_state_dim: width of the decoder hidden state.
        scope: string used to namespace the created blobs (via s()).
        encoder_lengths: optional per-example source lengths used to mask
            padding positions before the softmax.

    Returns:
        Tuple (attention_weighted_encoder_context, attention_weights_3d,
        [decoder_hidden_encoder_outputs_sum]); the trailing list exposes
        the intermediate sum blob to the caller.
    """
    # Project the decoder hidden state into encoder space.
    weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
        model=model,
        input=decoder_hidden_state_t,
        dim_in=decoder_hidden_state_dim,
        dim_out=encoder_output_dim,
        scope=scope,
        name='weighted_decoder_hidden_state',
    )
    # [encoder_length, batch_size, encoder_output_dim]
    decoder_hidden_encoder_outputs_sum = model.net.Add(
        [weighted_encoder_outputs, weighted_decoder_hidden_state],
        s(scope, 'decoder_hidden_encoder_outputs_sum'),
        broadcast=1,
        use_grad_hack=1,
    )
    attention_logits_transposed = _calc_attention_logits_from_sum_match(
        model=model,
        decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
        encoder_output_dim=encoder_output_dim,
        scope=scope,
    )
    # [batch_size, encoder_length, 1]
    attention_weights_3d = _calc_attention_weights(
        model=model,
        attention_logits_transposed=attention_logits_transposed,
        scope=scope,
        encoder_lengths=encoder_lengths,
    )
    # [1, batch_size, encoder_output_dim]
    attention_weighted_encoder_context = _calc_weighted_context(
        model=model,
        encoder_outputs_transposed=encoder_outputs_transposed,
        encoder_output_dim=encoder_output_dim,
        attention_weights_3d=attention_weights_3d,
        scope=scope,
    )
    return attention_weighted_encoder_context, attention_weights_3d, [
        decoder_hidden_encoder_outputs_sum,
    ]
def apply_dot_attention(
    model,
    encoder_output_dim,
    # [batch_size, encoder_output_dim, encoder_length]
    encoder_outputs_transposed,
    # [1, batch_size, decoder_state_dim]
    decoder_hidden_state_t,
    decoder_hidden_state_dim,
    scope,
    encoder_lengths=None,
):
    """Apply dot-product attention for one decoder step.

    Attention energies are the dot products between each encoder output
    and the (optionally projected) decoder hidden state; no learned match
    vector is needed, so no intermediate blobs are returned for reuse.

    Args:
        model: model helper used to add ops to the net.
        encoder_output_dim: width of each encoder output vector.
        encoder_outputs_transposed: blob of shape
            [batch_size, encoder_output_dim, encoder_length].
        decoder_hidden_state_t: [1, batch_size, decoder_state_dim] blob.
        decoder_hidden_state_dim: width of the decoder hidden state.
        scope: string used to namespace the created blobs (via s()).
        encoder_lengths: optional per-example source lengths used to mask
            padding positions before the softmax.

    Returns:
        Tuple (attention_weighted_encoder_context, attention_weights_3d,
        []); the empty list means there are no recurrent intermediates.
    """
    if decoder_hidden_state_dim != encoder_output_dim:
        # Dimensions differ, so project the decoder state to encoder space
        # before taking dot products.
        weighted_decoder_hidden_state = brew.fc(
            model,
            decoder_hidden_state_t,
            s(scope, 'weighted_decoder_hidden_state'),
            dim_in=decoder_hidden_state_dim,
            dim_out=encoder_output_dim,
            axis=2,
        )
    else:
        weighted_decoder_hidden_state = decoder_hidden_state_t
    # [batch_size, decoder_state_dim]
    squeezed_weighted_decoder_hidden_state = model.net.Squeeze(
        weighted_decoder_hidden_state,
        s(scope, 'squeezed_weighted_decoder_hidden_state'),
        dims=[0],
    )
    # [batch_size, decoder_state_dim, 1]
    # ExpandDims is applied in place (same input and output blob).
    expanddims_squeezed_weighted_decoder_hidden_state = model.net.ExpandDims(
        squeezed_weighted_decoder_hidden_state,
        squeezed_weighted_decoder_hidden_state,
        dims=[2],
    )
    # [batch_size, encoder_length, 1]: trans_a flips the first input to
    # [batch_size, encoder_length, encoder_output_dim] before the matmul.
    attention_logits_transposed = model.net.BatchMatMul(
        [
            encoder_outputs_transposed,
            expanddims_squeezed_weighted_decoder_hidden_state,
        ],
        s(scope, 'attention_logits'),
        trans_a=1,
    )
    # [batch_size, encoder_length, 1]
    attention_weights_3d = _calc_attention_weights(
        model=model,
        attention_logits_transposed=attention_logits_transposed,
        scope=scope,
        encoder_lengths=encoder_lengths,
    )
    # [1, batch_size, encoder_output_dim]
    attention_weighted_encoder_context = _calc_weighted_context(
        model=model,
        encoder_outputs_transposed=encoder_outputs_transposed,
        encoder_output_dim=encoder_output_dim,
        attention_weights_3d=attention_weights_3d,
        scope=scope,
    )
    return attention_weighted_encoder_context, attention_weights_3d, []
def apply_soft_coverage_attention(
    model,
    encoder_output_dim,
    encoder_outputs_transposed,
    weighted_encoder_outputs,
    decoder_hidden_state_t,
    decoder_hidden_state_dim,
    scope,
    encoder_lengths,
    coverage_t_prev,
    coverage_weights,
):
    """Apply additive attention with a soft coverage term.

    Like regular attention, but a running "coverage" vector (accumulated
    attention mass per source position) is scaled by learned coverage
    weights and added to the match sum before scoring; the updated
    coverage is returned so the caller can carry it to the next step.

    Args:
        model: model helper used to add ops to the net.
        encoder_output_dim: width of each encoder output vector.
        encoder_outputs_transposed: blob of shape
            [batch_size, encoder_output_dim, encoder_length].
        weighted_encoder_outputs: precomputed projection of the encoder
            outputs, [encoder_length, batch_size, encoder_output_dim].
        decoder_hidden_state_t: current decoder hidden state,
            [1, batch_size, decoder_hidden_state_dim].
        decoder_hidden_state_dim: width of the decoder hidden state.
        scope: string used to namespace the created blobs (via s()).
        encoder_lengths: per-example source lengths (required here,
            unlike the other attention variants).
        coverage_t_prev: accumulated attention weights from previous
            steps, [1, batch_size, encoder_length].
        coverage_weights: learned coverage weights,
            [encoder_length, batch_size, encoder_output_dim].

    Returns:
        Tuple (attention_weighted_encoder_context, attention_weights_3d,
        [decoder_hidden_encoder_outputs_sum], coverage_t).
    """
    # Project the decoder hidden state into encoder space.
    weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
        model=model,
        input=decoder_hidden_state_t,
        dim_in=decoder_hidden_state_dim,
        dim_out=encoder_output_dim,
        scope=scope,
        name='weighted_decoder_hidden_state',
    )
    # [encoder_length, batch_size, encoder_output_dim]
    decoder_hidden_encoder_outputs_sum = model.net.Add(
        [weighted_encoder_outputs, weighted_decoder_hidden_state],
        s(scope, 'decoder_hidden_encoder_outputs_sum'),
        broadcast=1,
    )
    # [batch_size, encoder_length]
    coverage_t_prev_2d = model.net.Squeeze(
        coverage_t_prev,
        s(scope, 'coverage_t_prev_2d'),
        dims=[0],
    )
    # [encoder_length, batch_size]
    coverage_t_prev_transposed = brew.transpose(
        model,
        coverage_t_prev_2d,
        s(scope, 'coverage_t_prev_transposed'),
    )
    # [encoder_length, batch_size, encoder_output_dim]
    scaled_coverage_weights = model.net.Mul(
        [coverage_weights, coverage_t_prev_transposed],
        s(scope, 'scaled_coverage_weights'),
        broadcast=1,
        axis=0,
    )
    # [encoder_length, batch_size, encoder_output_dim]
    # Add the coverage term in place to the existing match sum.
    decoder_hidden_encoder_outputs_sum = model.net.Add(
        [decoder_hidden_encoder_outputs_sum, scaled_coverage_weights],
        decoder_hidden_encoder_outputs_sum,
    )
    # [batch_size, encoder_length, 1]
    attention_logits_transposed = _calc_attention_logits_from_sum_match(
        model=model,
        decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
        encoder_output_dim=encoder_output_dim,
        scope=scope,
    )
    # [batch_size, encoder_length, 1]
    attention_weights_3d = _calc_attention_weights(
        model=model,
        attention_logits_transposed=attention_logits_transposed,
        scope=scope,
        encoder_lengths=encoder_lengths,
    )
    # [1, batch_size, encoder_output_dim]
    attention_weighted_encoder_context = _calc_weighted_context(
        model=model,
        encoder_outputs_transposed=encoder_outputs_transposed,
        encoder_output_dim=encoder_output_dim,
        attention_weights_3d=attention_weights_3d,
        scope=scope,
    )
    # [batch_size, encoder_length]
    attention_weights_2d = model.net.Squeeze(
        attention_weights_3d,
        s(scope, 'attention_weights_2d'),
        dims=[2],
    )
    # Accumulate this step's attention mass into the coverage vector.
    coverage_t = model.net.Add(
        [coverage_t_prev, attention_weights_2d],
        s(scope, 'coverage_t'),
        broadcast=1,
    )
    return (
        attention_weighted_encoder_context,
        attention_weights_3d,
        [decoder_hidden_encoder_outputs_sum],
        coverage_t,
    )
| |
# coding=utf-8
"""
Collects JMX metrics from the Jolokia Agent. Jolokia is an HTTP bridge that
provides access to JMX MBeans without the need to write Java code. See the
[Reference Guide](http://www.jolokia.org/reference/html/index.html) for more
information.
By default, all MBeans will be queried for metrics. All numerical values will
be published to Graphite; anything else will be ignored. JolokiaCollector will
create a reasonable namespace for each metric based on each MBeans domain and
name, e.g. ```java.lang:name=ParNew,type=GarbageCollector``` would become
```java.lang.name_ParNew.type_GarbageCollector```.
#### Dependencies
* Jolokia
* A running JVM with Jolokia installed/configured
#### Example Configuration
If desired, JolokiaCollector can be configured to query specific MBeans by
providing a list of ```mbeans```. If ```mbeans``` is not provided, all MBeans
will be queried for metrics. Note that the mbean prefix is checked both
with and without rewrites (including fixup re-writes) applied. This allows
you to specify "java.lang:name=ParNew,type=GarbageCollector" (the raw name from
jolokia) or "java.lang.name_ParNew.type_GarbageCollector" (the fixed name
as used for output)
If the ```regex``` flag is set to True, mbeans will match based on regular
expressions rather than a plain textual match.
The ```rewrite``` section provides a way of renaming the data keys before
it sent out to the handler. The section consists of pairs of from-to
regular expressions. If the resultant name is completely blank, the
metric is not published, providing a way to exclude specific metrics within
an mbean.
```
host = localhost
port = 8778
mbeans = "java.lang:name=ParNew,type=GarbageCollector",
"org.apache.cassandra.metrics:name=WriteTimeouts,type=ClientRequestMetrics"
[rewrite]
java = coffee
"-v\d+\.\d+\.\d+" = "-AllVersions"
".*GetS2Activities.*" = ""
```
"""
import diamond.collector
import json
import re
import time
import urllib
import urllib2
import sys
class MBean(object):
    """A single Jolokia MBean entry plus the logic to name it.

    Holds the raw bean prefix (``domain:key=value,...``) together with
    one attribute key/value pair, and produces a metric name list and a
    dimension dict via caller-supplied patch hooks.
    """

    def __init__(self, prefix, bean_key, bean_value):
        self.prefix = prefix
        self.bean_key = bean_key
        self.bean_value = bean_value

    def parse(self, patch_dimensions, patch_metric_name):
        """Split the prefix and build (metric_name_list, dimensions).

        `patch_dimensions(bean, raw_dims)` must return a
        (metric_name, metric_type, dimensions) triple;
        `patch_metric_name(bean, name_list)` may rewrite the name parts.
        """
        domain, meta = self.prefix.split(':', 1)
        self.metric_name, self.metric_type, self.dimensions = \
            patch_dimensions(self, self.parse_dimension(meta))
        name_parts = [domain]
        # Type precedes name, and empty/None parts are dropped.
        for part in (self.metric_type, self.metric_name):
            if part:
                name_parts.append(part)
        return patch_metric_name(self, name_parts), self.dimensions

    def parse_dimension(self, meta):
        """Parse 'k1=v1,k2=v2,...' into a {key: value} dict."""
        dimensions = {}
        for pair in meta.split(','):
            key, value = pair.split('=')
            dimensions[str(key)] = value
        return dimensions
class JolokiaCollector(diamond.collector.Collector):
LIST_URL = "/list?ifModifiedSince=%s&maxDepth=%s"
READ_URL = "/?ignoreErrors=true&includeStackTrace=false&maxCollectionSize=%s&p=read/%s"
LIST_QUERY_URL = "/list/%s?maxDepth=%s"
"""
These domains contain MBeans that are for management purposes,
or otherwise do not contain useful metrics
"""
IGNORE_DOMAINS = ['JMImplementation', 'jmx4perl', 'jolokia',
'com.sun.management', 'java.util.logging']
def get_default_config_help(self):
config_help = super(JolokiaCollector,
self).get_default_config_help()
config_help.update({
'mbeans': "Pipe delimited list of MBeans for which to collect"
" stats. If not provided, all stats will"
" be collected.",
'regex': "Contols if mbeans option matches with regex,"
" False by default.",
'host': 'Hostname',
'port': 'Port',
'mbean_blacklist': 'A list of blacklisted mbeans',
'rewrite': "This sub-section of the config contains pairs of"
" from-to regex rewrites.",
'url_path': 'Path to jolokia. typically "jmx" or "jolokia"',
'listing_max_depth': 'max depth of domain listings tree, 0=deepest, 1=keys only, 2=weird',
'read_limit': 'Request size to read from jolokia, defaults to 1000, 0 = no limit'
})
return config_help
def get_default_config(self):
config = super(JolokiaCollector, self).get_default_config()
config.update({
'mbeans': [],
'regex': False,
'rewrite': [],
'url_path': 'jolokia',
'host': 'localhost',
'mbean_blacklist': [],
'port': 8778,
'listing_max_depth': 1,
'read_limit': 1000,
})
self.domain_keys = []
self.last_list_request = 0
return config
def __init__(self, *args, **kwargs):
super(JolokiaCollector, self).__init__(*args, **kwargs)
self.mbeans = []
self.rewrite = {}
if isinstance(self.config['mbeans'], basestring):
for mbean in self.config['mbeans'].split('|'):
self.mbeans.append(mbean.strip())
elif isinstance(self.config['mbeans'], list):
self.mbeans = self.config['mbeans']
if isinstance(self.config['rewrite'], dict):
self.rewrite = self.config['rewrite']
def check_mbean(self, mbean):
if not self.mbeans:
return True
mbeanfix = self.clean_up(mbean)
if self.config['regex'] is not None:
for chkbean in self.mbeans:
if re.match(chkbean, mbean) is not None or \
re.match(chkbean, mbeanfix) is not None:
return True
else:
if mbean in self.mbeans or mbeanfix in self.mbeans:
return True
def read_metric_path(self, full_path):
obj = self.read_request(full_path, True)
mbeans = obj['value'] if obj['status'] == 200 else {}
self.collect_bean(full_path, mbeans)
def read_except_blacklist(self, prefix, blacklist):
listing = self.list_request(prefix)
try:
domains = listing['value'] if listing['status'] == 200 else {}
domain_keys = domains.keys()
for path in domain_keys:
full_path = prefix + ":" + path
if self.check_mbean_blacklist(full_path, blacklist):
self.read_metric_path(full_path)
except KeyError:
self.log.error("Unable to retrieve mbean listing")
def check_mbean_blacklist(self, mbean, blacklist):
for line in blacklist:
if mbean.find(line) != -1:
return False
return True
def check_domain_for_blacklist(self, domain, blacklist):
for line in blacklist:
if line.find(domain) != -1:
return True
return False
def collect(self):
listing = self.list_request()
try:
domains = listing['value'] if listing['status'] == 200 else {}
if listing['status'] == 200:
self.domain_keys = domains.keys()
self.last_list_request = listing.get('timestamp', int(time.time()))
for domain in self.domain_keys:
if domain not in self.IGNORE_DOMAINS:
self.publish_metric_from_domain(domain)
except KeyError:
# The reponse was totally empty, or not an expected format
self.log.error('Unable to retrieve MBean listing.')
def publish_metric_from_domain(self, domain):
if self.check_domain_for_blacklist(domain, self.config["mbean_blacklist"]):
self.read_except_blacklist(domain, self.config["mbean_blacklist"])
return
obj = self.read_request(domain)
mbeans = obj['value'] if obj['status'] == 200 else {}
for k, v in mbeans.iteritems():
if self.check_mbean(k):
self.collect_bean(k, v)
def read_json(self, request):
json_str = request.read()
return json.loads(json_str)
def list_request(self, bean_path=None):
try:
if bean_path:
url_path = self.LIST_QUERY_URL % (bean_path,
self.config['listing_max_depth'])
else:
url_path = self.LIST_URL % (self.last_list_request,
self.config['listing_max_depth'])
url = "http://%s:%s/%s%s" % (self.config['host'],
self.config['port'],
self.config['url_path'],
url_path)
response = urllib2.urlopen(url)
return self.read_json(response)
except (urllib2.HTTPError, ValueError):
self.log.error('Unable to read JSON response.')
return {}
def read_request(self, url_path, read_bean=False):
try:
if read_bean:
url_path = self.READ_URL % (self.config['read_limit'],
self.escape_domain(url_path))
else:
url_path = self.READ_URL % (self.config['read_limit'],
self.escape_domain(url_path)) + ":*"
url = "http://%s:%s/%s%s" % (self.config['host'],
self.config['port'],
self.config['url_path'],
url_path)
response = urllib2.urlopen(url)
return self.read_json(response)
except (urllib2.HTTPError, ValueError):
self.log.error('Unable to read JSON response.')
return {}
# escape the JMX domain per https://jolokia.org/reference/html/protocol.html
# the Jolokia documentation suggests that, when using the p query parameter,
# simply urlencoding should be sufficient, but in practice, the '!' appears
# necessary (and not harmful)
def escape_domain(self, domain):
domain = re.sub('!', '!!', domain)
domain = re.sub('/', '!/', domain)
domain = re.sub('"', '!"', domain)
domain = urllib.quote(domain)
return domain
def clean_up(self, text):
text = re.sub('["\'(){}<>\[\]]', '', text)
text = re.sub('[:,.]+', '.', text)
text = re.sub('[^a-zA-Z0-9_.+-]+', '_', text)
for (oldstr, newstr) in self.rewrite.items():
text = re.sub(oldstr, newstr, text)
return text
    def collect_bean(self, prefix, obj):
        """Recursively flatten the MBean attribute mapping *obj* and publish
        every numeric leaf under a dotted key rooted at *prefix*."""
        for k, v in obj.iteritems():  # Python 2 dict iteration
            if type(v) in [int, float, long]:  # 'long' is Python 2 only
                key = "%s.%s" % (prefix, k)
                key = self.clean_up(key)
                # clean_up() may reduce a key to ""; such keys are dropped.
                if key != "":
                    self.publish(key, v)
            elif type(v) in [dict]:
                # Nested attribute dicts recurse with an extended prefix.
                self.collect_bean("%s.%s" % (prefix, k), v)
            elif type(v) in [list]:
                # Lists are ambiguous; delegate to the subclass hook.
                self.interpret_bean_with_list("%s.%s" % (prefix, k), v)
def patch_dimensions(self, bean, dims):
raise NotImplementedError()
def patch_metric_name(self, bean, metric_name_list):
raise NotImplementedError()
def parse_dimension_bean(self, prefix, key, value):
mbean = MBean(prefix, key, value)
try:
metric_name_list, self.dimensions = mbean.parse(self.patch_dimensions, self.patch_metric_name)
metric_name = '.'.join(metric_name_list)
metric_name = self.clean_up(metric_name)
if metric_name == "":
self.dimensions = {}
return
if key.lower() == 'count':
self.publish_cumulative_counter(metric_name, value)
else:
self.publish(metric_name, value)
except:
exctype, value = sys.exc_info()[:2]
self.log.error(str(value))
# There's no unambiguous way to interpret list values, so
# this hook lets subclasses handle them.
def interpret_bean_with_list(self, prefix, values):
pass
| |
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from contextlib import nested
from uuid import UUID, uuid4
from mock import Mock, patch
from voluptuous import Schema, MultipleInvalid
from pyfarm.core.utility import ImmutableDict
from pyfarm.core.enums import INTEGER_TYPES, STRING_TYPES, WINDOWS
from pyfarm.agent.config import config
from pyfarm.agent.testutil import TestCase, skipIf
from pyfarm.agent.sysinfo.user import is_administrator
from pyfarm.jobtypes.core.internals import USER_GROUP_TYPES
from pyfarm.jobtypes.core.jobtype import (
JobType, CommandData, process_stdout, process_stderr)
from pyfarm.jobtypes.core.log import STDOUT, STDERR, logpool
IS_ADMIN = is_administrator()
def fake_assignment():
    """Build a minimal, schema-valid assignment dict, register it in the
    agent config under its freshly generated uuid, and return it."""
    uid = uuid4()
    tasks = [{"id": 1, "frame": 1, "attempt": 1},
             {"id": 1, "frame": 1, "attempt": 1}]
    assignment = {
        "id": uid,
        "job": {
            "id": 1,
            "by": 1,
            "title": "Hello World",
            "data": {"a": True, "b": False, "c": None, "d": 1}},
        "jobtype": {
            "name": "Foo",
            "version": 1},
        "tasks": tasks}
    config["current_assignments"][uid] = assignment
    return assignment
class FakeProcessProtocol(object):
    """Minimal stand-in for a process protocol; carries only a uuid."""

    def __init__(self):
        self.uuid = uuid4()
class TestSchema(TestCase):
    """Checks for JobType's assignment validation schema."""
    def test_attribute(self):
        # ASSIGNMENT_SCHEMA must be a voluptuous Schema instance.
        self.assertIsInstance(JobType.ASSIGNMENT_SCHEMA, Schema)
    def test_schema(self):
        # A well-formed assignment validates without raising.
        JobType.ASSIGNMENT_SCHEMA(fake_assignment())
class TestInit(TestCase):
    """Tests for JobType.__init__ and the state it establishes."""
    def test_uuid(self):
        job = JobType(fake_assignment())
        self.assertIsInstance(job.uuid, UUID)
    def test_sets_config(self):
        # Constructing a JobType registers it in the global config mapping.
        job = JobType(fake_assignment())
        self.assertIn(job.uuid, config["jobtypes"])
        self.assertIs(config["jobtypes"][job.uuid], job)
    def test_assignment(self):
        assignment = fake_assignment()
        job = JobType(assignment)
        self.assertIsInstance(job.assignment, ImmutableDict)
        # NOTE(review): pop("id") assumes JobType added an "id" key to the
        # shared jobtype dict during __init__ -- confirm against JobType.
        assignment["jobtype"].pop("id")
        self.assertEqual(job.assignment, assignment)
    def test_attributes(self):
        # A fresh JobType starts with empty bookkeeping collections and
        # neither start() nor stop() having been called.
        job = JobType(fake_assignment())
        self.assertIsInstance(job.processes, dict)
        self.assertEqual(job.processes, {})
        self.assertIsInstance(job.failed_processes, set)
        self.assertEqual(job.failed_processes, set())
        self.assertIsInstance(job.finished_tasks, set)
        self.assertEqual(job.finished_tasks, set())
        self.assertIsInstance(job.failed_tasks, set)
        self.assertEqual(job.failed_tasks, set())
        self.assertFalse(job.stop_called)
        self.assertFalse(job.start_called)
class TestCommandData(TestCase):
    """Tests for the CommandData container and CommandData.validate()."""
    def test_set_basic_attributes(self):
        command = os.urandom(12)
        arguments = (1, None, True, "foobar")
        data = CommandData(command, *arguments)
        self.assertEqual(data.command, command)
        # Positional arguments are coerced to strings and kept as a tuple.
        for argument in data.arguments:
            self.assertIsInstance(argument, str)
        self.assertIsInstance(data.arguments, tuple)
        self.assertEqual(data.arguments, ("1", "None", "True", "foobar"))
        self.assertIsNone(data.env)
        self.assertIsNone(data.cwd)
        self.assertIsNone(data.user)
        self.assertIsNone(data.group)
    def test_set_kwargs(self):
        data = CommandData(
            "", env={"foo": "bar"}, cwd="/", user="usr", group="grp")
        self.assertEqual(data.env, {"foo": "bar"})
        self.assertEqual(data.cwd, "/")
        self.assertEqual(data.user, "usr")
        self.assertEqual(data.group, "grp")
    def test_unknown_kwarg(self):
        # Unrecognised keyword arguments are rejected outright.
        with self.assertRaises(ValueError):
            CommandData("", foobar=True)
    def test_validate_command_type(self):
        with self.assertRaisesRegexp(
                TypeError, re.compile(".*string.*command.*")):
            CommandData(None).validate()
    def test_validate_env_type(self):
        with self.assertRaisesRegexp(
                TypeError, re.compile(".*dictionary.*env.*")):
            CommandData("", env=1).validate()
    def test_user_group_types(self):
        self.assertEqual(
            USER_GROUP_TYPES,
            tuple(list(STRING_TYPES) + list(INTEGER_TYPES) + [type(None)]))
    def test_validate_user_type(self):
        with self.assertRaisesRegexp(
                TypeError, re.compile(".*user.*")):
            CommandData("", user=1.0).validate()
    def test_validate_group_type(self):
        with self.assertRaisesRegexp(
                TypeError, re.compile(".*group.*")):
            CommandData("", group=1.0).validate()
    # Changing user/group requires administrator rights; these four tests
    # cover the admin and non-admin cases on non-Windows platforms.
    @skipIf(WINDOWS, "Non-Windows only")
    @skipIf(IS_ADMIN, "Is Administrator")
    def test_validate_change_user_non_admin_failure(self):
        with self.assertRaises(EnvironmentError):
            CommandData("", user=0).validate()
    @skipIf(WINDOWS, "Non-Windows only")
    @skipIf(IS_ADMIN, "Is Administrator")
    def test_validate_change_group_non_admin_failure(self):
        with self.assertRaises(EnvironmentError):
            CommandData("", group=0).validate()
    @skipIf(WINDOWS, "Non-Windows only")
    @skipIf(not IS_ADMIN, "Not Administrator")
    def test_validate_change_user_admin(self):
        CommandData("", user=0).validate()
    @skipIf(WINDOWS, "Non-Windows only")
    @skipIf(not IS_ADMIN, "Not Administrator")
    def test_validate_change_group_admin(self):
        CommandData("", group=0).validate()
    def test_validate_cwd_default(self):
        initial_cwd = os.getcwd()
        config["agent_chdir"] = None
        data = CommandData("")
        data.validate()
        # validate() fills in cwd but must not chdir the running process.
        self.assertEqual(data.cwd, os.getcwd())
        self.assertEqual(initial_cwd, os.getcwd())
    def test_validate_cwd_config(self):
        initial_cwd = os.getcwd()
        testdir, _ = self.create_directory(count=0)
        config["agent_chdir"] = testdir
        data = CommandData("")
        data.validate()
        self.assertEqual(data.cwd, testdir)
        self.assertEqual(initial_cwd, os.getcwd())
    def test_validate_cwd_does_not_exist(self):
        # str.encode("hex") is Python 2 only; yields a random nonexistent name.
        data = CommandData("", cwd=os.urandom(4).encode("hex"))
        with self.assertRaises(OSError):
            data.validate()
    def test_validate_cwd_invalid_type(self):
        data = CommandData("", cwd=1)
        with self.assertRaises(TypeError):
            data.validate()
    def test_set_default_environment_noop(self):
        # An explicitly provided env is never overwritten by the default.
        data = CommandData("", env={"foo": "bar"})
        data.set_default_environment({"a": "b"})
        self.assertEqual(data.env, {"foo": "bar"})
    def test_set_default_environment(self):
        data = CommandData("", env=None)
        data.set_default_environment({"a": "b"})
        self.assertEqual(data.env, {"a": "b"})
class TestJobTypeLoad(TestCase):
    """Tests for JobType.load input validation."""
    def test_schema(self):
        # An empty assignment fails schema validation inside load().
        with self.assertRaises(MultipleInvalid):
            JobType.load({})
class TestJobTypeLogLine(TestCase):
    """Tests for log_stdout_line/log_stderr_line under both capture modes."""
    def prepare_config(self):
        super(TestJobTypeLogLine, self).prepare_config()
        # Default each test to NOT mirroring process output to the agent log.
        config["jobtype_capture_process_output"] = False
    def test_capure_stdout(self):
        # Capture enabled: lines are mirrored to the process_stdout logger.
        config["jobtype_capture_process_output"] = True
        jobtype = JobType(fake_assignment())
        with patch.object(process_stdout, "info") as mocked:
            jobtype.log_stdout_line(Mock(id=1), "stdout")
        mocked.assert_called_once_with("task %r: %s", 1, "stdout")
    def test_capure_stderr(self):
        config["jobtype_capture_process_output"] = True
        jobtype = JobType(fake_assignment())
        with patch.object(process_stderr, "info") as mocked:
            jobtype.log_stderr_line(Mock(id=2), "stderr")
        mocked.assert_called_once_with("task %r: %s", 2, "stderr")
    def test_no_capure_stdout(self):
        # Capture disabled: lines go to the per-job log pool instead.
        config["jobtype_capture_process_output"] = False
        protocol = Mock(id=3, pid=33)
        jobtype = JobType(fake_assignment())
        logpool.open_log(jobtype.uuid, self.create_file(), ignore_existing=True)
        with patch.object(logpool, "log") as mocked:
            jobtype.log_stdout_line(protocol, "stdout")
        mocked.assert_called_once_with(jobtype.uuid, STDOUT, "stdout", 33)
    def test_no_capure_stderr(self):
        config["jobtype_capture_process_output"] = False
        protocol = Mock(id=3, pid=33)
        jobtype = JobType(fake_assignment())
        logpool.open_log(jobtype.uuid, self.create_file(), ignore_existing=True)
        with patch.object(logpool, "log") as mocked:
            jobtype.log_stderr_line(protocol, "stderr")
        mocked.assert_called_once_with(jobtype.uuid, STDERR, "stderr", 33)
# NOTE: These tests test code flow rather than function
class TestJobTypeHandleStdoutLine(TestCase):
    """Flow tests for the handle_stdout_line hook pipeline
    (preprocess -> format -> log/process)."""
    def test_preprocess_can_stop_handling(self):
        jobtype = JobType(fake_assignment())
        protocol = Mock(id=1)
        # contextlib.nested is Python 2 only.
        with nested(
            patch.object(jobtype, "preprocess_stdout_line", return_value=False),
            patch.object(jobtype, "format_stdout_line"),
        ) as (_, mocked_format):
            jobtype.handle_stdout_line(protocol, "stdout 1")
        # A False return from preprocess short-circuits the pipeline.
        self.assertEqual(mocked_format.call_count, 0)
    def test_preprocess_replaces_output(self):
        jobtype = JobType(fake_assignment())
        logpool.open_log(jobtype.uuid, self.create_file(), ignore_existing=True)
        protocol = Mock(id=2)
        with nested(
            patch.object(jobtype, "preprocess_stdout_line", return_value="foo"),
            patch.object(jobtype, "format_stdout_line"),
        ) as (_, mocked):
            jobtype.handle_stdout_line(protocol, "stdout 2")
        # A truthy preprocess return value replaces the original line.
        mocked.assert_called_with(protocol, "foo")
    def test_format_replaces_output(self):
        jobtype = JobType(fake_assignment())
        logpool.open_log(jobtype.uuid, self.create_file(), ignore_existing=True)
        protocol = Mock(id=3)
        with nested(
            patch.object(jobtype, "format_stdout_line", return_value="bar"),
            patch.object(jobtype, "log_stdout_line"),
            patch.object(jobtype, "process_stdout_line"),
        ) as (_, log_mock, process_mock):
            jobtype.handle_stdout_line(protocol, "stdout 3")
        # The formatted line is what gets logged and processed.
        log_mock.assert_called_with(protocol, "bar")
        process_mock.assert_called_with(protocol, "bar")
# NOTE: These tests test code flow rather than function
class TestJobTypeHandleStderrLine(TestCase):
    """Flow tests for the handle_stderr_line hook pipeline; mirrors the
    stdout pipeline tests above."""
    def test_preprocess_can_stop_handling(self):
        jobtype = JobType(fake_assignment())
        protocol = Mock(id=1)
        # contextlib.nested is Python 2 only.
        with nested(
            patch.object(jobtype, "preprocess_stderr_line", return_value=False),
            patch.object(jobtype, "format_stderr_line"),
        ) as (_, mocked_format):
            jobtype.handle_stderr_line(protocol, "stderr 1")
        # A False return from preprocess short-circuits the pipeline.
        self.assertEqual(mocked_format.call_count, 0)
    def test_preprocess_replaces_output(self):
        jobtype = JobType(fake_assignment())
        logpool.open_log(jobtype.uuid, self.create_file(), ignore_existing=True)
        protocol = Mock(id=2)
        with nested(
            patch.object(jobtype, "preprocess_stderr_line", return_value="foo"),
            patch.object(jobtype, "format_stderr_line"),
        ) as (_, mocked):
            jobtype.handle_stderr_line(protocol, "stderr 2")
        mocked.assert_called_with(protocol, "foo")
    def test_format_replaces_output(self):
        jobtype = JobType(fake_assignment())
        logpool.open_log(jobtype.uuid, self.create_file(), ignore_existing=True)
        protocol = Mock(id=3)
        with nested(
            patch.object(jobtype, "format_stderr_line", return_value="bar"),
            patch.object(jobtype, "log_stderr_line"),
            patch.object(jobtype, "process_stderr_line"),
        ) as (_, log_mock, process_mock):
            jobtype.handle_stderr_line(protocol, "stderr 3")
        log_mock.assert_called_with(protocol, "bar")
        process_mock.assert_called_with(protocol, "bar")
    def test_process_stderr_line_calls_stdout_line_processing(self):
        # By default stderr processing delegates to the stdout processor.
        jobtype = JobType(fake_assignment())
        logpool.open_log(jobtype.uuid, self.create_file(), ignore_existing=True)
        protocol = Mock(id=4)
        with patch.object(jobtype, "process_stdout_line") as process_mock:
            jobtype.process_stderr_line(protocol, "stderr 4")
        process_mock.assert_called_with(protocol, "stderr 4")
| |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2011, Evan Leis
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on Jun 8, 2011
@author: evan
'''
from lxml import etree
from kayako.core.lib import UnsetParameter
from kayako.core.object import KayakoObject
from kayako.objects.ticket.ticket_note import TicketNote
from kayako.objects.ticket.ticket_post import TicketPost
from kayako.objects.ticket.ticket_time_track import TicketTimeTrack
from kayako.exception import KayakoRequestError, KayakoResponseError
class Ticket(KayakoObject):
    '''
    Kayako Ticket API Object.

    subject          The Ticket Subject
    fullname         Full Name of creator
    email            Email Address of creator
    contents         The contents of the first ticket post
    departmentid     The Department ID
    ticketstatusid   The Ticket Status ID
    ticketpriorityid The Ticket Priority ID
    tickettypeid     The Ticket Type ID
    userid           The User ID, if the ticket is to be created as a user.
    staffid          The Staff ID, if the ticket is to be created as a staff
    ownerstaffid     The Owner Staff ID, if you want to set an Owner for this
                     ticket
    type             The ticket type: 'default' or 'phone'
    '''
    # REST endpoint for this object type.
    controller = '/Tickets/Ticket'
    # Every attribute this object exposes, writable or read-only.
    __parameters__ = [
        'id',
        'subject',
        'fullname',
        'email',
        'contents',
        'departmentid',
        'ticketstatusid',
        'ticketpriorityid', # synonym for priorityid
        'tickettypeid',
        'userid',
        'staffid',
        'ownerstaffid',
        'type',
        'flagtype',
        'displayid',
        'statusid',
        'typeid',
        'userorganization',
        'userorganizationid',
        'ownerstaffname',
        'lastreplier',
        'creationtime',
        'lastactivity',
        'laststaffreply',
        'lastuserreply',
        'slaplanid',
        'nextreplydue',
        'resolutiondue',
        'replies',
        'ipaddress',
        'creator',
        'creationmode',
        'creationtype',
        'isescalated',
        'ignoreautoresponder',
        'templategroupname',
        'escalationruleid',
        'tags',
        'autouserid',
        'watchers',
        'workflows',
        'notes',
        'posts',
        'timetracks',
    ]
    # Parameters that must be present for add(); a subset of the parameters
    # sent on add; and the parameters persisted by save().
    __required_add_parameters__ = ['subject', 'fullname', 'email', 'contents', 'departmentid', 'ticketstatusid', 'ticketpriorityid', 'tickettypeid', ]
    __add_parameters__ = ['subject', 'fullname', 'email', 'contents', 'departmentid', 'ticketstatusid', 'ticketpriorityid', 'tickettypeid', 'userid', 'autouserid', 'ignoreautoresponder', 'staffid',
                          'ownerstaffid', 'type']
    __save_parameters__ = ['subject', 'fullname', 'email', 'departmentid', 'ticketstatusid', 'ticketpriorityid', 'ownerstaffid', 'userid', 'autouserid']
    @classmethod
    def _parse_ticket(cls, api, ticket_tree):
        """Convert a <ticket> XML element into a dict of constructor
        parameters, including child workflow/watcher/note/post objects."""
        ticketid = cls._parse_int(ticket_tree.get('id'))
        workflows = [dict(id=workflow_node.get('id'), title=workflow_node.get('title')) for workflow_node in ticket_tree.findall('workflow')]
        watchers = [dict(staffid=watcher_node.get('staffid'), name=watcher_node.get('name')) for watcher_node in ticket_tree.findall('watcher')]
        notes = [TicketNote(api, **TicketNote._parse_ticket_note(ticket_note_tree, ticketid)) for ticket_note_tree in ticket_tree.findall('note') if ticket_note_tree.get('type') == 'ticket']
        # NOTE(review): the disabled timetrack parsing below filters on
        # 'ticket_note_tree', which would be undefined in its own list
        # comprehension if re-enabled -- fix before uncommenting.
        # timetracks = [TicketTimeTrack(api, **TicketTimeTrack._parse_ticket_time_track(ticket_time_track_tree, ticketid)) for ticket_time_track_tree in ticket_tree.findall('note') if
        # ticket_note_tree.get('type') == 'timetrack']
        posts = []
        posts_node = ticket_tree.find('posts')
        if posts_node is not None:
            posts = [TicketPost(api, **TicketPost._parse_ticket_post(ticket_post_tree, ticketid)) for ticket_post_tree in posts_node.findall('post')]
        params = dict(
            id=ticketid,
            subject=cls._get_string(ticket_tree.find('subject')),
            fullname=cls._get_string(ticket_tree.find('fullname')),
            email=cls._get_string(ticket_tree.find('email')),
            departmentid=cls._get_int(ticket_tree.find('departmentid')),
            autouserid=cls._get_boolean(ticket_tree.find('autouserid'), required=False),
            ticketstatusid=cls._get_int(ticket_tree.find('ticketstatusid'), required=False),
            ticketpriorityid=cls._get_int(ticket_tree.find('priorityid')), # Note the difference, request param is ticketpriorityid, response is priorityid
            tickettypeid=cls._get_int(ticket_tree.find('tickettypeid'), required=False),
            userid=cls._get_int(ticket_tree.find('userid')),
            ownerstaffid=cls._get_int(ticket_tree.find('ownerstaffid')),
            flagtype=cls._parse_int(ticket_tree.get('flagtype'), 'flagtype'),
            displayid=cls._get_string(ticket_tree.find('displayid')),
            statusid=cls._get_int(ticket_tree.find('statusid')),
            typeid=cls._get_int(ticket_tree.find('typeid')),
            userorganization=cls._get_string(ticket_tree.find('userorganization')),
            userorganizationid=cls._get_int(ticket_tree.find('userorganizationid'), required=False),
            ownerstaffname=cls._get_string(ticket_tree.find('ownerstaffname')),
            lastreplier=cls._get_string(ticket_tree.find('lastreplier')),
            creationtime=cls._get_date(ticket_tree.find('creationtime')),
            lastactivity=cls._get_date(ticket_tree.find('lastactivity')),
            laststaffreply=cls._get_date(ticket_tree.find('laststaffreply')),
            lastuserreply=cls._get_date(ticket_tree.find('lastuserreply')),
            slaplanid=cls._get_int(ticket_tree.find('slaplanid')),
            nextreplydue=cls._get_date(ticket_tree.find('nextreplydue')),
            resolutiondue=cls._get_date(ticket_tree.find('resolutiondue')),
            replies=cls._get_int(ticket_tree.find('replies')),
            ipaddress=cls._get_string(ticket_tree.find('ipaddress')),
            creator=cls._get_int(ticket_tree.find('creator')),
            creationmode=cls._get_int(ticket_tree.find('creationmode')),
            creationtype=cls._get_int(ticket_tree.find('creationtype')),
            isescalated=cls._get_boolean(ticket_tree.find('isescalated')),
            escalationruleid=cls._get_int(ticket_tree.find('escalationruleid')),
            tags=cls._get_string(ticket_tree.find('tags')),
            templategroupname=cls._get_string(ticket_tree.find('templategroupname')),
            watchers=watchers,
            workflows=workflows,
            notes=notes,
            posts=posts,
            timetracks=[],
        )
        return params
    def _update_from_response(self, ticket_tree):
        """Refresh this instance's attributes from a <ticket> response
        element, touching only the fields present in the response."""
        ticketid = self._parse_int(ticket_tree.get('id'))
        if ticketid is not None:
            self.id = ticketid
        # The response calls the priority field 'priorityid'.
        priority_node = ticket_tree.find('priorityid')
        if priority_node is not None:
            self.ticketpriorityid = self._get_int(priority_node)
        # Integer-valued fields.
        for int_node in ['departmentid', 'userid', 'ownerstaffid', 'flagtype', 'statusid', 'slaplanid', 'replies', 'creator', 'creationmode', 'creationtype', 'escalationruleid',
                         'ticketstatusid', 'tickettypeid', 'userorganizationid']:
            node = ticket_tree.find(int_node)
            if node is not None:
                setattr(self, int_node, self._get_int(node, required=False))
        # String-valued fields.
        for str_node in ['subject', 'email', 'displayid', 'userorganization', 'ownerstaffname', 'lastreplier', 'ipaddress', 'tags']:
            node = ticket_tree.find(str_node)
            if node is not None:
                setattr(self, str_node, self._get_string(node))
        # Boolean-valued fields.
        for bool_node in ['isescalated', 'autouserid']:
            node = ticket_tree.find(bool_node)
            if node is not None:
                setattr(self, bool_node, self._get_boolean(node, required=False))
        # Date-valued fields.
        for date_node in ['creationtime', 'lastactivity', 'lastuserreply', 'nextreplydue', 'resolutiondue', ]:
            node = ticket_tree.find(date_node)
            if node is not None:
                setattr(self, date_node, self._get_date(node, required=False))
    @classmethod
    def get_all(cls, api, departmentid, ticketstatusid=-1, ownerstaffid=-1, userid=-1, count=1000, start=0):
        '''
        Get all of the tickets filtered by the parameters:
        Lists are converted to comma-separated values.
        Required:
            departmentid     Filter the tickets by the specified department id, you can specify multiple id's by separating the values using a comma. Example: 1,2,3
        Optional:
            ticketstatusid   Filter the tickets by the specified ticket status id, you can specify multiple id's by separating the values using a comma. Example: 1,2,3
            ownerstaffid     Filter the tickets by the specified owner staff id, you can specify multiple id's by separating the values using a comma. Example: 1,2,3
            userid           Filter the tickets by the specified user id, you can specify multiple id's by separating the values using a comma. Example: 1,2,3
        '''
        # List/tuple filters are joined into the CSV form the API expects.
        if isinstance(departmentid, (list, tuple)):
            departmentid = ','.join([str(id_item) for id_item in departmentid])
        if isinstance(ticketstatusid, (list, tuple)):
            ticketstatusid = ','.join([str(id_item) for id_item in ticketstatusid])
        if isinstance(ownerstaffid, (list, tuple)):
            ownerstaffid = ','.join([str(id_item) for id_item in ownerstaffid])
        if isinstance(userid, (list, tuple)):
            userid = ','.join([str(id_item) for id_item in userid])
        response = api._request('%s/ListAll/%s/%s/%s/%s/%s/%s' % (cls.controller, departmentid, ticketstatusid, ownerstaffid, userid, count, start), 'GET')
        tree = etree.parse(response)
        return [Ticket(api, **cls._parse_ticket(api, ticket_tree)) for ticket_tree in tree.findall('ticket')]
    @classmethod
    def get(cls, api, id):
        """Fetch a single ticket by id; returns None when the ticket does
        not exist (HTTP 404) or the response has no <ticket> element."""
        try:
            response = api._request('%s/%s/' % (cls.controller, id), 'GET')
        except KayakoResponseError as error:
            if 'HTTP Error 404' in str(error):
                return None
            else:
                raise
        tree = etree.parse(response)
        node = tree.find('ticket')
        if node is None:
            return None
        params = cls._parse_ticket(api, node)
        return Ticket(api, **params)
    def add(self):
        '''
        Add this Ticket.

        Requires:
            subject          The Ticket Subject
            fullname         Full Name of creator
            email            Email Address of creator
            contents         The contents of the first ticket post
            departmentid     The Department ID
            ticketstatusid   The Ticket Status ID
            ticketpriorityid The Ticket Priority ID
            tickettypeid     The Ticket Type ID
        At least one of these must be present:
            userid           The User ID, if the ticket is to be created as a user.
            staffid          The Staff ID, if the ticket is to be created as a staff
        Optional:
            ownerstaffid     The Owner Staff ID, if you want to set an Owner for this ticket
            type             The ticket type: 'default' or 'phone'
        '''
        if self.id is not UnsetParameter:
            raise KayakoRequestError('Cannot add a pre-existing %s. Use save instead. (id: %s)' % (self.__class__.__name__, self.id))
        parameters = self.add_parameters
        for required_parameter in self.__required_add_parameters__:
            if required_parameter not in parameters:
                raise KayakoRequestError('Cannot add %s: Missing required field: %s.' % (self.__class__.__name__, required_parameter))
        # NOTE(review): the condition also accepts 'email' alone, which the
        # error message does not mention -- confirm intended behavior.
        if 'userid' not in parameters and 'staffid' not in parameters and 'email' not in parameters:
            raise KayakoRequestError('To add a Ticket, at least one of the following parameters must be set: userid, staffid. (id: %s)' % self.id)
        response = self.api._request(self.controller, 'POST', **parameters)
        tree = etree.parse(response)
        node = tree.find('ticket')
        self._update_from_response(node)
    def save(self):
        '''
        Save this ticket.

        Saves only the following:
            subject          The Ticket Subject
            fullname         Full Name of creator
            email            Email Address of creator
            departmentid     The Department ID
            ticketstatusid   The Ticket Status ID
            ticketpriorityid The Ticket Priority ID
            tickettypeid     The Ticket Type ID
            ownerstaffid     The Owner Staff ID, if you want to set an Owner for this ticket
            userid           The User ID, if you want to change the user for this ticket
        '''
        response = self._save('%s/%s/' % (self.controller, self.id))
        tree = etree.parse(response)
        node = tree.find('ticket')
        self._update_from_response(node)
    def delete(self):
        # Issue a DELETE for this ticket's resource URL.
        self._delete('%s/%s/' % (self.controller, self.id))
    def __str__(self):
        return '<Ticket (%s): %s - %s><Template: %s>' % (self.id, 'UNSUBMITTED' if not self.displayid else self.displayid, self.subject, self.templategroupname)
| |
##################################
# InMoov auto generated calibration
# Sun Sep 03 02:32:48 UTC 2017
##################################
# NOTE(review): auto-generated servo calibration script, presumably run
# inside a MyRobotLab/Jython runtime where 'i01' is the InMoov service
# instance -- it is not defined in this file.  Each section detaches a
# servo, sets its range/velocity/rest position/pin, re-attaches it to a
# controller ("i01.left" or "i01.right") at a start position, and enables
# auto enable/disable.  Edit via regeneration, not by hand.
# Servo Config : i01.head.eyeX
i01.head.eyeX.detach()
i01.head.eyeX.setMinMax(60.0,100.0)
i01.head.eyeX.setVelocity(-1.0)
i01.head.eyeX.setRest(80.0)
i01.head.eyeX.setPin(22)
i01.head.eyeX.attach("i01.left",22,80.0)
i01.head.eyeX.enableAutoDisable(True)
i01.head.eyeX.enableAutoEnable(True)
# Servo Config : i01.head.eyeY
i01.head.eyeY.detach()
i01.head.eyeY.setMinMax(50.0,100.0)
i01.head.eyeY.setVelocity(-1.0)
i01.head.eyeY.setRest(90.0)
i01.head.eyeY.setPin(24)
i01.head.eyeY.attach("i01.left",24,90.0)
i01.head.eyeY.enableAutoDisable(True)
i01.head.eyeY.enableAutoEnable(True)
# Servo Config : i01.head.jaw
i01.head.jaw.detach()
i01.head.jaw.setMinMax(10.0,25.0)
i01.head.jaw.setVelocity(-1.0)
i01.head.jaw.setRest(10.0)
i01.head.jaw.setPin(26)
i01.head.jaw.attach("i01.right",26,10.0)
i01.head.jaw.enableAutoDisable(True)
i01.head.jaw.enableAutoEnable(True)
# Servo Config : i01.head.neck
i01.head.neck.detach()
i01.head.neck.setMinMax(20.0,90.0)
i01.head.neck.setVelocity(-1.0)
i01.head.neck.setRest(40.0)
i01.head.neck.setPin(12)
# NOTE(review): attach position (90.0) differs from rest (40.0) here,
# unlike every other servo in this file -- confirm this is intended.
i01.head.neck.attach("i01.right",12,90.0)
i01.head.neck.enableAutoDisable(True)
i01.head.neck.enableAutoEnable(True)
# Servo Config : i01.head.rollNeck
i01.head.rollNeck.detach()
i01.head.rollNeck.setMinMax(20.0,160.0)
i01.head.rollNeck.setVelocity(-1.0)
i01.head.rollNeck.setRest(90.0)
i01.head.rollNeck.setPin(30)
i01.head.rollNeck.attach("i01.left",30,90.0)
i01.head.rollNeck.enableAutoDisable(True)
i01.head.rollNeck.enableAutoEnable(True)
# Servo Config : i01.head.rothead
i01.head.rothead.detach()
i01.head.rothead.setMinMax(30.0,150.0)
i01.head.rothead.setVelocity(-1.0)
i01.head.rothead.setRest(90.0)
i01.head.rothead.setPin(13)
i01.head.rothead.attach("i01.right",13,90.0)
i01.head.rothead.enableAutoDisable(True)
i01.head.rothead.enableAutoEnable(True)
# Servo Config : i01.leftArm.bicep
i01.leftArm.bicep.detach()
i01.leftArm.bicep.setMinMax(5.0,90.0)
i01.leftArm.bicep.setVelocity(-1.0)
i01.leftArm.bicep.setRest(5.0)
i01.leftArm.bicep.setPin(8)
i01.leftArm.bicep.attach("i01.left",8,5.0)
i01.leftArm.bicep.enableAutoDisable(True)
i01.leftArm.bicep.enableAutoEnable(True)
# Servo Config : i01.leftArm.omoplate
i01.leftArm.omoplate.detach()
i01.leftArm.omoplate.setMinMax(10.0,80.0)
i01.leftArm.omoplate.setVelocity(-1.0)
i01.leftArm.omoplate.setRest(10.0)
i01.leftArm.omoplate.setPin(11)
i01.leftArm.omoplate.attach("i01.left",11,10.0)
i01.leftArm.omoplate.enableAutoDisable(True)
i01.leftArm.omoplate.enableAutoEnable(True)
# Servo Config : i01.leftArm.rotate
i01.leftArm.rotate.detach()
i01.leftArm.rotate.setMinMax(40.0,180.0)
i01.leftArm.rotate.setVelocity(-1.0)
i01.leftArm.rotate.setRest(90.0)
i01.leftArm.rotate.setPin(9)
i01.leftArm.rotate.attach("i01.left",9,90.0)
i01.leftArm.rotate.enableAutoDisable(True)
i01.leftArm.rotate.enableAutoEnable(True)
# Servo Config : i01.leftArm.shoulder
i01.leftArm.shoulder.detach()
i01.leftArm.shoulder.setMinMax(0.0,180.0)
i01.leftArm.shoulder.setVelocity(-1.0)
i01.leftArm.shoulder.setRest(30.0)
i01.leftArm.shoulder.setPin(10)
i01.leftArm.shoulder.attach("i01.left",10,30.0)
i01.leftArm.shoulder.enableAutoDisable(True)
i01.leftArm.shoulder.enableAutoEnable(True)
# Servo Config : i01.leftHand.index
i01.leftHand.index.detach()
i01.leftHand.index.setMinMax(0.0,180.0)
i01.leftHand.index.setVelocity(-1.0)
i01.leftHand.index.setRest(2.0)
i01.leftHand.index.setPin(3)
i01.leftHand.index.attach("i01.left",3,2.0)
i01.leftHand.index.enableAutoDisable(True)
i01.leftHand.index.enableAutoEnable(True)
# Servo Config : i01.leftHand.majeure
i01.leftHand.majeure.detach()
i01.leftHand.majeure.setMinMax(0.0,180.0)
i01.leftHand.majeure.setVelocity(-1.0)
i01.leftHand.majeure.setRest(2.0)
i01.leftHand.majeure.setPin(4)
i01.leftHand.majeure.attach("i01.left",4,2.0)
i01.leftHand.majeure.enableAutoDisable(True)
i01.leftHand.majeure.enableAutoEnable(True)
# Servo Config : i01.leftHand.pinky
i01.leftHand.pinky.detach()
i01.leftHand.pinky.setMinMax(0.0,180.0)
i01.leftHand.pinky.setVelocity(-1.0)
i01.leftHand.pinky.setRest(2.0)
i01.leftHand.pinky.setPin(6)
i01.leftHand.pinky.attach("i01.left",6,2.0)
i01.leftHand.pinky.enableAutoDisable(True)
i01.leftHand.pinky.enableAutoEnable(True)
# Servo Config : i01.leftHand.ringFinger
i01.leftHand.ringFinger.detach()
i01.leftHand.ringFinger.setMinMax(0.0,180.0)
i01.leftHand.ringFinger.setVelocity(-1.0)
i01.leftHand.ringFinger.setRest(2.0)
i01.leftHand.ringFinger.setPin(5)
i01.leftHand.ringFinger.attach("i01.left",5,2.0)
i01.leftHand.ringFinger.enableAutoDisable(True)
i01.leftHand.ringFinger.enableAutoEnable(True)
# Servo Config : i01.leftHand.thumb
i01.leftHand.thumb.detach()
i01.leftHand.thumb.setMinMax(0.0,180.0)
i01.leftHand.thumb.setVelocity(-1.0)
i01.leftHand.thumb.setRest(2.0)
i01.leftHand.thumb.setPin(2)
i01.leftHand.thumb.attach("i01.left",2,2.0)
i01.leftHand.thumb.enableAutoDisable(True)
i01.leftHand.thumb.enableAutoEnable(True)
# Servo Config : i01.leftHand.wrist
i01.leftHand.wrist.detach()
i01.leftHand.wrist.setMinMax(0.0,180.0)
i01.leftHand.wrist.setVelocity(-1.0)
i01.leftHand.wrist.setRest(90.0)
i01.leftHand.wrist.setPin(7)
i01.leftHand.wrist.attach("i01.left",7,90.0)
i01.leftHand.wrist.enableAutoDisable(True)
i01.leftHand.wrist.enableAutoEnable(True)
# Servo Config : i01.rightArm.bicep
i01.rightArm.bicep.detach()
i01.rightArm.bicep.setMinMax(5.0,90.0)
i01.rightArm.bicep.setVelocity(-1.0)
i01.rightArm.bicep.setRest(5.0)
i01.rightArm.bicep.setPin(8)
i01.rightArm.bicep.attach("i01.right",8,5.0)
i01.rightArm.bicep.enableAutoDisable(True)
i01.rightArm.bicep.enableAutoEnable(True)
# Servo Config : i01.rightArm.omoplate
i01.rightArm.omoplate.detach()
i01.rightArm.omoplate.setMinMax(10.0,80.0)
i01.rightArm.omoplate.setVelocity(-1.0)
i01.rightArm.omoplate.setRest(10.0)
i01.rightArm.omoplate.setPin(11)
i01.rightArm.omoplate.attach("i01.right",11,10.0)
i01.rightArm.omoplate.enableAutoDisable(True)
i01.rightArm.omoplate.enableAutoEnable(True)
# Servo Config : i01.rightArm.rotate
i01.rightArm.rotate.detach()
i01.rightArm.rotate.setMinMax(40.0,180.0)
i01.rightArm.rotate.setVelocity(-1.0)
i01.rightArm.rotate.setRest(90.0)
i01.rightArm.rotate.setPin(9)
i01.rightArm.rotate.attach("i01.right",9,90.0)
i01.rightArm.rotate.enableAutoDisable(True)
i01.rightArm.rotate.enableAutoEnable(True)
# Servo Config : i01.rightArm.shoulder
i01.rightArm.shoulder.detach()
i01.rightArm.shoulder.setMinMax(0.0,180.0)
i01.rightArm.shoulder.setVelocity(-1.0)
i01.rightArm.shoulder.setRest(30.0)
i01.rightArm.shoulder.setPin(10)
i01.rightArm.shoulder.attach("i01.right",10,30.0)
i01.rightArm.shoulder.enableAutoDisable(True)
i01.rightArm.shoulder.enableAutoEnable(True)
# Servo Config : i01.rightHand.index
i01.rightHand.index.detach()
i01.rightHand.index.setMinMax(0.0,180.0)
i01.rightHand.index.setVelocity(-1.0)
i01.rightHand.index.setRest(2.0)
i01.rightHand.index.setPin(3)
i01.rightHand.index.attach("i01.right",3,2.0)
i01.rightHand.index.enableAutoDisable(True)
i01.rightHand.index.enableAutoEnable(True)
# Servo Config : i01.rightHand.majeure
i01.rightHand.majeure.detach()
i01.rightHand.majeure.setMinMax(0.0,180.0)
i01.rightHand.majeure.setVelocity(-1.0)
i01.rightHand.majeure.setRest(2.0)
i01.rightHand.majeure.setPin(4)
i01.rightHand.majeure.attach("i01.right",4,2.0)
i01.rightHand.majeure.enableAutoDisable(True)
i01.rightHand.majeure.enableAutoEnable(True)
# Servo Config : i01.rightHand.pinky
i01.rightHand.pinky.detach()
i01.rightHand.pinky.setMinMax(0.0,180.0)
i01.rightHand.pinky.setVelocity(-1.0)
i01.rightHand.pinky.setRest(2.0)
i01.rightHand.pinky.setPin(6)
i01.rightHand.pinky.attach("i01.right",6,2.0)
i01.rightHand.pinky.enableAutoDisable(True)
i01.rightHand.pinky.enableAutoEnable(True)
# Servo Config : i01.rightHand.ringFinger
i01.rightHand.ringFinger.detach()
i01.rightHand.ringFinger.setMinMax(0.0,180.0)
i01.rightHand.ringFinger.setVelocity(-1.0)
i01.rightHand.ringFinger.setRest(2.0)
i01.rightHand.ringFinger.setPin(5)
i01.rightHand.ringFinger.attach("i01.right",5,2.0)
i01.rightHand.ringFinger.enableAutoDisable(True)
i01.rightHand.ringFinger.enableAutoEnable(True)
# Servo Config : i01.rightHand.thumb
i01.rightHand.thumb.detach()
i01.rightHand.thumb.setMinMax(0.0,180.0)
i01.rightHand.thumb.setVelocity(-1.0)
i01.rightHand.thumb.setRest(2.0)
i01.rightHand.thumb.setPin(2)
i01.rightHand.thumb.attach("i01.right",2,2.0)
i01.rightHand.thumb.enableAutoDisable(True)
i01.rightHand.thumb.enableAutoEnable(True)
# Servo Config : i01.rightHand.wrist
i01.rightHand.wrist.detach()
i01.rightHand.wrist.setMinMax(0.0,180.0)
i01.rightHand.wrist.setVelocity(-1.0)
i01.rightHand.wrist.setRest(90.0)
i01.rightHand.wrist.setPin(7)
i01.rightHand.wrist.attach("i01.right",7,90.0)
i01.rightHand.wrist.enableAutoDisable(True)
i01.rightHand.wrist.enableAutoEnable(True)
# Servo Config : i01.torso.lowStom
i01.torso.lowStom.detach()
i01.torso.lowStom.setMinMax(0.0,180.0)
i01.torso.lowStom.setVelocity(-1.0)
i01.torso.lowStom.setRest(90.0)
i01.torso.lowStom.setPin(29)
i01.torso.lowStom.attach("i01.left",29,90.0)
i01.torso.lowStom.enableAutoDisable(True)
i01.torso.lowStom.enableAutoEnable(True)
# Servo Config : i01.torso.midStom
i01.torso.midStom.detach()
i01.torso.midStom.setMinMax(0.0,180.0)
i01.torso.midStom.setVelocity(-1.0)
i01.torso.midStom.setRest(90.0)
i01.torso.midStom.setPin(28)
i01.torso.midStom.attach("i01.left",28,90.0)
i01.torso.midStom.enableAutoDisable(True)
i01.torso.midStom.enableAutoEnable(True)
# Servo Config : i01.torso.topStom
i01.torso.topStom.detach()
i01.torso.topStom.setMinMax(60.0,120.0)
i01.torso.topStom.setVelocity(-1.0)
i01.torso.topStom.setRest(90.0)
i01.torso.topStom.setPin(27)
i01.torso.topStom.attach("i01.left",27,90.0)
i01.torso.topStom.enableAutoDisable(True)
i01.torso.topStom.enableAutoEnable(True)
| |
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404, reverse

from ..models import Site
from ..forms import SiteForm, ProcessForm
from ..helpers import (reload_services, update_supervisor, create_config_files, create_process_config, restart_supervisor,
                       get_supervisor_status, write_new_index_file, get_latest_commit, reload_nginx_config, list_executable_files)
from ...vms.models import VirtualMachine
from ....utils.emails import send_new_site_email
@login_required
def create_view(request):
    """Create a new site from a :class:`SiteForm` submission.

    Staff users with site requests still awaiting teacher approval are
    redirected to the approval page first.  On a successful save, every
    non-service member of the site's group (other than the requester) is
    emailed, a placeholder index page is written for non-dynamic sites,
    and services are reloaded.
    """
    # Redirect the user if any approvals need to be done.
    if request.user.is_staff:
        if request.user.site_requests.filter(teacher_approval__isnull=True).exists():
            return redirect("approve_site")
    if request.method == "POST":
        form = SiteForm(request.POST, user=request.user)
        if form.is_valid():
            site = form.save()
            # Notify the other human members of the new site's group.
            for user in site.group.users.filter(service=False):
                if not user == request.user:
                    send_new_site_email(user, site)
            # Dynamic sites are served by a process, so only non-dynamic
            # sites get a static placeholder index page.
            if not site.category == "dynamic":
                write_new_index_file(site)
            reload_services()
            return redirect("info_site", site_id=site.id)
    else:
        form = SiteForm(user=request.user)
    # Falls through here on GET and on invalid POST (re-rendering errors).
    context = {
        "form": form,
        "site": None,
        "project_domain": settings.PROJECT_DOMAIN
    }
    return render(request, "sites/create_site.html", context)
@login_required
def edit_view(request, site_id):
    """Edit an existing site; group members or superusers only.

    Users newly added to the site's group by this edit receive the
    new-site email; pre-existing members are not re-notified.
    """
    site = get_object_or_404(Site, id=site_id)
    if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
        raise PermissionDenied
    if request.method == "POST":
        # Snapshot membership before saving so that, afterwards, only
        # users added by this edit are emailed.
        current_members = list(site.group.users.filter(service=False).values_list('id', flat=True))
        form = SiteForm(request.POST, instance=site, user=request.user)
        if form.is_valid():
            site = form.save()
            for user in site.group.users.filter(service=False).exclude(id__in=current_members):
                send_new_site_email(user, site)
            reload_services()
            return redirect("info_site", site_id=site_id)
    else:
        form = SiteForm(instance=site, user=request.user)
    context = {
        "form": form,
        "site": site,
        "project_domain": settings.PROJECT_DOMAIN
    }
    return render(request, "sites/create_site.html", context)
@login_required
def delete_view(request, site_id):
    """Delete a site after the user re-types its name as confirmation.

    Superusers may delete any site; other users may only delete
    project-purpose sites whose group they belong to.
    """
    site = get_object_or_404(Site, id=site_id)
    if not request.user.is_superuser:
        is_project_member = (site.purpose == "project"
                             and site.group.users.filter(id=request.user.id).exists())
        if not is_project_member:
            raise PermissionDenied
    if request.method != "POST":
        return render(request, "sites/delete_site.html", {"site": site})
    # The typed confirmation must exactly match the site name.
    if request.POST.get("confirm", None) != site.name:
        messages.error(request, "Delete confirmation failed!")
        return redirect("delete_site", site_id=site_id)
    site.delete()
    messages.success(request, "Site {} deleted!".format(site.name))
    return redirect("index")
@login_required
def process_status_view(request, site_id):
    """Return the supervisor status text for a site's process as plain HTTP."""
    site = get_object_or_404(Site, id=site_id)
    authorized = (request.user.is_superuser
                  or site.group.users.filter(id=request.user.id).exists())
    if not authorized:
        raise PermissionDenied
    return HttpResponse(get_supervisor_status(site))
@login_required
def modify_process_view(request, site_id):
    """Create or edit the supervisor process attached to a dynamic site.

    Only dynamic sites may have a process; other categories are bounced
    back to the site info page with an error.  Accessing ``site.process``
    raises ``RelatedObjectDoesNotExist`` when no process exists yet, which
    is used to decide between the edit and create forms.
    """
    site = get_object_or_404(Site, id=site_id)
    if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
        raise PermissionDenied
    if site.category != "dynamic":
        messages.error(request, "You must set your site type to dynamic before adding a process.")
        return redirect("info_site", site_id=site_id)
    if request.method == "POST":
        try:
            # Existing process: bind the form to it for editing.
            form = ProcessForm(request.user, request.POST, instance=site.process)
        except Site.process.RelatedObjectDoesNotExist:
            # No process yet: create one, pre-linking it to this site.
            form = ProcessForm(request.user, request.POST, initial={"site": site.id})
        if form.is_valid():
            proc = form.save()
            # Regenerate the supervisor config and pick up the change.
            create_process_config(proc)
            update_supervisor()
            messages.success(request, "Process modified!")
            return redirect("info_site", site_id=proc.site.id)
    else:
        try:
            form = ProcessForm(request.user, instance=site.process)
        except Site.process.RelatedObjectDoesNotExist:
            form = ProcessForm(request.user, initial={"site": site.id})
    context = {
        "form": form,
        "site": site,
        # Candidate executables shown to the user as process entry points.
        "files": list_executable_files(site.path, level=3)
    }
    return render(request, "sites/create_process.html", context)
@login_required
def modify_vm_view(request, site_id):
    """Link or unlink a virtual machine to/from a VM-type site.

    POSTing ``vm=<id>`` links that VM (detaching any currently linked VM
    first); POSTing ``vm=__blank__`` (or no ``vm``) just unlinks.

    Fixes over the previous version: a malformed or unknown VM id now
    yields a 404 instead of an unhandled 500, and the POSTed id is
    validated against the same queryset shown in the picker, so
    non-superusers can no longer link a VM they do not have access to.
    """
    site = get_object_or_404(Site, id=site_id)
    if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
        raise PermissionDenied
    if not site.category == "vm":
        messages.error(request, "Not a VM site!")
        return redirect("info_site", site_id=site.id)
    # This queryset is used both to render the picker and to validate the
    # POSTed id, so the choices a user sees are exactly what they may link.
    if request.user.is_superuser:
        vms = VirtualMachine.objects.all().order_by("name")
    else:
        vms = VirtualMachine.objects.filter(users=request.user).order_by("name")
    if request.method == "POST":
        vm = request.POST.get("vm", None)
        if hasattr(site, "virtual_machine"):
            # Detach whichever VM is currently linked before linking a new one.
            current_vm = site.virtual_machine
            current_vm.site = None
            current_vm.save()
        if vm is not None and vm != "__blank__":
            try:
                vm = int(vm)
            except ValueError:
                # Non-numeric id in the form data: treat as not found.
                raise Http404
            new_vm = get_object_or_404(vms, id=vm)
            new_vm.site = site
            new_vm.save()
        create_config_files(site)
        reload_nginx_config()
        messages.success(request, "Virtual machine successfully linked!")
        return redirect("info_site", site_id=site.id)
    context = {
        "site": site,
        "vms": vms
    }
    return render(request, "sites/edit_vm.html", context)
@login_required
def delete_process_view(request, site_id):
    """Delete the supervisor process attached to a site, if one exists."""
    site = get_object_or_404(Site, id=site_id)
    if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
        raise PermissionDenied
    if request.method != "POST":
        return render(request, "sites/delete_process.html", {"site": site})
    try:
        # Accessing site.process raises when no process has ever been created.
        site.process.delete()
    except Site.process.RelatedObjectDoesNotExist:
        messages.error(request, "Process not found.")
    else:
        messages.success(request, "Process deleted!")
    return redirect("info_site", site_id=site.id)
@login_required
def restart_process_view(request, site_id):
    """Restart the supervisor-managed application backing a site."""
    site = get_object_or_404(Site, id=site_id)
    authorized = (request.user.is_superuser
                  or site.group.users.filter(id=request.user.id).exists())
    if not authorized:
        raise PermissionDenied
    restart_supervisor(site)
    messages.success(request, "Restarted supervisor application!")
    return redirect("info_site", site_id=site_id)
@login_required
def info_view(request, site_id):
    """Render the site information/dashboard page for group members."""
    site = get_object_or_404(Site, id=site_id)
    if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
        raise PermissionDenied
    context = {
        "site": site,
        "users": site.group.users.filter(service=False).order_by("username"),
        "status": get_supervisor_status(site),
        "latest_commit": get_latest_commit(site),
        # Force https in the displayed webhook URL even when Django saw the
        # request over plain http (e.g. behind a TLS-terminating proxy).
        "webhook_url": request.build_absolute_uri(reverse("git_webhook", kwargs={"site_id": site_id})).replace("http://", "https://")
    }
    return render(request, "sites/info_site.html", context)
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
from functools import partial
from typing import Any, List, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from jax import core
from jax._src import dtypes
from jax._src.lax import lax
from jax.interpreters import ad
from jax.interpreters import batching
from jax.interpreters import masking
from jax.interpreters import mlir
from jax.interpreters import xla
from jax._src.util import safe_zip
from jax._src.lib.mlir.dialects import mhlo
from jax._src.lib import xla_client
xops = xla_client.ops  # shorthand for the XLA op-builder namespace
_max = builtins.max  # explicit alias to the builtin `max`
# Loose type aliases used throughout this module; `Any` because arrays and
# dtypes are duck-typed here.
Array = Any
DType = Any
Shape = core.Shape
class ConvDimensionNumbers(NamedTuple):
  """Describes batch, spatial, and feature dimensions of a convolution.

  Args:
    lhs_spec: a tuple of nonnegative integer dimension numbers containing
      `(batch dimension, feature dimension, spatial dimensions...)`.
    rhs_spec: a tuple of nonnegative integer dimension numbers containing
      `(out feature dimension, in feature dimension, spatial dimensions...)`.
    out_spec: a tuple of nonnegative integer dimension numbers containing
      `(batch dimension, feature dimension, spatial dimensions...)`.
  """
  lhs_spec: Sequence[int]
  rhs_spec: Sequence[int]
  out_spec: Sequence[int]
# `dimension_numbers` as accepted by the public API: None (use defaults), an
# explicit ConvDimensionNumbers, or a (lhs_spec, rhs_spec, out_spec) triple of
# layout strings such as ('NHWC', 'HWIO', 'NHWC').
ConvGeneralDilatedDimensionNumbers = Union[
  None, ConvDimensionNumbers, Tuple[str, str, str]]
def conv_general_dilated(
    lhs: Array, rhs: Array, window_strides: Sequence[int],
    padding: Union[str, Sequence[Tuple[int, int]]],
    lhs_dilation: Optional[Sequence[int]] = None,
    rhs_dilation: Optional[Sequence[int]] = None,
    dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
    feature_group_count: int = 1, batch_group_count: int = 1,
    precision: lax.PrecisionLike = None,
    preferred_element_type: Optional[DType] = None) -> Array:
  """General n-dimensional convolution operator, with optional dilation.

  Wraps XLA's `Conv
  <https://www.tensorflow.org/xla/operation_semantics#conv_convolution>`_
  operator.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
      `n` `(low, high)` integer pairs that give the padding to apply before and
      after each spatial dimension.
    lhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
      is also known as transposed convolution.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    dimension_numbers: either `None`, a ``ConvDimensionNumbers`` object, or
      a 3-tuple ``(lhs_spec, rhs_spec, out_spec)``, where each element is a
      string of length `n+2`.
    feature_group_count: integer, default 1. See XLA HLO docs.
    batch_group_count: integer, default 1. See XLA HLO docs.
    precision: Optional. Either ``None``, which means the default precision for
      the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``), a string (e.g. 'highest' or
      'fastest', see the ``jax.default_matmul_precision`` context manager), or a
      tuple of two :class:`~jax.lax.Precision` enums or strings indicating precision of
      ``lhs`` and ``rhs``.
    preferred_element_type: Optional. Either ``None``, which means the default
      accumulation type for the input types, or a datatype, indicating to
      accumulate results to and return a result with that datatype.

  Returns:
    An array containing the convolution result.

  In the string case of ``dimension_numbers``, each character identifies by
  position:

  - the batch dimensions in ``lhs``, ``rhs``, and the output with the character
    'N',
  - the feature dimensions in `lhs` and the output with the character 'C',
  - the input and output feature dimensions in rhs with the characters 'I'
    and 'O' respectively, and
  - spatial dimension correspondences between lhs, rhs, and the output using
    any distinct characters.

  For example, to indicate dimension numbers consistent with the ``conv``
  function with two spatial dimensions, one could use ``('NCHW', 'OIHW',
  'NCHW')``. As another example, to indicate dimension numbers consistent with
  the TensorFlow Conv2D operation, one could use ``('NHWC', 'HWIO', 'NHWC')``.
  When using the latter form of convolution dimension specification, window
  strides are associated with spatial dimension character labels according to
  the order in which the labels appear in the ``rhs_spec`` string, so that
  ``window_strides[0]`` is matched with the dimension corresponding to the first
  character appearing in rhs_spec that is not ``'I'`` or ``'O'``.

  If ``dimension_numbers`` is ``None``, the default is ``('NCHW', 'OIHW',
  'NCHW')`` (for a 2D convolution).
  """
  dnums = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
  if lhs_dilation is None:
    lhs_dilation = (1,) * (lhs.ndim - 2)
  elif isinstance(padding, str) and not len(lhs_dilation) == lhs_dilation.count(1):
    # Any lhs_dilation factor > 1 makes this a transposed convolution, for
    # which the 'SAME'/'VALID' padding strings would be ambiguous.
    raise ValueError(
        "String padding is not implemented for transposed convolution "
        "using this op. Please either exactly specify the required padding or "
        "use conv_transpose.")
  if rhs_dilation is None:
    rhs_dilation = (1,) * (rhs.ndim - 2)
  if isinstance(padding, str):
    # Resolve 'SAME'/'VALID' into explicit (low, high) pairs, using the
    # dilation-adjusted ("effective") kernel size per spatial dimension.
    lhs_perm, rhs_perm, _ = dnums
    rhs_shape = np.take(rhs.shape, rhs_perm)[2:]  # type: ignore[index]
    effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]
    padding = lax.padtype_to_pads(
        np.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape,  # type: ignore[index]
        window_strides, padding)
  # Canonicalize the accumulation dtype before binding so the primitive's
  # parameters are hashable/stable.
  preferred_element_type = (
      None if preferred_element_type is None else
      dtypes.canonicalize_dtype(np.dtype(preferred_element_type)))
  return conv_general_dilated_p.bind(
      lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
      lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
      dimension_numbers=dnums,
      feature_group_count=feature_group_count,
      batch_group_count=batch_group_count,
      lhs_shape=lhs.shape, rhs_shape=rhs.shape,
      precision=lax.canonicalize_precision(precision),
      preferred_element_type=preferred_element_type)
### convenience wrappers around traceables
def conv(lhs: Array, rhs: Array, window_strides: Sequence[int],
         padding: str, precision: lax.PrecisionLike = None,
         preferred_element_type: Optional[DType] = None) -> Array:
  """Convenience wrapper around `conv_general_dilated` with no dilation.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers giving the inter-window strides.
    padding: either the string `'SAME'` or the string `'VALID'`.
    precision: Optional. Either ``None`` (backend default precision), a
      :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``), or a tuple of two
      :class:`~jax.lax.Precision` enums for ``lhs`` and ``rhs`` respectively.
    preferred_element_type: Optional. Either ``None`` (the default accumulation
      type for the input types) or a datatype to accumulate to and return.

  Returns:
    An array containing the convolution result.
  """
  # All dilation/grouping parameters take their defaults; only strides,
  # padding, and precision options are forwarded.
  return conv_general_dilated(
      lhs, rhs, window_strides, padding,
      precision=precision,
      preferred_element_type=preferred_element_type)
def conv_with_general_padding(lhs: Array, rhs: Array,
                              window_strides: Sequence[int],
                              padding: Union[str, Sequence[Tuple[int, int]]],
                              lhs_dilation: Optional[Sequence[int]],
                              rhs_dilation: Optional[Sequence[int]],
                              precision: lax.PrecisionLike = None,
                              preferred_element_type: Optional[DType] = None) -> Array:
  """Convenience wrapper around `conv_general_dilated`.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
      `n` `(low, high)` integer pairs that give the padding to apply before and
      after each spatial dimension.
    lhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
      is also known as transposed convolution.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    precision: Optional. Either ``None``, which means the default precision for
      the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      :class:`~jax.lax.Precision` enums indicating precision of ``lhs`` and ``rhs``.
    preferred_element_type: Optional. Either ``None``, which means the default
      accumulation type for the input types, or a datatype, indicating to
      accumulate results to and return a result with that datatype.

  Returns:
    An array containing the convolution result.
  """
  # Dimension numbers and grouping take their defaults; only dilations,
  # strides, padding, and precision options are forwarded.
  return conv_general_dilated(
      lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
      rhs_dilation=rhs_dilation, precision=precision,
      preferred_element_type=preferred_element_type)
def _conv_transpose_padding(k, s, padding):
"""Calculate before and after padding for a dim of transposed convolution.
Args:
k: int: kernel dimension.
s: int: dimension stride value.
padding: 'same' or 'valid' padding mode for original forward conv.
Returns:
2-tuple: ints: before and after padding for transposed convolution.
"""
if padding == 'SAME':
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(np.ceil(pad_len / 2))
elif padding == 'VALID':
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError('Padding mode must be `SAME` or `VALID`.')
pad_b = pad_len - pad_a
return pad_a, pad_b
def _flip_axes(x, axes):
"""Flip ndarray 'x' along each axis specified in axes tuple."""
for axis in axes:
x = np.flip(x, axis)
return x
def conv_transpose(lhs: Array, rhs: Array, strides: Sequence[int],
                   padding: Union[str, Sequence[Tuple[int, int]]],
                   rhs_dilation: Optional[Sequence[int]] = None,
                   dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
                   transpose_kernel: bool = False,
                   precision: lax.PrecisionLike = None,
                   preferred_element_type: Optional[DType] = None) -> Array:
  """Convenience wrapper for calculating the N-d convolution "transpose".

  This function directly calculates a fractionally strided conv rather than
  indirectly calculating the gradient (transpose) of a forward convolution.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    strides: sequence of `n` integers, sets fractional stride.
    padding: 'SAME', 'VALID' will set as transpose of corresponding forward
      conv, or a sequence of `n` integer 2-tuples describing before-and-after
      padding for each `n` spatial dimension.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    dimension_numbers: tuple of dimension descriptors as in
      lax.conv_general_dilated. Defaults to tensorflow convention.
    transpose_kernel: if True flips spatial axes and swaps the input/output
      channel axes of the kernel. This makes the output of this function identical
      to the gradient-derived functions like keras.layers.Conv2DTranspose
      applied to the same kernel. For typical use in neural nets this is completely
      pointless and just makes input/output channel specification confusing.
    precision: Optional. Either ``None``, which means the default precision for
      the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      :class:`~jax.lax.Precision` enums indicating precision of ``lhs`` and ``rhs``.
    preferred_element_type: Optional. Either ``None``, which means the default
      accumulation type for the input types, or a datatype, indicating to
      accumulate results to and return a result with that datatype.

  Returns:
    Transposed N-d convolution, with output padding following the conventions of
    keras.layers.Conv2DTranspose.
  """
  assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) >= 2
  ndims = len(lhs.shape)
  one = (1,) * (ndims - 2)
  # Set dimensional layout defaults if not specified.
  if dimension_numbers is None:
    if ndims == 2:
      dimension_numbers = ('NC', 'IO', 'NC')
    elif ndims == 3:
      dimension_numbers = ('NHC', 'HIO', 'NHC')
    elif ndims == 4:
      dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
    elif ndims == 5:
      dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
    else:
      # NOTE(review): the message undercounts — defaults exist through
      # ndims == 5 (3 spatial dims); this fires only for ndims > 5.
      raise ValueError('No 4+ dimensional dimension_number defaults.')
  dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
  k_shape = np.take(rhs.shape, dn.rhs_spec)
  k_sdims = k_shape[2:]  # type: ignore[index]
  # Calculate correct output shape given padding and strides.
  pads: Union[str, Sequence[Tuple[int, int]]]
  if isinstance(padding, str) and padding in {'SAME', 'VALID'}:
    if rhs_dilation is None:
      rhs_dilation = (1,) * (rhs.ndim - 2)
    # Per-dimension padding that inverts the forward conv's padding mode,
    # computed from the dilation-adjusted kernel size.
    effective_k_size = map(lambda k, r: (k-1) * r + 1, k_sdims, rhs_dilation)
    pads = [_conv_transpose_padding(k, s, padding)
            for k,s in zip(effective_k_size, strides)]
  else:
    pads = padding
  if transpose_kernel:
    # flip spatial dims and swap input / output channel axes
    rhs = _flip_axes(rhs, np.array(dn.rhs_spec)[2:])
    rhs = np.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
  # Fractional striding: unit window strides with lhs_dilation = strides.
  return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,
                              precision=precision,
                              preferred_element_type=preferred_element_type)
def _conv_general_dilated_shape_rule(
    lhs: core.ShapedArray, rhs: core.ShapedArray, *, window_strides, padding,
    lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
    batch_group_count, **unused_kwargs) -> Tuple[int, ...]:
  """Shape rule for conv_general_dilated: validates ranks, group counts, and
  layouts, then returns the output shape in `out_spec` order."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  if len(lhs.shape) != len(rhs.shape):
    msg = ("conv_general_dilated lhs and rhs must have the same number of "
           "dimensions, but got {} and {}.")
    raise ValueError(msg.format(lhs.shape, rhs.shape))
  if not feature_group_count > 0:
    msg = ("conv_general_dilated feature_group_count "
           "must be a positive integer, got {}.")
    raise ValueError(msg.format(feature_group_count))
  # Feature grouping: lhs features must split evenly into groups, and each
  # group must match the rhs input-feature dimension.
  lhs_feature_count = lhs.shape[dimension_numbers.lhs_spec[1]]
  quot, rem = divmod(lhs_feature_count, feature_group_count)
  if rem:
    msg = ("conv_general_dilated feature_group_count must divide lhs feature "
           "dimension size, but {} does not divide {}.")
    raise ValueError(msg.format(feature_group_count, lhs_feature_count))
  if not core.symbolic_equal_dim(quot, rhs.shape[dimension_numbers.rhs_spec[1]]):
    msg = ("conv_general_dilated lhs feature dimension size divided by "
           "feature_group_count must equal the rhs input feature dimension "
           "size, but {} // {} != {}.")
    raise ValueError(msg.format(lhs_feature_count, feature_group_count,
                                rhs.shape[dimension_numbers.rhs_spec[1]]))
  if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:
    msg = ("conv_general_dilated rhs output feature dimension size must be a "
           "multiple of feature_group_count, but {} is not a multiple of {}.")
    raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
                                feature_group_count))
  if not batch_group_count > 0:
    msg = ("conv_general_dilated batch_group_count "
           "must be a positive integer, got {}.")
    raise ValueError(msg.format(batch_group_count))
  # Batch grouping: lhs batch must split evenly, and rhs output features
  # must be a multiple of the group count.
  lhs_batch_count = lhs.shape[dimension_numbers.lhs_spec[0]]
  if batch_group_count > 1 and lhs_batch_count % batch_group_count != 0:
    msg = ("conv_general_dilated batch_group_count must divide lhs batch "
           "dimension size, but {} does not divide {}.")
    raise ValueError(msg.format(batch_group_count, lhs_batch_count))
  if rhs.shape[dimension_numbers.rhs_spec[0]] % batch_group_count:
    msg = ("conv_general_dilated rhs output feature dimension size must be a "
           "multiple of batch_group_count, but {} is not a multiple of {}.")
    raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
                                batch_group_count))
  if batch_group_count > 1 and feature_group_count > 1:
    msg = ("At most one of batch_group_count and feature_group_count may be > "
           "1, got batch_group_count={} and feature_group_count={}")
    raise ValueError(msg.format(batch_group_count, feature_group_count))
  if len(_conv_sdims(dimension_numbers.rhs_spec)) != len(window_strides):
    msg = ("conv_general_dilated window and window_strides must have "
           "the same number of dimensions, but got {} and {}")
    raise ValueError(
        msg.format(len(_conv_sdims(dimension_numbers.rhs_spec)), len(window_strides)))
  # Compute the output shape in canonical (batch, feature, spatial...) order,
  # then permute it back to the caller's out_spec layout.
  lhs_perm, rhs_perm, out_perm = dimension_numbers
  lhs_trans = lax._dilate_shape(np.take(lhs.shape, lhs_perm), lhs_dilation)
  rhs_trans = lax._dilate_shape(np.take(rhs.shape, rhs_perm), rhs_dilation)
  out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding,
                               batch_group_count)
  return tuple(np.take(out_trans, np.argsort(out_perm)))  # type: ignore[arg-type]
def _conv_general_dilated_dtype_rule(
    lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, preferred_element_type, **unused_kwargs):
  """Result dtype for conv_general_dilated: the operands' common dtype, or the
  validated `preferred_element_type` when one was requested."""
  result_dtype = lax.naryop_dtype_rule(lax._input_dtype, [lax._any, lax._any],
                                       'conv_general_dilated', lhs, rhs)
  if preferred_element_type is not None:
    lax._validate_preferred_element_type(result_dtype, preferred_element_type)
    result_dtype = preferred_element_type
  return result_dtype
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
_conv_sdims = lambda spec: spec[2:]
# Understanding the convolution transpose rules:
# Ignoring the spatial dimensions, let m = batch, j = input feature,
# k = output feature.
#
# Convolution computes the following contraction:
# Forward: [m, j] [j, k] -> [m, k]
#
# The transposes are similar to the rules for transposing a matmul:
# LHS transpose: [m, k] [k, j] -> [m, j]
# RHS transpose: [j, m] [m, k] -> [j, k]
#
# With feature grouping, we have the following signatures:
# Forward: [m, gj] [j, gk] -> [m, gk]
# LHS transpose: [m, gk] [k, gj] -> [m, gj]
# --> implemented as feature grouping after transposing the group from the
# kernel input features to the kernel output features.
# RHS transpose: [gj, m] [m, gk] -> [j, gk]
# --> which is batch grouping.
#
# With batch grouping, we have the following signatures:
# Forward: [gm,j] [j,gk]->[m,gk]
# LHS transpose: [m, gk][gk, j] -> [gm, j]
# --> implemented as feature grouping with transposing the group on the kernel
# and the output.
# RHS transpose: [j, gm][m, gk] -> [j, gk]
# --> which is feature grouping.
def _conv_general_dilated_transpose_lhs(
    g, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, batch_group_count,
    lhs_shape, rhs_shape, precision, preferred_element_type):
  """VJP w.r.t. lhs: a conv of the cotangent `g` with the reversed kernel.

  See the "Understanding the convolution transpose rules" comment above for
  the contraction signatures this implements.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  assert batch_group_count == 1 or feature_group_count == 1
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  t_rhs_spec = _conv_spec_transpose(rhs_spec)
  if feature_group_count > 1:
    # in addition to switching the dims in the spec, need to move the feature
    # group axis into the transposed rhs's output feature dim
    rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)
    rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
  elif batch_group_count > 1:
    # Batch-grouped forward conv transposes to a feature-grouped conv
    # (see the signature table in the comment block above).
    rhs = _reshape_axis_out_of(rhs_spec[0], batch_group_count, rhs)
    rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
    feature_group_count = batch_group_count
  trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)
  padding = _conv_general_vjp_lhs_padding(
      np.take(lhs_shape, lhs_sdims), np.take(rhs_shape, rhs_sdims),
      window_strides, np.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  # Spatially reverse the kernel, then run the conv with strides and
  # lhs_dilation swapped relative to the forward pass.
  revd_weights = lax.rev(rhs, rhs_sdims)
  out = conv_general_dilated(
      g, revd_weights, window_strides=lhs_dilation, padding=padding,
      lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
      dimension_numbers=trans_dimension_numbers,
      feature_group_count=feature_group_count,
      batch_group_count=1, precision=precision,
      preferred_element_type=preferred_element_type)
  if batch_group_count > 1:
    # Move the group axis back from the feature dim into the batch dim.
    out = _reshape_axis_out_of(lhs_spec[1], batch_group_count, out)
    out = _reshape_axis_into(lhs_spec[1], lhs_spec[0], out)
  return out
def _conv_general_dilated_transpose_rhs(
    g, lhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers: ConvDimensionNumbers, feature_group_count: int,
    batch_group_count: int, lhs_shape, rhs_shape, precision,
    preferred_element_type):
  """VJP w.r.t. rhs: a conv of the input `lhs` with the cotangent `g`.

  See the "Understanding the convolution transpose rules" comment above for
  the contraction signatures this implements.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  if np.size(g) == 0:
    # Avoids forming degenerate convolutions where the RHS has spatial size 0.
    # Awkwardly, we don't have an aval for the rhs readily available, so instead
    # of returning an ad_util.Zero instance here, representing a symbolic zero
    # value, we instead return a None, which is meant to represent having no
    # cotangent at all (and is thus incorrect for this situation), since the two
    # are treated the same operationally.
    # TODO(mattjj): adjust defbilinear so that the rhs aval is available here
    return None
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)
  assert batch_group_count == 1 or feature_group_count == 1
  # Grouping swaps roles under the rhs transpose: batch grouping becomes
  # feature grouping and vice versa (see comment block above).
  if batch_group_count > 1:
    feature_group_count = batch_group_count
    batch_group_count = 1
  elif feature_group_count > 1:
    batch_group_count = feature_group_count
    feature_group_count = 1
  trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
  padding = _conv_general_vjp_rhs_padding(
      np.take(lhs_shape, lhs_sdims), np.take(rhs_shape, rhs_sdims),
      window_strides, np.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  # Strides and rhs_dilation swap roles relative to the forward pass.
  return conv_general_dilated(
      lhs, g, window_strides=rhs_dilation, padding=padding,
      lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
      dimension_numbers=trans_dimension_numbers,
      feature_group_count=feature_group_count,
      batch_group_count=batch_group_count, precision=precision,
      preferred_element_type=preferred_element_type)
def _conv_general_dilated_translation_rule(
    ctx, avals_in, avals_out, lhs, rhs, *, window_strides, padding,
    lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
    batch_group_count, precision, expand_complex_convolutions,
    preferred_element_type, **unused_kwargs):
  """XLA translation rule: emits ConvGeneralDilated, optionally expanding a
  complex-valued convolution into real-valued ones."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  dimension_numbers = _conv_general_proto(dimension_numbers)
  precision_config = lax._precision_config(precision)
  dtype = avals_in[0].dtype
  if expand_complex_convolutions and np.issubdtype(dtype, np.complexfloating):
    # We use a trick for complex multiplication due to Gauss which uses three
    # multiplications and five additions; instead of the naive method of four
    # multiplications and two additions.
    # https://en.wikipedia.org/wiki/Multiplication_algorithm#Complex_multiplication_algorithm
    #
    # This performance win comes with a trade-off in accuracy; especially in
    # cases when the real and imaginary differ hugely in magnitude. The relative
    # error bound (e.g. 1p-24 in case of float32) would be relative to the
    # maximum of real and imaginary parts of the result instead of being
    # satisfied by the real and imaginary parts independently of each other.
    if preferred_element_type is not None:
      # Convert complex dtype to types used for real and imaginary parts
      assert np.issubdtype(preferred_element_type, np.complexfloating)
      preferred_element_type = xla.dtype_to_primitive_type(np.dtype(
          np.float64 if preferred_element_type == np.complex128
          else np.float32))
    conv = lambda x, y: xops.ConvGeneralDilated(
        x, y, window_strides, padding, lhs_dilation, rhs_dilation,
        dimension_numbers, feature_group_count, batch_group_count,
        precision_config=precision_config,
        preferred_element_type=preferred_element_type)
    lhs_real, lhs_imag = xops.Real(lhs), xops.Imag(lhs)
    rhs_real, rhs_imag = xops.Real(rhs), xops.Imag(rhs)
    # Gauss's three-multiplication decomposition:
    #   real = k1 - k3, imag = k1 + k2.
    k1 = conv(xops.Add(lhs_real, lhs_imag), rhs_real)
    k2 = conv(lhs_real, xops.Sub(rhs_imag, rhs_real))
    k3 = conv(lhs_imag, xops.Add(rhs_real, rhs_imag))
    return [xops.Complex(xops.Sub(k1, k3), xops.Add(k1, k2))]
  if preferred_element_type is not None:
    preferred_element_type = xla.dtype_to_primitive_type(preferred_element_type)
  return [xops.ConvGeneralDilated(
      lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
      dimension_numbers, feature_group_count, batch_group_count,
      precision_config=precision_config,
      preferred_element_type=preferred_element_type)]
def _conv_general_dilated_batch_rule(
    batched_args, batch_dims, *, window_strides, padding,
    lhs_dilation, rhs_dilation, dimension_numbers,
    feature_group_count, batch_group_count, precision,
    preferred_element_type, **unused_kwargs):
  """Batching (vmap) rule for conv_general_dilated.

  Implements vmap by folding the mapped axis into an existing convolution
  dimension (lhs batch/feature dim, rhs output-feature dim, or one of the
  group counts), running a single unbatched convolution, and then splitting
  the mapped axis back out of the output.

  Returns:
    (out, out_bdim): the batched output and the axis carrying the mapped dim.
  """
  # Simultaneous batch and feature grouping is not supported here.
  assert batch_group_count == 1 or feature_group_count == 1
  lhs, rhs = batched_args
  lhs_bdim, rhs_bdim = batch_dims
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  if lhs_bdim is not None and rhs_bdim is not None:
    # Both operands batched: fold the mapped axis into a group count so each
    # mapped slice of lhs convolves only with its own slice of rhs.
    assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]
    if batch_group_count > 1:
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
      batch_group_count *= lhs.shape[lhs_bdim]
    else:
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)
      feature_group_count *= lhs.shape[lhs_bdim]
    new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
    out = conv_general_dilated(
      new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,
      dimension_numbers, feature_group_count=feature_group_count,
      batch_group_count=batch_group_count, precision=precision,
      preferred_element_type=preferred_element_type)
    # The mapped dim comes back fused into the output feature dim.
    out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)
    return out, out_spec[1]
  elif lhs_bdim is not None:
    # Only lhs is batched.
    if batch_group_count == 1:
      # Fold the mapped axis into the lhs batch dimension.
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
      out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, precision=precision,
                                 preferred_element_type=preferred_element_type)
      out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
      return out, out_spec[0]
    else:
      # With batch groups, the groups must stay outermost in the lhs batch
      # dim: split the groups out, fold the mapped axis inside them, then
      # merge everything back into a single batch dimension.
      new_lhs = _reshape_axis_out_of(lhs_spec[0] + int(lhs_bdim <= lhs_spec[0]),
                                     batch_group_count, lhs)
      new_lhs = _reshape_axis_into(lhs_bdim + int(lhs_spec[0] < lhs_bdim),
                                   lhs_spec[0] + 1,
                                   new_lhs)
      new_lhs = _reshape_axis_into(lhs_spec[0], lhs_spec[0], new_lhs)
      out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision,
                                 preferred_element_type=preferred_element_type)
      out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
      return out, out_spec[0]
  elif rhs_bdim is not None:
    # Only rhs is batched.
    if feature_group_count == 1 and batch_group_count == 1:
      # Fold the mapped axis into the rhs output-feature dimension.
      new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
      out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision,
                                 preferred_element_type=preferred_element_type)
      out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)
      return out, out_spec[1]
    else:
      # groups need to be outermost, so we need to factor them out of the
      # rhs output feature dim, then factor the batch dim into the remaining rhs
      # output feature dim, then put groups back in. We do something
      # similar on the output. An alternative which would require more FLOPs but
      # fewer reshapes would be to broadcast lhs.
      group_count = (feature_group_count if feature_group_count > 1
                     else batch_group_count)
      new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),
                                     group_count, rhs)
      new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),
                                   rhs_spec[0] + 1,
                                   new_rhs)
      new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)
      out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision,
                                 preferred_element_type=preferred_element_type)
      out = _reshape_axis_out_of(out_spec[1], group_count, out)
      out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)
      out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)
      return out, out_spec[1]
def _conv_general_dilated_masking_rule(
    padded_vals, logical_shapes, window_strides, padding, lhs_dilation,
    rhs_dilation, dimension_numbers, feature_group_count, batch_group_count,
    lhs_shape, rhs_shape, precision, preferred_element_type):
  """Masking rule for conv_general_dilated.

  Zeroes out the physically-padded regions of the lhs spatial dimensions and
  the rhs input-feature dimension before convolving, so padding values cannot
  leak into the result.  Masking of the rhs *spatial* (filter window)
  dimensions is not implemented and is asserted against below.
  """
  lhs, rhs = padded_vals
  logical_lhs_shape, logical_rhs_shape = logical_shapes
  o, i, *window_dimensions = dimension_numbers.rhs_spec
  # The filter's window dims must not be padded (not yet supported).
  assert (np.all(np.take(rhs.shape, window_dimensions)
                 == np.take(logical_rhs_shape, window_dimensions))), \
      "Conv filter masking not yet implemented."
  n, c, *padded_dimensions = dimension_numbers.lhs_spec
  return conv_general_dilated(
      lax._masked(lhs, logical_lhs_shape, padded_dimensions),
      lax._masked(rhs, logical_rhs_shape, (i,)),
      window_strides=window_strides, padding=padding,
      lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation,
      dimension_numbers=dimension_numbers,
      feature_group_count=feature_group_count,
      batch_group_count=batch_group_count,
      precision=precision,
      preferred_element_type=preferred_element_type)
# Primitive definition; the default translation rule does not expand complex
# convolutions.
conv_general_dilated_p = lax.standard_primitive(
    _conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,
    'conv_general_dilated', partial(_conv_general_dilated_translation_rule,
                                    expand_complex_convolutions=False))
# TODO(b/161124619, b/161126248): XLA does not support complex convolution on
# GPU, and on CPU it uses a slow loop-based implementation;
# on these backends, lower complex convolutions away.
xla.register_translation(conv_general_dilated_p,
                         partial(_conv_general_dilated_translation_rule,
                                 expand_complex_convolutions=True),
                         platform='cpu')
xla.register_translation(conv_general_dilated_p,
                         partial(_conv_general_dilated_translation_rule,
                                 expand_complex_convolutions=True),
                         platform='gpu')
# Autodiff: conv is bilinear in (lhs, rhs).
ad.defbilinear(conv_general_dilated_p,
               _conv_general_dilated_transpose_lhs,
               _conv_general_dilated_transpose_rhs)
# vmap and masking support.
batching.primitive_batchers[conv_general_dilated_p] = \
    _conv_general_dilated_batch_rule
masking.masking_rules[conv_general_dilated_p] = \
    _conv_general_dilated_masking_rule
def _complex_mul(mul, x, y):
# We use a trick for complex multiplication sometimes attributed to Gauss
# which uses three multiplications and five additions; instead of the naive
# method of four multiplications and two additions.
# https://en.wikipedia.org/wiki/Multiplication_algorithm#Complex_multiplication_algorithm
#
# This performance win comes with a trade-off in accuracy; especially in
# cases when the real and imaginary differ hugely in magnitude. The relative
# error bound (e.g. 1p-24 in case of float32) would be relative to the
# maximum of real and imaginary parts of the result instead of being
# satisfied by the real and imaginary parts independently of each other.
x_re, x_im = lax.real(x), lax.imag(x)
y_re, y_im = lax.real(y), lax.imag(y)
k1 = mul(lax.add(x_re, x_im), y_re)
k2 = mul(x_re, lax.sub(y_im, y_re))
k3 = mul(x_im, lax.add(y_re, y_im))
return lax.complex(lax.sub(k1, k3), lax.add(k1, k2))
_real_dtype = lambda dtype: np.finfo(dtype).dtype
def _conv_general_dilated_lower(
    ctx, lhs, rhs, *, window_strides, padding,
    lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
    batch_group_count, precision, preferred_element_type,
    expand_complex_convolutions=False, **unused_kwargs):
  """MLIR (mhlo) lowering rule for conv_general_dilated.

  When `expand_complex_convolutions` is set (registered for CPU/GPU below),
  complex-valued convolutions are rewritten as three real convolutions via
  `_complex_mul`; otherwise a single `mhlo.ConvOp` is emitted.
  """
  lhs_aval, rhs_aval = ctx.avals_in
  aval_out, = ctx.avals_out
  assert isinstance(dimension_numbers, ConvDimensionNumbers)
  dtype = lhs_aval.dtype
  if expand_complex_convolutions and np.issubdtype(dtype, np.complexfloating):
    if preferred_element_type is not None:
      # Convert complex dtype to types used for real and imaginary parts
      assert np.issubdtype(preferred_element_type, np.complexfloating)
      preferred_element_type = _real_dtype(preferred_element_type)
    # Lower via three real convolutions (Gauss trick in _complex_mul).
    complex_conv = mlir.lower_fun(
      partial(
        _complex_mul,
        partial(conv_general_dilated, window_strides=window_strides,
                padding=padding, lhs_dilation=lhs_dilation,
                rhs_dilation=rhs_dilation, dimension_numbers=dimension_numbers,
                feature_group_count=feature_group_count,
                batch_group_count=batch_group_count, precision=precision,
                preferred_element_type=preferred_element_type)),
      multiple_results=False)
    return complex_conv(ctx, lhs, rhs)
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  # Translate the lax ConvDimensionNumbers into the mhlo attribute form.
  dnums = mhlo.ConvDimensionNumbers.get(
    input_batch_dimension=lhs_spec[0],
    input_feature_dimension=lhs_spec[1],
    input_spatial_dimensions=list(lhs_spec[2:]),
    kernel_output_feature_dimension=rhs_spec[0],
    kernel_input_feature_dimension=rhs_spec[1],
    kernel_spatial_dimensions=list(rhs_spec[2:]),
    output_batch_dimension=out_spec[0],
    output_feature_dimension=out_spec[1],
    output_spatial_dimensions=list(out_spec[2:]))
  num_spatial_dims = len(rhs_spec) - 2
  # window_reversal is all-False: no filter flipping is requested.
  window_reversal = mlir.dense_bool_elements([False] * num_spatial_dims)
  return [mhlo.ConvOp(mlir.aval_to_ir_type(aval_out), lhs, rhs,
                      mlir.dense_int_elements(window_strides),
                      mlir.dense_int_elements(padding),
                      mlir.dense_int_elements(lhs_dilation),
                      mlir.dense_int_elements(rhs_dilation),
                      window_reversal, dnums,
                      mlir.i64_attr(feature_group_count),
                      mlir.i64_attr(batch_group_count),
                      lax.precision_attr(precision)).result]
# Default lowering emits mhlo.ConvOp directly; on CPU and GPU, register
# variants that expand complex convolutions into real ones (see the TODO on
# the XLA translation registrations above).
mlir.register_lowering(conv_general_dilated_p, _conv_general_dilated_lower)
mlir.register_lowering(
    conv_general_dilated_p,
    partial(_conv_general_dilated_lower, expand_complex_convolutions=True),
    platform='cpu')
mlir.register_lowering(
    conv_general_dilated_p,
    partial(_conv_general_dilated_lower, expand_complex_convolutions=True),
    platform='gpu')
def _reshape_axis_into(src, dst, x):
perm = [i for i in range(x.ndim) if i != src]
perm.insert(dst, src)
new_shape = list(np.delete(x.shape, src))
new_shape[dst] *= x.shape[src]
return lax.reshape(x, new_shape, perm)
def _reshape_axis_out_of(src, size1, x):
shape = list(x.shape)
size2, ragged = divmod(shape[src], size1)
assert not ragged
shape[src:src+1] = [size1, size2]
return lax.reshape(x, shape)
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
"""Check that conv shapes are valid and are consistent with window_strides."""
if len(lhs_shape) != len(rhs_shape):
msg = "Arguments to {} must have same rank, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if len(lhs_shape) < 2:
msg = "Arguments to {} must have rank at least 2, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if lhs_shape[1] != rhs_shape[1]:
msg = "Arguments to {} must agree on input feature size, got {} and {}."
raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
lax._check_shapelike(name, "window_strides", window_strides)
if not np.all(np.greater(window_strides, 0)):
msg = "All elements of window_strides must be positive, got {}."
raise TypeError(msg.format(window_strides))
if len(window_strides) != len(lhs_shape) - 2:
msg = "{} window_strides has wrong length: expected {}, got {}."
expected_length = len(lhs_shape) - 2
raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1):
  """Compute the shape tuple of a conv given input shapes in canonical order.

  Shapes are (batch, feature, *spatial).  `pads` may be a padding-type
  string or an explicit per-spatial-dim list of (lo, hi) pairs.
  """
  if isinstance(pads, str):
    pads = lax.padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
  if len(pads) != len(lhs_shape) - 2:
    raise TypeError(
        "Wrong number of explicit pads for convolution: expected {}, got {}."
        .format(len(lhs_shape) - 2, len(pads)))
  pad_totals = np.sum(np.array(pads).reshape(-1, 2), axis=1)
  lhs_padded = np.add(lhs_shape[2:], pad_totals)
  out_space = np.maximum(0, core.stride_shape(lhs_padded, rhs_shape[2:], strides))
  if batch_group_count > 1:
    assert lhs_shape[0] % batch_group_count == 0
    out_batch = lhs_shape[0] // batch_group_count
  else:
    out_batch = lhs_shape[0]
  return tuple((out_batch, rhs_shape[0]) + tuple(out_space))
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                             dimension_numbers):
  """Compute a conv output shape for inputs in arbitrary layouts.

  Permutes the shapes into canonical order via `dimension_numbers`, computes
  the canonical output shape, and permutes back to the output layout.
  """
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  canon_out = conv_shape_tuple(np.take(lhs_shape, lhs_perm),
                               np.take(rhs_shape, rhs_perm),
                               window_strides, padding)
  return tuple(np.take(canon_out, np.argsort(out_perm)))
def conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                               dimension_numbers):
  """Compute the output shape of a transposed convolution.

  Shapes are given in the caller's layout; `dimension_numbers` is used to
  permute into canonical (batch, feature, *spatial) order and back.
  """
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  lhs_trans = np.take(lhs_shape, lhs_perm)
  rhs_trans = np.take(rhs_shape, rhs_perm)
  if isinstance(padding, str):
    # Resolve 'SAME'/'VALID' into explicit per-dimension pad pairs.
    padding = [_conv_transpose_padding(k, s, padding)
               for k,s in zip(rhs_trans[2:], window_strides)]
  # Total (lo + hi) padding per spatial dimension.
  padding = list(map(np.sum, padding))
  # Unpadded transposed-conv spatial size: (in - 1) * stride - kernel + 2.
  unpad_out_space = [(i-1) * s - k + 2
                     for i, k, s in zip(lhs_trans[2:],
                                        rhs_trans[2:],
                                        window_strides)]
  out_space = np.sum([unpad_out_space, padding], axis=0).tolist()
  out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))
  return tuple(np.take(out_trans, np.argsort(out_perm)))
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers
                           ) -> ConvDimensionNumbers:
  """Converts convolution `dimension_numbers` to a `ConvDimensionNumbers`.
  Args:
    lhs_shape: tuple of nonnegative integers, shape of the convolution input.
    rhs_shape: tuple of nonnegative integers, shape of the convolution kernel.
    dimension_numbers: None or a tuple/list of strings or a ConvDimensionNumbers
      object following the convolution dimension number specification format in
      xla_client.py.
  Returns:
    A `ConvDimensionNumbers` object that represents `dimension_numbers` in the
    canonical form used by lax functions.
  """
  # Already canonical: pass through unchanged.
  if isinstance(dimension_numbers, ConvDimensionNumbers):
    return dimension_numbers
  if len(lhs_shape) != len(rhs_shape):
    msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
    raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))
  if dimension_numbers is None:
    # Default: identity permutation for all three specs.
    iota = tuple(range(len(lhs_shape)))
    return ConvDimensionNumbers(iota, iota, iota)
  elif isinstance(dimension_numbers, (list, tuple)):
    # String-spec form, e.g. ('NHWC', 'HWIO', 'NHWC'): validate then convert.
    if len(dimension_numbers) != 3:
      msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
      raise TypeError(msg.format(len(dimension_numbers)))
    if not all(isinstance(elt, str) for elt in dimension_numbers):
      msg = "convolution dimension_numbers elements must be strings, got {}."
      raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
    msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
           "of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
    for i, elt in enumerate(dimension_numbers):
      if len(elt) != len(lhs_shape):
        raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))
    lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
    return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
  else:
    msg = "convolution dimension_numbers must be tuple/list or None, got {}."
    raise TypeError(msg.format(type(dimension_numbers)))
def conv_general_permutations(dimension_numbers):
  """Utility for convolution dimension permutations relative to Conv HLO.

  Given string specs like ('NHWC', 'HWIO', 'NHWC'), returns the
  (lhs, rhs, out) permutations that bring each operand into canonical
  (batch/out-feature, feature/in-feature, *spatial) order, with lhs/out
  spatial dims ordered to match the rhs spec.
  """
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  charpairs = (("N", "C"), ("O", "I"), ("N", "C"))
  lhs_char, rhs_char, out_char = charpairs
  for i, (a, b) in enumerate(charpairs):
    spec = dimension_numbers[i]
    if not spec.count(a) == spec.count(b) == 1:
      raise TypeError(
          ("convolution dimension_numbers[{}] must contain the characters "
           "'{}' and '{}' exactly once, got {}.").format(i, a, b, spec))
    if len(spec) != len(set(spec)):
      raise TypeError(
          ("convolution dimension_numbers[{}] cannot have duplicate "
           "characters, got {}.").format(i, spec))
  same_spatial = (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char)
                  == set(out_spec) - set(out_char))
  if not same_spatial:
    raise TypeError(
        ("convolution dimension_numbers elements must each have the same "
         "set of spatial characters, got {}.").format(dimension_numbers))
  def _perm_for(spec, charpair):
    spatial = [i for i, c in enumerate(spec) if c not in charpair]
    if spec is not rhs_spec:
      # Order lhs/out spatial dims to follow the rhs spec's spatial order.
      spatial.sort(key=lambda i: rhs_spec.index(spec[i]))
    return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
  return tuple(_perm_for(spec, pair)
               for spec, pair in zip(dimension_numbers, charpairs))
def _conv_general_proto(dimension_numbers):
  """Build an xla_client.ConvolutionDimensionNumbers proto from a
  canonical ConvDimensionNumbers."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  proto = xla_client.ConvolutionDimensionNumbers()
  # Canonical order is (batch/out-feature, feature/in-feature, *spatial).
  proto.input_batch_dimension = lhs_spec[0]
  proto.input_feature_dimension = lhs_spec[1]
  proto.output_batch_dimension = out_spec[0]
  proto.output_feature_dimension = out_spec[1]
  proto.kernel_output_feature_dimension = rhs_spec[0]
  proto.kernel_input_feature_dimension = rhs_spec[1]
  proto.input_spatial_dimensions.extend(lhs_spec[2:])
  proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
  proto.output_spatial_dimensions.extend(out_spec[2:])
  return proto
def _conv_general_vjp_lhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation) -> List[Tuple[int, int]]:
  """Compute per-spatial-dim (lo, hi) padding for the lhs-gradient conv.

  Chosen so that convolving the (stride-)dilated output cotangent with the
  dilated filter yields an array of the dilated lhs shape.
  """
  lhs_dilated_shape = lax._dilate_shape(in_shape, lhs_dilation)
  rhs_dilated_shape = lax._dilate_shape(window_dimensions, rhs_dilation)
  out_dilated_shape = lax._dilate_shape(out_shape, window_strides)
  pad_before = np.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1
  pad_after = (np.add(lhs_dilated_shape, rhs_dilated_shape) - 1
               - out_dilated_shape - pad_before)
  return safe_zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Compute per-spatial-dim (lo, hi) padding for the rhs-gradient conv."""
  if len(in_shape) == 0:  # 0D conv
    return []
  lhs_dilated_shape = lax._dilate_shape(in_shape, lhs_dilation)
  rhs_dilated_shape = lax._dilate_shape(window_dimensions, rhs_dilation)
  out_dilated_shape = lax._dilate_shape(out_shape, window_strides)
  pads_lo, _ = zip(*padding)
  # High padding = what the output requires beyond the lhs, plus what the
  # dilated filter requires beyond the low padding (shape-polymorphic safe).
  pads_from_lhs = core.diff_shape(out_dilated_shape, lhs_dilated_shape)
  pads_from_rhs = core.diff_shape(core.diff_shape(rhs_dilated_shape, pads_lo),
                                  (1,) * len(pads_lo))
  pads_hi = core.sum_shapes(pads_from_lhs, pads_from_rhs)
  return list(zip(pads_lo, pads_hi))
| |
import ssl
import zlib
import errno
import atexit
import select
import socket
import logging
import msgpack
import threading
import collections
logger = logging.getLogger(__name__)
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.lib.scope as s_scope
import synapse.lib.msgpack as s_msgpack
import synapse.lib.threads as s_threads
import synapse.lib.thisplat as s_thisplat
from synapse.eventbus import EventBus
def sockgzip(byts):
    '''
    Compress bytes with zlib and wrap them in a sock:gzip message.

    Args:
        byts (bytes): The msgpack-encoded message bytes to compress.

    Returns:
        bytes: A msgpack-encoded ('sock:gzip', {'data': <zlib bytes>}) message.
    '''
    # (cleanup: removed the unused `blen` local that only fed a
    # commented-out debug print)
    return s_msgpack.en(('sock:gzip', {'data': zlib.compress(byts)}))
class Socket(EventBus):
    '''
    Wrapper for the builtin socket.Socket class.

    Adds msgpack message framing, optional gzip of large messages, and
    EventBus lifecycle ( fini() ) semantics on top of a raw socket.

    Args:
        sock socket.socket: socket to wrap
        **info: Arbitrary key/value metadata stored on the Socket
                ( see get()/set() ).
    '''
    def __init__(self, sock, **info):
        EventBus.__init__(self)
        self.sock = sock # type: socket.socket
        # streaming unpacker: reassembles whole msgpack messages from
        # arbitrary recv() chunks
        self.unpk = msgpack.Unpacker(use_list=False, encoding='utf8',
                                     unicode_errors='surrogatepass')
        self.iden = s_common.guid()
        self.info = info
        self.blocking = True # sockets are blocking by default
        if self.info.get('nodelay', True):
            self._tryTcpNoDelay()
        self.txbuf = None # the remainder of a partially sent byts
        self.txque = collections.deque() # bytes queued for non-blocking tx
        self.rxque = collections.deque() # decoded mesgs awaiting yield
        self.onfini(self._finiSocket)
    def _addTxByts(self, byts):
        # queue bytes for the non-blocking tx loop and notify any
        # multiplexor via the sock:tx:add event
        self.txque.append(byts)
        self.fire('sock:tx:add')
    def send(self, byts):
        '''
        Send bytes on the socket.
        Args:
            byts (bytes): The bytes to send
        Returns:
            int: The sent byte count (or None) on fini()
        '''
        try:
            return self.sock.send(byts)
        except (OSError, ConnectionError) as e:
            logger.exception('Error during socket.send() - shutting down socket [%s]', self)
            self.fini()
            return None
    def runTxLoop(self):
        '''
        Run a pass through the non-blocking tx loop.
        Returns:
            (bool): True if there is still more work to do
        '''
        while True:
            if not self.txbuf:
                if not self.txque:
                    break
                self.txbuf = self.txque.popleft()
                self.fire('sock:tx:pop')
            # NOTE(review): send() returns None on error; txbuf[None:]
            # leaves the buffer intact so we break below with work pending
            # (the socket is already fini()d) -- confirm intended.
            sent = self.send(self.txbuf)
            self.txbuf = self.txbuf[sent:]
            # if we still have a txbuf after sending
            # we could only send part of the buffer
            if self.txbuf:
                break
        if not self.txbuf and not self.txque:
            return False
        return True
    def get(self, prop, defval=None):
        '''
        Retrieve a property from the socket's info dict.
        Example:
            if sock.get('listen'):
                dostuff()
        '''
        return self.info.get(prop, defval)
    def set(self, prop, valu):
        '''
        Set a property on the Socket by name.
        Example:
            sock.set('woot', 30)
        '''
        self.info[prop] = valu
    def recvall(self, size):
        '''
        Receive the exact number of bytes requested.
        Returns None if the socket closes early.
        Example:
            byts = sock.recvall(300)
            if byts == None:
                return
            dostuff(byts)
        Notes:
            * this API will trigger fini() on close
        '''
        byts = b''
        remain = size
        try:
            while remain:
                x = self.sock.recv(remain)
                if not x:
                    return None
                byts += x
                remain -= len(x)
        except socket.error as e:
            # fini triggered above.
            return None
        return byts
    def recvobj(self):
        '''
        Receive and return a single message tufo ( or None on close ).
        '''
        for mesg in self:
            return mesg
    def setblocking(self, valu):
        '''
        Set the socket's blocking mode to True/False.
        Args:
            valu (bool): False to set socket non-blocking
        '''
        valu = bool(valu)
        self.blocking = valu
        self.sock.setblocking(valu)
    def tx(self, mesg):
        '''
        Transmit a mesg tufo ( type, info ) via the socket using msgpack.
        If present this API is safe for use with a socket in a Plex().
        '''
        byts = s_msgpack.en(mesg)
        return self.txbytes(byts)
    def txbytes(self, byts):
        '''
        Transmit already-encoded message bytes ( see tx() ).
        Returns:
            (bool): True if the bytes were sent/queued.
        '''
        # we may support gzip on the socket message
        if len(byts) > 50000 and self.get('sock:can:gzip'):
            byts = sockgzip(byts)
        # if the socket is non-blocking assume someone is managing
        # the socket via sock:tx:add events
        if not self.blocking:
            self._addTxByts(byts)
            return True
        try:
            self.sendall(byts)
            return True
        except (OSError, ConnectionError) as e:
            logger.exception('Error during socket.txbytes() - shutting down socket [%s]', self)
            self.fini()
            return False
    def rx(self):
        '''
        Yield any completed mesg tufos (type,info) in the recv buffer.
        Example:
            for mesg in sock.rx():
                dostuff(mesg)
        '''
        # Yield any objects we have already queued up first.
        while self.rxque:
            yield self.rxque.popleft()
        # the "preread" state for a socket means it has IO todo
        # which is part of it's initial negotiation ( not mesg )
        if self.get('preread'):
            self.fire('link:sock:preread', sock=self)
            return
        byts = self.recv(1024000)
        # special case for non-blocking recv with no data ready
        if byts is None:
            return
        try:
            self.unpk.feed(byts)
            for mesg in self.unpk:
                self.rxque.append(mesg)
            while self.rxque:
                yield self.rxque.popleft()
        except Exception as e:
            logger.exception('Error during unpacking / yielding message - shutting down socket [%s]', self)
            self.fini()
            return
    def __iter__(self):
        '''
        Receive loop which yields messages until socket close.
        '''
        while not self.isfini:
            for mesg in self.rx():
                yield mesg
    def _tryTcpNoDelay(self):
        '''
        Enable TCP_NODELAY when the socket is a TCP stream socket.
        Returns:
            (bool): True if the option was set.
        '''
        if self.sock.family not in (socket.AF_INET, socket.AF_INET6):
            return False
        if self.sock.type != socket.SOCK_STREAM:
            return False
        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        return True
    def accept(self):
        '''
        Accept a connection on a listening Socket.
        Returns:
            ((Socket, addr)): A new Socket and peer address, or (None, None).
        '''
        try:
            sock, addr = self.sock.accept()
        except Exception as e:
            return None, None
        logger.debug('Accepting connection from %r', addr)
        # wrap the accepted socket in our own class
        sock = self.__class__(sock, accept=True)
        relay = self.get('relay')
        if relay is not None:
            relay._prepLinkSock(sock)
        self.fire('link:sock:accept', sock=sock)
        # check if the link:sock:accept callback fini()d the sock.
        if sock.isfini:
            return None, None
        return sock, addr
    def close(self):
        '''
        Hook the socket close() function to trigger fini()
        '''
        self.fini()
    def recv(self, size):
        '''
        Slightly modified recv function which masks socket errors.
        ( makes them look like a simple close )
        Additionally, any non-blocking recv's with no available data
        will return None!
        '''
        try:
            byts = self.sock.recv(size)
            if not byts:
                self.fini()
                return byts
            return byts
        except ssl.SSLError as e:
            # handle "did not complete" error where we didn't
            # get all the bytes necessary to decrypt the data.
            if e.errno == 2:
                return None
            self.fini()
            return b''
        except socket.error as e:
            if e.errno == errno.EAGAIN:
                return None
            self.fini()
            return b''
    def __getattr__(self, name):
        # allows us to be a thin wrapper
        return getattr(self.sock, name)
    def _finiSocket(self):
        # fini handler: close the underlying OS socket, ignoring late errors
        try:
            self.sock.close()
        except OSError as e:
            pass
class Plex(EventBus):
    '''
    Manage multiple Sockets using a multi-plexor IO thread.

    Non-blocking Sockets are registered via addPlexSock(); a dedicated
    select() thread fires link:sock:mesg / link:sock:init events as IO
    becomes available.
    '''
    def __init__(self):
        EventBus.__init__(self)
        #self._plex_sel = selectors.DefaultSelector()
        self._plex_thr = None
        self._plex_lock = threading.Lock()
        self._plex_socks = {} # iden -> Socket
        # used for select()
        self._plex_rxsocks = []
        self._plex_txsocks = []
        # loopback socket pair used to wake the blocked select() call
        self._plex_wake, self._plex_s2 = socketpair()
        self._plex_s2.set('wake', True)
        self.addPlexSock(self._plex_s2)
        self.onfini(self._onPlexFini)
        self._plex_thr = self._plexMainLoop()
    def __len__(self):
        # number of managed sockets ( including the wake socket )
        return len(self._plex_socks)
    def getPlexSocks(self):
        '''
        Return a list of the Socket()s managed by the Plex().
        Returns:
            ([Socket(),...]): The list of Socket() instances.
        '''
        return self._plex_socks.values()
    def _finiPlexSock(self, sock):
        # remove a socket from all plex bookkeeping and fini it
        self._plex_socks.pop(sock.iden, None)
        # try/wrap these because list has no discard()
        try:
            self._plex_rxsocks.remove(sock)
        except ValueError as e:
            pass
        try:
            self._plex_txsocks.remove(sock)
        except ValueError as e:
            pass
        self.wake()
        # call sock fini from a pool thread
        if not sock.isfini:
            s_glob.pool.call(sock.fini)
    def wake(self):
        '''
        Wake the select() loop ( no-op when called from the IO thread ).
        '''
        if s_threads.current() is self._plex_thr:
            return
        self._plexWake()
    def addPlexSock(self, sock):
        '''
        Add a Socket to the Plex()
        Args:
            sock (Socket): Socket to add.
        Example:
            plex.addPlexSock(sock)
        '''
        # plex-managed sockets are always non-blocking
        sock.setblocking(0)
        def txadd(mesg):
            with self._plex_lock:
                istx = sock.get('plex:istx')
                if not istx:
                    # since it's not in the tx list yet lets fire the
                    # tx loop and see if we need to be added...
                    if sock.runTxLoop():
                        sock.set('plex:istx', True)
                        self._plex_txsocks.append(sock)
                        self.wake()
        sock.on('sock:tx:add', txadd)
        self._plex_socks[sock.iden] = sock
        # we monitor all socks for rx and xx
        self._plex_rxsocks.append(sock)
        def fini():
            self._finiPlexSock(sock)
        sock.onfini(fini)
        # wake select() so the new socket is monitored immediately
        self.wake()
    def _plexWake(self):
        # write one byte to the wake socket to interrupt select()
        try:
            self._plex_wake.sendall(b'\x00')
        except socket.error as e:
            return
    @s_common.firethread
    def _plexMainLoop(self):
        # the select() IO thread; runs until the Plex is fini()d
        s_threads.iCantWait(name='SynPlexMain')
        while not self.isfini:
            try:
                rxlist, txlist, xxlist = select.select(self._plex_rxsocks, self._plex_txsocks, self._plex_rxsocks, 0.2)
            except Exception as e:
                # go through ALL of our rx sockets, and call fini() on the
                # sock if those sockets fileno() call is -1
                # The .copy() method is used since it is faster for small lists.
                logger.exception('Error during socket select. Culling fini or fileno==-1 sockets.')
                [sock.fini() for sock in self._plex_rxsocks.copy() if sock.fileno() == -1]
                continue
            try:
                for rxsock in rxlist:
                    if rxsock.get('wake'):
                        rxsock.recv(10240)
                        continue
                    # if he's a listen sock... accept()
                    if rxsock.get('listen'):
                        connsock, connaddr = rxsock.accept()
                        if connsock is not None:
                            self.addPlexSock(connsock)
                            self.fire('link:sock:init', sock=connsock)
                        continue
                    # yield any completed mesgs
                    for mesg in rxsock.rx():
                        self.fire('link:sock:mesg', sock=rxsock, mesg=mesg)
                for txsock in txlist:
                    if not txsock.runTxLoop():
                        txsock.set('plex:istx', False)
                        try:
                            self._plex_txsocks.remove(txsock)
                        except ValueError as e:
                            pass
                [self._finiPlexSock(sock) for sock in xxlist]
            except Exception as e:
                logger.warning('plexMainLoop: %s', e)
    def _onPlexFini(self):
        # fini handler: tear down all sockets and join the IO thread
        socks = list(self._plex_socks.values())
        [s.fini() for s in socks]
        self._plex_wake.fini()
        self._plex_thr.join()
def listen(sockaddr, **sockinfo):
    '''
    Create a listening Socket bound to the given sockaddr.

    Args:
        sockaddr ((str,int)): The (host, port) to bind.
        **sockinfo: Extra info properties for the Socket.

    Returns:
        Socket: A Socket with listen=True set.
    '''
    srv = socket.socket()
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        srv.bind(sockaddr)
        srv.listen(120)
        return Socket(srv, listen=True, **sockinfo)
    except socket.error:
        srv.close()
        raise
def connect(sockaddr, **sockinfo):
    '''
    Create a Socket connected to the given TCP sockaddr.

    Args:
        sockaddr ((str,int)): The (host, port) to connect to.
        **sockinfo: Extra info properties for the Socket.

    Returns:
        Socket: The connected Socket.
    '''
    conn = socket.socket()
    try:
        conn.connect(sockaddr)
        return Socket(conn, **sockinfo)
    except Exception:
        conn.close()
        raise
def _sockpair():
    # build a connected pair of Sockets via a loopback listener
    # ( fallback used where socket.socketpair() is unavailable )
    lisn = socket.socket()
    lisn.bind(('127.0.0.1', 0))
    lisn.listen(1)
    cli = socket.socket()
    cli.connect(lisn.getsockname())
    srv, _ = lisn.accept()
    lisn.close()
    return Socket(cli), Socket(srv)
def socketpair():
    '''
    Return a connected pair of Sockets.

    Uses the native socket.socketpair() on posix; falls back to a
    loopback listener pair where it is unavailable (e.g. windows).
    '''
    if hasattr(socket, 'socketpair'):
        s1, s2 = socket.socketpair()
        return Socket(s1), Socket(s2)
    return _sockpair()
def inet_pton(afam, text):
    '''
    Implements classic socket.inet_pton regardless of platform. (aka windows)

    Args:
        afam (int): Address family ( socket.AF_INET / AF_INET6 ).
        text (str): The presentation-format address string.
    '''
    return s_thisplat.inet_pton(afam, text)
def inet_ntop(afam, byts):
    '''
    Implements classic socket.inet_ntop regardless of platform. (aka windows)

    Args:
        afam (int): Address family ( socket.AF_INET / AF_INET6 ).
        byts (bytes): The packed binary address.
    '''
    return s_thisplat.inet_ntop(afam, byts)
def hostaddr(dest='8.8.8.8'):
    '''
    Retrieve the ipv4 address for this host ( optionally as seen from dest ).

    Args:
        dest (str): A remote IPv4 address used to select the route
                    ( no packets are actually sent ).

    Returns:
        str: The local IPv4 address used to reach dest.

    Example:
        addr = s_socket.hostaddr()
    '''
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't actually send any packets!
        sock.connect((dest, 80))
        addr, port = sock.getsockname()
    finally:
        # previously the socket leaked when connect() raised
        sock.close()
    return addr
# make a plex and register an atexit handler.
def _plex_ctor():
    '''
    Scope constructor: build the shared Plex and ensure it is
    torn down at interpreter exit.
    '''
    plex = Plex()
    atexit.register(plex.fini)
    return plex
# add a Plex constructor to the global scope
s_scope.ctor('plex', _plex_ctor)
| |
#
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Lead Developers: Dan Lovell and Jay Baxter
# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from collections import namedtuple
from collections import defaultdict
from collections import Counter
#
import pylab
#
import crosscat.tests.plot_utils as pu
import crosscat.utils.data_utils as du
# Accessor helpers for timing_row records.  (Converted from lambda
# assignments -- PEP 8 E731 -- so tracebacks and repr() carry real names.)
def get_time_per_step(timing_row):
    """Return the row's time_per_step field as a float."""
    return float(timing_row.time_per_step)

def get_num_rows(timing_row):
    """Return the row's num_rows field."""
    return timing_row.num_rows

def get_num_cols(timing_row):
    """Return the row's num_cols field."""
    return timing_row.num_cols

def get_num_views(timing_row):
    """Return the row's num_views field."""
    return timing_row.num_views

def get_num_clusters(timing_row):
    """Return the row's num_clusters field."""
    return timing_row.num_clusters

def do_strip(string):
    """Return `string` with surrounding whitespace removed."""
    return string.strip()
#
def parse_timing_file(filename):
    """Read a timing CSV and return its rows as `timing_row` namedtuples.

    Field names are taken from the CSV header; each cell is stripped of
    surrounding whitespace.
    """
    header, rows = du.read_csv(filename)
    _timing_row = namedtuple('timing_row', ' '.join(header))
    return [_timing_row(*map(do_strip, row)) for row in rows]
def group_results(timing_rows, get_fixed_parameters, get_variable_parameter):
    """Index timing rows as {fixed parameters: {variable parameter: row}}.

    Args:
        timing_rows: iterable of timing_row records.
        get_fixed_parameters: maps a row to the hashable series key.
        get_variable_parameter: maps a row to the x-axis key within a series.

    Returns:
        defaultdict(dict): the nested grouping; later rows with the same
        keys overwrite earlier ones.
    """
    dict_of_dicts = defaultdict(dict)
    # BUG FIX: previously iterated over the undefined name
    # `these_timing_rows`, which raised NameError on every call.
    for timing_row in timing_rows:
        fixed_parameters = get_fixed_parameters(timing_row)
        variable_parameter = get_variable_parameter(timing_row)
        dict_of_dicts[fixed_parameters][variable_parameter] = timing_row
    return dict_of_dicts
# Matplotlib color/marker palettes keyed by the string-valued timing-row
# fields; referenced by the plot configuration tables below.
num_cols_to_color = {'4':'b', '16':'r', '32':'m', '64':'g', '128':'c', '256':'k'}
num_rows_to_color = {'100':'b', '400':'r', '1000':'m', '4000':'y', '10000':'g'}
num_clusters_to_marker = {'10':'x', '20':'o', '40':'s', '50':'v'}
num_views_to_marker = {'1':'x', '2':'o', '4':'v'}
num_rows_to_marker = {'100':'x', '400':'o', '1000':'v', '4000':'1', '10000':'*'}
num_cols_to_marker = {'4':'x', '16':'o', '32':'v', '64':'1', '128':'*',
                      '256':'s'}
#
# Per-"vary_what" plot configuration: which kernel's timing to plot, how to
# label the fixed-parameter series, which row field is the x axis, and which
# row fields select the series color and marker.
plot_parameter_lookup = dict(
    rows=dict(
        vary_what='rows',
        which_kernel='row_partition_assignments',
        get_fixed_parameters=lambda timing_row: 'Co=%s;Cl=%s;V=%s' % \
            (timing_row.num_cols, timing_row.num_clusters,
             timing_row.num_views),
        get_variable_parameter=get_num_rows,
        get_color_parameter=get_num_cols,
        color_dict=num_cols_to_color,
        color_label_prepend='#Col=',
        get_marker_parameter=get_num_clusters,
        marker_dict=num_clusters_to_marker,
        marker_label_prepend='#Clust=',
        ),
    cols=dict(
        vary_what='cols',
        which_kernel='column_partition_assignments',
        get_fixed_parameters=lambda timing_row: 'R=%s;Cl=%s;V=%s' % \
            (timing_row.num_rows, timing_row.num_clusters,
             timing_row.num_views),
        get_variable_parameter=get_num_cols,
        get_color_parameter=get_num_rows,
        color_dict=num_rows_to_color,
        color_label_prepend='#Row=',
        get_marker_parameter=get_num_clusters,
        marker_dict=num_clusters_to_marker,
        marker_label_prepend='#Clust=',
        ),
    clusters=dict(
        vary_what='clusters',
        which_kernel='row_partition_assignments',
        get_fixed_parameters=lambda timing_row: 'R=%s;Co=%s;V=%s' % \
            (timing_row.num_rows, timing_row.num_cols,
             timing_row.num_views),
        get_variable_parameter=get_num_clusters,
        get_color_parameter=get_num_rows,
        color_dict=num_rows_to_color,
        color_label_prepend='#Row=',
        get_marker_parameter=get_num_views,
        marker_dict=num_views_to_marker,
        marker_label_prepend='#View=',
        ),
    views=dict(
        vary_what='views',
        which_kernel='column_partition_assignments',
        get_fixed_parameters=lambda timing_row: 'R=%s;Co=%s;Cl=%s' % \
            (timing_row.num_rows, timing_row.num_cols,
             timing_row.num_clusters),
        get_variable_parameter=get_num_views,
        get_color_parameter=get_num_rows,
        color_dict=num_rows_to_color,
        color_label_prepend='#Row=',
        get_marker_parameter=get_num_cols,
        marker_dict=num_cols_to_marker,
        marker_label_prepend='#Col=',
        ),
    )
def get_first_label_value(label):
    """Return the text between the first '=' and the first ';' in `label`."""
    return label[1 + label.index('='):label.index(';')]

def label_cmp(x, y):
    """Three-way compare two labels by their first numeric value.

    Equivalent to the old `cmp(...)`-based lambda, but (a>b)-(a<b) also
    works on Python 3, where the `cmp` builtin no longer exists.
    """
    a = int(get_first_label_value(x))
    b = int(get_first_label_value(y))
    return (a > b) - (a < b)
def plot_grouped_data(dict_of_dicts, plot_parameters, plot_filename=None):
    """Plot time-per-step curves, one line per fixed-parameter configuration.

    dict_of_dicts -- {configuration_label: {x_value: timing_row}}
    plot_parameters -- one entry of plot_parameter_lookup
    plot_filename -- if given, save the figure (legend outside); otherwise
                     show it interactively.
    Returns the pylab figure handle.
    """
    # Translate a timing row into the line color / marker of its group.
    get_color_parameter = plot_parameters['get_color_parameter']
    color_dict = plot_parameters['color_dict']
    color_label_prepend = plot_parameters['color_label_prepend']
    timing_row_to_color = lambda timing_row: \
        color_dict[get_color_parameter(timing_row)]
    get_marker_parameter = plot_parameters['get_marker_parameter']
    marker_dict = plot_parameters['marker_dict']
    marker_label_prepend = plot_parameters['marker_label_prepend']
    timing_row_to_marker = lambda timing_row: \
        marker_dict[get_marker_parameter(timing_row)]
    vary_what = plot_parameters['vary_what']
    which_kernel = plot_parameters['which_kernel']
    #
    fh = pylab.figure()
    for configuration, run_data in dict_of_dicts.iteritems():
        # sort by x so each line is drawn left to right
        x = sorted(run_data.keys())
        _y = [run_data[el] for el in x]
        y = map(get_time_per_step, _y)
        #
        plot_args = dict()
        # color/marker depend only on the group's fixed parameters, so any
        # row of the group (here: an arbitrary first one) determines them
        first_timing_row = run_data.values()[0]
        color = timing_row_to_color(first_timing_row)
        plot_args['color'] = color
        marker = timing_row_to_marker(first_timing_row)
        plot_args['marker'] = marker
        label = str(configuration)
        plot_args['label'] = label
        #
        pylab.plot(x, y, **plot_args)
        #
    pylab.xlabel('# %s' % vary_what)
    pylab.ylabel('time per step (seconds)')
    pylab.title('Timing analysis for kernel: %s' % which_kernel)
    # pu.legend_outside(bbox_to_anchor=(0.5, -.1), ncol=4, label_cmp=label_cmp)
    # legend is rendered outside the axes, built from the color/marker tables
    pu.legend_outside_from_dicts(marker_dict, color_dict,
        marker_label_prepend=marker_label_prepend, color_label_prepend=color_label_prepend,
        bbox_to_anchor=(0.5, -.1), label_cmp=label_cmp)
    if plot_filename is not None:
        pu.savefig_legend_outside(plot_filename)
    else:
        # interactive display
        pylab.ion()
        pylab.show()
    return fh
if __name__ == '__main__':
    # parse some arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--vary_what', type=str, default='views')
    parser.add_argument('--input_filename', type=str, default='parsed_output')
    parser.add_argument('--plot_filename', type=str, default=None)
    args = parser.parse_args()
    input_filename = args.input_filename
    vary_what = args.vary_what
    plot_filename = args.plot_filename
    # configure parsing/plotting: pick the table entry for the varied quantity
    plot_parameters = plot_parameter_lookup[vary_what]
    which_kernel = plot_parameters['which_kernel']
    get_fixed_parameters = plot_parameters['get_fixed_parameters']
    get_variable_parameter = plot_parameters['get_variable_parameter']
    # some helper predicates for filtering timing rows
    get_is_this_kernel = lambda timing_row: \
        timing_row.which_kernel == which_kernel
    is_one_view = lambda timing_row: timing_row.num_views == '1'
    # parse the timing data and keep only this kernel's rows
    timing_rows = parse_timing_file(input_filename)
    these_timing_rows = filter(get_is_this_kernel, timing_rows)
    # these_timing_rows = filter(is_one_view, these_timing_rows)
    # group rows that share fixed parameters; within a group, key by x value
    dict_of_dicts = group_results(these_timing_rows, get_fixed_parameters,
                                  get_variable_parameter)
    # plot
    plot_grouped_data(dict_of_dicts, plot_parameters, plot_filename)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project Compiler
#
import os, sys, re, shutil, time, run, sgmllib, codecs, tempfile, subprocess
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
sys.path.append(os.path.abspath(os.path.join(template_dir,'..')))
sys.path.append(os.path.abspath(os.path.join(template_dir,'..', 'common')))
from tiapp import *
import jspacker
from csspacker import CSSPacker
import traceback
try:
import json
except:
import simplejson as json
# Files / directories that are never copied or linked into the app bundle.
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store', '_svn'];
ignoreDirs = ['iphone', 'android', 'mobileweb', '.git', '.svn', 'CVS'];

# Boilerplate emitted at the top of every generated Objective-C file.
HEADER = """/**
* Appcelerator Titanium Mobile
* This is generated code. Do not modify. Your changes *will* be lost.
* Generated code is Copyright (c) 2009-2012 by Appcelerator, Inc.
* All Rights Reserved.
*/
#import <Foundation/Foundation.h>
"""

# Declaration of the generated asset-routing class (goes into the .h file).
INTERFACE_HEADER= """
@interface ApplicationRouting : NSObject {
}
+ (NSData*) resolveAppAsset:(NSString*)path;
"""

# Start of the asset-routing implementation (.m); the body is appended later.
IMPL_HEADER= """#import "ApplicationRouting.h"
extern NSData* filterDataInRange(NSData* thedata, NSRange range);
@implementation ApplicationRouting
"""

# Closes both generated @interface / @implementation sections.
FOOTER ="""
@end
"""

# Start of ApplicationMods.m; one addObject line is appended per module.
MODULE_IMPL_HEADER = """#import "ApplicationMods.h"
@implementation ApplicationMods
+ (NSArray*) compiledMods
{
NSMutableArray *modules = [NSMutableArray array];
"""
class HTMLParser(sgmllib.SGMLParser):
	"""Collects the src attribute of every <script> tag in an HTML document."""
	def __init__(self, verbose=0):
		sgmllib.SGMLParser.__init__(self, verbose)
		self.scripts = []
	def parse(self, s):
		"""Feed the whole document and finish parsing."""
		self.feed(s)
		self.close()
	def start_script(self, attributes):
		# sgmllib callback, invoked once per <script ...> opening tag
		self.scripts.extend(value for name, value in attributes if name == "src")
	def get_scripts(self):
		"""Return the collected script src values, in document order."""
		return self.scripts
def read_module_properties(dir):
	"""Parse <dir>/manifest into a dict of properties.

	The manifest format is one 'key: value' pair per line; lines starting
	with '#' and lines without a ':' are ignored.  Keys keep their exact
	spelling, values are stripped.  Returns {} when no manifest exists.
	"""
	manifest_path = os.path.join(dir, 'manifest')
	properties = {}
	if os.path.exists(manifest_path):
		f = open(manifest_path)
		try:
			contents = f.read()
		finally:
			f.close()  # original version leaked this handle
		for line in contents.splitlines(True):
			if line[0:1] == '#':
				continue
			idx = line.find(':')
			if idx == -1:
				continue
			properties[line[0:idx]] = line[idx+1:].strip()
	return properties
# Convert non-unicode obj to unicode encoded in utf-8.
def to_unicode_or_not(obj, encoding='utf-8'):
	"""Decode a byte string to unicode; pass every other object through."""
	if isinstance(obj, basestring) and not isinstance(obj, unicode):
		return unicode(obj, encoding)
	return obj
# Need to pre-parse xcconfig files to mangle variable names, and then
# dump them into a map so that we can re-assemble them later
def parse_xcconfig(xcconfig, moduleId, variables):
	"""Rewrite a module xcconfig, prefixing every variable with the module
	id (uppercased, dots replaced by underscores).

	Returns the rewritten xcconfig text and records each rename in
	`variables` (original name -> list of mangled names) so the caller can
	emit aggregate $(inherited) assignments later.
	"""
	prefix = moduleId.upper().replace('.', '_')
	renames = {}  # original varname -> mangled varname
	rewritten = ''
	fin = open(xcconfig)
	for raw in fin:
		# Strip trailing // comments
		pos = raw.find('//')
		line = raw[0:pos] if pos != -1 else raw
		# The regular expression parses a valid line into components
		#   <var>=<value>
		#   <var>[<key>=<keyvalue>]=<value>
		# e.g.
		#   OTHER_LDFLAGS=-framework EventKit
		#   OTHER_LDFLAGS[sdk=iphoneos4*]=-liconv
		parts = re.split('(([^\[=]+)(\[[^\]]+\])?) *=? *(.+)', line)
		if len(parts) < 5:
			continue
		varname, value = parts[1], parts[4]
		mangled = re.sub(r'[^\w]', '_', prefix + '_' + varname.strip())
		renames[varname] = mangled
		rewritten += mangled + '=' + value + '\n'
	fin.close()
	# Update any local $(var) references to the mangled name and record the
	# rename in the global variables map.
	for (varname, mangled) in renames.iteritems():
		rewritten = rewritten.replace('$(%s)' % varname, '$(%s)' % mangled)
		if not varname in variables:
			variables[varname] = [mangled]
		else:
			variables[varname].append(mangled)
	rewritten += '\n'
	return rewritten
def softlink_resources(source,target,use_ignoreDirs=True):
if not os.path.exists(target):
os.makedirs(target)
for file in os.listdir(source):
if (use_ignoreDirs and (file in ignoreDirs)) or (file in ignoreFiles):
continue
from_ = to_unicode_or_not(os.path.join(source, file))
to_ = to_unicode_or_not(os.path.join(target, file))
if os.path.isdir(from_):
print "[DEBUG] creating: %s" % (to_)
softlink_resources(from_,to_,use_ignoreDirs)
else:
print "[DEBUG] linking: %s to %s" % (from_,to_)
if os.path.exists(to_):
if os.path.islink(to_):
os.remove(to_)
os.symlink(from_, to_)
else:
os.symlink(from_, to_)
def clear_application_routing(classes_dir):
	"""Overwrite ApplicationRouting.m with a stub whose resolveAppAsset
	always returns nil (used when assets are read from disk instead)."""
	stub = HEADER + IMPL_HEADER
	stub += "+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n"
	stub += " return nil;\n"
	stub += '}\n'
	stub += FOOTER
	impf = open(os.path.join(classes_dir, 'ApplicationRouting.m'), 'w+')
	impf.write(stub)
	impf.close()
def softlink_for_simulator(project_dir,app_dir):
	"""Symlink the project's Resources (plus iphone-specific overrides and
	modules) into the simulator app bundle, then stub out asset routing."""
	resources_dir = os.path.join(project_dir, 'Resources')
	softlink_resources(resources_dir, app_dir)
	iphone_resources_dir = os.path.join(resources_dir, 'iphone')
	if os.path.exists(iphone_resources_dir):
		# iphone-specific resources override the shared ones
		softlink_resources(iphone_resources_dir, app_dir, False)
	src_mod_dir = os.path.join(project_dir, 'modules')
	dest_mod_dir = os.path.join(app_dir, 'modules')
	if os.path.exists(src_mod_dir):
		softlink_resources(src_mod_dir, dest_mod_dir)
		if os.path.exists(os.path.join(src_mod_dir, 'iphone')):
			softlink_resources(os.path.join(project_dir, 'modules', 'iphone'), dest_mod_dir, False)
	# simulator builds resolve assets from disk, so routing becomes a stub
	clear_application_routing(os.path.join(project_dir, 'build', 'iphone', 'Classes'))
#
# TODO/FIXME
#
# - encryptor
#
class Compiler(object):
	def __init__(self, project_dir, appid, name, deploytype):
		# deploytype steers nearly every branch of compileProject; values
		# seen in this file: development, test, production, export,
		# export-build, commonjs, simulator
		self.deploytype = deploytype
		self.project_dir = project_dir
		self.project_name = name
		self.appid = appid
		# export-build / commonjs compile in place; everything else goes
		# through the generated build/iphone project
		if deploytype != 'export-build' and deploytype != 'commonjs':
			self.iphone_dir = os.path.join(project_dir,'build','iphone')
		else:
			self.iphone_dir = project_dir
		self.classes_dir = os.path.join(self.iphone_dir,'Classes')
		self.assets_dir = os.path.join(self.iphone_dir,'assets')
		self.modules = []           # top-level Ti namespaces seen in JS
		self.modules_metadata = []  # manifest info per native module
		self.exports = []           # every symbol detected in compiled JS
		# for now, these are required
		self.defines = ['USE_TI_ANALYTICS','USE_TI_NETWORK','USE_TI_PLATFORM','USE_TI_UI', 'USE_TI_API']
	def compileProject(self,xcode=False,devicefamily='ios',iphone_version='iphoneos',silent=False,sdk=None):
		"""Drive a full project compile.

		Renders main.m from its template, detects native/CommonJS modules,
		generates module.xcconfig and ApplicationMods.m, copies/compiles
		Resources into the app bundle and writes defines.h.  Behaviour
		branches heavily on self.deploytype.
		"""
		tiapp_xml = os.path.join(self.project_dir,'tiapp.xml')
		ti = TiAppXML(tiapp_xml)
		# SDK version defaults to the name of the directory this SDK lives in
		if sdk is None:
			sdk_version = os.path.basename(os.path.abspath(os.path.join(template_dir,'../')))
		else:
			sdk_version = sdk
		if xcode:
			# invoked from inside an Xcode build phase: trust its environment
			app_name = os.environ['FULL_PRODUCT_NAME']
			app_dir = os.path.join(os.environ['TARGET_BUILD_DIR'],os.environ['CONTENTS_FOLDER_PATH'])
		else:
			target = 'Debug'
			if self.deploytype == 'production':
				target = 'Release'
			app_name = self.project_name+'.app'
			app_folder_name = '%s-iphoneos' % target
			app_dir = os.path.abspath(os.path.join(self.iphone_dir,'build',app_folder_name,app_name))
		if not silent:
			print "[INFO] Titanium SDK version: %s" % sdk_version
			print "[INFO] iPhone Device family: %s" % devicefamily
			print "[INFO] iPhone SDK version: %s" % iphone_version
		if self.deploytype != 'export-build':
			# render main.m from the template with project/app metadata
			main_template_file = os.path.join(template_dir,'main.m')
			main_template = codecs.open(main_template_file, encoding='utf-8').read()
			main_template = main_template.replace('__PROJECT_NAME__',self.project_name)
			main_template = main_template.replace('__PROJECT_ID__',self.appid)
			main_template = main_template.replace('__DEPLOYTYPE__',self.deploytype)
			main_template = main_template.replace('__APP_ID__',self.appid)
			main_template = main_template.replace('__APP_ANALYTICS__',ti.properties['analytics'])
			main_template = main_template.replace('__APP_PUBLISHER__',ti.properties['publisher'])
			main_template = main_template.replace('__APP_URL__',ti.properties['url'])
			main_template = main_template.replace('__APP_NAME__',ti.properties['name'])
			main_template = main_template.replace('__APP_VERSION__',ti.properties['version'])
			main_template = main_template.replace('__APP_DESCRIPTION__',ti.properties['description'])
			main_template = main_template.replace('__APP_COPYRIGHT__',ti.properties['copyright'])
			main_template = main_template.replace('__APP_GUID__',ti.properties['guid'])
			main_template = main_template.replace('__APP_RESOURCE_DIR__','')
			main_template_out = os.path.join(self.iphone_dir,'main.m')
			# NOTE(review): 'w+' truncates the file, so this read always
			# returns '' and main.m is rewritten every build -- presumably
			# the intent was to skip unchanged writes; confirm.
			main_file = codecs.open(main_template_out,'w+',encoding='utf-8')
			main_file_contents = main_file.read()
			if main_file_contents!=main_template:
				main_file.write(main_template)
				main_file.close()
			resources_dir = os.path.join(self.project_dir,'Resources')
			iphone_resources_dir = os.path.join(resources_dir,'iphone')
			iphone_platform_dir = os.path.join(self.project_dir,'platform','iphone')
			# copy in any resources in our module like icons
			# NOTE: This means that any JS-only modules in the local project
			# are hashed up and dumped into the export.
			has_modules = False
			missing_modules, modules, module_js = ([], [], [])
			module_js_dir = os.path.join(self.project_dir,'modules')
			if os.path.exists(module_js_dir):
				for file in os.listdir(module_js_dir):
					if file.endswith('.js'):
						module_js.append({'from':os.path.join(module_js_dir,file),'to':os.path.join(app_dir,file),'path':'modules/'+file})
			if self.deploytype != 'export-build':
				# Have to load the module detection here, in order to
				# prevent distributing even MORE stuff in export/transport
				sys.path.append(os.path.join(template_dir,'../module'))
				from module import ModuleDetector
				detector = ModuleDetector(self.project_dir)
				missing_modules, modules = detector.find_app_modules(ti, 'iphone', self.deploytype)
			# we have to copy these even in simulator given the path difference
			if os.path.exists(app_dir):
				self.copy_resources([iphone_resources_dir],app_dir,False)
			if os.path.exists(app_dir):
				self.copy_resources([iphone_platform_dir],app_dir,False)
			# generate the includes for all compiled modules
			xcconfig_c = "// this is a generated file - DO NOT EDIT\n\n"
			if len(modules) > 0:
				mods = open(os.path.join(self.classes_dir,'ApplicationMods.m'),'w+')
				variables = {}
				mods.write(MODULE_IMPL_HEADER)
				for module in modules:
					if module.js:
						# CommonJS module
						module_js.append({'from': module.js, 'path': 'modules/' + os.path.basename(module.js)})
					module_id = module.manifest.moduleid.lower()
					module_name = module.manifest.name.lower()
					module_version = module.manifest.version
					module_guid = ''
					module_licensekey = ''
					if module.manifest.has_property('guid'):
						module_guid = module.manifest.guid
					if module.manifest.has_property('licensekey'):
						module_licensekey = module.manifest.licensekey
					self.modules_metadata.append({'guid':module_guid,'name':module_name,'id':module_id,'dir':module.path,'version':module_version,'licensekey':module_licensekey})
					# merge the module's own xcconfig plus any project-level
					# override into the aggregate config (names mangled)
					xcfile = module.get_resource('module.xcconfig')
					if os.path.exists(xcfile):
						xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
						xcconfig_c += xcconfig_contents
					xcfile = os.path.join(self.project_dir,'modules','iphone',"%s.xcconfig" % module_name)
					if os.path.exists(xcfile):
						xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
						xcconfig_c += xcconfig_contents
					mods.write(" [modules addObject:[NSDictionary dictionaryWithObjectsAndKeys:@\"%s\",@\"name\",@\"%s\",@\"moduleid\",@\"%s\",@\"version\",@\"%s\",@\"guid\",@\"%s\",@\"licensekey\",nil]];\n" % (module_name,module_id,module_version,module_guid,module_licensekey));
					# Load export symbols from modules...
					metadata_path = os.path.join(module.path, 'metadata.json')
					if os.path.exists(metadata_path):
						self.load_metadata(metadata_path)
				mods.write(" return modules;\n")
				mods.write("}\n")
				mods.write(FOOTER)
				mods.close()
				# aggregate: VAR=$(inherited) $(MANGLED_A) $(MANGLED_B) ...
				for (name, values) in variables.iteritems():
					xcconfig_c += name + '=$(inherited) '
					for value in values:
						xcconfig_c += '$(%s) ' % value
					xcconfig_c += '\n'
				has_modules = True
				xcconfig = os.path.join(self.iphone_dir,"module.xcconfig")
				make_xcc = True
				if os.path.exists(xcconfig):
					existing_xcc = open(xcconfig).read()
					# only copy if different so we don't trigger re-compile in xcode
					make_xcc = existing_xcc!=xcconfig_c
				if make_xcc:
					xcconfig = open(xcconfig,'w')
					xcconfig.write(xcconfig_c)
					xcconfig.close()
		#endif deploytype != 'export-build'
		else:
			# ... And for exported projects, load export symbols from
			# the 'metadata' dir.
			metadata_dir = os.path.join(self.iphone_dir, 'metadata')
			if os.path.isdir(metadata_dir):
				for file in os.listdir(metadata_dir):
					self.load_metadata(os.path.join(metadata_dir,file))
		if self.deploytype=='simulator' or self.deploytype=='export':
			# simulator/export use the stock defines.h from the template
			shutil.copy(os.path.join(template_dir,'Classes','defines.h'),os.path.join(self.classes_dir,'defines.h'))
		if self.deploytype!='development' or has_modules:
			# NOTE(review): resources_dir is only bound in the
			# non-export-build branch above; confirm export-build can never
			# reach this copy.
			if os.path.exists(app_dir) and self.deploytype != 'development':
				self.copy_resources([resources_dir],app_dir,self.deploytype != 'test',module_js)
				if self.deploytype == 'production':
					# never ship the debugger configuration
					debugger_plist = os.path.join(app_dir,'debugger.plist')
					if os.path.exists(debugger_plist):
						os.remove(debugger_plist)
			if self.deploytype!='development' and self.deploytype!='export':
				defines_file = os.path.join(self.classes_dir, 'defines.h')
				# NOTE(review): same 'w+' truncate-then-read pattern as
				# main.m above -- the comparison below always sees ''.
				defines_header = open(defines_file,'w+')
				defines_content = "// Warning: this is generated file. Do not modify!\n\n"
				defines_content+= "#define TI_VERSION %s\n"%sdk_version
				for sym in self.defines:
					defines_content+="#define %s\n" % sym
				if defines_content!=defines_header.read():
					defines_header.write(defines_content)
					defines_header.close()
			# deploy any module image files
			for module in self.modules:
				img_dir = os.path.join(template_dir,'modules',module.lower(),'images')
				print "[DEBUG] module image = %s" % img_dir
				if not os.path.exists(img_dir): continue
				dest_img_dir = os.path.join(app_dir,'modules',module.lower(),'images')
				if not os.path.exists(dest_img_dir):
					os.makedirs(dest_img_dir)
				self.copy_resources([img_dir],dest_img_dir,False)
			if self.deploytype!='development' and os.path.exists(app_dir):
				# optimize PNGs - since we don't include them in the Resources of the xcodeproj
				# the ones we copy in won't get optimized so we need to run it manually
				# we can skip this on the simulator but should do it on device
				dev_path = "/Developer"
				# we need to ask xcode where the root path is
				path = run.run(["/usr/bin/xcode-select","-print-path"],True,False)
				if path:
					dev_path = path.strip()
				run.run(["%s/Platforms/iPhoneOS.platform/Developer/usr/bin/iphoneos-optimize"%dev_path,app_dir],False)
				# remove empty directories
				os.chdir(app_dir)
				os.system("find . -type d -empty -delete")
		else:
			print "[INFO] Skipping JS compile, running from simulator"
		if self.deploytype=='development':
			softlink_for_simulator(self.project_dir,app_dir)
	def compile_module(self):
		"""Compile a module project: the <appid>.js entry point plus every
		other .js asset, packed through titanium_prep.

		Returns (root_asset, module_assets) native source snippets; the
		compiled asset files themselves are removed afterwards.
		"""
		# the entry point is compiled separately so the loader can resolve it
		root_asset = self.compile_commonjs_file(self.appid+'.js', os.path.join(self.assets_dir, self.appid+'.js'))
		js_files = []
		for root, dirs, files in os.walk(self.assets_dir, True, None, True):
			for file in [f for f in files if os.path.splitext(f)[1] == '.js']:
				full_path = os.path.join(root, file)
				self.compile_js_file(os.path.relpath(full_path, self.assets_dir), full_path, js_files)
		template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
		titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
		# titanium_prep reads the asset list from stdin and emits native code
		cmdinputfile = tempfile.TemporaryFile()
		cmdinputfile.write('\n'.join(js_files))
		cmdinputfile.seek(0)
		module_assets = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
		cmdinputfile.close()
		# Clean up the generated assets
		for file in js_files:
			os.remove(os.path.join(self.assets_dir, file))
		return (root_asset, module_assets)
def load_metadata(self, file):
module_metadata = open(file,'r')
metadata = json.load(module_metadata)
module_metadata.close()
for symbol in metadata['exports']:
self.add_symbol(symbol)
def add_symbol(self,api):
print "[DEBUG] detected symbol: %s" % api
curtoken = ''
tokens = api.split(".")
try:
self.modules.index(tokens[0])
except:
self.modules.append(tokens[0])
for token in tokens:
curtoken+=token+"."
symbol = 'USE_TI_%s' % (curtoken.replace('.create','').replace('.','').replace('-','_').upper())
try:
self.defines.index(symbol)
except:
self.defines.append(symbol)
	def extract_tokens(self,sym,line):
		"""Return the sorted, de-duplicated dotted expressions following
		'<sym>.' in `line` (e.g. sym='Ti', line='Ti.UI.createView(' ->
		['UI.createView'])."""
		# sloppy joe parsing coooode
		# could be prettier and faster but it works and rather reliable
		c = 0          # scan position in line
		tokens = []
		search = sym + "."
		size = len(search)
		while True:
			i = line.find(search,c)
			if i < 0:
				break
			found = False  # did we hit a terminator character?
			buf = ''       # characters of the current token
			x = 0          # how far past the match we consumed
			for n in line[i+size:]:
				# look for a terminal - this could probably be easier
				if n in ['(',')','{','}','=',',',' ',':','!','[',']','+','*','/','~','^','%','\n','\t','\r']:
					found = True
					break
				buf+=n
				x+=1
			tokens.append(buf)
			if found:
				c = i + x + 1
				continue
			# token ran to end of line: nothing further to scan
			break
		return sorted(set(tokens))
def compile_js(self,file_contents):
for line in file_contents.split(';'):
for symbol in ('Titanium','Ti'):
for sym in self.extract_tokens(symbol,line):
self.add_symbol(sym)
self.exports.append(sym)
	def process_html_files(self,data,source_root):
		"""Scan the queued .html files for local <script src> references.

		Any queued .js entry referenced from HTML is removed from
		data['.js'] (so it is NOT compiled into the routing table) and
		returned instead, to be minified and copied verbatim.
		Callers guarantee data contains a '.html' key.
		"""
		# NOTE: shadows the builtin compile() inside this method
		compile = []
		if data.has_key('.js'):
			for entry in data['.html']:
				html_file = entry['from']
				file_contents = open(os.path.expanduser(html_file)).read()
				parser = HTMLParser()
				parser.parse(file_contents)
				# extract all our scripts that are dependencies and we
				# don't compile these
				scripts = parser.get_scripts()
				if len(scripts) > 0:
					js_files = data['.js']
					for script in scripts:
						# if a remote script, ignore
						if script.startswith('http:') or script.startswith('https:'):
							continue
						if script.startswith('app://'):
							script = script[6:]
						# build a file relative to the html file
						fullpath = os.path.abspath(os.path.join(os.path.dirname(html_file),script))
						# remove this script from being compiled
						for f in js_files:
							if f['from']==fullpath:
								# target it to be compiled
								compile.append(f)
								js_files.remove(f)
								break
		return compile
def compile_js_asset_file(self,path,file):
file_contents = open(os.path.expanduser(file)).read()
if self.deploytype == 'production' or self.deploytype == 'commonjs':
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
self.compile_js(file_contents)
path = os.path.join(self.assets_dir,path)
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
tfile = open(path,'w+')
tfile.write(file_contents)
tfile.close()
# TODO: We should remove this when we can "safely" say we no longer support
# versions prior to 2.1, and also change the module loader code in iOS to
# no longer check for moduleAsset.
def compile_commonjs_file(self,path,from_):
js_files = []
self.compile_js_file(path, from_, js_files)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
so = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
return so
def compile_js_file(self, path, from_, js_files):
print "[DEBUG] compiling: %s" % from_
path = path.replace('.','_')
self.compile_js_asset_file(path,from_)
js_files.append(path);
def copy_resources(self,sources,target,write_routing=True,module_js=[]):
js_files = []
if write_routing:
intf = open(os.path.join(self.classes_dir,'ApplicationRouting.h'),'w+')
impf = open(os.path.join(self.classes_dir,'ApplicationRouting.m'),'w+')
intf.write(HEADER)
intf.write(INTERFACE_HEADER)
impf.write(HEADER)
impf.write(IMPL_HEADER)
impf.write("+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n")
if not os.path.exists(os.path.expanduser(target)):
os.makedirs(os.path.expanduser(target))
if not os.path.exists(self.assets_dir):
os.makedirs(self.assets_dir)
def compile_js_file(path,from_):
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) compiling: %s" % (hour, minute, second, from_)
path = path.replace('.','_')
self.compile_js_asset_file(path,from_)
js_files.append(path);
def compile_js_files():
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) packaging javascript" % (hour, minute, second)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
so = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
impf.write(so)
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) packaging finished" % (hour, minute, second)
def add_compiled_resources(source,target):
print "[DEBUG] copy resources from %s to %s" % (source,target)
compiled_targets = {}
for root, dirs, files in os.walk(source, True, None, True):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
prefix = root[len(source):]
from_ = to_unicode_or_not(os.path.join(root, file))
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(os.path.split(to_)[0])
if not os.path.exists(to_directory):
os.makedirs(to_directory)
fp = os.path.splitext(file)
ext = fp[1]
if ext == '.jss': continue
if len(fp)>1 and ext in ['.html','.js','.css']:
path = prefix + os.sep + file
path = path[1:]
entry = {'path':path,'from':from_,'to':to_}
if compiled_targets.has_key(ext):
compiled_targets[ext].append(entry)
else:
compiled_targets[ext]=[entry]
if not (write_routing and len(fp)>1 and ext in ['.html','.js','.css']):
# only copy if different filesize or doesn't exist
if not os.path.exists(to_) or os.path.getsize(from_)!=os.path.getsize(to_):
print "[DEBUG] copying: %s to %s" % (from_,to_)
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.html'):
compiled = self.process_html_files(compiled_targets,source)
if len(compiled) > 0:
for c in compiled:
from_ = c['from']
to_ = c['to']
path = c['path']
print "[DEBUG] copying: %s to %s" % (from_,to_)
file_contents = open(from_).read()
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
to = open(to_,'w')
to.write(file_contents)
to.close()
for ext in ('.css','.html'):
if compiled_targets.has_key(ext):
for css_file in compiled_targets[ext]:
from_ = css_file['from']
to_ = css_file['to']
print "[DEBUG] copying: %s to %s" % (from_,to_)
if path.endswith('.css'):
file_contents = open(from_).read()
packer = CSSPacker(file_contents)
file_contents = packer.pack()
to = open(to_,'w')
to.write(file_contents)
to.close()
else:
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.js'):
for js_file in compiled_targets['.js']:
path = js_file['path']
from_ = js_file['from']
compile_js_file(path, from_)
# copy in any module assets
for metadata in self.modules_metadata:
tp_dir = os.path.join(metadata['dir'],'assets')
if not os.path.exists(tp_dir): continue
tp_id = metadata['id']
t = '%s/modules/%s' %(target,tp_id)
add_compiled_resources(tp_dir,t)
for source in sources:
add_compiled_resources(source,target)
for js_file in module_js:
compile_js_file(js_file['path'], js_file['from'])
if write_routing:
compile_js_files();
impf.write("\tNSNumber *index = [map objectForKey:path];\n")
impf.write("\tif (index == nil) { return nil; }\n")
impf.write("\treturn filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);\n")
impf.write('}\n')
intf.write(FOOTER)
impf.write(FOOTER)
intf.close()
impf.close()
if __name__ == "__main__":
	# CLI entry point: <dir> <deploytype> [devicetype] [ios_version] [sdk_version]
	argv = sys.argv
	if len(argv) < 3:
		print "[USAGE] %s <dir> <deploytype> [devicetype] [ios_version] [sdk_version]" % argv[0]
		exit(1)
	project_dir = argv[1]
	deploytype = argv[2]
	# export-build means we are being driven from inside an Xcode build phase
	if deploytype == 'export-build':
		xcode = True
	else:
		xcode = False
	if len(argv) >= 4:
		devicefamily = argv[3]
	else:
		devicefamily = 'unknown'
	if len(argv) >= 5:
		ios = argv[4]
	else:
		ios = 'unknown'
	if len(argv) >= 6:
		sdk = argv[5]
	else:
		sdk = None
	# app id and name come from the project's tiapp.xml
	tiapp_xml = os.path.join(project_dir,'tiapp.xml')
	ti = TiAppXML(tiapp_xml)
	appid = ti.properties['id']
	name = ti.properties['name']
	c = Compiler(project_dir,appid,name,deploytype)
	c.compileProject(xcode,devicefamily,ios,sdk=sdk)
| |
# -*- coding: utf-8 -*-
__all__ = ['Reverse', 'UrlBuildingError']
from .url import URL
from .url_templates import UrlBuildingError
from ..utils import cached_property
class Location(object):
    '''
    Class representing an endpoint in the reverse url map.

    Holds the url-part builders for one reverse step plus the subdomains
    the endpoint is served on.
    '''
    def __init__(self, *builders, **kwargs):
        self.builders = list(builders)
        self.subdomains = kwargs.get('subdomains', [])

    @property
    def need_arguments(self):
        '''True if any builder requires url parameters to build its part.'''
        for b in self.builders:
            if b._url_params:
                return True
        return False

    def build_path(self, reverse, **kwargs):
        '''Concatenate the path parts produced by every builder.'''
        result = []
        for b in self.builders:
            result.append(b(**kwargs))
        return ''.join(result)

    def build_subdomains(self, reverse):
        '''Join subdomain parts, preferring each subdomain's primary alias.'''
        subdomains = [getattr(x, 'primary', x)
                      for x in self.subdomains
                      if getattr(x, 'primary', x)]
        return u'.'.join(subdomains)

    # Backward-compatible alias: the method was historically published under
    # this misspelled name and existing callers still use it.
    build_subdomians = build_subdomains

    @property
    def url_arguments(self):
        '''Union of url parameter names over all builders.'''
        return reduce(lambda x,y: x|set(y._url_params), self.builders, set())

    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
                self.builders == other.builders and self.subdomains == other.subdomains

    def __repr__(self):
        return '{}(*{!r}, subdomains={!r})'.format(
            self.__class__.__name__, self.builders, self.subdomains)
class Reverse(object):
'''
Object incapsulating reverse url map and methods needed to build urls
by their names, namespaces and parameters.
Usually an instance of `Reverse` can be found in `env.root`.
'''
    def __init__(self, scope, location=None, path='', host='', ready=False,
                 need_arguments=False, bound_env=None, parent=None,
                 finalize_params=None):
        # location is stuff containing builders for current reverse step
        # (builds url part for particular namespace or endpoint)
        self._location = location
        # scope is a dict having nested namespace and endpoint names as key and
        # (location, nested scope) tuple as values for the current namespace
        self._scope = scope
        self._path = path
        self._host = host
        # ready means that self._location path and subdomain have been already
        # added to self._path and self._host
        self._ready = ready
        # in the case it is endpoint and
        # default rule accepts arguments, it is still callable
        self._callable = not ready or (
            '' in scope and scope[''][0].need_arguments)
        self._need_arguments = need_arguments
        # the '' key marks a namespace that is itself an endpoint
        self._is_endpoint = (not self._scope) or ('' in self._scope)
        self._is_scope = bool(self._scope)
        self._bound_env = bound_env
        self._parent = parent
        # arguments remembered for the deferred '' build step (see _finalize)
        self._finalize_params = finalize_params or {}
def _attach_subdomain(self, host, location):
subdomain = location.build_subdomians(self)
if not host:
return subdomain
if subdomain:
return subdomain + '.' + host
return host
    def __call__(self, **kwargs):
        '''
        Get a copy of the `Reverse` but with same namespace and same url name,
        but with arguments attached.
        '''
        if not self._callable:
            raise UrlBuildingError('Endpoint do not accept arguments')
        if self._is_endpoint or self._need_arguments:
            finalize_params = {}
            path, host = self._path, self._host
            if self._location and not self._ready:
                # consume the arguments now: extend path/host for this step
                host = self._attach_subdomain(host, self._location)
                path += self._location.build_path(self, **kwargs)
            if '' in self._scope:
                # endpoint-with-children: the '' rule is built later in
                # _finalize, so remember the arguments for it
                finalize_params = kwargs
            return self.__class__(self._scope, self._location, path=path, host=host,
                                  bound_env=self._bound_env,
                                  ready=self._is_endpoint,
                                  parent=self._parent,
                                  finalize_params=finalize_params)
        raise UrlBuildingError('Not an endpoint {}'.format(repr(self)))
    def __getattr__(self, name):
        '''
        Get subreverse, a reverse in current namespace with the name, equal
        to the attribute name::

            env.root.index # getattr(env.root, 'index')
        '''
        if self._is_scope and name in self._scope:
            if self._need_arguments:
                # current step still waits for arguments: apply the default
                # (empty) call first, then descend
                return getattr(self(), name)
            location, scope = self._scope[name]
            path = self._path
            host = self._host
            # a location without url params can be built immediately
            ready = not location.need_arguments
            if ready:
                path += location.build_path(self)
                host = self._attach_subdomain(host, location)
            return self.__class__(scope, location, path, host, ready,
                                  bound_env=self._bound_env,
                                  parent=self,
                                  need_arguments=location.need_arguments)
        raise UrlBuildingError('Namespace or endpoint "{}" does not exist'
                               ' in {!r}'.format(name, self))
    def _finalize(self):
        # deferred build of the last part of url for endpoints that
        # also have nested scopes
        # i.e. finalization of __call__ for as_url
        if self._need_arguments:
            # no arguments were supplied yet; apply the default (empty) call
            self = self()
        path, host = self._path, self._host
        # the '' entry holds the location of the endpoint itself
        location = self._scope[''][0]
        host = self._attach_subdomain(host, location)
        path += location.build_path(self, **self._finalize_params)
        return self.__class__({}, self._location, path=path, host=host,
                              bound_env=self._bound_env,
                              parent=self._parent,
                              ready=self._is_endpoint)
    @cached_property
    def url_arguments(self):
        # All url parameter names this reverse step may consume: those of
        # its own location plus, for an endpoint with children, the ones of
        # the '' (endpoint) rule.
        args = set()
        if self._is_endpoint or self._need_arguments:
            if self._location:
                args |= self._location.url_arguments
            if self._is_endpoint and self._scope:
                args |= self._scope[''][0].url_arguments
        return args
    def _build_url_silent(self, _name, **kwargs):
        # Walk the dotted name, calling every step that needs arguments with
        # the full kwargs; collect which argument names were actually
        # consumed so build_url can report unused ones.
        subreverse = self
        used_args = set()
        for part in _name.split('.'):
            if not subreverse._ready and subreverse._need_arguments:
                used_args |= subreverse.url_arguments
                subreverse = subreverse(**kwargs)
            subreverse = getattr(subreverse, part)
        # the final step may itself be an endpoint still awaiting arguments
        if not subreverse._ready and subreverse._is_endpoint:
            used_args |= subreverse.url_arguments
            subreverse = subreverse(**kwargs)
        return used_args, subreverse
def build_subreverse(self, _name, **kwargs):
'''
String-based reverse API. Returns subreverse object::
env.root.build_subreverse('user', user_id=1).profile
'''
_, subreverse = self._build_url_silent(_name, **kwargs)
return subreverse
def build_url(self, _name, **kwargs):
'''
String-based reverse API. Returns URL object::
env.root.build_url('user.profile', user_id=1)
Checks that all necessary arguments are provided and all
provided arguments are used.
'''
used_args, subreverse = self._build_url_silent(_name, **kwargs)
if set(kwargs).difference(used_args):
raise UrlBuildingError(
'Not all arguments are used during URL building: {}'\
.format(', '.join(set(kwargs).difference(used_args))))
return subreverse.as_url
    @property
    def as_url(self):
        '''
        Reverse object converted to `web.URL`.

        If Reverse is bound to env:

        * try to build relative URL,
        * use current domain name, port and scheme as default
        '''
        if '' in self._scope:
            # Endpoint that also has a nested scope: build its own part last.
            return self._finalize().as_url
        if not self._is_endpoint:
            raise UrlBuildingError('Not an endpoint {}'.format(repr(self)))
        if self._ready:
            path, host = self._path, self._host
        else:
            # Not ready yet: build with the stored finalize params first.
            return self().as_url
        # XXX there is a little mess with `domain` and `host` terms
        if ':' in host:
            # NOTE(review): unpacking breaks on hosts with more than one
            # ':' (IPv6); presumably only 'name[:port]' occurs here - confirm.
            domain, port = host.split(':')
        else:
            domain = host
            port = None
        if self._bound_env:
            request = self._bound_env.request
            # Default port implied by the request scheme.
            scheme_port = {'http': '80',
                           'https': '443'}.get(request.scheme, '80')
            # Domain to compare with the result of build.
            # If both values are equal, domain part can be hidden from result.
            # Take it from route_state, not from env.request, because
            # route_state contains domain values with aliased replaced by their
            # primary value
            primary_domain = self._bound_env._route_state.primary_domain
            host_split = request.host.split(':')
            request_domain = host_split[0]
            request_port = host_split[1] if len(host_split) > 1 else scheme_port
            port = port or request_port
            return URL(path, host=domain or request_domain,
                       port=port if port != scheme_port else None,
                       schema=request.scheme,
                       show_host=host and (domain != primary_domain \
                                           or port != request_port))
        # Unbound reverse: always show the (possibly empty) host part.
        return URL(path, host=domain, port=port, show_host=True)
def __str__(self):
'''URLencoded representation of the URL'''
return str(self.as_url)
@classmethod
def from_handler(cls, handler):
'''
Get unbound instance of the class related to given handler::
app = web.cases(..)
Reverse.from_handler(app)
'''
return cls(handler._locations())
def bind_to_env(self, bound_env):
'''
Get a copy of the reverse, bound to `env` object.
Can be found in env.root attribute::
# done in iktomi.web.app.Application
env.root = Reverse.from_handler(app).bind_to_env(env)
'''
return self.__class__(self._scope, self._location,
path=self._path, host=self._host,
ready=self._ready,
need_arguments=self._need_arguments,
finalize_params=self._finalize_params,
parent=self._parent,
bound_env=bound_env)
def __repr__(self):
return '{}(path=\'{}\', host=\'{}\')'.format(
self.__class__.__name__, self._path, self._host)
| |
#PyJ2D - Copyright (C) 2011 James Garnon <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
from java.awt.image import BufferedImage, RasterFormatException
from java.lang import ArrayIndexOutOfBoundsException
from java.util import Hashtable
from pyj2d.rect import Rect
from pyj2d.color import Color
from pyj2d import constants as Const
__docformat__ = 'restructuredtext'

# Module-level switch read by Surface.blit: when True (the default), blit
# computes and returns the clipped bounding Rect; toggled via
# bounding_rect_return().
_return_rect = True
class Surface(BufferedImage):
    """
    **pyj2d.Surface**

    * Surface.get_size
    * Surface.get_width
    * Surface.get_height
    * Surface.get_rect
    * Surface.copy
    * Surface.convert
    * Surface.convert_alpha
    * Surface.subsurface
    * Surface.blit
    * Surface.set_colorkey
    * Surface.get_colorkey
    * Surface.replace_color
    * Surface.get_at
    * Surface.set_at
    * Surface.fill
    * Surface.get_parent
    * Surface.get_offset
    """

    def __init__(self, *arg):
        """
        Return Surface that is subclassed from java.awt.image.BufferedImage.

        Alternative arguments:

        * Size (w,h) of surface, optional second argument of flags (SRCALPHA)
        * Bufferedimage to convert to Surface

        Module initialization places pyj2d.Surface in module's namespace.
        """
        # Overload dispatch is EAFP: first assume arg[0] is a (width, height)
        # sequence; on TypeError fall back to treating arg[0] as an existing
        # BufferedImage, or the args as raw raster components.
        try:
            width, height = arg[0]
            try:
                # ARGB when the optional flags argument requests alpha.
                if arg[1] & (BufferedImage.TYPE_INT_ARGB | Const.SRCALPHA):
                    BufferedImage.__init__(self, width, height,
                                           BufferedImage.TYPE_INT_ARGB)
                else:
                    BufferedImage.__init__(self, width, height,
                                           BufferedImage.TYPE_INT_RGB)
            except IndexError:
                # No flags given: default to ARGB.
                BufferedImage.__init__(self, width, height,
                                       BufferedImage.TYPE_INT_ARGB)
            # Clear the new surface to opaque black.
            graphics2D = self.createGraphics()
            graphics2D.setColor(Color(0,0,0))
            graphics2D.fillRect(0, 0, width, height)
            graphics2D.dispose()
        except TypeError:
            try:
                # arg[0] is a BufferedImage: copy color model, raster and
                # image properties.
                cm = arg[0].getColorModel()
                raster = arg[0].getRaster()
                isRasterPremultiplied = arg[0].isAlphaPremultiplied()
                properties = Hashtable()
                keys = arg[0].getPropertyNames()
                if keys != None:
                    for key in keys:
                        properties.put(key, arg[0].getProperty(key))
            except AttributeError:
                # Raw (cm, raster, premultiplied, properties) arguments.
                cm, raster, isRasterPremultiplied, properties = arg
            BufferedImage.__init__(self, cm, raster,
                                   isRasterPremultiplied, properties)
        self._display = None    #display surface
        self._super_surface = None
        self._offset = (0,0)
        self._colorkey = None
        self._nonimplemented_methods()

    def __str__(self):
        # Human-readable form, e.g. '<Surface(640x480)>'.
        s = '<%s(%dx%d)>'
        return s % (self.__class__.__name__, self.width, self.height)

    def __repr__(self):
        return self.__str__()

    def get_size(self):
        """
        Return width and height of surface.
        """
        return (self.width, self.height)

    def get_width(self):
        """
        Return width of surface.
        """
        return self.width

    def get_height(self):
        """
        Return height of surface.
        """
        return self.height

    def get_rect(self, **attr):
        """
        Return rect of the surface.
        An optional keyword argument of the rect position.
        """
        rect = Rect(0, 0, self.width, self.height)
        # Apply keyword positions (e.g. center=(x,y)) via Rect setters.
        for key in attr:
            getattr(rect, '_set_'+key)(attr[key])
        return rect

    def copy(self):
        """
        Return Surface that is a copy of this surface.
        """
        if not self._super_surface:
            # Full surface: duplicate color model, raster data, properties.
            img_properties = Hashtable()
            keys = self.getPropertyNames()
            if keys != None:
                for key in keys:
                    img_properties.put(key, self.getProperty(key))
            surface = Surface(
                self.getColorModel(),
                self.getData(),
                self.isAlphaPremultiplied(),
                img_properties
            )
            surface._colorkey = self._colorkey
        else:
            # Subsurface: render into a fresh ARGB surface so the copy does
            # not share raster data with the parent surface.
            surface = Surface((self.width, self.height),
                              BufferedImage.TYPE_INT_ARGB)
            g2d = surface.createGraphics()
            g2d.drawImage(self, 0, 0, None)
            g2d.dispose()
            surface._colorkey = self._colorkey
        return surface

    def convert(self):
        """
        Convert surface without pixel alpha, return converted surface.
        """
        surface = Surface((self.width, self.height),
                          BufferedImage.TYPE_INT_RGB)
        g2d = surface.createGraphics()
        g2d.drawImage(self, 0, 0, None)
        g2d.dispose()
        surface._colorkey = self._colorkey
        return surface

    def convert_alpha(self):
        """
        Convert surface with pixel alpha, return converted surface.
        """
        surface = Surface((self.width, self.height),
                          BufferedImage.TYPE_INT_ARGB)
        g2d = surface.createGraphics()
        g2d.drawImage(self, 0, 0, None)
        g2d.dispose()
        surface._colorkey = self._colorkey
        return surface

    def subsurface(self, rect):
        """
        Return Surface that represents a subsurface that shares data with this surface.
        The rect argument is the area of the subsurface.
        """
        try:
            try:
                subsurf = self.getSubimage(rect.x, rect.y, rect.width, rect.height)
            except AttributeError:
                # rect is a plain sequence: coerce to Rect first.
                rect = Rect(rect)
                subsurf = self.getSubimage(rect.x, rect.y, rect.width, rect.height)
        except RasterFormatException:
            # Rect extends beyond the surface: retry clipped to the surface.
            try:
                rect = self.get_rect().intersection(rect)
                subsurf = self.getSubimage(rect.x, rect.y, rect.width, rect.height)
            except:
                raise ValueError('subsurface outside surface area')
        surface = Surface(subsurf)
        surface._super_surface = self
        surface._offset = (rect.x,rect.y)
        surface._colorkey = self._colorkey
        return surface

    def blit(self, surface, position, area=None):
        """
        Draw given surface on this surface at position.
        Optional area delimitates the region of given surface to draw.
        """
        if not _return_rect:
            # Fast path (see bounding_rect_return): draw without computing
            # or returning the bounding Rect.
            g2d = self.createGraphics()
            if not area:
                g2d.drawImage(surface, position[0], position[1], None)
            else:
                g2d.drawImage(surface,
                    position[0],position[1],position[0]+area[2],position[1]+area[3],
                    area[0],area[1],area[0]+area[2],area[1]+area[3], None)
            g2d.dispose()
            return None
        g2d = self.createGraphics()
        if not area:
            rect = Rect(position[0],position[1],surface.width,surface.height)
            g2d.drawImage(surface, rect.x, rect.y, None)
        else:
            rect = Rect(position[0],position[1],area[2],area[3])
            g2d.drawImage(surface,
                rect.x,rect.y,rect.x+area[2],rect.y+area[3],
                area[0],area[1],area[0]+area[2],area[1]+area[3], None)
        g2d.dispose()
        # Return the changed region clipped to this surface.
        return self.get_rect().clip(rect)

    def _blits(self, surfaces):
        # Batch blit of (surface, rect) pairs; no Rect bookkeeping.
        g2d = self.createGraphics()
        for surface, rect in surfaces:
            g2d.drawImage(surface, rect.x, rect.y, None)
        g2d.dispose()

    def _blit_clear(self, surface, rect_list):
        # Restore listed regions from a background surface (sprite erase).
        g2d = self.createGraphics()
        for r in rect_list:
            g2d.drawImage(surface,
                          r.x, r.y, r.x+r.width, r.y+r.height,
                          r.x, r.y, r.x+r.width, r.y+r.height, None)
        g2d.dispose()

    def set_colorkey(self, color, flags=None):
        """
        Set surface colorkey.
        """
        if self._colorkey:
            # Undo the previous colorkey: make its pixels opaque again.
            r = self._colorkey.r
            g = self._colorkey.g
            b = self._colorkey.b
            self.replace_color((r,g,b,0), self._colorkey)
            self._colorkey = None
        if color:
            # Make all pixels matching the new colorkey fully transparent.
            self._colorkey = Color(color)
            self.replace_color(self._colorkey)
        return None

    def get_colorkey(self):
        """
        Return surface colorkey.
        """
        if self._colorkey:
            return ( self._colorkey.r,
                     self._colorkey.g,
                     self._colorkey.b,
                     self._colorkey.a )
        else:
            return None

    def replace_color(self, color, new_color=None):
        """
        Replace color with new_color or with alpha.
        """
        pixels = self.getRGB(0, 0, self.width, self.height,
                             None,0,self.width)
        if hasattr(color, 'a'):
            color1 = color
        else:
            color1 = Color(color)
        if new_color is None:
            # Default replacement: same RGB with zero alpha.
            color2 = Color(color1.r, color1.g, color1.b, 0)
        else:
            if hasattr(new_color, 'a'):
                color2 = new_color
            else:
                color2 = Color(new_color)
        # Scan every pixel and swap exact ARGB matches.
        for i, pixel in enumerate(pixels):
            if pixel == color1.getRGB():
                pixels[i] = color2.getRGB()
        self.setRGB(0, 0, self.width, self.height,
                    pixels, 0, self.width)
        return None

    def get_at(self, pos):
        """
        Return color of a surface pixel.
        The pos argument represents x,y position of pixel.
        """
        try:
            return Color(self.getRGB(pos[0], pos[1]))
        except ArrayIndexOutOfBoundsException:
            raise IndexError('pixel index out of range')

    def set_at(self, pos, color):
        """
        Set color of a surface pixel.
        The arguments represent position x,y and color of pixel.
        """
        color = Color(color)
        try:
            self.setRGB(pos[0], pos[1], color.getRGB())
        except ArrayIndexOutOfBoundsException:
            raise IndexError('pixel index out of range')
        return None

    def fill(self, color=(0,0,0), rect=None):
        """
        Fill surface with color.
        """
        g2d = self.createGraphics()
        color = Color(color)
        g2d.setColor(color)
        if not rect:
            rect = Rect(0, 0, self.width, self.height)
        else:
            rect = Rect(rect)
        g2d.fillRect(rect.x, rect.y, rect.width, rect.height)
        g2d.dispose()
        return rect

    def get_parent(self):
        """
        Return parent Surface of subsurface.
        """
        return self._super_surface    #if delete, delete subsurface...

    def get_offset(self):
        """
        Return offset of subsurface in surface.
        """
        return self._offset

    def _nonimplemented_methods(self):
        # Stub out pygame API methods that have no Jython counterpart.
        self.set_alpha = lambda *arg: None
        self.get_alpha = lambda *arg: None
        self.lock = lambda *arg: None
        self.unlock = lambda *arg: None
        self.mustlock = lambda *arg: False
        self.get_locked = lambda *arg: False
        self.get_locks = lambda *arg: ()
def bounding_rect_return(setting):
    """
    Set whether surface blit function returns bounding Rect.
    Setting (bool) defaults to True on module initialization.
    """
    # Surface.blit reads this module global on every call.
    global _return_rect
    _return_rect = setting
| |
#!/usr/bin/env python
#
# Copyright (c) 2012-2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Based on make_can_boot_descriptor.py originally created by Ben Dyer, David Sidrane and Pavel Kirienko for PX4.
# See https://github.com/PX4/Firmware/blob/nuttx_next/Tools/make_can_boot_descriptor.py
#
from __future__ import division, absolute_import, print_function, unicode_literals
import os
import sys
import struct
import optparse
import binascii
from io import BytesIO
class AppDescriptor(object):
    """
    Brickproof bootloader firmware image descriptor format:

        uint64_t signature    (bytes [7:0] set to 'APDesc00' by linker script)
        uint64_t image_crc    (set to 0 by linker script)
        uint32_t image_size   (set to 0 by linker script)
        uint32_t vcs_commit   (set in source)
        uint8_t version_major (set in source)
        uint8_t version_minor (set in source)
        uint8_t reserved[6]   (set to 0xFF by linker script)
    """

    LENGTH = 8 + 8 + 4 + 4 + 1 + 1 + 6
    SIGNATURE = b"APDesc00"
    RESERVED = b"\xFF" * 6
    # Little-endian struct layout matching the field list above.
    _FORMAT = "<8sQLLBB6s"

    def __init__(self, bytes=None):
        self.signature = AppDescriptor.SIGNATURE
        self.image_crc = 0
        self.image_size = 0
        self.vcs_commit = 0
        self.version_major = 0
        self.version_minor = 0
        self.reserved = AppDescriptor.RESERVED
        if bytes:
            try:
                self.unpack(bytes)
            except Exception:
                raise ValueError("Invalid AppDescriptor: {0}".format(binascii.b2a_hex(bytes)))

    def pack(self):
        """Serialize the descriptor to its on-disk byte representation."""
        fields = (self.signature, self.image_crc, self.image_size,
                  self.vcs_commit, self.version_major, self.version_minor,
                  self.reserved)
        return struct.pack(self._FORMAT, *fields)

    def unpack(self, bytes):
        """Parse fields from *bytes*; raise ValueError when inconsistent."""
        fields = struct.unpack(self._FORMAT, bytes)
        (self.signature, self.image_crc, self.image_size, self.vcs_commit,
         self.version_major, self.version_minor, self.reserved) = fields
        # A descriptor must be either linker-fresh or fully populated.
        if not self.empty and not self.valid:
            raise ValueError()

    @property
    def empty(self):
        """True when the descriptor looks freshly linker-initialized."""
        return (self.signature == AppDescriptor.SIGNATURE and
                self.image_crc == 0 and
                self.image_size == 0 and
                self.reserved == AppDescriptor.RESERVED)

    @property
    def valid(self):
        """True when the descriptor carries a non-zero CRC and size."""
        return (self.signature == AppDescriptor.SIGNATURE and
                self.image_crc != 0 and
                self.image_size > 0 and
                self.reserved == AppDescriptor.RESERVED)
class FirmwareImage(object):
    # Large padding may allow for faster CRC verification
    PADDING = 8

    def __init__(self, path, mode="r"):
        # Binary mode is always forced ('rb'/'wb'); the replace() collapses
        # a doubled 'b' if the caller already passed one.
        self._file = open(path, (mode + "b").replace("bb", "b"))
        self._padding = self.PADDING
        if "r" in mode:
            # Work on an in-memory copy of the whole file.
            self._contents = BytesIO(self._file.read())
        else:
            self._contents = BytesIO()
        self._do_write = False

        # Lazily computed caches (see the properties below).
        self._length = None
        self._descriptor_offset = None
        self._descriptor_bytes = None
        self._descriptor = None

    def __enter__(self):
        return self

    def __getattr__(self, attr):
        # Proxy unknown attributes to the underlying BytesIO; touching
        # 'write' marks the image dirty so __exit__ flushes it to disk.
        if attr == "write":
            self._do_write = True
        return getattr(self._contents, attr)

    def __iter__(self):
        return iter(self._contents)

    def __exit__(self, *args):
        # Flush the in-memory image (plus 0xFF padding, the erased-flash
        # value) back to the file only if write() was ever requested.
        if self._do_write:
            if getattr(self._file, "seek", None):
                self._file.seek(0)
            self._file.write(self._contents.getvalue())
            if self._padding:
                self._file.write(b'\xff' * self._padding)
        self._file.close()

    def _write_descriptor_raw(self):
        # Seek to the appropriate location, write the serialized descriptor, and seek back.
        prev_offset = self._contents.tell()
        self._contents.seek(self._descriptor_offset)
        self._contents.write(self._descriptor.pack())
        self._contents.seek(prev_offset)

    def write_descriptor(self):
        # Set the descriptor's length and CRC to the values required for CRC computation
        self.app_descriptor.image_size = self.length
        self.app_descriptor.image_crc = 0
        self._write_descriptor_raw()

        # Update the descriptor's CRC based on the computed value and write it out again
        self.app_descriptor.image_crc = self.crc
        self._write_descriptor_raw()

    @property
    def crc(self):
        # Bit-by-bit CRC-64 (poly 0x42F0E1EBA9EA3693) over the padded image.
        MASK = 0xFFFFFFFFFFFFFFFF
        POLY = 0x42F0E1EBA9EA3693

        # Calculate the image CRC with the image_crc field in the app descriptor zeroed out.
        crc_offset = self.app_descriptor_offset + len(AppDescriptor.SIGNATURE)
        content = bytearray(self._contents.getvalue())
        content[crc_offset:crc_offset + 8] = bytearray(b"\x00" * 8)
        if self._padding:
            content += bytearray(b"\xff" * self._padding)
        val = MASK
        for byte in content:
            val ^= (byte << 56) & MASK
            for bit in range(8):
                if val & (1 << 63):
                    val = ((val << 1) & MASK) ^ POLY
                else:
                    val <<= 1
        return (val & MASK) ^ MASK

    @property
    def length(self):
        # Padded image length in bytes, computed once and cached.
        if not self._length:
            # Find the length of the file by seeking to the end and getting the offset
            prev_offset = self._contents.tell()
            self._contents.seek(0, os.SEEK_END)
            self._length = self._contents.tell()
            if self._padding:
                # Round up to the padding boundary; _padding is reduced to
                # the amount that still has to be appended on write.
                mod = self._length % self._padding
                self._padding = self._padding - mod if mod else 0
                self._length += self._padding
            self._contents.seek(prev_offset)
        return self._length

    @property
    def app_descriptor_offset(self):
        # NOTE(review): a descriptor at offset 0 would be re-scanned on
        # every access since 0 is falsy - confirm it cannot occur here.
        if not self._descriptor_offset:
            # Save the current position
            prev_offset = self._contents.tell()
            # Check each byte in the file to see if a valid descriptor starts at that location.
            # Slow, but not slow enough to matter.
            offset = 0
            while offset < self.length - AppDescriptor.LENGTH:
                self._contents.seek(offset)
                try:
                    # If this throws an exception, there isn't a valid descriptor at this offset
                    AppDescriptor(self._contents.read(AppDescriptor.LENGTH))
                except Exception:
                    offset += 1
                else:
                    self._descriptor_offset = offset
                    break
            # Go back to the previous position
            self._contents.seek(prev_offset)

        return self._descriptor_offset

    @property
    def app_descriptor(self):
        # Parsed AppDescriptor instance, lazily located and cached.
        if not self._descriptor:
            # Save the current position
            prev_offset = self._contents.tell()
            # Jump to the descriptor and parse it
            self._contents.seek(self.app_descriptor_offset)
            self._descriptor_bytes = self._contents.read(AppDescriptor.LENGTH)
            self._descriptor = AppDescriptor(self._descriptor_bytes)
            # Go back to the previous offset
            self._contents.seek(prev_offset)
        return self._descriptor

    @app_descriptor.setter
    def app_descriptor(self, value):
        self._descriptor = value
if __name__ == "__main__":
    parser = optparse.OptionParser(usage="usage: %prog [options] <input binary> <node name> <hardware version string>")
    parser.add_option("--also-patch-descriptor-in", dest="also_patch_descriptor_in", default=[], action='append',
                      help="file where the descriptor will be updated too (e.g. ELF)", metavar="PATH")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      help="show additional firmware information on stdout")
    options, args = parser.parse_args()
    if len(args) != 3:
        parser.error("Invalid usage")

    # Derive the output file name from the node name, hardware version and
    # the version/commit stored in the input image's descriptor.
    with FirmwareImage(args[0], "rb") as in_image:
        out_file = '%s-%s-%s.%s.%x.application.bin' % (args[1], args[2],
                                                       in_image.app_descriptor.version_major,
                                                       in_image.app_descriptor.version_minor,
                                                       in_image.app_descriptor.vcs_commit)
        with FirmwareImage(out_file, "wb") as out_image:
            image = in_image.read()
            out_image.write(image)
            # Compute and embed image_size/image_crc in the output copy.
            out_image.write_descriptor()
            # Optionally patch the same descriptor bytes into other
            # artifacts (e.g. the ELF) so they match the released binary.
            for patchee in options.also_patch_descriptor_in:
                with open(patchee, "rb") as im:
                    also_image = im.read()
                also_image = also_image.replace(in_image.app_descriptor.pack(), out_image.app_descriptor.pack())
                with open(patchee, "wb") as im:
                    im.write(also_image)
            if options.verbose:
                sys.stderr.write("""
Application descriptor located at offset 0x{0.app_descriptor_offset:08X}
READ VALUES
------------------------------------------------------------------------------
Field Type Value
signature uint64 {1.signature!r}
image_crc uint64 0x{1.image_crc:016X}
image_size uint32 0x{1.image_size:X} ({1.image_size:d} B)
vcs_commit uint32 {1.vcs_commit:08X}
version_major uint8 {1.version_major:d}
version_minor uint8 {1.version_minor:d}
reserved uint8[6] {1.reserved!r}
WRITTEN VALUES
------------------------------------------------------------------------------
Field Type Value
signature uint64 {2.signature!r}
image_crc uint64 0x{2.image_crc:016X}
image_size uint32 0x{2.image_size:X} ({2.image_size:d} B)
vcs_commit uint32 {2.vcs_commit:08X}
version_major uint8 {2.version_major:d}
version_minor uint8 {2.version_minor:d}
reserved uint8[6] {2.reserved!r}
""".format(in_image, in_image.app_descriptor, out_image.app_descriptor))
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute import servers as servers_v21
from nova.compute import api as compute_api
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.image import glance
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
CONF = nova.conf.CONF
FAKE_UUID = fakes.FAKE_UUID
@ddt.ddt
class ServerActionsControllerTestV21(test.TestCase):
    # Image fixture shared by the rebuild tests.
    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    image_base_url = 'http://localhost:9292/images/'
    # NOTE(review): yields a double slash ('.../images//<uuid>') -
    # apparently harmless for these tests; confirm before relying on it.
    image_href = image_base_url + '/' + image_uuid
    # Hooks that versioned subclasses may override.
    servers = servers_v21
    validation_error = exception.ValidationError
    request_too_large_error = exception.ValidationError
    image_url = None
    def setUp(self):
        """Wire up the controller under test with stubbed compute/glance."""
        super(ServerActionsControllerTestV21, self).setUp()
        self.flags(group='glance', api_servers=['http://localhost:9292'])
        # Default instance returned by compute_api.get: ACTIVE, on a host.
        self.stub_out('nova.compute.api.API.get',
                      fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
                                             project_id=fakes.FAKE_PROJECT_ID,
                                             host='fake_host'))
        self.stub_out('nova.objects.Instance.save', lambda *a, **kw: None)

        fakes.stub_out_compute_api_snapshot(self)

        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.flags(enable_instance_password=True, group='api')
        # TODO(stephenfin): Use uuidsentinel instead of this
        self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'

        self.controller = self._get_controller()
        self.compute_api = self.controller.compute_api
        # We don't care about anything getting as far as hitting the compute
        # RPC API so we just mock it out here.
        mock_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
        mock_rpcapi.start()
        self.addCleanup(mock_rpcapi.stop)
        # The project_id here matches what is used by default in
        # fake_compute_get which need to match for policy checks.
        self.req = fakes.HTTPRequest.blank('',
                                           project_id=fakes.FAKE_PROJECT_ID)
        self.context = self.req.environ['nova.context']
        self.image_api = glance.API()
        # Assume that anything that hits the compute API and looks for a
        # RequestSpec doesn't care about it, since testing logic that deep
        # should be done in nova.tests.unit.compute.test_api.
        mock_reqspec = mock.patch('nova.objects.RequestSpec')
        mock_reqspec.start()
        self.addCleanup(mock_reqspec.stop)
        # Similarly we shouldn't care about anything hitting conductor from
        # these tests.
        mock_conductor = mock.patch.object(
            self.controller.compute_api, 'compute_task_api')
        mock_conductor.start()
        self.addCleanup(mock_conductor.stop)
        self.mock_neutron_extension_list = self.useFixture(
            fixtures.MockPatch(
                'nova.network.neutron.API._refresh_neutron_extensions_cache'
            )
        ).mock
        self.mock_neutron_extension_list.return_value = {'extensions': []}
def _get_controller(self):
return self.servers.ServersController()
def _test_locked_instance(self, action, method=None, body_map=None,
compute_api_args_map=None):
if body_map is None:
body_map = {}
if compute_api_args_map is None:
compute_api_args_map = {}
args, kwargs = compute_api_args_map.get(action, ((), {}))
uuid = uuidutils.generate_uuid()
context = self.req.environ['nova.context']
instance = fake_instance.fake_db_instance(
id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None,
project_id=context.project_id,
user_id=context.user_id)
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), instance)
with test.nested(
mock.patch.object(compute_api.API, 'get',
return_value=instance),
mock.patch.object(compute_api.API, method,
side_effect=exception.InstanceIsLocked(
instance_uuid=instance['uuid'])),
) as (mock_get, mock_method):
controller_function = 'self.controller.' + action
self.assertRaises(webob.exc.HTTPConflict,
eval(controller_function),
self.req, instance['uuid'],
body=body_map.get(action))
expected_attrs = ['flavor', 'numa_topology']
if method == 'resize':
expected_attrs.append('services')
mock_get.assert_called_once_with(self.context, uuid,
expected_attrs=expected_attrs,
cell_down_support=False)
mock_method.assert_called_once_with(self.context, instance,
*args, **kwargs)
    def test_actions_with_locked_instance(self):
        """Each listed action returns 409 when the instance is locked."""
        actions = ['_action_resize', '_action_confirm_resize',
                   '_action_revert_resize', '_action_reboot',
                   '_action_rebuild']

        # Controller action name -> compute API method it delegates to.
        method_translations = {'_action_resize': 'resize',
                               '_action_confirm_resize': 'confirm_resize',
                               '_action_revert_resize': 'revert_resize',
                               '_action_reboot': 'reboot',
                               '_action_rebuild': 'rebuild'}

        # Request bodies for the actions that require parameters.
        body_map = {'_action_resize': {'resize': {'flavorRef': '2'}},
                    '_action_reboot': {'reboot': {'type': 'HARD'}},
                    '_action_rebuild': {'rebuild': {
                        'imageRef': self.image_uuid,
                        'adminPass': 'TNc53Dr8s7vw'}}}

        # Expected (args, kwargs) of the compute API call per action.
        # NOTE(review): ('2') is not a tuple - it is the string '2'; this
        # only works because *'2' unpacks a one-char string. Confirm intent.
        args_map = {'_action_resize': (('2'), {'auto_disk_config': None}),
                    '_action_confirm_resize': ((), {}),
                    '_action_reboot': (('HARD',), {}),
                    '_action_rebuild': ((self.image_uuid,
                                         'TNc53Dr8s7vw'), {})}

        for action in actions:
            method = method_translations.get(action)
            self._test_locked_instance(action, method=method,
                                       body_map=body_map,
                                       compute_api_args_map=args_map)
def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_missing_type(self):
body = dict(reboot=dict())
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_none(self):
body = dict(reboot=dict(type=None))
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_not_found(self):
body = dict(reboot=dict(type="HARD"))
with mock.patch('nova.compute.api.API.get',
side_effect=exception.InstanceNotFound(
instance_id=uuids.fake)):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
self.req, uuids.fake, body=body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.reboot', fake_reboot)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
self.stub_out('nova.compute.api.API.get',
fakes.fake_compute_get(project_id=fakes.FAKE_PROJECT_ID,
vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
body = dict(reboot=dict(type="HARD"))
self.stub_out('nova.compute.api.API.get',
fakes.fake_compute_get(project_id=fakes.FAKE_PROJECT_ID,
vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_hard_with_hard_in_progress(self):
body = dict(reboot=dict(type="HARD"))
self.stub_out('nova.compute.api.API.get',
fakes.fake_compute_get(
project_id=fakes.FAKE_PROJECT_ID,
vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_soft_with_hard_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
self.stub_out('nova.compute.api.API.get',
fakes.fake_compute_get(
project_id=fakes.FAKE_PROJECT_ID,
vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
    def _test_rebuild_preserve_ephemeral(self, value=None):
        """Rebuild with an optional preserve_ephemeral flag and verify the
        compute API call forwards it only when it was explicitly given."""
        return_server = fakes.fake_compute_get(
            project_id=fakes.FAKE_PROJECT_ID,
            image_ref=uuids.image_ref,
            vm_state=vm_states.ACTIVE,
            host='fake_host')
        self.stub_out('nova.compute.api.API.get', return_server)

        body = {
            "rebuild": {
                "imageRef": self._image_href,
            },
        }
        if value is not None:
            body['rebuild']['preserve_ephemeral'] = value

        with mock.patch.object(compute_api.API, 'rebuild') as mock_rebuild:
            self.controller._action_rebuild(self.req, FAKE_UUID, body=body)

            if value is not None:
                mock_rebuild.assert_called_once_with(self.context, mock.ANY,
                    self._image_href, mock.ANY, preserve_ephemeral=value)
            else:
                # Flag omitted from the request: must not appear in the call.
                mock_rebuild.assert_called_once_with(self.context, mock.ANY,
                    self._image_href, mock.ANY)
def test_rebuild_preserve_ephemeral_true(self):
self._test_rebuild_preserve_ephemeral(True)
def test_rebuild_preserve_ephemeral_false(self):
self._test_rebuild_preserve_ephemeral(False)
def test_rebuild_preserve_ephemeral_default(self):
self._test_rebuild_preserve_ephemeral()
    def test_rebuild_accepted_minimum(self):
        """A minimal rebuild body succeeds and returns image, adminPass
        and a str Location header."""
        return_server = fakes.fake_compute_get(
            project_id=fakes.FAKE_PROJECT_ID,
            image_ref=uuids.image_ref,
            vm_state=vm_states.ACTIVE, host='fake_host')
        self.stub_out('nova.compute.api.API.get', return_server)
        self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID

        body = {
            "rebuild": {
                "imageRef": self._image_href,
            },
        }

        robj = self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
        body = robj.obj

        self.assertEqual(body['server']['image']['id'], uuids.image_ref)
        self.assertEqual(len(body['server']['adminPass']),
                         CONF.password_length)

        self.assertEqual(robj['location'], self_href)
        # pep3333 requires applications produces headers which are str
        self.assertEqual(str, type(robj['location']))
    def test_rebuild_instance_with_image_uuid(self):
        """A bare image UUID is passed through unchanged to rebuild()."""
        info = dict(image_href_in_call=None)

        # Stub signature mirrors the unbound compute API method; 'self2'
        # receives the API instance.
        def rebuild(self2, context, instance, image_href, *args, **kwargs):
            info['image_href_in_call'] = image_href

        self.stub_out('nova.compute.api.API.rebuild', rebuild)

        # proper local hrefs must start with 'http://localhost/v2/'
        body = {
            'rebuild': {
                'imageRef': self.image_uuid,
            },
        }
        self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
        self.assertEqual(info['image_href_in_call'], self.image_uuid)
def test_rebuild_instance_with_image_href_uses_uuid(self):
# proper local hrefs must start with 'http://localhost/v2/'
body = {
'rebuild': {
'imageRef': self.image_href,
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
    def test_rebuild_accepted_minimum_pass_disabled(self):
        """With enable_instance_password=False the rebuild response must
        not contain adminPass.
        """
        # run with enable_instance_password disabled to verify adminPass
        # is missing from response. See lp bug 921814
        self.flags(enable_instance_password=False, group='api')
        return_server = fakes.fake_compute_get(
            project_id=fakes.FAKE_PROJECT_ID,
            image_ref=uuids.image_ref,
            vm_state=vm_states.ACTIVE, host='fake_host')
        self.stub_out('nova.compute.api.API.get', return_server)
        self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
        body = {
            "rebuild": {
                "imageRef": self._image_href,
            },
        }
        robj = self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
        body = robj.obj
        self.assertEqual(body['server']['image']['id'], uuids.image_ref)
        self.assertNotIn("adminPass", body['server'])
        self.assertEqual(robj['location'], self_href)
        # pep3333 requires applications produces headers which are str
        self.assertEqual(str, type(robj['location']))
    @ddt.data(
        exception.InstanceIsLocked(instance_uuid=uuids.instance),
    )
    @mock.patch('nova.compute.api.API.rebuild')
    def test_rebuild__http_conflict_error(self, exc, mock_rebuild):
        """Conflict-type compute exceptions (e.g. InstanceIsLocked) map to
        HTTP 409 on rebuild.
        """
        mock_rebuild.side_effect = exc
        self.assertRaises(
            webob.exc.HTTPConflict,
            self.controller._action_rebuild,
            self.req, uuids.instance,
            body={'rebuild': {'imageRef': uuids.image}})
    @ddt.data(
        exception.ForbiddenWithAccelerators(),
        exception.OperationNotSupportedForVTPM(
            instance_uuid=uuids.instance, operation='foo'),
        exception.OperationNotSupportedForVDPAInterface(
            instance_uuid=uuids.instance, operation='foo'),
    )
    @mock.patch('nova.compute.api.API.rebuild')
    def test_rebuild_raises_badrequest_for_not_supported_features(
            self, exc, mock_rebuild):
        """Feature-not-supported compute exceptions map to HTTP 400 on
        rebuild.
        """
        mock_rebuild.side_effect = exc
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, uuids.instance,
                          body={'rebuild': {'imageRef': uuids.image}})
def test_rebuild_raises_conflict_on_invalid_state(self):
body = {'rebuild': {'imageRef': uuids.image}}
def fake_rebuild(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.rebuild', fake_rebuild)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
    def test_rebuild_accepted_with_metadata(self):
        """Metadata supplied in the rebuild body is reflected in the
        response server document.
        """
        metadata = {'new': 'metadata'}
        return_server = fakes.fake_compute_get(
            project_id=fakes.FAKE_PROJECT_ID,
            metadata=metadata,
            vm_state=vm_states.ACTIVE, host='fake_host')
        self.stub_out('nova.compute.api.API.get', return_server)
        body = {
            "rebuild": {
                "imageRef": self._image_href,
                "metadata": metadata,
            },
        }
        body = self.controller._action_rebuild(self.req, FAKE_UUID,
                                               body=body).obj
        self.assertEqual(body['server']['metadata'], metadata)
def test_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": "stack",
},
}
self.assertRaises(self.validation_error,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_too_large_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": {
256 * "k": "value"
}
}
}
self.assertRaises(self.request_too_large_error,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=body)
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": self._image_href,
},
}
self.assertRaises(self.validation_error,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
    def test_rebuild_admin_pass(self):
        """A caller-supplied adminPass is echoed back in the rebuild
        response.
        """
        return_server = fakes.fake_compute_get(
            project_id=fakes.FAKE_PROJECT_ID,
            image_ref=uuids.image_ref,
            vm_state=vm_states.ACTIVE, host='fake_host')
        self.stub_out('nova.compute.api.API.get', return_server)
        body = {
            "rebuild": {
                "imageRef": self._image_href,
                "adminPass": "asdf",
            },
        }
        body = self.controller._action_rebuild(self.req, FAKE_UUID,
                                               body=body).obj
        self.assertEqual(body['server']['image']['id'], uuids.image_ref)
        self.assertEqual(body['server']['adminPass'], 'asdf')
    def test_rebuild_admin_pass_pass_disabled(self):
        """With enable_instance_password=False even a caller-supplied
        adminPass is omitted from the rebuild response.
        """
        # run with enable_instance_password disabled to verify adminPass
        # is missing from response. See lp bug 921814
        self.flags(enable_instance_password=False, group='api')
        return_server = fakes.fake_compute_get(
            project_id=fakes.FAKE_PROJECT_ID,
            image_ref=FAKE_UUID,
            vm_state=vm_states.ACTIVE, host='fake_host')
        self.stub_out('nova.compute.api.API.get', return_server)
        body = {
            "rebuild": {
                "imageRef": self._image_href,
                "adminPass": "asdf",
            },
        }
        body = self.controller._action_rebuild(self.req, FAKE_UUID,
                                               body=body).obj
        self.assertEqual(body['server']['image']['id'], FAKE_UUID)
        self.assertNotIn('adminPass', body['server'])
def test_rebuild_server_not_found(self):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
with mock.patch('nova.compute.api.API.get',
side_effect=exception.InstanceNotFound(
instance_id=FAKE_UUID)):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_bad_image(self):
body = {
"rebuild": {
"imageRef": "foo",
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
    def test_rebuild_accessIP(self):
        """accessIPv4/accessIPv6 in the rebuild body end up in the changes
        saved on the instance, along with the reset rebuild fields.
        """
        attributes = {
            'access_ip_v4': '172.19.0.1',
            'access_ip_v6': 'fe80::1',
        }
        body = {
            "rebuild": {
                "imageRef": self._image_href,
                "accessIPv4": "172.19.0.1",
                "accessIPv6": "fe80::1",
            },
        }
        data = {'changes': {}}
        orig_get = compute_api.API.get
        def wrap_get(*args, **kwargs):
            # Remember the instance object so fake_save can inspect it.
            data['instance'] = orig_get(*args, **kwargs)
            return data['instance']
        def fake_save(context, **kwargs):
            # Record what the API mutated instead of hitting the DB.
            data['changes'].update(data['instance'].obj_get_changes())
        self.stub_out('nova.compute.api.API.get', wrap_get)
        self.stub_out('nova.objects.Instance.save', fake_save)
        self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
        self.assertEqual(self._image_href, data['changes']['image_ref'])
        # Rebuild clears kernel/ramdisk and resets task state/progress.
        self.assertEqual("", data['changes']['kernel_id'])
        self.assertEqual("", data['changes']['ramdisk_id'])
        self.assertEqual(task_states.REBUILDING, data['changes']['task_state'])
        self.assertEqual(0, data['changes']['progress'])
        for attr, value in attributes.items():
            self.assertEqual(value, str(data['changes'][attr]))
    def test_rebuild_when_kernel_not_exists(self):
        """Rebuilding from an image whose kernel_id/ramdisk_id reference
        images missing from Glance returns HTTP 400.
        """
        def return_image_meta(*args, **kwargs):
            # Only these two images exist; the kernel/ramdisk ids referenced
            # by image_2's properties are deliberately absent.
            image_meta_table = {
                uuids.image_1_id: {
                    'id': uuids.image_1_id,
                    'status': 'active',
                    'container_format': 'ari'
                },
                uuids.image_2_id: {
                    'id': uuids.image_2_id,
                    'status': 'active',
                    'container_format': 'raw',
                    'properties': {
                        'kernel_id': uuids.kernel_id,
                        'ramdisk_id': uuids.ramdisk_id
                    }
                },
            }
            image_id = args[2]
            try:
                image_meta = image_meta_table[str(image_id)]
            except KeyError:
                raise exception.ImageNotFound(image_id=image_id)
            return image_meta
        self.stub_out('nova.tests.fixtures.GlanceFixture.show',
                      return_image_meta)
        body = {
            "rebuild": {
                "imageRef": uuids.image_2_id,
            },
        }
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=body)
    def test_rebuild_proper_kernel_ram(self):
        """A rebuild image that declares kernel_id/ramdisk_id properties
        updates those ids on the instance.
        """
        instance_meta = {'kernel_id': None, 'ramdisk_id': None}
        orig_get = compute_api.API.get
        def wrap_get(*args, **kwargs):
            # Keep a handle on the instance so fake_save can read it.
            inst = orig_get(*args, **kwargs)
            instance_meta['instance'] = inst
            return inst
        def fake_save(context, **kwargs):
            # Capture only the fields this test tracks, when changed.
            instance = instance_meta['instance']
            for key in instance_meta.keys():
                if key in instance.obj_what_changed():
                    instance_meta[key] = instance[key]
        def return_image_meta(*args, **kwargs):
            image_meta_table = {
                uuids.kernel_image_id: {
                    'id': uuids.kernel_image_id,
                    'status': 'active',
                    'container_format': 'aki'},
                uuids.ramdisk_image_id: {
                    'id': uuids.ramdisk_image_id,
                    'status': 'active',
                    'container_format': 'ari'},
                '155d900f-4e14-4e4c-a73d-069cbf4541e6':
                    {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                     'status': 'active',
                     'container_format': 'raw',
                     'properties': {'kernel_id': uuids.kernel_image_id,
                                    'ramdisk_id': uuids.ramdisk_image_id}},
            }
            image_id = args[2]
            try:
                image_meta = image_meta_table[str(image_id)]
            except KeyError:
                raise exception.ImageNotFound(image_id=image_id)
            return image_meta
        self.stub_out('nova.tests.fixtures.GlanceFixture.show',
                      return_image_meta)
        self.stub_out('nova.compute.api.API.get', wrap_get)
        self.stub_out('nova.objects.Instance.save', fake_save)
        body = {
            "rebuild": {
                "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
            },
        }
        # Return value is unused; the assertions inspect captured changes.
        self.controller._action_rebuild(self.req, FAKE_UUID, body=body).obj
        self.assertEqual(instance_meta['kernel_id'], uuids.kernel_image_id)
        self.assertEqual(instance_meta['ramdisk_id'], uuids.ramdisk_image_id)
@mock.patch.object(compute_api.API, 'rebuild')
def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
mock_rebuild.side_effect = exception.AutoDiskConfigDisabledByImage(
image='dummy')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'rebuild')
def test_rebuild_raise_invalid_architecture_exc(self, mock_rebuild):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
mock_rebuild.side_effect = exception.InvalidArchitectureName('arm64')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'rebuild')
def test_rebuild_raise_invalid_volume_exc(self, mock_rebuild):
"""Make sure that we can't rebuild with an InvalidVolume exception."""
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
mock_rebuild.side_effect = exception.InvalidVolume('error')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_resize_server(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = False
def resize_mock(*args, **kwargs):
self.resize_called = True
self.stub_out('nova.compute.api.API.resize', resize_mock)
self.controller._action_resize(self.req, FAKE_UUID, body=body)
self.assertTrue(self.resize_called)
def test_resize_server_no_flavor(self):
body = dict(resize=dict())
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_no_flavor_ref(self):
body = dict(resize=dict(flavorRef=None))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_with_extra_arg(self):
body = dict(resize=dict(favorRef="http://localhost/3",
extra_arg="extra_arg"))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_invalid_flavor_ref(self):
body = dict(resize=dict(flavorRef=1.2))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_with_server_not_found(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
with mock.patch('nova.compute.api.API.get',
side_effect=exception.InstanceNotFound(
instance_id=FAKE_UUID)):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
    def test_resize_with_image_exceptions(self):
        """Each image-related compute exception raised during resize maps
        to the expected HTTP error, checked one call at a time.
        """
        body = dict(resize=dict(flavorRef="http://localhost/3"))
        self.resize_called = 0
        image_id = 'fake_image_id'
        # Pairs of (exception raised by compute, expected HTTP error).
        # Note the bare exception.Invalid class (not an instance) is valid:
        # raising a class instantiates it with no arguments.
        exceptions = [
            (exception.ImageNotAuthorized(image_id=image_id),
             webob.exc.HTTPUnauthorized),
            (exception.ImageNotFound(image_id=image_id),
             webob.exc.HTTPBadRequest),
            (exception.Invalid, webob.exc.HTTPBadRequest),
            (exception.AutoDiskConfigDisabledByImage(image=image_id),
             webob.exc.HTTPBadRequest),
        ]
        # Two parallel iterators: one consumed by the stub, one by the loop.
        raised, expected = map(iter, zip(*exceptions))
        def _fake_resize(obj, context, instance, flavor_id,
                         auto_disk_config=None):
            self.resize_called += 1
            raise next(raised)
        self.stub_out('nova.compute.api.API.resize', _fake_resize)
        for call_no in range(len(exceptions)):
            next_exception = next(expected)
            actual = self.assertRaises(next_exception,
                                       self.controller._action_resize,
                                       self.req, FAKE_UUID, body=body)
            # NOTE(review): no NoValidHost entry exists in the list above,
            # so this branch is currently never taken.
            if (isinstance(exceptions[call_no][0],
                           exception.NoValidHost)):
                self.assertEqual(actual.explanation,
                                 'No valid host was found. Bad host')
            elif (isinstance(exceptions[call_no][0],
                             exception.AutoDiskConfigDisabledByImage)):
                self.assertEqual(actual.explanation,
                                 'Requested image fake_image_id has automatic'
                                 ' disk resize disabled.')
            self.assertEqual(self.resize_called, call_no + 1)
    @mock.patch('nova.compute.api.API.resize',
                side_effect=exception.CannotResizeDisk(reason=''))
    def test_resize_raises_cannot_resize_disk(self, mock_resize):
        """CannotResizeDisk from compute maps to HTTP 400."""
        body = dict(resize=dict(flavorRef="http://localhost/3"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_resize,
                          self.req, FAKE_UUID, body=body)
    @mock.patch('nova.compute.api.API.resize',
                side_effect=exception.FlavorNotFound(reason='',
                                                     flavor_id='fake_id'))
    def test_resize_raises_flavor_not_found(self, mock_resize):
        """FlavorNotFound from compute maps to HTTP 400."""
        body = dict(resize=dict(flavorRef="http://localhost/3"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_resize,
                          self.req, FAKE_UUID, body=body)
def test_resize_with_too_many_instances(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.TooManyInstances(message="TooManyInstance")
self.stub_out('nova.compute.api.API.resize', fake_resize)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.resize', fake_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'resize')
def test_resize_instance_raise_auto_disk_config_exc(self, mock_resize):
mock_resize.side_effect = exception.AutoDiskConfigDisabledByImage(
image='dummy')
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
    @mock.patch('nova.compute.api.API.resize',
                side_effect=exception.PciRequestAliasNotDefined(
                    alias='fake_name'))
    def test_resize_pci_alias_not_defined(self, mock_resize):
        # Tests that PciRequestAliasNotDefined is translated to a 400 error.
        body = dict(resize=dict(flavorRef="http://localhost/3"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_resize,
                          self.req, FAKE_UUID, body=body)
    @mock.patch('nova.compute.api.API.resize',
                side_effect=exception.ForbiddenWithAccelerators)
    def test_resize_raises_badrequest_for_accelerator(self, mock_resize):
        """ForbiddenWithAccelerators during resize maps to HTTP 400."""
        body = dict(resize=dict(flavorRef="http://localhost/3"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_resize,
                          self.req, FAKE_UUID, body=body)
    @mock.patch('nova.compute.api.API.resize',
                side_effect=exception.OperationNotSupportedForVDPAInterface(
                    instance_uuid=FAKE_UUID, operation='foo'))
    def test_resize_raises_badrequest_for_vdpaInterface(self, mock_resize):
        """OperationNotSupportedForVDPAInterface on resize maps to 400."""
        body = dict(resize=dict(flavorRef="http://localhost/3"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_resize,
                          self.req, FAKE_UUID, body=body)
def test_confirm_resize_server(self):
body = dict(confirmResize=None)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stub_out('nova.compute.api.API.confirm_resize', cr_mock)
self.controller._action_confirm_resize(self.req, FAKE_UUID, body=body)
self.assertTrue(self.confirm_resize_called)
def test_confirm_resize_migration_not_found(self):
body = dict(confirmResize=None)
def confirm_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stub_out('nova.compute.api.API.confirm_resize',
confirm_resize_mock)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_confirm_resize,
self.req, FAKE_UUID, body=body)
def test_confirm_resize_raises_conflict_on_invalid_state(self):
body = dict(confirmResize=None)
def fake_confirm_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.confirm_resize',
fake_confirm_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_confirm_resize,
self.req, FAKE_UUID, body=body)
def test_revert_resize_migration_not_found(self):
body = dict(revertResize=None)
def revert_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stub_out('nova.compute.api.API.revert_resize',
revert_resize_mock)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_revert_resize,
self.req, FAKE_UUID, body=body)
def test_revert_resize_server_not_found(self):
body = dict(revertResize=None)
with mock.patch('nova.compute.api.API.get',
side_effect=exception.InstanceNotFound(
instance_id='bad_server_id')):
self.assertRaises(webob. exc.HTTPNotFound,
self.controller._action_revert_resize,
self.req, "bad_server_id", body=body)
def test_revert_resize_server(self):
body = dict(revertResize=None)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stub_out('nova.compute.api.API.revert_resize', revert_mock)
body = self.controller._action_revert_resize(self.req, FAKE_UUID,
body=body)
self.assertTrue(self.revert_resize_called)
def test_revert_resize_raises_conflict_on_invalid_state(self):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.revert_resize',
fake_revert_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_revert_resize,
self.req, FAKE_UUID, body=body)
    def test_create_image(self):
        """createImage returns a Location header pointing at the new
        image (id '123' from the fake image service).
        """
        body = {
            'createImage': {
                'name': 'Snapshot 1',
            },
        }
        response = self.controller._action_create_image(self.req, FAKE_UUID,
                                                        body=body)
        location = response.headers['Location']
        # The expected base URL depends on whether the test class sets a
        # static image_url or generates one through the image API.
        self.assertEqual(self.image_url + '123' if self.image_url else
                         self.image_api.generate_image_url('123', self.context),
                         location)
    def test_create_image_v2_45(self):
        """Tests the createImage server action API with the 2.45 microversion
        where there is a response body but no Location header.
        """
        body = {
            'createImage': {
                'name': 'Snapshot 1',
            },
        }
        req = fakes.HTTPRequest.blank('', version='2.45')
        response = self.controller._action_create_image(req, FAKE_UUID,
                                                        body=body)
        # From 2.45 on the action returns a dict body instead of a
        # webob response with a Location header.
        self.assertIsInstance(response, dict)
        self.assertEqual('123', response['image_id'])
def test_create_image_name_too_long(self):
long_name = 'a' * 260
body = {
'createImage': {
'name': long_name,
},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image, self.req,
FAKE_UUID, body=body)
    def _do_test_create_volume_backed_image(
            self, extra_properties, mock_vol_create_side_effect=None):
        """Common flow for snapshotting a volume-backed server.

        Stubs the BDMs, instance and volume API, runs createImage, and
        verifies the resulting image properties and the snapshot-related
        volume API calls.

        :param extra_properties: metadata dict to send in the request body
            (may be empty); each entry must appear in the image properties.
        :param mock_vol_create_side_effect: optional side effect for the
            create_snapshot_force mock, used to simulate Cinder failures.
        """
        def _fake_id(x):
            # Build a deterministic UUID-shaped string from one character.
            return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
        body = dict(createImage=dict(name='snapshot_of_volume_backed'))
        if extra_properties:
            body['createImage']['metadata'] = extra_properties
        image_service = glance.get_default_image_service()
        bdm = [dict(volume_id=_fake_id('a'),
                    volume_size=1,
                    device_name='vda',
                    delete_on_termination=False)]
        def fake_block_device_mapping_get_all_by_instance(context, inst_id,
                                                          use_slave=False):
            return [fake_block_device.FakeDbBlockDeviceDict(
                    {'volume_id': _fake_id('a'),
                     'source_type': 'snapshot',
                     'destination_type': 'volume',
                     'volume_size': 1,
                     'device_name': 'vda',
                     'snapshot_id': 1,
                     'boot_index': 0,
                     'delete_on_termination': False,
                     'no_device': None})]
        self.stub_out(
            'nova.db.main.api.block_device_mapping_get_all_by_instance',
            fake_block_device_mapping_get_all_by_instance)
        system_metadata = dict(image_kernel_id=_fake_id('b'),
                               image_ramdisk_id=_fake_id('c'),
                               image_root_device_name='/dev/vda',
                               image_block_device_mapping=str(bdm),
                               image_container_format='ami')
        instance = fakes.fake_compute_get(project_id=fakes.FAKE_PROJECT_ID,
                                          image_ref=uuids.fake,
                                          vm_state=vm_states.ACTIVE,
                                          root_device_name='/dev/vda',
                                          system_metadata=system_metadata)
        self.stub_out('nova.compute.api.API.get', instance)
        volume = dict(id=_fake_id('a'),
                      size=1,
                      host='fake',
                      display_description='fake')
        snapshot = dict(id=_fake_id('d'))
        with test.nested(
            mock.patch.object(
                self.controller.compute_api.volume_api, 'get_absolute_limits',
                return_value={'totalSnapshotsUsed': 0,
                              'maxTotalSnapshots': 10}),
            # Quiesce is made unsupported so the snapshot proceeds without it.
            mock.patch.object(self.controller.compute_api.compute_rpcapi,
                'quiesce_instance',
                side_effect=exception.InstanceQuiesceNotSupported(
                    instance_id='fake', reason='test')),
            mock.patch.object(self.controller.compute_api.volume_api, 'get',
                              return_value=volume),
            mock.patch.object(self.controller.compute_api.volume_api,
                              'create_snapshot_force',
                              return_value=snapshot),
        ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
            if mock_vol_create_side_effect:
                mock_vol_create.side_effect = mock_vol_create_side_effect
            response = self.controller._action_create_image(self.req,
                FAKE_UUID, body=body)
            location = response.headers['Location']
            # Strip the URL base to recover the image id from the header.
            image_id = location.replace(self.image_url or
                self.image_api.generate_image_url('', self.context),
                                        '')
            image = image_service.show(None, image_id)

            self.assertEqual(image['name'], 'snapshot_of_volume_backed')
            properties = image['properties']
            self.assertEqual(properties['kernel_id'], _fake_id('b'))
            self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
            self.assertEqual(properties['root_device_name'], '/dev/vda')
            self.assertTrue(properties['bdm_v2'])
            bdms = properties['block_device_mapping']
            self.assertEqual(len(bdms), 1)
            self.assertEqual(bdms[0]['boot_index'], 0)
            self.assertEqual(bdms[0]['source_type'], 'snapshot')
            self.assertEqual(bdms[0]['destination_type'], 'volume')
            self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
            self.assertEqual('/dev/vda', bdms[0]['device_name'])
            # Instance-specific fields must not leak into the image BDM.
            for fld in ('connection_info', 'id', 'instance_uuid'):
                self.assertNotIn(fld, bdms[0])
            for k in extra_properties.keys():
                self.assertEqual(properties[k], extra_properties[k])

            mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
            mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
            mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
                                                    mock.ANY, mock.ANY)
def test_create_volume_backed_image_no_metadata(self):
self._do_test_create_volume_backed_image({})
def test_create_volume_backed_image_with_metadata(self):
self._do_test_create_volume_backed_image(dict(ImageType='Gold',
ImageVersion='2.0'))
    def test_create_volume_backed_image_cinder_over_quota(self):
        # A Cinder snapshot OverQuota during createImage maps to HTTP 403.
        self.assertRaises(
            webob.exc.HTTPForbidden,
            self._do_test_create_volume_backed_image, {},
            mock_vol_create_side_effect=exception.OverQuota(
                overs='snapshot'))
    def _test_create_volume_backed_image_with_metadata_from_volume(
            self, extra_metadata=None):
        """Common flow verifying that image properties are inherited from
        the instance's ``image_*`` system metadata, optionally merged with
        extra request metadata.

        :param extra_metadata: optional metadata dict sent in the request
            body; each entry must also appear in the image properties.
        """
        def _fake_id(x):
            # Build a deterministic UUID-shaped string from one character.
            return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
        body = dict(createImage=dict(name='snapshot_of_volume_backed'))
        if extra_metadata:
            body['createImage']['metadata'] = extra_metadata
        image_service = glance.get_default_image_service()
        def fake_block_device_mapping_get_all_by_instance(context, inst_id,
                                                          use_slave=False):
            return [fake_block_device.FakeDbBlockDeviceDict(
                    {'volume_id': _fake_id('a'),
                     'source_type': 'snapshot',
                     'destination_type': 'volume',
                     'volume_size': 1,
                     'device_name': 'vda',
                     'snapshot_id': 1,
                     'boot_index': 0,
                     'delete_on_termination': False,
                     'no_device': None})]
        self.stub_out(
            'nova.db.main.api.block_device_mapping_get_all_by_instance',
            fake_block_device_mapping_get_all_by_instance)
        # The image_* prefix is stripped when metadata is inherited.
        instance = fakes.fake_compute_get(
            project_id=fakes.FAKE_PROJECT_ID,
            image_ref='',
            vm_state=vm_states.ACTIVE,
            root_device_name='/dev/vda',
            system_metadata={'image_test_key1': 'test_value1',
                             'image_test_key2': 'test_value2'})
        self.stub_out('nova.compute.api.API.get', instance)
        volume = dict(id=_fake_id('a'),
                      size=1,
                      host='fake',
                      display_description='fake')
        snapshot = dict(id=_fake_id('d'))
        with test.nested(
            mock.patch.object(
                self.controller.compute_api.volume_api, 'get_absolute_limits',
                return_value={'totalSnapshotsUsed': 0,
                              'maxTotalSnapshots': 10}),
            # Quiesce is made unsupported so the snapshot proceeds without it.
            mock.patch.object(self.controller.compute_api.compute_rpcapi,
                'quiesce_instance',
                side_effect=exception.InstanceQuiesceNotSupported(
                    instance_id='fake', reason='test')),
            mock.patch.object(self.controller.compute_api.volume_api, 'get',
                              return_value=volume),
            mock.patch.object(self.controller.compute_api.volume_api,
                              'create_snapshot_force',
                              return_value=snapshot),
        ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
            response = self.controller._action_create_image(self.req,
                FAKE_UUID, body=body)
            location = response.headers['Location']
            image_id = location.replace(self.image_base_url, '')
            image = image_service.show(None, image_id)

            properties = image['properties']
            self.assertEqual(properties['test_key1'], 'test_value1')
            self.assertEqual(properties['test_key2'], 'test_value2')
            if extra_metadata:
                for key, val in extra_metadata.items():
                    self.assertEqual(properties[key], val)

            mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
            mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
            mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
                                                    mock.ANY, mock.ANY)
def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume()
def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume(
extra_metadata={'a': 'b'})
    def test_create_image_with_metadata(self):
        """createImage with metadata succeeds and returns a Location
        header for the new image.
        """
        body = {
            'createImage': {
                'name': 'Snapshot 1',
                'metadata': {'key': 'asdf'},
            },
        }
        response = self.controller._action_create_image(self.req, FAKE_UUID,
                                                        body=body)
        location = response.headers['Location']
        # The expected base URL depends on whether the test class sets a
        # static image_url or generates one through the image API.
        self.assertEqual(self.image_url + '123' if self.image_url else
                         self.image_api.generate_image_url('123', self.context), location)
def test_create_image_with_too_much_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(CONF.quota.metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_no_name(self):
body = {
'createImage': {},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_blank_name(self):
body = {
'createImage': {
'name': '',
}
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_bad_metadata(self):
body = {
'createImage': {
'name': 'geoff',
'metadata': 'henry',
},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.snapshot', snapshot)
body = {
"createImage": {
"name": "test_snapshot",
},
}
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
| |
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import sys
from quantumclient.quantum.v2_0.subnet import CreateSubnet
from quantumclient.quantum.v2_0.subnet import DeleteSubnet
from quantumclient.quantum.v2_0.subnet import ListSubnet
from quantumclient.quantum.v2_0.subnet import ShowSubnet
from quantumclient.quantum.v2_0.subnet import UpdateSubnet
from quantumclient.tests.unit.test_cli20 import CLITestV20Base
from quantumclient.tests.unit.test_cli20 import MyApp
class CLITestV20Subnet(CLITestV20Base):
    def test_create_subnet(self):
        """Create subnet: --gateway gateway netid cidr."""
        resource = 'subnet'
        cmd = CreateSubnet(MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        netid = 'netid'
        cidr = 'cidrvalue'
        gateway = 'gatewayvalue'
        args = ['--gateway', gateway, netid, cidr]
        # ip_version is expected to default to 4 when not given on the CLI.
        position_names = ['ip_version', 'network_id', 'cidr', 'gateway_ip']
        position_values = [4, netid, cidr, gateway]
        _str = self._test_create_resource(resource, cmd, name, myid, args,
                                          position_names, position_values)
def test_create_subnet_with_no_gateway(self):
"""Create subnet: --no-gateway netid cidr"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'cidrvalue'
args = ['--no-gateway', netid, cidr]
position_names = ['ip_version', 'network_id', 'cidr', 'gateway_ip']
position_values = [4, netid, cidr, None]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_subnet_with_bad_gateway_option(self):
"""Create sbunet: --no-gateway netid cidr"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'cidrvalue'
gateway = 'gatewayvalue'
args = ['--gateway', gateway, '--no-gateway', netid, cidr]
position_names = ['ip_version', 'network_id', 'cidr', 'gateway_ip']
position_values = [4, netid, cidr, None]
try:
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
except:
return
self.fail('No exception for bad gateway option')
def test_create_subnet_tenant(self):
"""Create subnet: --tenant_id tenantid netid cidr."""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid', netid, cidr]
position_names = ['ip_version', 'network_id', 'cidr']
position_values = [4, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_tags(self):
"""Create subnet: netid cidr --tags a b."""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = [netid, cidr, '--tags', 'a', 'b']
position_names = ['ip_version', 'network_id', 'cidr']
position_values = [4, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_subnet_allocation_pool(self):
"""Create subnet: --tenant_id tenantid <allocation_pool> netid cidr.
The <allocation_pool> is --allocation_pool start=1.1.1.10,end=1.1.1.20
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--allocation_pool', 'start=1.1.1.10,end=1.1.1.20',
netid, cidr]
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pool = [{'start': '1.1.1.10', 'end': '1.1.1.20'}]
position_values = [4, pool, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_allocation_pools(self):
"""Create subnet: --tenant-id tenantid <pools> netid cidr.
The <pools> are --allocation_pool start=1.1.1.10,end=1.1.1.20 and
--allocation_pool start=1.1.1.30,end=1.1.1.40
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--allocation_pool', 'start=1.1.1.10,end=1.1.1.20',
'--allocation_pool', 'start=1.1.1.30,end=1.1.1.40',
netid, cidr]
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pools = [{'start': '1.1.1.10', 'end': '1.1.1.20'},
{'start': '1.1.1.30', 'end': '1.1.1.40'}]
position_values = [4, pools, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_host_route(self):
"""Create subnet: --tenant_id tenantid <host_route> netid cidr.
The <host_route> is
--host-route destination=172.16.1.0/24,nexthop=1.1.1.20
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--host-route', 'destination=172.16.1.0/24,nexthop=1.1.1.20',
netid, cidr]
position_names = ['ip_version', 'host_routes', 'network_id',
'cidr']
route = [{'destination': '172.16.1.0/24', 'nexthop': '1.1.1.20'}]
position_values = [4, route, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_host_routes(self):
"""Create subnet: --tenant-id tenantid <host_routes> netid cidr.
The <host_routes> are
--host-route destination=172.16.1.0/24,nexthop=1.1.1.20 and
--host-route destination=172.17.7.0/24,nexthop=1.1.1.40
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--host-route', 'destination=172.16.1.0/24,nexthop=1.1.1.20',
'--host-route', 'destination=172.17.7.0/24,nexthop=1.1.1.40',
netid, cidr]
position_names = ['ip_version', 'host_routes', 'network_id',
'cidr']
routes = [{'destination': '172.16.1.0/24', 'nexthop': '1.1.1.20'},
{'destination': '172.17.7.0/24', 'nexthop': '1.1.1.40'}]
position_values = [4, routes, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_dns_nameservers(self):
"""Create subnet: --tenant-id tenantid <dns-nameservers> netid cidr.
The <dns-nameservers> are
--dns-nameserver 1.1.1.20 and --dns-nameserver 1.1.1.40
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--dns-nameserver', '1.1.1.20',
'--dns-nameserver', '1.1.1.40',
netid, cidr]
position_names = ['ip_version', 'dns_nameservers', 'network_id',
'cidr']
nameservers = ['1.1.1.20', '1.1.1.40']
position_values = [4, nameservers, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_with_disable_dhcp(self):
"""Create subnet: --tenant-id tenantid --disable-dhcp netid cidr."""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--disable-dhcp',
netid, cidr]
position_names = ['ip_version', 'enable_dhcp', 'network_id',
'cidr']
position_values = [4, False, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_merge_single_plurar(self):
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--allocation-pool', 'start=1.1.1.10,end=1.1.1.20',
netid, cidr,
'--allocation-pools', 'list=true', 'type=dict',
'start=1.1.1.30,end=1.1.1.40']
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pools = [{'start': '1.1.1.10', 'end': '1.1.1.20'},
{'start': '1.1.1.30', 'end': '1.1.1.40'}]
position_values = [4, pools, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_merge_plurar(self):
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
netid, cidr,
'--allocation-pools', 'list=true', 'type=dict',
'start=1.1.1.30,end=1.1.1.40']
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pools = [{'start': '1.1.1.30', 'end': '1.1.1.40'}]
position_values = [4, pools, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_merge_single_single(self):
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--allocation-pool', 'start=1.1.1.10,end=1.1.1.20',
netid, cidr,
'--allocation-pool',
'start=1.1.1.30,end=1.1.1.40']
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pools = [{'start': '1.1.1.10', 'end': '1.1.1.20'},
{'start': '1.1.1.30', 'end': '1.1.1.40'}]
position_values = [4, pools, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_list_subnets_detail(self):
"""List subnets: -D."""
resources = "subnets"
cmd = ListSubnet(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_subnets_tags(self):
"""List subnets: -- --tags a b."""
resources = "subnets"
cmd = ListSubnet(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, tags=['a', 'b'])
def test_list_subnets_detail_tags(self):
"""List subnets: -D -- --tags a b."""
resources = "subnets"
cmd = ListSubnet(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])
def test_list_subnets_fields(self):
"""List subnets: --fields a --fields b -- --fields c d."""
resources = "subnets"
cmd = ListSubnet(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def test_update_subnet(self):
"""Update subnet: myid --name myname --tags a b."""
resource = 'subnet'
cmd = UpdateSubnet(MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], }
)
def test_show_subnet(self):
"""Show subnet: --fields id --fields name myid."""
resource = 'subnet'
cmd = ShowSubnet(MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_subnet(self):
"""Delete subnet: subnetid."""
resource = 'subnet'
cmd = DeleteSubnet(MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
| |
##################################################################
# Code for testing the variational Multi-Stage Generative Model. #
##################################################################
# basic python
import numpy as np
import numpy.random as npr
import cPickle
# theano business
import theano
import theano.tensor as T
# phil's sweetness
import utils
from NetLayers import relu_actfun, softplus_actfun, tanh_actfun
from InfNet import InfNet
from HydraNet import HydraNet
from GPSImputer import GPSImputer
from OneStageModel import OneStageModel
from load_data import load_udm, load_udm_ss, load_mnist, load_binarized_mnist, \
load_tfd, load_svhn_gray
from HelperFuncs import construct_masked_data, shift_and_scale_into_01, \
row_shuffle, to_fX
RESULT_PATH = "IMP_MNIST_VAE_500/"
###############################
###############################
## TEST GPS IMPUTER ON MNIST ##
###############################
###############################
def test_mnist(occ_dim=15, drop_prob=0.0):
    """Train a VAE (OneStageModel) on binarized-ish MNIST and track the
    imputation performance of a jump-mode GPSImputer built from the same
    networks.

    occ_dim   -- side length of the square occlusion applied to validation
                 images when building masked data (0 disables occlusion).
    drop_prob -- per-pixel drop probability for the random masking.
    """
    #########################################
    # Format the result tag more thoroughly #
    #########################################
    dp_int = int(100.0 * drop_prob)
    result_tag = "{}VAE_OD{}_DP{}".format(RESULT_PATH, occ_dim, dp_int)
    ##########################
    # Get some training data #
    ##########################
    rng = np.random.RandomState(1234)
    dataset = 'data/mnist.pkl.gz'
    datasets = load_udm(dataset, as_shared=False, zero_mean=False)
    Xtr = datasets[0][0]
    Xva = datasets[1][0]
    # rescale pixel values into [0, 1] for the bernoulli output model
    Xtr = to_fX(shift_and_scale_into_01(Xtr))
    Xva = to_fX(shift_and_scale_into_01(Xva))
    tr_samples = Xtr.shape[0]
    va_samples = Xva.shape[0]
    batch_size = 200
    batch_reps = 1
    # mean pixel intensity, used as the fill value for masked-out pixels
    all_pix_mean = np.mean(np.mean(Xtr, axis=1))
    data_mean = to_fX(all_pix_mean * np.ones((Xtr.shape[1],)))
    ############################################################
    # Setup some parameters for the Iterative Refinement Model #
    ############################################################
    obs_dim = Xtr.shape[1]
    z_dim = 100
    imp_steps = 15 # we'll check for the best step count (found oracularly)
    init_scale = 1.0
    x_in_sym = T.matrix('x_in_sym')
    x_out_sym = T.matrix('x_out_sym')
    x_mask_sym = T.matrix('x_mask_sym')
    #################
    # p_zi_given_xi #
    #################
    # encoder/inference net: obs -> (mu, sigma) of latent z
    params = {}
    shared_config = [obs_dim, 500, 500]
    top_config = [shared_config[-1], z_dim]
    params['shared_config'] = shared_config
    params['mu_config'] = top_config
    params['sigma_config'] = top_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    p_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    p_zi_given_xi.init_biases(0.2)
    ###################
    # p_xip1_given_zi #
    ###################
    # decoder/generator net: latent z -> observation parameters
    params = {}
    shared_config = [z_dim, 500, 500]
    output_config = [obs_dim, obs_dim]
    params['shared_config'] = shared_config
    params['output_config'] = output_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    p_xip1_given_zi = HydraNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    p_xip1_given_zi.init_biases(0.2)
    ###################
    # q_zi_given_x_xi #
    ###################
    # guide/inference net over (x, xi) pairs, hence the doubled input dim
    params = {}
    shared_config = [(obs_dim + obs_dim), 500, 500]
    top_config = [shared_config[-1], z_dim]
    params['shared_config'] = shared_config
    params['mu_config'] = top_config
    params['sigma_config'] = top_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    q_zi_given_x_xi = InfNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    q_zi_given_x_xi.init_biases(0.2)
    ###########################################################
    # Define parameters for the GPSImputer, and initialize it #
    ###########################################################
    print("Building the GPSImputer...")
    gpsi_params = {}
    gpsi_params['obs_dim'] = obs_dim
    gpsi_params['z_dim'] = z_dim
    gpsi_params['imp_steps'] = imp_steps
    gpsi_params['step_type'] = 'jump'
    gpsi_params['x_type'] = 'bernoulli'
    gpsi_params['obs_transform'] = 'sigmoid'
    gpsi_params['use_osm_mode'] = True
    GPSI = GPSImputer(rng=rng,
            x_in=x_in_sym, x_out=x_out_sym, x_mask=x_mask_sym, \
            p_zi_given_xi=p_zi_given_xi, \
            p_xip1_given_zi=p_xip1_given_zi, \
            q_zi_given_x_xi=q_zi_given_x_xi, \
            params=gpsi_params, \
            shared_param_dicts=None)
    #########################################################################
    # Define parameters for the underlying OneStageModel, and initialize it #
    #########################################################################
    # NOTE: training updates go through OSM; GPSI shares its networks and is
    # only used below for evaluation/sampling.
    print("Building the OneStageModel...")
    osm_params = {}
    osm_params['x_type'] = 'bernoulli'
    osm_params['xt_transform'] = 'sigmoid'
    OSM = OneStageModel(rng=rng, \
            x_in=x_in_sym, \
            p_x_given_z=p_xip1_given_zi, \
            q_z_given_x=p_zi_given_xi, \
            x_dim=obs_dim, z_dim=z_dim, \
            params=osm_params)
    ################################################################
    # Apply some updates, to check that they aren't totally broken #
    ################################################################
    log_name = "{}_RESULTS.txt".format(result_tag)
    out_file = open(log_name, 'wb')
    costs = [0. for i in range(10)]
    learn_rate = 0.0002
    momentum = 0.5
    # start past the end so the first iteration reshuffles and resets
    batch_idx = np.arange(batch_size) + tr_samples
    for i in range(200000):
        # ramp hyperparams in over the first 5000 updates
        scale = min(1.0, ((i+1) / 5000.0))
        if (((i + 1) % 15000) == 0):
            learn_rate = learn_rate * 0.92
        if (i > 10000):
            momentum = 0.90
        else:
            momentum = 0.50
        # get the indices of training samples for this batch update
        batch_idx += batch_size
        if (np.max(batch_idx) >= tr_samples):
            # we finished an "epoch", so we rejumble the training set
            Xtr = row_shuffle(Xtr)
            batch_idx = np.arange(batch_size)
        # set sgd and objective function hyperparams for this update
        OSM.set_sgd_params(lr=scale*learn_rate, \
                           mom_1=scale*momentum, mom_2=0.99)
        OSM.set_lam_nll(lam_nll=1.0)
        OSM.set_lam_kld(lam_kld_1=1.0, lam_kld_2=0.0)
        OSM.set_lam_l2w(1e-4)
        # perform a minibatch update and record the cost for this batch
        xb = to_fX( Xtr.take(batch_idx, axis=0) )
        result = OSM.train_joint(xb, batch_reps)
        costs = [(costs[j] + result[j]) for j in range(len(result)-1)]
        if ((i % 250) == 0):
            # report costs averaged over the last 250 updates
            costs = [(v / 250.0) for v in costs]
            str1 = "-- batch {0:d} --".format(i)
            str2 = "    joint_cost: {0:.4f}".format(costs[0])
            str3 = "    nll_cost  : {0:.4f}".format(costs[1])
            str4 = "    kld_cost  : {0:.4f}".format(costs[2])
            str5 = "    reg_cost  : {0:.4f}".format(costs[3])
            joint_str = "\n".join([str1, str2, str3, str4, str5])
            print(joint_str)
            out_file.write(joint_str+"\n")
            out_file.flush()
            costs = [0.0 for v in costs]
        if ((i % 1000) == 0):
            Xva = row_shuffle(Xva)
            # record an estimate of performance on the test set
            xi, xo, xm = construct_masked_data(Xva[0:5000], drop_prob=drop_prob, \
                                               occ_dim=occ_dim, data_mean=data_mean)
            step_nll, step_kld = GPSI.compute_per_step_cost(xi, xo, xm, sample_count=10)
            min_nll = np.min(step_nll)
            # NOTE(review): "bound" and "min" both report min_nll here —
            # presumably the bound was meant to differ; confirm intent.
            str1 = "    va_nll_bound : {}".format(min_nll)
            str2 = "    va_nll_min  : {}".format(min_nll)
            str3 = "    va_nll_final : {}".format(step_nll[-1])
            joint_str = "\n".join([str1, str2, str3])
            print(joint_str)
            out_file.write(joint_str+"\n")
            out_file.flush()
        if ((i % 5000) == 0):
            # Get some validation samples for evaluating model performance
            xb = to_fX( Xva[0:100] )
            xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
                                    occ_dim=occ_dim, data_mean=data_mean)
            # two imputation rollouts per validation image
            xi = np.repeat(xi, 2, axis=0)
            xo = np.repeat(xo, 2, axis=0)
            xm = np.repeat(xm, 2, axis=0)
            # draw some sample imputations from the model
            samp_count = xi.shape[0]
            _, model_samps = GPSI.sample_imputer(xi, xo, xm, use_guide_policy=False)
            seq_len = len(model_samps)
            seq_samps = np.zeros((seq_len*samp_count, model_samps[0].shape[1]))
            # interleave so each row group shows one sample's full trajectory
            idx = 0
            for s1 in range(samp_count):
                for s2 in range(seq_len):
                    seq_samps[idx] = model_samps[s2][s1]
                    idx += 1
            file_name = "{}_samples_ng_b{}.png".format(result_tag, i)
            utils.visualize_samples(seq_samps, file_name, num_rows=20)
            # get visualizations of policy parameters
            file_name = "{}_gen_gen_weights_b{}.png".format(result_tag, i)
            W = GPSI.gen_gen_weights.get_value(borrow=False)
            utils.visualize_samples(W[:,:obs_dim], file_name, num_rows=20)
            file_name = "{}_gen_inf_weights_b{}.png".format(result_tag, i)
            W = GPSI.gen_inf_weights.get_value(borrow=False).T
            utils.visualize_samples(W[:,:obs_dim], file_name, num_rows=20)
#############################
#############################
## TEST GPS IMPUTER ON TFD ##
#############################
#############################
def test_tfd(occ_dim=15, drop_prob=0.0):
    """Train a VAE (OneStageModel) on the Toronto Face Dataset and track
    imputation performance of a jump-mode GPSImputer sharing its networks.

    occ_dim   -- side length of the square occlusion for masked validation
                 data (0 disables occlusion).
    drop_prob -- per-pixel drop probability for the random masking.
    """
    RESULT_PATH = "IMP_TFD_VAE/"
    #########################################
    # Format the result tag more thoroughly #
    #########################################
    dp_int = int(100.0 * drop_prob)
    result_tag = "{}VAE_OD{}_DP{}".format(RESULT_PATH, occ_dim, dp_int)
    # BUGFIX: rng was referenced below but never defined in this function
    # (NameError); seed it the same way test_mnist does.
    rng = np.random.RandomState(1234)
    ##########################
    # Get some training data #
    ##########################
    data_file = 'data/tfd_data_48x48.pkl'
    dataset = load_tfd(tfd_pkl_name=data_file, which_set='unlabeled', fold='all')
    Xtr_unlabeled = dataset[0]
    dataset = load_tfd(tfd_pkl_name=data_file, which_set='train', fold='all')
    Xtr_train = dataset[0]
    Xtr = np.vstack([Xtr_unlabeled, Xtr_train])
    dataset = load_tfd(tfd_pkl_name=data_file, which_set='valid', fold='all')
    Xva = dataset[0]
    # rescale pixel values into [0, 1] for the bernoulli output model
    Xtr = to_fX(shift_and_scale_into_01(Xtr))
    Xva = to_fX(shift_and_scale_into_01(Xva))
    tr_samples = Xtr.shape[0]
    va_samples = Xva.shape[0]
    batch_size = 250
    # BUGFIX: batch_reps was passed to OSM.train_joint() but never defined
    # here (NameError); use the same value as test_mnist.
    batch_reps = 1
    # mean pixel intensity, used as the fill value for masked-out pixels
    all_pix_mean = np.mean(np.mean(Xtr, axis=1))
    data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) )
    ############################################################
    # Setup some parameters for the Iterative Refinement Model #
    ############################################################
    obs_dim = Xtr.shape[1]
    z_dim = 100
    imp_steps = 15 # we'll check for the best step count (found oracularly)
    init_scale = 1.0
    x_in_sym = T.matrix('x_in_sym')
    x_out_sym = T.matrix('x_out_sym')
    x_mask_sym = T.matrix('x_mask_sym')
    #################
    # p_zi_given_xi #
    #################
    # encoder/inference net: obs -> (mu, sigma) of latent z
    params = {}
    shared_config = [obs_dim, 1000, 1000]
    top_config = [shared_config[-1], z_dim]
    params['shared_config'] = shared_config
    params['mu_config'] = top_config
    params['sigma_config'] = top_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    p_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    p_zi_given_xi.init_biases(0.2)
    ###################
    # p_xip1_given_zi #
    ###################
    # decoder/generator net: latent z -> observation parameters
    params = {}
    shared_config = [z_dim, 1000, 1000]
    output_config = [obs_dim, obs_dim]
    params['shared_config'] = shared_config
    params['output_config'] = output_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    p_xip1_given_zi = HydraNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    p_xip1_given_zi.init_biases(0.2)
    ###################
    # q_zi_given_x_xi #
    ###################
    # guide/inference net over (x, xi) pairs, hence the doubled input dim
    params = {}
    shared_config = [(obs_dim + obs_dim), 1000, 1000]
    top_config = [shared_config[-1], z_dim]
    params['shared_config'] = shared_config
    params['mu_config'] = top_config
    params['sigma_config'] = top_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    q_zi_given_x_xi = InfNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    q_zi_given_x_xi.init_biases(0.2)
    ###########################################################
    # Define parameters for the GPSImputer, and initialize it #
    ###########################################################
    print("Building the GPSImputer...")
    gpsi_params = {}
    gpsi_params['obs_dim'] = obs_dim
    gpsi_params['z_dim'] = z_dim
    gpsi_params['imp_steps'] = imp_steps
    gpsi_params['step_type'] = 'jump'
    gpsi_params['x_type'] = 'bernoulli'
    gpsi_params['obs_transform'] = 'sigmoid'
    gpsi_params['use_osm_mode'] = True
    GPSI = GPSImputer(rng=rng,
            x_in=x_in_sym, x_out=x_out_sym, x_mask=x_mask_sym, \
            p_zi_given_xi=p_zi_given_xi, \
            p_xip1_given_zi=p_xip1_given_zi, \
            q_zi_given_x_xi=q_zi_given_x_xi, \
            params=gpsi_params, \
            shared_param_dicts=None)
    #########################################################################
    # Define parameters for the underlying OneStageModel, and initialize it #
    #########################################################################
    # NOTE: training updates go through OSM; GPSI shares its networks and is
    # only used below for evaluation/sampling.
    print("Building the OneStageModel...")
    osm_params = {}
    osm_params['x_type'] = 'bernoulli'
    osm_params['xt_transform'] = 'sigmoid'
    OSM = OneStageModel(rng=rng, \
            x_in=x_in_sym, \
            p_x_given_z=p_xip1_given_zi, \
            q_z_given_x=p_zi_given_xi, \
            x_dim=obs_dim, z_dim=z_dim, \
            params=osm_params)
    ################################################################
    # Apply some updates, to check that they aren't totally broken #
    ################################################################
    log_name = "{}_RESULTS.txt".format(result_tag)
    out_file = open(log_name, 'wb')
    costs = [0. for i in range(10)]
    learn_rate = 0.0002
    momentum = 0.5
    # start past the end so the first iteration reshuffles and resets
    batch_idx = np.arange(batch_size) + tr_samples
    for i in range(200005):
        # ramp hyperparams in over the first 5000 updates
        scale = min(1.0, ((i+1) / 5000.0))
        if (((i + 1) % 15000) == 0):
            learn_rate = learn_rate * 0.92
        if (i > 10000):
            momentum = 0.90
        else:
            momentum = 0.50
        # get the indices of training samples for this batch update
        batch_idx += batch_size
        if (np.max(batch_idx) >= tr_samples):
            # we finished an "epoch", so we rejumble the training set
            Xtr = row_shuffle(Xtr)
            batch_idx = np.arange(batch_size)
        # set sgd and objective function hyperparams for this update
        OSM.set_sgd_params(lr=scale*learn_rate, \
                           mom_1=scale*momentum, mom_2=0.99)
        OSM.set_lam_nll(lam_nll=1.0)
        OSM.set_lam_kld(lam_kld_1=1.0, lam_kld_2=0.0)
        OSM.set_lam_l2w(1e-4)
        # perform a minibatch update and record the cost for this batch
        xb = to_fX( Xtr.take(batch_idx, axis=0) )
        result = OSM.train_joint(xb, batch_reps)
        costs = [(costs[j] + result[j]) for j in range(len(result)-1)]
        if ((i % 250) == 0):
            # report costs averaged over the last 250 updates
            costs = [(v / 250.0) for v in costs]
            str1 = "-- batch {0:d} --".format(i)
            str2 = "    joint_cost: {0:.4f}".format(costs[0])
            str3 = "    nll_cost  : {0:.4f}".format(costs[1])
            str4 = "    kld_cost  : {0:.4f}".format(costs[2])
            str5 = "    reg_cost  : {0:.4f}".format(costs[3])
            joint_str = "\n".join([str1, str2, str3, str4, str5])
            print(joint_str)
            out_file.write(joint_str+"\n")
            out_file.flush()
            costs = [0.0 for v in costs]
        if ((i % 1000) == 0):
            Xva = row_shuffle(Xva)
            # record an estimate of performance on the test set
            xi, xo, xm = construct_masked_data(Xva[0:5000], drop_prob=drop_prob, \
                                               occ_dim=occ_dim, data_mean=data_mean)
            step_nll, step_kld = GPSI.compute_per_step_cost(xi, xo, xm, sample_count=10)
            min_nll = np.min(step_nll)
            # NOTE(review): "bound" and "min" both report min_nll, mirroring
            # the other test_* functions; confirm the bound was meant to match.
            str1 = "    va_nll_bound : {}".format(min_nll)
            str2 = "    va_nll_min  : {}".format(min_nll)
            str3 = "    va_nll_final : {}".format(step_nll[-1])
            joint_str = "\n".join([str1, str2, str3])
            print(joint_str)
            out_file.write(joint_str+"\n")
            out_file.flush()
        if ((i % 10000) == 0):
            # Get some validation samples for evaluating model performance
            xb = to_fX( Xva[0:100] )
            xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
                                    occ_dim=occ_dim, data_mean=data_mean)
            # two imputation rollouts per validation image
            xi = np.repeat(xi, 2, axis=0)
            xo = np.repeat(xo, 2, axis=0)
            xm = np.repeat(xm, 2, axis=0)
            # draw some sample imputations from the model
            samp_count = xi.shape[0]
            _, model_samps = GPSI.sample_imputer(xi, xo, xm, use_guide_policy=False)
            seq_len = len(model_samps)
            seq_samps = np.zeros((seq_len*samp_count, model_samps[0].shape[1]))
            # interleave so each row group shows one sample's full trajectory
            idx = 0
            for s1 in range(samp_count):
                for s2 in range(seq_len):
                    seq_samps[idx] = model_samps[s2][s1]
                    idx += 1
            file_name = "{}_samples_ng_b{}.png".format(result_tag, i)
            utils.visualize_samples(seq_samps, file_name, num_rows=20)
            # get visualizations of policy parameters
            file_name = "{}_gen_gen_weights_b{}.png".format(result_tag, i)
            W = GPSI.gen_gen_weights.get_value(borrow=False)
            utils.visualize_samples(W[:,:obs_dim], file_name, num_rows=20)
            file_name = "{}_gen_inf_weights_b{}.png".format(result_tag, i)
            W = GPSI.gen_inf_weights.get_value(borrow=False).T
            utils.visualize_samples(W[:,:obs_dim], file_name, num_rows=20)
##############################
##############################
## TEST GPS IMPUTER ON SVHN ##
##############################
##############################
def test_svhn(occ_dim=15, drop_prob=0.0):
    """Train a VAE (OneStageModel) on grayscale SVHN and track imputation
    performance of a jump-mode GPSImputer sharing its networks.

    occ_dim   -- side length of the square occlusion for masked validation
                 data (0 disables occlusion).
    drop_prob -- per-pixel drop probability for the random masking.
    """
    RESULT_PATH = "IMP_SVHN_VAE/"
    #########################################
    # Format the result tag more thoroughly #
    #########################################
    dp_int = int(100.0 * drop_prob)
    result_tag = "{}VAE_OD{}_DP{}".format(RESULT_PATH, occ_dim, dp_int)
    # BUGFIX: rng was referenced below but never defined in this function
    # (NameError); seed it the same way test_mnist does.
    rng = np.random.RandomState(1234)
    ##########################
    # Get some training data #
    ##########################
    tr_file = 'data/svhn_train_gray.pkl'
    te_file = 'data/svhn_test_gray.pkl'
    ex_file = 'data/svhn_extra_gray.pkl'
    data = load_svhn_gray(tr_file, te_file, ex_file=ex_file, ex_count=200000)
    # rescale pixel values into [0, 1] for the bernoulli output model
    Xtr = to_fX( shift_and_scale_into_01(np.vstack([data['Xtr'], data['Xex']])) )
    Xva = to_fX( shift_and_scale_into_01(data['Xte']) )
    tr_samples = Xtr.shape[0]
    va_samples = Xva.shape[0]
    batch_size = 250
    # BUGFIX: batch_reps was passed to OSM.train_joint() but never defined
    # here (NameError); use the same value as test_mnist.
    batch_reps = 1
    # mean pixel intensity, used as the fill value for masked-out pixels
    all_pix_mean = np.mean(np.mean(Xtr, axis=1))
    data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) )
    ############################################################
    # Setup some parameters for the Iterative Refinement Model #
    ############################################################
    obs_dim = Xtr.shape[1]
    z_dim = 100
    imp_steps = 15 # we'll check for the best step count (found oracularly)
    init_scale = 1.0
    x_in_sym = T.matrix('x_in_sym')
    x_out_sym = T.matrix('x_out_sym')
    x_mask_sym = T.matrix('x_mask_sym')
    #################
    # p_zi_given_xi #
    #################
    # encoder/inference net: obs -> (mu, sigma) of latent z
    params = {}
    shared_config = [obs_dim, 1000, 1000]
    top_config = [shared_config[-1], z_dim]
    params['shared_config'] = shared_config
    params['mu_config'] = top_config
    params['sigma_config'] = top_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    p_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    p_zi_given_xi.init_biases(0.2)
    ###################
    # p_xip1_given_zi #
    ###################
    # decoder/generator net: latent z -> observation parameters
    params = {}
    shared_config = [z_dim, 1000, 1000]
    output_config = [obs_dim, obs_dim]
    params['shared_config'] = shared_config
    params['output_config'] = output_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    p_xip1_given_zi = HydraNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    p_xip1_given_zi.init_biases(0.2)
    ###################
    # q_zi_given_x_xi #
    ###################
    # guide/inference net over (x, xi) pairs, hence the doubled input dim
    params = {}
    shared_config = [(obs_dim + obs_dim), 1000, 1000]
    top_config = [shared_config[-1], z_dim]
    params['shared_config'] = shared_config
    params['mu_config'] = top_config
    params['sigma_config'] = top_config
    params['activation'] = relu_actfun
    params['init_scale'] = init_scale
    params['lam_l2a'] = 0.0
    params['vis_drop'] = 0.0
    params['hid_drop'] = 0.0
    params['bias_noise'] = 0.0
    params['input_noise'] = 0.0
    params['build_theano_funcs'] = False
    q_zi_given_x_xi = InfNet(rng=rng, Xd=x_in_sym, \
            params=params, shared_param_dicts=None)
    q_zi_given_x_xi.init_biases(0.2)
    ###########################################################
    # Define parameters for the GPSImputer, and initialize it #
    ###########################################################
    print("Building the GPSImputer...")
    gpsi_params = {}
    gpsi_params['obs_dim'] = obs_dim
    gpsi_params['z_dim'] = z_dim
    gpsi_params['imp_steps'] = imp_steps
    gpsi_params['step_type'] = 'jump'
    gpsi_params['x_type'] = 'bernoulli'
    gpsi_params['obs_transform'] = 'sigmoid'
    gpsi_params['use_osm_mode'] = True
    GPSI = GPSImputer(rng=rng,
            x_in=x_in_sym, x_out=x_out_sym, x_mask=x_mask_sym, \
            p_zi_given_xi=p_zi_given_xi, \
            p_xip1_given_zi=p_xip1_given_zi, \
            q_zi_given_x_xi=q_zi_given_x_xi, \
            params=gpsi_params, \
            shared_param_dicts=None)
    #########################################################################
    # Define parameters for the underlying OneStageModel, and initialize it #
    #########################################################################
    # NOTE: training updates go through OSM; GPSI shares its networks and is
    # only used below for evaluation/sampling.
    print("Building the OneStageModel...")
    osm_params = {}
    osm_params['x_type'] = 'bernoulli'
    osm_params['xt_transform'] = 'sigmoid'
    OSM = OneStageModel(rng=rng, \
            x_in=x_in_sym, \
            p_x_given_z=p_xip1_given_zi, \
            q_z_given_x=p_zi_given_xi, \
            x_dim=obs_dim, z_dim=z_dim, \
            params=osm_params)
    ################################################################
    # Apply some updates, to check that they aren't totally broken #
    ################################################################
    log_name = "{}_RESULTS.txt".format(result_tag)
    out_file = open(log_name, 'wb')
    costs = [0. for i in range(10)]
    learn_rate = 0.0002
    momentum = 0.5
    # start past the end so the first iteration reshuffles and resets
    batch_idx = np.arange(batch_size) + tr_samples
    for i in range(200005):
        # ramp hyperparams in over the first 5000 updates
        scale = min(1.0, ((i+1) / 5000.0))
        if (((i + 1) % 15000) == 0):
            learn_rate = learn_rate * 0.92
        if (i > 10000):
            momentum = 0.90
        else:
            momentum = 0.50
        # get the indices of training samples for this batch update
        batch_idx += batch_size
        if (np.max(batch_idx) >= tr_samples):
            # we finished an "epoch", so we rejumble the training set
            Xtr = row_shuffle(Xtr)
            batch_idx = np.arange(batch_size)
        # set sgd and objective function hyperparams for this update
        OSM.set_sgd_params(lr=scale*learn_rate, \
                           mom_1=scale*momentum, mom_2=0.99)
        OSM.set_lam_nll(lam_nll=1.0)
        OSM.set_lam_kld(lam_kld_1=1.0, lam_kld_2=0.0)
        OSM.set_lam_l2w(1e-4)
        # perform a minibatch update and record the cost for this batch
        xb = to_fX( Xtr.take(batch_idx, axis=0) )
        result = OSM.train_joint(xb, batch_reps)
        costs = [(costs[j] + result[j]) for j in range(len(result)-1)]
        if ((i % 250) == 0):
            # report costs averaged over the last 250 updates
            costs = [(v / 250.0) for v in costs]
            str1 = "-- batch {0:d} --".format(i)
            str2 = "    joint_cost: {0:.4f}".format(costs[0])
            str3 = "    nll_cost  : {0:.4f}".format(costs[1])
            str4 = "    kld_cost  : {0:.4f}".format(costs[2])
            str5 = "    reg_cost  : {0:.4f}".format(costs[3])
            joint_str = "\n".join([str1, str2, str3, str4, str5])
            print(joint_str)
            out_file.write(joint_str+"\n")
            out_file.flush()
            costs = [0.0 for v in costs]
        if ((i % 1000) == 0):
            Xva = row_shuffle(Xva)
            # record an estimate of performance on the test set
            xi, xo, xm = construct_masked_data(Xva[0:5000], drop_prob=drop_prob, \
                                               occ_dim=occ_dim, data_mean=data_mean)
            step_nll, step_kld = GPSI.compute_per_step_cost(xi, xo, xm, sample_count=10)
            min_nll = np.min(step_nll)
            # NOTE(review): "bound" and "min" both report min_nll, mirroring
            # the other test_* functions; confirm the bound was meant to match.
            str1 = "    va_nll_bound : {}".format(min_nll)
            str2 = "    va_nll_min  : {}".format(min_nll)
            str3 = "    va_nll_final : {}".format(step_nll[-1])
            joint_str = "\n".join([str1, str2, str3])
            print(joint_str)
            out_file.write(joint_str+"\n")
            out_file.flush()
        if ((i % 10000) == 0):
            # Get some validation samples for evaluating model performance
            xb = to_fX( Xva[0:100] )
            xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
                                    occ_dim=occ_dim, data_mean=data_mean)
            # two imputation rollouts per validation image
            xi = np.repeat(xi, 2, axis=0)
            xo = np.repeat(xo, 2, axis=0)
            xm = np.repeat(xm, 2, axis=0)
            # draw some sample imputations from the model
            samp_count = xi.shape[0]
            _, model_samps = GPSI.sample_imputer(xi, xo, xm, use_guide_policy=False)
            seq_len = len(model_samps)
            seq_samps = np.zeros((seq_len*samp_count, model_samps[0].shape[1]))
            # interleave so each row group shows one sample's full trajectory
            idx = 0
            for s1 in range(samp_count):
                for s2 in range(seq_len):
                    seq_samps[idx] = model_samps[s2][s1]
                    idx += 1
            file_name = "{}_samples_ng_b{}.png".format(result_tag, i)
            utils.visualize_samples(seq_samps, file_name, num_rows=20)
            # get visualizations of policy parameters
            file_name = "{}_gen_gen_weights_b{}.png".format(result_tag, i)
            W = GPSI.gen_gen_weights.get_value(borrow=False)
            utils.visualize_samples(W[:,:obs_dim], file_name, num_rows=20)
            file_name = "{}_gen_inf_weights_b{}.png".format(result_tag, i)
            W = GPSI.gen_inf_weights.get_value(borrow=False).T
            utils.visualize_samples(W[:,:obs_dim], file_name, num_rows=20)
if __name__=="__main__":
    # Experiment launcher: uncomment exactly the run(s) you want. Each call
    # trains a fresh model and writes logs/images under its RESULT_PATH.
    #########
    # MNIST #
    #########
    #test_mnist(occ_dim=0, drop_prob=0.6)
    #test_mnist(occ_dim=0, drop_prob=0.7)
    #test_mnist(occ_dim=0, drop_prob=0.8)
    #test_mnist(occ_dim=0, drop_prob=0.9)
    #test_mnist(occ_dim=14, drop_prob=0.0)
    #test_mnist(occ_dim=16, drop_prob=0.0)
    #######
    # TFD #
    #######
    #test_tfd(occ_dim=25, drop_prob=0.0)
    #test_tfd(occ_dim=25, drop_prob=0.8)
    ########
    # SVHN #
    ########
    #test_svhn(occ_dim=17, drop_prob=0.0)
    #test_svhn(occ_dim=17, drop_prob=0.8)
| |
#!/usr/bin/env python
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run xDS integration tests on GCP using Traffic Director."""
import argparse
import googleapiclient.discovery
import grpc
import logging
import os
import random
import shlex
import socket
import subprocess
import sys
import tempfile
import time
from oauth2client.client import GoogleCredentials
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
# Configure a single stderr handler on the root logger; --verbose raises the
# level to DEBUG after argument parsing (see below).
logger = logging.getLogger()
console_handler = logging.StreamHandler()
formatter = logging.Formatter(fmt='%(asctime)s: %(levelname)-8s %(message)s')
console_handler.setFormatter(formatter)
logger.handlers = []  # drop any handlers installed by imported libraries
logger.addHandler(console_handler)
logger.setLevel(logging.WARNING)
# Test cases that run by default when --test_case=all is given.
_TEST_CASES = [
    'backends_restart',
    'change_backend_service',
    'gentle_failover',
    'new_instance_group_receives_traffic',
    'ping_pong',
    'remove_instance_group',
    'round_robin',
    'secondary_locality_gets_no_requests_on_partial_primary_failure',
    'secondary_locality_gets_requests_on_primary_failure',
    'traffic_splitting',
]
# Valid test cases, but not in all. So the tests can only run manually, and
# aren't enabled automatically for all languages.
#
# TODO: Move them into _TEST_CASES when support is ready in all languages.
_ADDITIONAL_TEST_CASES = ['path_matching', 'header_matching']
def parse_test_cases(arg):
    """Parse a comma-separated test-case argument into an ordered list.

    Args:
      arg: Comma-separated test case names. 'all' expands to every test in
        _TEST_CASES; tests in _ADDITIONAL_TEST_CASES must be named
        explicitly.

    Returns:
      The requested test case names, deduplicated and ordered as they appear
      in _TEST_CASES + _ADDITIONAL_TEST_CASES.

    Raises:
      Exception: if any requested name is not a known test case.
    """
    if arg == '':
        return []
    all_test_cases = _TEST_CASES + _ADDITIONAL_TEST_CASES
    test_cases = set()
    # Use a distinct loop variable: the original code shadowed the `arg`
    # parameter here, so the error below reported only the last token
    # instead of the full argument.
    for token in arg.split(','):
        if token == "all":
            test_cases = test_cases.union(_TEST_CASES)
        else:
            test_cases = test_cases.union([token])
    if not all(test_case in all_test_cases for test_case in test_cases):
        raise Exception('Failed to parse test cases %s' % arg)
    # Preserve order.
    return [x for x in all_test_cases if x in test_cases]
def parse_port_range(port_arg):
    """Parse a port argument into an inclusive range of candidate ports.

    Args:
      port_arg: Either a single port ('8080') or a 'min:max' range
        ('8080:8110').

    Returns:
      range covering the requested ports (max is included).
    """
    try:
        port = int(port_arg)
        return range(port, port + 1)
    except ValueError:
        # Not a plain integer; fall back to the min:max form. The original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        port_min, port_max = port_arg.split(':')
        return range(int(port_min), int(port_max) + 1)
# Command-line interface. Note: --test_case and --service_port_range use the
# parser helpers above, so their parsed values are already a list / range
# rather than raw strings.
argp = argparse.ArgumentParser(description='Run xDS interop tests on GCP')
argp.add_argument('--project_id', help='GCP project id')
argp.add_argument(
    '--gcp_suffix',
    default='',
    help='Optional suffix for all generated GCP resource names. Useful to '
    'ensure distinct names across test runs.')
argp.add_argument(
    '--test_case',
    default='ping_pong',
    type=parse_test_cases,
    help='Comma-separated list of test cases to run. Available tests: %s, '
    '(or \'all\' to run every test). '
    'Alternative tests not included in \'all\': %s' %
    (','.join(_TEST_CASES), ','.join(_ADDITIONAL_TEST_CASES)))
argp.add_argument(
    '--bootstrap_file',
    default='',
    help='File to reference via GRPC_XDS_BOOTSTRAP. Disables built-in '
    'bootstrap generation')
argp.add_argument(
    '--client_cmd',
    default=None,
    help='Command to launch xDS test client. {server_uri}, {stats_port} and '
    '{qps} references will be replaced using str.format(). GRPC_XDS_BOOTSTRAP '
    'will be set for the command')
argp.add_argument('--zone', default='us-central1-a')
argp.add_argument('--secondary_zone',
                  default='us-west1-b',
                  help='Zone to use for secondary TD locality tests')
argp.add_argument('--qps', default=100, type=int, help='Client QPS')
argp.add_argument(
    '--wait_for_backend_sec',
    default=1200,
    type=int,
    help='Time limit for waiting for created backend services to report '
    'healthy when launching or updated GCP resources')
argp.add_argument(
    '--use_existing_gcp_resources',
    default=False,
    action='store_true',
    help=
    'If set, find and use already created GCP resources instead of creating new'
    ' ones.')
argp.add_argument(
    '--keep_gcp_resources',
    default=False,
    action='store_true',
    help=
    'Leave GCP VMs and configuration running after test. Default behavior is '
    'to delete when tests complete.')
argp.add_argument(
    '--compute_discovery_document',
    default=None,
    type=str,
    help=
    'If provided, uses this file instead of retrieving via the GCP discovery '
    'API')
argp.add_argument(
    '--alpha_compute_discovery_document',
    default=None,
    type=str,
    help='If provided, uses this file instead of retrieving via the alpha GCP '
    'discovery API')
argp.add_argument('--network',
                  default='global/networks/default',
                  help='GCP network to use')
argp.add_argument('--service_port_range',
                  default='8080:8110',
                  type=parse_port_range,
                  help='Listening port for created gRPC backends. Specified as '
                  'either a single int or as a range in the format min:max, in '
                  'which case an available port p will be chosen s.t. min <= p '
                  '<= max')
argp.add_argument(
    '--stats_port',
    default=8079,
    type=int,
    help='Local port for the client process to expose the LB stats service')
argp.add_argument('--xds_server',
                  default='trafficdirector.googleapis.com:443',
                  help='xDS server')
argp.add_argument('--source_image',
                  default='projects/debian-cloud/global/images/family/debian-9',
                  help='Source image for VMs created during the test')
argp.add_argument('--path_to_server_binary',
                  default=None,
                  type=str,
                  help='If set, the server binary must already be pre-built on '
                  'the specified source image')
argp.add_argument('--machine_type',
                  default='e2-standard-2',
                  help='Machine type for VMs created during the test')
argp.add_argument(
    '--instance_group_size',
    default=2,
    type=int,
    help='Number of VMs to create per instance group. Certain test cases (e.g., '
    'round_robin) may not give meaningful results if this is set to a value '
    'less than 2.')
argp.add_argument('--verbose',
                  help='verbose log output',
                  default=False,
                  action='store_true')
# TODO(ericgribkoff) Remove this param once the sponge-formatted log files are
# visible in all test environments.
argp.add_argument('--log_client_output',
                  help='Log captured client output',
                  default=False,
                  action='store_true')
# TODO(ericgribkoff) Remove this flag once all test environments are verified to
# have access to the alpha compute APIs.
argp.add_argument('--only_stable_gcp_apis',
                  help='Do not use alpha compute APIs. Some tests may be '
                  'incompatible with this option (gRPC health checks are '
                  'currently alpha and required for simulating server failure',
                  default=False,
                  action='store_true')
args = argp.parse_args()
if args.verbose:
    logger.setLevel(logging.DEBUG)
# Timeouts and sizes used throughout the tests; several are taken directly
# from the parsed command-line flags.
_DEFAULT_SERVICE_PORT = 80
_WAIT_FOR_BACKEND_SEC = args.wait_for_backend_sec
_WAIT_FOR_OPERATION_SEC = 300
_INSTANCE_GROUP_SIZE = args.instance_group_size
_NUM_TEST_RPCS = 10 * args.qps
_WAIT_FOR_STATS_SEC = 180
_WAIT_FOR_VALID_CONFIG_SEC = 60
_WAIT_FOR_URL_MAP_PATCH_SEC = 300
_CONNECTION_TIMEOUT_SEC = 60
_GCP_API_RETRIES = 5
# Contents for the GRPC_XDS_BOOTSTRAP file. The network name, zone and xDS
# server address are substituted immediately via %; {node_id} stays as a
# str.format() placeholder (the doubled braces escape literal JSON braces).
_BOOTSTRAP_TEMPLATE = """
{{
  "node": {{
    "id": "{node_id}",
    "metadata": {{
      "TRAFFICDIRECTOR_NETWORK_NAME": "%s"
    }},
    "locality": {{
      "zone": "%s"
    }}
  }},
  "xds_servers": [{{
    "server_uri": "%s",
    "channel_creds": [
      {{
        "type": "google_default",
        "config": {{}}
      }}
    ]
  }}]
}}""" % (args.network.split('/')[-1], args.zone, args.xds_server)
# TODO(ericgribkoff) Add change_backend_service to this list once TD no longer
# sends an update with no localities when adding the MIG to the backend service
# can race with the URL map patch.
_TESTS_TO_FAIL_ON_RPC_FAILURE = [
    'new_instance_group_receives_traffic', 'ping_pong', 'round_robin'
]
# Tests that run UnaryCall and EmptyCall.
_TESTS_TO_RUN_MULTIPLE_RPCS = ['path_matching', 'header_matching']
# Tests that make UnaryCall with test metadata.
_TESTS_TO_SEND_METADATA = ['header_matching']
_TEST_METADATA_KEY = 'xds_md'
_TEST_METADATA_VALUE = 'exact_match'
_PATH_MATCHER_NAME = 'path-matcher'
# Base names for generated GCP resources (see --gcp_suffix above for how runs
# are kept distinct).
_BASE_TEMPLATE_NAME = 'test-template'
_BASE_INSTANCE_GROUP_NAME = 'test-ig'
_BASE_HEALTH_CHECK_NAME = 'test-hc'
_BASE_FIREWALL_RULE_NAME = 'test-fw-rule'
_BASE_BACKEND_SERVICE_NAME = 'test-backend-service'
_BASE_URL_MAP_NAME = 'test-map'
_BASE_SERVICE_HOST = 'grpc-test'
_BASE_TARGET_PROXY_NAME = 'test-target-proxy'
_BASE_FORWARDING_RULE_NAME = 'test-forwarding-rule'
_TEST_LOG_BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  '../../reports')
_SPONGE_LOG_NAME = 'sponge_log.log'
_SPONGE_XML_NAME = 'sponge_log.xml'
def get_client_stats(num_rpcs, timeout_sec):
    """Fetch load-balancer stats from the local xDS test client.

    Args:
      num_rpcs: number of RPCs the client should report on.
      timeout_sec: how long the client may spend collecting the stats.

    Returns:
      The client's LoadBalancerStatsResponse.
    """
    target = 'localhost:%d' % args.stats_port
    # Give the stats RPC itself extra headroom beyond the collection window.
    rpc_timeout = timeout_sec + _CONNECTION_TIMEOUT_SEC
    with grpc.insecure_channel(target) as channel:
        stub = test_pb2_grpc.LoadBalancerStatsServiceStub(channel)
        request = messages_pb2.LoadBalancerStatsRequest()
        request.num_rpcs = num_rpcs
        request.timeout_sec = timeout_sec
        response = stub.GetClientStats(request,
                                       wait_for_ready=True,
                                       timeout=rpc_timeout)
    logger.debug('Invoked GetClientStats RPC: %s', response)
    return response
class RpcDistributionError(Exception):
    """Raised when RPCs fail to reach the expected backend distribution
    within the allotted timeout."""
    pass
def _verify_rpcs_to_given_backends(backends, timeout_sec, num_rpcs,
                                   allow_failures):
    """Poll the test client's stats until exactly `backends` receive load.

    Args:
      backends: backend instance names that must receive all the traffic.
      timeout_sec: overall polling deadline in seconds.
      num_rpcs: number of RPCs the client reports on per poll.
      allow_failures: if False, any failed RPC in a poll counts as an error.

    Raises:
      RpcDistributionError: if the distribution has not converged by the
        deadline (carries the last observed problem).
    """
    start_time = time.time()
    error_msg = None
    # Lazy %-style args: the message is only formatted when DEBUG is enabled
    # (the original formatted eagerly with `%`).
    logger.debug('Waiting for %d sec until backends %s receive load',
                 timeout_sec, backends)
    while time.time() - start_time <= timeout_sec:
        error_msg = None
        stats = get_client_stats(num_rpcs, timeout_sec)
        rpcs_by_peer = stats.rpcs_by_peer
        for backend in backends:
            if backend not in rpcs_by_peer:
                error_msg = 'Backend %s did not receive load' % backend
                break
        if not error_msg and len(rpcs_by_peer) > len(backends):
            error_msg = 'Unexpected backend received load: %s' % rpcs_by_peer
        if not allow_failures and stats.num_failures > 0:
            error_msg = '%d RPCs failed' % stats.num_failures
        if not error_msg:
            return
    raise RpcDistributionError(error_msg)
def wait_until_all_rpcs_go_to_given_backends_or_fail(backends,
                                                     timeout_sec,
                                                     num_rpcs=_NUM_TEST_RPCS):
    """Wait until load reaches exactly `backends`; individual RPC failures
    are tolerated while waiting."""
    _verify_rpcs_to_given_backends(backends=backends,
                                   timeout_sec=timeout_sec,
                                   num_rpcs=num_rpcs,
                                   allow_failures=True)
def wait_until_all_rpcs_go_to_given_backends(backends,
                                             timeout_sec,
                                             num_rpcs=_NUM_TEST_RPCS):
    """Wait until load reaches exactly `backends` with zero RPC failures."""
    _verify_rpcs_to_given_backends(backends=backends,
                                   timeout_sec=timeout_sec,
                                   num_rpcs=num_rpcs,
                                   allow_failures=False)
def compare_distributions(actual_distribution, expected_distribution,
                          threshold):
    """Compare if two distributions are similar.
    Args:
      actual_distribution: A list of floats, contains the actual distribution.
      expected_distribution: A list of floats, contains the expected distribution.
      threshold: Number within [0,100], the threshold percentage by which the
        actual distribution can differ from the expected distribution.
    Returns:
      The similarity between the distributions as a boolean. Returns true if the
      actual distribution lies within the threshold of the expected
      distribution, false otherwise.
    Raises:
      ValueError: if threshold is not with in [0,100].
      Exception: containing detailed error messages.
    """
    expected_len = len(expected_distribution)
    actual_len = len(actual_distribution)
    if expected_len != actual_len:
        raise Exception(
            'Error: expected and actual distributions have different size (%d vs %d)'
            % (expected_len, actual_len))
    if threshold < 0 or 100 < threshold:
        raise ValueError('Value error: Threshold should be between 0 to 100')
    threshold_fraction = threshold / 100.0
    for expected, actual in zip(expected_distribution, actual_distribution):
        # Acceptable band around each expected value.
        lower_bound = expected * (1 - threshold_fraction)
        upper_bound = expected * (1 + threshold_fraction)
        if actual < lower_bound:
            raise Exception("actual(%f) < expected(%f-%d%%)" %
                            (actual, expected, threshold))
        if actual > upper_bound:
            raise Exception("actual(%f) > expected(%f+%d%%)" %
                            (actual, expected, threshold))
    return True
def compare_expected_instances(stats, expected_instances):
    """Compare if stats have expected instances for each type of RPC.

    Args:
      stats: LoadBalancerStatsResponse reported by interop client.
      expected_instances: a dict with key as the RPC type (string), value as
        the expected backend instances (list of strings).

    Returns:
      Returns true if the instances are expected. False if not.
    """
    for rpc_type, expected_peers in expected_instances.items():
        rpcs_by_peer_for_type = stats.rpcs_by_method[rpc_type]
        rpcs_by_peer = rpcs_by_peer_for_type.rpcs_by_peer if rpcs_by_peer_for_type else None
        logger.debug('rpc: %s, by_peer: %s', rpc_type, rpcs_by_peer)
        # Guard the None case: the original called rpcs_by_peer.keys()
        # unconditionally, raising AttributeError instead of reporting the
        # mismatch when no stats exist for this RPC type.
        peers = list(rpcs_by_peer.keys()) if rpcs_by_peer else []
        if set(peers) != set(expected_peers):
            logger.info('unexpected peers for %s, got %s, want %s', rpc_type,
                        peers, expected_peers)
            return False
    return True
def test_backends_restart(gcp, backend_service, instance_group):
    """Verify the RPC distribution recovers after all backends restart.

    Records the per-backend RPC distribution, resizes the instance group to
    zero (expecting traffic to drain), restores the original size, then
    checks the new distribution matches the old one within a small threshold.
    """
    logger.info('Running test_backends_restart')
    instance_names = get_instance_names(gcp, instance_group)
    num_instances = len(instance_names)
    start_time = time.time()  # NOTE(review): unused — candidate for removal
    wait_until_all_rpcs_go_to_given_backends(instance_names,
                                             _WAIT_FOR_STATS_SEC)
    stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
    try:
        resize_instance_group(gcp, instance_group, 0)
        wait_until_all_rpcs_go_to_given_backends_or_fail([],
                                                         _WAIT_FOR_BACKEND_SEC)
    finally:
        resize_instance_group(gcp, instance_group, num_instances)
    wait_for_healthy_backends(gcp, backend_service, instance_group)
    new_instance_names = get_instance_names(gcp, instance_group)
    wait_until_all_rpcs_go_to_given_backends(new_instance_names,
                                             _WAIT_FOR_BACKEND_SEC)
    new_stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
    original_distribution = list(stats.rpcs_by_peer.values())
    original_distribution.sort()
    new_distribution = list(new_stats.rpcs_by_peer.values())
    new_distribution.sort()
    threshold = 3
    # Sorted distributions are compared element-wise; the restart should not
    # change per-backend load by more than `threshold` RPCs.
    for i in range(len(original_distribution)):
        if abs(original_distribution[i] - new_distribution[i]) > threshold:
            raise Exception('Distributions do not match: ', stats, new_stats)
def test_change_backend_service(gcp, original_backend_service, instance_group,
                                alternate_backend_service,
                                same_zone_instance_group):
    """Verify traffic follows a URL-map switch to a different backend service.

    Patches the URL map to point at the alternate backend service, checks all
    traffic moves there, then restores the original mapping (and empties the
    alternate service) in the finally block.
    """
    logger.info('Running test_change_backend_service')
    original_backend_instances = get_instance_names(gcp, instance_group)
    alternate_backend_instances = get_instance_names(gcp,
                                                     same_zone_instance_group)
    patch_backend_instances(gcp, alternate_backend_service,
                            [same_zone_instance_group])
    wait_for_healthy_backends(gcp, original_backend_service, instance_group)
    wait_for_healthy_backends(gcp, alternate_backend_service,
                              same_zone_instance_group)
    wait_until_all_rpcs_go_to_given_backends(original_backend_instances,
                                             _WAIT_FOR_STATS_SEC)
    try:
        patch_url_map_backend_service(gcp, alternate_backend_service)
        wait_until_all_rpcs_go_to_given_backends(alternate_backend_instances,
                                                 _WAIT_FOR_URL_MAP_PATCH_SEC)
    finally:
        patch_url_map_backend_service(gcp, original_backend_service)
        patch_backend_instances(gcp, alternate_backend_service, [])
def test_gentle_failover(gcp,
                         backend_service,
                         primary_instance_group,
                         secondary_instance_group,
                         swapped_primary_and_secondary=False):
    """Verify partial primary failure fails over to the secondary locality.

    Stops serving on all but one primary instance and expects traffic on the
    remaining primary instance plus the secondary instances. If TD's
    primary/secondary assignment turns out reversed, retries once with the
    groups swapped (guarded by swapped_primary_and_secondary to avoid
    infinite recursion). The finally block restores the original backend
    configuration and group size.
    """
    logger.info('Running test_gentle_failover')
    num_primary_instances = len(get_instance_names(gcp, primary_instance_group))
    min_instances_for_gentle_failover = 3  # Need >50% failure to start failover
    try:
        if num_primary_instances < min_instances_for_gentle_failover:
            resize_instance_group(gcp, primary_instance_group,
                                  min_instances_for_gentle_failover)
        patch_backend_instances(
            gcp, backend_service,
            [primary_instance_group, secondary_instance_group])
        primary_instance_names = get_instance_names(gcp, primary_instance_group)
        secondary_instance_names = get_instance_names(gcp,
                                                      secondary_instance_group)
        wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
        wait_for_healthy_backends(gcp, backend_service,
                                  secondary_instance_group)
        wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
                                                 _WAIT_FOR_STATS_SEC)
        instances_to_stop = primary_instance_names[:-1]
        remaining_instances = primary_instance_names[-1:]
        try:
            set_serving_status(instances_to_stop,
                               gcp.service_port,
                               serving=False)
            wait_until_all_rpcs_go_to_given_backends(
                remaining_instances + secondary_instance_names,
                _WAIT_FOR_BACKEND_SEC)
        finally:
            set_serving_status(primary_instance_names,
                               gcp.service_port,
                               serving=True)
    except RpcDistributionError as e:
        if not swapped_primary_and_secondary and is_primary_instance_group(
                gcp, secondary_instance_group):
            # Swap expectation of primary and secondary instance groups.
            test_gentle_failover(gcp,
                                 backend_service,
                                 secondary_instance_group,
                                 primary_instance_group,
                                 swapped_primary_and_secondary=True)
        else:
            raise e
    finally:
        patch_backend_instances(gcp, backend_service, [primary_instance_group])
        resize_instance_group(gcp, primary_instance_group,
                              num_primary_instances)
        instance_names = get_instance_names(gcp, primary_instance_group)
        wait_until_all_rpcs_go_to_given_backends(instance_names,
                                                 _WAIT_FOR_BACKEND_SEC)
def test_new_instance_group_receives_traffic(gcp, backend_service,
                                             instance_group,
                                             same_zone_instance_group):
    """Verify a newly added instance group starts receiving traffic.

    Adds a second (same-zone) instance group to the backend service and
    checks RPCs reach instances of both groups; the finally block restores
    the original single-group configuration.
    """
    logger.info('Running test_new_instance_group_receives_traffic')
    instance_names = get_instance_names(gcp, instance_group)
    # TODO(ericgribkoff) Reduce this timeout. When running sequentially, this
    # occurs after patching the url map in test_change_backend_service, so we
    # need the extended timeout here as well.
    wait_until_all_rpcs_go_to_given_backends(instance_names,
                                             _WAIT_FOR_URL_MAP_PATCH_SEC)
    try:
        patch_backend_instances(gcp,
                                backend_service,
                                [instance_group, same_zone_instance_group],
                                balancing_mode='RATE')
        wait_for_healthy_backends(gcp, backend_service, instance_group)
        wait_for_healthy_backends(gcp, backend_service,
                                  same_zone_instance_group)
        combined_instance_names = instance_names + get_instance_names(
            gcp, same_zone_instance_group)
        wait_until_all_rpcs_go_to_given_backends(combined_instance_names,
                                                 _WAIT_FOR_BACKEND_SEC)
    finally:
        patch_backend_instances(gcp, backend_service, [instance_group])
def test_ping_pong(gcp, backend_service, instance_group):
    """Basic connectivity check: all backends become healthy and receive
    RPCs with no failures within the stats timeout."""
    logger.info('Running test_ping_pong')
    wait_for_healthy_backends(gcp, backend_service, instance_group)
    instance_names = get_instance_names(gcp, instance_group)
    wait_until_all_rpcs_go_to_given_backends(instance_names,
                                             _WAIT_FOR_STATS_SEC)
def test_remove_instance_group(gcp, backend_service, instance_group,
                               same_zone_instance_group):
    """Verify traffic shifts away when an instance group is removed from the
    backend service, and is restored afterwards.

    Adds a second group, removes the first, checks only the second receives
    traffic, then restores the original single-group configuration.
    """
    logger.info('Running test_remove_instance_group')
    # Resolve instance names before entering the try block: the finally
    # clause below needs `instance_names`, and the original code assigned it
    # inside the try, so an early failure raised NameError in the finally and
    # masked the real error.
    instance_names = get_instance_names(gcp, instance_group)
    try:
        patch_backend_instances(gcp,
                                backend_service,
                                [instance_group, same_zone_instance_group],
                                balancing_mode='RATE')
        wait_for_healthy_backends(gcp, backend_service, instance_group)
        wait_for_healthy_backends(gcp, backend_service,
                                  same_zone_instance_group)
        same_zone_instance_names = get_instance_names(gcp,
                                                      same_zone_instance_group)
        wait_until_all_rpcs_go_to_given_backends(
            instance_names + same_zone_instance_names, _WAIT_FOR_BACKEND_SEC)
        patch_backend_instances(gcp,
                                backend_service, [same_zone_instance_group],
                                balancing_mode='RATE')
        wait_until_all_rpcs_go_to_given_backends(same_zone_instance_names,
                                                 _WAIT_FOR_BACKEND_SEC)
    finally:
        patch_backend_instances(gcp, backend_service, [instance_group])
        wait_until_all_rpcs_go_to_given_backends(instance_names,
                                                 _WAIT_FOR_BACKEND_SEC)
def test_round_robin(gcp, backend_service, instance_group):
    """Verify RPCs are spread (approximately) evenly across all backends.

    Requires zero RPC failures; each backend's request count must be within
    `threshold` of the exact per-backend mean.
    """
    logger.info('Running test_round_robin')
    wait_for_healthy_backends(gcp, backend_service, instance_group)
    instance_names = get_instance_names(gcp, instance_group)
    # Allowed deviation (in RPCs) from the exact per-backend mean.
    threshold = 1
    wait_until_all_rpcs_go_to_given_backends(instance_names,
                                             _WAIT_FOR_STATS_SEC)
    stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
    # Sum the map's values directly instead of re-indexing by each key.
    total_requests_received = sum(stats.rpcs_by_peer.values())
    if total_requests_received != _NUM_TEST_RPCS:
        raise Exception('Unexpected RPC failures', stats)
    expected_requests = total_requests_received / len(instance_names)
    for instance in instance_names:
        if abs(stats.rpcs_by_peer[instance] - expected_requests) > threshold:
            raise Exception(
                'RPC peer distribution differs from expected by more than %d '
                'for instance %s (%s)' % (threshold, instance, stats))
def test_secondary_locality_gets_no_requests_on_partial_primary_failure(
        gcp,
        backend_service,
        primary_instance_group,
        secondary_instance_group,
        swapped_primary_and_secondary=False):
    """Verify the secondary locality stays idle on partial primary failure.

    Stops serving on a single primary instance; the remaining primary
    instances should absorb all traffic with no failover to the secondary
    locality. If TD's primary/secondary assignment turns out reversed,
    retries once with the groups swapped (guarded by
    swapped_primary_and_secondary to avoid infinite recursion).
    """
    logger.info(
        'Running secondary_locality_gets_no_requests_on_partial_primary_failure'
    )
    try:
        patch_backend_instances(
            gcp, backend_service,
            [primary_instance_group, secondary_instance_group])
        wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
        wait_for_healthy_backends(gcp, backend_service,
                                  secondary_instance_group)
        primary_instance_names = get_instance_names(gcp, primary_instance_group)
        wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
                                                 _WAIT_FOR_STATS_SEC)
        instances_to_stop = primary_instance_names[:1]
        remaining_instances = primary_instance_names[1:]
        try:
            set_serving_status(instances_to_stop,
                               gcp.service_port,
                               serving=False)
            wait_until_all_rpcs_go_to_given_backends(remaining_instances,
                                                     _WAIT_FOR_BACKEND_SEC)
        finally:
            set_serving_status(primary_instance_names,
                               gcp.service_port,
                               serving=True)
    except RpcDistributionError as e:
        if not swapped_primary_and_secondary and is_primary_instance_group(
                gcp, secondary_instance_group):
            # Swap expectation of primary and secondary instance groups.
            test_secondary_locality_gets_no_requests_on_partial_primary_failure(
                gcp,
                backend_service,
                secondary_instance_group,
                primary_instance_group,
                swapped_primary_and_secondary=True)
        else:
            raise e
    finally:
        patch_backend_instances(gcp, backend_service, [primary_instance_group])
def test_secondary_locality_gets_requests_on_primary_failure(
        gcp,
        backend_service,
        primary_instance_group,
        secondary_instance_group,
        swapped_primary_and_secondary=False):
    """Verify complete primary failure fails over to the secondary locality.

    Stops serving on every primary instance and expects all traffic to move
    to the secondary instances. If TD's primary/secondary assignment turns
    out reversed, retries once with the groups swapped (guarded by
    swapped_primary_and_secondary to avoid infinite recursion).
    """
    logger.info('Running secondary_locality_gets_requests_on_primary_failure')
    try:
        patch_backend_instances(
            gcp, backend_service,
            [primary_instance_group, secondary_instance_group])
        wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
        wait_for_healthy_backends(gcp, backend_service,
                                  secondary_instance_group)
        primary_instance_names = get_instance_names(gcp, primary_instance_group)
        secondary_instance_names = get_instance_names(gcp,
                                                      secondary_instance_group)
        wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
                                                 _WAIT_FOR_STATS_SEC)
        try:
            set_serving_status(primary_instance_names,
                               gcp.service_port,
                               serving=False)
            wait_until_all_rpcs_go_to_given_backends(secondary_instance_names,
                                                     _WAIT_FOR_BACKEND_SEC)
        finally:
            set_serving_status(primary_instance_names,
                               gcp.service_port,
                               serving=True)
    except RpcDistributionError as e:
        if not swapped_primary_and_secondary and is_primary_instance_group(
                gcp, secondary_instance_group):
            # Swap expectation of primary and secondary instance groups.
            test_secondary_locality_gets_requests_on_primary_failure(
                gcp,
                backend_service,
                secondary_instance_group,
                primary_instance_group,
                swapped_primary_and_secondary=True)
        else:
            raise e
    finally:
        patch_backend_instances(gcp, backend_service, [primary_instance_group])
def prepare_services_for_urlmap_tests(gcp, original_backend_service,
                                      instance_group, alternate_backend_service,
                                      same_zone_instance_group):
    '''
    This function prepares the services to be ready for tests that modify
    urlmaps.

    Populates the alternate backend service, waits for both services'
    backends to become healthy, and confirms all traffic initially goes to
    the original backends.

    Returns:
      Returns original and alternate backend names as lists of strings.
    '''
    # The config validation for proxyless doesn't allow setting
    # default_route_action or route_rules. Disable validate
    # validate_for_proxyless for this test. This can be removed when validation
    # accepts default_route_action.
    logger.info('disabling validate_for_proxyless in target proxy')
    set_validate_for_proxyless(gcp, False)
    logger.info('waiting for original backends to become healthy')
    wait_for_healthy_backends(gcp, original_backend_service, instance_group)
    patch_backend_instances(gcp, alternate_backend_service,
                            [same_zone_instance_group])
    logger.info('waiting for alternate to become healthy')
    wait_for_healthy_backends(gcp, alternate_backend_service,
                              same_zone_instance_group)
    original_backend_instances = get_instance_names(gcp, instance_group)
    logger.info('original backends instances: %s', original_backend_instances)
    alternate_backend_instances = get_instance_names(gcp,
                                                     same_zone_instance_group)
    logger.info('alternate backends instances: %s', alternate_backend_instances)
    # Start with all traffic going to original_backend_service.
    logger.info('waiting for traffic to all go to original backends')
    wait_until_all_rpcs_go_to_given_backends(original_backend_instances,
                                             _WAIT_FOR_STATS_SEC)
    return original_backend_instances, alternate_backend_instances
def test_traffic_splitting(gcp, original_backend_service, instance_group,
                           alternate_backend_service, same_zone_instance_group):
    """Verify weighted traffic splitting between two backend services
    (20/80), allowing a 5% tolerance, with up to 10 retry attempts."""
    # This test start with all traffic going to original_backend_service. Then
    # it updates URL-map to set default action to traffic splitting between
    # original and alternate. It waits for all backends in both services to
    # receive traffic, then verifies that weights are expected.
    logger.info('Running test_traffic_splitting')
    original_backend_instances, alternate_backend_instances = prepare_services_for_urlmap_tests(
        gcp, original_backend_service, instance_group,
        alternate_backend_service, same_zone_instance_group)
    try:
        # Patch urlmap, change route action to traffic splitting between
        # original and alternate.
        logger.info('patching url map with traffic splitting')
        original_service_percentage, alternate_service_percentage = 20, 80
        patch_url_map_backend_service(
            gcp,
            services_with_weights={
                original_backend_service: original_service_percentage,
                alternate_backend_service: alternate_service_percentage,
            })
        # Split percentage between instances: [20,80] -> [10,10,40,40].
        expected_instance_percentage = [
            original_service_percentage * 1.0 / len(original_backend_instances)
        ] * len(original_backend_instances) + [
            alternate_service_percentage * 1.0 /
            len(alternate_backend_instances)
        ] * len(alternate_backend_instances)
        # Wait for traffic to go to both services.
        logger.info(
            'waiting for traffic to go to all backends (including alternate)')
        wait_until_all_rpcs_go_to_given_backends(
            original_backend_instances + alternate_backend_instances,
            _WAIT_FOR_STATS_SEC)
        # Verify that weights between two services are expected.
        retry_count = 10
        # Each attempt takes about 10 seconds, 10 retries is equivalent to 100
        # seconds timeout.
        for i in range(retry_count):
            stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
            got_instance_count = [
                stats.rpcs_by_peer[i] for i in original_backend_instances
            ] + [stats.rpcs_by_peer[i] for i in alternate_backend_instances]
            total_count = sum(got_instance_count)
            got_instance_percentage = [
                x * 100.0 / total_count for x in got_instance_count
            ]
            try:
                compare_distributions(got_instance_percentage,
                                      expected_instance_percentage, 5)
            except Exception as e:
                logger.info('attempt %d', i)
                logger.info('got percentage: %s', got_instance_percentage)
                logger.info('expected percentage: %s',
                            expected_instance_percentage)
                logger.info(e)
                if i == retry_count - 1:
                    raise Exception(
                        'RPC distribution (%s) differs from expected (%s)' %
                        (got_instance_percentage, expected_instance_percentage))
            else:
                logger.info("success")
                break
    finally:
        patch_url_map_backend_service(gcp, original_backend_service)
        patch_backend_instances(gcp, alternate_backend_service, [])
        set_validate_for_proxyless(gcp, True)
def test_path_matching(gcp, original_backend_service, instance_group,
                       alternate_backend_service, same_zone_instance_group):
    """Verify URL-map path matching routes each RPC method to the right
    backend service (fullPathMatch and prefixMatch rules)."""
    # This test start with all traffic (UnaryCall and EmptyCall) going to
    # original_backend_service.
    #
    # Then it updates URL-map to add routes, to make UnaryCall and EmptyCall to
    # go different backends. It waits for all backends in both services to
    # receive traffic, then verifies that traffic goes to the expected
    # backends.
    logger.info('Running test_path_matching')
    original_backend_instances, alternate_backend_instances = prepare_services_for_urlmap_tests(
        gcp, original_backend_service, instance_group,
        alternate_backend_service, same_zone_instance_group)
    try:
        # A list of tuples (route_rules, expected_instances).
        test_cases = [
            (
                [{
                    'priority': 0,
                    # FullPath EmptyCall -> alternate_backend_service.
                    'matchRules': [{
                        'fullPathMatch': '/grpc.testing.TestService/EmptyCall'
                    }],
                    'service': alternate_backend_service.url
                }],
                {
                    "EmptyCall": alternate_backend_instances,
                    "UnaryCall": original_backend_instances
                }),
            (
                [{
                    'priority': 0,
                    # Prefix UnaryCall -> alternate_backend_service.
                    'matchRules': [{
                        'prefixMatch': '/grpc.testing.TestService/Unary'
                    }],
                    'service': alternate_backend_service.url
                }],
                {
                    "UnaryCall": alternate_backend_instances,
                    "EmptyCall": original_backend_instances
                })
        ]
        for (route_rules, expected_instances) in test_cases:
            logger.info('patching url map with %s -> alternative',
                        route_rules[0]['matchRules'])
            patch_url_map_backend_service(gcp,
                                          original_backend_service,
                                          route_rules=route_rules)
            # Wait for traffic to go to both services.
            logger.info(
                'waiting for traffic to go to all backends (including alternate)'
            )
            wait_until_all_rpcs_go_to_given_backends(
                original_backend_instances + alternate_backend_instances,
                _WAIT_FOR_STATS_SEC)
            retry_count = 10
            # Each attempt takes about 10 seconds, 10 retries is equivalent to 100
            # seconds timeout.
            for i in range(retry_count):
                stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
                if not stats.rpcs_by_method:
                    raise ValueError(
                        'stats.rpcs_by_method is None, the interop client stats service does not support this test case'
                    )
                logger.info('attempt %d', i)
                if compare_expected_instances(stats, expected_instances):
                    logger.info("success")
                    break
    finally:
        patch_url_map_backend_service(gcp, original_backend_service)
        patch_backend_instances(gcp, alternate_backend_service, [])
        set_validate_for_proxyless(gcp, True)
def test_header_matching(gcp, original_backend_service, instance_group,
                         alternate_backend_service, same_zone_instance_group):
    """Verify URL-map header matching routes RPCs carrying the test metadata
    (_TEST_METADATA_KEY/_TEST_METADATA_VALUE) to the alternate service."""
    # This test start with all traffic (UnaryCall and EmptyCall) going to
    # original_backend_service.
    #
    # Then it updates URL-map to add routes, to make RPCs with test headers to
    # go to different backends. It waits for all backends in both services to
    # receive traffic, then verifies that traffic goes to the expected
    # backends.
    logger.info('Running test_header_matching')
    original_backend_instances, alternate_backend_instances = prepare_services_for_urlmap_tests(
        gcp, original_backend_service, instance_group,
        alternate_backend_service, same_zone_instance_group)
    try:
        # A list of tuples (route_rules, expected_instances).
        test_cases = [(
            [{
                'priority': 0,
                # Header ExactMatch -> alternate_backend_service.
                # EmptyCall is sent with the metadata.
                'matchRules': [{
                    'prefixMatch':
                        '/',
                    'headerMatches': [{
                        'headerName': _TEST_METADATA_KEY,
                        'exactMatch': _TEST_METADATA_VALUE
                    }]
                }],
                'service': alternate_backend_service.url
            }],
            {
                "EmptyCall": alternate_backend_instances,
                "UnaryCall": original_backend_instances
            })]
        for (route_rules, expected_instances) in test_cases:
            logger.info('patching url map with %s -> alternative',
                        route_rules[0]['matchRules'])
            patch_url_map_backend_service(gcp,
                                          original_backend_service,
                                          route_rules=route_rules)
            # Wait for traffic to go to both services.
            logger.info(
                'waiting for traffic to go to all backends (including alternate)'
            )
            wait_until_all_rpcs_go_to_given_backends(
                original_backend_instances + alternate_backend_instances,
                _WAIT_FOR_STATS_SEC)
            retry_count = 10
            # Each attempt takes about 10 seconds, 10 retries is equivalent to 100
            # seconds timeout.
            for i in range(retry_count):
                stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
                if not stats.rpcs_by_method:
                    raise ValueError(
                        'stats.rpcs_by_method is None, the interop client stats service does not support this test case'
                    )
                logger.info('attempt %d', i)
                if compare_expected_instances(stats, expected_instances):
                    logger.info("success")
                    break
    finally:
        patch_url_map_backend_service(gcp, original_backend_service)
        patch_backend_instances(gcp, alternate_backend_service, [])
        set_validate_for_proxyless(gcp, True)
def set_serving_status(instances, service_port, serving):
    """Flip the xDS test servers' health status on every given instance.

    Args:
      instances: hostnames of the backend VMs to update.
      service_port: port where each server exposes XdsUpdateHealthService.
      serving: True to mark servers serving, False to mark them not serving.
    """
    for instance in instances:
        target = '%s:%d' % (instance, service_port)
        with grpc.insecure_channel(target) as channel:
            health_stub = test_pb2_grpc.XdsUpdateHealthServiceStub(channel)
            if serving:
                health_stub.SetServing(empty_pb2.Empty())
            else:
                health_stub.SetNotServing(empty_pb2.Empty())
def is_primary_instance_group(gcp, instance_group):
    """Check whether TD currently routes all client traffic to this group.

    Clients may connect to a TD instance in a different region than the
    client, in which case primary/secondary assignments may not be based on
    the client's actual locality.
    """
    group_members = set(get_instance_names(gcp, instance_group))
    stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
    return all(peer in group_members for peer in stats.rpcs_by_peer.keys())
def get_startup_script(path_to_server_binary, service_port):
    """Build the VM startup script that launches the xds test server.

    If a pre-built server binary path is given, just launch it in the
    background; otherwise clone and build grpc-java's interop server on
    first boot.
    """
    if not path_to_server_binary:
        return """#!/bin/bash
sudo apt update
sudo apt install -y git default-jdk
mkdir java_server
pushd java_server
git clone https://github.com/grpc/grpc-java.git
pushd grpc-java
pushd interop-testing
../gradlew installDist -x test -PskipCodegen=true -PskipAndroid=true
nohup build/install/grpc-interop-testing/bin/xds-test-server \
    --port=%d 1>/dev/null &""" % service_port
    return "nohup %s --port=%d 1>/dev/null &" % (path_to_server_binary,
                                                 service_port)
def create_instance_template(gcp, name, network, source_image, machine_type,
                             startup_script):
    """Create a GCE instance template and record it on the GcpState.

    The template tags instances 'allow-health-checks' so the firewall rule
    created by create_health_check_firewall_rule applies to them, and
    injects startup_script as instance metadata so each VM launches the
    test server on boot.
    """
    config = {
        'name': name,
        'properties': {
            'tags': {
                'items': ['allow-health-checks']
            },
            'machineType': machine_type,
            'serviceAccounts': [{
                'email': 'default',
                'scopes': ['https://www.googleapis.com/auth/cloud-platform',]
            }],
            'networkInterfaces': [{
                # External NAT so the VM can reach the internet (apt, git).
                'accessConfigs': [{
                    'type': 'ONE_TO_ONE_NAT'
                }],
                'network': network
            }],
            'disks': [{
                'boot': True,
                'initializeParams': {
                    'sourceImage': source_image
                }
            }],
            'metadata': {
                'items': [{
                    'key': 'startup-script',
                    'value': startup_script
                }]
            }
        }
    }
    logger.debug('Sending GCP request with body=%s', config)
    result = gcp.compute.instanceTemplates().insert(
        project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp, result['name'])
    gcp.instance_template = GcpResource(config['name'], result['targetLink'])
def add_instance_group(gcp, zone, name, size):
    """Create a managed instance group from gcp.instance_template.

    Declares a named port 'grpc' on gcp.service_port (used by health
    checks and backend services), waits until the group reaches the
    requested size, records the group on the GcpState, and returns it.
    """
    config = {
        'name': name,
        'instanceTemplate': gcp.instance_template.url,
        'targetSize': size,
        'namedPorts': [{
            'name': 'grpc',
            'port': gcp.service_port
        }]
    }
    logger.debug('Sending GCP request with body=%s', config)
    result = gcp.compute.instanceGroupManagers().insert(
        project=gcp.project, zone=zone,
        body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_zone_operation(gcp, zone, result['name'])
    # Fetch the manager back to obtain the instanceGroup self-link.
    result = gcp.compute.instanceGroupManagers().get(
        project=gcp.project, zone=zone,
        instanceGroupManager=config['name']).execute(
            num_retries=_GCP_API_RETRIES)
    instance_group = InstanceGroup(config['name'], result['instanceGroup'],
                                   zone)
    gcp.instance_groups.append(instance_group)
    wait_for_instance_group_to_reach_expected_size(gcp, instance_group, size,
                                                   _WAIT_FOR_OPERATION_SEC)
    return instance_group
def create_health_check(gcp, name):
    """Create a health check and record it on the GcpState.

    Uses a gRPC-native health check when the alpha compute client is
    available; otherwise falls back to a TCP check against the named
    'grpc' port.
    """
    if gcp.alpha_compute:
        config = {
            'name': name,
            'type': 'GRPC',
            'grpcHealthCheck': {
                'portSpecification': 'USE_SERVING_PORT'
            }
        }
        compute_to_use = gcp.alpha_compute
    else:
        config = {
            'name': name,
            'type': 'TCP',
            'tcpHealthCheck': {
                'portName': 'grpc'
            }
        }
        compute_to_use = gcp.compute
    logger.debug('Sending GCP request with body=%s', config)
    result = compute_to_use.healthChecks().insert(
        project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp, result['name'])
    gcp.health_check = GcpResource(config['name'], result['targetLink'])
def create_health_check_firewall_rule(gcp, name):
    """Allow GCP health-checker source ranges to reach tagged instances.

    The source ranges are Google's documented health-check prober CIDRs;
    the rule targets instances tagged 'allow-health-checks' (set by
    create_instance_template).
    """
    config = {
        'name': name,
        'direction': 'INGRESS',
        'allowed': [{
            'IPProtocol': 'tcp'
        }],
        'sourceRanges': ['35.191.0.0/16', '130.211.0.0/22'],
        'targetTags': ['allow-health-checks'],
    }
    logger.debug('Sending GCP request with body=%s', config)
    result = gcp.compute.firewalls().insert(
        project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp, result['name'])
    gcp.health_check_firewall_rule = GcpResource(config['name'],
                                                 result['targetLink'])
def add_backend_service(gcp, name):
    """Create a backend service, record it on the GcpState, and return it.

    The GRPC protocol requires the alpha compute API; on the stable API
    HTTP2 is used instead.
    """
    if gcp.alpha_compute:
        protocol, compute_to_use = 'GRPC', gcp.alpha_compute
    else:
        protocol, compute_to_use = 'HTTP2', gcp.compute
    config = {
        'name': name,
        'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
        'healthChecks': [gcp.health_check.url],
        'portName': 'grpc',
        'protocol': protocol
    }
    logger.debug('Sending GCP request with body=%s', config)
    result = compute_to_use.backendServices().insert(
        project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp, result['name'])
    backend_service = GcpResource(config['name'], result['targetLink'])
    gcp.backend_services.append(backend_service)
    return backend_service
def create_url_map(gcp, name, backend_service, host_name):
    """Create a url map routing host_name to backend_service.

    A single path matcher (_PATH_MATCHER_NAME) is installed so that later
    patches (patch_url_map_backend_service) can swap its default service
    or add route rules in place.
    """
    config = {
        'name': name,
        'defaultService': backend_service.url,
        'pathMatchers': [{
            'name': _PATH_MATCHER_NAME,
            'defaultService': backend_service.url,
        }],
        'hostRules': [{
            'hosts': [host_name],
            'pathMatcher': _PATH_MATCHER_NAME
        }]
    }
    logger.debug('Sending GCP request with body=%s', config)
    result = gcp.compute.urlMaps().insert(
        project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp, result['name'])
    gcp.url_map = GcpResource(config['name'], result['targetLink'])
def patch_url_map_host_rule_with_port(gcp, name, backend_service, host_name):
    """Rewrite the url map's host rule to include gcp.service_port.

    Needed when the forwarding rule landed on a non-default port, since
    clients then dial host:port and the host rule must match that form.
    """
    config = {
        'hostRules': [{
            'hosts': ['%s:%d' % (host_name, gcp.service_port)],
            'pathMatcher': _PATH_MATCHER_NAME
        }]
    }
    logger.debug('Sending GCP request with body=%s', config)
    result = gcp.compute.urlMaps().patch(
        project=gcp.project, urlMap=name,
        body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp, result['name'])
def set_validate_for_proxyless(gcp, validate_for_proxyless):
    """Toggle validateForProxyless on the target proxy (alpha API only).

    No-op when the alpha compute client is unavailable, since the flag
    only exists on the alpha targetGrpcProxies resource.
    """
    if not gcp.alpha_compute:
        logger.debug(
            'Not setting validateForProxy because alpha is not enabled')
        return
    # This function deletes global_forwarding_rule and target_proxy, then
    # recreate target_proxy with validateForProxyless=False. This is necessary
    # because patching target_grpc_proxy isn't supported.
    delete_global_forwarding_rule(gcp)
    delete_target_proxy(gcp)
    create_target_proxy(gcp, gcp.target_proxy.name, validate_for_proxyless)
    create_global_forwarding_rule(gcp, gcp.global_forwarding_rule.name,
                                  [gcp.service_port])
def create_target_proxy(gcp, name, validate_for_proxyless=True):
    """Create the target proxy pointing at gcp.url_map.

    Uses a target gRPC proxy (with validateForProxyless) on the alpha
    API, otherwise a target HTTP proxy on the stable API. Records the
    created resource on the GcpState.
    """
    if gcp.alpha_compute:
        # NOTE(review): these body keys are snake_case, unlike the camelCase
        # keys used elsewhere in this file -- presumably accepted by the
        # alpha API client; confirm against the discovery document.
        config = {
            'name': name,
            'url_map': gcp.url_map.url,
            'validate_for_proxyless': validate_for_proxyless,
        }
        logger.debug('Sending GCP request with body=%s', config)
        result = gcp.alpha_compute.targetGrpcProxies().insert(
            project=gcp.project,
            body=config).execute(num_retries=_GCP_API_RETRIES)
    else:
        config = {
            'name': name,
            'url_map': gcp.url_map.url,
        }
        logger.debug('Sending GCP request with body=%s', config)
        result = gcp.compute.targetHttpProxies().insert(
            project=gcp.project,
            body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp, result['name'])
    gcp.target_proxy = GcpResource(config['name'], result['targetLink'])
def create_global_forwarding_rule(gcp, name, potential_ports):
    """Create a global forwarding rule on the first port that succeeds.

    Tries each port in potential_ports in order until the insert succeeds,
    then records the rule and the chosen port (gcp.service_port) on the
    GcpState. If every port fails, returns without setting
    gcp.service_port -- the caller is expected to check for that.
    """
    if gcp.alpha_compute:
        compute_to_use = gcp.alpha_compute
    else:
        compute_to_use = gcp.compute
    for port in potential_ports:
        try:
            config = {
                'name': name,
                'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
                'portRange': str(port),
                'IPAddress': '0.0.0.0',
                'network': args.network,
                'target': gcp.target_proxy.url,
            }
            logger.debug('Sending GCP request with body=%s', config)
            result = compute_to_use.globalForwardingRules().insert(
                project=gcp.project,
                body=config).execute(num_retries=_GCP_API_RETRIES)
            wait_for_global_operation(gcp, result['name'])
            gcp.global_forwarding_rule = GcpResource(config['name'],
                                                     result['targetLink'])
            gcp.service_port = port
            return
        except googleapiclient.errors.HttpError as http_error:
            # Lazy %-style logging args (consistent with the rest of this
            # file) instead of eagerly formatting the message ourselves.
            logger.warning(
                'Got error %s when attempting to create forwarding rule to '
                '0.0.0.0:%d. Retrying with another port.', http_error, port)
def get_health_check(gcp, health_check_name):
    """Look up an existing health check and record it on the GcpState."""
    # num_retries added for consistency with every other API call in this
    # file, so transient HTTP errors don't abort resource discovery.
    result = gcp.compute.healthChecks().get(
        project=gcp.project, healthCheck=health_check_name).execute(
            num_retries=_GCP_API_RETRIES)
    gcp.health_check = GcpResource(health_check_name, result['selfLink'])
def get_health_check_firewall_rule(gcp, firewall_name):
    """Look up the health-check firewall rule and record it on the GcpState."""
    # num_retries added for consistency with the rest of the file's calls.
    result = gcp.compute.firewalls().get(
        project=gcp.project,
        firewall=firewall_name).execute(num_retries=_GCP_API_RETRIES)
    gcp.health_check_firewall_rule = GcpResource(firewall_name,
                                                 result['selfLink'])
def get_backend_service(gcp, backend_service_name):
    """Look up an existing backend service, record it, and return it."""
    # num_retries added for consistency with the rest of the file's calls.
    result = gcp.compute.backendServices().get(
        project=gcp.project, backendService=backend_service_name).execute(
            num_retries=_GCP_API_RETRIES)
    backend_service = GcpResource(backend_service_name, result['selfLink'])
    gcp.backend_services.append(backend_service)
    return backend_service
def get_url_map(gcp, url_map_name):
    """Look up an existing url map and record it on the GcpState."""
    # num_retries added for consistency with the rest of the file's calls.
    result = gcp.compute.urlMaps().get(
        project=gcp.project,
        urlMap=url_map_name).execute(num_retries=_GCP_API_RETRIES)
    gcp.url_map = GcpResource(url_map_name, result['selfLink'])
def get_target_proxy(gcp, target_proxy_name):
    """Look up an existing target proxy and record it on the GcpState.

    Queries targetGrpcProxies on alpha and targetHttpProxies otherwise,
    mirroring how create_target_proxy creates the resource.
    """
    # num_retries added for consistency with the rest of the file's calls.
    if gcp.alpha_compute:
        result = gcp.alpha_compute.targetGrpcProxies().get(
            project=gcp.project, targetGrpcProxy=target_proxy_name).execute(
                num_retries=_GCP_API_RETRIES)
    else:
        result = gcp.compute.targetHttpProxies().get(
            project=gcp.project, targetHttpProxy=target_proxy_name).execute(
                num_retries=_GCP_API_RETRIES)
    gcp.target_proxy = GcpResource(target_proxy_name, result['selfLink'])
def get_global_forwarding_rule(gcp, forwarding_rule_name):
    """Look up the global forwarding rule and record it on the GcpState."""
    # num_retries added for consistency with the rest of the file's calls.
    result = gcp.compute.globalForwardingRules().get(
        project=gcp.project, forwardingRule=forwarding_rule_name).execute(
            num_retries=_GCP_API_RETRIES)
    gcp.global_forwarding_rule = GcpResource(forwarding_rule_name,
                                             result['selfLink'])
def get_instance_template(gcp, template_name):
    """Look up an existing instance template and record it on the GcpState."""
    # num_retries added for consistency with the rest of the file's calls.
    result = gcp.compute.instanceTemplates().get(
        project=gcp.project, instanceTemplate=template_name).execute(
            num_retries=_GCP_API_RETRIES)
    gcp.instance_template = GcpResource(template_name, result['selfLink'])
def get_instance_group(gcp, zone, instance_group_name):
    """Look up an existing instance group, record it, and return it.

    Also captures the group's named port as gcp.service_port.
    """
    # num_retries added for consistency with the rest of the file's calls.
    result = gcp.compute.instanceGroups().get(
        project=gcp.project, zone=zone,
        instanceGroup=instance_group_name).execute(
            num_retries=_GCP_API_RETRIES)
    # NOTE(review): assumes the first named port is the 'grpc' port created
    # by add_instance_group -- confirm if groups ever gain more named ports.
    gcp.service_port = result['namedPorts'][0]['port']
    instance_group = InstanceGroup(instance_group_name, result['selfLink'],
                                   zone)
    gcp.instance_groups.append(instance_group)
    return instance_group
def delete_global_forwarding_rule(gcp):
    """Best-effort deletion of the global forwarding rule; errors are logged."""
    try:
        op = gcp.compute.globalForwardingRules().delete(
            project=gcp.project,
            forwardingRule=gcp.global_forwarding_rule.name).execute(
                num_retries=_GCP_API_RETRIES)
        wait_for_global_operation(gcp, op['name'])
    except googleapiclient.errors.HttpError as http_error:
        logger.info('Delete failed: %s', http_error)
def delete_target_proxy(gcp):
    """Best-effort deletion of the target proxy; errors are logged.

    Deletes a targetGrpcProxy on alpha, a targetHttpProxy otherwise,
    mirroring how create_target_proxy created it.
    """
    try:
        if gcp.alpha_compute:
            op = gcp.alpha_compute.targetGrpcProxies().delete(
                project=gcp.project,
                targetGrpcProxy=gcp.target_proxy.name).execute(
                    num_retries=_GCP_API_RETRIES)
        else:
            op = gcp.compute.targetHttpProxies().delete(
                project=gcp.project,
                targetHttpProxy=gcp.target_proxy.name).execute(
                    num_retries=_GCP_API_RETRIES)
        wait_for_global_operation(gcp, op['name'])
    except googleapiclient.errors.HttpError as http_error:
        logger.info('Delete failed: %s', http_error)
def delete_url_map(gcp):
    """Best-effort deletion of the url map; errors are logged."""
    try:
        op = gcp.compute.urlMaps().delete(
            project=gcp.project,
            urlMap=gcp.url_map.name).execute(num_retries=_GCP_API_RETRIES)
        wait_for_global_operation(gcp, op['name'])
    except googleapiclient.errors.HttpError as http_error:
        logger.info('Delete failed: %s', http_error)
def delete_backend_services(gcp):
    """Best-effort deletion of every recorded backend service."""
    for svc in gcp.backend_services:
        try:
            op = gcp.compute.backendServices().delete(
                project=gcp.project,
                backendService=svc.name).execute(
                    num_retries=_GCP_API_RETRIES)
            wait_for_global_operation(gcp, op['name'])
        except googleapiclient.errors.HttpError as http_error:
            logger.info('Delete failed: %s', http_error)
def delete_firewall(gcp):
    """Best-effort deletion of the health-check firewall rule."""
    try:
        op = gcp.compute.firewalls().delete(
            project=gcp.project,
            firewall=gcp.health_check_firewall_rule.name).execute(
                num_retries=_GCP_API_RETRIES)
        wait_for_global_operation(gcp, op['name'])
    except googleapiclient.errors.HttpError as http_error:
        logger.info('Delete failed: %s', http_error)
def delete_health_check(gcp):
    """Best-effort deletion of the health check; errors are logged."""
    try:
        op = gcp.compute.healthChecks().delete(
            project=gcp.project,
            healthCheck=gcp.health_check.name).execute(
                num_retries=_GCP_API_RETRIES)
        wait_for_global_operation(gcp, op['name'])
    except googleapiclient.errors.HttpError as http_error:
        logger.info('Delete failed: %s', http_error)
def delete_instance_groups(gcp):
    """Best-effort deletion of every recorded managed instance group.

    Uses the longer backend timeout per group since instance deletion can
    be slow.
    """
    for group in gcp.instance_groups:
        try:
            op = gcp.compute.instanceGroupManagers().delete(
                project=gcp.project,
                zone=group.zone,
                instanceGroupManager=group.name).execute(
                    num_retries=_GCP_API_RETRIES)
            wait_for_zone_operation(gcp,
                                    group.zone,
                                    op['name'],
                                    timeout_sec=_WAIT_FOR_BACKEND_SEC)
        except googleapiclient.errors.HttpError as http_error:
            logger.info('Delete failed: %s', http_error)
def delete_instance_template(gcp):
    """Best-effort deletion of the instance template; errors are logged."""
    try:
        op = gcp.compute.instanceTemplates().delete(
            project=gcp.project,
            instanceTemplate=gcp.instance_template.name).execute(
                num_retries=_GCP_API_RETRIES)
        wait_for_global_operation(gcp, op['name'])
    except googleapiclient.errors.HttpError as http_error:
        logger.info('Delete failed: %s', http_error)
def patch_backend_instances(gcp,
                            backend_service,
                            instance_groups,
                            balancing_mode='UTILIZATION'):
    """Replace the backend service's backends with the given instance groups.

    Args:
        gcp: GcpState holding the discovery clients and project id.
        backend_service: GcpResource of the backend service to patch.
        instance_groups: InstanceGroup objects to install as backends.
        balancing_mode: GCP balancing mode; 'RATE' additionally requires
            maxRate, which is set to 1 here.
    """
    if gcp.alpha_compute:
        compute_to_use = gcp.alpha_compute
    else:
        compute_to_use = gcp.compute
    config = {
        'backends': [{
            'group': instance_group.url,
            'balancingMode': balancing_mode,
            # maxRate only applies to RATE mode; presumably the API client
            # drops None-valued entries from the request body -- TODO confirm.
            'maxRate': 1 if balancing_mode == 'RATE' else None
        } for instance_group in instance_groups],
    }
    logger.debug('Sending GCP request with body=%s', config)
    result = compute_to_use.backendServices().patch(
        project=gcp.project, backendService=backend_service.name,
        body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp,
                              result['name'],
                              timeout_sec=_WAIT_FOR_BACKEND_SEC)
def resize_instance_group(gcp,
                          instance_group,
                          new_size,
                          timeout_sec=_WAIT_FOR_OPERATION_SEC):
    """Resize a managed instance group and wait until it reaches new_size.

    NOTE(review): the zone-operation wait below uses a hard-coded 360s
    budget rather than timeout_sec; only the size-convergence wait honors
    the caller's timeout_sec. Confirm whether that is intentional.
    """
    op = gcp.compute.instanceGroupManagers().resize(
        project=gcp.project,
        zone=instance_group.zone,
        instanceGroupManager=instance_group.name,
        size=new_size).execute(num_retries=_GCP_API_RETRIES)
    wait_for_zone_operation(gcp,
                            instance_group.zone,
                            op['name'],
                            timeout_sec=360)
    wait_for_instance_group_to_reach_expected_size(gcp, instance_group,
                                                   new_size, timeout_sec)
def patch_url_map_backend_service(gcp,
                                  backend_service=None,
                                  services_with_weights=None,
                                  route_rules=None):
    """Change the url map's backend service.

    At most one of backend_service and services_with_weights may be set:
    backend_service installs a single default service, while
    services_with_weights (a dict mapping service -> integer weight)
    installs a weighted traffic split across several services.

    Raises:
        ValueError: if both backend_service and services_with_weights are
            provided.
    """
    # Fix: the original message referenced a nonexistent parameter name
    # ('service_with_weights'); keep it in sync with the signature.
    if backend_service and services_with_weights:
        raise ValueError(
            'both backend_service and services_with_weights are not None.')
    default_service = backend_service.url if backend_service else None
    default_route_action = {
        'weightedBackendServices': [{
            'backendService': service.url,
            'weight': w,
        } for service, w in services_with_weights.items()]
    } if services_with_weights else None
    config = {
        'pathMatchers': [{
            'name': _PATH_MATCHER_NAME,
            'defaultService': default_service,
            'defaultRouteAction': default_route_action,
            'routeRules': route_rules,
        }]
    }
    logger.debug('Sending GCP request with body=%s', config)
    result = gcp.compute.urlMaps().patch(
        project=gcp.project, urlMap=gcp.url_map.name,
        body=config).execute(num_retries=_GCP_API_RETRIES)
    wait_for_global_operation(gcp, result['name'])
def wait_for_instance_group_to_reach_expected_size(gcp, instance_group,
                                                   expected_size, timeout_sec):
    """Poll every 2s until the group contains exactly expected_size VMs.

    Raises an Exception describing the size mismatch if timeout_sec
    elapses first.
    """
    deadline = time.time() + timeout_sec
    while True:
        current_size = len(get_instance_names(gcp, instance_group))
        if current_size == expected_size:
            return
        if time.time() > deadline:
            raise Exception(
                'Instance group had expected size %d but actual size %d' %
                (expected_size, current_size))
        time.sleep(2)
def wait_for_global_operation(gcp,
                              operation,
                              timeout_sec=_WAIT_FOR_OPERATION_SEC):
    """Poll a global compute operation every 2s until DONE or timeout.

    Raises an Exception with the operation's error payload on failure, or
    a timeout Exception if it does not complete within timeout_sec.
    """
    deadline = time.time() + timeout_sec
    while time.time() <= deadline:
        result = gcp.compute.globalOperations().get(
            project=gcp.project,
            operation=operation).execute(num_retries=_GCP_API_RETRIES)
        if result['status'] == 'DONE':
            if 'error' in result:
                raise Exception(result['error'])
            return
        time.sleep(2)
    raise Exception('Operation %s did not complete within %d' %
                    (operation, timeout_sec))
def wait_for_zone_operation(gcp,
                            zone,
                            operation,
                            timeout_sec=_WAIT_FOR_OPERATION_SEC):
    """Poll a zonal compute operation every 2s until DONE or timeout.

    Raises an Exception with the operation's error payload on failure, or
    a timeout Exception if it does not complete within timeout_sec.
    """
    deadline = time.time() + timeout_sec
    while time.time() <= deadline:
        result = gcp.compute.zoneOperations().get(
            project=gcp.project, zone=zone,
            operation=operation).execute(num_retries=_GCP_API_RETRIES)
        if result['status'] == 'DONE':
            if 'error' in result:
                raise Exception(result['error'])
            return
        time.sleep(2)
    raise Exception('Operation %s did not complete within %d' %
                    (operation, timeout_sec))
def wait_for_healthy_backends(gcp,
                              backend_service,
                              instance_group,
                              timeout_sec=_WAIT_FOR_BACKEND_SEC):
    """Poll getHealth until every instance in the group reports HEALTHY.

    Succeeds only when all reported instances are HEALTHY *and* the count
    matches the group's current size; otherwise raises after timeout_sec
    with the last health response included for debugging.
    """
    start_time = time.time()
    config = {'group': instance_group.url}
    expected_size = len(get_instance_names(gcp, instance_group))
    # Fix: initialize so the timeout raise below can't hit a NameError when
    # the loop body never executed (e.g. a non-positive timeout_sec).
    result = None
    while time.time() - start_time <= timeout_sec:
        result = gcp.compute.backendServices().getHealth(
            project=gcp.project,
            backendService=backend_service.name,
            body=config).execute(num_retries=_GCP_API_RETRIES)
        if 'healthStatus' in result:
            logger.info('received healthStatus: %s', result['healthStatus'])
            healthy = all(instance['healthState'] == 'HEALTHY'
                          for instance in result['healthStatus'])
            if healthy and expected_size == len(result['healthStatus']):
                return
        time.sleep(2)
    raise Exception('Not all backends became healthy within %d seconds: %s' %
                    (timeout_sec, result))
def get_instance_names(gcp, instance_group):
    """Return the short instance names of all VMs in an instance group."""
    result = gcp.compute.instanceGroups().listInstances(
        project=gcp.project,
        zone=instance_group.zone,
        instanceGroup=instance_group.name,
        body={
            'instanceState': 'ALL'
        }).execute(num_retries=_GCP_API_RETRIES)
    if 'items' not in result:
        return []
    # listInstances() returns the full URL of the instance, which ends with
    # the instance name. compute.instances().get() requires using the
    # instance name (not the full URL) to look up instance details, so we
    # just extract the name manually.
    instance_names = [
        item['instance'].split('/')[-1] for item in result['items']
    ]
    logger.info('retrieved instance names: %s', instance_names)
    return instance_names
def clean_up(gcp):
    """Tear down every GCP resource recorded on the GcpState.

    Deletion order matters: consumers are removed before the resources
    they reference (forwarding rule -> proxy -> url map -> backend
    services -> firewall/health check -> instance groups -> template).
    Each delete_* helper is best-effort and logs failures.
    """
    if gcp.global_forwarding_rule:
        delete_global_forwarding_rule(gcp)
    if gcp.target_proxy:
        delete_target_proxy(gcp)
    if gcp.url_map:
        delete_url_map(gcp)
    delete_backend_services(gcp)
    if gcp.health_check_firewall_rule:
        delete_firewall(gcp)
    if gcp.health_check:
        delete_health_check(gcp)
    delete_instance_groups(gcp)
    if gcp.instance_template:
        delete_instance_template(gcp)
class InstanceGroup(object):
    """A GCE instance group: its name, resource URL, and zone."""

    def __init__(self, name, url, zone):
        self.name = name
        self.url = url
        self.zone = zone

    def __repr__(self):
        # Aid debugging/logging: show identity instead of the default repr.
        return 'InstanceGroup(%r, %r, %r)' % (self.name, self.url, self.zone)
class GcpResource(object):
    """A generic named GCP resource with its self/target link URL."""

    def __init__(self, name, url):
        self.name = name
        self.url = url

    def __repr__(self):
        # Aid debugging/logging: show identity instead of the default repr.
        return 'GcpResource(%r, %r)' % (self.name, self.url)
class GcpState(object):
    """Mutable registry of every GCP resource this test run creates or uses.

    The resource helper functions store their results here so that
    clean_up() can tear everything down at the end of the run.
    """

    def __init__(self, compute, alpha_compute, project):
        # Discovery clients: `compute` is v1; `alpha_compute` is the alpha
        # API client, or None when only stable APIs are allowed.
        self.compute = compute
        self.alpha_compute = alpha_compute
        self.project = project
        # Singleton resources, populated by the create_*/get_* helpers.
        self.health_check = None
        self.health_check_firewall_rule = None
        self.backend_services = []
        self.url_map = None
        self.target_proxy = None
        self.global_forwarding_rule = None
        # Port chosen by create_global_forwarding_rule (or discovered from
        # an existing instance group's named port).
        self.service_port = None
        self.instance_template = None
        self.instance_groups = []
# Build the compute API clients: either from local discovery documents or
# from the live googleapis discovery service. The alpha client is only
# built when --only_stable_gcp_apis is not set.
alpha_compute = None
if args.compute_discovery_document:
    with open(args.compute_discovery_document, 'r') as discovery_doc:
        compute = googleapiclient.discovery.build_from_document(
            discovery_doc.read())
    if not args.only_stable_gcp_apis and args.alpha_compute_discovery_document:
        with open(args.alpha_compute_discovery_document, 'r') as discovery_doc:
            alpha_compute = googleapiclient.discovery.build_from_document(
                discovery_doc.read())
else:
    compute = googleapiclient.discovery.build('compute', 'v1')
    if not args.only_stable_gcp_apis:
        alpha_compute = googleapiclient.discovery.build('compute', 'alpha')
try:
    # Resolve (or create) all GCP resources, then run the requested test
    # cases against them; the finally block tears everything down unless
    # --keep_gcp_resources is set.
    gcp = GcpState(compute, alpha_compute, args.project_id)
    health_check_name = _BASE_HEALTH_CHECK_NAME + args.gcp_suffix
    firewall_name = _BASE_FIREWALL_RULE_NAME + args.gcp_suffix
    backend_service_name = _BASE_BACKEND_SERVICE_NAME + args.gcp_suffix
    alternate_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-alternate' + args.gcp_suffix
    url_map_name = _BASE_URL_MAP_NAME + args.gcp_suffix
    service_host_name = _BASE_SERVICE_HOST + args.gcp_suffix
    target_proxy_name = _BASE_TARGET_PROXY_NAME + args.gcp_suffix
    forwarding_rule_name = _BASE_FORWARDING_RULE_NAME + args.gcp_suffix
    template_name = _BASE_TEMPLATE_NAME + args.gcp_suffix
    instance_group_name = _BASE_INSTANCE_GROUP_NAME + args.gcp_suffix
    same_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-same-zone' + args.gcp_suffix
    secondary_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-secondary-zone' + args.gcp_suffix
    if args.use_existing_gcp_resources:
        logger.info('Reusing existing GCP resources')
        get_health_check(gcp, health_check_name)
        try:
            get_health_check_firewall_rule(gcp, firewall_name)
        except googleapiclient.errors.HttpError as http_error:
            # Firewall rule may be auto-deleted periodically depending on GCP
            # project settings.
            logger.exception('Failed to find firewall rule, recreating')
            create_health_check_firewall_rule(gcp, firewall_name)
        backend_service = get_backend_service(gcp, backend_service_name)
        alternate_backend_service = get_backend_service(
            gcp, alternate_backend_service_name)
        get_url_map(gcp, url_map_name)
        get_target_proxy(gcp, target_proxy_name)
        get_global_forwarding_rule(gcp, forwarding_rule_name)
        get_instance_template(gcp, template_name)
        instance_group = get_instance_group(gcp, args.zone, instance_group_name)
        same_zone_instance_group = get_instance_group(
            gcp, args.zone, same_zone_instance_group_name)
        secondary_zone_instance_group = get_instance_group(
            gcp, args.secondary_zone, secondary_zone_instance_group_name)
    else:
        # Full resource creation path.
        create_health_check(gcp, health_check_name)
        create_health_check_firewall_rule(gcp, firewall_name)
        backend_service = add_backend_service(gcp, backend_service_name)
        alternate_backend_service = add_backend_service(
            gcp, alternate_backend_service_name)
        create_url_map(gcp, url_map_name, backend_service, service_host_name)
        create_target_proxy(gcp, target_proxy_name)
        # Try ports in random order until the forwarding rule lands on a
        # free one.
        potential_service_ports = list(args.service_port_range)
        random.shuffle(potential_service_ports)
        create_global_forwarding_rule(gcp, forwarding_rule_name,
                                      potential_service_ports)
        if not gcp.service_port:
            raise Exception(
                'Failed to find a valid ip:port for the forwarding rule')
        if gcp.service_port != _DEFAULT_SERVICE_PORT:
            patch_url_map_host_rule_with_port(gcp, url_map_name,
                                              backend_service,
                                              service_host_name)
        startup_script = get_startup_script(args.path_to_server_binary,
                                            gcp.service_port)
        create_instance_template(gcp, template_name, args.network,
                                 args.source_image, args.machine_type,
                                 startup_script)
        instance_group = add_instance_group(gcp, args.zone, instance_group_name,
                                            _INSTANCE_GROUP_SIZE)
        patch_backend_instances(gcp, backend_service, [instance_group])
        same_zone_instance_group = add_instance_group(
            gcp, args.zone, same_zone_instance_group_name, _INSTANCE_GROUP_SIZE)
        secondary_zone_instance_group = add_instance_group(
            gcp, args.secondary_zone, secondary_zone_instance_group_name,
            _INSTANCE_GROUP_SIZE)
    wait_for_healthy_backends(gcp, backend_service, instance_group)
    if args.test_case:
        # Clients dial by host name; append the port unless the default
        # service port is in use.
        if gcp.service_port == _DEFAULT_SERVICE_PORT:
            server_uri = service_host_name
        else:
            server_uri = service_host_name + ':' + str(gcp.service_port)
        if args.bootstrap_file:
            bootstrap_path = os.path.abspath(args.bootstrap_file)
        else:
            with tempfile.NamedTemporaryFile(delete=False) as bootstrap_file:
                bootstrap_file.write(
                    _BOOTSTRAP_TEMPLATE.format(
                        node_id=socket.gethostname()).encode('utf-8'))
                bootstrap_path = bootstrap_file.name
        client_env = dict(os.environ, GRPC_XDS_BOOTSTRAP=bootstrap_path)
        test_results = {}
        failed_tests = []
        for test_case in args.test_case:
            result = jobset.JobResult()
            log_dir = os.path.join(_TEST_LOG_BASE_DIR, test_case)
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)
            test_log_filename = os.path.join(log_dir, _SPONGE_LOG_NAME)
            test_log_file = open(test_log_filename, 'w+')
            client_process = None
            # Per-test-case client flags.
            if test_case in _TESTS_TO_RUN_MULTIPLE_RPCS:
                rpcs_to_send = '--rpc="UnaryCall,EmptyCall"'
            else:
                rpcs_to_send = '--rpc="UnaryCall"'
            if test_case in _TESTS_TO_SEND_METADATA:
                metadata_to_send = '--metadata="EmptyCall:{key}:{value}"'.format(
                    key=_TEST_METADATA_KEY, value=_TEST_METADATA_VALUE)
            else:
                metadata_to_send = '--metadata=""'
            if test_case in _TESTS_TO_FAIL_ON_RPC_FAILURE:
                fail_on_failed_rpc = '--fail_on_failed_rpc=true'
            else:
                fail_on_failed_rpc = '--fail_on_failed_rpc=false'
            client_cmd_formatted = args.client_cmd.format(
                server_uri=server_uri,
                stats_port=args.stats_port,
                qps=args.qps,
                fail_on_failed_rpc=fail_on_failed_rpc,
                rpcs_to_send=rpcs_to_send,
                metadata_to_send=metadata_to_send)
            logger.debug('running client: %s', client_cmd_formatted)
            client_cmd = shlex.split(client_cmd_formatted)
            try:
                client_process = subprocess.Popen(client_cmd,
                                                  env=client_env,
                                                  stderr=subprocess.STDOUT,
                                                  stdout=test_log_file)
                if test_case == 'backends_restart':
                    test_backends_restart(gcp, backend_service, instance_group)
                elif test_case == 'change_backend_service':
                    test_change_backend_service(gcp, backend_service,
                                                instance_group,
                                                alternate_backend_service,
                                                same_zone_instance_group)
                elif test_case == 'gentle_failover':
                    test_gentle_failover(gcp, backend_service, instance_group,
                                         secondary_zone_instance_group)
                elif test_case == 'new_instance_group_receives_traffic':
                    test_new_instance_group_receives_traffic(
                        gcp, backend_service, instance_group,
                        same_zone_instance_group)
                elif test_case == 'ping_pong':
                    test_ping_pong(gcp, backend_service, instance_group)
                elif test_case == 'remove_instance_group':
                    test_remove_instance_group(gcp, backend_service,
                                               instance_group,
                                               same_zone_instance_group)
                elif test_case == 'round_robin':
                    test_round_robin(gcp, backend_service, instance_group)
                elif test_case == 'secondary_locality_gets_no_requests_on_partial_primary_failure':
                    test_secondary_locality_gets_no_requests_on_partial_primary_failure(
                        gcp, backend_service, instance_group,
                        secondary_zone_instance_group)
                elif test_case == 'secondary_locality_gets_requests_on_primary_failure':
                    test_secondary_locality_gets_requests_on_primary_failure(
                        gcp, backend_service, instance_group,
                        secondary_zone_instance_group)
                elif test_case == 'traffic_splitting':
                    test_traffic_splitting(gcp, backend_service, instance_group,
                                           alternate_backend_service,
                                           same_zone_instance_group)
                elif test_case == 'path_matching':
                    test_path_matching(gcp, backend_service, instance_group,
                                       alternate_backend_service,
                                       same_zone_instance_group)
                elif test_case == 'header_matching':
                    test_header_matching(gcp, backend_service, instance_group,
                                         alternate_backend_service,
                                         same_zone_instance_group)
                else:
                    logger.error('Unknown test case: %s', test_case)
                    sys.exit(1)
                # A live client is required for a passing test: if it died
                # mid-run, the test result is invalid.
                if client_process.poll() is not None:
                    raise Exception(
                        'Client process exited prematurely with exit code %d' %
                        client_process.returncode)
                result.state = 'PASSED'
                result.returncode = 0
            except Exception as e:
                logger.exception('Test case %s failed', test_case)
                failed_tests.append(test_case)
                result.state = 'FAILED'
                result.message = str(e)
            finally:
                if client_process and not client_process.returncode:
                    client_process.terminate()
                test_log_file.close()
                # Workaround for Python 3, as report_utils will invoke decode() on
                # result.message, which has a default value of ''.
                result.message = result.message.encode('UTF-8')
                test_results[test_case] = [result]
                if args.log_client_output:
                    logger.info('Client output:')
                    with open(test_log_filename, 'r') as client_output:
                        logger.info(client_output.read())
        if not os.path.exists(_TEST_LOG_BASE_DIR):
            os.makedirs(_TEST_LOG_BASE_DIR)
        report_utils.render_junit_xml_report(test_results,
                                             os.path.join(
                                                 _TEST_LOG_BASE_DIR,
                                                 _SPONGE_XML_NAME),
                                             suite_name='xds_tests',
                                             multi_target=True)
        if failed_tests:
            logger.error('Test case(s) %s failed', failed_tests)
            sys.exit(1)
finally:
    if not args.keep_gcp_resources:
        logger.info('Cleaning up GCP resources. This may take some time.')
        clean_up(gcp)
| |
#!/usr/bin/env python
# coding:utf-8
"""
GAEProxyHandler is the handler of http proxy port. default to 8087
if HTTP request:
do_METHOD()
elif HTTPS request:
do_CONNECT()
What is Direct mode:
if user access google site like www.google.com, client.google.com,
we don't need forward request to GAE server.
we can send the original request to google ip directly.
because most google ip act as general front server.
Youtube content server do not support direct mode.
look direct_handler.py for more detail.
What GAE mode:
Google App Engine support urlfetch for proxy.
every google account can apply 12 appid.
after deploy server code under gae_proxy/server/gae to GAE server, user can
use GAE server as http proxy.
Here is the global link view:
Browser => GAE_proxy => GAE server => target http/https server.
look gae_hander.py for more detail.
"""
import errno
import socket
import ssl
import urlparse
import OpenSSL
NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError)
from xlog import getLogger
xlog = getLogger("gae_proxy")
import simple_http_client
import simple_http_server
from cert_util import CertUtil
import gae_handler
import direct_handler
import web_control
from front import front
class GAEProxyHandler(simple_http_server.HttpServerHandler):
gae_support_methods = tuple(["GET", "POST", "HEAD", "PUT", "DELETE", "PATCH"])
# GAE don't support command like OPTION
bufsize = 256*1024
max_retry = 3
local_names = []
def setup(self):
    """Install verb handlers and pre-build the self-check response."""
    # All plain HTTP verbs share one implementation; alias them on the
    # class so the BaseHTTPServer-style dispatcher finds them.
    cls = self.__class__
    for verb in ("GET", "PUT", "POST", "HEAD", "DELETE", "OPTIONS"):
        setattr(cls, "do_" + verb, cls.do_METHOD)
    # Canned response used by the web UI to auto-detect that the browser
    # proxy setting is working.
    self.self_check_response_data = (
        "HTTP/1.1 200 OK\r\n"
        "Access-Control-Allow-Origin: *\r\n"
        "Cache-Control: no-cache, no-store, must-revalidate\r\n"
        "Pragma: no-cache\r\n"
        "Expires: 0\r\n"
        "Content-Type: text/plain\r\n"
        "Keep-Alive:\r\n"
        "Persist:\r\n"
        "Connection: Keep-Alive, Persist\r\n"
        "Content-Length: 2\r\n\r\nOK")
    self.fake_host = web_control.get_fake_host()
def forward_local(self):
    """
    If browser send localhost:xxx request to GAE_proxy,
    we forward it to localhost.
    """
    headers = dict((key.title(), value) for key, value in self.headers.items())
    body = b''
    if 'Content-Length' in headers:
        try:
            body_len = int(headers.get('Content-Length', 0))
            body = self.rfile.read(body_len)
        except Exception as e:
            xlog.warn('forward_local read payload failed:%s', e)
            return
    response = simple_http_client.request(self.command, self.path, headers, body)
    if not response:
        xlog.warn("forward_local fail, command:%s, path:%s, headers: %s, payload: %s",
                  self.command, self.path, headers, body)
        return
    # Re-serialize the upstream response back to the browser.
    parts = ["HTTP/1.1 %d\r\n" % response.status]
    for name in response.headers:
        name = name.title()
        parts.append("%s: %s\r\n" % (name, response.headers[name]))
    parts.append("\r\n")
    parts.append(response.text)
    self.wfile.write("".join(parts))
def send_method_allows(self, headers, payload):
    """Answer a CORS preflight (OPTIONS) request directly."""
    xlog.debug("send method allow list for:%s %s", self.command, self.path)
    # Refer: https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Preflighted_requests
    lines = [
        "HTTP/1.1 200 OK\r\n"
        "Access-Control-Allow-Credentials: true\r\n"
        "Access-Control-Allow-Methods: GET, POST, HEAD, PUT, DELETE, PATCH\r\n"
        "Access-Control-Max-Age: 1728000\r\n"
        "Content-Length: 0\r\n"
    ]
    req_header = headers.get("Access-Control-Request-Headers", "")
    if req_header:
        lines.append("Access-Control-Allow-Headers: %s\r\n" % req_header)
    origin = headers.get("Origin", "")
    if origin:
        lines.append("Access-Control-Allow-Origin: %s\r\n" % origin)
    else:
        lines.append("Access-Control-Allow-Origin: *\r\n")
    lines.append("\r\n")
    self.wfile.write("".join(lines))
def is_local(self, hosts):
    """Return True if any entry of *hosts* refers to this machine.

    Matches loopback/private address prefixes and the machine's own
    hostnames/IPs, which are resolved lazily on first call and cached in
    the class-level local_names list.
    """
    if 0 == len(self.local_names):
        self.local_names.append('localhost')
        self.local_names.append(socket.gethostname().lower())
        try:
            # Fix: gethostbyname_ex() returns (hostname, aliases, ip_list);
            # the ip_list must be extend-ed so each IP is an individual
            # entry -- appending the list itself meant no IP ever matched
            # the membership test below.
            self.local_names.extend(
                socket.gethostbyname_ex(socket.gethostname())[-1])
        except socket.gaierror:
            # TODO Append local IP address to local_names
            pass
    for s in hosts:
        s = s.lower()
        # Fix: removed a stray Python-2 debug `print s` left in this branch.
        if s.startswith('127.') \
                or s.startswith('192.168.') \
                or s.startswith('10.') \
                or s.startswith('169.254.') \
                or s in self.local_names:
            return True
    return False
def do_METHOD(self):
    """Route a plain HTTP(S) request.

    Routing order: local hosts -> built-in fake host self-check ->
    exact GAE/direct host lists -> GAE/direct suffix lists -> GAE by
    default.  Plain-HTTP requests for direct hosts are 301-redirected
    to https (to avoid keyword filtering on the wire).
    """
    self.close_connection = 0
    host = self.headers.get('Host', '')
    host_ip, _, port = host.rpartition(':')
    # Scheme is inferred from whether the client connection is TLS.
    if isinstance(self.connection, ssl.SSLSocket):
        method = "https"
    else:
        method = "http"
    # Normalize self.path to an absolute URL.
    if self.path[0] == '/' and host:
        self.path = '%s://%s%s' % (method, host, self.path)
    elif not host and '://' in self.path:
        host = urlparse.urlparse(self.path).netloc
    if self.is_local([host, host_ip]):
        xlog.debug("Browse localhost by proxy")
        return self.forward_local()
    if host == self.fake_host:
        # xlog.debug("%s %s", self.command, self.path)
        # for web_ui status page
        # auto detect browser proxy setting is work
        return self.wfile.write(self.self_check_response_data)
    self.parsed_url = urlparse.urlparse(self.path)
    if host in front.config.HOSTS_GAE:
        return self.do_AGENT()
    # redirect http request to https request
    # avoid key word filter when pass through GFW
    if host in front.config.HOSTS_DIRECT:
        if isinstance(self.connection, ssl.SSLSocket):
            return self.do_DIRECT()
        else:
            xlog.debug("Host:%s Direct redirect to https", host)
            return self.wfile.write(('HTTP/1.1 301\r\nLocation: %s\r\nContent-Length: 0\r\n\r\n' % self.path.replace('http://', 'https://', 1)).encode())
    if host.endswith(front.config.HOSTS_GAE_ENDSWITH):
        return self.do_AGENT()
    if host.endswith(front.config.HOSTS_DIRECT_ENDSWITH):
        if isinstance(self.connection, ssl.SSLSocket):
            return self.do_DIRECT()
        else:
            xlog.debug("Host:%s Direct redirect to https", host)
            return self.wfile.write(('HTTP/1.1 301\r\nLocation: %s\r\nContent-Length: 0\r\n\r\n' % self.path.replace('http://', 'https://', 1)).encode())
    # Default: everything else goes through GAE.
    return self.do_AGENT()
# Called by do_METHOD and do_CONNECT_AGENT
def do_AGENT(self):
    """Read the request body (Content-Length or chunked) and hand the
    request to gae_handler.

    OPTIONS preflights are answered locally; any method GAE cannot
    proxy gets a 404.
    """
    def get_crlf(rfile):
        # Consume the CRLF that terminates each chunk body.
        crlf = rfile.readline(2)
        if crlf != "\r\n":
            xlog.warn("chunk header read fail crlf")
    request_headers = dict((k.title(), v) for k, v in self.headers.items())
    payload = b''
    if 'Content-Length' in request_headers:
        try:
            payload_len = int(request_headers.get('Content-Length', 0))
            #xlog.debug("payload_len:%d %s %s", payload_len, self.command, self.path)
            payload = self.rfile.read(payload_len)
        except NetWorkIOError as e:
            xlog.error('handle_method_urlfetch read payload failed:%s', e)
            return
    elif 'Transfer-Encoding' in request_headers:
        # chunked, used by facebook android client
        payload = ""
        while True:
            # Each chunk starts with "<hex-size>[;ext]\r\n".
            chunk_size_str = self.rfile.readline(65537)
            chunk_size_list = chunk_size_str.split(";")
            # base-0 int() with an "0x" prefix parses the hex size
            # (trailing \r\n is stripped as whitespace by int()).
            chunk_size = int("0x"+chunk_size_list[0], 0)
            if len(chunk_size_list) > 1 and chunk_size_list[1] != "\r\n":
                xlog.warn("chunk ext: %s", chunk_size_str)
            if chunk_size == 0:
                # Last chunk: drain trailer headers until the blank line.
                while True:
                    line = self.rfile.readline(65537)
                    if line == "\r\n":
                        break
                    else:
                        xlog.warn("entity header:%s", line)
                break
            payload += self.rfile.read(chunk_size)
            get_crlf(self.rfile)
    if self.command == "OPTIONS":
        return self.send_method_allows(request_headers, payload)
    if self.command not in self.gae_support_methods:
        xlog.warn("Method %s not support in GAEProxy for %s", self.command, self.path)
        return self.wfile.write(('HTTP/1.1 404 Not Found\r\n\r\n').encode())
    xlog.debug("GAE %s %s from:%s", self.command, self.path, self.address_string())
    gae_handler.handler(self.command, self.path, request_headers, payload, self.wfile)
def do_CONNECT(self):
    """Route a CONNECT tunnel to the GAE or direct handler by host rules."""
    self.close_connection = 0
    host = self.path.rpartition(':')[0]
    # Exact host lists take precedence over suffix lists; GAE is the
    # default when nothing matches.
    if host in front.config.HOSTS_GAE:
        return self.do_CONNECT_AGENT()
    if host in front.config.HOSTS_DIRECT:
        return self.do_CONNECT_DIRECT()
    if host.endswith(front.config.HOSTS_GAE_ENDSWITH):
        return self.do_CONNECT_AGENT()
    if host.endswith(front.config.HOSTS_DIRECT_ENDSWITH):
        return self.do_CONNECT_DIRECT()
    return self.do_CONNECT_AGENT()
def do_CONNECT_AGENT(self):
    """Complete the CONNECT handshake with a locally-generated (fake)
    certificate, then read the inner request and dispatch it via GAE."""
    # GAE supports the following HTTP methods: GET, POST, HEAD, PUT, DELETE, and PATCH
    host, _, port = self.path.rpartition(':')
    port = int(port)
    certfile = CertUtil.get_cert(host)
    # xlog.info('https GAE %s %s:%d ', self.command, host, port)
    self.wfile.write(b'HTTP/1.1 200 OK\r\n\r\n')
    try:
        ssl_sock = ssl.wrap_socket(self.connection, keyfile=CertUtil.cert_keyfile, certfile=certfile, server_side=True)
    except ssl.SSLError as e:
        xlog.info('ssl error: %s, create full domain cert for host:%s', e, host)
        # NOTE(review): the regenerated full-domain cert is not used here;
        # the handler returns and appears to rely on the client retrying.
        certfile = CertUtil.get_cert(host, full_name=True)
        return
    except Exception as e:
        if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET):
            xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0])
        return
    # Swap the plain-socket streams for the TLS-wrapped ones, keeping the
    # originals so the connection can be restored/cleaned up elsewhere.
    self.__realwfile = self.wfile
    self.__realrfile = self.rfile
    self.connection = ssl_sock
    self.rfile = self.connection.makefile('rb', self.bufsize)
    self.wfile = self.connection.makefile('wb', 0)
    # Parse the request that arrives inside the tunnel.
    self.parse_request()
    if self.path[0] == '/' and host:
        self.path = 'https://%s%s' % (self.headers['Host'], self.path)
    if self.path == "https://%s/xxnet" % self.fake_host:
        # for web_ui status page
        # auto detect browser proxy setting is work
        xlog.debug("CONNECT %s %s", self.command, self.path)
        return self.wfile.write(self.self_check_response_data)
    try:
        if self.path[0] == '/' and host:
            self.path = 'http://%s%s' % (host, self.path)
        elif not host and '://' in self.path:
            host = urlparse.urlparse(self.path).netloc
        self.parsed_url = urlparse.urlparse(self.path)
        return self.do_AGENT()
    except NetWorkIOError as e:
        # Benign disconnects are swallowed; anything else propagates.
        if e.args[0] not in (errno.ECONNABORTED, errno.ETIMEDOUT, errno.EPIPE):
            raise
def do_CONNECT_DIRECT(self):
    """Complete the CONNECT handshake with a locally-generated (fake)
    certificate, then forward the inner request directly (no GAE).

    Only port 443 tunnels are supported.
    """
    host, _, port = self.path.rpartition(':')
    port = int(port)
    if port != 443:
        xlog.warn("CONNECT %s port:%d not support", host, port)
        return
    certfile = CertUtil.get_cert(host)
    self.wfile.write(b'HTTP/1.1 200 OK\r\n\r\n')
    try:
        ssl_sock = ssl.wrap_socket(self.connection, keyfile=CertUtil.cert_keyfile, certfile=certfile, server_side=True)
    except ssl.SSLError as e:
        xlog.info('ssl error: %s, create full domain cert for host:%s', e, host)
        # NOTE(review): the regenerated full-domain cert is not used here;
        # the handler returns and appears to rely on the client retrying.
        certfile = CertUtil.get_cert(host, full_name=True)
        return
    except Exception as e:
        if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET):
            xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0])
        return
    # Swap the plain-socket streams for the TLS-wrapped ones.
    self.__realwfile = self.wfile
    self.__realrfile = self.rfile
    self.connection = ssl_sock
    self.rfile = self.connection.makefile('rb', self.bufsize)
    self.wfile = self.connection.makefile('wb', 0)
    # Parse the request inside the tunnel, then forward it directly.
    self.parse_request()
    self.do_DIRECT()
def do_DIRECT(self):
    """Forward the current request straight to the origin server via
    direct_handler (no GAE relay)."""
    if self.path[0] == '/':
        # Origin-form request: rebuild the absolute URL from Host.
        host = self.headers['Host']
        path = self.path
        url = 'https://%s%s' % (host, self.path)
    else:
        # Absolute-form request: split host and path+query out of it.
        url = self.path
        self.parsed_url = urlparse.urlparse(self.path)
        host = self.parsed_url[1]
        if len(self.parsed_url[4]):
            # Re-attach the query string to the path.
            path = '?'.join([self.parsed_url[2], self.parsed_url[4]])
        else:
            path = self.parsed_url[2]
    xlog.debug('DIRECT %s %s', self.command, url)
    request_headers = dict((k.title(), v) for k, v in self.headers.items())
    if 'Content-Length' in request_headers:
        try:
            payload_len = int(request_headers.get('Content-Length', 0))
            # xlog.debug("payload_len:%d %s %s", payload_len, self.command, self.path)
            payload = self.rfile.read(payload_len)
        except NetWorkIOError as e:
            xlog.error('Direct %s read payload failed:%s', url, e)
            return
    else:
        payload = b''
    try:
        direct_handler.handler(self.command, host, path, request_headers, payload, self.wfile)
    except NetWorkIOError as e:
        xlog.warn('DIRECT %s %s except:%r', self.command, url, e)
        # Benign disconnects are swallowed; anything else propagates.
        if e.args[0] not in (errno.ECONNABORTED, errno.ETIMEDOUT, errno.EPIPE):
            raise
# called by smart_router
def wrap_ssl(sock, host, port, client_address):
    """Wrap an accepted client socket in server-side TLS.

    Uses a generated certificate for *host*; an empty host falls back to
    'www.google.com'.  Returns the wrapped socket.
    """
    cert_name = host or 'www.google.com'
    certificate = CertUtil.get_cert(cert_name)
    return ssl.wrap_socket(
        sock,
        keyfile=CertUtil.cert_keyfile,
        certfile=certificate,
        server_side=True,
    )
| |
__author__ = 'Sean Griffin'
__version__ = '1.0.0'
__email__ = 'sean@thoughtbot.com'
import sys
import os.path
import json
import shutil
from pymel.core import *
from maya.OpenMaya import *
from maya.OpenMayaMPx import *
# Translator name registered with Maya (shown in the export dialog).
kPluginTranslatorTypeName = 'Three.js'
# Option script name passed to registerFileTranslator below.
kOptionScript = 'ThreeJsExportScript'
# Default option string passed to registerFileTranslator below.
kDefaultOptionsString = '0'
# Decimal places kept when rounding exported floats.
FLOAT_PRECISION = 8
class ThreeJsWriter(object):
    """Exports the current Maya scene to the three.js JSON model format
    (formatVersion 3.1): geometry, materials, bones, skins and keyframe
    or baked (morph-target) animations.

    NOTE(review): Python 2 / legacy Maya code -- relies on ``file()``,
    ``dict.has_key`` and list-returning ``map``/``filter``.
    """

    def __init__(self):
        # Option keywords recognised in the export option string; each
        # becomes a boolean key in self.options (see _parseOptions).
        self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'faces',
            'materials', 'colorMaps', 'specularMaps', 'bumpMaps', 'copyTextures',
            'bones', 'skeletalAnim', 'bakeAnimations', 'prettyOutput']

    def write(self, path, optionString, accessMode):
        """Collect all requested scene data and write the JSON file.

        path: output file path.
        optionString: space-separated options from the export dialog.
        accessMode: Maya export mode (export-all vs export-selected).
        """
        self.path = path
        self.accessMode = accessMode
        self._parseOptions(optionString)
        # Running offsets so indices of later meshes point past the data
        # already emitted for earlier meshes.
        self.verticeOffset = 0
        self.uvOffset = 0
        self.normalOffset = 0
        self.vertices = []
        self.materials = []
        self.faces = []
        self.normals = []
        self.uvs = []
        self.morphTargets = []
        self.bones = []
        self.animations = []
        self.skinIndices = []
        self.skinWeights = []
        print("exporting meshes")
        self._exportMeshes()
        if self.options["materials"]:
            print("exporting materials")
            self._exportMaterials()
        # Animation/bone data is only exported when writing the whole scene.
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            if self.options["bakeAnimations"]:
                print("exporting animations")
                self._exportAnimations()
                self._goToFrame(self.options["startFrame"])
            if self.options["bones"]:
                print("exporting bones")
                # Go to bind pose before sampling bone transforms.
                select(map(lambda m: m.getParent(), ls(type='mesh')))
                runtime.GoToBindPose()
                self._exportBones()
                print("exporting skins")
                self._exportSkins()
            if self.options["skeletalAnim"]:
                print("exporting keyframe animations")
                self._exportKeyframeAnimations()
        print("writing file")
        output = {
            'metadata': {
                'formatVersion': 3.1,
                'generatedBy': 'Maya Exporter'
            },
            'vertices': self.vertices,
            'uvs': [self.uvs],
            'faces': self.faces,
            'normals': self.normals,
            'materials': self.materials,
        }
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            if self.options['bakeAnimations']:
                output['morphTargets'] = self.morphTargets
            if self.options['bones']:
                output['bones'] = self.bones
                output['skinIndices'] = self.skinIndices
                output['skinWeights'] = self.skinWeights
                output['influencesPerVertex'] = self.options["influencesPerVertex"]
            if self.options['skeletalAnim']:
                output['animations'] = self.animations
        # NOTE: ``file`` is the Python 2 built-in alias of ``open``.
        with file(path, 'w') as f:
            if self.options['prettyOutput']:
                f.write(json.dumps(output, indent=4, separators=(", ", ": ")))
            else:
                f.write(json.dumps(output, separators=(",",":")))

    def _allMeshes(self):
        """Return the meshes to export: all connected meshes in the scene,
        or the current selection in export-selected mode.  Recomputed on
        every call."""
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            print("*** Exporting ALL (NEW) ***")
            self.__allMeshes = filter(lambda m: len(m.listConnections()) > 0, ls(type='mesh'))
        else :
            print("### Exporting SELECTED ###")
            self.__allMeshes = ls(selection=True)
        return self.__allMeshes

    def _parseOptions(self, optionsString):
        """Populate self.options from the option string.

        Each component keyword becomes a boolean (present/absent); the
        numeric parameters for 'bones' and 'bakeAnimations' follow their
        keyword as space-separated values.
        """
        self.options = dict([(x, False) for x in self.componentKeys])
        for key in self.componentKeys:
            self.options[key] = key in optionsString
        if self.options["bones"]:
            boneOptionsString = optionsString[optionsString.find("bones"):]
            boneOptions = boneOptionsString.split(' ')
            self.options["influencesPerVertex"] = int(boneOptions[1])
        if self.options["bakeAnimations"]:
            bakeAnimOptionsString = optionsString[optionsString.find("bakeAnimations"):]
            bakeAnimOptions = bakeAnimOptionsString.split(' ')
            self.options["startFrame"] = int(bakeAnimOptions[1])
            self.options["endFrame"] = int(bakeAnimOptions[2])
            self.options["stepFrame"] = int(bakeAnimOptions[3])

    def _exportMeshes(self):
        # Vertices are exported in one pass for all meshes; per-mesh data
        # (faces, normals, uvs) follows.
        if self.options['vertices']:
            self._exportVertices()
        for mesh in self._allMeshes():
            self._exportMesh(mesh)

    def _exportMesh(self, mesh):
        """Export faces, normals and UVs for one mesh, advancing the
        running index offsets."""
        print("Exporting " + mesh.name())
        if self.options['faces']:
            print("Exporting faces")
            self._exportFaces(mesh)
            self.verticeOffset += len(mesh.getPoints())
            self.uvOffset += mesh.numUVs()
            self.normalOffset += mesh.numNormals()
        if self.options['normals']:
            print("Exporting normals")
            self._exportNormals(mesh)
        if self.options['uvs']:
            print("Exporting UVs")
            self._exportUVs(mesh)

    def _getMaterialIndex(self, face, mesh):
        """Return the index into self.materials for *face*, or -1.

        The name->index map is built once on first call, so materials must
        be exported before faces reference them.
        """
        if not hasattr(self, '_materialIndices'):
            self._materialIndices = dict([(mat['DbgName'], i) for i, mat in enumerate(self.materials)])
        if self.options['materials']:
            for engine in mesh.listConnections(type='shadingEngine'):
                if sets(engine, isMember=face) or sets(engine, isMember=mesh):
                    for material in engine.listConnections(type='lambert'):
                        if self._materialIndices.has_key(material.name()):
                            return self._materialIndices[material.name()]
        return -1

    def _exportVertices(self):
        self.vertices += self._getVertices()

    def _exportAnimations(self):
        # Baked animation: one morph target per sampled frame.
        for frame in self._framesToExport():
            self._exportAnimationForFrame(frame)

    def _framesToExport(self):
        return range(self.options["startFrame"], self.options["endFrame"], self.options["stepFrame"])

    def _exportAnimationForFrame(self, frame):
        """Sample the whole scene at *frame* as a morph target."""
        print("exporting frame " + str(frame))
        self._goToFrame(frame)
        self.morphTargets.append({
            'name': "frame_" + str(frame),
            'vertices': self._getVertices()
        })

    def _getVertices(self):
        # Flat [x, y, z, x, y, z, ...] list over all meshes, world space.
        return [coord for mesh in self._allMeshes() for point in mesh.getPoints(space='world') for coord in [round(point.x, FLOAT_PRECISION), round(point.y, FLOAT_PRECISION), round(point.z, FLOAT_PRECISION)]]

    def _goToFrame(self, frame):
        # Move the Maya timeline to *frame*.
        currentTime(frame)

    def _exportFaces(self, mesh):
        """Emit the three.js face stream: per face a bitmask, vertex
        indices, then optional material index, UV indices and normal
        indices (order mandated by the format)."""
        typeBitmask = self._getTypeBitmask()
        for face in mesh.faces:
            materialIndex = self._getMaterialIndex(face, mesh)
            hasMaterial = materialIndex != -1
            self._exportFaceBitmask(face, typeBitmask, hasMaterial=hasMaterial)
            self.faces += map(lambda x: x + self.verticeOffset, face.getVertices())
            if self.options['materials']:
                if hasMaterial:
                    self.faces.append(materialIndex)
            if self.options['uvs'] and face.hasUVs():
                self.faces += map(lambda v: face.getUVIndex(v) + self.uvOffset, range(face.polygonVertexCount()))
            if self.options['normals']:
                self._exportFaceVertexNormals(face)

    def _exportFaceBitmask(self, face, typeBitmask, hasMaterial=True):
        """Append the per-face flag byte: bit 0 = quad, bit 1 = has
        material, bit 3 = has face-vertex UVs (plus the shared type bits)."""
        if face.polygonVertexCount() == 4:
            faceBitmask = 1
        else:
            faceBitmask = 0
        if hasMaterial:
            faceBitmask |= (1 << 1)
        if self.options['uvs'] and face.hasUVs():
            faceBitmask |= (1 << 3)
        self.faces.append(typeBitmask | faceBitmask)

    def _exportFaceVertexNormals(self, face):
        for i in range(face.polygonVertexCount()):
            self.faces.append(face.normalIndex(i) + self.normalOffset)

    def _exportNormals(self, mesh):
        for normal in mesh.getNormals():
            self.normals += [round(normal.x, FLOAT_PRECISION), round(normal.y, FLOAT_PRECISION), round(normal.z, FLOAT_PRECISION)]

    def _exportUVs(self, mesh):
        # Interleave as [u0, v0, u1, v1, ...].
        us, vs = mesh.getUVs()
        for i, u in enumerate(us):
            self.uvs.append(u)
            self.uvs.append(vs[i])

    def _getTypeBitmask(self):
        # Bit 5 (32) marks faces as carrying face-vertex normals.
        bitmask = 0
        if self.options['normals']:
            bitmask |= 32
        return bitmask

    def _exportMaterials(self):
        # Every lambert-derived material in the history of the exported
        # meshes.
        hist = listHistory( self._allMeshes(), f=1 )
        mats = listConnections( hist, type='lambert' )
        for mat in mats:
            print("material: " + mat)
            self.materials.append(self._exportMaterial(mat))

    def _exportMaterial(self, mat):
        """Translate one Maya material into a three.js material dict."""
        result = {
            "DbgName": mat.name(),
            "blending": "NormalBlending",
            "colorDiffuse": map(lambda i: i * mat.getDiffuseCoeff(), mat.getColor().rgb),
            "depthTest": True,
            "depthWrite": True,
            "shading": mat.__class__.__name__,
            "opacity": mat.getTransparency().r,
            "transparent": mat.getTransparency().r != 1.0,
            "vertexColors": False
        }
        # Phong materials additionally carry specular settings.
        if isinstance(mat, nodetypes.Phong):
            result["colorSpecular"] = mat.getSpecularColor().rgb
            result["reflectivity"] = mat.getReflectivity()
            result["specularCoef"] = mat.getCosPower()
            if self.options["specularMaps"]:
                self._exportSpecularMap(result, mat)
        if self.options["bumpMaps"]:
            self._exportBumpMap(result, mat)
        if self.options["colorMaps"]:
            self._exportColorMap(result, mat)
        return result

    def _exportBumpMap(self, result, mat):
        for bump in mat.listConnections(type='bump2d'):
            for f in bump.listConnections(type='file'):
                result["mapNormalFactor"] = 1
                self._exportFile(result, f, "Normal")

    def _exportColorMap(self, result, mat):
        for f in mat.attr('color').inputs():
            result["colorDiffuse"] = f.attr('defaultColor').get()
            self._exportFile(result, f, "Diffuse")

    def _exportSpecularMap(self, result, mat):
        for f in mat.attr('specularColor').inputs():
            result["colorSpecular"] = f.attr('defaultColor').get()
            self._exportFile(result, f, "Specular")

    def _exportFile(self, result, mapFile, mapType):
        """Record a texture file reference on *result*; optionally copy
        the texture next to the exported JSON."""
        src = mapFile.ftn.get()
        targetDir = os.path.dirname(self.path)
        fName = os.path.basename(src)
        if self.options['copyTextures']:
            shutil.copy2(src, os.path.join(targetDir, fName))
        result["map" + mapType] = fName
        result["map" + mapType + "Repeat"] = [1, 1]
        result["map" + mapType + "Wrap"] = ["repeat", "repeat"]
        result["map" + mapType + "Anisotropy"] = 4

    def _exportBones(self):
        """Export every joint in the meshes' history with its bind-pose
        translation and orientation quaternion."""
        hist = listHistory( self._allMeshes(), f=1 )
        joints = listConnections( hist, type="joint")
        for joint in joints:
            if joint.getParent():
                parentIndex = self._indexOfJoint(joint.getParent().name())
            else:
                parentIndex = -1
            rotq = joint.getRotation(quaternion=True) * joint.getOrientation()
            pos = joint.getTranslation()
            self.bones.append({
                "parent": parentIndex,
                "name": joint.name(),
                "pos": self._roundPos(pos),
                "rotq": self._roundQuat(rotq)
            })

    def _indexOfJoint(self, name):
        """Index of *name* among all scene joints (cached on first call),
        or -1 when unknown."""
        if not hasattr(self, '_jointNames'):
            self._jointNames = dict([(joint.name(), i) for i, joint in enumerate(ls(type='joint'))])
        if name in self._jointNames:
            return self._jointNames[name]
        else:
            return -1

    def _exportKeyframeAnimations(self):
        """Export one skeletal animation covering the playback range,
        with per-joint keyframe tracks."""
        hierarchy = []
        # NOTE(review): "parent" here is the previous joint's index in
        # iteration order (-1 for the first), not the true skeleton parent.
        i = -1
        frameRate = FramesPerSecond(currentUnit(query=True, time=True)).value()
        hist = listHistory( self._allMeshes(), f=1 )
        joints = listConnections( hist, type="joint")
        for joint in joints:
            hierarchy.append({
                "parent": i,
                "keys": self._getKeyframes(joint, frameRate)
            })
            i += 1
        self.animations.append({
            "name": "skeletalAction.001",
            "length": (playbackOptions(maxTime=True, query=True) - playbackOptions(minTime=True, query=True)) / frameRate,
            "fps": 1,
            "hierarchy": hierarchy
        })

    def _getKeyframes(self, joint, frameRate):
        """Sample *joint* at each of its keyframes plus the range ends."""
        firstFrame = playbackOptions(minTime=True, query=True)
        lastFrame = playbackOptions(maxTime=True, query=True)
        frames = sorted(list(set(keyframe(joint, query=True) + [firstFrame, lastFrame])))
        keys = []
        print("joint " + joint.name() + " has " + str(len(frames)) + " keyframes")
        for frame in frames:
            self._goToFrame(frame)
            keys.append(self._getCurrentKeyframe(joint, frame, frameRate))
        return keys

    def _getCurrentKeyframe(self, joint, frame, frameRate):
        # Time is expressed in seconds relative to the playback start.
        pos = joint.getTranslation()
        rot = joint.getRotation(quaternion=True) * joint.getOrientation()
        return {
            'time': (frame - playbackOptions(minTime=True, query=True)) / frameRate,
            'pos': self._roundPos(pos),
            'rot': self._roundQuat(rot),
            'scl': [1,1,1]
        }

    def _roundPos(self, pos):
        return map(lambda x: round(x, FLOAT_PRECISION), [pos.x, pos.y, pos.z])

    def _roundQuat(self, rot):
        return map(lambda x: round(x, FLOAT_PRECISION), [rot.x, rot.y, rot.z, rot.w])

    def _exportSkins(self):
        """Export per-vertex skin weights/joint indices, zero-padded to
        exactly influencesPerVertex entries per vertex."""
        for mesh in self._allMeshes():
            print("exporting skins for mesh: " + mesh.name())
            hist = listHistory( mesh, f=1 )
            skins = listConnections( hist, type='skinCluster')
            if len(skins) > 0:
                print("mesh has " + str(len(skins)) + " skins")
                # Only the first skin cluster is exported.
                skin = skins[0]
                joints = skin.influenceObjects()
                for weights in skin.getWeights(mesh.vtx):
                    numWeights = 0
                    for i in range(0, len(weights)):
                        if weights[i] > 0:
                            self.skinWeights.append(weights[i])
                            self.skinIndices.append(self._indexOfJoint(joints[i].name()))
                            numWeights += 1
                    if numWeights > self.options["influencesPerVertex"]:
                        raise Exception("More than " + str(self.options["influencesPerVertex"]) + " influences on a vertex in " + mesh.name() + ".")
                    # Pad so each vertex has a fixed-size influence list.
                    for i in range(0, self.options["influencesPerVertex"] - numWeights):
                        self.skinWeights.append(0)
                        self.skinIndices.append(0)
            else:
                print("mesh has no skins, appending 0")
                for i in range(0, len(mesh.getPoints()) * self.options["influencesPerVertex"]):
                    self.skinWeights.append(0)
                    self.skinIndices.append(0)
class NullAnimCurve(object):
    """Null-object animation curve: every sample evaluates to 0.0."""

    def getValue(self, index):
        """Return the constant 0.0 regardless of *index*."""
        return 0.0
class ThreeJsTranslator(MPxFileTranslator):
    """Maya file translator that exports scenes as three.js JSON."""

    def __init__(self):
        MPxFileTranslator.__init__(self)

    def haveWriteMethod(self):
        """This translator supports exporting (writing)."""
        return True

    def filter(self):
        """File-browser filter pattern for this format."""
        return '*.json'

    def defaultExtension(self):
        """Extension appended to files saved without one."""
        return 'json'

    def writer(self, fileObject, optionString, accessMode):
        """Delegate the actual export work to ThreeJsWriter."""
        target = fileObject.fullName()
        exporter = ThreeJsWriter()
        exporter.write(target, optionString, accessMode)
def translatorCreator():
    """Factory handed to Maya's plugin registry; returns an owned
    pointer to a fresh translator instance."""
    translator = ThreeJsTranslator()
    return asMPxPtr(translator)
def initializePlugin(mobject):
    """Maya plugin entry point: register the three.js file translator."""
    plugin_fn = MFnPlugin(mobject)
    try:
        plugin_fn.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
    except:
        # Report the failure, then let Maya see the original exception.
        sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
        raise
def uninitializePlugin(mobject):
    """Maya plugin exit point: deregister the three.js file translator."""
    plugin_fn = MFnPlugin(mobject)
    try:
        plugin_fn.deregisterFileTranslator(kPluginTranslatorTypeName)
    except:
        # Report the failure, then let Maya see the original exception.
        sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
        raise
class FramesPerSecond(object):
    """Map a Maya time-unit string (e.g. 'ntsc', 'pal', '48fps') to an
    integer frame rate."""

    # Named Maya time units and their frame rates.
    MAYA_VALUES = {
        'game': 15,
        'film': 24,
        'pal': 25,
        'ntsc': 30,
        'show': 48,
        'palf': 50,
        'ntscf': 60
    }

    def __init__(self, fpsString):
        # Unit string as returned by currentUnit(query=True, time=True).
        self.fpsString = fpsString

    def value(self):
        """Return the frame rate for the stored unit string.

        Unknown units fall back to the digits embedded in the string
        (e.g. '48fps' -> 48).  Raises ValueError when the string
        contains no digits.
        """
        if self.fpsString in FramesPerSecond.MAYA_VALUES:
            return FramesPerSecond.MAYA_VALUES[self.fpsString]
        # ''.join of a generator keeps this working on Python 2 AND 3;
        # the original int(filter(...)) breaks on Python 3 where
        # filter() returns an iterator, not a str.
        return int(''.join(c for c in self.fpsString if c.isdigit()))
###################################################################
## The code below was taken from the Blender 3JS Exporter
## Its purpose is to fix the JSON output so that it does not
## put each array value on its own line, which is ridiculous
## for this type of output.
###################################################################
ROUND = 6
## THREE override function
def _json_floatstr(o):
if ROUND is not None:
o = round(o, ROUND)
return '%g' % o
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Modified copy of Python 2 ``json.encoder._make_iterencode``.

    The only change from the stdlib version: ``_iterencode_list`` never
    emits newlines/indentation (the original indent logic is left
    commented out below), so arrays are written on a single line while
    dicts keep the normal ``indent`` behaviour.
    """
    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON encoding of a list/tuple, chunk by chunk.
        if not lst:
            yield '[]'
            return
        # ``markers`` tracks container ids to detect circular references.
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        #if _indent is not None:
        #    _current_indent_level += 1
        #    newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
        #    separator = _item_separator + newline_indent
        #    buf += newline_indent
        #else:
        # Deliberately no indentation for lists (see module comment).
        newline_indent = None
        separator = _item_separator
        first = True
        for value in lst:
            # ``buf`` carries '[' for the first element and the item
            # separator for every later one; it is yielded fused with
            # scalar values for fewer chunks.
            if first:
                first = False
            else:
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                # Containers/custom objects: yield the prefix, then
                # delegate to the appropriate sub-encoder.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            # Unreachable here: newline_indent is always None for lists.
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON encoding of a dict, chunk by chunk (stdlib
        # behaviour, indentation intact).
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = sorted(dct.items(), key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatcher over all JSON-encodable types.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            # Unknown type: let _default convert it, then re-encode.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
# override the encoder
# Monkey-patch the stdlib json module so every subsequent json.dumps in
# this process (notably ThreeJsWriter.write) uses the single-line-array
# encoder defined above.
json.encoder._make_iterencode = _make_iterencode
| |
import requests
import webbrowser
class APICall(object):
def __init__(self, token, user_id):
    """Remember the app token and user id sent with every API request."""
    self.id = user_id
    self.token = token
    self.url = "https://manager.fenero.com/MobileApi/"
def poll_api(self, req, p):
    """Worker for every API call: GET ``self.url + req`` with params *p*.

    Returns parsed JSON when the body is JSON, the raw text otherwise
    (CSV reports), or None after printing the status on an HTTP error.
    """
    r = requests.get(self.url + req, params=p)
    if r.status_code != 200:
        print("Error: " + str(r.status_code))
        return None
    try:
        return r.json()
    except ValueError:
        return r.text
def get_campaigns_and_queues(self):
    """Return a list of all campaigns and ACD queues."""
    params = {'userId': self.id, 'appTokenId': self.token}
    return self.poll_api("GetCampaignsAndQueues", params)
def get_live_chat_queues(self):
    """Return a list of all live chat queues."""
    params = {'userId': self.id, 'appTokenId': self.token}
    return self.poll_api("GetLiveChatQueues", params)
def get_dids(self):
    """Return a list of all local and toll-free DIDs in the system."""
    params = {'userId': self.id, 'appTokenId': self.token}
    return self.poll_api("GetDIDs", params)
def get_dispositions(self):
    """Return a list of all dispositions in the system."""
    params = {'userId': self.id, 'appTokenId': self.token}
    return self.poll_api("GetDispositions", params)
def get_users(self):
    """Return a list of all users in the system."""
    params = {'userId': self.id, 'appTokenId': self.token}
    return self.poll_api("GetUsers", params)
def get_realtime_stats(self, campaign_ids, group_ids):
    """Return real-time call, agent and time information for the given
    campaigns and groups."""
    params = {'userId': self.id, 'campaignIds': campaign_ids, 'groupIds': group_ids,
              'appTokenId': self.token}
    return self.poll_api("GetRealtimeStats", params)
def monitor(self, session_id, server_ip, user_phone, agent_id):
    """Blind-monitor an agent's session.

    Parameter values can be retrieved via get_realtime_stats.
    """
    params = {'userId': self.id, 'sessionId': session_id, 'serverIP': server_ip, 'userPhone': user_phone,
              'agentID': agent_id, 'appTokenId': self.token}
    return self.poll_api("Monitor", params)
def barge(self, session_id, server_ip, user_phone, agent_id):
    """Barge into an agent's call: like monitor(), but the callee on
    *user_phone* actively participates and is heard by both parties.

    Parameter values can be retrieved via get_realtime_stats.
    """
    params = {'userId': self.id, 'sessionId': session_id, 'serverIP': server_ip, 'userPhone': user_phone,
              'agentID': agent_id, 'appTokenId': self.token}
    return self.poll_api("Barge", params)
def get_lists(self, campaign_ids):
    """Return the dialing lists currently in the system as a JSON array."""
    params = {'userId': self.id, 'campaignIds': campaign_ids, 'appTokenId': self.token}
    return self.poll_api("GetLists", params)
def create_list(self, name, description, caller_id, campaign_id, reset_times, active):
    """Create a new dialing list."""
    params = {'name': name, 'description': description, 'callerId': caller_id, 'campaignId': campaign_id,
              'resetTimes': reset_times, 'active': active, 'userId': self.id, 'appTokenId': self.token}
    return self.poll_api("CreateList", params)
def copy_custom_fields(self, source_list_id, destination_list_id):
    """Copy the custom field definitions from one list to another."""
    params = {'sourceListId': source_list_id, 'destinationListId': destination_list_id, 'userId': self.id,
              'appTokenId': self.token}
    return self.poll_api("CopyCustomFields", params)
def start_list(self, list_id):
    """Activate an inactive list.

    If there are dialable leads and the other rules are met (recycling
    rules, campaign hours of operation, available agents, etc.), dialing
    of the list's records begins.
    """
    params = {'userId': self.id, 'listId': list_id, 'appTokenId': self.token}
    return self.poll_api("StartList", params)
def stop_list(self, list_id):
    """Deactivate an active list.

    Dialing stops immediately; calls already in progress continue to be
    processed.
    """
    params = {'userId': self.id, 'listId': list_id, 'appTokenId': self.token}
    return self.poll_api("StopList", params)
def get_recordings(self, start_date, end_date, campaign_ids):
    """Search QA for all call recordings in the date range for the given
    campaigns/ACD queues."""
    params = {'userId': self.id, 'startDate': start_date, 'endDate': end_date, 'campaignIds': campaign_ids,
              'appTokenId': self.token}
    return self.poll_api("GetRecordings", params)
def stream_recording(self, recording_id):
    """Download a recording's audio to disk (file named after the
    recording id) and open it in the default browser."""
    params = {'userId': self.id, 'recordingId': recording_id, 'appTokenId': self.token}
    r = requests.get(self.url + "StreamRecording", params=params, stream=True)
    # Save the .wav stream to disk, skipping keep-alive chunks.
    with open(recording_id, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    webbrowser.open(recording_id)
def report_agent_activity_summary(self, start_date, end_date, users, campaign_ids, tz_offset):
    """Run the Agent Activity Summary report; returns raw CSV output."""
    params = {'startDate': start_date, 'endDate': end_date, 'users': users, 'campaignIds': campaign_ids,
              'tzOffset': tz_offset, 'userId': self.id, 'appTokenId': self.token}
    return self.poll_api("ReportAgentActivitySummary", params)
def report_agent_aux_detail(self, start_date, end_date, report_type, users, campaign_ids, tz_offset):
    """Run the Agent Aux Detail report; returns raw CSV output."""
    params = {'startDate': start_date, 'endDate': end_date, 'reportType': report_type, 'users': users,
              'campaignIds': campaign_ids, 'tzOffset': tz_offset, 'userId': self.id, 'appTokenId': self.token}
    return self.poll_api("ReportAgentAuxDetail", params)
def report_agent_performance_detail(self, start_date, end_date, report_type, users, campaign_ids, queue_ids,
                                    tz_offset):
    """Run the Agent Performance Detail report; returns raw CSV output."""
    params = {'startDate': start_date, 'endDate': end_date, 'reportType': report_type, 'users': users,
              'campaignIds': campaign_ids, 'queueIds': queue_ids, 'tzOffset': tz_offset, 'userId': self.id,
              'appTokenId': self.token}
    return self.poll_api("ReportAgentPerformanceDetail", params)
# Executes the Agent Log report using the filters specified as parameters and returns raw output as comma-separated
# values
def report_agent_log(self, start_date, end_date, users, campaign_ids, tz_offset):
req_type = "ReportAgentLog"
payload = {'startDate': start_date, 'endDate': end_date, 'users': users, 'campaignIds': campaign_ids,
'tzOffset': tz_offset, 'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Agent Staff Time report using the filters specified as parameters and returns raw output as
# comma-separated values
def report_agent_staff_time(self, start_date, end_date, users, tz_offset):
req_type = "ReportAgentStaffTime"
payload = {'startDate': start_date, 'endDate': end_date, 'users': users, 'tzOffset': tz_offset,
'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Agent Disposition report using the filters specified as parameters and returns raw output as
# comma-separated values
def report_agent_disposition(self, start_date, end_date, users, tz_offset):
req_type = "ReportAgentDisposition"
payload = {'startDate': start_date, 'endDate': end_date, 'users': users, 'tzOffset': tz_offset,
'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Campaign DNC report using the filters specified as parameters and returns raw output as
# comma-separated values
def report_campaign_dnc(self, campaign_ids):
req_type = "ReportCampaignDNC"
payload = {'campaignIds': campaign_ids, 'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Call Detail Records (Inbound) report using the filters specified as parameters and returns raw output
# as comma-separated values. This report is the source of all inbound call-related billing activity.
def report_call_detail_records_inbound(self, start_date, end_date, tz_offset):
req_type = "ReportCallDetailRecordsInbound"
payload = {'startDate': start_date, 'endDate': end_date, 'tzOffset': tz_offset, 'userId': self.id,
'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Call Detail Records (Outbound) report using the filters specified as parameters and returns raw
# output as comma-separated values. This report is the source of all outbound call-related billing activity.
def report_call_detail_records_outbound(self, start_date, end_date, tz_offset):
req_type = "ReportCallDetailRecordsInbound"
payload = {'startDate': start_date, 'endDate': end_date, 'tzOffset': tz_offset, 'userId': self.id,
'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Call Detail Records (Inbound&Outbound) report using the filters specified as parameters and returns
# raw output as comma-separated values. This report is the source of all call-related billing activity.
def report_call_detail_records_combined(self, start_date, end_date, tz_offset):
req_type = "ReportCallDetailRecordsCombined"
payload = {'startDate': start_date, 'endDate': end_date, 'tzOffset': tz_offset, 'userId': self.id,
'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Call Detail Usage report using the filters specified as parameters and returns raw output as
# comma-separated values. This report is a summarized view of the Call Detail Records (Inbound&Outbound) reports.
def report_call_detail_usage(self, start_date, end_date, report_type, users, campaign_ids, queue_ids, tz_offset,
):
req_type = "ReportCallDetailUsage"
payload = {'startDate': start_date, 'endDate': end_date, 'reportType': report_type, 'users': users,
'campaignIds': campaign_ids, 'queueIds': queue_ids, 'tzOffset': tz_offset, 'userId': self.id,
'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Live Chat Log report using the filters specified as parameters and returns raw output as
# comma-separated values.
def report_live_chat_log(self, start_date, end_date, chat_queue_ids, tz_offset):
req_type = "ReportLiveChatLog"
payload = {'startDate': start_date, 'endDate': end_date, 'chatQueueIds': chat_queue_ids,
'tzOffset': tz_offset, 'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Interaction Detail report using the filters specified as parameters and returns raw output as
# comma-separated values.
def report_interaction_detail(self, start_date, end_date, list_ids, disposition_ids, campaign_ids, tz_offset):
req_type = "ReportInteractionDetail"
payload = {'startDate': start_date, 'endDate': end_date, 'listIds': list_ids,
'dispositionIds': disposition_ids, 'campaignIds': campaign_ids, 'tzOffset': tz_offset,
'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Interaction Detail Log report using the filters specified as parameters and returns raw output as
# comma-separated values.
def report_interaction_detail_log(self, start_date, end_date, list_ids, disposition_ids, campaign_ids, tz_offset):
req_type = "ReportInteractionDetailLog"
payload = {'startDate': start_date, 'endDate': end_date, 'listIds': list_ids, 'dispositionIds': disposition_ids,
'campaignIds': campaign_ids, 'tzOffset': tz_offset, 'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the System Call Log report using the filters specified as parameters and returns raw output as
# comma-separated values
def report_system_call_log(self, start_date, end_date, queue_ids, campaign_ids, tz_offset):
req_type = "ReportSystemCallLog"
payload = {'startDate': start_date, 'endDate': end_date, 'queueIds': queue_ids, 'campaignIds': campaign_ids,
'tzOffset': tz_offset, 'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Lead Detail report using the filters specified as parameters and returns raw output as
# comma-separated values.
def report_lead_detail(self, start_date, end_date, list_ids, queue_ids, campaign_ids, tz_offset):
req_type = "ReportLeadDetail"
payload = {'startDate': start_date, 'endDate': end_date, 'listIds': list_ids, 'queueIds': queue_ids,
'campaignIds': campaign_ids, 'tzOffset': tz_offset, 'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the Disposition Summary report using the filters specified as parameters and returns raw output as
# comma-separated values.
def report_dispositions_summary(self, start_date, end_date, list_ids, queue_ids, campaign_ids, tz_offset):
req_type = "ReportDispositionSummary"
payload = {'startDate': start_date, 'endDate': end_date, 'listIds': list_ids, 'queueIds': queue_ids,
'campaignIds': campaign_ids, 'tzOffset': tz_offset, 'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
# Executes the outbound-specific Flash Summary report using the filters specified as parameters and returns raw
# output as comma-separated values.
def report_flash_summary(self, start_date, end_date, list_ids, tz_offset):
req_type = "ReportFlashSummary"
payload = {'startDate': start_date, 'endDate': end_date, 'listIds': list_ids, 'tzOffset': tz_offset,
'userId': self.id, 'appTokenId': self.token}
return self.poll_api(req_type, payload)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# http://pythonhosted.org/llfuse/example.html
# https://github.com/python-llfuse/python-llfuse
import errno
import logging
import os
import stat
import tarfile
import time
from argparse import ArgumentParser
try:
    # Optional: faulthandler dumps Python tracebacks on hard crashes
    # (segfaults etc.). Its absence is tolerated silently.
    import faulthandler
except ImportError:
    pass
else:
    faulthandler.enable()
# works with version llfuse 1.2
import llfuse
log = logging.getLogger(__name__)
class TarFS(llfuse.Operations):
    """Read-only FUSE filesystem exposing the members of a tar archive.

    Inode layout: the tar member at index i (in tar.getnames() /
    tar.getmembers() order) is assigned inode i + delta, where
    delta = llfuse.ROOT_INODE + 1.
    """
    def __init__(self, tarname):
        """Open *tarname* (plain, .gz, .bz2 or .xz tar) and precompute inode bounds."""
        super(TarFS, self).__init__()
        self.tar = None
        if tarname.lower().endswith("gz"):
            self.tar = tarfile.open(tarname, mode="r:gz")
        elif tarname.lower().endswith("bz2"):
            self.tar = tarfile.open(tarname, mode="r:bz2")
        elif tarname.lower().endswith("xz"):
            self.tar = tarfile.open(tarname, mode="r:xz")
        else:
            self.tar = tarfile.open(tarname, mode="r")
        # size used later in statfs syscall for df
        self.whole_size = os.stat(tarname).st_size
        # inodes numbers are indexes from tar.getnames() + 1
        self.delta = llfuse.ROOT_INODE + 1
        # max inode value, if we get something higher we don't need to check anything
        self.max_inode = len(self.tar.getnames()) + self.delta
    def getattr(self, inode, ctx=None):
        """Build an llfuse.EntryAttributes for *inode*.

        Raises FUSEError(ENOENT) for inodes outside [ROOT_INODE, max_inode).
        """
        entry = llfuse.EntryAttributes()
        # root inode attributes
        if inode == llfuse.ROOT_INODE:
            entry.st_mode = (stat.S_IFDIR | 0o755)
            entry.st_size = 0
            stamp = int(time.time() * 1e9)
        # parameters for inodes inside the tar file
        elif inode < self.max_inode:
            tar_inode = self.tar.getmembers()[inode - self.delta]
            # setting proper mode based on the type of the inode
            entry.st_mode = 0
            if tar_inode.isdir():
                entry.st_mode = stat.S_IFDIR
            elif tar_inode.isreg():
                entry.st_mode = stat.S_IFREG
            elif tar_inode.islnk():
                # hard links are surfaced as symlinks here
                entry.st_mode = stat.S_IFLNK
            elif tar_inode.issym():
                entry.st_mode = stat.S_IFLNK
            elif tar_inode.isfifo():
                entry.st_mode = stat.S_IFIFO
            elif tar_inode.ischr():
                entry.st_mode = stat.S_IFCHR
            entry.st_mode |= tar_inode.mode
            # inode size
            entry.st_size = tar_inode.size
            # we will use mtime for atime and ctime also
            # Bug fix: the st_*_ns fields must be integer nanoseconds; the
            # multiplication by 1e9 previously left this as a float.
            stamp = int(tar_inode.mtime * 1e9)
        else:
            raise llfuse.FUSEError(errno.ENOENT)
        entry.st_atime_ns = stamp
        entry.st_ctime_ns = stamp
        entry.st_mtime_ns = stamp
        entry.st_gid = os.getgid()
        entry.st_uid = os.getuid()
        entry.st_ino = inode
        # because this is read-only FS we can set timeouts to large values
        entry.attr_timeout = 3600
        entry.entry_timeout = 3600
        return entry
    def lookup(self, parent_inode, name, ctx=None):
        """Resolve *name* (bytes) inside directory *parent_inode*.

        Raises FUSEError(ENOENT) when no member matches.
        """
        # parent_inode needs to be lower then max_inode
        # NOTE(review): assert is stripped under `python -O`; a FUSEError
        # would be the robust choice here.
        assert parent_inode < self.max_inode
        # special case of '.' inode
        if name == b'.':
            return self.getattr(parent_inode)
        # special case of '..' inode
        idx = parent_inode - self.delta
        if name == b'..':
            # we get the name of the folder above
            # NOTE(review): when parent is ROOT_INODE, idx is -1 and this
            # indexes the last tar member — confirm '..' on root is handled
            # by the kernel before reaching here.
            p_path = os.path.split(self.tar.getnames()[idx])[0]
            # knowing the name we find the index for it in the list
            idx = self.tar.getnames().index(p_path)
            # index + delta is our inode number
            return self.getattr(idx + self.delta)
        # special case of ROOT inode
        if parent_inode == llfuse.ROOT_INODE:
            prefix = ""
        else:
            prefix = self.tar.getnames()[idx]
        idx = 0
        for fd in self.tar.getnames():
            # member belongs to this directory and its basename matches
            if os.path.split(fd)[0] == prefix and\
                    name == os.path.basename(fd).encode('utf-8'):
                return self.getattr(idx + self.delta)
            idx += 1
        raise llfuse.FUSEError(errno.ENOENT)
    def opendir(self, inode, ctx):
        """Open directory *inode*; the inode itself doubles as the handle."""
        if inode == llfuse.ROOT_INODE:
            return inode
        elif inode < self.max_inode:
            idx = inode - self.delta
            if self.tar.getmembers()[idx].isdir():
                return inode
        raise llfuse.FUSEError(errno.ENOENT)
    def readdir(self, inode, off):
        """Yield (name, attrs, next_offset) for entries of directory *inode*.

        The offset is the 1-based position in tar.getnames(), so a resumed
        listing continues after the last yielded member.
        """
        if inode == llfuse.ROOT_INODE:
            prefix = ""
        else:
            idx = inode - self.delta
            prefix = self.tar.getnames()[idx]
        idx = 1
        for fd in self.tar.getnames():
            if os.path.split(fd)[0] == prefix:
                if idx > off:
                    # idx - 1 is the member's index in getnames()
                    yield (os.path.basename(fd).encode('utf-8'), self.getattr(idx - 1 + self.delta), idx)
            idx += 1
    def open(self, inode, flags, ctx):
        """Open a file; the inode itself is used as the file handle."""
        return inode
    def read(self, fh, off, size):
        """Return up to *size* bytes at offset *off* of the member behind *fh*."""
        idx = fh - self.delta
        fd = self.tar.extractfile(self.tar.getnames()[idx])
        fd.seek(off)
        return fd.read(size)
    def statfs(self, ctx):
        """
        to make output of df nicer
        man 2 statvfs
        """
        stfs = llfuse.StatvfsData()
        stfs.f_bavail = 0
        stfs.f_bfree = 0
        # report the archive size in 1-byte fragments (f_frsize = 1)
        stfs.f_blocks = self.whole_size
        stfs.f_bsize = 4096
        stfs.f_favail = 0
        stfs.f_ffree = 0
        stfs.f_files = self.max_inode
        stfs.f_frsize = 1
        return stfs
def init_logging(debug=False):
    """Attach a stream handler to the root logger.

    Handler and root logger run at DEBUG when *debug* is true, else INFO.
    """
    level = logging.DEBUG if debug else logging.INFO
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        '%(asctime)s.%(msecs)03d %(threadName)s: '
        '[%(name)s] %(message)s', datefmt="%Y-%m-%d %H:%M:%S"))
    handler.setLevel(level)
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    root_logger.addHandler(handler)
def parse_args():
    '''Parse command line'''
    parser = ArgumentParser()
    parser.add_argument('tarfile', type=str, help='tarfile to mount')
    parser.add_argument('--mountpoint', type=str, default="",
                        help='Where to mount the file system')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Enable debugging output')
    parser.add_argument('--debug-fuse', action='store_true', default=False,
                        help='Enable FUSE debugging output')
    return parser.parse_args()
def getmount_point(opt):
    """Derive and validate the mount point directory.

    When --mountpoint is empty, derive it from the tar file's basename
    minus its extension, creating the directory if needed; otherwise the
    given path must be an existing directory.  Raises Exception when no
    usable mount point can be determined.

    Fixes: the local previously shadowed the imported `tarfile` module,
    and an unreachable `return` followed the final raise.
    """
    tar_name = os.path.basename(opt.tarfile)
    mpath = opt.mountpoint
    if not mpath:
        (mpath, ext) = os.path.splitext(tar_name)
        if ext and mpath:
            if not os.path.exists(mpath):
                os.mkdir(mpath)
            return mpath
    elif os.path.isdir(mpath):
        return mpath
    raise Exception("Please specify a correct mountpoint")
def main():
    """Entry point: mount the tar archive read-only and run the FUSE loop.

    On any failure inside llfuse.main() the filesystem is closed without
    unmounting and the exception is re-raised.
    """
    options = parse_args()
    init_logging(options.debug)
    mpath = getmount_point(options)
    tarfs = TarFS(options.tarfile)
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=fuse_tar')
    fuse_options.add('ro')  # read-only mount
    if options.debug_fuse:
        fuse_options.add('debug')
    llfuse.init(tarfs, mpath, fuse_options)
    try:
        llfuse.main()
    except BaseException:  # was a bare `except:`; equivalent, and re-raised below
        llfuse.close(unmount=False)
        raise
    llfuse.close()
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| |
from flask import abort, request, jsonify, url_for, g
from ..models import User, Bucketlist, Item
from flask_restful import Api, Resource, reqparse, fields, marshal
from flask_sqlalchemy import SQLAlchemy
from flask_httpauth import HTTPTokenAuth
from app import db
from ..serializers import bucketlist_serializer, item_serializer
from base64 import b32encode
from sqlalchemy import exc
auth = HTTPTokenAuth(scheme='Token')
@auth.verify_token
def verify_token(token):
    """Validate *token*; on success stash the user on flask.g and report True."""
    user = User.verify_auth_token(token)
    if user:
        g.user = user
        return True
    return False
class RegisterApi(Resource):
    """Registration endpoint: creates a new user from form credentials."""
    def __init__(self):
        self.parse = reqparse.RequestParser()
        # Both credentials are mandatory form fields.
        for field, message in (('username', 'username not provided'),
                               ('password', 'password not provided')):
            self.parse.add_argument(field, type=str, required=True,
                                    help=message, location='form')
        super(RegisterApi, self).__init__()
    def post(self):
        """
        Register a new user given a username and a password
        parameters:
            -username
            -password
        """
        args = self.parse.parse_args()
        try:
            new_user = User(args['username'], args['password'])
            db.session.add(new_user)
            db.session.commit()
            return {'message': 'User successfully added'}, 201
        except exc.IntegrityError:
            # Unique constraint hit: the username is already taken.
            db.session.rollback()
            return {'message': "User already exists"}, 409
class LoginApi(Resource):
    """Login endpoint: exchanges valid credentials for an auth token."""
    def __init__(self):
        self.parse = reqparse.RequestParser()
        self.parse.add_argument(
            'username',
            type=str,
            required=True,
            help='username not provided',
            location='form')
        self.parse.add_argument(
            'password',
            type=str,
            required=True,
            help='password not provided',
            location='form')
        super(LoginApi, self).__init__()
    def post(self):
        """
        Logs in the user given a username and a password and return an authentication token for a
        successful login
        parameters:
            -username
            -password
        """
        args = self.parse.parse_args()
        username = args['username'].lower()
        password = args['password']
        user = User.query.filter_by(username=username).first()
        if user is None:
            return {'message': 'Specified username not found '}, 403
        # Fix: verify_password (an expensive hash check) used to run twice
        # on every failed login; it is now evaluated exactly once.
        if user.verify_password(password):
            token = user.generate_auth_token()
            return {'Authorization': 'Token ' + token.decode('ascii')}
        return {'message': 'Invalid password '}, 403
class IndexResource(Resource):
    # Landing endpoint. The get() docstring below is structured swagger-style
    # (note the `---` section); it is presumably consumed by an API-docs tool
    # such as flasgger — treat its layout as load-bearing and edit with care.
    @auth.login_required
    def get(self):
        """
        This is the bucketlist API
        Call this api to create a Bucketlists of things you want to do, add items to this
        bucketlist, view, edit and delete bucketlists and items
        ---
        parameters:
            -
        responses:
            500:
                description: Server error!
            200:
                description: user token
                schema:
                    token
            404:
                description: Not logged in.logged in
            400:
                description: Error logging in
        """
        return {'message': "Welcome to Bucketlist API"}, 200
class BucketlistsApi(Resource):
    """Collection endpoints for the current user's bucketlists."""
    @auth.login_required
    def post(self):
        """
        Creates a new bucketlst given the name
        parameters:
            -name
        """
        parse = reqparse.RequestParser()
        parse.add_argument(
            'name',
            type=str,
            required=True,
            help='Buckestlist name not provided',
            location='form')
        args = parse.parse_args()
        # casefold so name uniqueness is case-insensitive per user
        name = args['name'].casefold()
        if Bucketlist.query.filter_by(
                name=name, created_by=g.user.id).first() is None:
            created_by = g.user.id
            new_bucketlist = Bucketlist(name, created_by)
            db.session.add(new_bucketlist)
            db.session.commit()
            # (removed a dead `new_bucketlist = new_bucketlist` self-assignment)
            return {"successfully created: ": marshal(
                new_bucketlist, bucketlist_serializer)}, 201
        else:
            return {'message': "Bucketlist already exists"}, 409
    @auth.login_required
    def get(self):
        """
        Returns bucketlists created by a given user, the returned lists are paginated. The 'q' argument
        is used to search a bucketlist by name.
        parameters:
            -limit
            -page
            -id
            -q
        """
        parse = reqparse.RequestParser()
        parse.add_argument('page', type=int, default=1)
        parse.add_argument('limit', type=int, default=5)
        parse.add_argument('q', type=str, location='args')
        args = parse.parse_args()
        search_name = args['q']
        limit = args['limit']
        page_no = args['page']
        # implement search function/option (exact, case-insensitive match)
        if search_name:
            search_results = Bucketlist.query.filter_by(
                name=search_name.casefold(), created_by=g.user.id).first()
            if search_results:
                return {
                    "Found ": marshal(
                        search_results,
                        bucketlist_serializer)}
            else:
                return {'message': 'Bucketlist ' + search_name + ' not found.'}
        # get all bucketlists and paginate
        bucketlists_per_page = Bucketlist.query.filter_by(
            created_by=g.user.id).paginate(
            page=page_no, per_page=limit, error_out=True)
        # renamed from `all_bucketlists`: this is the page COUNT, not the lists
        total_pages = bucketlists_per_page.pages
        has_next = bucketlists_per_page.has_next
        has_previous = bucketlists_per_page.has_prev
        if has_next:
            next_page = str(request.url_root) + 'api/v1/bucketlists?' + \
                'limit=' + str(limit) + '&page=' + str(page_no + 1)
        else:
            next_page = 'None'
        if has_previous:
            previous_page = request.url_root + 'api/v1/bucketlists?' + \
                'limit=' + str(limit) + '&page=' + str(page_no - 1)
        else:
            previous_page = 'None'
        bucketlists = bucketlists_per_page.items
        response = {'bucketlists': marshal(bucketlists, bucketlist_serializer),
                    'has_next': has_next,
                    'pages': total_pages,
                    'previous_page': previous_page,
                    'next_page': next_page
                    }
        return response
class BucketlistApi(Resource):
    """Single-bucketlist endpoints: fetch, rename and delete by id."""
    @auth.login_required
    def get(self, id):
        """
        Returns the bucketlist with the given id
        parameters:
            -id
        """
        got_list = Bucketlist.query.filter_by(
            id=id, created_by=g.user.id).first()
        if got_list:
            return marshal(got_list, bucketlist_serializer)
        else:
            return {'message': 'Specified bucketlist not found.'}, 404
    @auth.login_required
    def put(self, id):
        """
        Edits/updates the bucketlist with the given id
        parameters:
            -name
            -id
        """
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=True,
                            help='New name not provided', location='form')
        args = parser.parse_args()
        new_name = args['name']
        got_list = Bucketlist.query.filter_by(
            id=id, created_by=g.user.id).first()
        # idiom fix: `not x is None` -> `x is not None`
        if got_list is not None:
            got_list.name = new_name
            db.session.add(got_list)
            db.session.commit()
            return {"successfully edited ": marshal(
                got_list, bucketlist_serializer)}, 200
        else:
            return {'message': 'Specified bucketlist not found.'}, 404
    @auth.login_required
    def delete(self, id):
        """
        Deletes the bucketlist with the given id
        parameters:
            -id
        """
        to_delete = Bucketlist.query.filter_by(
            created_by=g.user.id, id=id).first()
        if to_delete is not None:
            db.session.delete(to_delete)
            db.session.commit()
            # NOTE(review): a 204 response carries no body, so this message
            # never reaches clients — confirm whether 200 was intended.
            return {'message': 'Bucketlist successfully deleted'}, 204
        else:
            return {'message': 'Not deleted, Bucketlist does not exist'}, 404
class BucketlistItemCreateApi(Resource):
    """Creates items inside a specific bucketlist."""
    def __init__(self):
        self.parse = reqparse.RequestParser()
        self.parse.add_argument('name', type=str, required=True,
                                help='Item name not provided',
                                location='form')
        self.parse.add_argument('description', type=str,
                                location='form')
        # Consistency fix: the sibling resources all chain up to
        # Resource.__init__; this class previously skipped it.
        super(BucketlistItemCreateApi, self).__init__()
    @auth.login_required
    def post(self, id):
        """
        Creates a new item in the bucketlist with the provided id
        parameters:
            -name
            -description
            -id
            -item_id
        """
        args = self.parse.parse_args()
        item_name = args['name']
        description = args['description']
        bucketlist_id = id
        bucketlist = Bucketlist.query.filter_by(
            id=bucketlist_id, created_by=g.user.id).first()
        if not bucketlist:
            return {
                'message': 'The bucketlist you want to insert an item to does not exists.'}, 404
        # Reject duplicate item names within the same bucketlist.
        if Item.query.filter_by(
                name=item_name,
                bucketlist_id=bucketlist_id).first():
            return {
                'message': 'An item with the provided item name already exists.'}, 409
        if description:
            new_item = Item(item_name, bucketlist_id, description)
        else:
            new_item = Item(item_name, bucketlist_id)
        db.session.add(new_item)
        db.session.commit()
        # (removed a dead `created_item = new_item` assignment)
        # NOTE(review): "successfuly" typo is part of the response schema;
        # left unchanged so clients keying on it keep working.
        return {"successfuly created: ": marshal(new_item, item_serializer)}, 201
class BucketItemsApi(Resource):
    """Update and delete endpoints for items within a bucketlist."""
    def __init__(self):
        self.parse = reqparse.RequestParser()
        self.parse.add_argument('name', type=str, required=True,
                                help='Bucket list id not provided',
                                location='form')
        self.parse.add_argument('description', type=str,
                                location='form')
        super(BucketItemsApi, self).__init__()
    @auth.login_required
    def put(self, id, item_id):
        """
        Edits/updates the an item with the given item_id and within the bucketlist with the given id
        parameters:
            -name
            -description
            -done
            -id
            -item_id
        """
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str)
        parser.add_argument('done', type=bool)
        parser.add_argument('description', type=str)
        edit_item = Item.query.filter_by(bucketlist_id=id, id=item_id).first()
        if edit_item is None:
            return {'message': 'The item you tried to edit does not exist.'}, 404
        else:
            args = parser.parse_args()
            done = args['done']
            name = args['name']
            description = args['description']
            # Bug fix: `if done:` treated an explicit False like "absent",
            # so an item could never be marked not-done again. Only a
            # missing value (None) leaves the flag untouched now.
            if done is not None:
                edit_item.done = done
            if name:
                edit_item.name = name
            if description:
                edit_item.description = description
            db.session.add(edit_item)
            db.session.commit()
            # NOTE(review): "upadated" typo is part of the response schema;
            # left unchanged so existing clients keep working.
            return {
                "successfully upadated: ": marshal(
                    edit_item, item_serializer)}, 200
    @auth.login_required
    def delete(self, id, item_id):
        """
        Deletes the an item with the given item_id and within the bucketlist with the given id
        parameters:
            -id
            -item_id
        """
        delete_item = Item.query.filter_by(
            bucketlist_id=id, id=item_id).first()
        if delete_item is None:
            return {
                'message': 'The item you tried to delete does not exist.'}, 404
        db.session.delete(delete_item)
        db.session.commit()
        return {'message': ''}, 204
| |
"""A collection of string operations (most are no longer used).
Warning: most of the code you see here isn't normally used nowadays.
Beginning with Python 1.6, many of these functions are implemented as
methods on the standard string object. They used to be implemented by
a built-in module called strop, but strop is now obsolete itself.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
punctuation -- a string containing all characters considered punctuation
printable -- a string containing all characters considered printable
"""
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
# ascii_* names mirror the unprefixed ones (forward-compatible aliases)
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace
# Case conversion helpers
# Use str to convert Unicode literal in case of -U
# NOTE: Python 2 semantics — `xrange` and a list-returning `map`.
l = map(chr, xrange(256))
_idmap = str('').join(l)
del l
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """capwords(s, [sep]) -> string
    Split the argument into words using split, capitalize each
    word using capitalize, and join the capitalized words using
    join. Note that this replaces runs of whitespace characters by
    a single space.
    """
    words = [chunk.capitalize() for chunk in s.split(sep)]
    return (sep or ' ').join(words)
# Construct a translation string
_idmapL = None  # lazily-built identity list of the 256 chars (cache for maketrans)
def maketrans(fromstr, tostr):
    """maketrans(frm, to) -> string
    Return a translation table (a string of 256 bytes long)
    suitable for use in string.translate. The strings frm and to
    must be of the same length.
    """
    if len(fromstr) != len(tostr):
        # Python 2 raise syntax; this module predates raise X(...) style.
        raise ValueError, "maketrans arguments must have same length"
    global _idmapL
    if not _idmapL:
        # Python 2: map(None, s) == list(s); cached module-wide.
        _idmapL = map(None, _idmap)
    L = _idmapL[:]
    fromstr = map(ord, fromstr)
    for i in range(len(fromstr)):
        # overwrite the identity entry at each source ordinal
        L[fromstr[i]] = tostr[i]
    return ''.join(L)
####################################################################
from _Tools import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
    # Verbose-regex skeleton; '%(delim)s' and '%(id)s' are interpolated in
    # __init__ below from the class's delimiter/idpattern attributes.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
      (?P<named>%(id)s) | # delimiter and a Python identifier
      {(?P<braced>%(id)s)} | # delimiter and a braced identifier
      (?P<invalid>) # Other ill-formed delimiter exprs
    )
    """
    def __init__(self, name, bases, dct):
        super(_TemplateMetaclass, self).__init__(name, bases, dct)
        if 'pattern' in dct:
            # the subclass supplied its own pattern string; use it verbatim
            pattern = self.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(self.delimiter),
                'id' : self.idpattern,
                }
        # NOTE(review): `_re` comes from `from _Tools import re as _re` above;
        # presumably a re-export of the stdlib `re` module — confirm.
        self.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
    """A string class for supporting $-substitutions."""
    # Python 2 metaclass hook: compiles `pattern` for the class at definition
    # time (ignored by Python 3, which uses `metaclass=` syntax instead).
    __metaclass__ = _TemplateMetaclass
    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'
    def __init__(self, template):
        self.template = template
    # Search for $$, $identifier, ${identifier}, and any bare $'s
    def _invalid(self, mo):
        # Translate the position of the (empty) 'invalid' match group into a
        # line/column pair for the error message.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))
    def substitute(self, *args, **kws):
        # Strict substitution: unknown names raise KeyError, ill-formed
        # delimiters raise ValueError (via _invalid).
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # keyword arguments take precedence over the positional mapping
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
    def safe_substitute(self, *args, **kws):
        # Like substitute(), but unknown or ill-formed placeholders are left
        # in the output unchanged instead of raising.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    return self.delimiter + named
            braced = mo.group('braced')
            if braced is not None:
                try:
                    return '%s' % (mapping[braced],)
                except KeyError:
                    return self.delimiter + '{' + braced + '}'
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return self.delimiter
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
####################################################################
# NOTE: Everything below here is deprecated. Use string methods instead.
# This stuff will go away in Python 3.0.
# Backward compatible names for exceptions
# All alias ValueError: the wrapped str methods raise it on failure.
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
    """lower(s) -> string
    Return a copy of the string s converted to lowercase.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.lower()
# Convert lower case letters to UPPER CASE
def upper(s):
    """upper(s) -> string
    Return a copy of the string s converted to uppercase.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.upper()
# Swap lower case letters and UPPER CASE
def swapcase(s):
    """swapcase(s) -> string
    Return a copy of the string s with upper case characters
    converted to lowercase and vice versa.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.swapcase()
# Strip leading and trailing tabs and spaces
def strip(s, chars=None):
    """strip(s [,chars]) -> string
    Return a copy of the string s with leading and trailing
    whitespace removed.
    If chars is given and not None, remove characters in chars instead.
    If chars is unicode, S will be converted to unicode before stripping.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.strip(chars)
# Strip leading tabs and spaces
def lstrip(s, chars=None):
    """lstrip(s [,chars]) -> string
    Return a copy of the string s with leading whitespace removed.
    If chars is given and not None, remove characters in chars instead.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.lstrip(chars)
# Strip trailing tabs and spaces
def rstrip(s, chars=None):
    """rstrip(s [,chars]) -> string
    Return a copy of the string s with trailing whitespace removed.
    If chars is given and not None, remove characters in chars instead.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.rstrip(chars)
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
    """split(s [,sep [,maxsplit]]) -> list of strings
    Return a list of the words in the string s, using sep as the
    delimiter string.  If maxsplit is given, splits at no more than
    maxsplit places (resulting in at most maxsplit+1 words).  If sep
    is not specified or is None, any whitespace string is a separator.
    (split and splitfields are synonymous)
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.split(sep, maxsplit)
splitfields = split  # historical alias
# Split a string into a list of space/tab-separated words
def rsplit(s, sep=None, maxsplit=-1):
    """rsplit(s [,sep [,maxsplit]]) -> list of strings
    Return a list of the words in the string s, using sep as the
    delimiter string, starting at the end of the string and working
    to the front.  If maxsplit is given, at most maxsplit splits are
    done. If sep is not specified or is None, any whitespace string
    is a separator.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.rsplit(sep, maxsplit)
# Join fields with optional separator
def join(words, sep = ' '):
    """join(list [,sep]) -> string
    Return a string composed of the words in list, with
    intervening occurrences of sep.  The default separator is a
    single space.
    (joinfields and join are synonymous)
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return sep.join(words)
joinfields = join  # historical alias
# Find substring, raise exception if not found
def index(s, *args):
    """index(s, sub [,start [,end]]) -> int
    Like find but raises ValueError when the substring is not found.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.index(*args)
# Find last substring, raise exception if not found
def rindex(s, *args):
    """rindex(s, sub [,start [,end]]) -> int
    Like rfind but raises ValueError when the substring is not found.
    """
    # Deprecated thin wrapper; kept for pre-Python-1.6 compatibility.
    return s.rindex(*args)
# Count non-overlapping occurrences of substring
def count(s, *args):
    """count(s, sub[, start[,end]]) -> int

    Count how many times substring sub occurs (without overlap) inside
    s[start:end].  The optional start and end arguments follow slice
    notation.
    """
    occurrences = s.count(*args)
    return occurrences
# Find substring, return -1 if not found
def find(s, *args):
    """find(s, sub [,start [,end]]) -> int

    Return the lowest index in s at which substring sub is found, such
    that sub lies entirely within s[start:end].  The optional start and
    end arguments follow slice notation.  Returns -1 when sub does not
    occur.
    """
    position = s.find(*args)
    return position
# Find last substring, return -1 if not found
def rfind(s, *args):
    """rfind(s, sub [,start [,end]]) -> int

    Return the highest index in s at which substring sub is found, such
    that sub lies entirely within s[start:end].  The optional start and
    end arguments follow slice notation.  Returns -1 when sub does not
    occur.
    """
    position = s.rfind(*args)
    return position
# for a bit of speed
# Cache the builtin conversion constructors under module-private names so
# the ato* wrappers below use fast local-module lookups and keep working
# even if a caller shadows the builtins.  NOTE: ``long`` exists only on
# Python 2; this module is Python-2-only.
_float = float
_int = int
_long = long
# Convert string to float
def atof(s):
    """atof(s) -> float

    Return the floating point number represented by the string s.
    Raises ValueError when s does not parse as a float.
    """
    # Delegates to the builtin cached in _float above.
    return _float(s)
# Convert string to integer
def atoi(s , base=10):
    """atoi(s [,base]) -> int

    Return the integer represented by the string s in the given
    base, which defaults to 10.  The string s must consist of one
    or more digits, possibly preceded by a sign.  If base is 0, it
    is chosen from the leading characters of s, 0 for octal, 0x or
    0X for hexadecimal.  If base is 16, a preceding 0x or 0X is
    accepted.  Raises ValueError for malformed input.
    """
    # Delegates to the builtin cached in _int above.
    return _int(s, base)
# Convert string to long integer
def atol(s, base=10):
    """atol(s [,base]) -> long

    Return the long integer represented by the string s in the
    given base, which defaults to 10.  The string s must consist
    of one or more digits, possibly preceded by a sign.  If base
    is 0, it is chosen from the leading characters of s, 0 for
    octal, 0x or 0X for hexadecimal.  If base is 16, a preceding
    0x or 0X is accepted.  A trailing L or l is not accepted,
    unless base is 0.
    """
    # Delegates to the Python 2 ``long`` builtin cached in _long above.
    return _long(s, base)
# Left-justify a string
def ljust(s, width, *args):
    """ljust(s, width[, fillchar]) -> string

    Pad s on the right out to the requested field width and return the
    result; s is never truncated.  The optional fillchar replaces the
    default space padding.
    """
    padded = s.ljust(width, *args)
    return padded
# Right-justify a string
def rjust(s, width, *args):
    """rjust(s, width[, fillchar]) -> string

    Pad s on the left out to the requested field width and return the
    result; s is never truncated.  The optional fillchar replaces the
    default space padding.
    """
    padded = s.rjust(width, *args)
    return padded
# Center a string
def center(s, width, *args):
    """center(s, width[, fillchar]) -> string

    Return s centered in a field of the given width, padded on both
    sides as needed; s is never truncated.  The optional fillchar
    replaces the default space padding.
    """
    centered = s.center(width, *args)
    return centered
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
    """zfill(x, width) -> string

    Pad a numeric string x with zeros on the left, to fill a field
    of the specified width.  The string x is never truncated.
    """
    # Non-string arguments (ints, longs, floats) are converted with
    # repr() first so they can be zero-filled.  ``basestring`` makes
    # this Python-2-only; the sign-aware padding is done by str.zfill.
    if not isinstance(x, basestring):
        x = repr(x)
    return x.zfill(width)
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
    """expandtabs(s [,tabsize]) -> string

    Replace every tab character in s with enough spaces to reach the
    next multiple of tabsize (default 8), tracking the current column
    across the string, and return the result.
    """
    expanded = s.expandtabs(tabsize)
    return expanded
# Character translation through look-up table.
def translate(s, table, deletions=""):
    """translate(s,table [,deletions]) -> string

    Return a copy of the string s, where all characters occurring
    in the optional argument deletions are removed, and the
    remaining characters have been mapped through the given
    translation table, which must be a string of length 256.  The
    deletions argument is not allowed for Unicode strings.
    """
    if deletions:
        # Two-argument str.translate is a byte-string (Python 2) API:
        # characters in `deletions` are dropped before mapping.
        return s.translate(table, deletions)
    else:
        # Add s[:0] so that if s is Unicode and table is an 8-bit string,
        # table is converted to Unicode. This means that table *cannot*
        # be a dictionary -- for that feature, use u.translate() directly.
        return s.translate(table + s[:0])
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
    """capitalize(s) -> string

    Return a copy of s whose first character is upper-cased and whose
    remaining characters are lower-cased.
    """
    capitalized = s.capitalize()
    return capitalized
# Substring replacement (global)
def replace(s, old, new, maxsplit=-1):
    """replace (str, old, new[, maxsplit]) -> string

    Return a copy of s in which occurrences of substring old are
    replaced by new.  When maxsplit is given, only the first maxsplit
    occurrences are replaced; otherwise all of them are.
    """
    replaced = s.replace(old, new, maxsplit)
    return replaced
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
#try:
# from strop import maketrans, lowercase, uppercase, whitespace
# letters = lowercase + uppercase
#except ImportError:
# pass # Use the original versions
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import os
import re
import numpy as np
import platform
import time
import operator
from EmeraldAI.Config.Config import Config
from EmeraldAI.Logic.Modules import Global
from EmeraldAI.Logic.Singleton import Singleton
from EmeraldAI.Logic.Logger import FileLogger
from EmeraldAI.Logic.Modules import Hashing
class DetectionSettings(object):
    """Value object bundling the Haar-cascade detection parameters.

    Holds the scaleFactor, minNeighbors and minSize values that are
    forwarded to cv2.CascadeClassifier.detectMultiScale.
    """
    def __init__(self, scale, minNeighbors, minSize):
        # Stored under the same public attribute names callers read.
        self.MinSize = minSize
        self.MinNeighbors = minNeighbors
        self.Scale = scale
class ComputerVision(object):
__metaclass__ = Singleton
def __init__(self):
self.__ModelFile = "myCVModel{0}.mdl"
self.__DictionaryFile = "myCVDict{0}.npy"
if(Config().Get("ComputerVision", "DetectionSettings") == "precise"):
self.__DetectionSettings = DetectionSettings(1.2, 4, (80, 80))
self.__FaceDetectionSettings = DetectionSettings(1.1, 4, (30, 30))
self.__FastDetection = False
self.__SkipUnlikelyClassifier = False
else:
self.__DetectionSettings = DetectionSettings(1.3, 4, (100, 100))
self.__FaceDetectionSettings = DetectionSettings(1.2, 4, (50, 50))
self.__SkipUnlikelyClassifier = True
if(Config().Get("ComputerVision", "DetectionSettings") == "medium"):
self.__FastDetection = False
else:
self.__FastDetection = True
self.__DatasetBasePath = os.path.join(Global.EmeraldPath, "Data", "ComputerVisionData")
self.__TempCVFolder = "Temp"
self.__DisabledFileFolder = "Disabled"
self.__UnknownUserTag = Config().Get("ComputerVision", "UnknownUserTag") # Unknown
self.__NotKnownDataTag = Config().Get("ComputerVision", "NotKnownDataTag") # NotKnown
self.__ImageLimit = Config().GetInt("ComputerVision", "ImageLimit") # 100
self.__ImageSuffix = Config().GetBoolean("ComputerVision", "ImageSuffix") # True
self.__ResizeWidth = Config().GetInt("ComputerVision", "ImageSizeWidth") # 350
self.__ResizeHeight = Config().GetInt("ComputerVision", "ImageSizeHeight") # 350
self.__PredictionTimeout = Config().GetInt("ComputerVision.Prediction", "PredictionTimeout") # 5
self.__PredictStreamTimeoutDate = 0
self.__PredictStreamTimeoutBool = False
self.__PredictStreamLuckyShot = True
self.__PredictStreamMaxDistance = Config().GetInt("ComputerVision.Prediction", "MaxPredictionDistance") # 500
self.__PredictStreamResult = {}
self.__haarDir = os.path.join(Global.EmeraldPath, "Data", "HaarCascades")
self.__frontalFace = cv2.CascadeClassifier(os.path.join(self.__haarDir, "haarcascade_frontalface_default.xml"))
self.__frontalFace2 = cv2.CascadeClassifier(os.path.join(self.__haarDir, "haarcascade_frontalface_alt.xml"))
self.__frontalFace3 = cv2.CascadeClassifier(os.path.join(self.__haarDir, "haarcascade_frontalface_alt2.xml"))
self.__frontalFace4 = cv2.CascadeClassifier(os.path.join(self.__haarDir, "haarcascade_frontalface_alt_tree.xml"))
self.__frontalFace5 = cv2.CascadeClassifier(os.path.join(self.__haarDir, "haarcascade_profileface.xml"))
self.__fullBody = cv2.CascadeClassifier(os.path.join(self.__haarDir, "haarcascade_fullbody.xml"))
self.__upperBody = cv2.CascadeClassifier(os.path.join(self.__haarDir, "haarcascade_upperbody.xml"))
self.__headShoulders = cv2.CascadeClassifier(os.path.join(self.__haarDir, "haarcascade_head_shoulders.xml"))
if(Config().Get("ComputerVision", "Recognizer") == "FisherFace"):
try:
self.__RecognizerModel = cv2.createFisherFaceRecognizer()
except:
try:
self.__RecognizerModel = cv2.face.createFisherFaceRecognizer()
except:
self.__RecognizerModel = cv2.face.FisherFaceRecognizer_create()
else:
try:
self.__RecognizerModel = cv2.createLBPHFaceRecognizer()
except:
try:
self.__RecognizerModel = cv2.face.createLBPHFaceRecognizer()
except:
self.__RecognizerModel = cv2.face.LBPHFaceRecognizer_create()
self.__RecognizerDictionary = {}
def __toGrayscale(self, img):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
gray = cv2.equalizeHist(gray)
return gray
def __cropImage(self, img, face):
x, y, w, h = [result for result in face]
return img[y:y+h,x:x+w]
def __saveImg(self, img, datasetName, imageType, fileName, prefix=None):
try:
Global.EnsureDirectoryExists(os.path.join(self.__DatasetBasePath, datasetName))
Global.EnsureDirectoryExists(os.path.join(self.__DatasetBasePath, datasetName, imageType))
if prefix is not None:
fileName = "{0}_{1}".format(prefix, fileName)
if self.__ImageSuffix:
hashValue = Hashing.GenHash()
fileName = "{0}_{1}".format(fileName, hashValue)
fileName = "{0}.{1}".format(fileName, ".jpg")
cv2.imwrite(os.path.join(self.__DatasetBasePath, datasetName, imageType, fileName), img) #Write image
except:
pass #If error, pass file
def __loadImages(self, datasetName, imageSize=None):
trainingData = []
trainingLabels = []
trainingLabelsDict = {}
for dirname, dirnames, _ in os.walk(os.path.join(self.__DatasetBasePath, datasetName)):
for subdirname in dirnames:
if imageSize != None and not subdirname.startswith(imageSize):
continue
if subdirname == self.__DisabledFileFolder:
continue
subjectPath = os.path.join(dirname, subdirname)
for filename in os.listdir(subjectPath):
if(not filename.startswith('.') and filename != self.__DisabledFileFolder):
try:
image = cv2.imread(os.path.join(subjectPath, filename), cv2.IMREAD_GRAYSCALE)
trainingData.append(image)
trimmedSubdirname = subdirname.replace(imageSize, "")
if (trimmedSubdirname not in trainingLabelsDict):
trainingLabelsDict[trimmedSubdirname] = len(trainingLabelsDict)
labelID = trainingLabelsDict[trimmedSubdirname]
trainingLabels.append(labelID)
except IOError, (errno, strerror):
FileLogger().Error("ComputerVision: IO Exception: {0}{1}".format(errno, strerror))
except Exception as e:
FileLogger().Error("ComputerVision: Exception: {0}".format(e))
return trainingData, np.asarray(trainingLabels), trainingLabelsDict
def __getHighestImageID(self, datasetName, imageType):
maxImgNum = 0
for _, _, filenames in os.walk(os.path.join(self.__DatasetBasePath, datasetName, imageType)):
for f in filenames:
tmpNum = re.findall(r'\d+|$', f)[0]
if(len(tmpNum) > 0 and int(tmpNum) > maxImgNum):
maxImgNum = int(tmpNum)
return int(maxImgNum)
def __thresholdReached(self, threshold):
if len(self.__PredictStreamResult) > 0:
for _, resultSet in self.__PredictStreamResult.iteritems():
maxKey = max(resultSet.iteritems(), key=operator.itemgetter(1))[0]
if maxKey != self.__UnknownUserTag and threshold < resultSet[maxKey]:
return True
return False
def __addPrediction(self, id, key, distance):
if(self.__PredictStreamResult.has_key(id)):
if(self.__PredictStreamResult[id].has_key(key)):
self.__PredictStreamResult[id][key] += (self.__PredictStreamMaxDistance - distance) / 10
else:
self.__PredictStreamResult[id][key] = (self.__PredictStreamMaxDistance - distance) / 10
else:
self.__PredictStreamResult[id] = {}
self.__PredictStreamResult[id][key] = (self.__PredictStreamMaxDistance - distance) / 10
def __getSortedListDir(self, path):
if platform.system() == 'Windows':
mtime = lambda f: os.stat(os.path.join(path, f)).st_ctime
else:
mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=mtime))
def __disableFile(self, filePath, fileName):
Global.EnsureDirectoryExists(os.path.join(filePath, self.__DisabledFileFolder))
os.rename(os.path.join(filePath, fileName), os.path.join(filePath, self.__DisabledFileFolder, fileName))
def LimitImagesInFolder(self, datasetName, amount=None):
if amount is None:
amount = self.__ImageLimit
amount += 2 # add one for 'Disabled' folder and one for eventual hidden file
for dirname, dirnames, _ in os.walk(os.path.join(self.__DatasetBasePath, datasetName)):
for subdirname in dirnames:
if subdirname == self.__DisabledFileFolder:
continue
subjectPath = os.path.join(dirname, subdirname)
dirContent = self.__getSortedListDir(subjectPath)
if len(dirContent) > amount:
filesToDeactivate = len(dirContent) - amount
for filename in dirContent:
if(not filename.startswith('.')):
if filesToDeactivate > 0:
self.__disableFile(subjectPath, filename)
filesToDeactivate -= 1
else:
continue
def GetLuma(self, img):
averageColor = np.average(np.average(img, axis=0), axis=0) # averageColor: BGR!!!
return (0.299 * averageColor[2] + 0.587 * averageColor[1] + 0.114 * averageColor[0])
def DetectBody(self, img):
bodies = self.__headShoulders.detectMultiScale(img, scaleFactor=self.__DetectionSettings.Scale, minNeighbors=self.__DetectionSettings.MinNeighbors, minSize=self.__DetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if len(bodies) > 0:
return bodies
if not self.__SkipUnlikelyClassifier:
bodies = self.__fullBody.detectMultiScale(img, scaleFactor=self.__DetectionSettings.Scale, minNeighbors=self.__DetectionSettings.MinNeighbors, minSize=self.__DetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if len(bodies) > 0:
return bodies
bodies = self.__upperBody.detectMultiScale(img, scaleFactor=self.__DetectionSettings.Scale, minNeighbors=self.__DetectionSettings.MinNeighbors, minSize=self.__DetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if len(bodies) > 0:
return bodies
return []
def DetectFaceFast(self, img):
face = self.__frontalFace.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if len(face) > 0:
return face
face5 = self.__frontalFace5.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if len(face5) > 0:
return face
if not self.__SkipUnlikelyClassifier:
face2 = self.__frontalFace2.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if len(face2) > 0:
return face2
face3 = self.__frontalFace3.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if len(face3) > 0:
return face3
face4 = self.__frontalFace4.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if len(face4) > 0:
return face4
return []
def DetectFaceBest(self, img):
face = self.__frontalFace.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
face5 = self.__frontalFace5.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
if not self.__SkipUnlikelyClassifier:
face2 = self.__frontalFace2.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
face3 = self.__frontalFace3.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
face4 = self.__frontalFace4.detectMultiScale(img, scaleFactor=self.__FaceDetectionSettings.Scale, minNeighbors=self.__FaceDetectionSettings.MinNeighbors, minSize=self.__FaceDetectionSettings.MinSize, flags=cv2.CASCADE_SCALE_IMAGE)
bestResult = face
if (len(bestResult) < len(face5)):
bestResult = face5
if not self.__SkipUnlikelyClassifier:
if (len(bestResult) < len(face2)):
bestResult = face2
if (len(bestResult) < len(face3)):
bestResult = face3
if (len(bestResult) < len(face4)):
bestResult = face4
return bestResult
def TrainModel(self, datasetName, imageSize=None):
if imageSize is None:
imageSize = "{0}x{1}".format(self.__ResizeWidth, self.__ResizeHeight)
images, labels, labelDict = self.__loadImages(datasetName, imageSize)
if len(images) == 0 or len(labels) == 0:
FileLogger().Error("ComputerVision: No Data given")
return
self.__RecognizerModel.train(images, labels)
self.__RecognizerDictionary = labelDict
path = os.path.join(self.__DatasetBasePath, datasetName)
try:
self.__RecognizerModel.save(os.path.join(path, self.__ModelFile.format(imageSize)))
except:
self.__RecognizerModel.write(os.path.join(path, self.__ModelFile.format(imageSize)))
np.save(os.path.join(path, self.__DictionaryFile.format(imageSize)), labelDict)
def LoadModel(self, datasetName, imageSize=None):
if imageSize is None:
imageSize = "{0}x{1}".format(self.__ResizeWidth, self.__ResizeHeight)
path = os.path.join(self.__DatasetBasePath, datasetName)
try:
try:
self.__RecognizerModel.load(os.path.join(path, self.__ModelFile.format(imageSize)))
except:
self.__RecognizerModel.read(os.path.join(path, self.__ModelFile.format(imageSize)))
self.__RecognizerDictionary = np.load(os.path.join(path, self.__DictionaryFile.format(imageSize))).item()
return self.__RecognizerModel, self.__RecognizerDictionary
except Exception as e:
FileLogger().Error("ComputerVision: Exception: Error while opening File {0}".format(e))
return None, None
def TakeImage(self, image, imageType, dataArray=None, datasetName=None, grayscale=False, prefix=None):
if datasetName is None:
datasetName = self.__TempCVFolder
if(dataArray is None):
if grayscale:
image = self.__toGrayscale(image)
fileName = str(self.__getHighestImageID(datasetName, imageType) + 1)
self.__saveImg(image, datasetName, imageType, fileName, prefix)
return True
if len(dataArray) > 0:
for imageData in dataArray:
croppedImage = self.__cropImage(image, imageData)
if grayscale:
croppedImage = self.__toGrayscale(croppedImage)
resizedImage = cv2.resize(croppedImage, (self.__ResizeWidth, self.__ResizeHeight))
fileName = str(self.__getHighestImageID(datasetName, imageType) + 1)
self.__saveImg(resizedImage, datasetName, imageType, fileName, prefix)
return True
return False
def Predict(self, image, predictionObjectList):
if(self.__FastDetection):
faces = self.DetectFaceFast(image)
else:
faces = self.DetectFaceBest(image)
result = []
if len(faces) > 0:
faceId = 1
for face in faces:
croppedImage = self.__cropImage(image, face)
resizedImage = cv2.resize(self.__toGrayscale(croppedImage), (self.__ResizeWidth, self.__ResizeHeight))
predictionResult = []
for predictionObject in predictionObjectList:
prediction = None
if predictionObject.Model != None:
prediction = predictionObject.Model.predict(resizedImage)
try:
if prediction != None:
predictionResult.append({
'model': predictionObject.Name,
'value': predictionObject.Dictionary.keys()[predictionObject.Dictionary.values().index(prediction[0])],
'rawvalue': prediction[0],
'distance': prediction[1]
})
except Exception as e:
FileLogger().Error("ComputerVision: Value Error {0}".format(e))
result.append({
'face': {
'id': faceId,
'data': predictionResult,
'coords': {
'x': str(face[0]),
'y': str(face[1]),
'width': str(face[2]),
'height': str(face[3])
}
}
})
faceId += 1
return result, faces
def PredictStream(self, image, predictionObjectList, timeout=None):
if timeout is None:
timeout = self.__PredictionTimeout
# reset is timeout happened on last call
if self.__PredictStreamTimeoutBool:
self.__PredictStreamTimeoutDate = time.time() + timeout
self.__PredictStreamTimeoutBool = False
self.__PredictStreamLuckyShot = True
for predictionObject in predictionObjectList:
predictionObject.ResetResult()
#check if current call times out
if time.time() > self.__PredictStreamTimeoutDate:
self.__PredictStreamTimeoutBool = True
prediction, rawFaceData = self.Predict(image, predictionObjectList)
luckyShot = False
for key, value in enumerate(prediction):
dataArray = value['face']['data']
for data in dataArray:
for predictionObject in predictionObjectList:
if data['model'] == predictionObject.Name:
if int(data['distance']) > predictionObject.MaxPredictionDistance or self.__NotKnownDataTag in data['value']:
predictionObject.AddPrediction(key, self.__UnknownUserTag, int(data['distance']))
else:
predictionObject.AddPrediction(key, data['value'], int(data['distance']))
if predictionObject.Name == "Person" and self.__PredictStreamLuckyShot:
luckyShot = True
self.__PredictStreamLuckyShot = False
return predictionObjectList, self.__PredictStreamTimeoutBool, luckyShot, rawFaceData
| |
# -*- coding: utf-8 -*-
"""Parsers for MacOS fseventsd files."""
from dfdatetime import semantic_time as dfdatetime_semantic_time
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.lib import specification
from plaso.parsers import dtfabric_parser
from plaso.parsers import manager
class FseventsdEventData(events.EventData):
  """MacOS file system event (fseventsd) event data.

  Attributes:
    event_identifier (int): the record event identifier.
    flags (int): flags stored in the record.
    node_identifier (int): file system node identifier related to the file
        system event.
    path (str): path recorded in the fseventsd record.
  """

  DATA_TYPE = 'macos:fseventsd:record'

  def __init__(self):
    """Initializes fseventsd event data."""
    super(FseventsdEventData, self).__init__(data_type=self.DATA_TYPE)
    self.path = None
    self.node_identifier = None
    self.flags = None
    self.event_identifier = None
class FseventsdParser(dtfabric_parser.DtFabricBaseParser):
  """Parser for fseventsd files.

  This parser supports both version 1 and version 2 fseventsd files.
  Refer to http://nicoleibrahim.com/apple-fsevents-forensics/ for details.
  """

  NAME = 'fseventsd'
  DATA_FORMAT = 'MacOS File System Events Disk Log Stream (fseventsd) file'

  # The version 1 format was used in Mac OS X 10.5 (Leopard) through macOS 10.12
  # (Sierra).
  _DLS_V1_SIGNATURE = b'1SLD'

  # The version 2 format was introduced in MacOS High Sierra (10.13).
  _DLS_V2_SIGNATURE = b'2SLD'

  # dtfabric definition file describing the page header and record layouts.
  _DEFINITION_FILE = 'fseventsd.yaml'

  @classmethod
  def GetFormatSpecification(cls):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: format specification.
    """
    format_specification = specification.FormatSpecification(cls.NAME)
    # Both file versions carry their 4-byte signature at offset 0.
    format_specification.AddNewSignature(cls._DLS_V1_SIGNATURE, offset=0)
    format_specification.AddNewSignature(cls._DLS_V2_SIGNATURE, offset=0)
    return format_specification

  def _ParseDLSPageHeader(self, file_object, page_offset):
    """Parses a DLS page header from a file-like object.

    Args:
      file_object (file): file-like object to read the header from.
      page_offset (int): offset of the start of the page header, relative
          to the start of the file.

    Returns:
      tuple: containing:
        dls_page_header: parsed record structure.
        int: header size.

    Raises:
      ParseError: when the header cannot be parsed.
    """
    page_header_map = self._GetDataTypeMap('dls_page_header')
    try:
      # The second tuple member is the number of bytes consumed by the
      # header structure, not the page size stored inside the header.
      page_header, page_size = self._ReadStructureFromFileObject(
          file_object, page_offset, page_header_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse page header at offset: 0x{0:08x} '
          'with error: {1!s}'.format(page_offset, exception))
    return page_header, page_size

  def _BuildEventData(self, record):
    """Builds an FseventsdData object from a parsed structure.

    Args:
      record (dls_record_v1|dls_record_v2): parsed record structure.

    Returns:
      FseventsdEventData: event data attribute container.
    """
    event_data = FseventsdEventData()
    event_data.path = record.path
    event_data.flags = record.event_flags
    event_data.event_identifier = record.event_identifier
    # Node identifier is only set in DLS V2 records.
    event_data.node_identifier = getattr(record, 'node_identifier', None)
    return event_data

  def _GetParentModificationTime(self, gzip_file_entry):
    """Retrieves the modification time of the file entry's parent file.

    Note that this retrieves the time from the file entry of the parent of the
    gzip file entry's path spec, which is different from trying to retrieve it
    from the gzip file entry's parent file entry.

    It would be preferable to retrieve the modification time from the metadata
    in the gzip file itself, but it appears to not be set when the file is
    written by fseventsd.

    Args:
      gzip_file_entry (dfvfs.FileEntry): file entry of the gzip file containing
          the fseventsd data.

    Returns:
      dfdatetime.DateTimeValues: parent modification time, or None if not
          available.
    """
    parent_file_entry = path_spec_resolver.Resolver.OpenFileEntry(
        gzip_file_entry.path_spec.parent)
    if not parent_file_entry:
      return None
    return parent_file_entry.modification_time

  def ParseFileObject(self, parser_mediator, file_object):
    """Parses an fseventsd file.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      UnableToParseFile: when the header cannot be parsed.
    """
    page_header_map = self._GetDataTypeMap('dls_page_header')
    try:
      # file_offset is advanced by the number of bytes each structure read
      # consumes; here it lands just past the first page header.
      page_header, file_offset = self._ReadStructureFromFileObject(
          file_object, 0, page_header_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.UnableToParseFile(
          'Unable to parse page header with error: {0!s}'.format(
              exception))
    # Absolute offset at which the current page's records end.
    current_page_end = page_header.page_size
    file_entry = parser_mediator.GetFileEntry()
    date_time = self._GetParentModificationTime(file_entry)
    # TODO: Change this to use a more representative time definition (time span)
    # when https://github.com/log2timeline/dfdatetime/issues/65 is resolved.
    if date_time:
      timestamp_description = definitions.TIME_DESCRIPTION_RECORDED
    else:
      date_time = dfdatetime_semantic_time.NotSet()
      timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
    # All records in the file share the same (parent-derived) timestamp,
    # so a single event object is reused for every record's event data.
    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
    file_size = file_object.get_size()
    while file_offset < file_size:
      # Crossing a page boundary: read the next page header before records.
      if file_offset >= current_page_end:
        try:
          page_header, header_size = self._ParseDLSPageHeader(
              file_object, file_offset)
        except errors.ParseError as exception:
          parser_mediator.ProduceExtractionWarning(
              'Unable to parse page header with error: {0!s}'.format(
                  exception))
          break
        current_page_end += page_header.page_size
        file_offset += header_size
        continue
      # The record layout depends on the signature of the current page.
      if page_header.signature == self._DLS_V1_SIGNATURE:
        record_map = self._GetDataTypeMap('dls_record_v1')
      else:
        record_map = self._GetDataTypeMap('dls_record_v2')
      try:
        record, record_length = self._ReadStructureFromFileObject(
            file_object, file_offset, record_map)
        file_offset += record_length
      except (ValueError, errors.ParseError) as exception:
        parser_mediator.ProduceExtractionWarning(
            'Unable to parse page record with error: {0!s}'.format(
                exception))
        break
      event_data = self._BuildEventData(record)
      parser_mediator.ProduceEventWithEventData(event, event_data)


manager.ParsersManager.RegisterParser(FseventsdParser)
| |
"""
Cache configuration.
This works in conjunction with dogpile.cache_ to provide caching for any Weasyl
project.
.. _dogpile.cache: http://dogpilecache.readthedocs.org/en/latest/
"""
import json
import threading
import dogpile.cache
import dogpile.cache.backends.memcached
import pylibmc
from dogpile.cache.api import CachedValue, NO_VALUE
from dogpile.cache.proxy import ProxyBackend
from dogpile.cache import make_region
region = make_region()
class ThreadCacheProxy(ProxyBackend):
    """
    A thread-local read-through cache in front of another dogpile backend.

    Every value fetched from (or written to) the proxied backend is mirrored
    in a per-thread dict, so repeated lookups from the same thread skip the
    memcached round trip entirely.

    Because the local mirror can go stale, it must be dropped periodically
    for changes in memcached to reach the application: call
    :py:meth:`.zap_cache` — for example at the end of an HTTP request's
    lifetime — to clear the current thread's copy.
    """

    _local = threading.local()

    @classmethod
    def zap_cache(cls):
        """
        Drop the cache dict belonging to the current thread.

        A thread that never cached anything is left untouched.
        """
        try:
            del cls._local.cache_dict
        except AttributeError:
            pass

    @property
    def _dict(self):
        """
        Return this thread's cache dict, creating it on first access.

        Returns:
            dict: The cache dict.
        """
        if not hasattr(self._local, 'cache_dict'):
            self._local.cache_dict = {}
        return self._local.cache_dict

    def get(self, key):
        """
        Proxy a ``get`` call.

        A hit in the thread-local dict is returned immediately. On a miss
        the proxied backend is consulted, and any real value (i.e. not
        :py:data:`~dogpile.cache.api.NO_VALUE`) is remembered locally before
        being returned.

        Parameters:
            key: A :term:`native string`.

        Returns:
            Some value, or :py:data:`~dogpile.cache.api.NO_VALUE` if the
            proxied backend returned that instead of a value.
        """
        local = self._dict
        if key in local:
            return local[key]
        value = self.proxied.get(key)
        if value is not NO_VALUE:
            local[key] = value
        return value

    def get_multi(self, keys):
        """
        Proxy a ``get_multi`` call.

        Behaves like :py:meth:`.get` applied to each key: locally known
        values are used as-is, the remaining keys are fetched from the
        proxied backend in one batch, and real results are mirrored locally.

        Parameters:
            keys: A list of :term:`native string` objects.

        Returns:
            list: The values corresponding to the *keys*.
        """
        local = self._dict
        results = [local.get(key, NO_VALUE) for key in keys]
        misses = [
            (position, key)
            for position, key in enumerate(keys)
            if results[position] is NO_VALUE]
        if not misses:
            return results
        fetch_keys = [key for _, key in misses]
        fetched = self.proxied.get_multi(fetch_keys)
        for (position, key), value in zip(misses, fetched):
            if value is not NO_VALUE:
                local[key] = results[position] = value
        return results

    def set(self, key, value):
        """
        Proxy a ``set`` call.

        The write goes to the proxied backend and is mirrored in the
        thread-local dict.

        Parameters:
            key: A :term:`native string`.
            value: Some object.
        """
        self._dict[key] = value
        self.proxied.set(key, value)

    def set_multi(self, pairs):
        """
        Proxy a ``set_multi`` call.

        Like :py:meth:`.set`, but *pairs* maps several keys to values at
        once.

        Parameters:
            pairs (dict): A mapping :term:`native string` of objects to any
                objects.
        """
        self._dict.update(pairs)
        self.proxied.set_multi(pairs)

    def delete(self, key):
        """
        Proxy a ``delete`` call.

        The delete goes to the proxied backend; the local mirror forgets
        *key* if it held it.

        Parameters:
            key: A :term:`native string`.
        """
        self._dict.pop(key, None)
        self.proxied.delete(key)

    def delete_multi(self, keys):
        """
        Proxy a ``delete_multi`` call.

        Like :py:meth:`.delete`, applied to every key in *keys*.

        Parameters:
            keys (list): A list of :term:`native string` objects.
        """
        local = self._dict
        for key in keys:
            local.pop(key, None)
        self.proxied.delete_multi(keys)
class JsonClient(pylibmc.Client):
    """
    A pylibmc.Client that stores only dogpile.cache entries, as JSON.
    """
    def serialize(self, value):
        # Always flag 0: entries are plain ASCII-encoded JSON, so no
        # pylibmc serialization flags are needed.
        encoded = json.dumps(value).encode('ascii')
        return encoded, 0

    def deserialize(self, bytestring, flag):
        # Stored entries are two-element [payload, metadata] JSON arrays;
        # rebuild the dogpile CachedValue from them.
        payload, metadata = json.loads(bytestring)
        return CachedValue(payload, metadata)
class JsonPylibmcBackend(dogpile.cache.backends.memcached.PylibmcBackend):
    """A dogpile.cache pylibmc backend whose client stores entries as JSON."""

    def _imports(self):
        # The parent lazily imports pylibmc here; this module already has
        # it imported, so there is nothing to do.
        pass

    def _create_client(self):
        # Same construction as the parent backend, but with the
        # JSON-serializing client class.
        return JsonClient(
            self.url,
            binary=self.binary,
            behaviors=self.behaviors,
        )

    @classmethod
    def register(cls):
        dogpile.cache.register_backend(
            'libweasyl.cache.pylibmc', 'libweasyl.cache', 'JsonPylibmcBackend')
| |
#! /usr/bin/python3
"""
This is a cleanup script for vexxhost/psi cloud.
To test openstack deployment on ovb jobs, on vexxhost/psi cloud a heat
stack is created.
Normally, Heat stack is created and destroyed by job itself.
But, sometimes due to infra related issues heat stack failed to
create/delete by job itself.
In a scenario where heat stack is not deleted by job itself, We need manual
cleanup on Infrastructure to avoid resource crunch.
Expectations from this script:-
* Check if vexx/PSI infrastructure is reachable.
* Find stack which are older than 6 hours.
* Delete stacks which are older than 6 hours.
* Find stacks which are in `CREATE_FAILED` or `DELETE_FAILED` state
* Delete stacks which are in `CREATE_FAILED` or `DELETE_FAILED` state
* Sleep(wait for stack to delete)
* If some stacks cannot be deleted - Find list of those stacks
* Extract identifier from those stack names
* Delete the individual resources which are associated to those stacks.
- Server
- port
- Subnet
- Network
- router
- Security group
* Attempt to delete the stacks again
* Sleep(wait for stack to delete)
* If all stacks are deleted - Success, if not - failure (logs with details to
reach out to infra team)
"""
import argparse
import datetime
import logging
import sys
import time
import openstack
def remove_prefix(text, prefix):
    """Return *text* with *prefix* stripped from the front, if present.

    When *text* does not start with *prefix*, it is returned unchanged.
    """
    if not text.startswith(prefix):
        return text
    return text[len(prefix):]
def remove_suffix(text, suffix):
    """Return *text* with *suffix* stripped from the end, if present.

    The empty suffix is ignored explicitly so that the slice below never
    becomes ``text[:-0]`` (which would drop everything).
    """
    if suffix and text.endswith(suffix):
        return text[:len(text) - len(suffix)]
    return text
def fetch_identifier(text, prefix, suffix):
    """ Every heat stack has a unique text(identifier) in its name;
    the identifier is used to locate resources belonging to the stack.

    Strips *prefix* and *suffix* from *text* when they are present and
    returns the remaining string (the identifier).
    """
    if text.startswith(prefix):
        text = text[len(prefix):]
    if suffix and text.endswith(suffix):
        text = text[:-len(suffix)]
    return text
def env_accessibility_check(cloud_name):
    """Return True when the cloud's identity service answers a token request."""
    connection = openstack.connect(cloud=cloud_name)
    try:
        connection.identity.get_token()
        return True
    except Exception:
        # Any failure (auth, network, ...) counts as "not reachable".
        return False
def old_heat_stacks(cloud_name, time_expired=360):
    """Return IDs of heat stacks created more than *time_expired* minutes ago."""
    conn = openstack.connect(cloud=cloud_name)
    cutoff = (datetime.datetime.now(datetime.timezone.utc)
              - datetime.timedelta(minutes=time_expired))
    # Stack creation timestamps are ISO-8601 strings, so comparing against
    # an identically formatted cutoff string orders correctly.
    cutoff_text = cutoff.strftime("%Y-%m-%dT%H:%M:%SZ")
    return [stack['id'] for stack in conn.orchestration.stacks()
            if stack['created_at'] < cutoff_text]
def stack_delete(cloud_name, stack_list, dry_run=False):
    """Delete every heat stack in *stack_list*, unless *dry_run* is set.

    A 20 second pause follows each deletion so the tenant is not
    overwhelmed by a mass delete.
    """
    conn = openstack.connect(cloud=cloud_name)
    if not stack_list:
        logger.info("There are no stack to delete")
        return
    if dry_run:
        logger.info("DRY RUN - Stack list to delete: %s", stack_list)
        return
    logger.info("These stacks will be deleted: %s", stack_list)
    for stack_id in stack_list:
        logger.info("Deleting stack id %s", stack_id)
        conn.orchestration.delete_stack(stack_id)
        time.sleep(20)
def progress_heat_stacks(cloud_name):
    """Return (ids, names) of all stacks currently in DELETE_IN_PROGRESS."""
    conn = openstack.connect(cloud=cloud_name)
    stack_ids = []
    stack_names = []
    for stack in conn.orchestration.stacks():
        if "DELETE_IN_PROGRESS" not in stack['status']:
            continue
        stack_ids.append(stack["id"])
        stack_names.append(stack["name"])
    return stack_ids, stack_names
def mark_stack_check(cloud_name, stack_list, dry_run=False):
    """Trigger a stack-check on every stack in *stack_list*, unless *dry_run*."""
    conn = openstack.connect(cloud=cloud_name)
    if not stack_list:
        logger.info("There are no stack to check")
        return
    if dry_run:
        logger.info("DRY RUN - Stack list to check: %s", stack_list)
        return
    logger.info("These stacks will be checked: %s", stack_list)
    for stack_id in stack_list:
        logger.info("Checking stack id %s", stack_id)
        conn.orchestration.check_stack(stack_id)
        time.sleep(3)
def failed_heat_stacks(cloud_name):
    """Return names of stacks whose status contains FAILED
    (i.e. CREATE_FAILED or DELETE_FAILED).
    """
    conn = openstack.connect(cloud=cloud_name)
    return [stack["name"] for stack in conn.orchestration.stacks()
            if "FAILED" in stack['status']]
def servers_with_identifier(cloud_name, identifier):
    """Return IDs of servers whose name — or one of whose attached network
    names — contains *identifier*; empty list when none match.
    """
    conn = openstack.connect(cloud=cloud_name)
    matched = []
    for server in conn.compute.servers():
        if identifier in server['name']:
            matched.append(server['id'])
            continue
        # Fall back to matching on the attached network names.
        matched.extend(server['id']
                       for network in server['addresses']
                       if identifier in network)
    return matched
def server_delete(cloud_name, server_names, dry_run=False):
    """Delete the given servers, unless *dry_run* is set."""
    conn = openstack.connect(cloud=cloud_name)
    if not server_names:
        logger.info("There are no servers to delete")
        return
    if dry_run:
        logger.info("DRY RUN - Servers to delete: %s", server_names)
        return
    for server_id in server_names:
        logger.info("Deleting server ID %s", server_id)
        conn.compute.delete_server(server_id)
def subnets_with_identifier(cloud_name, identifier):
    """Return IDs of all subnets whose name ends with *identifier*."""
    conn = openstack.connect(cloud=cloud_name)
    return [subnet['id'] for subnet in conn.network.subnets()
            if subnet['name'].endswith(identifier)]
def ports_of_subnets(cloud_name, subnet_ids_list):
    """Return IDs of every port belonging to the given subnets."""
    conn = openstack.connect(cloud=cloud_name)
    if not subnet_ids_list:
        return []
    return [port['id']
            for subnet_id in subnet_ids_list
            for port in conn.network.get_subnet_ports(subnet_id)]
def port_delete(cloud_name, port_ids, dry_run=False):
    """Delete the given ports, unless *dry_run* is set."""
    conn = openstack.connect(cloud=cloud_name)
    if not port_ids:
        logger.info("There are no ports to delete")
        return
    if dry_run:
        logger.info("DRY RUN - Ports to delete: %s", port_ids)
        return
    for port_id in port_ids:
        logger.info("Deleting port ID %s", port_id)
        conn.network.delete_port(port_id)
def networks_with_identifier(cloud_name, identifier):
    """Return IDs of networks whose name ends with *identifier*;
    empty list when none match.
    """
    conn = openstack.connect(cloud=cloud_name)
    return [network['id'] for network in conn.network.networks()
            if network['name'].endswith(identifier)]
def network_delete(cloud_name, network_ids, dry_run=False):
    """Delete the given networks, unless *dry_run* is set."""
    conn = openstack.connect(cloud=cloud_name)
    if not network_ids:
        logger.info("There are no networks to delete")
        return
    if dry_run:
        logger.info("DRY RUN - Networks to delete: %s", network_ids)
        return
    for network_id in network_ids:
        logger.info("Deleting network ID %s", network_id)
        conn.network.delete_network(network_id)
def routers_with_identifier(cloud_name, identifier):
    """Return IDs of routers whose name contains *identifier*;
    empty list when none match.
    """
    conn = openstack.connect(cloud=cloud_name)
    return [router['id'] for router in conn.network.routers()
            if identifier in router['name']]
def router_interface_delete(cloud_name, router_id, dry_run=False):
    """Detach every router interface port from the router *router_id*."""
    conn = openstack.connect(cloud=cloud_name)
    logger.info("Running interface dettach")
    interface_owners = ('network:router_interface',
                        'network:ha_router_replicated_interface')
    for port in conn.network.ports(device_id=router_id):
        if port['device_owner'] not in interface_owners:
            continue
        logger.info("Deattaching %s from %s", port['id'], router_id)
        if not dry_run:
            conn.network.remove_interface_from_router(
                router_id, port_id=port['id'])
def router_delete(cloud_name, router_ids, dry_run=False):
    """Delete the given routers, unless *dry_run* is set."""
    conn = openstack.connect(cloud=cloud_name)
    if not router_ids:
        logger.info("There are no router to delete")
        return
    if dry_run:
        logger.info("DRY RUN - Routers to delete: %s", router_ids)
        return
    for router_id in router_ids:
        logger.info("Deleting router ID %s", router_id)
        # Interfaces must be detached before the router can be removed.
        router_interface_delete(cloud_name, router_id, dry_run)
        conn.network.delete_router(router_id)
def sec_gp_with_identifier(cloud_name, identifier):
    """Return IDs of security groups whose name contains *identifier*;
    empty list when none match.
    """
    conn = openstack.connect(cloud=cloud_name)
    return [sec_group['id'] for sec_group in conn.network.security_groups()
            if identifier in sec_group['name']]
def sec_group_delete(cloud_name, sec_group_ids, dry_run=False):
    """Delete the given security groups, unless *dry_run* is set."""
    conn = openstack.connect(cloud=cloud_name)
    if not sec_group_ids:
        logger.info("There are no Security group to delete")
        return
    if dry_run:
        logger.info("DRY RUN: Security group to delete: %s", sec_group_ids)
        return
    for sec_group_id in sec_group_ids:
        logger.info("Deleting Security group with ID %s", sec_group_id)
        conn.network.delete_security_group(sec_group_id)
def delete_individual_resources(cloud_name, stack_list, prefix='baremetal_',
                                suffix="", dry_run=False):
    """Tear down the per-stack resources of every stack in *stack_list*.

    For stacks stuck in CREATE_FAILED or DELETE_FAILED state, the unique
    identifier embedded in each stack name is used to find and delete the
    associated servers, routers, ports, networks and security groups.
    """
    if stack_list == []:
        logger.info("There are no stacks to delete")
        return
    logger.info("There are stacks in CREATE_FAILED "
                "or DELETE_FAILED state - %s", stack_list)
    for stack_name in stack_list:
        # Extract the identifier shared by the stack's resources.
        logger.info("Removing individual resources which are associated "
                    "with stack %s", stack_name)
        identifier = fetch_identifier(stack_name, prefix, suffix)
        logger.info("Identifier is %s", identifier)
        server_delete(cloud_name,
                      servers_with_identifier(cloud_name, identifier),
                      dry_run)
        router_delete(cloud_name,
                      routers_with_identifier(cloud_name, identifier),
                      dry_run)
        # Ports hang off the stack's subnets; remove them before the
        # networks that contain them.
        subnet_ids = subnets_with_identifier(cloud_name, identifier)
        logger.info("Subnets to delete %s", subnet_ids)
        port_delete(cloud_name,
                    ports_of_subnets(cloud_name, subnet_ids),
                    dry_run)
        network_delete(cloud_name,
                       networks_with_identifier(cloud_name, identifier),
                       dry_run)
        sec_group_delete(cloud_name,
                         sec_gp_with_identifier(cloud_name, identifier),
                         dry_run)
def main(cloud_name, time_expired=360, dry_run=False,
         prefix="baremetal_", suffix=""):
    """ This is the main function called when script is executed.
    It first checks if cloud is accessible, then it fetches
    list of heat stack to be deleted depending on inputs.
    This function first tries to delete the heat stack, if it
    cannot delete the heat stack, it removes the associated resources of
    heat stack and tries to delete it again.
    """
    logger.info("==========================================================")
    logger.info("Starting script for cleanup on %s", cloud_name)
    # Check if vexx/PSI infrastructure is reachable.
    if env_accessibility_check(cloud_name):
        logger.info("Successfull able to talk with cloud")
    else:
        logger.info("Failed to talk with cloud, credentials should be"
                    " sourced or configured in cloud.yaml file.")
        sys.exit()
    # Find stacks which are older than time_expired and delete them.
    old_stack_list = old_heat_stacks(cloud_name, time_expired)
    logger.info("Stacks which was older than %s mins: %s",
                time_expired, old_stack_list)
    stack_delete(cloud_name, old_stack_list, dry_run)
    # Find stacks which are in `FAILED` state and delete them.
    failed_stack_list = failed_heat_stacks(cloud_name)
    logger.info("Stacks which are in CREATE_FAILED or DELETE_FAILED state"
                ": %s", failed_stack_list)
    stack_delete(cloud_name, failed_stack_list, dry_run)
    # wait for stack to delete
    if not dry_run:
        logger.info("Waiting for 150s for stacks to delete ")
        time.sleep(150)
    # ReCheck if there are stacks left in CREATE_FAILED/ DELETE_FAILED state
    logger.info("Rechecking if there are stacks left in CREATE_FAILED"
                " or DELETE_FAILED state or DELETE_IN_PROGRESS")
    # Check stack which are stuck in DELETE_IN_PROGRESS, and delete individual
    # resources before trying to delete them again.
    progress_stack_id_list, progress_stack_name_list = progress_heat_stacks(
        cloud_name)
    logger.info("Stacks which stuck in Delete %s", progress_stack_name_list)
    if progress_stack_id_list:
        # A stack check moves DELETE_IN_PROGRESS stacks into a state from
        # which deletion can be retried.
        mark_stack_check(cloud_name, progress_stack_id_list, dry_run)
        delete_individual_resources(cloud_name, progress_stack_name_list,
                                    prefix, suffix, dry_run)
        stack_delete(cloud_name, progress_stack_name_list, dry_run)
        if not dry_run:
            logger.info("wait for 150s for stack to delete")
            time.sleep(150)
    failed_stack_list = failed_heat_stacks(cloud_name)
    logger.info("Stacks which are in FAILED state %s", failed_stack_list)
    # Delete the individual resources which are associated to stacks which
    # cannot be deleted normally.
    if failed_stack_list:
        delete_individual_resources(cloud_name, failed_stack_list, prefix,
                                    suffix, dry_run)
        stack_delete(cloud_name, failed_stack_list, dry_run)
        if not dry_run:
            logger.info("wait for 150s for stack to delete")
            time.sleep(150)
    # Success/ Failure based on left over stacks
    failed_stack_list = failed_heat_stacks(cloud_name)
    if len(failed_stack_list) == 0:
        logger.info("Script ran successfully: No Heat stack in failed state")
    else:
        logger.info("Script didn't executed Successfully, Manual intervention "
                    "needed for following stacks %s", failed_stack_list)
if __name__ == '__main__':
    # Command-line interface: every option has a default so the script can
    # run unattended (e.g. from CI), except --cloud-name which selects the
    # clouds.yaml entry to use.
    parser = argparse.ArgumentParser(description="OVB Stale resources cleanup"
                                     " script for vexxhost/psi cloud")
    parser.add_argument('-t',
                        '--time-expired',
                        type=int,
                        metavar='',
                        default=360,
                        help='Time, in minutes, a stack has been running when '
                        'it will be deleted. Defaults to 360 minutes(6 hours)')
    parser.add_argument('-d',
                        '--dry-run',
                        action='store_true',
                        help='Do not delete any stacks or resources. '
                        'Print out the resources that would be deleted. '
                        'This option is off by default"')
    parser.add_argument('-p',
                        '--prefix',
                        metavar='',
                        default='baremetal_',
                        help='Stack name prefix added before the stack unique '
                        'identifer. Default is "baremetal_" ')
    parser.add_argument('-s',
                        '--suffix',
                        metavar='',
                        default='',
                        help='Stack name suffix added after the stack unique '
                        'identifer. Default is an empty string.')
    parser.add_argument('-c',
                        '--cloud-name',
                        metavar='',
                        help='OpenStack Cloud name to connect to, It is '
                        'expected that you have a clouds.yaml file which '
                        'contains auth info about the cloud name you passes '
                        'here. File clouds.yaml will be looked i.e in the '
                        'current directory, $HOME/.config/openstack or '
                        '/etc/openstack')
    parser.add_argument('-n',
                        '--stack',
                        metavar='',
                        default='',
                        help='Individual stack to delete ')
    parser.add_argument('-l',
                        '--logfile',
                        metavar='',
                        default='clean_stacks.log.txt',
                        help='Log file path ')
    args = parser.parse_args()
    # Log to both the console and a file, with UTC timestamps.
    logger = logging.getLogger(__name__)
    logger.setLevel(level=logging.INFO)
    formatter = logging.Formatter('%(asctime)s:%(message)s')
    formatter.converter = time.gmtime
    file_handler = logging.FileHandler(args.logfile)
    file_handler.setFormatter(formatter)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)
    # With --stack, clean up exactly that one stack; otherwise run the
    # full cleanup sweep.
    if args.stack:
        delete_individual_resources(cloud_name=args.cloud_name,
                                    stack_list=[args.stack],
                                    prefix=args.prefix,
                                    suffix=args.suffix,
                                    dry_run=args.dry_run)
        stack_delete(cloud_name=args.cloud_name,
                     stack_list=[args.stack],
                     dry_run=args.dry_run)
    else:
        main(time_expired=args.time_expired,
             prefix=args.prefix,
             suffix=args.suffix,
             dry_run=args.dry_run,
             cloud_name=args.cloud_name)
| |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from conary import conarycfg
from conary.deps import deps
from conary.lib import log
from conary.lib import options
from rmake import compat, errors
from rmake.compat import cvccmd as cvc
from rmake.cmdline import query
# Short aliases for the conary option-parameter type constants.
(NO_PARAM, ONE_PARAM) = (options.NO_PARAM, options.ONE_PARAM)
(OPT_PARAM, MULT_PARAM) = (options.OPT_PARAM, options.MULT_PARAM)
(NORMAL_HELP, VERBOSE_HELP) = (options.NORMAL_HELP, options.VERBOSE_HELP)
# Command-group headings used to organize the --help output.
CG_MISC = 'Miscellaneous Commands'
CG_BUILD = 'Job Manipulation'
CG_INFO = 'Information Display'
# helper function to get list of commands we support
_commands = []
def register(cmd):
_commands.append(cmd)
class rMakeCommand(options.AbstractCommand):
    # Base class for all rmake command-line commands: supplies the shared
    # option group (config overrides, config files, context selection,
    # verbosity) and helpers for applying those options and for validating
    # positional parameters.
    defaultGroup = 'Common Options'
    commandGroup = CG_MISC
    # Help strings for the shared options; VERBOSE_HELP entries are only
    # shown in the verbose help listing.
    docs = {'config' : (VERBOSE_HELP,
                        "Set config KEY to VALUE", "'KEY VALUE'"),
            'server-config' : (VERBOSE_HELP,
                        "Set server config KEY to VALUE", "'KEY VALUE'"),
            'config-file' : (VERBOSE_HELP,
                        "Read PATH config file", "PATH"),
            'context' : (VERBOSE_HELP,
                        "Set the configuration context to use"),
            'server-config-file' : (VERBOSE_HELP,
                        "Read PATH config file", "PATH"),
            'conary-config-file' : (VERBOSE_HELP,
                        "Read PATH conary config file", "PATH"),
            'build-config-file' : (VERBOSE_HELP,
                        "Read PATH config file", "PATH"),
            'rmake-config-file' : (VERBOSE_HELP,
                        "Read PATH config file", "PATH"),
            'skip-default-config': (VERBOSE_HELP,
                        "Don't read default configs"),
            'verbose' : (VERBOSE_HELP,
                        "Display more detailed information where available") }

    def addParameters(self, argDef):
        # Register the shared options under the command's default group.
        d = {}
        d["context"] = ONE_PARAM
        d["config"] = MULT_PARAM
        d["server-config"] = MULT_PARAM
        d["server-config-file"] = MULT_PARAM
        d["build-config-file"] = MULT_PARAM
        d["conary-config-file"] = MULT_PARAM
        d["skip-default-config"] = NO_PARAM
        d["verbose"] = NO_PARAM
        argDef[self.defaultGroup] = d

    def _getContext(self, buildConfig, conaryConfig, argSet):
        # Resolve the context, in increasing order of precedence: conary
        # config, build config, CONARY state file in the current directory,
        # CONARY_CONTEXT environment variable, explicit --context argument.
        context = conaryConfig.context
        if buildConfig.context:
            context = buildConfig.context
        if os.path.exists('CONARY'):
            conaryState = compat.ConaryVersion().ConaryStateFromFile('CONARY',
                                                           parseSource=False)
            if conaryState.hasContext():
                context = conaryState.getContext()
        context = os.environ.get('CONARY_CONTEXT', context)
        context = argSet.pop('context', context)
        return context

    def _setContext(self, buildConfig, conaryConfig, argSet):
        # Apply the resolved context to whichever config objects define it,
        # and fail if a context was requested but exists nowhere.
        context = self._getContext(buildConfig, conaryConfig, argSet)
        usedContext = False
        if conaryConfig and context:
            if conaryConfig.hasSection(context):
                usedContext = True
                conaryConfig.setContext(context)
        buildConfig.useConaryConfig(conaryConfig)
        if context and buildConfig.hasSection(context):
            buildConfig.setContext(context)
            usedContext = True
        if not usedContext and context:
            raise errors.RmakeError('No such context "%s"' % context)

    # NOTE: the tuple-unpacking parameter below is Python 2-only syntax.
    def processConfigOptions(self, (buildConfig, conaryConfig, pluginManager),
                             cfgMap, argSet):
        """
        Manage any config maps we've set up, converting
        assigning them to the config object.
        """
        # Read build config files given via --build-config-file or
        # --config-file.
        configFileList = argSet.pop('build-config-file', [])
        if not isinstance(configFileList, list):
            configFileList = list(configFileList)
        configFileList.extend(argSet.pop('config-file', []))
        for path in configFileList:
            buildConfig.read(path, exception=True)
        # Read conary config files, creating a conary config object if one
        # was not supplied.
        configFileList = argSet.pop('conary-config-file', [])
        if not isinstance(configFileList, list):
            configFileList = list(configFileList)
        if configFileList and not conaryConfig:
            conaryConfig = conarycfg.ConaryConfiguration(readConfigFiles=False)
        for path in configFileList:
            conaryConfig.read(path, exception=True)
        self._setContext(buildConfig, conaryConfig, argSet)
        # Apply single-option config overrides declared in cfgMap;
        # options named 'no-X' invert their boolean value.
        for (arg, data) in cfgMap.items():
            cfgName, paramType = data[0:2]
            value = argSet.pop(arg, None)
            if value is not None:
                if arg.startswith('no-'):
                    value = not value
                buildConfig.configLine("%s %s" % (cfgName, value))
        for line in argSet.pop('config', []):
            buildConfig.configLine(line)
        for line in argSet.pop('server-config', []):
            # NOTE(review): 'serverConfig' is not defined in this scope, so
            # passing --server-config appears to raise NameError here —
            # TODO confirm the intended target object.
            serverConfig.configLine(line)
        if argSet.pop('verbose', False):
            log.setVerbosity(log.DEBUG)

    def requireParameters(self, args, expected=None, allowExtra=False,
                          appendExtra=False, maxExtra=None):
        # Validate the number of positional parameters and slice them into
        # the shape the caller asked for (optionally appending extras as a
        # trailing list).
        args = args[1:] # cut off argv[0]
        command = repr(args[0])
        if isinstance(expected, str):
            expected = [expected]
        if expected is None:
            expected = ['command']
        else:
            expected = ['command'] + expected
        if expected:
            missing = expected[len(args):]
            if missing:
                raise errors.BadParameters('%s missing %s command'
                                           ' parameter(s): %s' % (
                                           command, len(missing),
                                           ', '.join(missing)))
        extra = len(args) - len(expected)
        if not allowExtra and not appendExtra:
            maxExtra = 0
        if maxExtra is not None and extra > maxExtra:
            if maxExtra:
                numParams = '%s-%s' % (len(expected)-1,
                                       len(expected) + maxExtra - 1)
            else:
                numParams = '%s' % (len(expected)-1)
            raise errors.BadParameters('%s takes %s arguments, received %s' % (command, numParams, len(args)-1))
        if appendExtra:
            # final parameter is list
            return args[:len(expected)-1] + [args[len(expected)-1:]]
        elif allowExtra:
            return args[:len(expected)] + [args[len(expected):]]
        else:
            return args
def _getJobIdOrUUIds(val):
    """Convert every entry of *val* via :func:`_getJobIdOrUUId`."""
    return [_getJobIdOrUUId(entry) for entry in val]
def _getJobIdOrUUId(val):
try:
return int(val)
except (ValueError, TypeError):
if isinstance(val, str) and len(val) == 32:
return val
else:
raise errors.ParseError, 'Not a valid jobId or UUID: %s' % val
class BuildCommand(rMakeCommand):
    '''Builds the specified packages or recipes. '''
    commands = ['build']
    commandGroup = CG_BUILD
    paramHelp = '<troveSpec>[{context}] [<troveSpec>][{context}]*'
    help = 'Build packages or recipes'
    # Per-option help text; tuple entries carry a help-visibility level
    # and/or a metavar for the option argument.
    docs = {'flavor' : "flavor to build with",
            'host' : "host to limit build to",
            'label' : "label to limit build to",
            'match' : (options.VERBOSE_HELP,
                       "Only build troves that match the given specification"),
            'no-watch' : "do not show build status",
            'poll' : (options.VERBOSE_HELP, 'backwards compatibility option'),
            'prep' : (options.VERBOSE_HELP,
                      'do not build package, only create chroot'),
            'quiet' : "show less build info - don't tail logs",
            'commit' : "commit job when it is done",
            'message' : "Message to assign to troves upon commit",
            'macro' : ('set macro NAME to VALUE', "'NAME VALUE'"),
            'no-clean': 'do not remove build directory even if build is'
                        ' successful',
            'to-file': (options.VERBOSE_HELP,
                        'store job in a file instead of sending it'
                        ' to the server. This makes it possible for others'
                        ' to start the job.'),
            'binary-search': (options.VERBOSE_HELP,
                              'Search for the binary'
                              'version of group and build the latest'
                              'sources on that branch with the same flavor'),
            'reuse': ('reuse old chroot if possible instead of removing'
                      ' and recreating'),
            'info' : ('Gather and display all the information necessary to perform the build'),
            'recurse': ('recurse groups, building all included sources'),
            'ignore-rebuild-deps': ('Do not rebuild packages if the only'
                                    ' change to them is the packages to be'
                                    ' installed in their chroot.'),
            'ignore-external-rebuild-deps': ('Do not rebuild packages unless'
                                             ' their source has changed or'
                                             ' another package in the job will'
                                             ' be installed in this package\'s'
                                             ' chroot')}

    def addParameters(self, argDef):
        # Build-flow options plus trove-selection options specific to the
        # 'build' command.
        self.addBuildParameters(argDef)
        rMakeCommand.addParameters(self, argDef)
        argDef['flavor'] = ONE_PARAM
        argDef['host'] = MULT_PARAM
        argDef['label'] = MULT_PARAM
        argDef['match'] = MULT_PARAM
        argDef['binary-search'] = NO_PARAM
        argDef['recurse'] = NO_PARAM

    def addBuildParameters(self, argDef):
        # Options shared by subclasses (rebuild, load, restart) that also
        # run the build flow.
        argDef['commit'] = NO_PARAM
        argDef['prep'] = NO_PARAM
        argDef['macro'] = MULT_PARAM
        argDef['message'] = '-m', ONE_PARAM
        argDef['no-watch'] = NO_PARAM
        argDef['poll'] = NO_PARAM
        argDef['no-clean'] = NO_PARAM
        argDef['to-file'] = ONE_PARAM
        argDef['quiet'] = NO_PARAM
        argDef['info'] = NO_PARAM

    def addConfigOptions(self, cfgMap, argDef):
        # --reuse maps straight onto the reuseRoots config item.
        cfgMap['reuse'] = 'reuseRoots', NO_PARAM
        rMakeCommand.addConfigOptions(self, cfgMap, argDef)

    def runCommand(self, client, cfg, argSet, args):
        if self.verbose:
            log.setVerbosity(log.DEBUG)
        else:
            log.setVerbosity(log.INFO)
        command, troveSpecs = self.requireParameters(args, 'troveSpec',
                                                     appendExtra=True)
        if command == 'buildgroup':
            log.warning('"buildgroup" is deprecated and will be removed in a future release - use "build --recurse" instead')
        # Subclasses share this method; rebuild semantics key off the
        # command name.
        rebuild = (command == 'rebuild')
        # Apply a --flavor override to both the build flavor and every
        # entry of the install flavor path.
        flavorSpec = argSet.pop('flavor', None)
        if flavorSpec:
            flavor = deps.parseFlavor(flavorSpec)
            if flavor is None:
                raise errors.ParseError("Invalid flavor: '%s'" % flavorSpec)
            newFlavor = deps.overrideFlavor(client.buildConfig.buildFlavor,
                                            flavor)
            client.buildConfig.buildFlavor = newFlavor
            newFlavors = []
            for oldFlavor in client.buildConfig.flavor:
                newFlavors.append(deps.overrideFlavor(oldFlavor, flavor))
            client.buildConfig.flavor = newFlavors
        matchSpecs = argSet.pop('match', [])
        hosts = argSet.pop('host', [])
        labels = argSet.pop('label', [])
        # Decide how groups are recursed: binary search when requested or
        # when the installed conary cannot recurse source groups.
        recurseGroups = argSet.pop('recurse', False) or command == 'buildgroup'
        if recurseGroups:
            if argSet.pop('binary-search', False):
                recurseGroups = client.BUILD_RECURSE_GROUPS_BINARY
            elif not compat.ConaryVersion().supportsFindGroupSources():
                log.warning('Your conary does not support recursing a group'
                            ' source component, defaulting to searching the'
                            ' binary version')
                recurseGroups = client.BUILD_RECURSE_GROUPS_BINARY
            else:
                recurseGroups = client.BUILD_RECURSE_GROUPS_SOURCE
        self._prep(client, argSet)
        job = client.createBuildJob(troveSpecs, limitToHosts=hosts,
                                    limitToLabels=labels,
                                    recurseGroups=recurseGroups,
                                    matchSpecs=matchSpecs,
                                    rebuild=rebuild)
        return self._build(client, job, argSet)

    def _prep(self, client, argSet):
        # Translate pre-build options into build config settings.
        if 'no-clean' in argSet:
            client.buildConfig.cleanAfterCook = False
            del argSet['no-clean']
        if 'prep' in argSet:
            client.buildConfig.prepOnly = argSet.pop('prep')
        if 'ignore-rebuild-deps' in argSet:
            client.buildConfig.ignoreAllRebuildDeps = True
            argSet.pop('ignore-rebuild-deps')
        if 'ignore-external-rebuild-deps' in argSet:
            client.buildConfig.ignoreExternalRebuildDeps = True
            argSet.pop('ignore-external-rebuild-deps')
        macros = argSet.pop('macro', [])
        for macro in macros:
            client.buildConfig.configLine('macros ' + macro)
        # NOTE(review): this 'no-clean' handling duplicates the block at
        # the top of this method and can never trigger (the key was
        # already deleted above).
        if 'no-clean' in argSet:
            client.buildConfig.cleanAfterCook = False
            del argSet['no-clean']

    def _build(self, client, job, argSet):
        # Common post-creation flow: optionally display/save the job, then
        # submit it and watch/commit according to the options.
        savePath = argSet.pop('to-file', False)
        quiet = argSet.pop('quiet', False)
        commit = argSet.pop('commit', False)
        message = argSet.pop('message', None)
        infoOnly = argSet.pop('info', False)
        monitorJob = not argSet.pop('no-watch', False)
        if infoOnly:
            client.displayJob(job, quiet=quiet)
        if savePath:
            job.writeToFile(savePath, sanitize=True)
        # --info and --to-file both short-circuit: no job is submitted.
        if infoOnly or savePath:
            return 0
        jobId = client.buildJob(job, quiet=quiet)
        if monitorJob:
            if quiet:
                if not client.waitForJob(jobId):
                    return 1
            elif not client.watch(jobId, showTroveLogs=not quiet,
                                  showBuildLogs=not quiet,
                                  commit=commit, message=message):
                return 1
        elif commit:
            if not client.commitJob(jobId, commitWithFailures=False,
                                    waitForJob=True, message=message):
                return 1
        return 0
register(BuildCommand)
class RebuildCommand(BuildCommand):
    '''\
    Rebuilds packages whose source or dependencies have changed.
    '''
    # Shares BuildCommand.runCommand, which enables rebuild semantics when
    # the invoked command name is 'rebuild'.
    commands = ['rebuild']
    commandGroup = CG_BUILD
    paramHelp = '<troveSpec>[{context}] [<troveSpec>][{context}]*'
    help = 'Rebuild packages or recipes if they\'ve changed'

    def addParameters(self, argDef):
        BuildCommand.addParameters(self, argDef)
        # Options to relax the rebuild-dependency checks.
        argDef['ignore-rebuild-deps'] = NO_PARAM
        argDef['ignore-external-rebuild-deps'] = NO_PARAM
register(RebuildCommand)
class LoadJobCommand(BuildCommand):
    '''Loads a job from a file that was created with --to-file'''
    commands = ['load']
    commandGroup = CG_BUILD
    paramHelp = '<path>'

    def addParameters(self, argDef):
        # Only the build-flow options apply; trove-selection options make
        # no sense for an already-saved job.
        self.addBuildParameters(argDef)
        rMakeCommand.addParameters(self, argDef)

    def runCommand(self, client, cfg, argSet, args):
        if self.verbose:
            log.setVerbosity(log.DEBUG)
        else:
            log.setVerbosity(log.INFO)
        command, loadPath = self.requireParameters(args, 'path')
        self._prep(client, argSet)
        # Recreate the job from the saved file, then run the normal
        # submit/watch/commit flow.
        job = client.loadJobFromFile(loadPath)
        return self._build(client, job, argSet)
register(LoadJobCommand)
class RestartCommand(BuildCommand):
    '''Restarts the specified job'''
    commands = ['restart']
    commandGroup = CG_BUILD
    paramHelp = '<jobId> [<troveSpec>]*'
    help = 'Restart an earlier job'

    def addParameters(self, argDef):
        self.addBuildParameters(argDef)
        rMakeCommand.addParameters(self, argDef)
        # Options controlling which troves get updated/excluded/cleared
        # when the job is recreated.
        argDef['exclude'] = MULT_PARAM
        argDef['update'] = MULT_PARAM
        argDef['update-config'] = MULT_PARAM
        argDef['no-update'] = NO_PARAM
        argDef['clear-build-list'] = NO_PARAM
        argDef['clear-prebuilt-list'] = NO_PARAM
        argDef['ignore-rebuild-deps'] = NO_PARAM
        argDef['ignore-external-rebuild-deps'] = NO_PARAM

    def runCommand(self, client, cfg, argSet, args):
        if self.verbose:
            log.setVerbosity(log.DEBUG)
        else:
            log.setVerbosity(log.INFO)
        command, jobId, troveSpecs = self.requireParameters(args, 'jobId',
                                                            allowExtra=True)
        jobId = _getJobIdOrUUId(jobId)
        noUpdate = argSet.pop('no-update', False)
        clearBuildList = argSet.pop('clear-build-list', False)
        clearPrebuiltList = argSet.pop('clear-prebuilt-list', False)
        updateConfigKeys = argSet.pop('update-config', None)
        # --no-update is expressed as an exclude-everything update spec.
        if noUpdate:
            updateSpecs = ['-*']
        else:
            updateSpecs = []
        updateSpecs.extend(argSet.pop('update', []))
        excludeSpecs = argSet.pop('exclude', [])
        self._prep(client, argSet)
        job = client.createRestartJob(jobId, troveSpecs,
                                      updateSpecs=updateSpecs,
                                      excludeSpecs=excludeSpecs,
                                      updateConfigKeys=updateConfigKeys,
                                      clearBuildList=clearBuildList,
                                      clearPrebuiltList=clearPrebuiltList)
        return self._build(client, job, argSet)
register(RestartCommand)
class ChangeSetCommand(rMakeCommand):
    # Hidden command: not listed in the top-level help output.
    commands = ['changeset']
    hidden = True
    paramHelp = '''\
<jobId> <troveSpec>* <outfile>
Creates a changeset with the troves from the job <jobId> and stores in outFile'
'''
    help = 'Create a changeset file from the packages in a job'

    def runCommand(self, client, cfg, argSet, args):
        # The final positional argument is the output path; anything
        # between the jobId and the path is treated as a trove spec.
        command, jobId, path = self.requireParameters(args, ['jobId', 'path'],
                                                      appendExtra=True)
        if len(path) > 1:
            troveSpecs = path[:-1]
            path = path[-1]
        else:
            troveSpecs = []
            path = path[0]
        jobId = _getJobIdOrUUId(jobId)
        client.createChangeSetFile(jobId, path, troveSpecs)
register(ChangeSetCommand)
class CommitCommand(rMakeCommand):
    commands = ['commit', 'ci']
    commandGroup = CG_BUILD
    paramHelp = '''<jobId> [<jobId>]
Commits the build packages from the jobs, moving them from rMake's internal
repository back into the repository where their source package came from.
'''
    help = 'Commit a job'
    docs = {'commit-outdated-sources' : ("Allow commits of source components when another"
                                         " commit has been made upstream"),
            'source-only' : "Only commit the source changes",
            'exclude' : "Do not commit from specified"
                        " sources",
            'message' : "The message to give for all"
                        " committed sources"}
    def addParameters(self, argDef):
        # Commit-specific flags first, then the standard rMake options.
        argDef['source-only'] = NO_PARAM
        argDef['message'] = '-m', ONE_PARAM
        argDef['exclude'] = MULT_PARAM
        argDef['to-file'] = ONE_PARAM
        argDef['commit-outdated-sources'] = NO_PARAM
        rMakeCommand.addParameters(self, argDef)
    def runCommand(self, client, cfg, argSet, args):
        # Commit one or more jobs; waits for completion and tolerates partial
        # failures (commitWithFailures=True). Returns a shell-style exit code.
        command, jobIds = self.requireParameters(args, ['jobId'],
                                                 appendExtra=True)
        commitOutdated = argSet.pop('commit-outdated-sources', False)
        sourceOnly = argSet.pop('source-only', False)
        message = argSet.pop('message', None)
        excludeSpecs = argSet.pop('exclude', None)
        jobIds = _getJobIdOrUUIds(jobIds)
        toFile = argSet.pop('to-file', None)
        success = client.commitJobs(jobIds,
                                    commitOutdatedSources=commitOutdated,
                                    commitWithFailures=True, waitForJob=True,
                                    sourceOnly=sourceOnly,
                                    message=message,
                                    excludeSpecs=excludeSpecs,
                                    writeToFile=toFile)
        if success:
            return 0
        else:
            return 1
register(CommitCommand)
class ConfigCommand(rMakeCommand):
    commands = ['config']
    commandGroup = CG_INFO
    help = 'Display the current configuration'
    docs = {'show-passwords' : 'do not mask passwords'}
    def addParameters(self, argDef):
        rMakeCommand.addParameters(self, argDef)
        argDef["show-passwords"] = NO_PARAM
    def runCommand(self, client, cfg, argSet, args):
        """Print the client configuration, masking passwords by default."""
        self.requireParameters(args)
        showPasswords = argSet.pop('show-passwords', False)
        # Pretty-print only when stdout is a real terminal; some stand-ins
        # for sys.stdout provide no isatty() at all.
        isattyFn = getattr(sys.stdout, 'isatty', None)
        if isattyFn is None:
            prettyPrint = False
        else:
            prettyPrint = isattyFn()
        client.displayConfig(hidePasswords=not showPasswords,
                             prettyPrint=prettyPrint)
register(ConfigCommand)
class DeleteCommand(rMakeCommand):
    commands = ['delete']
    commandGroup = CG_BUILD
    paramHelp = '<jobId>[-<jobId>]+'
    help = 'Delete jobs from rmake\'s history'
    def runCommand(self, client, cfg, argSet, args):
        """Delete jobs; accepts comma-separated ids and <from>-<to> ranges."""
        toDelete = []
        command, jobList = self.requireParameters(args, 'jobId',
                                                  appendExtra=True)
        for arg in jobList:
            values = arg.split(',')
            for value in values:
                # A value is either a single job id or "<from>-<to>".
                # (Local renamed from "range", which shadowed the builtin.)
                bounds = value.split('-', 1)
                if len(bounds) == 1:
                    toDelete.append(_getJobIdOrUUId(value))
                else:
                    fromVal = _getJobIdOrUUId(bounds[0])
                    toVal = _getJobIdOrUUId(bounds[1])
                    # UUIDs cannot be expanded into a range.
                    if (not isinstance(fromVal, int)
                        or not isinstance(toVal, int)):
                        raise ParseError('Must use jobIds when specifying'
                                         ' range to delete')
                    # Ranges are inclusive on both ends.
                    toDelete.extend(xrange(fromVal, toVal + 1))
        client.deleteJobs(toDelete)
register(DeleteCommand)
class HelpCommand(rMakeCommand):
commands = ['help']
help = 'Display help information'
commandGroup = CG_INFO
def runCommand(self, client, cfg, argSet, args):
command, subCommands = self.requireParameters(args, allowExtra=True,
maxExtra=1)
if subCommands:
command = subCommands[0]
commands = self.mainHandler._supportedCommands
if not command in commands:
print "%s: no such command: '%s'" % (self.mainHandler.name,
command)
sys.exit(1)
commands[command].usage()
else:
self.mainHandler.usage(showAll=True)
return 0
register(HelpCommand)
class PollCommand(rMakeCommand):
    commands = ['poll', 'watch']
    commandGroup = CG_INFO
    paramHelp = '''<jobId>
Watch the progress of job <jobId> as it builds its packages
'''
    help = 'Watch a job build'
    docs = { 'quiet' : 'Only display major job status changes',
             'commit' : "Commit job when it is done"}
    def addParameters(self, argDef):
        rMakeCommand.addParameters(self, argDef)
        argDef['quiet'] = NO_PARAM
        argDef['commit'] = NO_PARAM
    def runCommand(self, client, cfg, argSet, args):
        # Follow a job's build output until it finishes; --quiet suppresses
        # per-build/per-trove logs, --commit commits the job when done.
        command, jobId = self.requireParameters(args, 'jobId')
        log.setVerbosity(log.INFO)
        quiet = argSet.pop('quiet', False)
        commit = argSet.pop('commit', False)
        jobId = _getJobIdOrUUId(jobId)
        success = client.watch(jobId, showBuildLogs = not quiet,
                               showTroveLogs = not quiet,
                               commit = commit)
        # Shell-style exit status.
        if success:
            return 0
        else:
            return 1
register(PollCommand)
class StopCommand(rMakeCommand):
    commands = ['stop']
    commandGroup = CG_BUILD
    help = 'Stop job from building'
    paramHelp = '''<jobId>
Stops job <jobId> from building.
'''
    def runCommand(self, client, cfg, argSet, args):
        """Ask the server to stop building the given job."""
        command, rawJobId = self.requireParameters(args, 'jobId')
        log.setVerbosity(log.INFO)
        client.stopJob(_getJobIdOrUUId(rawJobId))
register(StopCommand)
class QueryCommand(rMakeCommand):
    commands = ['query', 'q']
    commandGroup = CG_INFO
    help = 'Display information about a job'
    paramHelp = '''[<jobId> <troveSpec>*]
Display information about the job <jobId> (limited to <troveSpec>
if specified)
'''
    docs = {'troves' : 'Display troves for this job',
            'info' : 'Display details',
            'logs' : 'Display logs associated with jobs/troves',
            'watch' : 'Continually update status while job builds',
            'full-versions' : 'Show full versions',
            'labels' : 'Show labels',
            'flavors' : 'Show full flavors',
            'tracebacks' : 'Show tracebacks',
            'all' : 'Show all jobs (not just last 20)',
            'active' : 'Show only active jobs',
            'show-config' : 'Show configuration for this job',
            }
    def addParameters(self, argDef):
        # All query options are simple boolean display toggles.
        argDef['troves'] = NO_PARAM
        argDef['info'] = NO_PARAM
        argDef['tracebacks'] = NO_PARAM
        argDef['full-versions'] = NO_PARAM
        argDef['labels'] = NO_PARAM
        argDef['flavors'] = NO_PARAM
        argDef['logs'] = NO_PARAM
        argDef['watch'] = NO_PARAM
        argDef['all'] = NO_PARAM
        argDef['active'] = NO_PARAM
        argDef['show-config'] = NO_PARAM
        rMakeCommand.addParameters(self, argDef)
    def runCommand(self, client, cfg, argSet, args):
        # With no arguments, show the recent job list; with a jobId (and
        # optional trove specs), show details for that job only.
        command, args = self.requireParameters(args, allowExtra=True)
        if args:
            jobId = _getJobIdOrUUId(args[0])
            troveSpecs = args[1:]
            try:
                jobId = int(jobId)
            except ValueError:
                self.usage()
                log.error("bad jobId '%s'", jobId)
                return 1
        else:
            jobId = None
            troveSpecs = []
        displayTroves = argSet.pop('troves', False)
        displayDetails = argSet.pop('info', False)
        showFullVersions = argSet.pop('full-versions', False)
        showFullFlavors = argSet.pop('flavors', False)
        showLabels = argSet.pop('labels', False)
        showTracebacks = argSet.pop('tracebacks', False)
        showLogs = argSet.pop('logs', False)
        showConfig = argSet.pop('show-config', False)
        # Job listings are capped at the 20 most recent unless --all.
        if argSet.pop('all', False):
            limit = None
        else:
            limit = 20
        activeOnly = argSet.pop('active', False)
        watchJob = argSet.pop('watch', False)
        query.displayJobInfo(client, jobId, troveSpecs,
                             displayTroves=displayTroves,
                             displayDetails=displayDetails,
                             showLogs=showLogs,
                             showBuildLogs=showLogs,
                             showFullVersions=showFullVersions,
                             showFullFlavors=showFullFlavors,
                             showLabels=showLabels,
                             showTracebacks=showTracebacks,
                             showConfig=showConfig,
                             jobLimit=limit,
                             activeOnly=activeOnly)
        # --watch: keep following the job after the initial display.
        if watchJob:
            client.watch(jobId, showBuildLogs = True, showTroveLogs = True)
register(QueryCommand)
class ListCommand(rMakeCommand):
    """\
List information about the given rmake server.
Types Available:
list [ch]roots - lists chroots on this rmake server"""
    commands = ['list']
    paramHelp = "<type>"
    help = 'List various information about this rmake server'
    commandGroup = CG_INFO
    docs = {'all' : 'Backwards compatibility option',
            'active' : 'Display only active items' }
    def addParameters(self, argDef):
        argDef['all'] = NO_PARAM
        argDef['active'] = NO_PARAM
        rMakeCommand.addParameters(self, argDef)
    def runCommand(self, client, cfg, argSet, args):
        # Dispatch "list <type>" to the matching list<Type> method,
        # e.g. "list chroots" -> self.listChroots.
        command, subCommand = self.requireParameters(args, 'command')
        commandFn = getattr(self, 'list%s' % subCommand.title(), None)
        if not commandFn:
            self.usage()
            raise errors.RmakeError('No such list command %s' % subCommand)
        commandFn(client, cfg, argSet)
    def listChroots(self, client, cfg, argSet):
        # Without --active, archived/inactive chroots are shown as well.
        allChroots = not argSet.pop('active', False)
        query.listChroots(client, cfg, allChroots=allChroots)
    # Alias so "list roots" works as well as "list chroots".
    listRoots = listChroots
register(ListCommand)
class ChrootCommand(rMakeCommand):
    """\
Runs /bin/sh in the given chroot.
This command allows you to debug problems that occur with a build in
rMake. By default, it enters the chroot as the user who built the
trove. With the --super parameter you can cause it to run as the
"rmake" user, who can then run commands like "conary update strace."\
"""
    help = 'Run /bin/sh in a given chroot'
    paramHelp = "<jobId> <trove>"
    commands = ['chroot']
    docs = {'super' :
             'Run as a user capable of modifying the contents of the root',
            'path' : 'Specify the chroot path to use'}
    def addParameters(self, argDef):
        argDef['super'] = NO_PARAM
        argDef['path'] = ONE_PARAM
        rMakeCommand.addParameters(self, argDef)
    def _getChroot(self, chroot):
        # Chroots are addressed as (host, path); this client always targets
        # the local node.
        return '_local_', chroot
    def runCommand(self, client, cfg, argSet, args):
        # Open an interactive shell session inside the chroot for <jobId>,
        # optionally narrowed to one trove or an explicit chroot path.
        command, jobId, troveSpec = self.requireParameters(args,
                                                           ['jobId'],
                                                           allowExtra=True,
                                                           maxExtra=1)
        superUser = argSet.pop('super', False)
        path = argSet.pop('path', None)
        if path:
            chrootHost, chrootPath = self._getChroot(path)
        else:
            chrootHost = chrootPath = None
        # troveSpec arrives as a 0- or 1-element list from allowExtra.
        if not troveSpec:
            troveSpec = None
        else:
            troveSpec = troveSpec[0]
        client.startChrootSession(jobId, troveSpec, ['/bin/bash', '-l'],
                                  superUser=superUser,
                                  chrootHost=chrootHost,
                                  chrootPath=chrootPath)
register(ChrootCommand)
class ArchiveCommand(rMakeCommand):
    """\
Archive a chroot so that it will not be overwritten by rmake during the
build process.
By default, rmake will reuse particular names for chroots
whenever building something with that same name. This command can be used
to safely move a chroot out of the way for further debugging without
requiring that normal rmake use be stopped."""
    commands = ['archive']
    paramHelp = '<chrootName> <newName>'
    help = 'Archives a chroot for later use'
    def addParameters(self, argDef):
        rMakeCommand.addParameters(self, argDef)
    def _getChroot(self, chroot):
        # Chroots are always addressed on the local node.
        return '_local_', chroot
    def runCommand(self, client, cfg, argSet, args):
        """Archive <chrootPath>, renaming it to the optional second arg."""
        command, chrootName, extra = self.requireParameters(
            args, ['chrootPath'], allowExtra=1)
        host, chrootName = self._getChroot(chrootName)
        # The archive name defaults to the chroot's current name.
        newPath = extra[0] if extra else chrootName
        client.archiveChroot(host, chrootName, newPath)
register(ArchiveCommand)
class CleanCommand(rMakeCommand):
    """\
Removes the given chroot, freeing its space.
This command simply removes the given chroot and everything within it,
freeing its diskspace.
Specifying --all means remove all old chroots.
"""
    commands = ['clean']
    help = 'Deletes a chroot'
    paramHelp = '<chroot>'
    def addParameters(self, argDef):
        argDef['all'] = NO_PARAM
        rMakeCommand.addParameters(self, argDef)
    def _getChroot(self, chroot):
        # Chroots are always addressed on the local node.
        return '_local_', chroot
    def runCommand(self, client, cfg, argSet, args):
        # --all deletes every old chroot; otherwise exactly one chroot path
        # is required.
        if argSet.pop('all', False):
            client.deleteAllChroots()
        else:
            command, chroot = self.requireParameters(args, ['chrootPath'])
            client.deleteChroot(*self._getChroot(chroot))
register(CleanCommand)
class CheckoutCommand(cvc.CheckoutCommand, rMakeCommand):
    # Listed with NewPkg under the setup group.
    commandGroup = 'Setup Commands'
    def processConfigOptions(self, *args, **kw):
        # Use rMake's config handling rather than cvc's.
        return rMakeCommand.processConfigOptions(self, *args, **kw)
    def runCommand(self, client, cfg, argSet, args):
        """Delegate to cvc checkout using the rMake client's repository."""
        repos = client.getRepos()
        return cvc.CheckoutCommand.runCommand(self, cfg, argSet, args,
                                              repos=repos)
register(CheckoutCommand)
class NewPkgCommand(cvc.NewPkgCommand, rMakeCommand):
    commandGroup = 'Setup Commands'
    def processConfigOptions(self, *args, **kw):
        # Use rMake's config handling rather than cvc's.
        return rMakeCommand.processConfigOptions(self, *args, **kw)
    def runCommand(self, client, cfg, argSet, args):
        """Delegate to cvc newpkg using the rMake client's repository."""
        repos = client.getRepos()
        return cvc.NewPkgCommand.runCommand(self, cfg, argSet, args,
                                            repos=repos)
register(NewPkgCommand)
class ContextCommand(cvc.ContextCommand, rMakeCommand):
    def processConfigOptions(self, *args, **kw):
        # Use rMake's config handling rather than cvc's.
        return rMakeCommand.processConfigOptions(self, *args, **kw)
    def runCommand(self, client, cfg, argSet, args):
        """Delegate to cvc context using the rMake client's repository."""
        repos = client.getRepos()
        return cvc.ContextCommand.runCommand(self, cfg, argSet, args,
                                             repos=repos)
register(ContextCommand)
class BuildImageCommand(BuildCommand):
    '''Builds the specified rbuilder image.'''
    paramHelp = '<productName> <troveSpec> <imageType>'
    docs = {
        'option': ('options for the image build, e.g., swapSize=128', 'optionName=value'),
    }
    commands = ['buildimage']
    commandGroup = CG_BUILD
    def addParameters(self, argDef):
        argDef['option'] = MULT_PARAM
        rMakeCommand.addParameters(self, argDef)
    def addConfigOptions(self, cfgMap, argDef):
        rMakeCommand.addConfigOptions(self, cfgMap, argDef)
    def runCommand(self, client, cfg, argSet, args):
        """Submit an image build job and run the standard build flow on it."""
        (command, product,
         troveSpec, imageType) = self.requireParameters(args, ['product',
                                                               'troveSpec',
                                                               'imageType'])
        # Each --option must be of the form optionName=value.
        options = {}
        for option in argSet.pop('option', []):
            if '=' not in option:
                # Report a usage error instead of letting the unpacking of
                # split() raise an unhandled ValueError.
                raise ParseError("Invalid --option '%s': expected"
                                 " optionName=value" % option)
            key, value = option.split('=', 1)
            options[key] = value
        job = client.createImageJob(product, [(troveSpec, imageType, options)])
        return self._build(client, job, argSet)
register(BuildImageCommand)
def addCommands(main):
    # Register every command class collected in the module-level _commands
    # list with the given command-line handler.
    for command in _commands:
        main._registerCommand(command)
| |
"""The tests for the Dialogflow component."""
import copy
import json
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import dialogflow, intent_script
from homeassistant.config import async_process_ha_core_config
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
# Fixed Dialogflow identifiers reused by the canned request payloads below.
SESSION_ID = "a9b84cec-46b6-484e-8f31-f65dba03ae6d"
INTENT_ID = "c6a74079-a8f0-46cd-b372-5a934d23591c"
INTENT_NAME = "tests"
REQUEST_ID = "19ef7e78-fe15-4e94-99dd-0c0b1e8753c3"
REQUEST_TIMESTAMP = "2017-01-21T17:54:18.952Z"
CONTEXT_NAME = "78a5db95-b7d6-4d50-9c9b-2fc73a5e34c3_id_dialog_context"
@pytest.fixture
async def calls(hass, fixture):
    """Return a list of Dialogflow calls triggered."""
    calls = []
    @callback
    def mock_service(call):
        """Mock action call."""
        calls.append(call)
    # Record every "test.dialogflow" service invocation in the list so
    # tests can assert on the calls made by intent actions.
    hass.services.async_register("test", "dialogflow", mock_service)
    return calls
@pytest.fixture
async def fixture(hass, aiohttp_client):
    """Initialize a Home Assistant server for testing this module.

    Returns a (test HTTP client, webhook id) tuple used by every test.
    """
    # Set up the dialogflow integration plus the intent_script intents the
    # canned payloads refer to.
    await async_setup_component(hass, dialogflow.DOMAIN, {"dialogflow": {}})
    await async_setup_component(
        hass,
        intent_script.DOMAIN,
        {
            "intent_script": {
                "WhereAreWeIntent": {
                    "speech": {
                        "type": "plain",
                        "text": """
                        {%- if is_state("device_tracker.paulus", "home")
                               and is_state("device_tracker.anne_therese",
                                            "home") -%}
                            You are both home, you silly
                        {%- else -%}
                            Anne Therese is at {{
                                states("device_tracker.anne_therese")
                            }} and Paulus is at {{
                                states("device_tracker.paulus")
                            }}
                        {% endif %}
                    """,
                    }
                },
                "GetZodiacHoroscopeIntent": {
                    "speech": {
                        "type": "plain",
                        "text": "You told us your sign is {{ ZodiacSign }}.",
                    }
                },
                "CallServiceIntent": {
                    "speech": {"type": "plain", "text": "Service called"},
                    "action": {
                        "service": "test.dialogflow",
                        "data_template": {"hello": "{{ ZodiacSign }}"},
                        "entity_id": "switch.test",
                    },
                },
            }
        },
    )
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    # Create the config entry through the user flow; this registers the
    # webhook whose id the tests post payloads to.
    result = await hass.config_entries.flow.async_init(
        "dialogflow", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    webhook_id = result["result"].data["webhook_id"]
    return await aiohttp_client(hass.http.app), webhook_id
class _Data:
    """Canned Dialogflow webhook payloads for both API versions.

    The v1/v2 properties return deep copies so each test may mutate its
    payload without affecting the others.
    """
    # Template of a Dialogflow API v1 webhook request.
    _v1 = {
        "id": REQUEST_ID,
        "timestamp": REQUEST_TIMESTAMP,
        "result": {
            "source": "agent",
            "resolvedQuery": "my zodiac sign is virgo",
            "action": "GetZodiacHoroscopeIntent",
            "actionIncomplete": False,
            "parameters": {"ZodiacSign": "virgo"},
            "metadata": {
                "intentId": INTENT_ID,
                "webhookUsed": "true",
                "webhookForSlotFillingUsed": "false",
                "intentName": INTENT_NAME,
            },
            "fulfillment": {"speech": "", "messages": [{"type": 0, "speech": ""}]},
            "score": 1,
        },
        "status": {"code": 200, "errorType": "success"},
        "sessionId": SESSION_ID,
        "originalRequest": None,
    }
    # Template of a Dialogflow API v2 webhook request.
    _v2 = {
        "responseId": REQUEST_ID,
        "timestamp": REQUEST_TIMESTAMP,
        "queryResult": {
            "queryText": "my zodiac sign is virgo",
            "action": "GetZodiacHoroscopeIntent",
            "allRequiredParamsPresent": True,
            "parameters": {"ZodiacSign": "virgo"},
            "intent": {
                "name": INTENT_ID,
                "webhookState": "true",
                "displayName": INTENT_NAME,
            },
            "fulfillment": {"text": "", "messages": [{"type": 0, "speech": ""}]},
            "intentDetectionConfidence": 1,
        },
        "status": {"code": 200, "errorType": "success"},
        "session": SESSION_ID,
        "originalDetectIntentRequest": None,
    }
    @property
    def v1(self):
        # Fresh copy per access; tests mutate the returned dict.
        return copy.deepcopy(self._v1)
    @property
    def v2(self):
        # Fresh copy per access; tests mutate the returned dict.
        return copy.deepcopy(self._v2)
# Shared, read-only payload factory used by all tests.
Data = _Data()
async def test_v1_data():
    """Test for version 1 api based on message."""
    payload = Data.v1
    assert dialogflow.get_api_version(payload) == 1
async def test_v2_data():
    """Test for version 2 api based on message."""
    payload = Data.v2
    assert dialogflow.get_api_version(payload) == 2
async def test_intent_action_incomplete_v1(fixture):
    """Test when action is not completed."""
    mock_client, webhook_id = fixture
    data = Data.v1
    data["result"]["actionIncomplete"] = True
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    # Incomplete actions are acknowledged with an empty 200 body.
    assert response.status == 200
    assert await response.text() == ""
async def test_intent_action_incomplete_v2(fixture):
    """Test when action is not completed."""
    mock_client, webhook_id = fixture
    data = Data.v2
    data["queryResult"]["allRequiredParamsPresent"] = False
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    # Incomplete actions are acknowledged with an empty 200 body.
    assert response.status == 200
    assert await response.text() == ""
async def test_intent_slot_filling_v1(fixture):
    """Test when Dialogflow asks for slot-filling return none."""
    mock_client, webhook_id = fixture
    # Simulate a v1 request where Dialogflow itself is mid slot-filling:
    # empty ZodiacSign parameter plus the webhookForSlotFillingUsed flag.
    data = Data.v1
    data["result"].update(
        resolvedQuery="my zodiac sign is",
        speech="",
        actionIncomplete=True,
        parameters={"ZodiacSign": ""},
        contexts=[
            {
                "name": CONTEXT_NAME,
                "parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
                "lifespan": 2,
            },
            {
                "name": "tests_ha_dialog_context",
                "parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
                "lifespan": 2,
            },
            {
                "name": "tests_ha_dialog_params_zodiacsign",
                "parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
                "lifespan": 1,
            },
        ],
        fulfillment={
            "speech": "What is the ZodiacSign?",
            "messages": [{"type": 0, "speech": "What is the ZodiacSign?"}],
        },
        score=0.77,
    )
    data["result"]["metadata"].update(webhookForSlotFillingUsed="true")
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    # Slot-filling requests are acknowledged with an empty 200 body.
    assert response.status == 200
    assert await response.text() == ""
async def test_intent_request_with_parameters_v1(fixture):
    """Test a request with parameters."""
    mock_client, webhook_id = fixture
    data = Data.v1
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # v1 responses carry the rendered intent text in "speech".
    text = (await response.json()).get("speech")
    assert text == "You told us your sign is virgo."
async def test_intent_request_with_parameters_v2(fixture):
    """Test a request with parameters."""
    mock_client, webhook_id = fixture
    data = Data.v2
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # v2 responses carry the rendered intent text in "fulfillmentText".
    text = (await response.json()).get("fulfillmentText")
    assert text == "You told us your sign is virgo."
async def test_intent_request_with_parameters_but_empty_v1(fixture):
    """Test a request with parameters but empty value."""
    mock_client, webhook_id = fixture
    data = Data.v1
    data["result"].update(parameters={"ZodiacSign": ""})
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # The empty slot renders as an empty string inside the template.
    text = (await response.json()).get("speech")
    assert text == "You told us your sign is ."
async def test_intent_request_with_parameters_but_empty_v2(fixture):
    """Test a request with parameters but empty value."""
    mock_client, webhook_id = fixture
    data = Data.v2
    data["queryResult"].update(parameters={"ZodiacSign": ""})
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # The empty slot renders as an empty string inside the template.
    text = (await response.json()).get("fulfillmentText")
    assert text == "You told us your sign is ."
async def test_intent_request_without_slots_v1(hass, fixture):
    """Test a request without slots."""
    mock_client, webhook_id = fixture
    data = Data.v1
    data["result"].update(
        resolvedQuery="where are we",
        action="WhereAreWeIntent",
        parameters={},
        contexts=[],
    )
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # Trackers have no state yet, so the template's else-branch renders.
    text = (await response.json()).get("speech")
    assert text == "Anne Therese is at unknown and Paulus is at unknown"
    # With both trackers home, the template's if-branch renders instead.
    hass.states.async_set("device_tracker.paulus", "home")
    hass.states.async_set("device_tracker.anne_therese", "home")
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    text = (await response.json()).get("speech")
    assert text == "You are both home, you silly"
async def test_intent_request_without_slots_v2(hass, fixture):
    """Test a request without slots."""
    mock_client, webhook_id = fixture
    data = Data.v2
    data["queryResult"].update(
        queryText="where are we",
        action="WhereAreWeIntent",
        parameters={},
        outputContexts=[],
    )
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # Trackers have no state yet, so the template's else-branch renders.
    text = (await response.json()).get("fulfillmentText")
    assert text == "Anne Therese is at unknown and Paulus is at unknown"
    # With both trackers home, the template's if-branch renders instead.
    hass.states.async_set("device_tracker.paulus", "home")
    hass.states.async_set("device_tracker.anne_therese", "home")
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    text = (await response.json()).get("fulfillmentText")
    assert text == "You are both home, you silly"
async def test_intent_request_calling_service_v1(fixture, calls):
    """Test a request for calling a service.

    If this request is done async the test could finish before the action
    has been executed. Hard to test because it will be a race condition.
    """
    mock_client, webhook_id = fixture
    data = Data.v1
    data["result"]["action"] = "CallServiceIntent"
    call_count = len(calls)
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # Exactly one new service call, carrying the templated data.
    assert len(calls) == call_count + 1
    call = calls[-1]
    assert call.domain == "test"
    assert call.service == "dialogflow"
    assert call.data.get("entity_id") == ["switch.test"]
    assert call.data.get("hello") == "virgo"
async def test_intent_request_calling_service_v2(fixture, calls):
    """Test a request for calling a service.

    If this request is done async the test could finish before the action
    has been executed. Hard to test because it will be a race condition.
    """
    mock_client, webhook_id = fixture
    data = Data.v2
    data["queryResult"]["action"] = "CallServiceIntent"
    call_count = len(calls)
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # Exactly one new service call, carrying the templated data.
    assert len(calls) == call_count + 1
    call = calls[-1]
    assert call.domain == "test"
    assert call.service == "dialogflow"
    assert call.data.get("entity_id") == ["switch.test"]
    assert call.data.get("hello") == "virgo"
async def test_intent_with_no_action_v1(fixture):
    """Test an intent with no defined action."""
    mock_client, webhook_id = fixture
    data = Data.v1
    del data["result"]["action"]
    assert "action" not in data["result"]
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # The integration answers with a helpful configuration hint.
    text = (await response.json()).get("speech")
    assert text == "You have not defined an action in your Dialogflow intent."
async def test_intent_with_no_action_v2(fixture):
    """Test an intent with no defined action."""
    mock_client, webhook_id = fixture
    data = Data.v2
    del data["queryResult"]["action"]
    assert "action" not in data["queryResult"]
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # The integration answers with a helpful configuration hint.
    text = (await response.json()).get("fulfillmentText")
    assert text == "You have not defined an action in your Dialogflow intent."
async def test_intent_with_unknown_action_v1(fixture):
    """Test an intent with an action not defined in the conf."""
    mock_client, webhook_id = fixture
    data = Data.v1
    data["result"]["action"] = "unknown"
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # Unconfigured intents get a not-configured message, not an error.
    text = (await response.json()).get("speech")
    assert text == "This intent is not yet configured within Home Assistant."
async def test_intent_with_unknown_action_v2(fixture):
    """Test an intent with an action not defined in the conf."""
    mock_client, webhook_id = fixture
    data = Data.v2
    data["queryResult"]["action"] = "unknown"
    response = await mock_client.post(
        f"/api/webhook/{webhook_id}", data=json.dumps(data)
    )
    assert response.status == 200
    # Unconfigured intents get a not-configured message, not an error.
    text = (await response.json()).get("fulfillmentText")
    assert text == "This intent is not yet configured within Home Assistant."
| |
########################################################################
#
# Documentation generator for panda.
#
# How to use this module:
#
# from direct.directscripts import gendocs
# gendocs.generate(version, indirlist, directdirlist, docdir, header, footer, urlprefix, urlsuffix)
#
# - version is the panda version number
#
# - indirlist is the name of a directory, or a list of directories,
# containing the "xxx.in" files that interrogate generates. No
# slash at end.
#
# - directdirlist is the name of a directory, or a list of
# directories, containing the source code for "direct," as well as
# for other Python-based trees that should be included in the
# documentation pages. No slash at end.
#
# - docdir is the name of a directory into which HTML files
# will be emitted. No slash at end.
#
# - header is a string that will be placed at the front of
# every HTML page.
#
# - footer is a string that will be placed at the end of
# every HTML page.
#
# - urlprefix is a string that will be appended to the front of
# every URL.
#
# - urlsuffix is a string that will be appended to the end of
# every URL.
#
########################################################################
#
# The major subsystems are:
#
# * The module that loads interrogate databases.
#
# * The module that loads python parse-trees.
#
# * The "code database", which provides a single access point
# for both interrogate databases and python parse trees.
#
# * The HTML generator.
#
########################################################################
import os, sys, parser, symbol, token, re
########################################################################
#
# assorted utility functions
#
########################################################################
# Matches a documentation section header such as "Returns:" or "Inputs :".
# (All patterns converted to raw strings; the originals relied on "\s"
# surviving in a normal string literal, which is a deprecated escape.)
SECHEADER = re.compile(r"^[A-Z][a-z]+\s*:")
# Matches boilerplate headers that carry no documentation value.
JUNKHEADER = re.compile(r"^((Function)|(Access))\s*:")
# Matches a "from <module> import *" statement.
IMPORTSTAR = re.compile(r"^from\s+([a-zA-Z0-9_.]+)\s+import\s+[*]\s*$")
# Matches a run of identifier characters.
IDENTIFIER = re.compile(r"[a-zA-Z0-9_]+")
# Matches the standard Panda3D license banner at the top of a source file.
FILEHEADER = re.compile(
    r"""^// Filename: [a-zA-Z.]+
// Created by: [a-zA-Z. ()0-9]+(
//)?
////////////////////////////////////////////////////////////////////
//
// PANDA 3D SOFTWARE
// Copyright \(c\) Carnegie Mellon University. All rights reserved.
//
// All use of this software is subject to the terms of the revised BSD
// license. You should have received a copy of this license along
// with this source code in a file named "LICENSE."
//
////////////////////////////////////////////////////////////////////""")
def readFile(fn):
    """Return the entire contents of the file *fn*, exiting on failure."""
    try:
        # The with-statement guarantees the handle is closed even on a
        # partial read; only I/O errors are treated as fatal (the original
        # bare except also swallowed KeyboardInterrupt/SystemExit).
        with open(fn, "r") as srchandle:
            return srchandle.read()
    except (IOError, OSError):
        # A missing/unreadable input makes the doc run unrecoverable.
        sys.exit("Cannot read " + fn)
def writeFile(wfile, data):
    """Write *data* to *wfile* in binary mode, exiting on failure."""
    try:
        # with-statement closes the handle on every path; only I/O errors
        # are fatal (the original bare except swallowed everything).
        with open(wfile, "wb") as dsthandle:
            dsthandle.write(data)
    except (IOError, OSError):
        sys.exit("Cannot write " + wfile)
def writeFileLines(wfile, lines):
    # Write each element of *lines* followed by a newline, exiting on
    # failure.
    # NOTE(review): the file is opened in binary mode but "\n" (a str) is
    # written; on Python 3 this raises TypeError, so this presumably targets
    # Python 2 -- confirm before porting.
    try:
        dsthandle = open(wfile, "wb")
        for x in lines:
            dsthandle.write(x)
            dsthandle.write("\n")
        dsthandle.close()
    except:
        sys.exit("Cannot write "+wfile)
def findFiles(dirlist, ext, ign, list):
    """Recursively collect files under *dirlist* whose names end in *ext*.

    dirlist -- a directory name or a list of directory names.
    ext     -- required filename suffix, e.g. ".py".
    ign     -- container of full paths / bare names to skip.
    list    -- output list of matching paths, mutated in place (the name
               shadows the builtin but is kept for interface compatibility).
    """
    if isinstance(dirlist, str):
        dirlist = [dirlist]
    # Loop variables renamed from "dir"/"file", which shadowed builtins.
    for directory in dirlist:
        for entry in os.listdir(directory):
            full = directory + "/" + entry
            # An entry may be ignored by its full path or its bare name.
            if full not in ign and entry not in ign:
                if os.path.isfile(full):
                    if entry.endswith(ext):
                        list.append(full)
                elif os.path.isdir(full):
                    findFiles(full, ext, ign, list)
def pathToModule(result):
    """Convert a source path like "direct/src/x/y.py" to "direct.x.y"."""
    if result.endswith(".py"):
        result = result[:-3]
    # Drop the "/src/" path component, then dot-separate.
    return result.replace("/src/", "/").replace("/", ".")
def textToHTML(comment, sep, delsection=None):
    """Convert a source comment block into HTML.

    comment    -- raw comment text.
    sep        -- comment leader character(s) stripped from each line.
    delsection -- optional compiled regex; sections matching it are dropped.
    """
    # Build logical "sections": a blank line ends one, bullet lines
    # ("*"/"-") and headers ("Word:") each start a new one, and any other
    # line is appended to the current section.
    sections = [""]
    included = {}
    for line in comment.split("\n"):
        line = line.lstrip(" ").lstrip(sep).lstrip(" ").rstrip("\r").rstrip(" ")
        if (line == ""):
            sections.append("")
        elif (line[0]=="*") or (line[0]=="-"):
            sections.append(line)
            sections.append("")
        elif (SECHEADER.match(line)):
            sections.append(line)
        else:
            sections[-1] = sections[-1] + " " + line
    total = ""
    for sec in sections:
        if (sec != ""):
            # Escape HTML metacharacters. In this copy these substitutions
            # had been corrupted into no-ops (e.g. replace("&","&")) by
            # entity decoding; the entity names are restored here.
            sec = sec.replace("&", "&amp;")
            sec = sec.replace("<", "&lt;")
            sec = sec.replace(">", "&gt;")
            # NOTE(review): the original also had two space substitutions
            # that were corrupted to no-ops; restored as a plausible
            # hard-space conversion -- confirm against upstream gendocs.py.
            sec = sec.replace("  ", "&nbsp; ")
            # Pre-marking a section as "included" suppresses its output:
            # this filters delsection matches and duplicate sections alike.
            if (delsection != None) and (delsection.match(sec)):
                included[sec] = 1
            if sec not in included:
                included[sec] = 1
                total = total + sec + "<br>\n"
    return total
def linkTo(link, text):
    """Return an HTML anchor pointing at *link* with body *text*."""
    return '<a href="%s">%s</a>' % (link, text)
def convertToPythonFn(fn):
    """Convert a C-style name to camelCase: "set_pos" -> "setPos".

    Underscores are removed and the character following each underscore is
    upper-cased; all other characters are left untouched.
    """
    pieces = fn.split("_")
    # Upper-case only the first character of each later piece; the rest of
    # each piece keeps its original case.
    tail = "".join(p[:1].upper() + p[1:] for p in pieces[1:])
    return pieces[0] + tail
def removeFileLicense(content):
    """Strip the standard license banner from the top of *content*."""
    stripped = FILEHEADER.sub("", content)
    return stripped.strip()
########################################################################
#
# Interrogate Database Tokenizer
#
########################################################################
class InterrogateTokenizer:
    """
    A big string, with a "parse pointer", and routines to
    extract integers and strings. The token syntax is that
    used by interrogate databases.
    """
    def __init__(self, fn):
        # fn -- path to the interrogate .in file; the whole file is kept in
        # memory and self.pos advances through it.
        self.fn = fn
        self.pos = 0
        self.data = readFile(fn)
    def readint(self):
        # Skip whitespace, then parse an optionally-negative decimal
        # integer; any other character is a fatal format error.
        neg = 0
        while (self.data[self.pos].isspace()):
            self.pos += 1
        if (self.data[self.pos] == "-"):
            neg = 1
            self.pos += 1
        if (self.data[self.pos].isdigit()==0):
            # Dump context to aid diagnosing a malformed database.
            print("File position " + str(self.pos))
            print("Text: " + self.data[self.pos:self.pos+50])
            sys.exit("Syntax error in interrogate file format 0")
        value = 0
        while (self.data[self.pos].isdigit()):
            value = value*10 + int(self.data[self.pos])
            self.pos += 1
        if (neg): value = -value
        return value
    def readstring(self):
        # Strings are length-prefixed: <len> <one whitespace> <len bytes>.
        length = self.readint()
        if (self.data[self.pos].isspace()==0):
            sys.exit("Syntax error in interrogate file format 1")
        self.pos += 1
        body = self.data[self.pos:self.pos+length]
        # A short read means the file was truncated.
        if (len(body) != length):
            sys.exit("Syntax error in interrogate file format 2")
        self.pos += length
        return body
########################################################################
#
# Interrogate Database Storage/Parsing
#
########################################################################
def parseInterrogateIntVec(tokzr):
    """Read a length-prefixed vector of integers from the tokenizer."""
    count = tokzr.readint()
    # readint() is invoked exactly count times, in order.
    return [tokzr.readint() for _ in range(count)]
class InterrogateFunction:
    # One function record from an interrogate database. Fields are read in
    # the exact order they appear in the stream, so statement order matters.
    def __init__(self, tokzr, db):
        self.db = db
        self.index = tokzr.readint()
        self.componentname = tokzr.readstring()
        self.flags = tokzr.readint()
        self.classindex = tokzr.readint()
        self.scopedname = tokzr.readstring()
        self.cwrappers = parseInterrogateIntVec(tokzr)
        self.pythonwrappers = parseInterrogateIntVec(tokzr)
        self.comment = tokzr.readstring()
        self.prototype = tokzr.readstring()
class InterrogateEnumValue:
    # One enum-value record: name, scoped name, and integer value, read in
    # stream order.
    def __init__(self, tokzr):
        self.name = tokzr.readstring()
        self.scopedname = tokzr.readstring()
        self.value = tokzr.readint()
class InterrogateDerivation:
    # One base-class derivation record (flags, base type index, and the
    # up/downcast function indices), read in stream order.
    def __init__(self, tokzr):
        self.flags = tokzr.readint()
        self.base = tokzr.readint()
        self.upcast = tokzr.readint()
        self.downcast = tokzr.readint()
class InterrogateType:
    """One type (class/struct/enum) record from an interrogate database.

    Integer fields such as `outerclass`, `destructor` and the entries of
    `constructors`/`elements`/`methods`/`casts`/`nested` are indexes into
    the owning InterrogateDatabase.  The reads mirror the on-disk layout;
    their order must not change.
    """
    def __init__(self, tokzr, db):
        self.db = db
        self.index = tokzr.readint()
        self.componentname = tokzr.readstring()
        self.flags = tokzr.readint()
        self.scopedname = tokzr.readstring()
        self.truename = tokzr.readstring()
        self.outerclass = tokzr.readint()
        self.atomictype = tokzr.readint()
        self.wrappedtype = tokzr.readint()
        self.constructors = parseInterrogateIntVec(tokzr)
        self.destructor = tokzr.readint()
        self.elements = parseInterrogateIntVec(tokzr)
        self.methods = parseInterrogateIntVec(tokzr)
        self.casts = parseInterrogateIntVec(tokzr)
        # Counted list of derivation records.
        self.derivations = []
        nderivations = tokzr.readint()
        for i in range(nderivations):
            self.derivations.append(InterrogateDerivation(tokzr))
        # Counted list of enum-value records (empty for non-enum types).
        self.enumvalues = []
        nenumvalues = tokzr.readint()
        for i in range(nenumvalues):
            self.enumvalues.append(InterrogateEnumValue(tokzr))
        self.nested = parseInterrogateIntVec(tokzr)
        self.comment = tokzr.readstring()
class InterrogateParameter:
    """One wrapper-parameter record; `type` is a type index into the owning
    database. Field order mirrors the on-disk layout."""
    def __init__(self, tokzr):
        self.name = tokzr.readstring()
        self.parameterflags = tokzr.readint()
        self.type = tokzr.readint()
class InterrogateWrapper:
    """One generated-wrapper record: which function it wraps, its return
    type, and a counted list of parameters.  The reads mirror the on-disk
    layout; their order must not change.
    """
    def __init__(self, tokzr, db):
        self.db = db
        self.index = tokzr.readint()
        self.componentname = tokzr.readstring()
        self.flags = tokzr.readint()
        self.function = tokzr.readint()
        self.returntype = tokzr.readint()
        self.returnvaluedestructor = tokzr.readint()
        self.uniquename = tokzr.readstring()
        self.parameters = []
        nparameters = tokzr.readint()
        for i in range(nparameters):
            self.parameters.append(InterrogateParameter(tokzr))
class InterrogateDatabase:
    """A whole interrogate database file: header, then counted collections
    of functions, wrappers and types, each indexed by their record index.

    Only file-format version 2.2 is accepted; anything else aborts.
    """
    def __init__(self, tokzr):
        self.fn = tokzr.fn
        self.magic = tokzr.readint()
        version1 = tokzr.readint()
        version2 = tokzr.readint()
        if (version1 != 2) or (version2 != 2):
            sys.exit("This program only understands interrogate file format 2.2")
        self.library = tokzr.readstring()
        self.libhash = tokzr.readstring()
        self.module = tokzr.readstring()
        # Records keyed by their integer index; types additionally keyed by
        # scoped name in namedtypes.
        self.functions = {}
        self.wrappers = {}
        self.types = {}
        self.namedtypes = {}
        count_functions = tokzr.readint()
        for i in range(count_functions):
            fn = InterrogateFunction(tokzr, self)
            self.functions[fn.index] = fn
        count_wrappers = tokzr.readint()
        for i in range(count_wrappers):
            wr = InterrogateWrapper(tokzr, self)
            self.wrappers[wr.index] = wr
        count_types = tokzr.readint()
        for i in range(count_types):
            tp = InterrogateType(tokzr, self)
            self.types[tp.index] = tp
            self.namedtypes[tp.scopedname] = tp
########################################################################
#
# Pattern Matching for Python Parse Trees
#
########################################################################
def printTree(tree, indent):
    """Recursively dump a parser tuple-tree, labeling symbol and token ids
    with their names from the `symbol` and `token` modules."""
    pad = " "[:indent]
    if not (isinstance(tree, tuple) and isinstance(tree[0], int)):
        print(pad + str(tree))
        return
    code = tree[0]
    if code in symbol.sym_name:
        print(pad + "(symbol." + symbol.sym_name[code] + ",")
        for child in tree[1:]:
            printTree(child, indent+1)
        print(pad + "),")
    elif code in token.tok_name:
        print(pad + "(token." + token.tok_name[code] + ", '" + tree[1] + "'),")
    else:
        print(pad + str(tree))
# Template trees for ParseTreeInfo.match().  Leaves of the form ['name']
# are capture variables; everything else must match the parse tree exactly.
# Matches a statement wrapping a compound statement (def/class/if/...);
# captures the compound node as 'compound'.
COMPOUND_STMT_PATTERN = (
    symbol.stmt,
    (symbol.compound_stmt, ['compound'])
    )
# Matches a bare string-literal expression statement (a docstring);
# captures the raw STRING token as 'docstring'.
DOCSTRING_STMT_PATTERN = (
    symbol.stmt,
    (symbol.simple_stmt,
     (symbol.small_stmt,
      (symbol.expr_stmt,
       (symbol.testlist,
        (symbol.test,
         (symbol.or_test,
          (symbol.and_test,
           (symbol.not_test,
            (symbol.comparison,
             (symbol.expr,
              (symbol.xor_expr,
               (symbol.and_expr,
                (symbol.shift_expr,
                 (symbol.arith_expr,
                  (symbol.term,
                   (symbol.factor,
                    (symbol.power,
                     (symbol.atom,
                      (token.STRING, ['docstring'])
                      ))))))))))))))))),
    (token.NEWLINE, '')
    ))
# Matches a single bare NAME inside a class's base list;
# captures the base-class name as 'classname'.
DERIVATION_PATTERN = (
    symbol.test,
    (symbol.or_test,
     (symbol.and_test,
      (symbol.not_test,
       (symbol.comparison,
        (symbol.expr,
         (symbol.xor_expr,
          (symbol.and_expr,
           (symbol.shift_expr,
            (symbol.arith_expr,
             (symbol.term,
              (symbol.factor,
               (symbol.power,
                (symbol.atom,
                 (token.NAME, ['classname'])
                 ))))))))))))))
# Matches a simple "name = <expr>" assignment statement;
# captures the target as 'varname' and the right-hand side as 'rhs'.
ASSIGNMENT_STMT_PATTERN = (
    symbol.stmt,
    (symbol.simple_stmt,
     (symbol.small_stmt,
      (symbol.expr_stmt,
       (symbol.testlist,
        (symbol.test,
         (symbol.or_test,
          (symbol.and_test,
           (symbol.not_test,
            (symbol.comparison,
             (symbol.expr,
              (symbol.xor_expr,
               (symbol.and_expr,
                (symbol.shift_expr,
                 (symbol.arith_expr,
                  (symbol.term,
                   (symbol.factor,
                    (symbol.power,
                     (symbol.atom,
                      (token.NAME, ['varname']),
                      ))))))))))))))),
       (token.EQUAL, '='),
       (symbol.testlist, ['rhs']))),
     (token.NEWLINE, ''),
    ))
class ParseTreeInfo:
    """Recursively extracted summary of a Python parse tree.

    Records the module/class/function docstring plus dictionaries of nested
    classes (class_info), functions (function_info), simple assignments
    (assign_info) and base classes (derivs).
    """
    docstring = ''
    name = ''
    def __init__(self, tree, name, file):
        """
        The code can be a string (in which case it is parsed), or it
        can be in parse tree form already.
        """
        self.name = name
        self.file = file
        self.class_info = {}
        self.function_info = {}
        self.assign_info = {}
        self.derivs = {}
        if isinstance(tree, str):
            try:
                tree = parser.suite(tree+"\n").totuple()
                if (tree):
                    found, vars = self.match(DOCSTRING_STMT_PATTERN, tree[1])
                    if found:
                        self.docstring = vars["docstring"]
            # Narrowed from a bare "except:": a broken source file should not
            # abort the run, but SystemExit/KeyboardInterrupt must propagate.
            except Exception:
                print("CAUTION --- Parse failed: " + name)
        if isinstance(tree, tuple):
            self.extract_info(tree)
    def match(self, pattern, data, vars=None):
        """
        pattern
            Pattern to match against, possibly containing variables.
        data
            Data to be checked and against which variables are extracted.
        vars
            Dictionary of variables which have already been found.  If not
            provided, an empty dictionary is created.
        The `pattern' value may contain variables of the form ['varname']
        which are allowed to parseTreeMatch anything.  The value that is
        parseTreeMatched is returned as part of a dictionary which maps
        'varname' to the parseTreeMatched value.  'varname' is not required
        to be a string object, but using strings makes patterns and the code
        which uses them more readable.  This function returns two values: a
        boolean indicating whether a parseTreeMatch was found and a
        dictionary mapping variable names to their associated values.
        """
        if vars is None:
            vars = {}
        if type(pattern) is list:  # 'variables' are ['varname']
            vars[pattern[0]] = data
            return 1, vars
        if type(pattern) is not tuple:
            return (pattern == data), vars
        if len(data) != len(pattern):
            return 0, vars
        # zip() replaces the Python 2-only map(None, ...) idiom; both
        # sequences are already known to be the same length here.  `same`
        # starts true so two empty tuples compare as a match instead of
        # hitting an unbound local.
        same = 1
        for pattern, data in zip(pattern, data):
            same, vars = self.match(pattern, data, vars)
            if not same:
                break
        return same, vars
    def extract_info(self, tree):
        # extract docstring
        found = 0
        if len(tree) == 2:
            found, vars = self.match(DOCSTRING_STMT_PATTERN[1], tree[1])
        elif len(tree) >= 4:
            found, vars = self.match(DOCSTRING_STMT_PATTERN, tree[3])
        if found:
            # vars['docstring'] is the raw quoted STRING token; eval turns it
            # into the actual str value (it is a literal from the parser).
            self.docstring = eval(vars['docstring'])
        # discover inner definitions
        for node in tree[1:]:
            found, vars = self.match(ASSIGNMENT_STMT_PATTERN, node)
            if found:
                self.assign_info[vars['varname']] = 1
            found, vars = self.match(COMPOUND_STMT_PATTERN, node)
            if found:
                cstmt = vars['compound']
                if cstmt[0] == symbol.funcdef:
                    name = cstmt[2][1]
                    # Workaround for a weird issue with static and classmethods
                    if name == "def":
                        name = cstmt[3][1]
                        self.function_info[name] = ParseTreeInfo(cstmt and cstmt[-1] or None, name, self.file)
                        self.function_info[name].prototype = self.extract_tokens("", cstmt[4])
                    else:
                        self.function_info[name] = ParseTreeInfo(cstmt and cstmt[-1] or None, name, self.file)
                        self.function_info[name].prototype = self.extract_tokens("", cstmt[3])
                elif cstmt[0] == symbol.classdef:
                    name = cstmt[2][1]
                    self.class_info[name] = ParseTreeInfo(cstmt and cstmt[-1] or None, name, self.file)
                    self.extract_derivs(self.class_info[name], cstmt)
    def extract_derivs(self, classinfo, tree):
        # A classdef node of length 8 is "class NAME ( bases ) : suite";
        # tree[4] is the base list.
        if (len(tree)==8):
            derivs = tree[4]
            for deriv in derivs[1:]:
                found, vars = self.match(DERIVATION_PATTERN, deriv)
                if (found):
                    classinfo.derivs[vars["classname"]] = 1
    def extract_tokens(self, str, tree):
        # Flatten all token text under `tree` into one string (used to
        # reconstruct function prototypes).  NOTE: the parameter name `str`
        # shadows the builtin; kept for interface compatibility.
        if (isinstance(tree, tuple)):
            if tree[0] in token.tok_name:
                str = str + tree[1]
                if (tree[1]==","): str=str+" "
            elif tree[0] in symbol.sym_name:
                for sub in tree[1:]:
                    str = self.extract_tokens(str, sub)
        return str
########################################################################
#
# The code database contains:
#
# - a list of InterrogateDatabase objects representing C++ modules.
# - a list of ParseTreeInfo objects representing python modules.
#
# Collectively, these make up all the data about all the code.
#
########################################################################
class CodeDatabase:
    """Unified index of C++ (interrogate .in) and Python source code.

    self.types / self.funcs map scoped names to either interrogate records
    (C++) or ParseTreeInfo objects (Python); the get* methods dispatch on
    that distinction.  *Exports map module names to exported identifiers.
    """
    def __init__(self, cxxlist, pylist):
        self.types = {}
        self.funcs = {}
        # "good" types: exported, non-atomic, with simple unscoped names —
        # these get their own HTML pages.
        self.goodtypes = {}
        self.funcExports = {}
        self.typeExports = {}
        self.varExports = {}
        self.globalfn = []
        # Cache of HTML-formatted prototypes, keyed by function key.
        self.formattedprotos = {}
        print("Reading C++ source files")
        for cxx in cxxlist:
            tokzr = InterrogateTokenizer(cxx)
            idb = InterrogateDatabase(tokzr)
            for type in idb.types.values():
                # NOTE(review): flag bit 8192 appears to mark exported/global
                # types — confirm against interrogate's flag definitions.
                if (type.flags & 8192) or type.scopedname not in self.types:
                    self.types[type.scopedname] = type
                if (type.flags & 8192) and (type.atomictype == 0) and (type.scopedname.count(" ")==0) and (type.scopedname.count(":")==0):
                    self.goodtypes[type.scopedname] = type
                    self.typeExports.setdefault("pandac.PandaModules", []).append(type.scopedname)
            for func in idb.functions.values():
                type = idb.types.get(func.classindex)
                func.pyname = convertToPythonFn(func.componentname)
                # Functions without an owning class are global; keyed under
                # the reserved "GLOBAL." prefix.
                if (type == None):
                    self.funcs["GLOBAL."+func.pyname] = func
                    self.globalfn.append("GLOBAL."+func.pyname)
                    self.funcExports.setdefault("pandac.PandaModules", []).append(func.pyname)
                else:
                    self.funcs[type.scopedname+"."+func.pyname] = func
        print("Reading Python sources files")
        for py in pylist:
            pyinf = ParseTreeInfo(readFile(py), py, py)
            mod = pathToModule(py)
            for type in pyinf.class_info.keys():
                typinf = pyinf.class_info[type]
                self.types[type] = typinf
                self.goodtypes[type] = typinf
                self.typeExports.setdefault(mod, []).append(type)
                for func in typinf.function_info.keys():
                    self.funcs[type+"."+func] = typinf.function_info[func]
            for func in pyinf.function_info.keys():
                self.funcs["GLOBAL."+func] = pyinf.function_info[func]
                self.globalfn.append("GLOBAL."+func)
                self.funcExports.setdefault(mod, []).append(func)
            for var in pyinf.assign_info.keys():
                self.varExports.setdefault(mod, []).append(var)
    def getClassList(self):
        """Return the names of all documentable ("good") classes."""
        return list(self.goodtypes.keys())
    def getGlobalFunctionList(self):
        """Return the keys ("GLOBAL.<name>") of all global functions."""
        return self.globalfn
    def getClassComment(self, cn):
        """Return the class's comment/docstring as HTML, or ""."""
        type = self.types.get(cn)
        if (isinstance(type, InterrogateType)):
            return textToHTML(type.comment,"/")
        elif (isinstance(type, ParseTreeInfo)):
            return textToHTML(type.docstring,"#")
        else:
            return ""
    def getClassParents(self, cn):
        """Return the direct base-class names of class `cn`."""
        type = self.types.get(cn)
        if (isinstance(type, InterrogateType)):
            parents = []
            for deriv in type.derivations:
                basetype = type.db.types[deriv.base]
                parents.append(basetype.scopedname)
            return parents
        elif (isinstance(type, ParseTreeInfo)):
            return list(type.derivs.keys())
        else:
            return []
    def getClassConstants(self, cn):
        """Return (name, "(EnumName)") pairs for nested C++ enum values,
        with a blank separator pair after each enum; [] for Python types."""
        type = self.types.get(cn)
        if (isinstance(type, InterrogateType)):
            result = []
            for subtype in type.nested:
                enumtype = type.db.types[subtype]
                if (len(enumtype.enumvalues)):
                    for enumvalue in enumtype.enumvalues:
                        name = convertToPythonFn(enumvalue.name)
                        result.append((name, "("+enumtype.componentname+")"))
                    result.append(("",""))
            return result
        else:
            return []
    def buildInheritance(self, inheritance, cn):
        """Append `cn` and (recursively) its ancestors to `inheritance`,
        skipping names already present so diamonds do not repeat."""
        if (inheritance.count(cn) == 0):
            inheritance.append(cn)
            for parent in self.getClassParents(cn):
                self.buildInheritance(inheritance, parent)
    def getInheritance(self, cn):
        """Return `cn` followed by all its ancestors, de-duplicated."""
        inheritance = []
        self.buildInheritance(inheritance, cn)
        return inheritance
    def getClassImport(self, cn):
        """Return the module a class should be imported from."""
        type = self.types.get(cn)
        if (isinstance(type, InterrogateType)):
            return "pandac.PandaModules"
        else:
            return pathToModule(type.file)
    def getClassConstructors(self, cn):
        # Only detects C++ constructors, not Python constructors, since
        # those are treated as ordinary methods.
        type = self.types.get(cn)
        result = []
        if (isinstance(type, InterrogateType)):
            for constructor in type.constructors:
                func = type.db.functions[constructor]
                if (func.classindex == type.index):
                    result.append(type.scopedname+"."+func.pyname)
        return result
    def getClassMethods(self, cn):
        """Return "Class.method" keys for methods defined directly on `cn`."""
        type = self.types.get(cn)
        result = []
        if (isinstance(type, InterrogateType)):
            for method in type.methods:
                func = type.db.functions[method]
                if (func.classindex == type.index):
                    result.append(type.scopedname+"."+func.pyname)
        elif (isinstance(type, ParseTreeInfo)):
            for method in type.function_info.keys():
                result.append(type.name + "." + method)
        return result
    def getFunctionName(self, fn):
        """Return the display name for a function key, or the key itself."""
        func = self.funcs.get(fn)
        if (isinstance(func, InterrogateFunction)):
            return func.pyname
        elif (isinstance(func, ParseTreeInfo)):
            return func.name
        else:
            return fn
    def getFunctionImport(self, fn):
        """Return the module a function should be imported from."""
        func = self.funcs.get(fn)
        if (isinstance(func, InterrogateFunction)):
            return "pandac.PandaModules"
        else:
            return pathToModule(func.file)
    def getFunctionPrototype(self, fn, urlprefix, urlsuffix):
        """Return the function's prototype as HTML, cross-linking any other
        known class names that appear in it.  Results are cached."""
        func = self.funcs.get(fn)
        if (isinstance(func, InterrogateFunction)):
            if fn in self.formattedprotos:
                proto = self.formattedprotos[fn]
            else:
                proto = func.prototype
                proto = proto.replace(" inline "," ")
                if (proto.startswith("inline ")): proto = proto[7:]
                proto = proto.replace("basic_string< char >", "string")
                proto = textToHTML(proto,"")
                if "." in fn:
                    for c in self.goodtypes.keys():
                        # NOTE(review): c is interpolated unescaped into the
                        # regex; class names are word characters today, but
                        # re.escape(c) would be safer — confirm.
                        if c != fn.split(".")[0] and (c in proto):
                            proto = re.sub("\\b%s\\b" % c, linkTo(urlprefix+c+urlsuffix, c), proto)
                self.formattedprotos[fn] = proto
            return proto
        elif (isinstance(func, ParseTreeInfo)):
            return textToHTML("def "+func.name+func.prototype,"")
        return fn
    def getFunctionComment(self, fn):
        """Return the function's comment/docstring as HTML."""
        func = self.funcs.get(fn)
        if (isinstance(func, InterrogateFunction)):
            return textToHTML(removeFileLicense(func.comment), "/", JUNKHEADER)
        elif (isinstance(func, ParseTreeInfo)):
            return textToHTML(func.docstring, "#")
        return fn
    def isFunctionPython(self, fn):
        """True if `fn` refers to a Python-defined function."""
        func = self.funcs.get(fn)
        if (isinstance(func, InterrogateFunction)):
            return False
        elif (isinstance(func, ParseTreeInfo)):
            return True
        return False
    def getFuncExports(self, mod):
        """Return function names exported by module `mod`."""
        return self.funcExports.get(mod, [])
    def getTypeExports(self, mod):
        """Return type names exported by module `mod`."""
        return self.typeExports.get(mod, [])
    def getVarExports(self, mod):
        """Return variable names exported by module `mod`."""
        return self.varExports.get(mod, [])
########################################################################
#
# The "Class Rename Dictionary" - Yech.
#
########################################################################
# Maps old class names to their renamed aliases; generate() publishes the
# same HTML page under both names for entries listed here.
CLASS_RENAME_DICT = {
    # No longer used, now empty.
}
########################################################################
#
# HTML generation
#
########################################################################
def makeCodeDatabase(indirlist, directdirlist):
    """Scan the interrogate (.in) trees and Python source trees and build a
    CodeDatabase covering both."""
    if isinstance(directdirlist, str):
        directdirlist = [directdirlist]
    # Subtrees under each python dir that must not be documented.
    skip_subdirs = ("/src/directscripts", "/src/extensions",
                    "/src/extensions_native", "/src/ffi", "/built")
    ignore = {"__init__.py": 1}
    for base in directdirlist:
        for sub in skip_subdirs:
            ignore[base + sub] = 1
    cxxfiles = []
    pyfiles = []
    findFiles(indirlist, ".in", ignore, cxxfiles)
    findFiles(directdirlist, ".py", ignore, pyfiles)
    return CodeDatabase(cxxfiles, pyfiles)
def generateFunctionDocs(code, method, urlprefix, urlsuffix):
    """Render one function/method as an HTML table: named anchor, prototype,
    then its comment (or a placeholder when undocumented)."""
    name = code.getFunctionName(method)
    proto = code.getFunctionPrototype(method, urlprefix, urlsuffix)
    comment = code.getFunctionComment(method)
    if comment == "":
        comment = "Undocumented function.<br>\n"
    parts = [
        '<table bgcolor="e8e8e8" border=0 cellspacing=0 cellpadding=5 width="100%"><tr><td>' + "\n",
        '<a name="' + name + '"><b>' + name + "</b></a><br>\n",
        proto + "<br>\n",
        comment,
        "</td></tr></table><br>\n",
    ]
    return "".join(parts)
def generateLinkTable(link, text, cols, urlprefix, urlsuffix):
    """Render the `link`/`text` pairs as an HTML table filled column-major
    with `cols` columns; each cell is a link built from urlprefix/urlsuffix.
    """
    # Integer division is required here: under Python 3 the old "/" produced
    # a float, so range(column) raised TypeError and the width percentage
    # rendered as e.g. "33.333...%".  "//" restores the Python 2 behavior.
    column = (len(link)+cols-1)//cols
    percent = 100 // cols
    result = '<table width="100%">\n'
    for i in range(column):
        line = ""
        for j in range(cols):
            slot = i + column*j
            linkval = ""
            textval = ""
            if (slot < len(link)): linkval = link[slot]
            if (slot < len(text)): textval = text[slot]
            # Only the first row's cells carry explicit column widths.
            if (i==0):
                line = line + '<td width="' + str(percent) + '%">' + linkTo(urlprefix+linkval+urlsuffix, textval) + "</td>"
            else:
                line = line + '<td>' + linkTo(urlprefix+linkval+urlsuffix, textval) + "</td>"
        result = result + "<tr>" + line + "</tr>\n"
    result = result + "</table>\n"
    return result
def generate(pversion, indirlist, directdirlist, docdir, header, footer, urlprefix, urlsuffix):
    """Generate the complete HTML API reference into `docdir`.

    Emits one page per class, plus classes.html, functions.html,
    methods.html and index.html.  `header`/`footer` wrap every page;
    `urlprefix`/`urlsuffix` build inter-page links.
    """
    code = makeCodeDatabase(indirlist, directdirlist)
    classes = code.getClassList()[:]
    # list.sort(None, str.lower) was the Python 2 (cmp, key) positional
    # calling convention; Python 3's sort() takes key as keyword-only, so
    # every sort below uses sort(key=str.lower) for case-insensitive order.
    classes.sort(key=str.lower)
    xclasses = classes[:]
    print("Generating HTML pages")
    for type in classes:
        body = "<h1>" + type + "</h1>\n"
        comment = code.getClassComment(type)
        body = body + "<ul>\nfrom " + code.getClassImport(type) + " import " + type + "</ul>\n\n"
        body = body + "<ul>\n" + comment + "</ul>\n\n"
        inheritance = code.getInheritance(type)
        body = body + "<h2>Inheritance:</h2>\n<ul>\n"
        for inh in inheritance:
            line = " " + linkTo(urlprefix+inh+urlsuffix, inh) + ": "
            for parent in code.getClassParents(inh):
                line = line + linkTo(urlprefix+parent+urlsuffix, parent) + " "
            body = body + line + "<br>\n"
        body = body + "</ul>\n"
        # Method index: one section per class in the inheritance chain.
        for sclass in inheritance:
            methods = code.getClassMethods(sclass)[:]
            methods.sort(key=str.lower)
            constructors = code.getClassConstructors(sclass)
            if (len(methods) > 0 or len(constructors) > 0):
                body = body + "<h2>Methods of "+sclass+":</h2>\n<ul>\n"
                if len(constructors) > 0:
                    fn = code.getFunctionName(constructors[0])
                    body = body + '<a href="#' + fn + '">' + fn + " (Constructor)</a><br>\n"
                for method in methods:
                    fn = code.getFunctionName(method)
                    body = body + '<a href="#' + fn + '">' + fn + "</a><br>\n"
                body = body + "</ul>\n"
        for sclass in inheritance:
            enums = code.getClassConstants(sclass)[:]
            if (len(enums) > 0):
                body = body + "<h2>Constants in "+sclass+":</h2>\n<ul><table>\n"
                for (value, comment) in enums:
                    body = body + "<tr><td>" + value + "</td><td>" + comment + "</td></tr>\n"
                body = body + "</table></ul>"
        # Full per-function documentation blocks.
        for sclass in inheritance:
            constructors = code.getClassConstructors(sclass)
            for constructor in constructors:
                body = body + generateFunctionDocs(code, constructor, urlprefix, urlsuffix)
            methods = code.getClassMethods(sclass)[:]
            methods.sort(key=str.lower)
            for method in methods:
                body = body + generateFunctionDocs(code, method, urlprefix, urlsuffix)
        body = header + body + footer
        writeFile(docdir + "/" + type + ".html", body)
        if type in CLASS_RENAME_DICT:
            # Legacy alias: publish the same page under the renamed class too.
            modtype = CLASS_RENAME_DICT[type]
            writeFile(docdir + "/" + modtype + ".html", body)
            xclasses.append(modtype)
    xclasses.sort(key=str.lower)
    index = "<h1>List of Classes - Panda " + pversion + "</h1>\n"
    index = index + generateLinkTable(xclasses, xclasses, 3, urlprefix, urlsuffix)
    fnlist = code.getGlobalFunctionList()[:]
    fnlist.sort(key=str.lower)
    fnnames = []
    for i in range(len(fnlist)):
        fnnames.append(code.getFunctionName(fnlist[i]))
    index = header + index + footer
    writeFile(docdir + "/classes.html", index)
    index = "<h1>List of Global Functions - Panda " + pversion + "</h1>\n"
    index = index + generateLinkTable(fnnames, fnnames, 3,"#","")
    for func in fnlist:
        index = index + generateFunctionDocs(code, func, urlprefix, urlsuffix)
    index = header + index + footer
    writeFile(docdir + "/functions.html", index)
    # methods.html: all methods grouped by upper-cased first letter.
    table = {}
    for type in classes:
        for method in code.getClassMethods(type)[:]:
            name = code.getFunctionName(method)
            prefix = name[0].upper()
            if prefix not in table:
                table[prefix] = {}
            if name not in table[prefix]:
                table[prefix][name] = []
            table[prefix][name].append(type)
    index = "<h1>List of Methods - Panda " + pversion + "</h1>\n"
    prefixes = list(table.keys())
    prefixes.sort(key=str.lower)
    for prefix in prefixes:
        index = index + linkTo("#"+prefix, prefix) + " "
    index = index + "<br><br>"
    for prefix in prefixes:
        index = index + '<a name="' + prefix + '">' + "\n"
        names = list(table[prefix].keys())
        names.sort(key=str.lower)
        for name in names:
            line = '<b>' + name + ":</b><ul>\n"
            ctypes = table[prefix][name]
            ctypes.sort(key=str.lower)
            for type in ctypes:
                line = line + "<li>" + linkTo(urlprefix+type+urlsuffix+"#"+name, type) + "\n"
            line = line + "</ul>\n"
            index = index + line + "\n"
    index = header + index + footer
    writeFile(docdir + "/methods.html", index)
    index = "<h1>Panda " + pversion + "</h1>\n"
    index = index + "<ul>\n"
    index = index + "<li>" + linkTo(urlprefix+"classes"+urlsuffix, "List of all Classes") + "\n"
    index = index + "</ul>\n"
    index = index + "<ul>\n"
    index = index + "<li>" + linkTo(urlprefix+"functions"+urlsuffix, "List of all Global Functions") + "\n"
    index = index + "</ul>\n"
    index = index + "<ul>\n"
    index = index + "<li>" + linkTo(urlprefix+"methods"+urlsuffix, "List of all Methods (very long)") + "\n"
    index = index + "</ul>\n"
    writeFile(docdir + "/index.html", index)
########################################################################
#
# IMPORT repair
#
########################################################################
def expandImports(indirlist, directdirlist, fixdirlist):
    """Replace "from X import *" lines under `fixdirlist` with explicit
    imports of the names each file actually uses, keeping a ".orig" backup
    of every file the first time it is touched."""
    code = makeCodeDatabase(indirlist, directdirlist)
    targets = []
    findFiles(fixdirlist, ".py", {}, targets)
    for path in targets:
        # Always work from the pristine backup if one exists.
        if (os.path.isfile(path+".orig")):
            text = readFile(path+".orig")
        else:
            text = readFile(path)
            writeFile(path+".orig", text)
        text = text.replace("\r","")
        # Every identifier appearing anywhere in the file; only these are
        # worth importing explicitly.
        used = {}
        for ident in IDENTIFIER.findall(text):
            used[ident] = 1
        output = []
        for line in text.split("\n"):
            hit = IMPORTSTAR.match(line)
            if not hit:
                output.append(line)
                continue
            module = hit.group(1)
            # A bare module name next to the file may really be a sibling
            # module; resolve it to its dotted path if the file exists.
            if (path.count("/")!=0) and (module.count(".")==0):
                candidate = os.path.dirname(path)+"/"+module+".py"
                if (os.path.isfile(candidate)):
                    module = pathToModule(candidate)
            typeExports = code.getTypeExports(module)
            funcExports = code.getFuncExports(module)
            varExports = code.getVarExports(module)
            if (len(typeExports)+len(funcExports)+len(varExports)==0):
                output.append(line)
                print(path + " : " + module + " : no exports")
            else:
                print(path + " : " + module + " : repairing")
                for item in funcExports:
                    fname = code.getFunctionName(item)
                    if fname in used:
                        output.append("from "+module+" import "+fname)
                for item in typeExports:
                    if item in used:
                        output.append("from "+module+" import "+item)
                for item in varExports:
                    if item in used:
                        output.append("from "+module+" import "+item)
        writeFileLines(path, output)
| |
import sys
import time
import os
import errno
import shlex
import pydoc
import inspect
import collections
# ANSI foreground color codes, keyed by human-readable name.
color2num = {
    "gray": 30,
    "red": 31,
    "green": 32,
    "yellow": 33,
    "blue": 34,
    "magenta": 35,
    "cyan": 36,
    "white": 37,
    "crimson": 38,
}
def colorize(string, color, bold=False, highlight=False):
    """Wrap `string` in ANSI escape codes for the named color; `highlight`
    uses the background variant (code+10) and `bold` adds attribute 1."""
    num = color2num[color]
    if highlight:
        num += 10
    attrs = [str(num)]
    if bold:
        attrs.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attrs), string)
def mkdir_p(path):
    """Create `path` like ``mkdir -p``: make intermediate directories and do
    nothing if the directory already exists.

    Equivalent to the old try/except-EEXIST dance: os.makedirs with
    exist_ok=True still raises if `path` exists but is not a directory
    (FileExistsError, a subclass of OSError), so callers see the same
    failure modes.
    """
    os.makedirs(path, exist_ok=True)
def log(s):  # , send_telegram=False):
    """Print `s` and flush stdout immediately so output appears in order."""
    print(s)
    sys.stdout.flush()
class SimpleMessage(object):
    """Context manager that prints `msg` on entry and reports the elapsed
    wall-clock time (and whether an exception occurred) on exit."""
    def __init__(self, msg, logger=log):
        self.msg = msg
        self.logger = logger
    def __enter__(self):
        print(self.msg)
        self.tstart = time.time()
    def __exit__(self, etype, *args):
        suffix = " (with exception)" if etype is not None else ""
        self.logger("done%s in %.3f seconds" %
                    (suffix, time.time() - self.tstart))
# Nesting depth shared by all Message contexts; controls tab indentation.
MESSAGE_DEPTH = 0
class Message(object):
    """Context manager printing colorized, nesting-aware progress messages:
    entry and exit lines are indented by the current global depth."""
    def __init__(self, msg):
        self.msg = msg
    def __enter__(self):
        global MESSAGE_DEPTH  # pylint: disable=W0603
        print(colorize('\t' * MESSAGE_DEPTH + '=: ' + self.msg, 'magenta'))
        self.tstart = time.time()
        MESSAGE_DEPTH += 1
    def __exit__(self, etype, *args):
        global MESSAGE_DEPTH  # pylint: disable=W0603
        MESSAGE_DEPTH -= 1
        suffix = "" if etype is None else " (with exception)"
        print(colorize('\t' * MESSAGE_DEPTH + "done%s in %.3f seconds" % (suffix, time.time() - self.tstart), 'magenta'))
def prefix_log(prefix, logger=log):
    """Return a logger that prepends `prefix` to every message."""
    def prefixed(s):
        return logger(prefix + s)
    return prefixed
def tee_log(file_name):
    """Return a logger that both logs to stdout (via log) and appends each
    message plus a newline to `file_name`, flushing after every write.

    NOTE(review): the file handle is deliberately kept open for the lifetime
    of the returned logger and is never closed here.
    """
    sink = open(file_name, 'w+')
    def logger(s):
        log(s)
        sink.write(s)
        sink.write('\n')
        sink.flush()
    return logger
def collect_args():
    """Parse sys.argv[1:] as alternating "--name value" pairs and return
    {name: value} with the leading "--" stripped from each name."""
    tokens = shlex.split(' '.join(sys.argv[1:]))
    names = tokens[::2]
    values = tokens[1::2]
    return {name[2:]: value for name, value in zip(names, values)}
def type_hint(arg_name, arg_type):
    """Decorator factory: record on the function that command-line overrides
    for `arg_name` should be converted with `arg_type` (see tweakfun)."""
    def wrap(f):
        meta = getattr(f, '__tweak_type_hint_meta__', None)
        if meta is None:
            meta = {}
            f.__tweak_type_hint_meta__ = meta
        meta[arg_name] = arg_type
        return f
    return wrap
def tweak(fun_or_val, identifier=None):
    """Make a value or function tweakable from the command line: dispatch to
    tweakfun for callables and tweakval for plain values.
    """
    # callable() replaces isinstance(x, collections.Callable): the ABC alias
    # was deprecated in favor of collections.abc and removed in Python 3.10,
    # so the old check raised AttributeError on current interpreters.
    if callable(fun_or_val):
        return tweakfun(fun_or_val, identifier)
    return tweakval(fun_or_val, identifier)
def tweakval(val, identifier):
    """Return `val`, unless a command-line argument named `identifier`
    overrides it; the override string is converted with type(val)."""
    if not identifier:
        raise ValueError('Must provide an identifier for tweakval to work')
    for key, raw in collect_args().items():
        stripped = key.replace('-', '_')
        if stripped == identifier:
            log('replacing %s in %s with %s' % (stripped, str(val), str(raw)))
            return type(val)(raw)
    return val
def tweakfun(fun, alt=None):
    """Make the arguments (or the function itself) tweakable from command line.
    See tests/test_misc_console.py for examples.
    NOTE: this only works for the initial launched process, since other processes
    will get different argv. What this means is that tweak() calls wrapped in a function
    to be invoked in a child process might not behave properly.
    """
    # im_class is a Python 2-only attribute; on Python 3 this is always None.
    cls = getattr(fun, 'im_class', None)
    method_name = fun.__name__
    if alt:
        cmd_prefix = alt
    elif cls:
        # Use the class *name*: the old code concatenated the class object
        # itself with a string, which raised TypeError whenever this ran.
        cmd_prefix = cls.__name__ + '.' + method_name
    else:
        cmd_prefix = method_name
    cmd_prefix = cmd_prefix.lower()
    args = collect_args()
    if cmd_prefix in args:
        # The whole function can be swapped for another located by dotted path.
        fun = pydoc.locate(args[cmd_prefix])
    # inspect.getargspec was removed in Python 3.11; getfullargspec is the
    # drop-in replacement for the fields used here (.args, .defaults).
    if type(fun) == type:
        argspec = inspect.getfullargspec(fun.__init__)
    else:
        argspec = inspect.getfullargspec(fun)
    # TODO handle list arguments
    # Map each defaulted parameter name to its default value.
    defaults = dict(
        list(zip(argspec.args[-len(argspec.defaults or []):], argspec.defaults or [])))
    replaced_kwargs = {}
    cmd_prefix += '-'
    if type(fun) == type:
        meta = getattr(fun.__init__, '__tweak_type_hint_meta__', {})
    else:
        meta = getattr(fun, '__tweak_type_hint_meta__', {})
    for k, v in args.items():
        if k.startswith(cmd_prefix):
            stripped = k[len(cmd_prefix):].replace('-', '_')
            if stripped in meta:
                # Explicit type hint wins over default-value inference.
                log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
                replaced_kwargs[stripped] = meta[stripped](v)
            elif stripped not in argspec.args:
                raise ValueError(
                    '%s is not an explicit parameter of %s' % (stripped, str(fun)))
            elif stripped not in defaults:
                raise ValueError(
                    '%s does not have a default value in method %s' % (stripped, str(fun)))
            elif defaults[stripped] is None:
                raise ValueError(
                    'Cannot infer type of %s in method %s from None value' % (stripped, str(fun)))
            else:
                log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
                # TODO more proper conversions
                replaced_kwargs[stripped] = type(defaults[stripped])(v)
    def tweaked(*args, **kwargs):
        # Positional args are matched to parameter names so that the
        # command-line overrides can win over both positionals and kwargs.
        all_kw = dict(list(zip(argspec.args, args)) +
                      list(kwargs.items()) + list(replaced_kwargs.items()))
        return fun(**all_kw)
    return tweaked
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.
    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).
    The "answer" return value is True for "yes" or False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
| |
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import os
import re
from base64 import b64decode
from datetime import datetime, time
from uuid import UUID
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.utils import DatabaseError
from django.utils import timezone
from django.utils.duration import duration_microseconds
from google.cloud.spanner_dbapi.parse_utils import (
DateStr,
TimestampStr,
escape_name,
)
class DatabaseOperations(BaseDatabaseOperations):
    """A Spanner-specific version of Django database operations."""

    # Spanner has no bounded VARCHAR-style cast target: both char and text
    # fields cast to plain STRING.
    cast_data_types = {"CharField": "STRING", "TextField": "STRING"}
    cast_char_field_without_max_length = "STRING"
    compiler_module = "django_spanner.compiler"
    # Django's lookup names that require a different name in Spanner's
    # EXTRACT() function.
    # https://cloud.google.com/spanner/docs/functions-and-operators#extract
    extract_names = {
        "iso_year": "isoyear",
        "week": "isoweek",
        "week_day": "dayofweek",
    }
# TODO: Consider changing the hardcoded output to a linked value.
def max_name_length(self):
"""Get the maximum length of Spanner table and column names.
See also: https://cloud.google.com/spanner/quotas#tables
:rtype: int
:returns: Maximum length of the name of the table.
"""
return 128
def quote_name(self, name):
"""
Return a quoted version of the given table or column name. Also,
applies backticks to the name that either contain '-' or ' ', or is a
Cloud Spanner's reserved keyword.
Spanner says "column name not valid" if spaces or hyphens are present
(although according to the documantation, any character should be
allowed in quoted identifiers). Replace problematic characters when
running the Django tests to prevent crashes. (Don't modify names in
normal operation to prevent the possibility of colliding with another
column.)
See: https://github.com/googleapis/python-spanner-django/issues/204
:type name: str
:param name: The Quota name.
:rtype: :class:`str`
:returns: Name escaped if it has to be escaped.
"""
if os.environ.get("RUNNING_SPANNER_BACKEND_TESTS") == "1":
name = name.replace(" ", "_").replace("-", "_")
return escape_name(name)
def bulk_batch_size(self, fields, objs):
"""
Override the base class method. Returns the maximum number of the
query parameters.
:type fields: list
:param fields: Currently not used.
:type objs: list
:param objs: Currently not used.
:rtype: int
:returns: The maximum number of query parameters (constant).
"""
return self.connection.features.max_query_params
def bulk_insert_sql(self, fields, placeholder_rows):
"""
A helper method that stitches multiple values into a single SQL
record.
:type fields: list
:param fields: Currently not used.
:type placeholder_rows: list
:param placeholder_rows: Data "rows" containing values to combine.
:rtype: str
:returns: A SQL statement (a `VALUES` command).
"""
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def sql_flush(
self, style, tables, reset_sequences=False, allow_cascade=False
):
"""
Override the base class method. Returns a list of SQL statements
required to remove all data from the given database tables (without
actually removing the tables themselves).
:type style: :class:`~django.core.management.color.Style`
:param style: (Currently not used) An object as returned by either
color_style() or no_style().
:type tables: list
:param tables: A collection of Cloud Spanner Tables
:type reset_sequences: bool
:param reset_sequences: (Optional) Currently not used.
:type allow_cascade: bool
:param allow_cascade: (Optional) Currently not used.
:rtype: list
:returns: A list of SQL statements required to remove all data from
the given database tables.
"""
# Cloud Spanner doesn't support TRUNCATE so DELETE instead.
# A dummy WHERE clause is required.
if tables:
delete_sql = "%s %s %%s" % (
style.SQL_KEYWORD("DELETE"),
style.SQL_KEYWORD("FROM"),
)
return [
delete_sql % style.SQL_FIELD(self.quote_name(table))
for table in tables
]
else:
return []
def adapt_datefield_value(self, value):
"""Cast date argument into Spanner DB API DateStr format.
:type value: object
:param value: A date argument.
:rtype: :class:`~google.cloud.spanner_dbapi.types.DateStr`
:returns: Formatted Date.
"""
if value is None:
return None
return DateStr(str(value))
def adapt_datetimefield_value(self, value):
    """Reformat a datetime argument for Cloud Spanner.

    :type value: object
    :param value: A datetime argument (or None, or a query expression).

    :rtype: :class:`~google.cloud.spanner_dbapi.types.TimestampStr`
    :returns: The formatted timestamp.

    :raises ValueError: For tz-aware datetimes when ``USE_TZ`` is False.
    """
    if value is None:
        return None
    if hasattr(value, "resolve_expression"):
        # Expression values are adapted by the database itself.
        return value
    if timezone.is_aware(value):
        # Cloud Spanner doesn't accept tz-aware datetimes.
        if not settings.USE_TZ:
            raise ValueError(
                "The Cloud Spanner backend does not support "
                "timezone-aware datetimes when USE_TZ is False."
            )
        value = timezone.make_naive(value, self.connection.timezone)
    return TimestampStr(value.isoformat(timespec="microseconds") + "Z")
def adapt_decimalfield_value(
    self, value, max_digits=None, decimal_places=None
):
    """Pass a ``decimal.Decimal`` through unchanged.

    Spanner supports NUMERIC storage of decimals and python-spanner
    performs the conversion, so this is deliberately a no-op.

    :type value: :class:`decimal.Decimal`
    :param value: A decimal field value.

    :type max_digits: int
    :param max_digits: (Optional) Unused; accepted for API compatibility.

    :type decimal_places: int
    :param decimal_places: (Optional) Unused; accepted for API
        compatibility.

    :rtype: decimal.Decimal
    :returns: The value, unmodified.
    """
    return value
def adapt_timefield_value(self, value):
    """Transform a time value into the backend driver's representation.

    :type value: `datetime.time`
    :param value: A time field value (or None, or a query expression).

    :rtype: :class:`~google.cloud.spanner_dbapi.types.TimestampStr`
    :returns: The formatted timestamp.
    """
    # None passes through; expression values are adapted by the database.
    if value is None or hasattr(value, "resolve_expression"):
        return value
    # The column type is TIMESTAMP, so prefix a dummy date to the time.
    iso = value.isoformat(timespec="microseconds")
    return TimestampStr("0001-01-01T" + iso + "Z")
def get_db_converters(self, expression):
    """Collect the converter functions needed for a field's data.

    :type expression: :class:`django.db.models.expressions.BaseExpression`
    :param expression: A query expression to convert.

    :rtype: list
    :returns: Converter functions to apply to Spanner field values.
    """
    converters = super().get_db_converters(expression)
    # Dispatch on the field's internal type; unknown types add nothing.
    extra = {
        "DateTimeField": self.convert_datetimefield_value,
        "TimeField": self.convert_timefield_value,
        "BinaryField": self.convert_binaryfield_value,
        "UUIDField": self.convert_uuidfield_value,
    }.get(expression.output_field.get_internal_type())
    if extra is not None:
        converters.append(extra)
    return converters
def convert_binaryfield_value(self, value, expression, connection):
    """Convert a Spanner BinaryField value for Django.

    :type value: bytes
    :param value: A base64-encoded binary field value.

    :type expression: :class:`django.db.models.expressions.BaseExpression`
    :param expression: A query expression.

    :type connection: :class:`~google.cloud.spanner_dbapi.connection.Connection`
    :param connection: Reference to a Spanner database connection.

    :rtype: bytes
    :returns: The decoded bytes.
    """
    if value is None:
        return value
    # Cloud Spanner stores bytes base64-encoded; decode on the way out.
    return b64decode(value)
def convert_datetimefield_value(self, value, expression, connection):
    """Convert a Spanner DateTimeField value for Django.

    :type value: `DatetimeWithNanoseconds`
    :param value: A datetime field value.

    :type expression: :class:`django.db.models.expressions.BaseExpression`
    :param expression: A query expression.

    :type connection: :class:`~google.cloud.spanner_dbapi.connection.Connection`
    :param connection: Reference to a Spanner database connection.

    :rtype: datetime
    :returns: A tz-aware datetime when USE_TZ is on, otherwise naive.
    """
    if value is None:
        return value
    # Cloud Spanner returns google.api_core.datetime_helpers.
    # DatetimeWithNanoseconds (a datetime subclass with tzinfo=UTC).
    # Rebuild a plain datetime, dropping nanoseconds (unsupported by
    # Django) and the UTC tzinfo, which is replaced below.
    naive = datetime(
        value.year,
        value.month,
        value.day,
        value.hour,
        value.minute,
        value.second,
        value.microsecond,
    )
    if settings.USE_TZ:
        return timezone.make_aware(naive, self.connection.timezone)
    return naive
def convert_timefield_value(self, value, expression, connection):
    """Convert a Spanner TimeField value for Django.

    :type value: :class:`~google.api_core.datetime_helpers.DatetimeWithNanoseconds`
    :param value: A datetime/time field.

    :type expression: :class:`django.db.models.expressions.BaseExpression`
    :param expression: A query expression.

    :type connection: :class:`~google.cloud.spanner_dbapi.connection.Connection`
    :param connection: Reference to a Spanner database connection.

    :rtype: :class:`datetime.time`
    :returns: The time-of-day portion of the stored timestamp.
    """
    if value is None:
        return value
    # Drop the dummy date part stored alongside the time.
    return time(value.hour, value.minute, value.second, value.microsecond)
def convert_uuidfield_value(self, value, expression, connection):
    """Convert a stored UUID string into a :class:`uuid.UUID`.

    :type value: str
    :param value: A UUID-valued str.

    :type expression: :class:`django.db.models.expressions.BaseExpression`
    :param expression: A query expression.

    :type connection: :class:`~google.cloud.spanner_dbapi.connection.Connection`
    :param connection: Reference to a Spanner database connection.

    :rtype: :class:`uuid.UUID`
    :returns: The parsed UUID, or None if ``value`` is None.
    """
    return value if value is None else UUID(value)
def date_extract_sql(self, lookup_type, field_name):
    """Build an EXTRACT expression for a date lookup.

    :type lookup_type: str
    :param lookup_type: The type of the lookup.

    :type field_name: str
    :param field_name: The name of the field.

    :rtype: str
    :returns: A SQL EXTRACT expression.
    """
    # Translate Django's lookup name to Spanner's, falling back to the
    # original name when no mapping exists.
    part = self.extract_names.get(lookup_type, lookup_type)
    return "EXTRACT(%s FROM %s)" % (part, field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
    """Build an EXTRACT expression for a datetime lookup.

    :type lookup_type: str
    :param lookup_type: The type of the lookup.

    :type field_name: str
    :param field_name: The name of the field.

    :type tzname: str
    :param tzname: The time zone name; falls back to UTC when timezone
        support is disabled in settings or no name was given.

    :rtype: str
    :returns: A SQL EXTRACT expression.
    """
    zone = tzname if settings.USE_TZ and tzname else "UTC"
    part = self.extract_names.get(lookup_type, lookup_type)
    return 'EXTRACT(%s FROM %s AT TIME ZONE "%s")' % (
        part,
        field_name,
        zone,
    )
def time_extract_sql(self, lookup_type, field_name):
    """Build an EXTRACT expression for a time lookup.

    :type lookup_type: str
    :param lookup_type: The type of the lookup.

    :type field_name: str
    :param field_name: The name of the field.

    :rtype: str
    :returns: A SQL EXTRACT expression.
    """
    # Times are stored as TIMESTAMP values in the UTC time zone.
    return 'EXTRACT(%s FROM %s AT TIME ZONE "UTC")' % (
        lookup_type,
        field_name,
    )
def date_trunc_sql(self, lookup_type, field_name, tzname=None):
    """Build a DATE_TRUNC expression for a date lookup.

    :type lookup_type: str
    :param lookup_type: The type of the lookup.

    :type field_name: str
    :param field_name: The name of the field.

    :type tzname: str
    :param tzname: Ignored — Spanner's DATE_TRUNC does not support
        time zone conversion.

    :rtype: str
    :returns: A SQL truncation expression.
    """
    # https://cloud.google.com/spanner/docs/functions-and-operators#date_trunc
    if lookup_type == "week":
        # Spanner truncates to Sunday but Django expects Monday:
        # subtract a day so a Sunday lands in the previous week, then
        # add a day back after truncating.
        shifted = (
            "DATE_SUB(CAST(" + field_name + " AS DATE), INTERVAL 1 DAY)"
        )
        sql = "DATE_TRUNC(CAST(%s AS DATE), %s)" % (shifted, lookup_type)
        return "DATE_ADD(CAST(" + sql + " AS DATE), INTERVAL 1 DAY)"
    return "DATE_TRUNC(CAST(%s AS DATE), %s)" % (field_name, lookup_type)
def datetime_trunc_sql(self, lookup_type, field_name, tzname="UTC"):
    """Build a TIMESTAMP_TRUNC expression for a datetime lookup.

    :type lookup_type: str
    :param lookup_type: The type of the lookup.

    :type field_name: str
    :param field_name: The name of the field.

    :type tzname: str
    :param tzname: The time zone name; falls back to UTC when timezone
        support is disabled in settings or no name was given.

    :rtype: str
    :returns: A SQL truncation expression.
    """
    # https://cloud.google.com/spanner/docs/functions-and-operators#timestamp_trunc
    zone = tzname if settings.USE_TZ and tzname else "UTC"
    if lookup_type == "week":
        # Spanner truncates to Sunday but Django expects Monday:
        # shift back a day before truncating, then shift forward again.
        shifted = "TIMESTAMP_SUB(" + field_name + ", INTERVAL 1 DAY)"
        sql = 'TIMESTAMP_TRUNC(%s, %s, "%s")' % (
            shifted,
            lookup_type,
            zone,
        )
        return "TIMESTAMP_ADD(" + sql + ", INTERVAL 1 DAY)"
    return 'TIMESTAMP_TRUNC(%s, %s, "%s")' % (
        field_name,
        lookup_type,
        zone,
    )
def time_trunc_sql(self, lookup_type, field_name, tzname="UTC"):
    """Build a TIMESTAMP_TRUNC expression for a time lookup.

    :type lookup_type: str
    :param lookup_type: The type of the lookup.

    :type field_name: str
    :param field_name: The name of the field.

    :type tzname: str
    :param tzname: The time zone name; defaults to 'UTC' for backward
        compatibility.

    :rtype: str
    :returns: A SQL truncation expression.
    """
    # https://cloud.google.com/spanner/docs/functions-and-operators#timestamp_trunc
    zone = tzname if settings.USE_TZ and tzname else "UTC"
    return 'TIMESTAMP_TRUNC(%s, %s, "%s")' % (
        field_name,
        lookup_type,
        zone,
    )
def datetime_cast_date_sql(self, field_name, tzname):
    """Build a DATE() cast for a datetime lookup.

    :type field_name: str
    :param field_name: The name of the field.

    :type tzname: str
    :param tzname: The time zone name; falls back to UTC when timezone
        support is disabled in settings or no name was given.

    :rtype: str
    :returns: A SQL cast expression.
    """
    # https://cloud.google.com/spanner/docs/functions-and-operators#date
    zone = tzname if settings.USE_TZ and tzname else "UTC"
    return 'DATE(%s, "%s")' % (field_name, zone)
def datetime_cast_time_sql(self, field_name, tzname):
    """Build a time cast for a datetime lookup.

    :type field_name: str
    :param field_name: The name of the field.

    :type tzname: str
    :param tzname: The time zone name; falls back to UTC when timezone
        support is disabled in settings or no name was given.

    :rtype: str
    :returns: A SQL cast expression.
    """
    zone = tzname if settings.USE_TZ and tzname else "UTC"
    # Cloud Spanner has no direct TIMESTAMP time-zone conversion
    # function, so round-trip through a formatted string instead.
    template = (
        "TIMESTAMP(FORMAT_TIMESTAMP("
        "'%%Y-%%m-%%d %%R:%%E9S %%Z', %s, '%s'))"
    )
    return template % (field_name, zone)
def date_interval_sql(self, timedelta):
    """Express a timedelta as a microsecond INTERVAL literal.

    :type timedelta: datetime.timedelta
    :param timedelta: The time delta for the interval.

    :rtype: str
    :returns: A SQL INTERVAL fragment.
    """
    micros = duration_microseconds(timedelta)
    return "INTERVAL %s MICROSECOND" % micros
def format_for_duration_arithmetic(self, sql):
    """Wrap a SQL fragment as a microsecond INTERVAL for duration math.

    :type sql: str
    :param sql: A SQL fragment evaluating to a microsecond count.

    :rtype: str
    :return: The fragment wrapped in an INTERVAL expression.
    """
    template = "INTERVAL %s MICROSECOND"
    return template % sql
def combine_expression(self, connector, sub_expressions):
    """Combine sub-expressions into one expression via ``connector``.

    :type connector: str
    :param connector: The connector operation.

    :type sub_expressions: list
    :param sub_expressions: The expressions to combine.

    :rtype: str
    :return: A combined SQL expression.
    """
    args = ", ".join(sub_expressions)
    if connector == "%%":
        # Modulo is a function in Spanner, not an operator.
        return "MOD(%s)" % args
    if connector == "^":
        # Django's '^' is exponentiation.
        return "POWER(%s)" % args
    if connector == "#":
        # '#' is Django's bit-xor; Spanner spells that operator '^'.
        return super().combine_expression("^", sub_expressions)
    if connector == ">>":
        lhs, rhs = sub_expressions
        # Cloud Spanner's '>>' does not sign-extend, which produces
        # different results for negative numbers than Django expects,
        # so compute the shift arithmetically. Cast the float result to
        # INT64 so it can be assigned to both INT64 and FLOAT64 columns.
        return "CAST(FLOOR(%(lhs)s / POW(2, %(rhs)s)) AS INT64)" % {
            "lhs": lhs,
            "rhs": rhs,
        }
    return super().combine_expression(connector, sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
    """Combine duration sub-expressions via ``connector``.

    :type connector: str
    :param connector: The connector operation ('+' or '-').

    :type sub_expressions: list
    :param sub_expressions: The expressions to combine.

    :raises: :class:`~django.db.utils.DatabaseError`
        For any connector other than '+' or '-'.

    :rtype: str
    :return: A combined SQL expression.
    """
    func = {"+": "TIMESTAMP_ADD", "-": "TIMESTAMP_SUB"}.get(connector)
    if func is None:
        raise DatabaseError(
            "Invalid connector for timedelta: %s." % connector
        )
    return func + "(" + ", ".join(sub_expressions) + ")"
def lookup_cast(self, lookup_type, internal_type=None):
    """Return the placeholder cast for a lookup type.

    Text-style lookups are cast to STRING so that non-string values
    still work, e.g. ``filter(x__contains=4)``.

    :type lookup_type: str
    :param lookup_type: The type of the lookup.

    :type internal_type: str
    :param internal_type: (Optional) Currently not used.

    :rtype: str
    :return: A placeholder, possibly wrapped in a CAST.
    """
    text_lookups = frozenset((
        "contains",
        "icontains",
        "startswith",
        "istartswith",
        "endswith",
        "iendswith",
        "regex",
        "iregex",
        "iexact",
    ))
    if lookup_type in text_lookups:
        return "CAST(%s AS STRING)"
    return "%s"
def prep_for_like_query(self, x):
    """Escape a value for lookups that use REGEXP_CONTAINS, not LIKE.

    :type x: str
    :param x: The query value to prepare.

    :rtype: str
    :returns: The value with regex metacharacters escaped.
    """
    text = str(x)
    return re.escape(text)
# iexact lookups also compile to REGEXP_CONTAINS, so they reuse the
# same regex-escaping preparation as the LIKE-style lookups above.
prep_for_iexact_query = prep_for_like_query
def no_limit_value(self):
    """Return the sentinel LIMIT meaning "no limit".

    :rtype: int
    :returns: The largest INT64 value, ``2**63 - 1``.
    """
    return 2**63 - 1
def _get_limit_offset_params(self, low_mark, high_mark):
    """Compute (limit, offset), capping the "no limit" sentinel.

    :type low_mark: int
    :param low_mark: The query's low slice bound.

    :type high_mark: int
    :param high_mark: The query's high slice bound.

    :rtype: tuple
    :returns: The ``(limit, offset)`` pair.
    """
    limit, offset = super()._get_limit_offset_params(low_mark, high_mark)
    # When an offset is combined with the "no limit" sentinel, Cloud
    # Spanner would overflow INT64 adding them; shrink the limit.
    if offset and limit == self.connection.ops.no_limit_value():
        limit -= offset
    return limit, offset
| |
from __future__ import absolute_import
from datetime import datetime
from django.db import models
from krankshaft.api import API as APIBase
from krankshaft.auth import Auth as AuthBase
from krankshaft.authn import Authn
from krankshaft.authz import Authz as AuthzBase
from krankshaft.resource import DjangoModelResource
from tests.base import TestCaseNoDB
import base64
import json
import os
import pytest
import shutil
import tempfile
import unittest
try:
from PIL import Image
except ImportError:
Image = None
# A minimal valid 1x1 JPEG used as an image-upload fixture.
# base64.b64decode replaces the deprecated base64.decodestring (removed
# in Python 3.9); the newlines are already stripped by splitlines(), so
# the two decode identically for this input.
IMAGE_JPG = base64.b64decode(''.join('''
/9j/4AAQSkZJRgABAQEAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkI
CQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQ
EBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCAABAAEDASIA
AhEBAxEB/8QAFQABAQAAAAAAAAAAAAAAAAAAAAn/xAAUEAEAAAAAAAAAAAAAAAAAAAAA/8QAFAEB
AAAAAAAAAAAAAAAAAAAAAP/EABQRAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/AKOgA//Z
'''.splitlines()))

# The same payload with a corrupted middle: it still base64-decodes but
# is not a valid JPEG, to exercise image-validation failure paths.
IMAGE_JPG_INVALID = base64.b64decode(''.join('''
/9j/4AAQSkZJRgABAQEAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkI
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
EBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCAABAAEDASIA
AhEBAxEB/8QAFQABAQAAAAAAAAAAAAAAAAAAAAn/xAAUEAEAAAAAAAAAAAAAAAAAAAAA/8QAFAEB
AAAAAAAAAAAAAAAAAAAAAP/EABQRAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/AKOgA//Z
'''.splitlines()))
class Authz(AuthzBase):
    """Authorization policy: everything is allowed except rows of
    ModelUnauthorized, which lets tests exercise 401 responses."""
    def is_authorized_object(self, request, authned, obj):
        # ModelUnauthorized is defined later in this module; the name is
        # only looked up at call time, so the forward reference is safe.
        return obj.__class__ is not ModelUnauthorized
class Auth(AuthBase):
    """Test auth stack: anonymous access allowed (require_authned=False),
    with object-level checks delegated to the Authz policy above."""
    authn = Authn()
    authz = Authz(require_authned=False)
class API(APIBase):
    """API variant for tests: re-raises server errors instead of
    returning a 500 response, so failures surface as test exceptions."""
    Auth = Auth
    def handler500(self, request, exc_info, error=None):
        # Bare `raise` re-raises the exception currently being handled.
        raise
class ModelForeign(models.Model):
    # Target of Model.foreign. `created` uses the callable datetime.now
    # (not a fixed value) so every row gets its own timestamp.
    char_indexed = models.CharField(max_length=20, db_index=True)
    created = models.DateTimeField(default=datetime.now)
class ModelForeign2(models.Model):
    # Registered on the v2 API (with use_location=True) in _pre_setup.
    char_indexed = models.CharField(max_length=20, db_index=True)
class ModelForeign3(models.Model):
    # Target of ModelHasForeign3.foreign; used for nested serialization.
    char_indexed = models.CharField(max_length=20, db_index=True)
class ModelForeignNoResource(models.Model):
    # Deliberately NOT registered as an API resource; exercises
    # serialization of foreign keys without a resolvable resource.
    name = models.CharField(max_length=20, db_index=True)
class ModelForeignNoResourceForeign(models.Model):
    # FK to a model with no registered resource — its serialization
    # should fall back to the raw primary key value.
    foreign = models.ForeignKey(ModelForeignNoResource)
class ModelHasForeign3(models.Model):
    # Its resource overrides serialize_foreign to embed the related
    # object inline instead of a URI.
    foreign = models.ForeignKey(ModelForeign3)
class ModelMany(models.Model):
    # Target of Model.manytomany.
    char_indexed = models.CharField(max_length=20, db_index=True)
class ModelName(models.Model):
    # Minimal model used only to register a basic resource.
    name = models.CharField(max_length=20, db_index=True)
class ModelOther(models.Model):
    # Its resource applies *10 on serialize and /10 on deserialize to
    # exercise per-field serialization hooks.
    seconds = models.PositiveIntegerField(default=0)
class ModelUnauthorized(models.Model):
    # Objects of this model are rejected by Authz (401 responses).
    name = models.CharField(max_length=20, db_index=True)
class Model(models.Model):
    # Central model exercising both FK (nullable) and M2M relations.
    char_indexed = models.CharField(max_length=20, db_index=True)
    foreign = models.ForeignKey(ModelForeign, null=True)
    manytomany = models.ManyToManyField(ModelMany)
class ModelAllowed(models.Model):
    # Shared by two resources that restrict allowed HTTP methods.
    name = models.CharField(max_length=20)
class ModelVersioned(models.Model):
    # Model with an optimistic-locking style version counter; its
    # resource declares version_field = 'version'.
    name = models.CharField(max_length=20)
    version = models.PositiveIntegerField(default=0)
    def save(self, *args, **kwargs):
        # Bump the version on every save, including the initial insert.
        self.version += 1
        return super(ModelVersioned, self).save(*args, **kwargs)
class ModelFiles(models.Model):
    # Exercises file/image upload handling; uploads land under
    # MEDIA_ROOT/files/ and MEDIA_ROOT/images/ respectively.
    file = models.FileField(max_length=300, upload_to='files/')
    image = models.ImageField(max_length=300, upload_to='images/')
@pytest.mark.django_db
class ResourceTest(TestCaseNoDB):
def _pre_setup(self):
    # Register one resource per model against two API versions before
    # Django's regular test setup runs, so URL reversing works in every
    # test. Registration happens via the @api decorator.
    api = self.api = API('v1', debug=True)
    api2 = self.api2 = API('v2', debug=True)
    @api
    class NameResource(DjangoModelResource):
        model = ModelName
    @api
    class ModelForeignResource(DjangoModelResource):
        # `created` is excluded so responses are deterministic.
        model = ModelForeign
        excludes = ('created',)
    @api2
    class ModelForeign2Resource(DjangoModelResource):
        # use_location makes POST answer 204 + Location header.
        model = ModelForeign2
        use_location = True
    @api
    class ModelForeign3Resource(DjangoModelResource):
        model = ModelForeign3
    @api
    class ModelForeignNoResourceForeignResource(DjangoModelResource):
        model = ModelForeignNoResourceForeign
        name = 'modelforeignnoresourceforeign'
    @api
    class ModelHasForeign3Resource(DjangoModelResource):
        model = ModelHasForeign3
        def serialize_foreign(self, instance, field):
            # Embed the related object inline rather than as a URI.
            resource = self.related_lookup(field)
            return resource.serialize(getattr(instance, field.name))
    @api
    class ModelManyResource(DjangoModelResource):
        model = ModelMany
    @api
    class ModelOtherResource(DjangoModelResource):
        # Per-field hooks: stored value is seconds, wire value is x10.
        model = ModelOther
        def deserialize_seconds(self, instance, field, data):
            return data['seconds'] / 10
        def serialize_seconds(self, instance, field):
            return instance.seconds * 10
    @api
    class ModelUnauthorizedResource(DjangoModelResource):
        model = ModelUnauthorized
    @api
    class ModelResource(DjangoModelResource):
        model = Model
    @api
    class ModelAllowedResource(DjangoModelResource):
        model = ModelAllowed
        allowed_methods = ('get',)
    @api
    class ModelAllowed2Resource(DjangoModelResource):
        # Same restriction, expressed per-endpoint.
        model = ModelAllowed
        allowed_methods_endpoint = {
            'list': ('get',),
            'single': ('get',),
            'set': ('get',),
        }
    @api
    class ModelVersionedResource(DjangoModelResource):
        model = ModelVersioned
        version_field = 'version'
    @api
    class ModelFilesResource(DjangoModelResource):
        model = ModelFiles
    # only to satisfy coverage (calling load() twice)
    for resource in self.api.registered_resources:
        resource.load()
    super(ResourceTest, self)._pre_setup()
def tearDown(self):
    # Wipe every table touched by either API version — plus the
    # resource-less foreign model — so tests remain independent.
    for model in [
        resource.model
        for resource in self.api.registered_resources
    ] + [
        resource.model
        for resource in self.api2.registered_resources
    ] + [
        ModelForeignNoResource,
    ]:
        model.objects.all().delete()
def test_allowed(self):
    # Both method-restriction styles (allowed_methods and
    # allowed_methods_endpoint) must answer 405 to everything but GET,
    # on all three endpoints (list, single, set).
    ModelAllowed.objects.create(id=1, name='allowed')
    ModelAllowed.objects.create(id=2, name='allowed2')
    for code, method in (
        (405, self.client.delete),
        (200, self.client.get),
        (405, self.client.post),
        (405, self.client.put),
    ):
        for endpoint, args in (
            ('list', ()),
            ('single', (1,)),
            ('set', ('1;2',)),
        ):
            response = method(
                self.api.reverse('modelallowed_' + endpoint, args=args)
            )
            assert response.status_code == code
            response = method(
                self.api.reverse('modelallowed2_' + endpoint, args=args)
            )
            assert response.status_code == code
def test_delete_list(self):
    # DELETE on the list endpoint applies the query-string filter and
    # removes only the matching rows.
    ModelForeign.objects.create(id=1, char_indexed='value')
    ModelForeign.objects.create(id=2, char_indexed='value1')
    ModelForeign.objects.create(id=3, char_indexed='value2')
    ModelForeign.objects.create(id=4, char_indexed='other')
    response = self.client.delete(
        self.api.reverse('modelforeign_list')
        + '?char_indexed__startswith=value'
    )
    assert response.status_code == 204
    assert response.content == ''
    assert ModelForeign.objects.count() == 1
    assert ModelForeign.objects.all()[0].id == 4
def test_delete_set(self):
    # DELETE on the set endpoint ('1;3') removes exactly those ids.
    ModelForeign.objects.create(id=1, char_indexed='value')
    ModelForeign.objects.create(id=2, char_indexed='value1')
    ModelForeign.objects.create(id=3, char_indexed='value2')
    ModelForeign.objects.create(id=4, char_indexed='other')
    response = self.client.delete(
        self.api.reverse('modelforeign_set', args=('1;3',))
    )
    assert response.status_code == 204
    assert response.content == ''
    assert ModelForeign.objects.count() == 2
    assert list(ModelForeign.objects.values_list('id', flat=True)) == [2, 4]
def test_delete_single(self):
    # DELETE on a single object answers 204 with an empty body.
    ModelForeign.objects.create(id=1, char_indexed='value')
    response = self.client.delete(
        self.api.reverse('modelforeign_single', args=(1,))
    )
    assert response.status_code == 204
    assert response.content == ''
def test_exclude_error(self):
    # A misspelled option ('exclude' instead of 'excludes') must raise
    # the resource's Error at construction time.
    class ModelResource(DjangoModelResource):
        model = Model
        exclude = 'anything'
    self.assertRaises(ModelResource.Error, ModelResource)
def test_fetch_invalid_id(self):
    # Resolving a URL with a non-integer id and fetching it must raise
    # the API's ValueIssue.
    resource, ids = self.api.resolve([
        self.api.reverse('modelother_single', args=('invalid',)),
    ])
    self.assertRaises(
        self.api.ValueIssue,
        resource.fetch,
        *ids
    )
def test_files_file(self):
    # Multipart POST of a plain file stores it under MEDIA_ROOT/files/
    # and serializes both the path and an href; a subsequent PUT with a
    # plain string for the file field is rejected with 422.
    tmpdir = tempfile.mkdtemp()
    try:
        with self.settings(MEDIA_ROOT=tmpdir), \
             tempfile.NamedTemporaryFile() as tmp:
            tmp.write('hello world\n')
            tmp.seek(0)
            response = self.client.post(
                self.api.reverse('modelfiles_list'),
                {'file': tmp}
            )
            assert response.status_code == 200
            assert json.loads(response.content) == {
                'file': 'files/' + os.path.basename(tmp.name),
                'file_href': '/media/files/' + os.path.basename(tmp.name),
                'id': 1,
                'image': '',
                'image_href': '',
                '_id': 1,
                '_pk': 'id',
                '_uri': '/api/v1/modelfiles/1/',
            }
            response = self.client.put(
                self.api.reverse('modelfiles_single', args=(1,)),
                {'file': 'wont change anything'}
            )
            assert response.status_code == 422
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
@unittest.skipIf(not Image, 'requires PIL/Pillow')
def test_files_image(self):
    # Same as test_files_file, but for the validated ImageField using
    # the IMAGE_JPG fixture; requires PIL/Pillow to be installed.
    tmpdir = tempfile.mkdtemp()
    try:
        with self.settings(MEDIA_ROOT=tmpdir), \
             tempfile.NamedTemporaryFile() as tmp:
            tmp.write(IMAGE_JPG)
            tmp.seek(0)
            response = self.client.post(
                self.api.reverse('modelfiles_list'),
                {'image': tmp}
            )
            assert response.status_code == 200
            assert json.loads(response.content) == {
                'file': '',
                'file_href': '',
                'id': 2,
                'image': 'images/' + os.path.basename(tmp.name),
                'image_href': '/media/images/' + os.path.basename(tmp.name),
                '_id': 2,
                '_pk': 'id',
                '_uri': '/api/v1/modelfiles/2/',
            }
            response = self.client.put(
                self.api.reverse('modelfiles_single', args=(2,)),
                {'image': 'wont change anything'}
            )
            assert response.status_code == 422
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
def test_get_list(self):
    # GET on the list endpoint honors filtering and ordering and wraps
    # results in a meta/objects envelope (no limit => meta is all null).
    ModelForeign.objects.create(id=1, char_indexed='value')
    ModelForeign.objects.create(id=2, char_indexed='value2')
    ModelForeign.objects.create(id=3, char_indexed='value3')
    ModelForeign.objects.create(id=4, char_indexed='other')
    response = self.client.get(
        self.api.reverse('modelforeign_list')
        + '?char_indexed__startswith=value&order_by=id'
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'meta': {
            'limit': None,
            'next': None,
            'offset': 0,
            'previous': None
        },
        'objects': [
            {
                'id': 1,
                'char_indexed': 'value',
                '_id': 1,
                '_pk': 'id',
                '_uri': '/api/v1/modelforeign/1/'
            },
            {
                'id': 2,
                'char_indexed': 'value2',
                '_id': 2,
                '_pk': 'id',
                '_uri': '/api/v1/modelforeign/2/'
            },
            {
                'id': 3,
                'char_indexed': 'value3',
                '_id': 3,
                '_pk': 'id',
                '_uri': '/api/v1/modelforeign/3/'
            },
        ]
    }
def test_get_list_paginate(self):
    # With limit=1, meta carries next/previous URLs that preserve the
    # filter/ordering query string; following `next` yields page two.
    ModelForeign.objects.create(id=1, char_indexed='value')
    ModelForeign.objects.create(id=2, char_indexed='value2')
    ModelForeign.objects.create(id=3, char_indexed='value3')
    ModelForeign.objects.create(id=4, char_indexed='other')
    response = self.client.get(
        self.api.reverse('modelforeign_list')
        + '?char_indexed__startswith=value&order_by=id&limit=1'
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'meta': {
            'limit': 1,
            'next': '/api/v1/modelforeign/?order_by=id&char_indexed__startswith=value&limit=1&offset=1',
            'offset': 0,
            'previous': None
        },
        'objects': [
            {
                'id': 1,
                'char_indexed': 'value',
                '_id': 1,
                '_pk': 'id',
                '_uri': '/api/v1/modelforeign/1/'
            },
        ]
    }
    response = self.client.get(
        '/api/v1/modelforeign/?order_by=id&char_indexed__startswith=value&limit=1&offset=1',
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'meta': {
            'limit': 1,
            'next': '/api/v1/modelforeign/?order_by=id&char_indexed__startswith=value&limit=1&offset=2',
            'offset': 1,
            'previous': '/api/v1/modelforeign/?order_by=id&char_indexed__startswith=value&limit=1&offset=0',
        },
        'objects': [
            {
                'id': 2,
                'char_indexed': 'value2',
                '_id': 2,
                '_pk': 'id',
                '_uri': '/api/v1/modelforeign/2/'
            },
        ]
    }
def test_get_set(self):
    # GET on the set endpoint returns exactly the requested ids,
    # without a pagination meta envelope.
    ModelForeign.objects.create(id=1, char_indexed='value')
    ModelForeign.objects.create(id=2, char_indexed='value2')
    ModelForeign.objects.create(id=3, char_indexed='value3')
    ModelForeign.objects.create(id=4, char_indexed='other')
    response = self.client.get(
        self.api.reverse('modelforeign_set', args=('1;3',))
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'objects': [
            {
                'id': 1,
                'char_indexed': 'value',
                '_id': 1,
                '_pk': 'id',
                '_uri': '/api/v1/modelforeign/1/'
            },
            {
                'id': 3,
                'char_indexed': 'value3',
                '_id': 3,
                '_pk': 'id',
                '_uri': '/api/v1/modelforeign/3/'
            },
        ]
    }
def test_get_set_invalid_id(self):
    # A non-numeric id in a set request yields 400 with the underlying
    # int() conversion error surfaced in the payload.
    response = self.client.get(
        self.api.reverse('modelforeign_set', args=('invalid',))
    )
    assert response.status_code == 400
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'error': 'Invalid ID for model ModelForeign',
        'invalid': [u"invalid literal for int() with base 10: 'invalid'"],
    }
def test_get_set_missing(self):
    # Requesting a set where one id does not exist yields 404 and lists
    # the missing ids.
    ModelForeign.objects.create(id=1, char_indexed='value')
    ModelForeign.objects.create(id=2, char_indexed='value2')
    ModelForeign.objects.create(id=4, char_indexed='other')
    response = self.client.get(
        self.api.reverse('modelforeign_set', args=('1;3',))
    )
    assert response.status_code == 404
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'error': 'Missing some requested objects',
        'missing': [3],
    }
def test_get_set_trailing_separator(self):
    # A trailing ';' produces an empty id element, which the id
    # validator rejects as a blank value => 400.
    response = self.client.get(
        self.api.reverse('modelforeign_set', args=('1;2;',))
    )
    assert response.status_code == 400
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'error': 'Invalid ID for model ModelForeign',
        'invalid': [u"int_or_none_range_1_to_2147483647_no_none_blank_not does not accept blank values"],
    }
def test_get_single(self):
    # GET on a single object returns the serialized fields plus the
    # _id/_pk/_uri bookkeeping keys.
    ModelForeign.objects.create(id=1, char_indexed='value')
    response = self.client.get(
        self.api.reverse('modelforeign_single', args=(1,))
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'id': 1,
        'char_indexed': 'value',
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/modelforeign/1/'
    }
def test_get_single_foreign_null(self):
    # A null FK serializes as None for both the URI ('foreign') and raw
    # id ('foreign_id') keys; an empty M2M serializes as empty lists.
    Model.objects.create(id=1, foreign=None)
    response = self.client.get(
        self.api.reverse('model_single', args=(1,)),
    )
    assert response.status_code == 200
    assert json.loads(response.content) == {
        'id': 1,
        'char_indexed': '',
        'foreign': None,
        'foreign_id': None,
        'manytomany': [],
        'manytomany_id': [],
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/model/1/',
    }
def test_get_single_missing(self):
    # GET on a nonexistent object answers 404 with an empty body.
    response = self.client.get(
        self.api.reverse('modelforeign_single', args=(1,))
    )
    assert response.status_code == 404
    assert response.content == ''
def test_get_single_no_foreign_resource(self):
    # When the related model has no registered resource, the FK is
    # serialized as its raw primary key instead of a URI.
    ModelForeignNoResource.objects.create(id=1, name='foreign')
    ModelForeignNoResourceForeign.objects.create(id=1, foreign_id=1)
    response = self.client.get(
        self.api.reverse('modelforeignnoresourceforeign_single', args=(1,))
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'id': 1,
        'foreign': 1,
        'foreign_id': 1,
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/modelforeignnoresourceforeign/1/'
    }
def test_get_single_defer_char_indexed(self):
    # ?defer= omits the named field from the serialized payload.
    ModelForeign.objects.create(id=1, char_indexed='value')
    response = self.client.get(
        self.api.reverse('modelforeign_single', args=(1,))
        + '?defer=char_indexed'
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'id': 1,
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/modelforeign/1/'
    }
def test_get_single_only_id(self):
    # ?only= restricts the payload to the named field (bookkeeping keys
    # are always included).
    ModelForeign.objects.create(id=1, char_indexed='value')
    response = self.client.get(
        self.api.reverse('modelforeign_single', args=(1,))
        + '?only=id'
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'id': 1,
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/modelforeign/1/'
    }
def test_get_single_unauthorized(self):
    # The Authz policy rejects ModelUnauthorized objects => 401.
    ModelUnauthorized.objects.create(id=1, name='notauthzed')
    response = self.client.get(
        self.api.reverse('modelunauthorized_single', args=(1,))
    )
    assert response.status_code == 401
    assert response.content == ''
def test_hook_serialize_field(self):
    # serialize_seconds multiplies by 10 on the way out and
    # deserialize_seconds divides by 10 on the way in, so a stored 10
    # reads back as 100 and a PUT of 10 stores 1 (and echoes 10).
    ModelOther.objects.create(id=1, seconds=10)
    response = self.client.get(
        self.api.reverse('modelother_single', args=(1,))
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'id': 1,
        'seconds': 100,
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/modelother/1/'
    }
    response = self.client.put(
        self.api.reverse('modelother_single', args=(1,)),
        json.dumps({'seconds': 10}),
        content_type='application/json'
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'id': 1,
        'seconds': 10,
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/modelother/1/'
    }
    other = ModelOther.objects.get(id=1)
    assert other.seconds == 1
def test_many_to_many_update(self):
    # M2M relations serialize as lists of URIs ('manytomany') and raw
    # ids ('manytomany_id'); PUTting a new URI list replaces the set.
    foreign = ModelForeign.objects.create(id=1, char_indexed='value')
    many = ModelMany.objects.create(id=1, char_indexed='value')
    many2 = ModelMany.objects.create(id=2, char_indexed='value2')
    instance = Model.objects.create(id=1, char_indexed='value', foreign=foreign)
    instance.manytomany.add(many)
    response = self.client.get(self.api.reverse('model_single', args=(1,)))
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'id': 1,
        'char_indexed': 'value',
        'foreign': '/api/v1/modelforeign/1/',
        'foreign_id': 1,
        'manytomany': ['/api/v1/modelmany/1/'],
        'manytomany_id': [1],
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/model/1/'
    }
    response = self.client.put(
        self.api.reverse('model_single', args=(1,)),
        json.dumps({'manytomany': [
            '/api/v1/modelmany/2/',
        ]}),
        content_type='application/json'
    )
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json; charset=utf-8'
    assert json.loads(response.content) == {
        'id': 1,
        'char_indexed': 'value',
        'foreign': '/api/v1/modelforeign/1/',
        'foreign_id': 1,
        'manytomany': ['/api/v1/modelmany/2/'],
        'manytomany_id': [2],
        '_id': 1,
        '_pk': 'id',
        '_uri': '/api/v1/model/1/'
    }
def test_post_list(self):
response = self.client.post(
self.api.reverse('modelforeign_list'),
json.dumps({'id': 1, 'char_indexed': 'posted'}),
content_type='application/json'
)
assert response.status_code == 200
assert response['Content-Type'] == 'application/json; charset=utf-8'
assert json.loads(response.content) == {
'id': 1,
'char_indexed': 'posted',
'_id': 1,
'_pk': 'id',
'_uri': '/api/v1/modelforeign/1/'
}
instance = ModelForeign.objects.get(id=1)
assert instance.char_indexed == 'posted'
assert instance.created is not None
    def test_post_list_location(self):
        """POST against the v2 API returns 204 with a Location header
        pointing at the created resource instead of echoing a body."""
        response = self.client.post(
            self.api2.reverse('modelforeign2_list'),
            json.dumps({'id': 1, 'char_indexed': 'posted'}),
            content_type='application/json'
        )
        assert response.status_code == 204
        assert response.content == ''
        assert response['Location'].endswith(self.api2.reverse('modelforeign2_single', args=(1,)))
        # Following the Location header yields the full representation.
        response = self.client.get(response['Location'])
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'id': 1,
            'char_indexed': 'posted',
            '_id': 1,
            '_pk': 'id',
            '_uri': '/api/v2/modelforeign2/1/'
        }
        instance = ModelForeign2.objects.get(id=1)
        assert instance.char_indexed == 'posted'
def test_put_list(self):
ModelForeign.objects.create(id=1, char_indexed='value')
ModelForeign.objects.create(id=2, char_indexed='value2')
ModelForeign.objects.create(id=3, char_indexed='value3')
ModelForeign.objects.create(id=4, char_indexed='other')
response = self.client.put(
self.api.reverse('modelforeign_list')
+ '?char_indexed__startswith=value&order_by=id',
json.dumps({'char_indexed': 'massupdate'}),
content_type='application/json'
)
assert response.status_code == 200
assert response['Content-Type'] == 'application/json; charset=utf-8'
assert json.loads(response.content) == {
'meta': {
'limit': None,
'next': None,
'offset': 0,
'previous': None
},
'objects': [
{
'id': 1,
'char_indexed': 'massupdate',
'_id': 1,
'_pk': 'id',
'_uri': '/api/v1/modelforeign/1/'
},
{
'id': 2,
'char_indexed': 'massupdate',
'_id': 2,
'_pk': 'id',
'_uri': '/api/v1/modelforeign/2/'
},
{
'id': 3,
'char_indexed': 'massupdate',
'_id': 3,
'_pk': 'id',
'_uri': '/api/v1/modelforeign/3/'
},
],
}
id1 = ModelForeign.objects.get(id=1)
id2 = ModelForeign.objects.get(id=2)
id3 = ModelForeign.objects.get(id=3)
id4 = ModelForeign.objects.get(id=4)
assert id1.char_indexed == 'massupdate'
assert id2.char_indexed == 'massupdate'
assert id3.char_indexed == 'massupdate'
assert id4.char_indexed == 'other'
    def test_put_list_location(self):
        """Mass-update through the v2 API: 204 + Location header pointing
        at a 'set' URI covering exactly the updated primary keys."""
        ModelForeign2.objects.create(id=1, char_indexed='value')
        ModelForeign2.objects.create(id=2, char_indexed='value2')
        ModelForeign2.objects.create(id=3, char_indexed='value3')
        ModelForeign2.objects.create(id=4, char_indexed='other')
        response = self.client.put(
            self.api2.reverse('modelforeign2_list')
            + '?char_indexed__startswith=value&order_by=id',
            json.dumps({'char_indexed': 'massupdate'}),
            content_type='application/json'
        )
        assert response.status_code == 204
        assert response.content == ''
        # Location addresses the set of updated ids ('1;2;3').
        assert response['Location'].endswith(
            self.api2.reverse('modelforeign2_set', args=('1;2;3', ))
        )
        response = self.client.get(response['Location'])
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'objects': [
                {
                    'id': 1,
                    'char_indexed': 'massupdate',
                    '_id': 1,
                    '_pk': 'id',
                    '_uri': '/api/v2/modelforeign2/1/'
                },
                {
                    'id': 2,
                    'char_indexed': 'massupdate',
                    '_id': 2,
                    '_pk': 'id',
                    '_uri': '/api/v2/modelforeign2/2/'
                },
                {
                    'id': 3,
                    'char_indexed': 'massupdate',
                    '_id': 3,
                    '_pk': 'id',
                    '_uri': '/api/v2/modelforeign2/3/'
                },
            ],
        }
        id1 = ModelForeign2.objects.get(id=1)
        id2 = ModelForeign2.objects.get(id=2)
        id3 = ModelForeign2.objects.get(id=3)
        id4 = ModelForeign2.objects.get(id=4)
        assert id1.char_indexed == 'massupdate'
        assert id2.char_indexed == 'massupdate'
        assert id3.char_indexed == 'massupdate'
        assert id4.char_indexed == 'other'
    def test_put_set(self):
        """PUT of a list of representations to a set URI ('1;3') updates
        each addressed row individually and returns the updated objects."""
        ModelForeign.objects.create(id=1, char_indexed='value')
        ModelForeign.objects.create(id=2, char_indexed='value2')
        ModelForeign.objects.create(id=3, char_indexed='value3')
        ModelForeign.objects.create(id=4, char_indexed='other')
        response = self.client.put(
            self.api.reverse('modelforeign_set', args=('1;3',)),
            json.dumps([
                {'id': 1, 'char_indexed': 'setupdate1'},
                {'id': 3, 'char_indexed': 'setupdate3'},
            ]),
            content_type='application/json'
        )
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'objects': [
                {
                    'id': 1,
                    'char_indexed': 'setupdate1',
                    '_id': 1,
                    '_pk': 'id',
                    '_uri': '/api/v1/modelforeign/1/'
                },
                {
                    'id': 3,
                    'char_indexed': 'setupdate3',
                    '_id': 3,
                    '_pk': 'id',
                    '_uri': '/api/v1/modelforeign/3/'
                },
            ],
        }
        id1 = ModelForeign.objects.get(id=1)
        id3 = ModelForeign.objects.get(id=3)
        assert id1.char_indexed == 'setupdate1'
        assert id3.char_indexed == 'setupdate3'
def test_put_set_invalid_format(self):
ModelForeign.objects.create(id=1, char_indexed='value')
ModelForeign.objects.create(id=3, char_indexed='value3')
response = self.client.put(
self.api.reverse('modelforeign_set', args=('1;3',)),
json.dumps({}),
content_type='application/json'
)
assert response.status_code == 422
assert response['Content-Type'] == 'application/json; charset=utf-8'
assert json.loads(response.content) == {
'error': 'Supplied data was invalid',
'invalid': ["Unexpected type, expected <type 'list'>: <type 'dict'>"],
}
    def test_put_set_location(self):
        """Set update through the v2 API: 204 + Location header pointing
        back at the same set URI, which then serves the updated objects."""
        ModelForeign2.objects.create(id=1, char_indexed='value')
        ModelForeign2.objects.create(id=2, char_indexed='value2')
        ModelForeign2.objects.create(id=3, char_indexed='value3')
        ModelForeign2.objects.create(id=4, char_indexed='other')
        response = self.client.put(
            self.api2.reverse('modelforeign2_set', args=('1;3',)),
            json.dumps([
                {'id': 1, 'char_indexed': 'setupdate1'},
                {'id': 3, 'char_indexed': 'setupdate3'},
            ]),
            content_type='application/json'
        )
        assert response.status_code == 204
        assert response.content == ''
        assert response['Location'].endswith(
            self.api2.reverse('modelforeign2_set', args=('1;3', ))
        )
        response = self.client.get(response['Location'])
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'objects': [
                {
                    'id': 1,
                    'char_indexed': 'setupdate1',
                    '_id': 1,
                    '_pk': 'id',
                    '_uri': '/api/v2/modelforeign2/1/'
                },
                {
                    'id': 3,
                    'char_indexed': 'setupdate3',
                    '_id': 3,
                    '_pk': 'id',
                    '_uri': '/api/v2/modelforeign2/3/'
                },
            ],
        }
        id1 = ModelForeign2.objects.get(id=1)
        id3 = ModelForeign2.objects.get(id=3)
        assert id1.char_indexed == 'setupdate1'
        assert id3.char_indexed == 'setupdate3'
def test_put_set_must_include_id_in_representation(self):
ModelForeign.objects.create(id=1, char_indexed='value')
ModelForeign.objects.create(id=2, char_indexed='value2')
ModelForeign.objects.create(id=3, char_indexed='value3')
ModelForeign.objects.create(id=4, char_indexed='other')
response = self.client.put(
self.api.reverse('modelforeign_set', args=('1;3',)),
json.dumps([
{'char_indexed': 'setupdate1'},
{'char_indexed': 'setupdate3'},
]),
content_type='application/json'
)
assert response.status_code == 400
assert response['Content-Type'] == 'application/json; charset=utf-8'
assert json.loads(response.content) == {
'error': 'You must supply the primary key with each object',
}
def test_put_single(self):
ModelForeign.objects.create(id=1, char_indexed='value')
response = self.client.put(
self.api.reverse('modelforeign_single', args=(1,)),
json.dumps({'char_indexed': 'updated'}),
content_type='application/json'
)
assert response.status_code == 200
assert response['Content-Type'] == 'application/json; charset=utf-8'
assert json.loads(response.content) == {
'id': 1,
'char_indexed': 'updated',
'_id': 1,
'_pk': 'id',
'_uri': '/api/v1/modelforeign/1/'
}
instance = ModelForeign.objects.get(id=1)
assert instance.char_indexed == 'updated'
assert instance.created is not None
    def test_put_single_location(self):
        """Single-resource update through the v2 API: 204 + Location header
        pointing back at the resource, which serves the updated body."""
        ModelForeign2.objects.create(id=1, char_indexed='value')
        response = self.client.put(
            self.api2.reverse('modelforeign2_single', args=(1,)),
            json.dumps({'char_indexed': 'updated'}),
            content_type='application/json'
        )
        assert response.status_code == 204
        assert response.content == ''
        assert response['Location'].endswith(self.api2.reverse('modelforeign2_single', args=(1,)))
        response = self.client.get(response['Location'])
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'id': 1,
            'char_indexed': 'updated',
            '_id': 1,
            '_pk': 'id',
            '_uri': '/api/v2/modelforeign2/1/'
        }
        instance = ModelForeign2.objects.get(id=1)
        assert instance.char_indexed == 'updated'
    def test_put_single_invalid_manytomany(self):
        """A m2m URI addressing the wrong resource type is rejected with 422
        and a per-field error message."""
        ModelForeign.objects.create(id=1)
        Model.objects.create(id=1, foreign_id=1)
        # 'modelforeign' is not the resource backing 'manytomany' (modelmany).
        response = self.client.put(
            self.api.reverse('model_single', args=(1,)),
            json.dumps({
                'manytomany': [
                    '/api/v1/modelforeign/invalid/',
                ]
            }),
            content_type='application/json'
        )
        assert response.status_code == 422
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'error': 'Supplied data was invalid',
            'invalid': {
                'manytomany': ["Unable to resolve related: [u'invalid'], Unexpected resource found: modelforeign (expected modelmany)"]
            },
        }
def test_query_issues(self):
ModelForeign.objects.create(id=1, char_indexed='value')
ModelForeign.objects.create(id=2, char_indexed='value1')
ModelForeign.objects.create(id=3, char_indexed='value2')
ModelForeign.objects.create(id=4, char_indexed='other')
response = self.client.get(
self.api.reverse('modelforeign_list')
+ '?order_by=created'
)
assert response.status_code == 403
assert json.loads(response.content) == {
'error': 'There are issues with your query',
'invalid': [
'You are required to use an indexed field in the order_by',
],
}
def test_query_issues_invalid_field(self):
response = self.client.get(
self.api.reverse('modelforeign_list')
+ '?notavalidfield=1'
)
assert response.status_code == 403
assert json.loads(response.content) == {
'error': 'There are issues with your query',
'invalid': [
"ModelForeign has no field named 'notavalidfield'",
],
}
    def test_resource_deserialize(self):
        """POSTed representations are deserialized into model rows,
        including foreign-key and many-to-many fields given as URIs."""
        # Create the m2m target first...
        response = self.client.post(
            self.api.reverse('modelmany_list'),
            json.dumps({'char_indexed': 'value'}),
            content_type='application/json'
        )
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'id': 1,
            'char_indexed': 'value',
            '_id': 1,
            '_pk': 'id',
            '_uri': '/api/v1/modelmany/1/',
        }
        instance_many = ModelMany.objects.get(id=1)
        assert instance_many.id == 1
        assert instance_many.char_indexed == 'value'
        # ...then the foreign-key target...
        response = self.client.post(
            self.api.reverse('modelforeign_list'),
            json.dumps({'char_indexed': 'value'}),
            content_type='application/json'
        )
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'id': 1,
            'char_indexed': 'value',
            '_id': 1,
            '_pk': 'id',
            '_uri': '/api/v1/modelforeign/1/',
        }
        instance_foreign = ModelForeign.objects.get(id=1)
        assert instance_foreign.id == 1
        assert instance_foreign.char_indexed == 'value'
        # ...and finally the main model, referencing both by URI.
        response = self.client.post(
            self.api.reverse('model_list'),
            json.dumps({
                'char_indexed': 'value',
                'foreign': self.api.reverse('modelforeign_single', args=(1,)),
                'manytomany': [
                    self.api.reverse('modelmany_single', args=(1,)),
                ]
            }),
            content_type='application/json'
        )
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'id': 1,
            'char_indexed': 'value',
            'foreign': '/api/v1/modelforeign/1/',
            'foreign_id': 1,
            'manytomany': [
                '/api/v1/modelmany/1/',
            ],
            'manytomany_id': [1],
            '_id': 1,
            '_pk': 'id',
            '_uri': '/api/v1/model/1/',
        }
    def test_resource_deserialize_invalid_uri(self):
        """A foreign-key URI pointing at the wrong resource type is
        rejected with 422 and a per-field error message."""
        ModelForeign.objects.create(id=1, char_indexed='value')
        Model.objects.create(id=1, char_indexed='value', foreign_id=1)
        # '/model/1/' is a model URI, but 'foreign' expects a modelforeign URI.
        response = self.client.put(
            self.api.reverse('model_single', args=(1,)),
            json.dumps({
                'foreign': '/api/v1/model/1/',
            }),
            content_type='application/json'
        )
        assert response.status_code == 422
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'error': 'Supplied data was invalid',
            'invalid': {
                'foreign': ["Unable to resolve related: [u'1'], Unexpected resource found: model (expected modelforeign)"]
            }
        }
def test_resource_resolve(self):
resource, ids = self.api.resolve([
self.api.reverse('modelother_single', args=(1,)),
])
self.assertRaises(
ModelOther.DoesNotExist,
resource.fetch,
*ids
)
    def test_resource_serialize(self):
        """GET serializes foreign keys and m2m relations as both resource
        URIs and raw id lists, plus the _id/_pk/_uri metadata fields."""
        instance_foreign = ModelForeign.objects.create(id=1, char_indexed='value')
        instance = Model.objects.create(id=1, char_indexed='value', foreign=instance_foreign)
        instance.manytomany.add(*[
            ModelMany.objects.create(id=1, char_indexed='value'),
            ModelMany.objects.create(id=2, char_indexed='value2'),
            ModelMany.objects.create(id=3, char_indexed='value3'),
        ])
        response = self.client.get(self.api.reverse('model_single', args=(1,)))
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        assert json.loads(response.content) == {
            'id': 1,
            'char_indexed': 'value',
            'foreign': '/api/v1/modelforeign/1/',
            'foreign_id': 1,
            'manytomany': [
                '/api/v1/modelmany/1/',
                '/api/v1/modelmany/2/',
                '/api/v1/modelmany/3/',
            ],
            'manytomany_id': [1,2,3],
            '_id': 1,
            '_pk': 'id',
            '_uri': '/api/v1/model/1/',
        }
    def test_serialize_foreign3(self):
        """ModelHasForeign3 serializes its foreign key as a nested inline
        representation rather than a URI reference."""
        ModelForeign3.objects.create(id=1, char_indexed='value')
        ModelHasForeign3.objects.create(id=1, foreign_id=1)
        response = self.client.get(
            self.api.reverse('modelhasforeign3_single', args=(1,))
        )
        assert json.loads(response.content) == {
            'id': 1,
            'foreign': {
                'id': 1,
                'char_indexed': 'value',
                '_id': 1,
                '_pk': 'id',
                '_uri': '/api/v1/modelforeign3/1/',
            },
            '_id': 1,
            '_pk': 'id',
            '_uri': '/api/v1/modelhasforeign3/1/',
        }
    def test_version_field(self):
        """Optimistic locking: a PUT must carry the current 'version';
        a matching version updates (and increments), a stale one gets 409."""
        ModelVersioned.objects.create(id=1, name='initial')
        _uri = self.api.reverse('modelversioned_single', args=(1,))
        # version 1 matches the freshly-created row -> update, bump to 2.
        response = self.client.put(
            _uri,
            json.dumps({
                'name': 'first',
                'version': 1,
            }),
            content_type='application/json'
        )
        assert response.status_code == 200
        assert json.loads(response.content) == {
            'id': 1,
            'name': 'first',
            'version': 2,
            '_id': 1,
            '_pk': 'id',
            '_uri': _uri,
        }
        # Replaying version 1 is now stale -> 409 Conflict, empty body.
        response = self.client.put(
            _uri,
            json.dumps({
                'name': 'invalid',
                'version': 1,
            }),
            content_type='application/json'
        )
        assert response.status_code == 409
        assert response.content == ''
        # The current version (2) succeeds and bumps to 3.
        response = self.client.put(
            _uri,
            json.dumps({
                'name': 'second',
                'version': 2,
            }),
            content_type='application/json'
        )
        assert response.status_code == 200
        assert json.loads(response.content) == {
            'id': 1,
            'name': 'second',
            'version': 3,
            '_id': 1,
            '_pk': 'id',
            '_uri': _uri,
        }
def test_version_field_missing_version_field(self):
ModelVersioned.objects.create(id=1, name='initial')
_uri = self.api.reverse('modelversioned_single', args=(1,))
response = self.client.put(
_uri,
json.dumps({
'name': 'first',
}),
content_type='application/json'
)
assert response.status_code == 400
assert json.loads(response.content) == {
'error': 'The "version" field must be specified',
}
    def test_version_field_put_set(self):
        """Versioned set updates report per-object errors: uniform missing
        versions -> 400 map, uniform stale versions -> bare 409, and a mix
        of failures -> 400 with per-object codes ('Mixed error codes')."""
        ModelVersioned.objects.create(id=1, name='initial')
        ModelVersioned.objects.create(id=2, name='initial')
        # Both objects omit 'version' -> one 400 entry per primary key.
        response = self.client.put(
            self.api.reverse('modelversioned_set', args=('1;2',)),
            json.dumps([
                {
                    'id': 1,
                    'name': 'invalid',
                },
                {
                    'id': 2,
                    'name': 'invalid',
                },
            ]),
            content_type='application/json'
        )
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'invalid': {
                '1': {'error': 'The "version" field must be specified'},
                '2': {'error': 'The "version" field must be specified'},
            }
        }
        # Both carry a stale version (rows are at 1) -> plain 409, no body.
        response = self.client.put(
            self.api.reverse('modelversioned_set', args=('1;2',)),
            json.dumps([
                {
                    'id': 1,
                    'name': 'invalid',
                    'version': 2,
                },
                {
                    'id': 2,
                    'name': 'invalid',
                    'version': 2,
                },
            ]),
            content_type='application/json'
        )
        assert response.status_code == 409
        assert response.content == ''
        # One missing version (400) plus one stale version (409) -> the
        # response reports both codes per object under 'Mixed error codes'.
        response = self.client.put(
            self.api.reverse('modelversioned_set', args=('1;2',)),
            json.dumps([
                {
                    'id': 1,
                    'name': 'invalid',
                },
                {
                    'id': 2,
                    'name': 'invalid',
                    'version': 2,
                },
            ]),
            content_type='application/json'
        )
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'error': 'Mixed error codes',
            'invalid': {
                '1': {
                    'code': 400,
                    'error': 'The "version" field must be specified'
                },
                '2': {'code': 409},
            }
        }
    @property
    def urls(self):
        """Urlconf for the test: both API versions mounted under '^api/'."""
        from django.conf.urls import include, url
        return self.make_urlconf(
            url('^api/', include(self.api.urls)),
            url('^api/', include(self.api2.urls)),
        )
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the Swift backend store"""
import hashlib
import httplib
import mock
import StringIO
import tempfile
import urllib
import uuid
from oslo.config import cfg
import swiftclient
from glance.store import exceptions
from glance.store.location import get_location_from_uri
from glance.store._drivers import swift
from glance.tests.unit import base
CONF = cfg.CONF
FAKE_UUID = lambda: str(uuid.uuid4())
Store = glance.store.swift.Store
FIVE_KB = 5 * 1024
FIVE_GB = 5 * 1024 * 3
MAX_SWIFT_OBJECT_SIZE = FIVE_GB
SWIFT_PUT_OBJECT_CALLS = 0
SWIFT_CONF = {'verbose': True,
'debug': True,
'known_stores': ['glance.store.swift.Store'],
'default_store': 'swift',
'swift_store_user': 'user',
'swift_store_key': 'key',
'swift_store_auth_address': 'localhost:8080',
'swift_store_container': 'glance'}
# We stub out as little as possible to ensure that the code paths
# between glance.store.swift and swiftclient are tested
# thoroughly
def stub_out_swiftclient(test, swift_store_auth_version):
    """Monkey-patch swiftclient.client with in-memory fakes.

    Container/object operations are backed by local dicts so the code paths
    between glance.store.swift and swiftclient run without a real endpoint.
    Every patch is registered with the test's cleanup machinery.

    :param test: the test case whose addCleanup() will undo the patches
    :param swift_store_auth_version: auth version the fake get_auth expects
    """
    fixture_containers = ['glance']
    fixture_container_headers = {}
    fixture_headers = {
        'glance/%s' % FAKE_UUID: {
            'content-length': FIVE_KB,
            'etag': 'c2e5db72bd7fd153f53ede5da5a06de3'
        }
    }
    fixture_objects = {'glance/%s' % FAKE_UUID:
                       StringIO.StringIO("*" * FIVE_KB)}

    def fake_head_container(url, token, container, **kwargs):
        if container not in fixture_containers:
            msg = "No container %s found" % container
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        return fixture_container_headers

    def fake_put_container(url, token, container, **kwargs):
        fixture_containers.append(container)

    def fake_post_container(url, token, container, headers, http_conn=None):
        for key, value in headers.iteritems():
            fixture_container_headers[key] = value

    def fake_put_object(url, token, container, name, contents, **kwargs):
        # PUT returns the ETag header for the newly-added object
        # Large object manifest...
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS += 1
        # 64 KiB read size (fixed: 'units' is not imported in this module).
        CHUNKSIZE = 64 * 1024
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            if kwargs.get('headers'):
                # Manifest PUT for a chunked (large-object) upload.
                etag = kwargs['headers']['ETag']
                fixture_headers[fixture_key] = {'manifest': True,
                                                'etag': etag}
                return etag
            if hasattr(contents, 'read'):
                # File-like payload: consume it in chunks, checksumming.
                fixture_object = StringIO.StringIO()
                chunk = contents.read(CHUNKSIZE)
                checksum = hashlib.md5()
                while chunk:
                    fixture_object.write(chunk)
                    checksum.update(chunk)
                    chunk = contents.read(CHUNKSIZE)
                etag = checksum.hexdigest()
            else:
                fixture_object = StringIO.StringIO(contents)
                etag = hashlib.md5(fixture_object.getvalue()).hexdigest()
            read_len = fixture_object.len
            if read_len > MAX_SWIFT_OBJECT_SIZE:
                msg = ('Image size:%d exceeds Swift max:%d' %
                       (read_len, MAX_SWIFT_OBJECT_SIZE))
                raise swiftclient.ClientException(
                    msg, http_status=httplib.REQUEST_ENTITY_TOO_LARGE)
            fixture_objects[fixture_key] = fixture_object
            fixture_headers[fixture_key] = {
                'content-length': read_len,
                'etag': etag}
            return etag
        else:
            msg = ("Object PUT failed - Object with key %s already exists"
                   % fixture_key)
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.CONFLICT)

    def fake_get_object(url, token, container, name, **kwargs):
        # GET returns the tuple (list of headers, file object)
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            msg = "Object GET failed"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        fixture = fixture_headers[fixture_key]
        if 'manifest' in fixture:
            # Large object manifest... we return a file containing
            # all objects with prefix of this fixture key
            chunk_keys = sorted([k for k in fixture_headers.keys()
                                 if k.startswith(fixture_key) and
                                 k != fixture_key])
            result = StringIO.StringIO()
            for key in chunk_keys:
                result.write(fixture_objects[key].getvalue())
            return fixture_headers[fixture_key], result
        else:
            return fixture_headers[fixture_key], fixture_objects[fixture_key]

    def fake_head_object(url, token, container, name, **kwargs):
        # HEAD returns the list of headers for an object
        try:
            fixture_key = "%s/%s" % (container, name)
            return fixture_headers[fixture_key]
        except KeyError:
            msg = "Object HEAD failed - Object does not exist"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)

    def fake_delete_object(url, token, container, name, **kwargs):
        # DELETE returns nothing
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            msg = "Object DELETE failed - Object does not exist"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        else:
            del fixture_headers[fixture_key]
            del fixture_objects[fixture_key]

    def fake_http_connection(*args, **kwargs):
        return None

    def fake_get_auth(url, user, key, snet, auth_version, **kwargs):
        if url is None:
            return None, None
        if 'http' in url and '://' not in url:
            raise ValueError('Invalid url %s' % url)
        # Check the auth version against the configured value
        if swift_store_auth_version != auth_version:
            msg = 'AUTHENTICATION failed (version mismatch)'
            raise swiftclient.ClientException(msg)
        return None, None

    to_mock = [('head_container', fake_head_container),
               ('put_container', fake_put_container),
               ('post_container', fake_post_container),
               ('put_object', fake_put_object),
               ('delete_object', fake_delete_object),
               ('head_object', fake_head_object),
               ('get_object', fake_get_object),
               ('get_auth', fake_get_auth),
               ('http_connection', fake_http_connection)]
    for (meth, fake_meth) in to_mock:
        mocked = mock.patch.object(swiftclient.client, meth).start()
        mocked.side_effect = fake_meth
        # unittest's cleanup hook is addCleanup (was misspelled add_cleanUp,
        # which would raise AttributeError at stub time).
        test.addCleanup(mocked.stop)
class SwiftTests(object):
@property
def swift_store_user(self):
return urllib.quote(CONF.swift_store_user)
def test_get_size(self):
"""
Test that we can get the size of an object in the swift store
"""
uri = "swift://%s:key@auth_address/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
image_size = self.store.get_size(loc)
self.assertEqual(image_size, 5120)
    def test_get_size_with_multi_tenant_on(self):
        """Test that single tenant uris work with multi tenant on."""
        uri = ("swift://%s:key@auth_address/glance/%s" %
               (self.swift_store_user, FAKE_UUID))
        self.config(swift_store_multi_tenant=True)
        #NOTE(markwash): ensure the image is found
        # NOTE(review): the bare name 'glance' is only reachable here if the
        # glance.context / glance.store submodules were loaded as a side
        # effect of the imports at the top of the file -- confirm.
        context = glance.context.RequestContext()
        size = glance.store.get_size_from_backend(context, uri)
        self.assertEqual(size, 5120)
def test_get(self):
"""Test a "normal" retrieval of an image in chunks"""
uri = "swift://%s:key@auth_address/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
(image_swift, image_size) = self.store.get(loc)
self.assertEqual(image_size, 5120)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_swift:
data += chunk
self.assertEqual(expected_data, data)
    def test_get_with_http_auth(self):
        """
        Test a retrieval from Swift with an HTTP authurl. This is
        specified either via a Location header with swift+http:// or using
        http:// in the swift_store_auth_address config value
        """
        loc = get_location_from_uri("swift+http://%s:key@auth_address/"
                                    "glance/%s" %
                                    (self.swift_store_user, FAKE_UUID))
        (image_swift, image_size) = self.store.get(loc)
        self.assertEqual(image_size, 5120)
        expected_data = "*" * FIVE_KB
        data = ""
        # Accumulate the chunked stream and compare against the fixture.
        for chunk in image_swift:
            data += chunk
        self.assertEqual(expected_data, data)
def test_get_non_existing(self):
"""
Test that trying to retrieve a swift that doesn't exist
raises an error
"""
loc = get_location_from_uri("swift://%s:key@authurl/glance/noexist" % (
self.swift_store_user))
self.assertRaises(exceptions.NotFound,
self.store.get,
loc)
    def test_add(self):
        """Test that we can add an image via the swift backend"""
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+https://%s:key@localhost:8080/glance/%s'
        expected_location = loc % (self.swift_store_user,
                                   expected_image_id)
        image_swift = StringIO.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_swift,
                                                     expected_swift_size)
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting a single object to be created on Swift i.e. no chunking.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = new_image_swift.getvalue()
        # NOTE(review): len() is taken of the returned object itself rather
        # than of new_image_contents -- this assumes store.get() returns an
        # object with a usable __len__; confirm against the store driver.
        new_image_swift_size = len(new_image_swift)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_auth_url_variations(self):
        """
        Test that we can add an image via the swift backend with
        a variety of different auth_address values
        """
        # Maps configured swift_store_auth_address -> expected location
        # template ('%s' slots are user and image id).
        variations = {
            'http://localhost:80': 'swift+http://%s:key@localhost:80'
                                   '/glance/%s',
            'http://localhost': 'swift+http://%s:key@localhost/glance/%s',
            'http://localhost/v1': 'swift+http://%s:key@localhost'
                                   '/v1/glance/%s',
            'http://localhost/v1/': 'swift+http://%s:key@localhost'
                                    '/v1/glance/%s',
            'https://localhost': 'swift+https://%s:key@localhost/glance/%s',
            'https://localhost:8080': 'swift+https://%s:key@localhost:8080'
                                      '/glance/%s',
            'https://localhost/v1': 'swift+https://%s:key@localhost'
                                    '/v1/glance/%s',
            'https://localhost/v1/': 'swift+https://%s:key@localhost'
                                     '/v1/glance/%s',
            'localhost': 'swift+https://%s:key@localhost/glance/%s',
            'localhost:8080/v1': 'swift+https://%s:key@localhost:8080'
                                 '/v1/glance/%s',
        }
        for variation, expected_location in variations.items():
            image_id = str(uuid.uuid4())
            expected_location = expected_location % (
                self.swift_store_user, image_id)
            expected_swift_size = FIVE_KB
            expected_swift_contents = "*" * expected_swift_size
            expected_checksum = \
                hashlib.md5(expected_swift_contents).hexdigest()
            image_swift = StringIO.StringIO(expected_swift_contents)
            global SWIFT_PUT_OBJECT_CALLS
            SWIFT_PUT_OBJECT_CALLS = 0
            # Re-instantiate the store so it picks up the new auth address.
            self.config(swift_store_auth_address=variation)
            self.store = Store()
            location, size, checksum, _ = self.store.add(image_id, image_swift,
                                                         expected_swift_size)
            self.assertEqual(expected_location, location)
            self.assertEqual(expected_swift_size, size)
            self.assertEqual(expected_checksum, checksum)
            self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
            loc = get_location_from_uri(expected_location)
            (new_image_swift, new_image_size) = self.store.get(loc)
            new_image_contents = new_image_swift.getvalue()
            new_image_swift_size = len(new_image_swift)
            self.assertEqual(expected_swift_contents, new_image_contents)
            self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_no_container_no_create(self):
        """
        Tests that adding an image with a non-existing container
        raises an appropriate exception
        """
        self.config(swift_store_create_container_on_put=False,
                    swift_store_container='noexist')
        self.store = Store()
        image_swift = StringIO.StringIO("nevergonnamakeit")
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        # We check the exception text to ensure the container
        # missing text is found in it, otherwise, we would have
        # simply used self.assertRaises here
        exception_caught = False
        try:
            self.store.add(str(uuid.uuid4()), image_swift, 0)
        # NOTE(review): 'backend' is not defined in the imports visible in
        # this module -- confirm where BackendException comes from.
        except backend.BackendException as e:
            exception_caught = True
            self.assertTrue("container noexist does not exist "
                            "in Swift" in str(e))
        self.assertTrue(exception_caught)
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 0)
    def test_add_no_container_and_create(self):
        """
        Tests that adding an image with a non-existing container
        creates the container automatically if flag is set
        """
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+https://%s:key@localhost:8080/noexist/%s'
        expected_location = loc % (self.swift_store_user,
                                   expected_image_id)
        image_swift = StringIO.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        # 'noexist' is absent from the stub's containers; the flag makes
        # the store create it on the fly instead of failing.
        self.config(swift_store_create_container_on_put=True,
                    swift_store_container='noexist')
        self.store = Store()
        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_swift,
                                                     expected_swift_size)
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = new_image_swift.getvalue()
        new_image_swift_size = len(new_image_swift)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_large_object(self):
        """
        Tests that adding a very large image. We simulate the large
        object by setting store.large_object_size to a small number
        and then verify that there have been a number of calls to
        put_object()...
        """
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+https://%s:key@localhost:8080/glance/%s'
        expected_location = loc % (self.swift_store_user,
                                   expected_image_id)
        image_swift = StringIO.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        self.config(swift_store_container='glance')
        self.store = Store()
        # Shrink the chunking thresholds so a 5 KB image is "large",
        # restoring them afterwards so other tests are unaffected.
        orig_max_size = self.store.large_object_size
        orig_temp_size = self.store.large_object_chunk_size
        try:
            self.store.large_object_size = 1024
            self.store.large_object_chunk_size = 1024
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image_swift,
                                                         expected_swift_size)
        finally:
            self.store.large_object_chunk_size = orig_temp_size
            self.store.large_object_size = orig_max_size
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting 6 objects to be created on Swift -- 5 chunks and 1
        # manifest.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 6)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = new_image_swift.getvalue()
        new_image_swift_size = len(new_image_swift)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_large_object_zero_size(self):
        """
        Tests that adding an image to Swift which has both an unknown size and
        exceeds Swift's maximum limit of 5GB is correctly uploaded.

        We avoid the overhead of creating a 5GB object for this test by
        temporarily setting MAX_SWIFT_OBJECT_SIZE to 1KB, and then adding
        an object of 5KB.

        Bug lp:891738
        """
        # Set up a 'large' image of 5KB
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+https://%s:key@localhost:8080/glance/%s'
        expected_location = loc % (self.swift_store_user,
                                   expected_image_id)
        image_swift = StringIO.StringIO(expected_swift_contents)
        # reset the stub's call counter so we count only this test's uploads
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        # Temporarily set Swift MAX_SWIFT_OBJECT_SIZE to 1KB and add our image,
        # explicitly setting the image_length to 0
        self.config(swift_store_container='glance')
        self.store = Store()
        orig_max_size = self.store.large_object_size
        orig_temp_size = self.store.large_object_chunk_size
        global MAX_SWIFT_OBJECT_SIZE
        orig_max_swift_object_size = MAX_SWIFT_OBJECT_SIZE
        try:
            MAX_SWIFT_OBJECT_SIZE = 1024
            self.store.large_object_size = 1024
            self.store.large_object_chunk_size = 1024
            # image_length of 0 forces the driver into its unknown-size path
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image_swift, 0)
        finally:
            # restore the module/store level limits for subsequent tests
            self.store.large_object_chunk_size = orig_temp_size
            self.store.large_object_size = orig_max_size
            MAX_SWIFT_OBJECT_SIZE = orig_max_swift_object_size
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting 7 calls to put_object -- 5 chunks, a zero chunk which is
        # then deleted, and the manifest.  Note the difference with above
        # where the image_size is specified in advance (there's no zero chunk
        # in that case).
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 7)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = new_image_swift.getvalue()
        new_image_swift_size = len(new_image_swift)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
image_swift = StringIO.StringIO("nevergonnamakeit")
self.assertRaises(exceptions.Duplicate,
self.store.add,
FAKE_UUID, image_swift, 0)
    def test_add_saves_and_reraises_and_not_uses_wildcard_raise(self):
        """
        A ClientConnectionError raised while uploading must propagate to
        the caller unchanged, even when the stale-chunk cleanup itself
        raises and swallows an unrelated exception internally.
        """
        image_id = str(uuid.uuid4())
        swift_size = self.store.large_object_size = 1024
        loc = 'swift+https://%s:key@localhost:8080/glance/%s'
        swift_contents = "*" * swift_size
        connection = mock.Mock()
        # cleanup stub: raises and swallows its own exception, so a
        # wildcard `raise` in the driver would re-raise the wrong error
        def fake_delete_chunk(connection,
                              container,
                              chunks):
            try:
                raise Exception()
            except Exception:
                pass
        image_swift = StringIO.StringIO(swift_contents)
        # make every upload attempt fail with a connection error
        connection.put_object.side_effect = exceptions.ClientConnectionError
        self.store._delete_stale_chunks = fake_delete_chunk
        self.assertRaises(exceptions.ClientConnectionError,
                          self.store.add,
                          image_id,
                          image_swift,
                          swift_size,
                          connection)
def _option_required(self, key):
conf = self.getConfig()
conf[key] = None
try:
self.config(**conf)
self.store = Store()
return self.store.add == self.store.add_disabled
except Exception:
return False
return False
def test_no_user(self):
"""
Tests that options without user disables the add method
"""
self.assertTrue(self._option_required('swift_store_user'))
def test_no_key(self):
"""
Tests that options without key disables the add method
"""
self.assertTrue(self._option_required('swift_store_key'))
def test_no_auth_address(self):
"""
Tests that options without auth address disables the add method
"""
self.assertTrue(self._option_required('swift_store_auth_address'))
def test_delete(self):
"""
Test we can delete an existing image in the swift store
"""
uri = "swift://%s:key@authurl/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
self.store.delete(loc)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
Test that trying to delete a swift that doesn't exist
raises an error
"""
loc = get_location_from_uri("swift://%s:key@authurl/glance/noexist" % (
self.swift_store_user))
self.assertRaises(exceptions.NotFound, self.store.delete, loc)
def test_read_acl_public(self):
"""
Test that we can set a public read acl.
"""
self.config(swift_store_multi_tenant=True)
context = glance.context.RequestContext()
store = Store(context)
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
store.set_acls(loc, public=True)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Read'],
".r:*,.rlistings")
def test_read_acl_tenants(self):
"""
Test that we can set read acl for tenants.
"""
self.config(swift_store_multi_tenant=True)
context = glance.context.RequestContext()
store = Store(context)
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
read_tenants = ['matt', 'mark']
store.set_acls(loc, read_tenants=read_tenants)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Read'],
'matt:*,mark:*')
def test_write_acls(self):
"""
Test that we can set write acl for tenants.
"""
self.config(swift_store_multi_tenant=True)
context = glance.context.RequestContext()
store = Store(context)
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
read_tenants = ['frank', 'jim']
store.set_acls(loc, write_tenants=read_tenants)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Write'],
'frank:*,jim:*')
class TestStoreAuthV1(base.StoreClearingUnitTest, SwiftTests):
    """Run the shared SwiftTests suite against Swift auth version 1."""
    def getConfig(self):
        conf = dict(SWIFT_CONF)
        conf.update({'swift_store_auth_version': '1',
                     'swift_store_user': 'user'})
        return conf
    def setUp(self):
        """Establish a clean test environment"""
        conf = self.getConfig()
        self.config(**conf)
        super(TestStoreAuthV1, self).setUp()
        # stub the swift client before the store is instantiated so no
        # real network connections are made
        stub_out_swiftclient(self, conf['swift_store_auth_version'])
        self.store = Store()
class TestStoreAuthV2(TestStoreAuthV1):
    """Repeat the auth-v1 suite with Swift auth version 2."""
    def getConfig(self):
        conf = super(TestStoreAuthV2, self).getConfig()
        # auth v2 requires the user to be qualified with a tenant
        conf['swift_store_user'] = 'tenant:user'
        conf['swift_store_auth_version'] = '2'
        return conf
    def test_v2_with_no_tenant(self):
        """A v2 user without a 'tenant:' prefix must be rejected."""
        conf = self.getConfig()
        conf['swift_store_user'] = 'failme'
        uri = "swift://%s:key@auth_address/glance/%s" % (
            conf['swift_store_user'], FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get,
                          loc)
    def test_v2_multi_tenant_location(self):
        """A multi-tenant style swift URI resolves to the 'swift' store."""
        conf = self.getConfig()
        conf['swift_store_multi_tenant'] = True
        # NOTE(review): conf is built but never applied via self.config;
        # presumably only the URI parsing is under test -- confirm
        uri = "swift://auth_address/glance/%s" % (FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.assertEqual('swift', loc.store_name)
class FakeConnection(object):
    """
    Minimal stand-in for swiftclient.client.Connection that simply
    records its constructor arguments for later inspection by tests.
    """
    def __init__(self, authurl, user, key, retries=5, preauthurl=None,
                 preauthtoken=None, snet=False, starting_backoff=1,
                 tenant_name=None, os_options=None, auth_version="1",
                 insecure=False, ssl_compression=True):
        self.authurl = authurl
        self.user = user
        self.key = key
        self.preauthurl = preauthurl
        self.preauthtoken = preauthtoken
        self.snet = snet
        self.tenant_name = tenant_name
        # avoid the shared-mutable-default pitfall: the original used
        # `os_options={}`, so every instance created without the keyword
        # shared (and could contaminate) one dict
        self.os_options = os_options if os_options is not None else {}
        self.auth_version = auth_version
        self.insecure = insecure
class TestSingleTenantStoreConnections(base.IsolatedUnitTest):
    """
    Verify how SingleTenantStore builds swiftclient connections from a
    StoreLocation; swiftclient.Connection is replaced by FakeConnection
    so the constructor arguments can be inspected directly.
    """
    def setUp(self):
        super(TestSingleTenantStoreConnections, self).setUp()
        self.stubs.Set(swiftclient, 'Connection', FakeConnection)
        self.store = glance.store.swift.SingleTenantStore()
        # a representative v2-auth location shared by all tests below
        specs = {'scheme': 'swift',
                 'auth_or_store_url': 'example.com/v2/',
                 'user': 'tenant:user',
                 'key': 'abcdefg',
                 'container': 'cont',
                 'obj': 'object'}
        self.location = glance.store.swift.StoreLocation(specs)
    def test_basic_connection(self):
        """Default config yields an https v2 connection with the user split."""
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
        self.assertEqual(connection.auth_version, '2')
        # the 'tenant:user' credential is split into its two parts
        self.assertEqual(connection.user, 'user')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertEqual(connection.key, 'abcdefg')
        self.assertFalse(connection.snet)
        self.assertEqual(connection.preauthurl, None)
        self.assertEqual(connection.preauthtoken, None)
        self.assertFalse(connection.insecure)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_no_trailing_slash(self):
        """A missing trailing slash on the auth URL is normalized."""
        self.location.auth_or_store_url = 'example.com/v2'
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
    def test_connection_insecure(self):
        """swift_store_auth_insecure propagates to the connection."""
        self.config(swift_store_auth_insecure=True)
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertTrue(connection.insecure)
    def test_connection_with_auth_v1(self):
        """With auth v1 the user string is passed through unsplit."""
        self.config(swift_store_auth_version='1')
        self.store.configure()
        self.location.user = 'auth_v1_user'
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.auth_version, '1')
        self.assertEqual(connection.user, 'auth_v1_user')
        self.assertEqual(connection.tenant_name, None)
    def test_connection_invalid_user(self):
        """More than one ':' in the user credential is rejected."""
        self.store.configure()
        self.location.user = 'invalid:format:user'
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get_connection, self.location)
    def test_connection_missing_user(self):
        """A location without a user credential is rejected."""
        self.store.configure()
        self.location.user = None
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get_connection, self.location)
    def test_connection_with_region(self):
        """swift_store_region is forwarded in os_options."""
        self.config(swift_store_region='Sahara')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'region_name': 'Sahara',
                          'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_service_type(self):
        """swift_store_service_type overrides the default service type."""
        self.config(swift_store_service_type='shoe-store')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'service_type': 'shoe-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_endpoint_type(self):
        """swift_store_endpoint_type overrides the default endpoint type."""
        self.config(swift_store_endpoint_type='internalURL')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'internalURL'})
    def test_connection_with_snet(self):
        """swift_enable_snet turns on the service-network flag."""
        self.config(swift_enable_snet=True)
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertTrue(connection.snet)
class TestMultiTenantStoreConnections(base.IsolatedUnitTest):
    """
    Verify how MultiTenantStore builds swiftclient connections: the
    credentials come from the request context (preauth token) rather
    than from the store location.
    """
    def setUp(self):
        super(TestMultiTenantStoreConnections, self).setUp()
        self.stubs.Set(swiftclient, 'Connection', FakeConnection)
        self.context = glance.context.RequestContext(
            user='user', tenant='tenant', auth_tok='0123')
        self.store = glance.store.swift.MultiTenantStore(self.context)
        # multi-tenant locations carry the storage URL, not an auth URL
        specs = {'scheme': 'swift',
                 'auth_or_store_url': 'example.com',
                 'container': 'cont',
                 'obj': 'object'}
        self.location = glance.store.swift.StoreLocation(specs)
    def test_basic_connection(self):
        """Context credentials are used; no authurl/key is needed."""
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, None)
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'user')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertEqual(connection.key, None)
        self.assertFalse(connection.snet)
        # pre-authenticated: storage URL and token come from the context
        self.assertEqual(connection.preauthurl, 'https://example.com')
        self.assertEqual(connection.preauthtoken, '0123')
        self.assertEqual(connection.os_options, {})
    def test_connection_with_snet(self):
        """swift_enable_snet turns on the service-network flag."""
        self.config(swift_enable_snet=True)
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertTrue(connection.snet)
class FakeGetEndpoint(object):
    """
    Callable stub for auth.get_endpoint: records the keyword arguments
    of the most recent call and returns a canned response.
    """
    def __init__(self, response):
        self.response = response
    def __call__(self, service_catalog, service_type=None,
                 endpoint_region=None, endpoint_type=None):
        # remember how we were queried so tests can assert on it later
        self.service_type, self.endpoint_region, self.endpoint_type = (
            service_type, endpoint_region, endpoint_type)
        return self.response
class TestCreatingLocations(base.IsolatedUnitTest):
    """
    Verify how the single- and multi-tenant stores construct the
    StoreLocation for a new image.
    """
    def test_single_tenant_location(self):
        """The location embeds auth URL, container, and credentials."""
        self.config(swift_store_auth_address='example.com/v2',
                    swift_store_container='container',
                    swift_store_user='tenant:user',
                    swift_store_key='auth_key')
        store = glance.store.swift.SingleTenantStore()
        location = store.create_location('image-id')
        # no scheme on the auth address defaults to https
        self.assertEqual(location.scheme, 'swift+https')
        self.assertEqual(location.swift_url, 'https://example.com/v2')
        self.assertEqual(location.container, 'container')
        self.assertEqual(location.obj, 'image-id')
        self.assertEqual(location.user, 'tenant:user')
        self.assertEqual(location.key, 'auth_key')
    def test_single_tenant_location_http(self):
        """An explicit http:// auth address yields a swift+http scheme."""
        self.config(swift_store_auth_address='http://example.com/v2',
                    swift_store_container='container',
                    swift_store_user='tenant:user',
                    swift_store_key='auth_key')
        store = glance.store.swift.SingleTenantStore()
        location = store.create_location('image-id')
        self.assertEqual(location.scheme, 'swift+http')
        self.assertEqual(location.swift_url, 'http://example.com/v2')
    def test_multi_tenant_location(self):
        """The location uses the catalog endpoint and has no credentials."""
        self.config(swift_store_container='container')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(glance.store.common.auth, 'get_endpoint', fake_get_endpoint)
        context = glance.context.RequestContext(
            user='user', tenant='tenant', auth_tok='123',
            service_catalog={})
        store = glance.store.swift.MultiTenantStore(context)
        location = store.create_location('image-id')
        self.assertEqual(location.scheme, 'swift+https')
        self.assertEqual(location.swift_url, 'https://some_endpoint')
        # multi-tenant containers are suffixed with the image id
        self.assertEqual(location.container, 'container_image-id')
        self.assertEqual(location.obj, 'image-id')
        self.assertEqual(location.user, None)
        self.assertEqual(location.key, None)
        self.assertEqual(fake_get_endpoint.service_type, 'object-store')
    def test_multi_tenant_location_http(self):
        """An http endpoint from the catalog yields a swift+http scheme."""
        fake_get_endpoint = FakeGetEndpoint('http://some_endpoint')
        self.stubs.Set(glance.store.common.auth, 'get_endpoint', fake_get_endpoint)
        context = glance.context.RequestContext(
            user='user', tenant='tenant', auth_tok='123',
            service_catalog={})
        store = glance.store.swift.MultiTenantStore(context)
        location = store.create_location('image-id')
        self.assertEqual(location.scheme, 'swift+http')
        self.assertEqual(location.swift_url, 'http://some_endpoint')
    def test_multi_tenant_location_with_region(self):
        """swift_store_region is passed to the endpoint lookup."""
        self.config(swift_store_region='WestCarolina')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(glance.store.common.auth, 'get_endpoint', fake_get_endpoint)
        context = glance.context.RequestContext(
            user='user', tenant='tenant', auth_tok='123',
            service_catalog={})
        # NOTE(review): constructing the store appears to trigger the
        # endpoint lookup -- no create_location call is needed here
        store = glance.store.swift.MultiTenantStore(context)
        self.assertEqual(fake_get_endpoint.endpoint_region, 'WestCarolina')
    def test_multi_tenant_location_custom_service_type(self):
        """swift_store_service_type is passed to the endpoint lookup."""
        self.config(swift_store_service_type='toy-store')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(glance.store.common.auth, 'get_endpoint', fake_get_endpoint)
        context = glance.context.RequestContext(
            user='user', tenant='tenant', auth_tok='123',
            service_catalog={})
        store = glance.store.swift.MultiTenantStore(context)
        self.assertEqual(fake_get_endpoint.service_type, 'toy-store')
    def test_multi_tenant_location_custom_endpoint_type(self):
        """swift_store_endpoint_type is passed to the endpoint lookup."""
        self.config(swift_store_endpoint_type='InternalURL')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(glance.store.common.auth, 'get_endpoint', fake_get_endpoint)
        context = glance.context.RequestContext(
            user='user', tenant='tenant', auth_tok='123',
            service_catalog={})
        store = glance.store.swift.MultiTenantStore(context)
        self.assertEqual(fake_get_endpoint.endpoint_type, 'InternalURL')
class TestChunkReader(base.StoreClearingUnitTest):
    """Exercise the driver's ChunkReader helper in isolation."""
    def test_read_all_data(self):
        """
        Replicate what goes on in the Swift driver with the
        repeated creation of the ChunkReader object
        """
        CHUNKSIZE = 100
        checksum = hashlib.md5()
        data_file = tempfile.NamedTemporaryFile()
        data_file.write('*' * units.Ki)
        data_file.flush()
        infile = open(data_file.name, 'rb')
        try:
            bytes_read = 0
            while True:
                # a fresh ChunkReader per chunk, mirroring the driver's
                # usage; all readers share one file and checksum object
                cr = glance.store.swift.ChunkReader(infile, checksum, CHUNKSIZE)
                chunk = cr.read(CHUNKSIZE)
                bytes_read += len(chunk)
                if not chunk:
                    break
            self.assertEqual(1024, bytes_read)
        finally:
            # the original leaked this handle; close it explicitly
            infile.close()
            data_file.close()
| |
"""
This is adapted from the PyObjC PythonBrowser demo.
I believe that demo was written by Just van Rossum.
"""
import AppKit
from objc import python_method
from operator import getitem, setitem
import inspect
from vanilla.vanillaBase import VanillaBaseObject
from vanilla.nsSubclasses import getNSSubclass
import warnings
# silence noisy UserWarnings emitted while introspecting arbitrary objects
warnings.filterwarnings("ignore", category=UserWarning)
# Maps Python type names (type(obj).__name__) to the friendlier labels
# shown in the browser's "Type" column; unmapped names pass through as-is.
TYPE_COLUMN_MAP = {
    "list" : "List",
    "dict" : "Dict",
    "NoneType" : "None",
    "instance" : "",
    "int" : "Integer",
    "float" : "Float",
    "str" : "String",
    "instancemethod" : "Method"
}
class ObjectBrowser(VanillaBaseObject):
    """
    An object browser.

    **posSize** Tuple of form *(left, top, width, height)* or *"auto"* representing the position and
    size of the browser.

    **obj** The object to be displayed.
    """
    def __init__(self, posSize, obj):
        # the model acts as both data source and delegate for the outline view
        self._model = PythonBrowserModel.alloc().initWithObject_(obj)
        self._posSize = posSize
        # the scroll view is this vanilla object's backing NSView
        self._nsObject = getNSSubclass("NSScrollView")(self)
        self._nsObject.setAutohidesScrollers_(True)
        self._nsObject.setHasHorizontalScroller_(True)
        self._nsObject.setHasVerticalScroller_(True)
        self._nsObject.setBorderType_(AppKit.NSBezelBorder)
        self._nsObject.setDrawsBackground_(True)
        self._outlineView = getNSSubclass("NSOutlineView")(self)
        self._outlineView.setFrame_(((0, 0), (100, 100)))
        self._outlineView.setUsesAlternatingRowBackgroundColors_(True)
        self._outlineView.setAllowsColumnResizing_(True)
        self._outlineView.setRowHeight_(17.0)
        self._outlineView.setColumnAutoresizingStyle_(AppKit.NSTableViewUniformColumnAutoresizingStyle)
        # column identifiers must match the attribute names on PythonItem
        columns = [
            ("name", "Name"),
            ("type", "Type"),
            ("value", "Value"),
            ("arguments", "Arguments")
        ]
        for key, title in columns:
            column = AppKit.NSTableColumn.alloc().initWithIdentifier_(key)
            column.setResizingMask_(AppKit.NSTableColumnAutoresizingMask | AppKit.NSTableColumnUserResizingMask)
            column.headerCell().setTitle_(title)
            dataCell = column.dataCell()
            dataCell.setDrawsBackground_(False)
            dataCell.setStringValue_("") # cells have weird default values
            column.setEditable_(False)
            self._outlineView.addTableColumn_(column)
            if key == "name":
                # the "name" column carries the expansion triangles
                self._outlineView.setOutlineTableColumn_(column)
        self._outlineView.setDataSource_(self._model)
        self._outlineView.setDelegate_(self._model)
        self._nsObject.setDocumentView_(self._outlineView)
        self._setAutosizingFromPosSize(posSize)
    def getNSScrollView(self):
        """Return the underlying NSScrollView object."""
        return self._nsObject
    def getNSOutlineView(self):
        """Return the underlying NSOutlineView object."""
        return self._outlineView
class PythonBrowserModel(AppKit.NSObject):
    """This is a delegate as well as a data source for NSOutlineViews."""
    def initWithObject_(self, obj):
        self = self.init()
        self.setObject_(obj)
        return self
    def setObject_(self, obj):
        # wrap the root object; child items are created lazily on expand
        self.root = PythonItem("<root>", obj, None, None)
    # NSOutlineViewDataSource methods
    def outlineView_numberOfChildrenOfItem_(self, view, item):
        # AppKit passes None for the (invisible) root item
        if item is None:
            item = self.root
        return len(item)
    def outlineView_child_ofItem_(self, view, child, item):
        if item is None:
            item = self.root
        return item.getChild(child)
    def outlineView_isItemExpandable_(self, view, item):
        if item is None:
            item = self.root
        return item.isExpandable()
    def outlineView_objectValueForTableColumn_byItem_(self, view, col, item):
        if item is None:
            item = self.root
        # column identifiers match PythonItem attribute names
        identifier = col.identifier()
        value = getattr(item, identifier)
        # filter the type values
        if identifier == "type":
            value = TYPE_COLUMN_MAP.get(value, value)
        return value
    def outlineView_shouldEditTableColumn_item_(self, view, col, item):
        # the browser is read-only
        return False
    def outlineView_toolTipForCell_rect_tableColumn_item_mouseLocation_(self, view, cell, rect, col, item, location):
        ## adding a tooltip, use the __doc__ from the object
        return item.getDoc(), rect
# objects of these types are not eligible for expansion in the outline view
SIMPLE_TYPES = (str, int, float, complex)
def getChilderen(root):
    """
    Collect the browsable member names of *root*: everything reported
    by inspect.getmembers except private names, PyObjC machinery, and
    native selectors.
    """
    names = []
    for name, member in inspect.getmembers(root):
        # skip private/dunder attributes and PyObjC bookkeeping
        if name.startswith(("_", "pyobjc_")):
            continue
        # skip Objective-C native selectors
        if type(member).__name__ in ["native_selector"]:
            continue
        names.append(name)
    return names
def getArguments(obj):
    """
    Return a formatted argument spec for a method or function,
    with 'self' stripped out. Returns "" for objects that have no
    retrievable signature (builtins, non-callables, ...).
    """
    try:
        # inspect.getargspec/formatargspec were removed in Python
        # 3.11/3.13; inspect.signature is the supported replacement
        arguments = str(inspect.signature(obj))
    except (TypeError, ValueError):
        # no introspectable signature available
        arguments = ""
    return arguments.replace("self, ", "").replace("self", "")
class PythonItem(AppKit.NSObject):
    """Wrapper class for items to be displayed in the outline view."""
    # We keep references to all child items (once created). This is
    # necessary because NSOutlineView holds on to PythonItem instances
    # without retaining them. If we don't make sure they don't get
    # garbage collected, the app will crash. For the same reason this
    # class _must_ derive from NSObject, since otherwise autoreleased
    # proxies will be fed to NSOutlineView, which will go away too soon.
    def __new__(cls, *args, **kwargs):
        # "Pythonic" constructor
        return cls.alloc().init()
    def __init__(self, name, obj, parent, setvalue, ignoreAppKit=True):
        # the columns of the outline view read these attributes directly:
        # name, type, value, arguments
        self.realName = name
        self.name = str(name)
        self.parent = parent
        self.arguments = ""
        self.type = type(obj).__name__
        if obj is None:
            self.value = "None"
        elif not isinstance(obj, SIMPLE_TYPES):
            # containers/instances show their children instead of a value
            self.value = ""
        else:
            self.value = obj
        ## in PyObjC a python_selector has a `callable` attr which is actually the wrapped method or function
        if self.type == "python_selector" and hasattr(obj, "callable"):
            obj = obj.callable
        self.object = obj
        # children: list of child keys/indices/names; getters/setters map
        # each child name to the access callback (getitem/getattr, ...)
        self.children = []
        self.getters = dict()
        self.setters = dict()
        if isinstance(obj, dict):
            self.children = sorted(obj.keys())
            self._setGetters(self.children, getitem)
            self._setSetters(self.children, setitem)
        elif obj is None or isinstance(obj, SIMPLE_TYPES):
            pass
        elif isinstance(obj, (list, tuple, set)):
            self.children = list(range(len(obj)))
            self._setGetters(self.children, getitem)
            self._setSetters(self.children, setitem)
        elif isinstance(obj, property):
            pass
        elif inspect.ismethod(obj):
            self.arguments = getArguments(obj)
        elif inspect.isfunction(obj):
            self.arguments = getArguments(obj)
        else:
            # arbitrary object: probe duck-typed list/dict behaviour first,
            # then fall back to attribute introspection
            try:
                l = list(obj)
                self.children = list(range(len(l)))
                self._setGetters(self.children, getitem)
                self._setSetters(self.children, setitem)
            except:
                pass
            try:
                d = dict(obj)
                self.children = sorted(d.keys())
                self._setGetters(self.children, getitem)
                self._setSetters(self.children, setitem)
            except:
                pass
            children = getChilderen(obj)
            self._setGetters(children, getattr)
            self._setSetters(children, setattr)
            self.children += children
            if inspect.isclass(obj) and hasattr(obj, "__init__"):
                self.arguments = getArguments(getattr(obj, "__init__"))
        if ignoreAppKit:
            # hide names that merely shadow AppKit module attributes
            self.children = [child for child in self.children if not (isinstance(child, str) and hasattr(AppKit, child))]
        self._childRefs = {}
    @python_method
    def _setSetters(self, names, callback):
        # register the write-accessor for each child name
        for name in names:
            self.setters[name] = callback
    @python_method
    def _setGetters(self, names, callback):
        # register the read-accessor for each child name
        for name in names:
            self.getters[name] = callback
    def isExpandable(self):
        # only items with children get an expansion triangle
        return bool(self.children)
    @python_method
    def getChild(self, child):
        # cache child wrappers: NSOutlineView does not retain them
        if child in self._childRefs:
            return self._childRefs[child]
        name = self.children[child]
        getter = self.getters.get(name)
        setter = self.setters.get(name)
        obj = getter(self.object, name)
        childObj = self.__class__(name, obj, self.object, setter)
        self._childRefs[child] = childObj
        return childObj
    def getDoc(self):
        # used as the tooltip text in the outline view
        doc = inspect.getdoc(self.object)
        if doc:
            return doc
        return None
    def __len__(self):
        return len(self.children)
if __name__ == "__main__":
    # manual smoke test: browse the vanilla module itself inside a window
    import vanilla
    testObject = vanilla
    class TestWindow():
        def __init__(self):
            self.w = vanilla.Window((400, 400), "inspect object browser %s" %testObject, minSize=(100, 100))
            self.w.b = ObjectBrowser((0, 0, -0, -0), testObject)
            self.w.open()
    from vanilla.test.testTools import executeVanillaTest
    executeVanillaTest(TestWindow)
| |
#!/usr/bin/env python3
# Copyright 2015-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
from ros_buildfarm.argument import add_argument_build_tool
from ros_buildfarm.argument import add_argument_output_dir
from ros_buildfarm.common import Scope
from ros_buildfarm.rosdoc_index import RosdocIndex
from ros_buildfarm.rosdoc_lite import get_generator_output_folders
from ros_buildfarm.workspace import call_build_tool
from ros_buildfarm.workspace import clean_workspace
from ros_buildfarm.workspace import ensure_workspace_exists
import yaml
def main(argv=sys.argv[1:]):
    """
    Build the workspace in isolation, then run rosdoc_lite over each
    listed package and record the results in the rosdoc index and the
    output directory.

    Returns a non-zero value if the workspace build or any rosdoc_lite
    invocation failed.
    """
    # NOTE: the default binds sys.argv at import time; fine for a script
    # entry point, callers can always pass argv explicitly
    parser = argparse.ArgumentParser(
        description="Invoke 'rosdoc_lite' on each package of a workspace")
    parser.add_argument(
        '--rosdistro-name',
        required=True,
        help='The name of the ROS distro to identify the setup file to be '
             'sourced (if available)')
    parser.add_argument(
        '--os-code-name',
        required=True,
        help="The OS code name (e.g. 'xenial')")
    parser.add_argument(
        '--arch',
        required=True,
        help="The architecture (e.g. 'amd64')")
    add_argument_build_tool(parser, required=True)
    parser.add_argument(
        '--workspace-root',
        required=True,
        help='The root path of the workspace to compile')
    parser.add_argument(
        '--rosdoc-lite-dir',
        required=True,
        help='The root path of the rosdoc_lite repository')
    parser.add_argument(
        '--catkin-sphinx-dir',
        required=True,
        help='The root path of the catkin-sphinx repository')
    parser.add_argument(
        '--rosdoc-index-dir',
        required=True,
        help='The root path of the rosdoc_index folder')
    parser.add_argument(
        '--canonical-base-url',
        help='The canonical base URL to add to all generated HTML files')
    parser.add_argument(
        'pkg_tuples',
        nargs='*',
        help='A list of package tuples in topological order, each containing '
             'the name, the relative path and optionally the package-relative '
             'path of the rosdoc config file separated by a colon')
    add_argument_output_dir(parser, required=True)
    args = parser.parse_args(argv)
    ensure_workspace_exists(args.workspace_root)
    clean_workspace(args.workspace_root)
    with Scope('SUBSECTION', 'build workspace in isolation and install'):
        env = dict(os.environ)
        env.setdefault('MAKEFLAGS', '-j1')
        rc = call_build_tool(
            args.build_tool, args.rosdistro_name, args.workspace_root,
            cmake_args=['-DCATKIN_SKIP_TESTING=1'], install=True, env=env)
    # TODO compile error should still allow to generate doc from static parts
    if rc:
        return rc
    # the output dir takes precedence over the pre-existing index data
    rosdoc_index = RosdocIndex([
        os.path.join(args.output_dir, args.rosdistro_name),
        os.path.join(args.rosdoc_index_dir, args.rosdistro_name)])
    source_space = os.path.join(args.workspace_root, 'src')
    for pkg_tuple in args.pkg_tuples:
        # tuple format: name:subfolder[:rosdoc_config]
        pkg_name, pkg_subfolder, pkg_rosdoc_config = pkg_tuple.split(':', 2)
        with Scope('SUBSECTION', 'rosdoc_lite - %s' % pkg_name):
            pkg_path = os.path.join(source_space, pkg_subfolder)
            pkg_doc_path = os.path.join(
                args.output_dir, 'api_rosdoc', pkg_name)
            pkg_tag_path = os.path.join(
                args.output_dir, 'symbols', '%s.tag' % pkg_name)
            source_cmd = [
                '.', os.path.join(
                    args.workspace_root, 'install_isolated', 'setup.sh'),
            ]
            # for workspaces with only plain cmake packages the setup files
            # generated by cmi won't implicitly source the underlays
            setup_file = '/opt/ros/%s/setup.sh' % args.rosdistro_name
            if os.path.exists(setup_file):
                source_cmd = ['.', setup_file, '&&'] + source_cmd
            rosdoc_lite_cmd = [
                os.path.join(args.rosdoc_lite_dir, 'scripts', 'rosdoc_lite'),
                pkg_path,
                '-o', pkg_doc_path,
                '-g', pkg_tag_path,
                '-t', os.path.join(
                    args.output_dir, 'rosdoc_tags', '%s.yaml' % pkg_name),
            ]
            if '3' == os.environ.get('ROS_PYTHON_VERSION'):
                rosdoc_lite_cmd.insert(0, 'python3')
            print("Invoking `rosdoc_lite` for package '%s': %s" %
                  (pkg_name, ' '.join(rosdoc_lite_cmd)))
            # run via `sh -c` so the setup files can be sourced first
            pkg_rc = subprocess.call(
                [
                    'sh', '-c',
                    ' '.join(source_cmd) +
                    ' && ' +
                    'PYTHONPATH=%s/src:%s/src:$PYTHONPATH ' % (
                        args.rosdoc_lite_dir, args.catkin_sphinx_dir) +
                    ' '.join(rosdoc_lite_cmd)
                ], stderr=subprocess.STDOUT, cwd=pkg_path)
            # remember the failure but keep documenting remaining packages
            if pkg_rc:
                rc = pkg_rc
            # only if rosdoc runs generates a symbol file
            # create the corresponding location file
            if os.path.exists(pkg_tag_path):
                data = {
                    'docs_url': '../../../api/%s/html' % pkg_name,
                    'location': '%s/symbols/%s.tag' %
                                (args.rosdistro_name, pkg_name),
                    'package': pkg_name,
                }
                # fetch generator specific output folders from rosdoc_lite
                if pkg_rosdoc_config:
                    output_folders = get_generator_output_folders(
                        pkg_rosdoc_config, pkg_name)
                    for generator, output_folder in output_folders.items():
                        data['%s_output_folder' % generator] = output_folder
                rosdoc_index.locations[pkg_name] = [data]
            if args.canonical_base_url:
                add_canonical_link(
                    pkg_doc_path, '%s/%s/api/%s' %
                    (args.canonical_base_url, args.rosdistro_name, pkg_name))
            # merge manifest.yaml files
            rosdoc_manifest_yaml_file = os.path.join(
                pkg_doc_path, 'manifest.yaml')
            job_manifest_yaml_file = os.path.join(
                args.output_dir, 'manifests', pkg_name, 'manifest.yaml')
            if os.path.exists(rosdoc_manifest_yaml_file):
                with open(rosdoc_manifest_yaml_file, 'r') as h:
                    rosdoc_data = yaml.safe_load(h)
            else:
                # if rosdoc_lite failed to generate the file
                rosdoc_data = {}
            with open(job_manifest_yaml_file, 'r') as h:
                job_data = yaml.safe_load(h)
            # job-level manifest data wins over rosdoc_lite's output
            rosdoc_data.update(job_data)
            with open(rosdoc_manifest_yaml_file, 'w') as h:
                yaml.safe_dump(rosdoc_data, h, default_flow_style=False)
    # persist the locations collected above
    rosdoc_index.write_modified_data(
        args.output_dir, ['locations'])
    return rc
def add_canonical_link(base_path, base_link):
    """
    Insert a <link rel="canonical"> tag before </head> in every HTML
    file below *base_path*, pointing at the corresponding URL under
    *base_link*.

    Files that already contain a canonical link are left untouched, so
    the function is idempotent.
    """
    # URLs always use forward slashes, so join them with posixpath
    # (os.path.join would produce backslashes on Windows)
    import posixpath
    print("add canonical link '%s' to all html files under '%s'" %
          (base_link, base_path))
    for path, dirs, files in os.walk(base_path):
        for filename in [f for f in files if f.endswith('.html')]:
            filepath = os.path.join(path, filename)
            try:
                with open(filepath, 'rb') as h:
                    data = h.read()
            except Exception:
                print("error reading file '%s'" % filepath)
                raise
            if data.find(b'rel="canonical"') != -1:
                # already has a canonical link, don't add a second one
                continue
            rel_path = os.path.relpath(filepath, base_path)
            link = posixpath.join(base_link, rel_path.replace(os.sep, '/'))
            data = data.replace(
                b'</head>', b'<link rel="canonical" href="' + link.encode() +
                b'" />\n</head>', 1)
            with open(filepath, 'wb') as h:
                h.write(data)
if __name__ == '__main__':
    # propagate main()'s return code as the process exit status
    sys.exit(main())
| |
"""
Description:
Requirements: pySerial, wxPython Phoenix
Glossary of abbreviations used below:
DMM - digital multimeter
PSU - power supply
SBC - single board computer
INS - general instrument commands
GEN - general sequence instructions
"""
import wx
import theme
import base
# from wx.lib.agw import spinctrl
class SendReceive(wx.Dialog):
    """Dialog for adding/editing a "Send and Receive Message" sequence step.

    The user selects an instrument, the command to send, a delay before
    sending, and the local/global variable names that will receive the
    serial response.  ``GetValue()`` serialises the dialog state into an
    action dict; ``SetValue()`` restores it.
    """

    def __init__(self, parent, instruments, variables):
        """
        parent      -- parent window
        instruments -- sequence of instrument names shown in the combo box
        variables   -- dict of sequence variables; the "locals" entry is
                       used to pick a unique default local name
        """
        wx.Dialog.__init__(self,
                           parent,
                           title="Send and Receive Message")

        self._variables = variables

        panel = wx.Panel(self)
        sizer = wx.BoxSizer(wx.VERTICAL)
        hsizer = wx.BoxSizer(wx.HORIZONTAL)

        sbox = wx.StaticBox(panel, label="")
        sbox_sizer = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)
        grid = wx.GridBagSizer(5, 5)

        row = 0
        ins_lbl = wx.StaticText(panel, label="Instruments:")
        # Copy the caller's list.  The previous code extended the passed-in
        # list with itself, which both mutated the caller's data and showed
        # every instrument twice in the drop-down.
        choices = list(instruments)
        self.cbox_ins = wx.ComboBox(panel, choices=choices)
        self.cbox_ins.Bind(wx.EVT_COMBOBOX, self.OnInstrumentSelect)
        grid.Add(ins_lbl, pos=(row, 0), flag=wx.ALL|wx.EXPAND, border=5)
        grid.Add(self.cbox_ins, pos=(row, 1), span=(0, 3), flag=wx.ALL|wx.EXPAND, border=5)
        grid.AddGrowableCol(1)

        row += 1
        lbl_send = wx.StaticText(panel, label="Send Command:")
        self.command = wx.TextCtrl(panel)
        grid.Add(lbl_send, pos=(row, 0), flag=wx.ALL|wx.EXPAND, border=5)
        grid.Add(self.command, pos=(row, 1), span=(0, 3), flag=wx.ALL|wx.EXPAND, border=5)

        row += 1
        # Delay is entered via two spinners: whole seconds and tenths.
        lbl_delay = wx.StaticText(panel, label="Delay before send:")
        self.spin_delay = wx.SpinCtrl(panel, max=10, min=0, size=(50, -1))
        self.spin_delay2 = wx.SpinCtrl(panel, max=59, min=0, size=(50, -1))
        self.spin_delay.Bind(wx.EVT_SPINCTRL, self.OnSpinDelay)
        self.spin_delay2.Bind(wx.EVT_SPINCTRL, self.OnSpinDelay)
        self.lbl_delay = wx.StaticText(panel, label="0.0s")
        grid.Add(lbl_delay, pos=(row, 0), flag=wx.ALL|wx.EXPAND, border=5)
        grid.Add(self.spin_delay, pos=(row, 1), flag=wx.ALL|wx.EXPAND, border=5)
        grid.Add(self.spin_delay2, pos=(row, 2), flag=wx.ALL|wx.EXPAND, border=5)
        grid.Add(self.lbl_delay, pos=(row, 3), flag=wx.ALL|wx.ALIGN_BOTTOM, border=5)

        row += 1
        # Default local name: first unused of serial_read, serial_read1, ...
        lbl_local = wx.StaticText(panel, label="Local Name:")
        default = defaultname = "serial_read"
        index = 1
        while defaultname in self._variables["locals"]:
            defaultname = default + str(index)
            index += 1
        self.text_local = wx.TextCtrl(panel, value=defaultname)
        grid.Add(lbl_local, pos=(row, 0), flag=wx.ALL|wx.EXPAND, border=5)
        grid.Add(self.text_local, pos=(row, 1), span=(0, 2), flag=wx.ALL|wx.EXPAND, border=5)

        row += 1
        lbl_global = wx.StaticText(panel, label="Global Name:")
        self.text_global = wx.TextCtrl(panel, value="")
        grid.Add(lbl_global, pos=(row, 0), flag=wx.ALL|wx.EXPAND, border=5)
        grid.Add(self.text_global, pos=(row, 1), span=(0, 2), flag=wx.ALL|wx.EXPAND, border=5)

        sbox_sizer.Add(grid, 1, wx.ALL|wx.EXPAND, 0)
        sbox_sizer.AddSpacer(10)

        #----- dialog buttons -----
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        hsizer.AddStretchSpacer()
        btn_cancel = wx.Button(panel, label="Cancel", id=wx.ID_CANCEL)
        btn_cancel.Bind(wx.EVT_BUTTON, self.OnButton)
        self.btn_add = wx.Button(panel, label="Add", id=wx.ID_OK)
        self.btn_add.Bind(wx.EVT_BUTTON, self.OnButton)
        hsizer.Add(btn_cancel, 0, wx.ALL|wx.EXPAND, 5)
        hsizer.Add(self.btn_add, 0, wx.ALL|wx.EXPAND, 5)

        # add to main sizer
        sizer.Add(sbox_sizer, 0, wx.ALL|wx.EXPAND, 2)
        sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5)

        panel.SetSizer(sizer)
        w, h = sizer.Fit(self)

        try:
            self.SetIcon(theme.GetIcon("psu_png"))
        except Exception:
            # The icon is cosmetic; ignore a missing theme resource.
            pass

        self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)

    def OnKeyUp(self, event):
        """Close the dialog when Escape is released."""
        key = event.GetKeyCode()
        # The correct constant is WXK_ESCAPE; wx.KEY_ESCAPE does not exist,
        # so the original handler raised AttributeError on every key-up.
        if key == wx.WXK_ESCAPE:
            self.EndModal(wx.ID_CANCEL)

    def OnSpinDelay(self, event=None):
        """Refresh the "<seconds>.<tenths>s" delay label."""
        secs = self.spin_delay.GetValue()
        tenths = self.spin_delay2.GetValue()
        self.lbl_delay.SetLabel("%s.%ss" % (secs, tenths))

    def OnInstrumentSelect(self, event):
        """Enable the Add button once an instrument has been chosen."""
        self.btn_add.Enable()

    def OnButton(self, event):
        """End the modal loop with the id of the pressed button."""
        btn = event.GetEventObject()
        label = btn.GetLabel()
        btn_id = btn.GetId()  # renamed: `id` shadowed the builtin
        if label in ("Cancel", "Add"):
            self.EndModal(btn_id)

    def SetValue(self, data):
        """Restore dialog state from an action dict produced by GetValue()."""
        params = data["parameters"]
        params = "), " + params[1:-1] + ", ("  # so we can split it easier
        param_dict = {}
        for param in params.split("), ("):
            param = param[1:-1]
            if param == "":
                continue
            key, value = param.split("', '")
            param_dict[key] = value

        self.command.SetValue(param_dict["command"])
        self.cbox_ins.SetValue(param_dict["instrument"])
        self.lbl_delay.SetLabel(param_dict["delay"] + "s")
        spin1, spin2 = param_dict["delay"].split(".")
        # SpinCtrl.SetValue expects an int; the string overload only
        # replaces the displayed text.
        self.spin_delay.SetValue(int(spin1))
        self.spin_delay2.SetValue(int(spin2))
        self.text_local.SetValue(data["local"])
        self.text_global.SetValue(data["global"])

    def GetValue(self):
        """Serialise the dialog state into an action dict.

        Returns a dict with "action", "parameters" (stringified list of
        key/value pairs) and, when non-empty, "local"/"global" variable
        names sanitised to letters, digits and underscores.
        """
        command = self.command.GetValue()
        # "), (" is the parameter separator SetValue() splits on; strip it
        # from user text so round-tripping cannot break.
        command = command.replace("), (", "_")
        instrument = self.cbox_ins.GetValue()
        instrument = instrument.replace("), (", "_")
        data = [("command", command),
                ("instrument", instrument),
                ("delay", self.lbl_delay.GetLabel()[0:-1])]
        data = {"action": "Send and Receive Message",
                "parameters": str(data)}

        def sanitise(name):
            # Variable names may contain only letters and digits; every
            # other character becomes an underscore.
            return "".join(c if (c.isdigit() or c.isalpha()) else "_"
                           for c in name)

        local = self.text_local.GetValue()
        if local != "":
            data["local"] = sanitise(local)
        glob = self.text_global.GetValue()
        if glob != "":
            data["global"] = sanitise(glob)
        return data
| |
#########################
### Domain Parameters ###
#########################

# Table grid resolution: spacing between candidate placement cells
# generated by map_locations() (same length unit as the scene geometry).
RESOLUTION=0.1

# End-Effector Frame: objects parented to this frame are treated as held
# (see add_on() in make_state).
FRAME = "end_effector_grasp"

# Object placement tolerance: small z-offset added when placing/stacking
# so objects do not start exactly in contact.
EPSILON = 1e-4
########################
### Imported Modules ###
########################
# Import our planning packages
import aminopy as aa
import tmsmtpy as tm
import TMSMT
# Import python math package
from math import pi
# The Common Lisp runtime is also available
import CL as cl
#############
## Helpers ##
#############
def map_locations(function, scene):
    """Invoke `function(surface_name, i, j)` for every grid cell of every
    box-shaped "surface" frame in `scene`.

    Cells are spaced RESOLUTION apart, centred on the surface origin.  For
    each non-negative cell (i, j) inside the box's half-extents, the
    mirrored cells (-i, j), (i, -j) and (-i, -j) are also visited when
    they are distinct.
    """
    for surface in tm.collect_frame_type(scene, "surface"):
        surface_name = surface['name']
        for geom in surface['geometry']:
            shape = geom['shape']
            if not aa.shape_is_box(shape):
                continue
            dim = shape['dimension']
            half_x = dim[0] / 2
            half_y = dim[1] / 2
            x, i = 0, 0
            while x <= half_x:
                y, j = 0, 0
                while y <= half_y:
                    function(surface_name, i, j)
                    if i > 0:
                        function(surface_name, -i, j)
                    if j > 0:
                        function(surface_name, i, -j)
                    if i > 0 and j > 0:
                        function(surface_name, -i, -j)
                    y += RESOLUTION
                    j += 1
                x += RESOLUTION
                i += 1
def location_table(parent, frame):
    """Return (["ONTABLE", name, cell], i, j) for `frame` resting on the
    surface `parent`, where (i, j) is the grid cell nearest the frame's
    translation and `cell` is its mangled PDDL name.
    """
    offset = aa.translation(aa.frame_fixed_tf(frame))
    i = int(round(offset[0] / RESOLUTION))
    j = int(round(offset[1] / RESOLUTION))
    cell = tm.mangle(parent.name, i, j)
    return (["ONTABLE", frame.name, cell], i, j)
############################
### Collision Constraint ###
############################
def collision_constraint(scene, op, objs):
    """Return a conjunction of ONTABLE facts for every frame in `objs` that
    is moveable and currently rests on a "surface" frame.

    :param scene: scene graph, indexable by frame name
    :param op: planner operator (currently unused)
    :param objs: iterable of frame names to constrain
    """
    # TODO: stacking
    # NOTE: this previously used map() purely for its side effects; under
    # Python 3 map() is lazy, so the callbacks never ran and the constraint
    # was silently empty.  Explicit loops are correct on both Python 2 and 3.
    moveable = []
    for frame_name in objs:
        frame = scene[frame_name]
        if aa.frame_isa(frame, "moveable"):
            moveable.append(frame)

    conjunction = []
    for frame in moveable:
        parent = scene[frame.parent]
        if aa.frame_isa(parent, "surface"):
            (term, i, j) = location_table(parent, frame)
            conjunction.append(term)
    return conjunction
##########################
## Scene State Function ##
##########################
## TODO: Does not work after an UNSTACK operation
## Problem is with the "Clear" predicate
def make_state(scene, configuration, is_goal):
    '''Map the scene graph `scene' to a task state expression.

    Returns a list of predicate terms: ONTABLE/ON/HOLDING facts for every
    moveable frame, HANDEMPTY when nothing is held, and (for non-goal
    states only) CLEAR facts for unoccupied blocks and grid cells.
    '''
    # NOTE: this previously used map() purely for its side effects; under
    # Python 3 map() is lazy, so none of the facts were ever added.
    # Explicit loops are correct on both Python 2 and 3.
    conjunction = []   # terms of the state expression
    occupied = {}      # parents / grid cells that currently hold an object
    moveable_frames = tm.collect_frame_type(scene, "moveable")

    ## Add object locations
    handempty = [True]  # one-element list so the closure can rebind it

    def add_on(child, parent):
        if parent == FRAME:
            # attached to the end-effector: the object is held
            conjunction.append(["HOLDING", child])
            conjunction.append(["NOT", ["HANDEMPTY"]])
            handempty[0] = False
            occupied[child] = True
        else:
            conjunction.append(["ON", child, parent])
            occupied[parent] = True

    for frame in moveable_frames:
        name = frame.name
        parent_name = frame.parent
        try:
            # If parent frame is a placement surface, position is the
            # appropriate grid cell on the surface.
            parent_frame = scene[parent_name]
            if aa.frame_isa(parent_frame, "surface"):
                (term, i, j) = location_table(parent_frame, frame)
                conjunction.append(term)
                occupied[(parent_name, i, j)] = True
            else:
                add_on(name, parent_name)
        except NameError:
            # scene lookup failed (presumably an unknown parent frame via
            # the CL bridge) — treat as a plain ON relation. TODO confirm.
            add_on(name, parent_name)

    if handempty[0]:
        conjunction.append(["HANDEMPTY"])

    ## Clear things (only meaningful for the current state, not the goal)
    if not is_goal:
        for frame in moveable_frames:
            if frame.name not in occupied:
                conjunction.append(["CLEAR", frame.name])

        def clear_location(name, i, j):
            if (name, i, j) not in occupied:
                conjunction.append(["CLEAR", tm.mangle(name, i, j)])
        map_locations(clear_location, scene)

    return conjunction
def scene_state(scene, configuration):
    """Task-state expression for the current scene (includes CLEAR facts)."""
    return make_state(scene, configuration, False)
def goal_state(scene, configuration):
    """Task-state expression for the goal scene (CLEAR facts omitted)."""
    return make_state(scene, configuration, True)
############################
## Scene Objects Function ##
############################
def scene_objects(scene):
    '''Return the PDDL objects for `scene': [blocks, locations].'''
    def names_of(kind):
        return [f.name for f in tm.collect_frame_type(scene, kind)]

    # Moveable objects are blocks; "BLOCK" heads the typed list.
    blocks = ["BLOCK"] + names_of("moveable")

    # One LOCATION per grid cell drawn on every surface.
    locations = ['LOCATION']
    def add_loc(name, i, j):
        locations.append(tm.mangle(name, i, j))
    map_locations(add_loc, scene)

    return [blocks, locations]
############################
### Operator Definitions ###
############################
def motion_plan(op, frame, goal):
    """Plan a motion bringing `frame` to `goal` within op's final scene,
    using only the kinematic chain from the root to `frame`."""
    chain = aa.scene_chain(op.final_scene, "", frame)
    return tm.op_motion(op, chain, goal)
# def pick(op, obj):
# mp = motion_plan(op, FRAME, tm.op_tf_abs(op,obj))
# return tm.op_reparent(mp, FRAME, obj)
def place_tf(op, obj, dst_frame, g_tf_o):
    """Plan a motion of `obj` to global pose `g_tf_o`, then reparent it
    under `dst_frame`."""
    planned = motion_plan(op, obj, g_tf_o)
    return tm.op_reparent(planned, dst_frame, obj)
def place_height(scene, name):
    """Half the z-extent of `name`'s first collision geometry.

    Implicitly returns None when that geometry is not a box (matching the
    original behaviour).
    """
    geoms = scene[name].collision
    shape = geoms[0].shape
    if aa.shape_is_box(shape):
        return shape.dimension[2] / 2
# def place(op, obj, dst, i, j):
# scene = op['final_scene']
# x = i*RESOLUTION
# y = j*RESOLUTION
# z = place_height(scene,obj) + place_height(scene,dst) + EPSILON
# d_tf_o = aa.tf2( 1, [x,y,z] )
# g_tf_d = tm.op_tf_abs(op,dst)
# g_tf_o = aa.mul(g_tf_d, d_tf_o );
# return place_tf(op, obj, dst, g_tf_o)
# def stack(op, obj, dst ):
# scene = op['final_scene']
# config = op['final_config']
# g_tf_d = tm.op_tf_abs(op,dst)
# d_tf_o = aa.tf2(1, [0,0, place_height(scene,obj) + place_height(scene,dst) + EPSILON])
# g_tf_o = aa.mul(g_tf_d, d_tf_o)
# return place_tf(op, obj, dst, g_tf_o)
def op_pick_up(scene, config, op):
    """Refine a (PICK-UP obj src i j) task action: move the end-effector
    to the object's pose, then attach the object to the end-effector."""
    obj = op[1]  # op is (action, obj, src, i, j)
    nop = tm.op_nop(scene, config)
    planned = motion_plan(nop, FRAME, tm.op_tf_abs(nop, obj))
    return tm.op_reparent(planned, FRAME, obj)
def op_put_down(scene, config, op):
    """Refine a (PUT-DOWN obj dst i j) task action: place `obj` on grid
    cell (i, j) of surface `dst`."""
    (_, obj, dst, i, j) = op
    nop = tm.op_nop(scene, config)
    # Pose of obj relative to dst: centred on the cell, resting on top
    # with a small clearance.
    x = i * RESOLUTION
    y = j * RESOLUTION
    z = place_height(scene, obj) + place_height(scene, dst) + EPSILON
    d_tf_o = aa.tf2(1, [x, y, z])
    g_tf_o = aa.mul(tm.op_tf_abs(nop, dst), d_tf_o)
    return place_tf(nop, obj, dst, g_tf_o)
def op_stack(scene, config, op):
    """Refine a (STACK obj dst) task action: set `obj` directly on top of
    `dst`, separated by the contact tolerance."""
    (_, obj, dst) = op
    nop = tm.op_nop(scene, config)
    height = place_height(scene, obj) + place_height(scene, dst) + EPSILON
    d_tf_o = aa.tf2(1, [0, 0, height])
    g_tf_o = aa.mul(tm.op_tf_abs(nop, dst), d_tf_o)
    return place_tf(nop, obj, dst, g_tf_o)
def op_unstack(scene, config, op):
    """UNSTACK refines exactly like PICK-UP (grasp the object in place)."""
    return op_pick_up(scene, config, op)
##########################
### Register functions ###
##########################

# Hook the domain functions defined above into the TMSMT task-motion
# planner: state/goal/object extraction, one refinement callback per task
# operator, and the collision constraint generator.
tm.bind_scene_state(scene_state)
tm.bind_goal_state(goal_state)
tm.bind_scene_objects(scene_objects)
tm.bind_refine_operator(op_pick_up, "PICK-UP")
tm.bind_refine_operator(op_put_down, "PUT-DOWN")
tm.bind_refine_operator(op_stack, "STACK")
tm.bind_refine_operator(op_unstack, "UNSTACK")
tm.bind_collision_constraint(collision_constraint)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class OptimizerTest(test.TestCase):
  """Functional tests for the base Optimizer API, exercised through
  GradientDescentOptimizer (minimize / compute_gradients / apply_gradients)
  in both graph and eager modes."""

  @test_util.run_in_graph_and_eager_modes()
  def testBasic(self):
    """One SGD step moves each variable by -learning_rate * gradient."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a_%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b_%d' % i)

      def loss(v0, v1):
        return 5 * v0 + 3 * v1
      # Note that for eager execution, minimize expects a function instead of a
      # Tensor.
      cost = loss if context.in_eager_mode() else loss(var0, var1)
      global_step = resource_variable_ops.ResourceVariable(
          array_ops.zeros([], dtypes.int64), name='global_step_%d' % i)
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)

      self.evaluate(variables.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Run 1 step of sgd through optimizer
      opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
      self.evaluate(opt_op)
      # Validate updated params
      self.assertAllClose([-14., -13.], self.evaluate(var0))
      self.assertAllClose([-6., -5.], self.evaluate(var1))

  def testAggregationMethod(self):
    """minimize() accepts an explicit gradient aggregation_method."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        global_step = variables.Variable(
            array_ops.zeros([], dtypes.int64), name='global_step')
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(
            cost,
            global_step, [var0, var1],
            aggregation_method=gradients_impl.AggregationMethod.
            EXPERIMENTAL_ACCUMULATE_N)

        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params
        self.assertAllClose([-14., -13.], var0.eval())
        self.assertAllClose([-6., -5.], var1.eval())

  def testPrecomputedGradient(self):
    """grad_loss scales the computed gradients elementwise."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        grad_loss = constant_op.constant([42, -42], dtype=dtype)
        global_step = variables.Variable(
            array_ops.zeros([], dtypes.int64), name='global_step')
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(
            cost, global_step, [var0, var1], grad_loss=grad_loss)

        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params
        self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
                            var0.eval())
        self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
                            var1.eval())

  @test_util.run_in_graph_and_eager_modes()
  def testNoVariables(self):
    """minimize() raises when the loss involves no trainable variables."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      # pylint: disable=cell-var-from-loop
      def loss():
        var0 = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype, trainable=False, name='a')
        var1 = resource_variable_ops.ResourceVariable(
            [3.0, 4.0], dtype=dtype, trainable=False, name='b')
        return 5 * var0 + var1
      # pylint: enable=cell-var-from-loop
      cost = loss if context.in_eager_mode() else loss()
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      with self.assertRaisesRegexp(ValueError, 'No.*variables'):
        sgd_op.minimize(cost)

  @test_util.run_in_graph_and_eager_modes()
  def testNoGradients(self):
    """minimize() raises when no variable in var_list receives a gradient."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b%d' % i)
      # pylint: disable=cell-var-from-loop
      def loss(_):
        return 5 * var0
      # pylint: enable=cell-var-from-loop
      cost = loss if context.in_eager_mode() else loss(var1)
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      with self.assertRaisesRegexp(ValueError, 'No gradients'):
        # var1 has no gradient
        sgd_op.minimize(cost, var_list=[var1])

  @test_util.run_in_graph_and_eager_modes()
  def testNoGradientsForAnyVariables_Minimize(self):
    """minimize() raises when the loss is constant w.r.t. all variables."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a_%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b_%d' % i)
      def loss(unused_v1, unused_v2):
        return constant_op.constant(5.0)
      cost = loss if context.in_eager_mode() else loss(var0, var1)
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      with self.assertRaisesRegexp(ValueError,
                                   'No gradients provided for any variable'):
        sgd_op.minimize(cost, var_list=[var0, var1])

  @test_util.run_in_graph_and_eager_modes()
  def testNoGradientsForAnyVariables_ApplyGradients(self):
    """apply_gradients() raises when every gradient is None."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a_%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b_%d' % i)
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      with self.assertRaisesRegexp(ValueError,
                                   'No gradients provided for any variable'):
        sgd_op.apply_gradients([(None, var0), (None, var1)])

  @test_util.run_in_graph_and_eager_modes()
  def testGradientsAsVariables(self):
    """apply_gradients() accepts gradients stored in tf.Variables."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b%d' % i)
      def loss(v0, v1):
        return 5 * v0 + 3 * v1
      cost = loss if context.in_eager_mode() else loss(var0, var1)
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      grads_and_vars = sgd_op.compute_gradients(cost, [var0, var1])
      # Convert gradients to tf.Variables
      converted_grads = [
          resource_variable_ops.ResourceVariable(array_ops.zeros([2], dtype),
                                                 name='c_%d_%d' % (i, j))
          for j, gv in enumerate(grads_and_vars)
      ]
      convert_ops = [
          state_ops.assign(converted_grads[j], gv[0])
          for j, gv in enumerate(grads_and_vars)
      ]

      self.evaluate(variables.global_variables_initializer())
      # Run convert_ops to achieve the gradients converting
      self.evaluate(convert_ops)
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))

      # Run 1 step of sgd through optimizer
      converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
      opt_op = sgd_op.apply_gradients(converted_grads_and_vars)
      self.evaluate(opt_op)

      # Validate updated params
      self.assertAllClose([-14., -13.], self.evaluate(var0))
      self.assertAllClose([-6., -5.], self.evaluate(var1))

  def testTrainOp(self):
    """minimize() registers the resulting op in the TRAIN_OP collection."""
    with self.test_session():
      var0 = variables.Variable([1.0, 2.0])
      var1 = variables.Variable([3.0, 4.0])
      cost = 5 * var0 + 3 * var1
      global_step = variables.Variable(
          array_ops.zeros([], dtypes.int64), name='global_step')
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
      self.assertTrue(opt_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))

  def testConstraint(self):
    """Variable constraints are applied after the gradient update."""
    constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
    constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
    with self.test_session():
      var0 = variables.Variable([1.0, 2.0],
                                constraint=constraint_01)
      var1 = variables.Variable([3.0, 4.0],
                                constraint=constraint_0)
      cost = 5 * var0 + 3 * var1
      global_step = variables.Variable(
          array_ops.zeros([], dtypes.int64), name='global_step')
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      opt_op = sgd_op.minimize(cost, global_step, [var0, var1])

      variables.global_variables_initializer().run()
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], var0.eval())
      self.assertAllClose([3.0, 4.0], var1.eval())
      # Run 1 step of sgd through optimizer
      opt_op.run()
      # Validate updated params (clipped back into each constraint's range)
      self.assertAllClose([-0.1, -0.1], var0.eval())
      self.assertAllClose([0., 0.], var1.eval())
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
# -*- coding: utf-8 -*-
import logging
from modularodm import exceptions, StoredObject
from modularodm.fields import IntegerField
from modularodm.query.query import RawQuery as Q
from tests.base import ModularOdmTestCase
logger = logging.getLogger(__name__)
class BasicQueryTestCase(ModularOdmTestCase):
    """Exercises load/find/find_one and the limit/offset/sort query filters
    against a collection of COUNT objects with integer primary keys
    0 .. COUNT-1."""

    # Number of Foo records created for every test.
    COUNT = 30

    def define_objects(self):
        # Schema: a single model with an integer primary key.
        class Foo(StoredObject):
            _id = IntegerField(primary=True)

        return Foo,

    def set_up_objects(self):
        # Create and save COUNT Foo records with _id 0 .. COUNT-1.
        self.foos = []
        for idx in range(self.COUNT):
            foo = self.Foo(_id=idx)
            foo.save()
            self.foos.append(foo)

    def test_load_by_pk(self):
        """ Given a known primary key, ``.get(pk)`` should return the object.
        """
        self.assertEqual(
            self.foos[0],
            self.Foo.load(0)
        )

    def test_find_all(self):
        """ If no query object is passed, ``.find()`` should return all objects.
        """
        self.assertEqual(
            len(self.Foo.find()),
            len(self.foos)
        )

    def test_find_one(self):
        """ Given a query with exactly one result record, ``.find_one()`` should
        return that object.
        """
        self.assertEqual(
            self.Foo.find_one(Q('_id', 'eq', 0))._id,
            self.foos[0]._id
        )

    def test_find_one_return_zero(self):
        """ Given a query with zero result records, ``.find_one()`` should raise
        an appropriate error.
        """
        with self.assertRaises(exceptions.NoResultsFound):
            self.Foo.find_one(Q('_id', 'eq', -1))

    def test_find_one_return_many(self):
        """ Given a query with >1 result record, ``.find_one()`` should raise
        an appropriate error.
        """
        with self.assertRaises(exceptions.MultipleResultsFound):
            result = self.Foo.find_one()
            logger.debug(result)

    def test_slice(self):
        """Slicing a queryset drops the skipped records."""
        queryset = self.Foo.find()
        queryslice = queryset[1:]
        self.assertEqual(queryset.count(), queryslice.count() + 1)
        self.assertEqual(queryset[1], queryslice[0])

    def test_slice_negative_index(self):
        """Negative indexing works only on backends that advertise it."""
        queryset = self.Foo.find()
        if queryset._NEGATIVE_INDEXING:
            self.assertEqual(queryset[-1], self.Foo.find().sort('-_id')[0])
        else:
            with self.assertRaises(IndexError):
                queryset[-1]

    def test_slice_negative_slice(self):
        """Negative slice bounds are rejected."""
        queryset = self.Foo.find()
        with self.assertRaises(IndexError):
            queryset[-5:-1]

    def test_slice_step(self):
        """Slices with a step are rejected."""
        queryset = self.Foo.find()
        with self.assertRaises(IndexError):
            queryset[::2]

    def test_slice_reverse(self):
        """Slices with start > stop are rejected."""
        queryset = self.Foo.find()
        with self.assertRaises(IndexError):
            queryset[5:0]

    # individual filter tests (limit, offset, sort)

    def test_limit(self):
        """ For a query that returns > n results, `.limit(n)` should return the
        first n.
        """
        self.assertEqual(
            len(self.Foo.find().limit(10)),
            10,
        )
        self.assertEqual(
            len(self.Foo.find().limit(self.COUNT+10)),
            self.COUNT,
        )
        # TODO: test limit = 0

    def test_offset(self):
        """For a query that returns n results, ``.offset(m)`` should return
        n - m results, skipping the first m that would otherwise have been
        returned.
        """
        self.assertEqual(
            len(self.Foo.find().offset(25)),
            self.COUNT - 25,
        )
        # TODO: test offset = 0, offset > self.COUNT

    def test_sort(self):
        """``.sort('-_id')`` returns records in descending key order."""
        results = self.Foo.find().sort('-_id')
        self.assertListEqual(
            [x._id for x in results],
            list(range(self.COUNT))[::-1],
        )

    # paired filter tests:
    #   limit + {limit,offset,sort}
    #   offset + {offset,sort}
    #   sort + sort
    # each test sub tests the filters in both orders. i.e. limit + offset
    # tests .limit().offset() AND .offset().limit()

    def test_limit_limit(self):
        # The last .limit() call wins, regardless of order.
        self.assertEqual( len(self.Foo.find().limit(5).limit(10)), 10 )
        self.assertEqual( len(self.Foo.find().limit(10).limit(5)), 5 )

    def test_limit_offset(self):
        # limit and offset commute; the result size is capped by both.
        self.assertEqual( len(self.Foo.find().limit(2).offset(2)), 2 )
        self.assertEqual( len(self.Foo.find().offset(2).limit(2)), 2 )
        tmp = 5
        limit = tmp + 5
        offset = self.COUNT - tmp
        self.assertEqual(len(self.Foo.find().limit(limit).offset(offset)), tmp)
        self.assertEqual(len(self.Foo.find().offset(offset).limit(limit)), tmp)

    def test_limit_sort(self):
        # sort applies before limit, in either call order.
        limit, sort, = [10, '-_id']
        expect = list(range(self.COUNT-limit, self.COUNT)[::-1])
        results = self.Foo.find().limit(limit).sort(sort)
        self.assertListEqual([x._id for x in results], expect)
        results = self.Foo.find().sort(sort).limit(limit)
        self.assertListEqual([x._id for x in results], expect)

    def test_offset_offset(self):
        # The last .offset() call wins, regardless of order.
        self.assertEqual(
            len(self.Foo.find().offset(10).offset(17)),
            self.COUNT-17
        )
        self.assertEqual(
            len(self.Foo.find().offset(17).offset(10)),
            self.COUNT-10
        )

    def test_offset_sort(self):
        # sort applies before offset, in either call order.
        offset, sort = [27, '-_id']
        expect = list(range(self.COUNT-offset)[::-1])
        results = self.Foo.find().offset(offset).sort(sort)
        self.assertListEqual([x._id for x in results], expect)
        results = self.Foo.find().sort(sort).offset(offset)
        self.assertListEqual([x._id for x in results], expect)

    def test_sort_sort(self):
        # The last .sort() call wins.
        results = self.Foo.find().sort('-_id').sort('_id')
        self.assertListEqual(
            [x._id for x in results],
            list(range(self.COUNT)),
        )
        results = self.Foo.find().sort('_id').sort('-_id')
        self.assertListEqual(
            [x._id for x in results],
            list(range(self.COUNT)[::-1]),
        )

    # all three filters together

    def test_limit_offset_sort(self):
        # Every call order must yield: sort first, then offset, then limit.
        test_sets = [
            # limit  offset  sort   expect
            [ 10,    7,     '-_id', list(range(self.COUNT-7-10, self.COUNT-7)[::-1]), ],
            [ 20,    17,    '_id',  list(range(17, self.COUNT)), ],
            [ 10,    5,     '_id',  list(range(5, 5+10)), ],
        ]
        for test in test_sets:
            limit, offset, sort, expect = test
            all_combinations = [
                self.Foo.find().limit(limit).offset(offset).sort(sort),
                self.Foo.find().limit(limit).sort(sort).offset(offset),
                self.Foo.find().offset(offset).limit(limit).sort(sort),
                self.Foo.find().offset(offset).sort(sort).limit(limit),
                self.Foo.find().sort(sort).limit(limit).offset(offset),
                self.Foo.find().sort(sort).offset(offset).limit(limit),
            ]
            for result in all_combinations:
                self.assertListEqual( [x._id for x in result], expect )
| |
##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Utilities to converting a Record to a vCard
"""
__all__ = [
"vCardFromRecord"
]
from pycalendar.vcard.adr import Adr
from pycalendar.vcard.n import N
from twext.python.log import Logger
from twext.who.idirectory import FieldName, RecordType
from twisted.internet.defer import inlineCallbacks, returnValue
from twistedcaldav.config import config
from twistedcaldav.vcard import Component, Property, vCardProductID
from txdav.who.idirectory import FieldName as CalFieldName, \
RecordType as CalRecordType
from txweb2.dav.util import joinURL
log = Logger()


# Directory record type -> vCard KIND value used when generating vCards.
recordTypeToVCardKindMap = {
    RecordType.user: "individual",
    RecordType.group: "group",
    CalRecordType.location: "location",
    CalRecordType.resource: "device",
}

# Inverse map, vCard KIND -> directory record type.  Note that "org" is
# also accepted and treated as a group.
vCardKindToRecordTypeMap = {
    "individual": RecordType.user,
    "group": RecordType.group,
    "org": RecordType.group,
    "location": CalRecordType.location,
    "device": CalRecordType.resource,
}


# all possible generated parameters.
vCardPropToParamMap = {
    # "PHOTO": {"ENCODING": ("B",), "TYPE": ("JPEG",), },
    "ADR": {"TYPE": ("WORK", "PREF", "POSTAL", "PARCEL",),
            "LABEL": None, "GEO": None, },
    "LABEL": {"TYPE": ("POSTAL", "PARCEL",)},
    # "TEL": {"TYPE": None, },  # None means param value can be anything
    "EMAIL": {"TYPE": None, },
    # "KEY": {"ENCODING": ("B",), "TYPE": ("PGPPUBILICKEY", "USERCERTIFICATE", "USERPKCS12DATA", "USERSMIMECERTIFICATE",)},
    # "URL": {"TYPE": ("WEBLOG", "HOMEPAGE",)},
    # "IMPP": {"TYPE": ("PREF",), "X-SERVICE-TYPE": None, },
    # "X-ABRELATEDNAMES": {"TYPE": None, },
    # "X-AIM": {"TYPE": ("PREF",), },
    # "X-JABBER": {"TYPE": ("PREF",), },
    # "X-MSN": {"TYPE": ("PREF",), },
    # "X-ICQ": {"TYPE": ("PREF",), },
}

# Properties added verbatim to every generated vCard.
vCardConstantProperties = {
    # ====================================================================
    # 3.6 EXPLANATORY TYPES http://tools.ietf.org/html/rfc2426#section-3.6
    # ====================================================================
    # 3.6.3 PRODID
    "PRODID": vCardProductID,
    # 3.6.9 VERSION
    "VERSION": "3.0",
}
@inlineCallbacks
def vCardFromRecord(record, forceKind=None, addProps=None, parentURI=None):
    """
    Build a vCard component describing a directory record.

    @param record: the directory record to convert; its C{fields} (uid,
        fullNames, emailAddresses, and calendar-specific fields) supply the
        vCard property values.
    @param forceKind: when not C{None}, overrides the vCard KIND value
        otherwise derived from C{record.recordType}.
    @param addProps: optional mapping of extra constant properties to add;
        entries never override the built-in constant properties.
    @param parentURI: when given, a SOURCE property containing this vCard's
        URL (parentURI + "<uid>.vcf") is added.
    @return: a Deferred firing the generated vCard component.
    """

    def isUniqueProperty(newProperty, ignoredParameters=None):
        # True if no equivalent property already exists on the vcard.
        # Comparison ignores the parameter values listed in
        # ignoredParameters.  (Fixed: mutable default argument {} replaced
        # with None; the truthiness check below treats both the same way.)
        existingProperties = vcard.properties(newProperty.name())
        for existingProperty in existingProperties:
            if ignoredParameters:
                existingProperty = existingProperty.duplicate()
                for paramName, paramValues in ignoredParameters.iteritems():
                    for paramValue in paramValues:
                        existingProperty.removeParameterValue(paramName, paramValue)
            if existingProperty == newProperty:
                return False
        return True

    def addUniqueProperty(newProperty, ignoredParameters=None):
        # Add newProperty unless an equivalent one is already present, in
        # which case log and skip it.
        if isUniqueProperty(newProperty, ignoredParameters):
            vcard.addProperty(newProperty)
        else:
            log.info(
                "Ignoring property {prop!r} it is a duplicate",
                prop=newProperty
            )

    # =======================================================================
    # start
    # =======================================================================
    log.debug(
        "vCardFromRecord: record={record}, forceKind={forceKind}, addProps={addProps}, parentURI={parentURI}",
        record=record, forceKind=forceKind, addProps=addProps, parentURI=parentURI)

    if forceKind is None:
        kind = recordTypeToVCardKindMap.get(record.recordType, "individual")
    else:
        kind = forceKind

    # Merge caller-supplied constant properties under the built-in ones.
    constantProperties = vCardConstantProperties.copy()
    if addProps:
        for key, value in addProps.iteritems():
            if key not in constantProperties:
                constantProperties[key] = value

    # create vCard
    vcard = Component("VCARD")

    # add constant properties
    for key, value in constantProperties.items():
        vcard.addProperty(Property(key, value))

    # ===========================================================================
    # 2.1 Predefined Type Usage
    # ===========================================================================
    # 2.1.4 SOURCE Type http://tools.ietf.org/html/rfc2426#section-2.1.4
    if parentURI:
        uri = joinURL(parentURI, record.fields[FieldName.uid].encode("utf-8") + ".vcf")

        # seems like this should be in some standard place.
        if (config.EnableSSL or config.BehindTLSProxy) and config.SSLPort:
            if config.SSLPort == 443:
                source = "https://{server}{uri}".format(server=config.ServerHostName, uri=uri)
            else:
                source = "https://{server}:{port}{uri}".format(server=config.ServerHostName, port=config.SSLPort, uri=uri)
        else:
            # Fixed: this plain-HTTP branch previously emitted "https://"
            # URLs while advertising config.HTTPPort; the scheme now
            # matches the port being used.
            if config.HTTPPort == 80:
                source = "http://{server}{uri}".format(server=config.ServerHostName, uri=uri)
            else:
                source = "http://{server}:{port}{uri}".format(server=config.ServerHostName, port=config.HTTPPort, uri=uri)
        vcard.addProperty(Property("SOURCE", source))

    # =======================================================================
    # 3.1 IDENTIFICATION TYPES http://tools.ietf.org/html/rfc2426#section-3.1
    # =======================================================================
    # 3.1.1 FN
    vcard.addProperty(Property("FN", record.fields[FieldName.fullNames][0].encode("utf-8")))

    # 3.1.2 N
    # TODO: Better parsing
    # Naive split of the first full name into given/middle/family parts.
    fullNameParts = record.fields[FieldName.fullNames][0].split()
    first = fullNameParts[0] if len(fullNameParts) >= 2 else None
    last = fullNameParts[len(fullNameParts) - 1]
    middle = fullNameParts[1] if len(fullNameParts) == 3 else None
    prefix = None
    suffix = None

    nameObject = N(
        first=first.encode("utf-8") if first else None,
        last=last.encode("utf-8") if last else None,
        middle=middle.encode("utf-8") if middle else None,
        prefix=prefix.encode("utf-8") if prefix else None,
        suffix=suffix.encode("utf-8") if suffix else None,
    )
    vcard.addProperty(Property("N", nameObject))

    # 3.1.3 NICKNAME
    nickname = record.fields.get(CalFieldName.abbreviatedName)
    if nickname:
        vcard.addProperty(Property("NICKNAME", nickname.encode("utf-8")))

    # UNIMPLEMENTED
    # 3.1.4 PHOTO
    # 3.1.5 BDAY

    # ============================================================================
    # 3.2 Delivery Addressing Types http://tools.ietf.org/html/rfc2426#section-3.2
    # ============================================================================
    # 3.2.1 ADR
    #
    # Experimental:
    # Use vCard 4.0 ADR: http://tools.ietf.org/html/rfc6350#section-6.3.1
    params = {}
    geo = record.fields.get(CalFieldName.geographicLocation)
    if geo:
        params["GEO"] = geo.encode("utf-8")
    label = record.fields.get(CalFieldName.streetAddress)
    if label:
        params["LABEL"] = label.encode("utf-8")
    #
    extended = record.fields.get(CalFieldName.floor)
    # TODO: Parse?
    street = record.fields.get(CalFieldName.streetAddress)
    city = None
    region = None
    postalcode = None
    country = None

    # Only emit an ADR if at least one component or parameter is present.
    if extended or street or city or region or postalcode or country or params:
        params["TYPE"] = ["WORK", "PREF", "POSTAL", "PARCEL", ]
        vcard.addProperty(
            Property(
                "ADR", Adr(
                    # pobox = box,
                    extended=extended.encode("utf-8") if extended else None,
                    street=street.encode("utf-8") if street else None,
                    locality=city.encode("utf-8") if city else None,
                    region=region.encode("utf-8") if region else None,
                    postalcode=postalcode.encode("utf-8") if postalcode else None,
                    country=country.encode("utf-8") if country else None,
                ),
                params=params
            )
        )

    # 3.2.2 LABEL
    # label = record.fields.get(CalFieldName.streetAddress)
    if label:
        vcard.addProperty(Property("LABEL", label.encode("utf-8"), params={"TYPE": ["POSTAL", "PARCEL", ]}))

    # ======================================================================================
    # 3.3 TELECOMMUNICATIONS ADDRESSING TYPES http://tools.ietf.org/html/rfc2426#section-3.3
    # ======================================================================================
    #
    # UNIMPLEMENTED
    # 3.3.1 TEL

    # 3.3.2 EMAIL
    # The first address is marked TYPE=PREF; duplicate detection ignores
    # the PREF value so the same address is never added twice.
    preferredWorkParams = {"TYPE": ["WORK", "PREF", "INTERNET", ], }
    workParams = {"TYPE": ["WORK", "INTERNET", ], }
    params = preferredWorkParams
    for emailAddress in record.fields.get(FieldName.emailAddresses, ()):
        addUniqueProperty(Property("EMAIL", emailAddress.encode("utf-8"), params=params), ignoredParameters={"TYPE": ["PREF", ]})
        params = workParams

    # UNIMPLEMENTED:
    # 3.3.3 MAILER
    #
    # =====================================================================
    # 3.4 GEOGRAPHICAL TYPES http://tools.ietf.org/html/rfc2426#section-3.4
    # =====================================================================
    #
    # UNIMPLEMENTED:
    # 3.4.1 TZ
    #
    # 3.4.2 GEO
    geographicLocation = record.fields.get(CalFieldName.geographicLocation)
    if geographicLocation:
        vcard.addProperty(Property("GEO", geographicLocation.encode("utf-8")))

    # =======================================================================
    # 3.5 ORGANIZATIONAL TYPES http://tools.ietf.org/html/rfc2426#section-3.5
    # =======================================================================
    #
    # UNIMPLEMENTED:
    # 3.5.1 TITLE
    # 3.5.2 ROLE
    # 3.5.3 LOGO
    # 3.5.4 AGENT
    # 3.5.5 ORG
    #
    # ====================================================================
    # 3.6 EXPLANATORY TYPES http://tools.ietf.org/html/rfc2426#section-3.6
    # ====================================================================
    #
    # UNIMPLEMENTED:
    # 3.6.1 CATEGORIES
    # 3.6.2 NOTE
    #
    # ADDED WITH CONTSTANT PROPERTIES:
    # 3.6.3 PRODID
    #
    # UNIMPLEMENTED:
    # 3.6.5 SORT-STRING
    # 3.6.6 SOUND

    # 3.6.7 UID
    vcard.addProperty(Property("UID", record.fields[FieldName.uid].encode("utf-8")))

    # UNIMPLEMENTED:
    # 3.6.8 URL

    # ADDED WITH CONTSTANT PROPERTIES:
    # 3.6.9 VERSION

    # ===================================================================
    # 3.7 SECURITY TYPES http://tools.ietf.org/html/rfc2426#section-3.7
    # ===================================================================
    # UNIMPLEMENTED:
    # 3.7.1 CLASS
    # 3.7.2 KEY

    # ===================================================================
    # X Properties
    # ===================================================================
    # UNIMPLEMENTED:
    # X-<instant messaging type> such as:
    # "AIM", "FACEBOOK", "GAGU-GAGU", "GOOGLE TALK", "ICQ", "JABBER", "MSN", "QQ", "SKYPE", "YAHOO",
    # X-MAIDENNAME
    # X-PHONETIC-FIRST-NAME
    # X-PHONETIC-MIDDLE-NAME
    # X-PHONETIC-LAST-NAME
    # X-ABRELATEDNAMES

    # X-ADDRESSBOOKSERVER-KIND
    if kind == "group":
        vcard.addProperty(Property("X-ADDRESSBOOKSERVER-KIND", kind))

        # add members
        # FIXME: members() is a deferred, so all of vCardFromRecord is deferred.
        for memberRecord in (yield record.members()):
            cua = memberRecord.canonicalCalendarUserAddress(False)
            if cua:
                vcard.addProperty(Property("X-ADDRESSBOOKSERVER-MEMBER", cua.encode("utf-8")))

    # ===================================================================
    # vCard 4.0 http://tools.ietf.org/html/rfc6350
    # ===================================================================
    # UNIMPLEMENTED:
    # 6.4.3 IMPP http://tools.ietf.org/html/rfc6350#section-6.4.3
    #
    # 6.1.4 KIND http://tools.ietf.org/html/rfc6350#section-6.1.4
    #
    # see also: http://www.iana.org/assignments/vcard-elements/vcard-elements.xml
    #
    vcard.addProperty(Property("KIND", kind))

    # one more X- related to kind
    if kind == "org":
        vcard.addProperty(Property("X-ABShowAs", "COMPANY"))

    log.debug("vCardFromRecord: vcard=\n{vcard}", vcard=vcard)
    returnValue(vcard)
| |
import tensorflow as tf
import numpy as np
import cv2
import sys
import os
import datetime
from scipy.ndimage.filters import maximum_filter
from dqn import DQNAgent
from memory import ExperienceMemory
from memory import MatchResults, GameHistory
from memory import Episode, EpisodeHistory
# TODO: Think where this should be located
def transformImage(image):
    """Convert a raw BGR game frame into an 80x80x1 binary uint8 image.

    Pipeline: grayscale -> binarize -> 2x4 maximum filter (thickens thin
    bright sprites so they survive downscaling) -> re-binarize -> resize to
    80x80 -> final binarize, reshaped to (80, 80, 1).
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
    image = maximum_filter(image, size=(2, 4))
    _, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
    # Bug fix: cv2.resize's third positional argument is the optional
    # output array `dst`, not the interpolation mode, so the original call
    # silently ignored cv2.INTER_AREA and used the default INTER_LINEAR.
    # Pass it via the `interpolation` keyword.
    image = cv2.resize(image, (80, 80), interpolation=cv2.INTER_AREA)
    _, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
    return np.reshape(image, (80, 80, 1)).astype(np.uint8)
class AgentTrainer(object):
    """Drives DQN training: wraps a DQNAgent with experience replay,
    epsilon-greedy exploration, checkpointing, and TensorBoard/file logging.

    Uses a TF1-style graph/session; `step` is a graph variable so the global
    step survives checkpoint save/restore.
    """

    def __init__(self, config):
        """Build the agent, replay memory and logging infrastructure.

        `config` keys read here: experiment_path, action_count,
        replay_memory_size, match_memory_size, initial_epsilon,
        final_epsilon, observe_step_count, explore_step_count,
        frame_per_action, gamma, log_period, batch_size.
        """
        # Create session to store trained parameters
        self.session = tf.Session()
        # Create and configure logging directories and file handles.
        experiment_path = config["experiment_path"]
        os.makedirs(experiment_path, exist_ok=True)
        summary_dir = os.path.join(experiment_path, "summary")
        os.makedirs(summary_dir, exist_ok=True)
        self.summary_writer = tf.summary.FileWriter(summary_dir)
        # Appended to (not truncated) so restarts keep the old log.
        self.summary_output = open(os.path.join(experiment_path, "log.txt"), "a")
        self.action_count = config["action_count"]
        # Create agent for training
        self.agent = DQNAgent(self.action_count)
        # Global step lives in the graph so it is saved/restored with the model.
        self.step = tf.Variable(0, name="step")
        self.increment_step = self.step.assign_add(1)
        # Create memory to store observations
        self.memory = ExperienceMemory(config["replay_memory_size"])
        # Tools for saving and loading networks
        self.saver = tf.train.Saver()
        # Last action that agent performed
        self.last_action_index = None
        # Deque to keep track of average reward and play time
        self.game_history = GameHistory(config["match_memory_size"])
        # Deque to store losses
        self.episode_history = EpisodeHistory(config["replay_memory_size"])
        self.INITIAL_EPSILON = config["initial_epsilon"]
        self.FINAL_EPSILON = config["final_epsilon"]
        self.OBSERVE = config["observe_step_count"]
        self.EXPLORE = config["explore_step_count"]
        self.FRAME_PER_ACTION = config["frame_per_action"]
        self.GAMMA = config["gamma"]
        self.LOG_PERIOD = config["log_period"]
        self.BATCH_SIZE = config["batch_size"]

    def init_training(self):
        """Initialize graph variables and derive the current step/epsilon."""
        # Initialize training parameters
        self.session.run(tf.global_variables_initializer())
        # Resume from whatever step value the (possibly restored) graph holds.
        self.t = self.step.eval(self.session)
        self.epsilon = self.compute_epsilon(self.t)
        self.last_action_index = None

    def compute_epsilon(self, t):
        """Exploration rate at step t: INITIAL during the observation phase,
        FINAL after EXPLORE steps, linearly interpolated in between.

        NOTE(review): alpha uses t/EXPLORE rather than
        (t-OBSERVE)/(EXPLORE-OBSERVE), so epsilon jumps below INITIAL_EPSILON
        right after the observe phase ends — confirm this is intended.
        """
        if t < self.OBSERVE:
            return self.INITIAL_EPSILON
        if t > self.EXPLORE:
            return self.FINAL_EPSILON
        alpha = t / self.EXPLORE
        return self.INITIAL_EPSILON * (1 - alpha) + self.FINAL_EPSILON * alpha

    def load_model(self, path):
        """Restore the newest checkpoint under `path`, if one exists."""
        checkpoint = tf.train.get_checkpoint_state(path)
        if checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(self.session, checkpoint.model_checkpoint_path)
            print("Successfully loaded: {}".format(checkpoint.model_checkpoint_path))
        else:
            print("Could not find old network weights")

    def save_model(self, path):
        """Write a checkpoint named 'dqn' under `path`, tagged with step t."""
        if not os.path.exists(path):
            os.makedirs(path)
        self.saver.save(self.session, path + "/dqn", global_step=self.t)

    def reset_state(self, initial_state):
        """Start a new match: stack four copies of the first frame and reset
        the per-match reward/playtime accumulators."""
        # Get the first state by doing nothing and preprocess the image to 80x80x4
        x_t = initial_state
        x_t = transformImage(x_t)
        self.s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=2)
        self.match_reward = 0
        self.match_playtime = 0
        self.gamma_pow = 1

    def act(self):
        """Pick an action epsilon-greedily; between FRAME_PER_ACTION steps the
        previous action is repeated. Returns the chosen action index."""
        # Choose an action epsilon greedily
        action_index = 0
        if self.t % self.FRAME_PER_ACTION == 0:
            if np.random.random() <= self.epsilon:
                action_index = np.random.randint(0, self.action_count)
            else:
                action_index = self.agent.act(self.session, self.s_t)
        else:
            action_index = self.last_action_index  # do the same thing as before
        self.last_action_index = action_index
        return action_index

    def process_frame(self, screen, reward, terminal):
        """Consume one environment frame: store the transition, train (after
        the observe phase), log periodically, and reset on match end."""
        if self.last_action_index is None:
            # First frame ever seen: just establish the initial state.
            self.reset_state(screen)
            return
        # One-hot encode the action taken on the previous step.
        a_t = np.zeros([self.action_count])
        a_t[self.last_action_index] = 1
        # scale down epsilon
        self.epsilon = self.compute_epsilon(self.t)
        # run the selected action and observe next state and reward
        x_t1, r_t = screen, reward
        x_t1 = transformImage(x_t1)
        # New stacked state: newest frame plus the 3 most recent old frames.
        s_t1 = np.append(x_t1, self.s_t[:, :, :3], axis=2)
        # store the transition in memory
        self.memory.add_experience((self.s_t, a_t, r_t, s_t1, terminal))
        # only train if done observing
        summaries = None
        if self.t > self.OBSERVE:
            summaries, loss = self.make_train_step()
            self.episode_history.add_episode(Episode(loss))
        # update the old values
        self.s_t = s_t1
        self.t = self.session.run(self.increment_step)
        # print info
        if self.t % self.LOG_PERIOD == 0:
            now_string = datetime.datetime.now().strftime("%Y.%m.%d_%H:%M:%S")
            message = "TIME {}, TIMESTEP {}, EPSILON {}, EPISODE_STATS {}, MATCH_STATS {}\n".format(
                now_string,
                self.t,
                self.epsilon,
                self.episode_history.get_average_stats(),
                self.game_history.get_average_stats())
            print(message)
            sys.stdout.flush()
            self.summary_output.write(message)
            self.summary_output.flush()
            # Flush the latest training summaries alongside the text log.
            if summaries is not None:
                self.summary_writer.add_summary(summaries, self.t)
        # Accumulate the discounted match return (gamma_pow = GAMMA^playtime).
        self.match_reward += r_t * self.gamma_pow
        self.match_playtime += 1
        self.gamma_pow *= self.GAMMA
        if terminal:
            # Match finished: record stats, emit per-match summaries, reset.
            self.game_history.add_match(MatchResults(
                self.match_reward,
                self.match_playtime,
                reward))
            episode_summary = tf.Summary()
            episode_summary.value.add(simple_value=self.match_reward, node_name="match_reward", tag="match_reward")
            episode_summary.value.add(simple_value=self.match_playtime, node_name="match_playtime", tag="match_playtime")
            self.summary_writer.add_summary(episode_summary, self.t)
            self.summary_writer.flush()
            self.reset_state(screen)

    def make_train_step(self):
        """Sample a minibatch from replay memory and run one Q-learning
        update; returns whatever agent.train returns (summaries, loss)."""
        # sample a minibatch to train on
        minibatch = self.memory.sample(self.BATCH_SIZE)
        # get the batch variables
        s_j_batch = [d[0] for d in minibatch]
        a_batch = [d[1] for d in minibatch]
        r_batch = [d[2] for d in minibatch]
        s_j1_batch = [d[3] for d in minibatch]
        # get the batch variables
        # s_j_batch, a_batch, r_batch, s_j1_batch, terminal_batch = zip(*minibatch)
        action_scores_batch = np.array(self.agent.score_actions(self.session, s_j1_batch))
        # r_future = GAMMA * (1 - np.array(terminal_batch)) * np.max(action_scores_batch, axis=1)
        # y_batch = r_batch + r_future
        # Bellman targets: reward only for terminal transitions, otherwise
        # reward + discounted best next-state Q-value.
        y_batch = []
        for i in range(0, len(minibatch)):
            # if terminal only equals reward
            if minibatch[i][4]:
                y_batch.append(r_batch[i])
            else:
                y_batch.append(r_batch[i] + self.GAMMA * np.max(action_scores_batch[i]))
        return self.agent.train(self.session, y_batch, a_batch, s_j_batch)
| |
## This script was modified from Dr. Bjarni J. Vilhjalmsson's code (https://bitbucket.org/bjarni_vilhjalmsson/ldpred). We thank him for sharing his code
try:
import scipy as sp
except Exception:
print 'Using Numpy instead of Scipy.'
import numpy as sp
#from numpy import linalg
from scipy import linalg
import pdb
import plinkio
from plinkio import plinkfile
import random
import time
import gzip
import itertools as it
from sklearn import metrics
import getopt
import sys
import traceback
import time
import os
import gzip
import itertools as it
import h5py
import scipy as sp
from scipy import stats
import cPickle
from sklearn import metrics
# Chromosome group names as stored in the coordinated HDF5 file:
# the 22 autosomes followed by the X chromosome.
chromosomes_list = ['chrom_%d' % (chrom_num) for chrom_num in range(1, 23)] + ['chrom_X']
def pred_accuracy(y_true, y_pred):
    """Prediction accuracy of a polygenic risk score.

    Returns AUC when y_true takes exactly two distinct values (dichotomous
    trait), otherwise the Pearson correlation between y_true and y_pred
    (continuous trait).  Python 2 code (print statements).
    """
    # Copy so the label recoding below does not mutate the caller's array.
    y_true = sp.copy(y_true)
    if len(sp.unique(y_true))==2:
        print 'dichotomous trait, calculating AUC'
        y_min = y_true.min()
        y_max = y_true.max()
        # Recode arbitrary case/control labels to {0, 1} for roc_curve.
        if y_min!= 0 or y_max!=1:
            y_true[y_true==y_min]=0
            y_true[y_true==y_max]=1
        fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred)
        auc = metrics.auc(fpr, tpr)
        return auc
    else:
        print 'continuous trait, calculating COR'
        cor = sp.corrcoef(y_true,y_pred)[0,1]
        return cor
def get_LDpred_ld_tables(snps, ld_radius=100, ld_window_size=0):
    """
    Calculates LD tables, and the LD score in one go...

    snps is an (m SNPs x n individuals) matrix; the caller normalizes each
    row to mean 0 / sd 1 before calling, so sp.dot(snp, X.T)/n is a vector
    of correlations.  Returns a dict with:
      'ld_dict'   -- snp index -> correlation vector with SNPs within ld_radius
      'ld_scores' -- per-SNP LD score (bias-adjusted sum of r^2)
      'ref_ld_matrices' -- (only when ld_window_size > 0) list of per-window
                           reference LD matrices, one per ld_window_size chunk.
    """
    ld_dict = {}
    m,n = snps.shape
    print m,n
    ld_scores = sp.ones(m)
    ret_dict = {}
    for snp_i, snp in enumerate(snps):
        # Calculate D
        # Correlations of this SNP with all SNPs inside the radius.
        start_i = max(0, snp_i - ld_radius)
        stop_i = min(m, snp_i + ld_radius + 1)
        X = snps[start_i: stop_i]
        D_i = sp.dot(snp, X.T) / n
        r2s = D_i ** 2
        ld_dict[snp_i] = D_i
        # LD score with the (1-r^2)/(n-2) small-sample bias adjustment.
        lds_i = sp.sum(r2s - (1-r2s) / (n-2),dtype='float32')
        #lds_i = sp.sum(r2s - (1-r2s)*empirical_null_r2)
        ld_scores[snp_i] =lds_i
    ret_dict['ld_dict']=ld_dict
    ret_dict['ld_scores']=ld_scores
    if ld_window_size>0:
        # Block-diagonal reference LD matrices used by annopred_inf.
        ref_ld_matrices = []
        for i, wi in enumerate(range(0, m, ld_window_size)):
            start_i = wi
            stop_i = min(m, wi + ld_window_size)
            curr_window_size = stop_i - start_i  # unused
            X = snps[start_i: stop_i]
            D = sp.dot(X, X.T) / n
            ref_ld_matrices.append(D)
        ret_dict['ref_ld_matrices']=ref_ld_matrices
    return ret_dict
def annopred_inf(beta_hats, pr_sigi, n=1000, reference_ld_mats=None, ld_window_size=100):
    """
    infinitesimal model with snp-specific heritability derived from annotation
    used as the initial values for MCMC of non-infinitesimal model

    For each ld_window_size chunk of SNPs, solves the ridge-style system
    (n*D + diag(1/pr_sigi))^-1 * n * beta_hats using the matching reference
    LD matrix, and returns the adjusted effect sizes.
    """
    num_betas = len(beta_hats)
    updated_betas = sp.empty(num_betas)
    for win_idx, win_start in enumerate(range(0, num_betas, ld_window_size)):
        win_stop = min(num_betas, win_start + ld_window_size)
        # Per-SNP prior precisions for this window.
        prior_precision = 1.0/pr_sigi[win_start: win_stop]
        window_ld = reference_ld_mats[win_idx]
        # Posterior precision matrix; pinv guards against singular windows.
        A = (n/(1))*window_ld + sp.diag(prior_precision)
        window_posterior = sp.dot(linalg.pinv(A) / (1.0/n), beta_hats[win_start: win_stop])
        updated_betas[win_start: win_stop] = window_posterior
    return updated_betas
def annopred_genomewide(data_file=None, ld_radius = None, ld_dict=None, out_file_prefix=None, ps=None,
                        n=None, h2=None, num_iter=None, zero_jump_prob=0.05, burn_in=5, PRF=None):
    """
    Calculate LDpred for a genome

    Runs the AnnoPred pipeline over every chromosome group in the coordinated
    HDF5 file `data_file`: estimates genome-wide heritability from the summary
    statistics (unless `h2` is given), rescales the per-SNP prior variances in
    PRF, computes infinitesimal-model starting betas per chromosome, then for
    each tuning parameter p in `ps` runs the non-infinitesimal MCMC and writes
    risk-score, AUC/COR and per-SNP effect-size files prefixed with
    `out_file_prefix`.  Python 2 code (print statements, cPickle-era style).
    """
    # Per-SNP prior arrays: chromosome, SNP id, prior inclusion prob, prior
    # per-SNP variance.
    prf_chr = PRF['chrom']
    prf_sids = PRF['sids']
    prf_pi = PRF['pi']
    prf_sigi2 = PRF['sigi2']
    df = h5py.File(data_file,'r')
    has_phenotypes=False
    if 'y' in df.keys():
        # NOTE(review): bare string is a no-op statement — presumably this
        # was meant to be a print.
        'Validation phenotypes found.'
        y = df['y'][...]  # Phenotype
        num_individs = len(y)
        has_phenotypes=True
        # Accumulated polygenic risk scores across chromosomes.
        risk_scores_pval_derived = sp.zeros(num_individs)
        risk_scores_pval_derived_inf = sp.zeros(num_individs)
    ld_scores_dict = ld_dict['ld_scores_dict']
    chrom_ld_dict = ld_dict['chrom_ld_dict']
    chrom_ref_ld_mats = ld_dict['chrom_ref_ld_mats']
    print 'LD radius used: %d' % ld_radius
    results_dict = {}
    num_snps = 0
    sum_beta2s = 0
    cord_data_g = df['cord_data']
    # First pass: count SNPs and sum squared effects for the lambda estimate.
    for chrom_str in chromosomes_list:
        if chrom_str in cord_data_g.keys():
            g = cord_data_g[chrom_str]
            betas = g['betas'][...]
            n_snps = len(betas)
            num_snps += n_snps
            sum_beta2s += sp.sum(betas ** 2)
    L = ld_scores_dict['avg_gw_ld_score']
    chi_square_lambda = sp.mean(n * sum_beta2s / float(num_snps))
    # print 'Genome-wide lambda inflation:', chi_square_lambda,
    print 'Genome-wide mean LD score:', L
    # LD-score-regression-style heritability estimate, floored at 0.0001.
    gw_h2_ld_score_est = max(0.0001, (max(1, chi_square_lambda) - 1) / (n * (L / num_snps)))
    print 'Estimated genome-wide heritability:', gw_h2_ld_score_est
    #assert chi_square_lambda>1, 'Check the summary statistic file'
    if h2 is None:
        h2 = gw_h2_ld_score_est
        print h2
    # Total prior heritability; used to rescale per-SNP priors to sum to h2.
    h2_new = sp.sum(prf_sigi2)
    sig_12 = (1.0)/n  #######################
    pr_sig = {}
    pr_p = {}
    annopred_inf_chrom_dict = {}
    print 'Calculating initial values for MCMC using infinitesimal model'
    for chrom_str in chromosomes_list:
        if chrom_str in cord_data_g.keys():
            print 'Calculating posterior betas for Chromosome %s'%((chrom_str.split('_'))[1])
            g = cord_data_g[chrom_str]
            #Filter monomorphic SNPs
            snp_stds = g['snp_stds_ref'][...]
            snp_stds = snp_stds.flatten()
            ok_snps_filter = snp_stds>0
            pval_derived_betas = g['betas'][...]
            pval_derived_betas = pval_derived_betas[ok_snps_filter]
            sids = g['sids'][...]
            sids = sids[ok_snps_filter]
            chri = int(chrom_str.split('_')[1])
            # Align the prior arrays with this chromosome's SNP ordering.
            prf_sids_chri = prf_sids[prf_chr==chri]
            prf_pi_chri = prf_pi[prf_chr==chri]
            prf_sigi2_chri = prf_sigi2[prf_chr==chri]
            if len(prf_sids_chri)==len(sids):
                if sum(prf_sids_chri==sids)==len(prf_sids_chri):
                    # Same SNPs in the same order: copy priors directly.
                    pr_p[chrom_str] = sp.copy(prf_pi_chri)
                    pr_sig[chrom_str] = sp.copy(prf_sigi2_chri)
                else:
                    print 'Order of SNPs does not match, sorting prior files'
                    pr_p[chrom_str] = sp.zeros(len(sids))
                    pr_sig[chrom_str] = sp.zeros(len(sids))
                    for i, sid in enumerate(sids):
                        pr_p[chrom_str][i] = prf_pi_chri[prf_sids_chri==sid]
                        pr_sig[chrom_str][i] = prf_sigi2_chri[prf_sids_chri==sid]
            else:
                print 'More SNPs found in prior file, extracting SNPs from prior files'
                pr_p[chrom_str] = sp.zeros(len(sids))
                pr_sig[chrom_str] = sp.zeros(len(sids))
                for i, sid in enumerate(sids):
                    pr_p[chrom_str][i] = prf_pi_chri[prf_sids_chri==sid]
                    pr_sig[chrom_str][i] = prf_sigi2_chri[prf_sids_chri==sid]
            # Rescale per-SNP prior variances so they sum to h2 genome-wide.
            pr_sig[chrom_str] = h2*pr_sig[chrom_str]/h2_new
            if h2 is not None:
                h2_chrom = sp.sum(pr_sig[chrom_str])
            else:
                h2_chrom = gw_h2_ld_score_est * (n_snps / float(num_snps))
            # Infinitesimal-model betas used as the MCMC starting point.
            start_betas = annopred_inf(pval_derived_betas, pr_sigi=pr_sig[chrom_str], reference_ld_mats=chrom_ref_ld_mats[chrom_str], n=n, ld_window_size=2*ld_radius)
            annopred_inf_chrom_dict[chrom_str]=start_betas
    for p in ps:
        print 'Starting AnnoPred with ', p
        p_str = p
        results_dict[p_str]={}
        if out_file_prefix:
            #Preparing output files
            raw_effect_sizes = []
            annopred_effect_sizes = []
            annopred_inf_effect_sizes = []
            out_sids = []
            chromosomes = []
            out_positions = []
            out_nts = []
            out = []
            out_inf = []
            out.append('The input prior p is '+str(prf_pi[0])+'\n')
            out.append('Estimated Genome-wide heritability: '+str(gw_h2_ld_score_est)+'\n')
            out.append('Posterior variance for each snp: '+str(sig_12)+'\n')
        print 'Estimated Genome-wide heritability from Priors:', h2
        print 'Posterior variance for each snp:', sig_12
        for chrom_str in chromosomes_list:
            if chrom_str in cord_data_g.keys():
                g = cord_data_g[chrom_str]
                if has_phenotypes:
                    # Prefer validation genotypes when available.
                    if 'raw_snps_val' in g.keys():
                        raw_snps = g['raw_snps_val'][...]
                    else:
                        raw_snps = g['raw_snps_ref'][...]
                #Filter monomorphic SNPs
                snp_stds = g['snp_stds_ref'][...]
                snp_stds = snp_stds.flatten()
                ok_snps_filter = snp_stds>0
                snp_stds = snp_stds[ok_snps_filter]
                pval_derived_betas = g['betas'][...]
                pval_derived_betas = pval_derived_betas[ok_snps_filter]
                positions = g['positions'][...]
                positions = positions[ok_snps_filter]
                sids = g['sids'][...]
                sids = sids[ok_snps_filter]
                log_odds = g['log_odds'][...]
                log_odds = log_odds[ok_snps_filter]
                nts = g['nts'][...]
                nts = nts[ok_snps_filter]
                # Priors aligned to this chromosome in the loop above.
                prf_pi_chri_sorted = pr_p[chrom_str]
                prf_sigi2_chri_sorted = pr_sig[chrom_str]
                if out_file_prefix:
                    chromosomes.extend([chrom_str]*len(pval_derived_betas))
                    out_positions.extend(positions)
                    out_sids.extend(sids)
                    raw_effect_sizes.extend(log_odds)
                    out_nts.extend(nts)
                n_snps = len(pval_derived_betas)
                if h2 is not None:
                    h2_chrom = sp.sum(prf_sigi2_chri_sorted)
                    #h2_chrom = h2 * (n_snps / float(num_snps))
                else:
                    h2_chrom = gw_h2_ld_score_est * (n_snps / float(num_snps))
                #print 'Prior parameters: p=%0.3f, n=%d, m=%d, h2_chrom=%0.4f' % (p, n, n_snps, h2_chrom)
                res_dict = non_infinitesimal_mcmc(pval_derived_betas, Pi = prf_pi_chri_sorted, Sigi2=prf_sigi2_chri_sorted, sig_12=sig_12, h2=h2_chrom, n=n, ld_radius=ld_radius,
                                                  num_iter=num_iter, burn_in=burn_in, ld_dict=chrom_ld_dict[chrom_str],
                                                  start_betas=annopred_inf_chrom_dict[chrom_str], zero_jump_prob=zero_jump_prob)
                updated_betas = res_dict['betas']
                updated_inf_betas = res_dict['inf_betas']
                sum_sqr_effects = sp.sum(updated_betas ** 2)
                if sum_sqr_effects>gw_h2_ld_score_est:
                    print 'Sum of squared updated effects estimates seems too large:', sum_sqr_effects
                    print 'This suggests that the Gibbs sampler did not convergence.'
                print 'Calculating scores for Chromosome %s'%((chrom_str.split('_'))[1])
                # Convert from standardized-genotype scale to allele-count scale.
                updated_betas = updated_betas / (snp_stds.flatten())
                updated_inf_betas = updated_inf_betas / (snp_stds.flatten())
                annopred_effect_sizes.extend(updated_betas)
                annopred_inf_effect_sizes.extend(updated_inf_betas)
                #if has_phenotypes:
                if has_phenotypes:
                    # Per-chromosome risk scores and prediction accuracy.
                    prs = sp.dot(updated_betas, raw_snps)
                    prs_inf = sp.dot(updated_inf_betas, raw_snps)
                    risk_scores_pval_derived += prs
                    risk_scores_pval_derived_inf += prs_inf
                    corr = sp.corrcoef(y, prs)[0, 1]
                    r2 = corr ** 2
                    corr_inf = sp.corrcoef(y, prs_inf)[0, 1]
                    r2_inf = corr_inf ** 2
                    # print 'The R2 prediction accuracy of PRS using %s was: %0.4f' %(chrom_str, r2)
                    # print 'The R2 prediction accuracy of PRS using %s was: %0.4f' %(chrom_str, r2_inf)
                    out.append('The R2 prediction accuracy of PRS using '+chrom_str+' was '+str(r2)+'\n')
                    out_inf.append('The R2 prediction accuracy of PRS using '+chrom_str+' was '+str(r2_inf)+'\n')
        # print 'There were %d (SNP) effects' % num_snps
        if has_phenotypes:
            # Genome-wide accuracy, AUC/COR and calibration for this p.
            num_indivs = len(y)
            results_dict[p_str]['y']=y
            results_dict[p_str]['risk_scores_pd']=risk_scores_pval_derived
            # print 'Prediction accuracy was assessed using %d individuals.'%(num_indivs)
            out.append('Prediction accuracy was assessed using '+str(num_indivs)+' individuals\n')
            corr = sp.corrcoef(y, risk_scores_pval_derived)[0, 1]
            r2 = corr ** 2
            results_dict[p_str]['r2_pd']=r2
            # print 'The R2 prediction accuracy (observed scale) for the whole genome was: %0.4f (%0.6f)' % (r2, ((1-r2)**2)/num_indivs)
            out.append('The R2 prediction accuracy (observed scale) for the whole genome was: '+str(r2)+' ('+str(((1-r2)**2)/num_indivs)+')\n')
            corr_inf = sp.corrcoef(y, risk_scores_pval_derived_inf)[0, 1]
            r2_inf = corr_inf ** 2
            # NOTE(review): this overwrites the non-inf r2 stored under the
            # same 'r2_pd' key just above — possibly unintended.
            results_dict[p_str]['r2_pd']=r2_inf
            # print 'The R2 prediction accuracy (observed scale) for the whole genome was: %0.4f (%0.6f)' % (r2_inf, ((1-r2_inf)**2)/num_indivs)
            out_inf.append('The R2 prediction accuracy (observed scale) for the whole genome was: '+str(r2_inf)+' ('+str(((1-r2_inf)**2)/num_indivs)+')\n')
            # Flip score sign when negatively correlated so AUC is meaningful.
            if corr<0:
                risk_scores_pval_derived = -1* risk_scores_pval_derived
            auc = pred_accuracy(y,risk_scores_pval_derived)
            print 'AnnoPred AUC/COR for the whole genome was: %0.4f'%auc
            out.append('AUC/COR for the whole genome was: '+str(auc)+'\n')
            if corr_inf<0:
                risk_scores_pval_derived_inf = -1* risk_scores_pval_derived_inf
            auc_inf = pred_accuracy(y,risk_scores_pval_derived_inf)
            print 'AnnoPred-inf AUC/COR for the whole genome was: %0.4f'%auc_inf
            out_inf.append('AUC/COR for the whole genome was: '+str(auc_inf)+'\n')
            sp.savetxt('%s_y_'%(out_file_prefix)+str(p)+'.txt',y)
            #sp.savetxt('%s_prs_'%(out_file_prefix)+str(p)+'.txt',risk_scores_pval_derived)
            #sp.savetxt('%s_prs-inf'%(out_file_prefix)+str(p)+'.txt',risk_scores_pval_derived_inf)
            #Now calibration
            denominator = sp.dot(risk_scores_pval_derived.T, risk_scores_pval_derived)
            y_norm = (y-sp.mean(y))/sp.std(y)
            numerator = sp.dot(risk_scores_pval_derived.T, y_norm)
            regression_slope = (numerator / denominator)#[0][0]
            # print 'The slope for predictions with P-value derived effects is:',regression_slope
            out.append('The slope for predictions with P-value derived effects is: '+str(regression_slope)+'\n')
            results_dict[p_str]['slope_pd']=regression_slope
            # Write the accuracy summaries and risk-score vectors.
            ff = open('%s_non_inf_auc_'%(out_file_prefix)+str(p)+'.txt',"w")
            ff.writelines(out)
            ff.close()
            ff_inf = open('%s_inf_auc_'%(out_file_prefix)+str(p)+'.txt',"w")
            ff_inf.writelines(out_inf)
            ff_inf.close()
            sp.savetxt('%s_prs_'%(out_file_prefix)+str(p)+'.txt',risk_scores_pval_derived)
            sp.savetxt('%s_prs-inf'%(out_file_prefix)+str(p)+'.txt',risk_scores_pval_derived_inf)
        # Per-SNP effect-size tables (raw, MCMC-updated, infinitesimal).
        weights_out_file = '%s_non_inf_betas_'%(out_file_prefix)+str(p)+'.txt'  ###################################
        with open(weights_out_file,'w') as f:
            f.write('chrom    pos    sid    nt1    nt2    raw_beta    AnnoPred_beta\n')
            for chrom, pos, sid, nt, raw_beta, annopred_beta in it.izip(chromosomes, out_positions, out_sids, out_nts, raw_effect_sizes, annopred_effect_sizes):
                nt1,nt2 = nt[0],nt[1]
                f.write('%s    %d    %s    %s    %s    %0.4e    %0.4e\n'%(chrom, pos, sid, nt1, nt2, raw_beta, annopred_beta))
        weights_out_file = '%s_inf_betas_'%(out_file_prefix)+str(p)+'.txt'
        with open(weights_out_file,'w') as f:
            f.write('chrom    pos    sid    nt1    nt2    raw_beta    AnnoPred_inf_beta \n')
            for chrom, pos, sid, nt, raw_beta, annopred_inf_beta in it.izip(chromosomes, out_positions, out_sids, out_nts, raw_effect_sizes, annopred_inf_effect_sizes):
                nt1,nt2 = nt[0],nt[1]
                f.write('%s    %d    %s    %s    %s    %0.4e    %0.4e\n'%(chrom, pos, sid, nt1, nt2, raw_beta, annopred_inf_beta))
def non_infinitesimal_mcmc(beta_hats, Pi, Sigi2, sig_12, start_betas=None, h2=None, n=1000, ld_radius=100, num_iter=60, burn_in=10, zero_jump_prob=0.05, ld_dict=None):
    """
    MCMC of non-infinitesimal model

    Gibbs sampler over a spike-and-slab prior: per SNP, Pi is the prior
    inclusion probability and Sigi2 the prior effect variance; sig_12 is the
    sampling variance of beta_hats (1/n).  ld_dict maps each SNP index to its
    LD (correlation) vector within ld_radius, as built by
    get_LDpred_ld_tables.  Returns a dict with 'betas' (posterior means
    averaged over post-burn-in iterations) and 'inf_betas' (the unchanged
    starting betas from the infinitesimal model).
    """
    m = len(beta_hats)
    # Copy so the sampler never mutates the caller's starting betas.
    curr_betas = sp.copy(start_betas)
    curr_post_means = sp.zeros(m)
    avg_betas = sp.zeros(m)
    # Iterating over effect estimates in sequential order
    iter_order = sp.arange(m)
    for k in range(num_iter):  #Big iteration
        #Force an alpha shrink if estimates are way off compared to heritability estimates. (Improves MCMC convergence.)
        h2_est = max(0.00001,sp.sum(curr_betas ** 2))
        alpha = min(1-zero_jump_prob, 1.0 / h2_est, (h2 + 1 / sp.sqrt(n)) / h2_est)
        rand_ps = sp.random.random(m)
        for i, snp_i in enumerate(iter_order):
            if Sigi2[snp_i]==0:
                # Zero prior variance: the SNP is excluded outright.
                curr_post_means[snp_i] = 0
                curr_betas[snp_i] = 0
            else:
                # Slab variance: prior effect variance given inclusion.
                hdmp = (Sigi2[snp_i]/Pi[snp_i])  #(h2 / Mp)
                hdmpn = hdmp + sig_12  #1.0 / n
                hdmp_hdmpn = (hdmp / hdmpn)
                # Mixture weights for the inclusion (c) / exclusion (d) parts.
                c_const = (Pi[snp_i] / sp.sqrt(hdmpn))
                d_const = (1 - Pi[snp_i]) / (sp.sqrt(sig_12))
                start_i = max(0, snp_i - ld_radius)
                focal_i = min(ld_radius, snp_i)
                stop_i = min(m, snp_i + ld_radius + 1)
                #Local LD matrix
                D_i = ld_dict[snp_i]
                #Local (most recently updated) effect estimates
                local_betas = curr_betas[start_i: stop_i]
                #Calculate the local posterior mean, used when sampling.
                # Zero out the focal SNP so only neighbors are regressed out.
                local_betas[focal_i] = 0
                res_beta_hat_i = beta_hats[snp_i] - sp.dot(D_i , local_betas)
                b2 = res_beta_hat_i ** 2
                d_const_b2_exp = d_const * sp.exp(-b2 / (2.0*sig_12))
                # Guard against overflow/NaN in the exponentials: fall back
                # to postp of 1 or 0 when a term is not a real number.
                if sp.isreal(d_const_b2_exp):
                    numerator = c_const * sp.exp(-b2 / (2.0 * hdmpn))
                    if sp.isreal(numerator):
                        if numerator == 0:
                            postp = 0
                        else:
                            # Posterior inclusion probability for this SNP.
                            postp = numerator / (numerator + d_const_b2_exp)
                            assert sp.isreal(postp), 'Posterior mean is not a real number?'
                    else:
                        postp = 0
                else:
                    postp = 1
                curr_post_means[snp_i] = hdmp_hdmpn * postp * res_beta_hat_i
                if rand_ps[i] < postp * alpha:
                    #Sample from the posterior Gaussian dist.
                    # NOTE(review): stats.norm.rvs takes a *scale* (std dev)
                    # as its second argument, but hdmp_hdmpn*sig_12 looks
                    # like a variance — confirm against the LDpred reference.
                    proposed_beta = stats.norm.rvs(0, (hdmp_hdmpn) * sig_12, size=1) + hdmp_hdmpn * res_beta_hat_i
                else:
                    #Sample 0
                    proposed_beta = 0
                curr_betas[snp_i] = proposed_beta  #UPDATE BETA
        if k >= burn_in:
            avg_betas += curr_post_means  #Averaging over the posterior means instead of samples.
    avg_betas = avg_betas/float(num_iter-burn_in)
    return {'betas':avg_betas, 'inf_betas':start_betas}
"""
p_dict = {'coord':None, 'ld_radius':None, 'local_ld_file_prefix':None, 'hfile':None, 'pfile':None, 'PS':None, 'out':None,
'N':None, 'num_iter': 60, 'H2':None, 'user_h2':None}
"""
def main(p_dict):
    # Entry point: computes (or loads cached) LD information, then runs
    # annopred_genomewide once per supplied prior file (user_h2 / hfile / pfile).
    # NOTE(review): Python 2 code (print statements, cPickle) — do not run under Py3.
    # Cache file name is derived from the LD-file prefix and the LD radius.
    local_ld_dict_file = '%s_ldradius%d.pickled.gz'%(p_dict['local_ld_file_prefix'], p_dict['ld_radius'])
    # Wrap the single prior fraction in a list; downstream code indexes PS[0].
    p_dict['PS'] = [p_dict['PS']]
    if not os.path.isfile(local_ld_dict_file):
        # No cache yet: compute LD tables chromosome by chromosome from the
        # coordinated HDF5 data file.
        df = h5py.File(p_dict['coord'])
        chrom_ld_scores_dict = {}
        chrom_ld_dict = {}
        chrom_ref_ld_mats = {}
        ld_score_sum = 0
        num_snps = 0
        print 'Calculating LD information w. radius %d'% p_dict['ld_radius']
        cord_data_g = df['cord_data']
        for chrom_str in cord_data_g.keys():
            print 'Working on %s'%chrom_str
            g = cord_data_g[chrom_str]
            if 'raw_snps_ref' in g.keys():
                raw_snps = g['raw_snps_ref'][...]
                snp_stds = g['snp_stds_ref'][...]
                snp_means = g['snp_means_ref'][...]
                #Filter monomorphic SNPs (zero std would divide by zero below)
                ok_snps_filter = snp_stds>0
                ok_snps_filter = ok_snps_filter.flatten()
                raw_snps = raw_snps[ok_snps_filter]
                snp_means = snp_means[ok_snps_filter]
                snp_stds = snp_stds[ok_snps_filter]
                n_snps = len(raw_snps)
                # Reshape to column vectors so broadcasting normalizes per SNP.
                snp_means.shape = (n_snps,1)
                snp_stds.shape = (n_snps,1)
                # Normalize SNPs..
                snps = sp.array((raw_snps - snp_means)/snp_stds,dtype='float32')
                assert snps.shape==raw_snps.shape, 'Array Shape mismatch'
                ret_dict = get_LDpred_ld_tables(snps, ld_radius=p_dict['ld_radius'], ld_window_size=2*p_dict['ld_radius'])
                chrom_ld_dict[chrom_str] = ret_dict['ld_dict']
                chrom_ref_ld_mats[chrom_str] = ret_dict['ref_ld_matrices']
                ld_scores = ret_dict['ld_scores']
                chrom_ld_scores_dict[chrom_str] = {'ld_scores':ld_scores, 'avg_ld_score':sp.mean(ld_scores)}
                ld_score_sum += sp.sum(ld_scores)
                num_snps += n_snps
        avg_gw_ld_score = ld_score_sum / float(num_snps)
        ld_scores_dict = {'avg_gw_ld_score': avg_gw_ld_score, 'chrom_dict':chrom_ld_scores_dict}
        print 'Done calculating the LD table and LD score, writing to file:', local_ld_dict_file
        print 'Genome-wide average LD score was:', ld_scores_dict['avg_gw_ld_score']
        ld_dict = {'ld_scores_dict':ld_scores_dict, 'chrom_ld_dict':chrom_ld_dict, 'chrom_ref_ld_mats':chrom_ref_ld_mats}
        # Persist the (potentially large) LD dictionary gzip-compressed.
        f = gzip.open(local_ld_dict_file, 'wb')
        cPickle.dump(ld_dict, f, protocol=2)
        f.close()
        print 'LD information is now pickled.'
    else:
        # Reuse the previously computed LD information.
        print 'Loading LD information from file: %s'%local_ld_dict_file
        f = gzip.open(local_ld_dict_file, 'r')
        ld_dict = cPickle.load(f)
        f.close()
    if p_dict['user_h2'] is not None:
        # Prior file format (whitespace-separated): chrom  snp_id  sigma_i^2
        # Per-SNP pi is taken from the single global PS value.
        print 'Starting calculation using user provided h2 files as priors'
        print 'Loading prior information from file: %s'%p_dict['user_h2']
        with open(p_dict['user_h2']) as f:
            data = f.readlines()
        prf_chr = sp.empty(len(data),dtype='int8')
        prf_sids = []
        prf_pi = sp.zeros(len(data))
        prf_sigi2 = sp.zeros(len(data))
        for i,line in enumerate(data):
            li = line.split()
            prf_chr[i] = int(li[0])
            prf_sids.append(li[1])
            prf_pi[i] = p_dict['PS'][0]
            prf_sigi2[i] = float(li[2])
        print 'The input prior p is: ', p_dict['PS']
        prf_sids = sp.array(prf_sids,dtype='str')
        prf = {}
        prf['chrom'] = prf_chr
        prf['sids'] = prf_sids
        prf['pi'] = prf_pi
        prf['sigi2'] = prf_sigi2
        out_user_h2_prefix = p_dict['out']+'_user_h2'
        annopred_genomewide(data_file=p_dict['coord'], out_file_prefix=out_user_h2_prefix, ps=p_dict['PS'], ld_radius=p_dict['ld_radius'],
                ld_dict = ld_dict, n=p_dict['N'], num_iter=p_dict['num_iter'], h2=p_dict['H2'], PRF = prf)
    else:
        #### no user-provided heritability files provided ####
        ##################### using hfile as prior #######################
        # Same file format as the user_h2 branch above, but sourced from hfile
        # and run without the explicit h2 argument.
        print 'Starting calculation using h2 files as priors'
        print 'Loading prior information from file: %s'%p_dict['hfile']
        with open(p_dict['hfile']) as f:
            data = f.readlines()
        prf_chr = sp.empty(len(data),dtype='int8')
        prf_sids = []
        prf_pi = sp.zeros(len(data))
        prf_sigi2 = sp.zeros(len(data))
        for i,line in enumerate(data):
            li = line.split()
            prf_chr[i] = int(li[0])
            prf_sids.append(li[1])
            prf_pi[i] = p_dict['PS'][0]
            prf_sigi2[i] = float(li[2])
        print 'The input prior p is: ', p_dict['PS']
        prf_sids = sp.array(prf_sids,dtype='str')
        prf = {}
        prf['chrom'] = prf_chr
        prf['sids'] = prf_sids
        prf['pi'] = prf_pi
        prf['sigi2'] = prf_sigi2
        out_h2_prefix = p_dict['out']+'_h2'
        #H2 = sp.sum(prf_sigi2)
        annopred_genomewide(data_file=p_dict['coord'], out_file_prefix=out_h2_prefix, ps=p_dict['PS'], ld_radius=p_dict['ld_radius'],
                ld_dict = ld_dict, n=p_dict['N'], num_iter=p_dict['num_iter'], PRF = prf)
        ##################### using pfile as prior #######################
        if p_dict['pfile'] is not None:
            # pfile format differs: chrom  snp_id  pi  sigma_i^2
            # (per-SNP pi comes from the file, not from PS).
            print 'Starting calculation using p_T files as priors'
            print 'Loading prior information from file: %s'%p_dict['pfile']
            with open(p_dict['pfile']) as f:
                data = f.readlines()
            prf_chr = sp.empty(len(data),dtype='int8')
            prf_sids = []
            prf_pi = sp.zeros(len(data))
            prf_sigi2 = sp.zeros(len(data))
            for i,line in enumerate(data):
                li = line.split()
                prf_chr[i] = int(li[0])
                prf_sids.append(li[1])
                prf_pi[i] = float(li[2])
                prf_sigi2[i] = float(li[3])
            print 'The input prior p is: ', p_dict['PS']
            prf_sids = sp.array(prf_sids,dtype='str')
            prf = {}
            prf['chrom'] = prf_chr
            prf['sids'] = prf_sids
            prf['pi'] = prf_pi
            prf['sigi2'] = prf_sigi2
            #H2 = sp.sum(prf_sigi2)
            out_pT_prefix = p_dict['out']+'_pT'
            annopred_genomewide(data_file=p_dict['coord'], out_file_prefix=out_pT_prefix, ps=p_dict['PS'], ld_radius=p_dict['ld_radius'],
                    ld_dict = ld_dict, n=p_dict['N'], num_iter=p_dict['num_iter'], PRF = prf)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import datetime
import logging
import os
import re
import time
from lib.bucket import BUCKET_ID
from lib.exceptions import EmptyDumpException, InvalidDumpException
from lib.exceptions import ObsoleteDumpVersionException, ParsingException
from lib.pageframe import PageFrame
from lib.range_dict import ExclusiveRangeDict
from lib.symbol import procfs
# Module-level logger shared by all classes and functions in this file.
LOGGER = logging.getLogger('dmprof')
# Heap Profile Dump versions
# DUMP_DEEP_[1-4] are obsolete.
# DUMP_DEEP_2+ distinct mmap regions and malloc chunks.
# DUMP_DEEP_3+ don't include allocation functions in their stack dumps.
# DUMP_DEEP_4+ support comments with '#' and global stats "nonprofiled-*".
# DUMP_DEEP_[1-2] should be processed by POLICY_DEEP_1.
# DUMP_DEEP_[3-4] should be processed by POLICY_DEEP_2 or POLICY_DEEP_3.
DUMP_DEEP_1 = 'DUMP_DEEP_1'
DUMP_DEEP_2 = 'DUMP_DEEP_2'
DUMP_DEEP_3 = 'DUMP_DEEP_3'
DUMP_DEEP_4 = 'DUMP_DEEP_4'
# Tuple used by the parsers to reject obsolete dump versions with
# ObsoleteDumpVersionException.
DUMP_DEEP_OBSOLETE = (DUMP_DEEP_1, DUMP_DEEP_2, DUMP_DEEP_3, DUMP_DEEP_4)
# DUMP_DEEP_5 doesn't separate sections for malloc and mmap.
# malloc and mmap are identified in bucket files.
# DUMP_DEEP_5 should be processed by POLICY_DEEP_4.
DUMP_DEEP_5 = 'DUMP_DEEP_5'
# DUMP_DEEP_6 adds a mmap list to DUMP_DEEP_5.
DUMP_DEEP_6 = 'DUMP_DEEP_6'
class Dump(object):
  """Represents a heap profile dump loaded from a ``*.<pid>.<count>.heap`` file.

  A dump consists of optional META information, an optional MMAP_LIST
  (DUMP_DEEP_6 only), GLOBAL_STATS, and STACKTRACES sections.  The parsing
  helpers below populate the corresponding private attributes.
  """
  # File name convention: <prefix>.<pid>.<sequence number>.heap
  _PATH_PATTERN = re.compile(r'^(.*)\.([0-9]+)\.([0-9]+)\.heap$')
  # One MMAP_LIST region line: "(begin)-(end) hooked|unhooked <details>".
  # Parentheses mark addresses inherited from the surrounding vma.
  _HOOK_PATTERN = re.compile(
      r'^ ([ \(])([a-f0-9]+)([ \)])-([ \(])([a-f0-9]+)([ \)])\s+'
      r'(hooked|unhooked)\s+(.+)$', re.IGNORECASE)
  # Details for hooked/unhooked regions (new and old formats).
  _HOOKED_PATTERN = re.compile(r'(?P<TYPE>.+ )?(?P<COMMITTED>[0-9]+) / '
                               '(?P<RESERVED>[0-9]+) @ (?P<BUCKETID>[0-9]+)')
  _UNHOOKED_PATTERN = re.compile(r'(?P<TYPE>.+ )?(?P<COMMITTED>[0-9]+) / '
                                 '(?P<RESERVED>[0-9]+)')
  _OLD_HOOKED_PATTERN = re.compile(r'(?P<TYPE>.+) @ (?P<BUCKETID>[0-9]+)')
  _OLD_UNHOOKED_PATTERN = re.compile(r'(?P<TYPE>.+) (?P<COMMITTED>[0-9]+)')
  # "Time:" meta line, either formatted ("Y/m/d H:M:S[.ms]") or raw seconds.
  _TIME_PATTERN_FORMAT = re.compile(
      r'^Time: ([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+)(\.[0-9]+)?')
  _TIME_PATTERN_SECONDS = re.compile(r'^Time: ([0-9]+)$')

  def __init__(self, path, modified_time):
    """Initializes an empty dump.

    Args:
      path: A file path string matching _PATH_PATTERN.
      modified_time: Fallback timestamp (st_mtime); may be overridden by a
          "Time:" line in the META section.
    """
    self._path = path
    matched = self._PATH_PATTERN.match(path)
    self._pid = int(matched.group(2))     # group(2): process id
    self._count = int(matched.group(3))   # group(3): dump sequence number
    self._time = modified_time
    self._map = {}  # {(start, end): ('hooked'|'unhooked', region_info)}
    self._procmaps = ExclusiveRangeDict(ProcMapsEntryAttribute)
    self._stacktrace_lines = []
    self._global_stats = {}  # used only in apply_policy
    self._run_id = ''
    self._pagesize = 4096
    self._pageframe_length = 0
    self._pageframe_encoding = ''
    self._has_pagecount = False
    self._version = ''
    self._lines = []

  @property
  def path(self):
    """The file path this dump was loaded from."""
    return self._path

  @property
  def count(self):
    """The dump sequence number taken from the file name."""
    return self._count

  @property
  def time(self):
    """Dump time in seconds since the epoch (float)."""
    return self._time

  @property
  def iter_map(self):
    """Yields ((start, end), (hook_state, region_info)) sorted by address."""
    for region in sorted(self._map.iteritems()):
      yield region[0], region[1]

  def iter_procmaps(self):
    """Yields (begin, end, attr) for every parsed /proc/maps range.

    Bug fix: this previously iterated ``self._map.iter_range()``, but
    ``self._map`` is a plain dict (no ``iter_range`` method), so the call
    always raised AttributeError.  The range data collected in
    _parse_mmap_list lives in ``self._procmaps`` (an ExclusiveRangeDict).
    """
    for begin, end, attr in self._procmaps.iter_range():
      yield begin, end, attr

  @property
  def iter_stacktrace(self):
    """Yields raw stacktrace lines extracted from the dump."""
    for line in self._stacktrace_lines:
      yield line

  def global_stat(self, name):
    """Returns a parsed global stat value; raises KeyError if absent."""
    return self._global_stats[name]

  @property
  def run_id(self):
    """The "RunID:" value from the META section ('' if absent)."""
    return self._run_id

  @property
  def pagesize(self):
    """Page size in bytes ("PageSize:" meta line; defaults to 4096)."""
    return self._pagesize

  @property
  def pageframe_length(self):
    """Page frame number length in bits (0 when no PFN data is present)."""
    return self._pageframe_length

  @property
  def pageframe_encoding(self):
    """Page frame encoding ('base64' or '')."""
    return self._pageframe_encoding

  @property
  def has_pagecount(self):
    """True if the dump carries per-pageframe counts."""
    return self._has_pagecount

  @staticmethod
  def load(path, log_header='Loading a heap profile dump: '):
    """Loads a heap profile dump.

    Args:
        path: A file path string to load.
        log_header: A preceding string for log messages.

    Returns:
        A loaded Dump object.

    Raises:
        ParsingException for invalid heap profile dumps.
    """
    dump = Dump(path, os.stat(path).st_mtime)
    with open(path, 'r') as f:
      dump.load_file(f, log_header)
    return dump

  def load_file(self, f, log_header):
    """Parses an open dump file object into this Dump.

    Comment lines ('#') are stripped up front; an empty dump is logged and
    ignored, any other ParsingException is logged and re-raised.
    """
    self._lines = [line for line in f
                   if line and not line.startswith('#')]
    try:
      self._version, ln = self._parse_version()
      self._parse_meta_information()
      if self._version == DUMP_DEEP_6:
        self._parse_mmap_list()
      self._parse_global_stats()
      self._extract_stacktrace_lines(ln)
    except EmptyDumpException:
      LOGGER.info('%s%s ...ignored an empty dump.' % (log_header, self._path))
    except ParsingException as e:  # 'as' form: valid on Python 2.6+ and 3.x
      LOGGER.error('%s%s ...error %s' % (log_header, self._path, e))
      raise
    else:
      LOGGER.info('%s%s (version:%s)' % (log_header, self._path, self._version))

  def _parse_version(self):
    """Parses a version string in self._lines.

    Returns:
        A pair of (a string representing a version of the stacktrace dump,
        and an integer indicating a line number next to the version string).

    Raises:
        ParsingException for invalid dump versions.
    """
    version = ''
    # Skip until an identifiable line.
    headers = ('STACKTRACES:\n', 'MMAP_STACKTRACES:\n', 'heap profile: ')
    if not self._lines:
      raise EmptyDumpException('Empty heap dump file.')
    (ln, found) = skip_while(
        0, len(self._lines),
        lambda n: not self._lines[n].startswith(headers))
    if not found:
      raise InvalidDumpException('No version header.')
    # Identify a version.
    if self._lines[ln].startswith('heap profile: '):
      # [13:] keeps the space before the version token; strip() removes it.
      version = self._lines[ln][13:].strip()
      if version in (DUMP_DEEP_5, DUMP_DEEP_6):
        (ln, _) = skip_while(
            ln, len(self._lines),
            lambda n: self._lines[n] != 'STACKTRACES:\n')
      elif version in DUMP_DEEP_OBSOLETE:
        raise ObsoleteDumpVersionException(version)
      else:
        raise InvalidDumpException('Invalid version: %s' % version)
    elif self._lines[ln] == 'STACKTRACES:\n':
      # DUMP_DEEP_[1-2] have no version line -- distinguished by header.
      raise ObsoleteDumpVersionException(DUMP_DEEP_1)
    elif self._lines[ln] == 'MMAP_STACKTRACES:\n':
      raise ObsoleteDumpVersionException(DUMP_DEEP_2)
    return (version, ln)

  def _parse_global_stats(self):
    """Parses lines in self._lines as global stats."""
    (ln, _) = skip_while(
        0, len(self._lines),
        lambda n: self._lines[n] != 'GLOBAL_STATS:\n')
    global_stat_names = [
        'total', 'absent', 'file-exec', 'file-nonexec', 'anonymous', 'stack',
        'other', 'nonprofiled-absent', 'nonprofiled-anonymous',
        'nonprofiled-file-exec', 'nonprofiled-file-nonexec',
        'nonprofiled-stack', 'nonprofiled-other',
        'profiled-mmap', 'profiled-malloc']
    for prefix in global_stat_names:
      # Stats appear in this fixed order; seek to each stat's line in turn.
      (ln, _) = skip_while(
          ln, len(self._lines),
          lambda n: self._lines[n].split()[0] != prefix)
      words = self._lines[ln].split()
      # Last two columns are virtual and committed sizes, in that order.
      self._global_stats[prefix + '_virtual'] = int(words[-2])
      self._global_stats[prefix + '_committed'] = int(words[-1])

  def _parse_meta_information(self):
    """Parses lines in self._lines for meta information.

    Recognized keys: Time, Reason, PageSize, CommandLine, PageFrame/PFN,
    RunID.  Stops at the MMAP_LIST: or GLOBAL_STATS: header.  Missing META
    section is not an error.
    """
    (ln, found) = skip_while(
        0, len(self._lines),
        lambda n: self._lines[n] != 'META:\n')
    if not found:
      return
    ln += 1
    while True:
      if self._lines[ln].startswith('Time:'):
        matched_seconds = self._TIME_PATTERN_SECONDS.match(self._lines[ln])
        matched_format = self._TIME_PATTERN_FORMAT.match(self._lines[ln])
        if matched_format:
          self._time = time.mktime(datetime.datetime.strptime(
              matched_format.group(1), '%Y/%m/%d %H:%M:%S').timetuple())
          if matched_format.group(2):
            # Fractional part like ".123" -- interpreted as milliseconds.
            self._time += float(matched_format.group(2)[1:]) / 1000.0
        elif matched_seconds:
          self._time = float(matched_seconds.group(1))
      elif self._lines[ln].startswith('Reason:'):
        pass  # Nothing to do for 'Reason:'
      elif self._lines[ln].startswith('PageSize: '):
        self._pagesize = int(self._lines[ln][10:])
      elif self._lines[ln].startswith('CommandLine:'):
        pass
      elif (self._lines[ln].startswith('PageFrame: ') or
            self._lines[ln].startswith('PFN: ')):
        if self._lines[ln].startswith('PageFrame: '):
          words = self._lines[ln][11:].split(',')
        else:
          words = self._lines[ln][5:].split(',')
        for word in words:
          if word == '24':
            self._pageframe_length = 24
          elif word == 'Base64':
            self._pageframe_encoding = 'base64'
          elif word == 'PageCount':
            self._has_pagecount = True
      elif self._lines[ln].startswith('RunID: '):
        self._run_id = self._lines[ln][7:].strip()
      elif (self._lines[ln].startswith('MMAP_LIST:') or
            self._lines[ln].startswith('GLOBAL_STATS:')):
        # Skip until "MMAP_LIST:" or "GLOBAL_STATS" is found.
        break
      else:
        pass  # Unknown meta keys are silently ignored.
      ln += 1

  def _parse_mmap_list(self):
    """Parses lines in self._lines as a mmap list.

    Fills self._map (region dict) and self._procmaps (/proc/maps ranges).
    NOTE: returns {} when the MMAP_LIST: header is absent and None
    otherwise; callers use it only for its side effects.
    """
    (ln, found) = skip_while(
        0, len(self._lines),
        lambda n: self._lines[n] != 'MMAP_LIST:\n')
    if not found:
      return {}
    ln += 1
    self._map = {}
    current_vma = {}
    pageframe_list = []
    while True:
      # A /proc/maps-style line introduces a new vma context.
      entry = procfs.ProcMaps.parse_line(self._lines[ln])
      if entry:
        current_vma = {}
        for _, _, attr in self._procmaps.iter_range(entry.begin, entry.end):
          for key, value in entry.as_dict().iteritems():
            attr[key] = value
            current_vma[key] = value
        ln += 1
        continue
      # " PF: ..." lines accumulate pageframes for the next region.
      if self._lines[ln].startswith('  PF: '):
        for pageframe in self._lines[ln][5:].split():
          pageframe_list.append(PageFrame.parse(pageframe, self._pagesize))
        ln += 1
        continue
      matched = self._HOOK_PATTERN.match(self._lines[ln])
      if not matched:
        break  # End of the MMAP_LIST section.
      # 2: starting address
      # 5: end address
      # 7: hooked or unhooked
      # 8: additional information
      if matched.group(7) == 'hooked':
        submatched = self._HOOKED_PATTERN.match(matched.group(8))
        if not submatched:
          submatched = self._OLD_HOOKED_PATTERN.match(matched.group(8))
      elif matched.group(7) == 'unhooked':
        submatched = self._UNHOOKED_PATTERN.match(matched.group(8))
        if not submatched:
          submatched = self._OLD_UNHOOKED_PATTERN.match(matched.group(8))
      else:
        assert matched.group(7) in ['hooked', 'unhooked']
      submatched_dict = submatched.groupdict()
      region_info = { 'vma': current_vma }
      if submatched_dict.get('TYPE'):
        region_info['type'] = submatched_dict['TYPE'].strip()
      if submatched_dict.get('COMMITTED'):
        region_info['committed'] = int(submatched_dict['COMMITTED'])
      if submatched_dict.get('RESERVED'):
        region_info['reserved'] = int(submatched_dict['RESERVED'])
      if submatched_dict.get('BUCKETID'):
        region_info['bucket_id'] = int(submatched_dict['BUCKETID'])
      # '(' means the address is inherited from the enclosing vma.
      if matched.group(1) == '(':
        start = current_vma['begin']
      else:
        start = int(matched.group(2), 16)
      if matched.group(4) == '(':
        end = current_vma['end']
      else:
        end = int(matched.group(5), 16)
      # Trim partially-covered first/last pageframes to the region bounds.
      if pageframe_list and pageframe_list[0].start_truncated:
        pageframe_list[0].set_size(
            pageframe_list[0].size - start % self._pagesize)
      if pageframe_list and pageframe_list[-1].end_truncated:
        pageframe_list[-1].set_size(
            pageframe_list[-1].size - (self._pagesize - end % self._pagesize))
      region_info['pageframe'] = pageframe_list
      pageframe_list = []
      self._map[(start, end)] = (matched.group(7), region_info)
      ln += 1

  def _extract_stacktrace_lines(self, line_number):
    """Extracts the position of stacktrace lines.

    Valid stacktrace lines are stored into self._stacktrace_lines.

    Args:
        line_number: A line number to start parsing in lines.

    Raises:
        ParsingException for invalid dump versions.
    """
    if self._version in (DUMP_DEEP_5, DUMP_DEEP_6):
      (line_number, _) = skip_while(
          line_number, len(self._lines),
          lambda n: not self._lines[n].split()[0].isdigit())
      stacktrace_start = line_number
      (line_number, _) = skip_while(
          line_number, len(self._lines),
          lambda n: self._check_stacktrace_line(self._lines[n]))
      self._stacktrace_lines = self._lines[stacktrace_start:line_number]
    elif self._version in DUMP_DEEP_OBSOLETE:
      raise ObsoleteDumpVersionException(self._version)
    else:
      raise InvalidDumpException('Invalid version: %s' % self._version)

  @staticmethod
  def _check_stacktrace_line(stacktrace_line):
    """Checks if a given stacktrace_line is valid as stacktrace.

    Args:
        stacktrace_line: A string to be checked.

    Returns:
        True if the given stacktrace_line is valid.
    """
    words = stacktrace_line.split()
    if len(words) < BUCKET_ID + 1:
      return False
    if words[BUCKET_ID - 1] != '@':
      return False
    return True
class DumpList(object):
  """An ordered, indexable collection of Dump objects."""

  def __init__(self, dump_list):
    # Keep a reference to the given sequence; no copy is made.
    self._dump_list = dump_list

  @staticmethod
  def load(path_list):
    """Loads every path in |path_list| as a Dump and wraps them."""
    LOGGER.info('Loading heap dump profiles.')
    return DumpList([Dump.load(path, '  ') for path in path_list])

  def __len__(self):
    return len(self._dump_list)

  def __iter__(self):
    return iter(self._dump_list)

  def __getitem__(self, index):
    return self._dump_list[index]
class ProcMapsEntryAttribute(ExclusiveRangeDict.RangeAttribute):
  """Represents an entry of /proc/maps in range_dict.ExclusiveRangeDict."""
  # Template entry: every new attribute starts as a copy of these neutral
  # placeholder values and is filled in by _parse_mmap_list.
  _DUMMY_ENTRY = procfs.ProcMapsEntry(
      0,     # begin
      0,     # end
      '-',   # readable
      '-',   # writable
      '-',   # executable
      '-',   # private
      0,     # offset
      '00',  # major
      '00',  # minor
      0,     # inode
      ''     # name
      )
  def __init__(self):
    super(ProcMapsEntryAttribute, self).__init__()
    # Per-instance dict of entry fields, seeded from the dummy template.
    self._entry = self._DUMMY_ENTRY.as_dict()
  def __str__(self):
    return str(self._entry)
  def __repr__(self):
    return 'ProcMapsEntryAttribute' + str(self._entry)
  def __getitem__(self, key):
    # Raises KeyError for unknown fields (plain dict lookup).
    return self._entry[key]
  def __setitem__(self, key, value):
    # Only fields present in the template may be assigned; new keys are
    # rejected so entries keep a fixed schema.
    if key not in self._entry:
      raise KeyError(key)
    self._entry[key] = value
  def copy(self):
    """Returns a deep copy of this attribute (values deep-copied)."""
    new_entry = ProcMapsEntryAttribute()
    for key, value in self._entry.iteritems():
      new_entry[key] = copy.deepcopy(value)
    return new_entry
def skip_while(index, max_index, skipping_condition):
  """Advances |index| past every position satisfying |skipping_condition|.

  Returns:
    A pair (line_number, found): |line_number| is the first index for which
    |skipping_condition| is False, and |found| is True when such an index
    was reached before exceeding |max_index| (otherwise the scan stops and
    found is False).
  """
  while True:
    if not skipping_condition(index):
      return index, True
    index += 1
    if index >= max_index:
      return index, False
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMessagePassing in the Ruby Koans
#
from runner.koan import *
class AboutAttributeAccess(Koan):
    """Koans exploring attribute access hooks:
    __getattribute__ (every lookup), __getattr__ (only missing attributes)
    and __setattr__ (every assignment).
    NOTE(review): Python 2 code -- `exception[0]` indexes the exception's
    args, which is not supported in Python 3.
    """
    class TypicalObject(object):
        pass
    def test_calling_undefined_functions_normally_results_in_errors(self):
        typical = self.TypicalObject()
        try:
            typical.foobar()
        except Exception as exception:
            self.assertEqual('AttributeError', exception.__class__.__name__)
            self.assertMatch("'TypicalObject' object has no attribute 'foobar'", exception[0])
    def test_calling_getattribute_causes_an_attribute_error(self):
        typical = self.TypicalObject()
        try:
            typical.__getattribute__('foobar')
        except AttributeError as exception:
            self.assertMatch("'TypicalObject' object has no attribute 'foobar'", exception[0])
    # THINK ABOUT IT:
    #
    # If the method __getattribute__() causes the AttributeError, then
    # what would happen if we redefine __getattribute__()?
    # ------------------------------------------------------------------
    class CatchAllAttributeReads(object):
        # Intercepts EVERY attribute read, even for attributes that exist.
        def __getattribute__(self, attr_name):
            return "Someone called '" + attr_name + \
                "' and it could not be found"
    def test_all_attribute_reads_are_caught(self):
        catcher = self.CatchAllAttributeReads()
        self.assertMatch("Someone called 'foobar' and it could not be found", catcher.foobar)
    def test_intercepting_return_values_can_disrupt_the_call_chain(self):
        catcher = self.CatchAllAttributeReads()
        self.assertMatch("Someone called 'foobaz' and it could not be found", catcher.foobaz) # This is fine
        try:
            catcher.foobaz(1)
        except TypeError as ex:
            self.assertMatch("'str' object is not callable", ex[0])
        # foobaz returns a string. What happens to the '(1)' part?
        # Try entering this into a python console to reproduce the issue:
        #
        #     "foobaz"(1)
        #
    def test_changing_getattribute_will_affect__the_getattr_function(self):
        # getattr() goes through __getattribute__ too.
        catcher = self.CatchAllAttributeReads()
        self.assertMatch("Someone called 'any_attribute' and it could not be found", getattr(catcher, 'any_attribute'))
    # ------------------------------------------------------------------
    class WellBehavedFooCatcher(object):
        # Only intercepts names starting with "foo"; everything else is
        # delegated to the normal lookup via super().
        def __getattribute__(self, attr_name):
            if attr_name[:3] == "foo":
                return "Foo to you too"
            else:
                return \
                    super(AboutAttributeAccess.WellBehavedFooCatcher, self). \
                    __getattribute__(attr_name)
    def test_foo_attributes_are_caught(self):
        catcher = self.WellBehavedFooCatcher()
        self.assertEqual("Foo to you too", catcher.foo_bar)
        self.assertEqual("Foo to you too", catcher.foo_baz)
    def test_non_foo_messages_are_treated_normally(self):
        catcher = self.WellBehavedFooCatcher()
        try:
            catcher.normal_undefined_attribute
        except AttributeError as ex:
            self.assertMatch("'WellBehavedFooCatcher' object has no attribute 'normal_undefined_attribute'", ex[0])
    # ------------------------------------------------------------------
    # Module-level-style global used to observe recursion depth from the tests.
    global stack_depth
    stack_depth = 0
    class RecursiveCatcher(object):
        def __init__(self):
            global stack_depth
            stack_depth = 0
            self.no_of_getattribute_calls = 0
        def __getattribute__(self, attr_name):
            #Uncomment for debugging info:
            #print 'Debug __getattribute__(' + type(self).__name__ + \
            #     "." + attr_name + ") dict=" + str(self.__dict__)
            # We need something that is outside the scope of this class:
            global stack_depth
            stack_depth += 1
            if stack_depth <= 10: # to prevent a stack overflow
                self.no_of_getattribute_calls += 1
                # Oops! We just accessed an attribute: no_of_getattribute_calls
                # Guess what happens when self.no_of_getattribute_calls is
                # accessed?
            # Using 'object' directly because using super() here will also
            # trigger a __getattribute__() call.
            return object.__getattribute__(self, attr_name)
        def my_method(self):
            pass
    def test_getattribute_is_a_bit_overzealous_sometimes(self):
        catcher = self.RecursiveCatcher()
        catcher.my_method()
        global stack_depth
        self.assertEqual(11, stack_depth)
    # ------------------------------------------------------------------
    class MinimalCatcher(object):
        class DuffObject(object):
            pass
        def __init__(self):
            self.no_of_getattr_calls = 0
        # __getattr__ fires only when normal lookup fails -- unlike
        # __getattribute__, which fires on every access.
        def __getattr__(self, attr_name):
            self.no_of_getattr_calls += 1
            return self.DuffObject
        def my_method(self):
            pass
    def test_getattr_ignores_known_attributes(self):
        catcher = self.MinimalCatcher()
        catcher.my_method()
        self.assertEqual(0, catcher.no_of_getattr_calls)
    def test_getattr_only_catches_unknown_attributes(self):
        catcher = self.MinimalCatcher()
        catcher.purple_flamingos()
        catcher.free_pie()
        self.assertEqual('DuffObject',
            catcher.give_me_duff_or_give_me_death().__class__.__name__)
        self.assertEqual(3, catcher.no_of_getattr_calls)
    # ------------------------------------------------------------------
    class PossessiveSetter(object):
        # Rewrites attribute names on assignment based on their suffix.
        def __setattr__(self, attr_name, value):
            new_attr_name = attr_name
            if attr_name[-5:] == 'comic':
                new_attr_name = "my_" + new_attr_name
            elif attr_name[-3:] == 'pie':
                new_attr_name = "a_" + new_attr_name
            object.__setattr__(self, new_attr_name, value)
    def test_setattr_intercepts_attribute_assignments(self):
        fanboy = self.PossessiveSetter()
        fanboy.comic = 'The Laminator, issue #1'
        fanboy.pie = 'blueberry'
        self.assertEqual("blueberry", fanboy.a_pie)
        #
        # NOTE: Change the prefix to make this next assert pass
        #
        prefix = 'my'
        self.assertEqual(
            "The Laminator, issue #1",
            getattr(fanboy, prefix + '_comic'))
    # ------------------------------------------------------------------
    class ScarySetter(object):
        def __init__(self):
            # These assignments also go through __setattr__ below, so the
            # non-underscore one is stored under an "altered_" name.
            self.num_of_coconuts = 9
            self._num_of_private_coconuts = 2
        def __setattr__(self, attr_name, value):
            new_attr_name = attr_name
            if attr_name[0] != '_':
                new_attr_name = "altered_" + new_attr_name
            object.__setattr__(self, new_attr_name, value)
    def test_it_modifies_external_attribute_as_expected(self):
        setter = self.ScarySetter()
        setter.e = "mc hammer"
        self.assertEqual("mc hammer", setter.altered_e)
    def test_it_mangles_some_internal_attributes(self):
        setter = self.ScarySetter()
        try:
            coconuts = setter.num_of_coconuts
        except AttributeError:
            self.assertEqual(9, setter.altered_num_of_coconuts)
    def test_in_this_case_private_attributes_remain_unmangled(self):
        setter = self.ScarySetter()
        self.assertEqual(2, setter._num_of_private_coconuts)
| |
# Copyright 2015 Alex Meade
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for manila.share.rpcapi.
"""
import copy
from oslo_config import cfg
from oslo_serialization import jsonutils
from manila.common import constants
from manila import context
from manila.share import rpcapi as share_rpcapi
from manila import test
from manila.tests import db_utils
# Global oslo.config object; used below for storage_availability_zone
# and share_topic.
CONF = cfg.CONF
class ShareRpcAPITestCase(test.TestCase):
    def setUp(self):
        """Builds JSON-serializable fixtures (share, snapshot, replica,
        group, server, network subnet) plus a request context and the
        ShareAPI instance under test."""
        super(ShareRpcAPITestCase, self).setUp()
        share = db_utils.create_share(
            availability_zone=CONF.storage_availability_zone,
            status=constants.STATUS_AVAILABLE
        )
        snapshot = db_utils.create_snapshot(share_id=share['id'])
        share_replica = db_utils.create_share_replica(
            id='fake_replica',
            share_id='fake_share_id',
            host='fake_host',
        )
        # Groups and group snapshots are plain dicts; no DB helper needed.
        share_group = {'id': 'fake_share_group_id', 'host': 'fake_host'}
        share_group_snapshot = {'id': 'fake_share_group_id'}
        host = 'fake_host'
        share_server = db_utils.create_share_server(host=host)
        share_network_subnet = {
            'availability_zone_id': 'fake_az_id',
            'neutron_net_id': 'fake_neutron_net_id',
            'neutron_subnet_id': 'fake_neutron_subnet_id',
            'ip_version': 4,
            'cidr': '127.0.0.0/28',
            'gateway': '127.0.0.1',
            'mtu': 1500,
            'network_type': 'vlan',
            'segmentation_id': 3000,
        }
        # Convert DB model objects to primitives, mirroring what RPC does.
        self.fake_share = jsonutils.to_primitive(share)
        # mock out the getattr on the share db model object since jsonutils
        # doesn't know about those extra attributes to pull in
        self.fake_share['instance'] = jsonutils.to_primitive(share.instance)
        self.fake_share_replica = jsonutils.to_primitive(share_replica)
        self.fake_snapshot = jsonutils.to_primitive(snapshot)
        self.fake_snapshot['share_instance'] = jsonutils.to_primitive(
            snapshot.instance)
        self.fake_share_server = jsonutils.to_primitive(share_server)
        self.fake_share_group = jsonutils.to_primitive(share_group)
        self.fake_share_group_snapshot = jsonutils.to_primitive(
            share_group_snapshot)
        self.fake_host = jsonutils.to_primitive(host)
        self.fake_share_network_subnet = jsonutils.to_primitive(
            share_network_subnet)
        self.ctxt = context.RequestContext('fake_user', 'fake_project')
        self.rpcapi = share_rpcapi.ShareAPI()
    def test_serialized_share_has_id(self):
        # Sanity check: jsonutils serialization keeps the primary key.
        self.assertIn('id', self.fake_share)
    def _test_share_api(self, method, rpc_method, **kwargs):
        """Generic driver for testing one ShareAPI RPC method.

        Rewrites **kwargs into the message the rpcapi layer is expected to
        send (objects replaced by their ids), derives the expected target
        host/topic/version, stubs out the RPC client's prepare/cast/call,
        then invokes self.rpcapi.<method> and compares what was sent.

        :param method: name of the ShareAPI method under test
        :param rpc_method: 'cast' (no return) or 'call' (returns 'foo')
        :param kwargs: arguments forwarded to the rpcapi method; also the
            basis for the expected message after the rewrites below
        """
        expected_retval = 'foo' if method == 'call' else None
        target = {
            "version": kwargs.pop('version', self.rpcapi.BASE_RPC_API_VERSION)
        }
        expected_msg = copy.deepcopy(kwargs)
        # Each rewrite mirrors how the real rpcapi flattens objects to ids.
        if 'share' in expected_msg and method != 'get_connection_info':
            share = expected_msg['share']
            del expected_msg['share']
            expected_msg['share_id'] = share['id']
        if 'share_instance' in expected_msg:
            share_instance = expected_msg.pop('share_instance', None)
            expected_msg['share_instance_id'] = share_instance['id']
        if 'share_group' in expected_msg:
            share_group = expected_msg['share_group']
            del expected_msg['share_group']
            expected_msg['share_group_id'] = share_group['id']
        if 'share_group_snapshot' in expected_msg:
            snap = expected_msg['share_group_snapshot']
            del expected_msg['share_group_snapshot']
            expected_msg['share_group_snapshot_id'] = snap['id']
        if 'host' in expected_msg:
            # 'host' selects the RPC target; it is not part of the message.
            del expected_msg['host']
        if 'snapshot' in expected_msg:
            snapshot = expected_msg['snapshot']
            del expected_msg['snapshot']
            expected_msg['snapshot_id'] = snapshot['id']
        if 'dest_host' in expected_msg:
            del expected_msg['dest_host']
            expected_msg['dest_host'] = self.fake_host
        if 'share_replica' in expected_msg:
            share_replica = expected_msg.pop('share_replica', None)
            expected_msg['share_replica_id'] = share_replica['id']
            expected_msg['share_id'] = share_replica['share_id']
        if 'replicated_snapshot' in expected_msg:
            snapshot = expected_msg.pop('replicated_snapshot', None)
            expected_msg['snapshot_id'] = snapshot['id']
            expected_msg['share_id'] = snapshot['share_id']
        if 'src_share_instance' in expected_msg:
            share_instance = expected_msg.pop('src_share_instance', None)
            expected_msg['src_instance_id'] = share_instance['id']
        if 'update_access' in expected_msg:
            share_instance = expected_msg.pop('share_instance', None)
            expected_msg['share_instance_id'] = share_instance['id']
        if 'snapshot_instance' in expected_msg:
            snapshot_instance = expected_msg.pop('snapshot_instance', None)
            expected_msg['snapshot_instance_id'] = snapshot_instance['id']
        # share_server is flattened differently depending on the method.
        share_server_id_methods = [
            'manage_share_server', 'unmanage_share_server',
            'share_server_migration_start', 'share_server_migration_check']
        src_dest_share_server_methods = [
            'share_server_migration_cancel',
            'share_server_migration_get_progress',
            'share_server_migration_complete']
        if ('share_server' in expected_msg
                and method in share_server_id_methods):
            share_server = expected_msg.pop('share_server', None)
            expected_msg['share_server_id'] = share_server['id']
        if ('share_server' in expected_msg
                and method in src_dest_share_server_methods):
            share_server = expected_msg.pop('share_server', None)
            expected_msg['src_share_server_id'] = share_server['id']
        if ('dest_share_server' in expected_msg
                and method in src_dest_share_server_methods):
            share_server = expected_msg.pop('dest_share_server', None)
            expected_msg['dest_share_server_id'] = share_server['id']
        # Determine the expected RPC target host, in priority order.
        if 'host' in kwargs:
            host = kwargs['host']
        elif 'share_group' in kwargs:
            host = kwargs['share_group']['host']
        elif 'share_instance' in kwargs:
            host = kwargs['share_instance']['host']
        elif 'share_server' in kwargs:
            host = kwargs['share_server']['host']
        elif 'share_replica' in kwargs:
            host = kwargs['share_replica']['host']
        elif 'replicated_snapshot' in kwargs:
            host = kwargs['share']['instance']['host']
        elif 'share' in kwargs:
            host = kwargs['share']['host']
        else:
            host = self.fake_host
        target['server'] = host
        target['topic'] = '%s.%s' % (CONF.share_topic, host)
        self.fake_args = None
        self.fake_kwargs = None
        def _fake_prepare_method(*args, **kwds):
            # Verify every kwarg passed to client.prepare() matches target.
            for kwd in kwds:
                self.assertEqual(target[kwd], kwds[kwd])
            return self.rpcapi.client
        def _fake_rpc_method(*args, **kwargs):
            # Capture the cast/call arguments for the assertions below.
            self.fake_args = args
            self.fake_kwargs = kwargs
            if expected_retval:
                return expected_retval
        self.mock_object(self.rpcapi.client, "prepare", _fake_prepare_method)
        self.mock_object(self.rpcapi.client, rpc_method, _fake_rpc_method)
        retval = getattr(self.rpcapi, method)(self.ctxt, **kwargs)
        self.assertEqual(expected_retval, retval)
        expected_args = [self.ctxt, method]
        for arg, expected_arg in zip(self.fake_args, expected_args):
            self.assertEqual(expected_arg, arg)
        for kwarg, value in self.fake_kwargs.items():
            self.assertEqual(expected_msg[kwarg], value)
    def test_create_share_instance(self):
        # create_share_instance casts to the explicit 'host' kwarg (v1.4).
        self._test_share_api('create_share_instance',
                             rpc_method='cast',
                             version='1.4',
                             share_instance=self.fake_share,
                             host='fake_host1',
                             snapshot_id='fake_snapshot_id',
                             filter_properties=None,
                             request_spec=None)
    def test_delete_share_instance(self):
        # delete_share_instance casts to the instance's host (v1.4).
        self._test_share_api('delete_share_instance',
                             rpc_method='cast',
                             version='1.4',
                             share_instance=self.fake_share,
                             force=False)
    def test_update_access(self):
        # update_access is a cast introduced in RPC API v1.14.
        self._test_share_api('update_access',
                             rpc_method='cast',
                             version='1.14',
                             share_instance=self.fake_share)
    def test_create_snapshot(self):
        # create_snapshot casts at the base RPC API version.
        self._test_share_api('create_snapshot',
                             rpc_method='cast',
                             share=self.fake_share,
                             snapshot=self.fake_snapshot)
    def test_delete_snapshot(self):
        # delete_snapshot casts to the explicit host at the base version.
        self._test_share_api('delete_snapshot',
                             rpc_method='cast',
                             snapshot=self.fake_snapshot,
                             host='fake_host',
                             force=False)
    def test_delete_share_server(self):
        # delete_share_server targets the share server's host.
        self._test_share_api('delete_share_server',
                             rpc_method='cast',
                             share_server=self.fake_share_server)
    def test_extend_share(self):
        # extend_share carries the new size and quota reservations (v1.2).
        self._test_share_api('extend_share',
                             rpc_method='cast',
                             version='1.2',
                             share=self.fake_share,
                             new_size=123,
                             reservations={'fake': 'fake'})
    def test_shrink_share(self):
        # shrink_share carries only the new size (v1.3).
        self._test_share_api('shrink_share',
                             rpc_method='cast',
                             version='1.3',
                             share=self.fake_share,
                             new_size=123)
    def test_create_share_group(self):
        # create_share_group casts to the explicit host kwarg (v1.16).
        self._test_share_api('create_share_group',
                             version='1.16',
                             rpc_method='cast',
                             share_group=self.fake_share_group,
                             host='fake_host1')
def test_delete_share_group(self):
    # Asynchronous cast, pinned at RPC API version 1.16.
    self._test_share_api('delete_share_group',
                         version='1.16',
                         rpc_method='cast',
                         share_group=self.fake_share_group)
def test_create_share_group_snapshot(self):
    # Asynchronous cast, pinned at RPC API version 1.16.
    self._test_share_api(
        'create_share_group_snapshot',
        version='1.16',
        rpc_method='cast',
        share_group_snapshot=self.fake_share_group_snapshot,
        host='fake_host1')
def test_delete_share_group_snapshot(self):
    # Asynchronous cast, pinned at RPC API version 1.16.
    self._test_share_api(
        'delete_share_group_snapshot',
        version='1.16',
        rpc_method='cast',
        share_group_snapshot=self.fake_share_group_snapshot,
        host='fake_host1')
def test_migration_start(self):
    # Asynchronous cast, pinned at RPC API version 1.15; exercises the
    # full set of migration option flags.
    self._test_share_api('migration_start',
                         rpc_method='cast',
                         version='1.15',
                         share=self.fake_share,
                         dest_host=self.fake_host,
                         force_host_assisted_migration=True,
                         preserve_metadata=True,
                         writable=True,
                         nondisruptive=False,
                         preserve_snapshots=True,
                         new_share_network_id='fake_net_id',
                         new_share_type_id='fake_type_id')
def test_connection_get_info(self):
    # Synchronous call, pinned at RPC API version 1.12.
    self._test_share_api('connection_get_info',
                         rpc_method='call',
                         version='1.12',
                         share_instance=self.fake_share)
def test_migration_complete(self):
    # Asynchronous cast, pinned at RPC API version 1.12.
    self._test_share_api('migration_complete',
                         rpc_method='cast',
                         version='1.12',
                         src_share_instance=self.fake_share['instance'],
                         dest_instance_id='new_fake_ins_id')
def test_migration_cancel(self):
    # Asynchronous cast, pinned at RPC API version 1.12.
    self._test_share_api('migration_cancel',
                         rpc_method='cast',
                         version='1.12',
                         src_share_instance=self.fake_share['instance'],
                         dest_instance_id='ins2_id')
def test_migration_get_progress(self):
    # Synchronous call (progress must be returned), version 1.12.
    self._test_share_api('migration_get_progress',
                         rpc_method='call',
                         version='1.12',
                         src_share_instance=self.fake_share['instance'],
                         dest_instance_id='ins2_id')
def test_delete_share_replica(self):
    # Asynchronous cast, pinned at RPC API version 1.8.
    self._test_share_api('delete_share_replica',
                         rpc_method='cast',
                         version='1.8',
                         share_replica=self.fake_share_replica,
                         force=False)
def test_promote_share_replica(self):
    # Asynchronous cast, pinned at RPC API version 1.8.
    self._test_share_api('promote_share_replica',
                         rpc_method='cast',
                         version='1.8',
                         share_replica=self.fake_share_replica)
def test_update_share_replica(self):
    # Asynchronous cast, pinned at RPC API version 1.8.
    self._test_share_api('update_share_replica',
                         rpc_method='cast',
                         version='1.8',
                         share_replica=self.fake_share_replica)
def test_manage_snapshot(self):
    # Asynchronous cast, pinned at RPC API version 1.9.
    self._test_share_api('manage_snapshot',
                         rpc_method='cast',
                         version='1.9',
                         snapshot=self.fake_snapshot,
                         host='fake_host',
                         driver_options={'volume_snapshot_id': 'fake'})
def test_unmanage_snapshot(self):
    # Asynchronous cast, pinned at RPC API version 1.9.
    self._test_share_api('unmanage_snapshot',
                         rpc_method='cast',
                         version='1.9',
                         snapshot=self.fake_snapshot,
                         host='fake_host')
def test_manage_share_server(self):
    # Asynchronous cast, pinned at RPC API version 1.19.
    self._test_share_api('manage_share_server',
                         rpc_method='cast',
                         version='1.19',
                         share_server=self.fake_share_server,
                         identifier='fake',
                         driver_opts={})
def test_unmanage_share_server(self):
    # Asynchronous cast, pinned at RPC API version 1.19.
    self._test_share_api('unmanage_share_server',
                         rpc_method='cast',
                         version='1.19',
                         share_server=self.fake_share_server,
                         force='fake_force')
def test_revert_to_snapshot(self):
    # Asynchronous cast, pinned at RPC API version 1.18.
    self._test_share_api('revert_to_snapshot',
                         rpc_method='cast',
                         version='1.18',
                         share=self.fake_share,
                         snapshot=self.fake_snapshot,
                         host='fake_host',
                         reservations={'fake': 'fake'})
def test_create_replicated_snapshot(self):
    # Asynchronous cast, pinned at RPC API version 1.11.
    self._test_share_api('create_replicated_snapshot',
                         rpc_method='cast',
                         version='1.11',
                         replicated_snapshot=self.fake_snapshot,
                         share=self.fake_share)
def test_delete_replicated_snapshot(self):
    # Asynchronous cast, pinned at RPC API version 1.11.
    self._test_share_api('delete_replicated_snapshot',
                         rpc_method='cast',
                         version='1.11',
                         replicated_snapshot=self.fake_snapshot,
                         share_id=self.fake_snapshot['share_id'],
                         force=False,
                         host='fake_host')
def test_provide_share_server(self):
    # Synchronous call (server id must be returned), version 1.12.
    self._test_share_api('provide_share_server',
                         rpc_method='call',
                         version='1.12',
                         share_instance=self.fake_share['instance'],
                         share_network_id='fake_network_id',
                         snapshot_id='fake_snapshot_id')
def test_create_share_server(self):
    # Asynchronous cast, pinned at RPC API version 1.20.
    self._test_share_api('create_share_server',
                         rpc_method='cast',
                         version='1.20',
                         share_instance=self.fake_share['instance'],
                         share_server_id='fake_server_id')
def test_snapshot_update_access(self):
    # Asynchronous cast, pinned at RPC API version 1.17.
    self._test_share_api('snapshot_update_access',
                         rpc_method='cast',
                         version='1.17',
                         snapshot_instance=self.fake_snapshot[
                             'share_instance'])
def test_share_server_migration_start(self):
    # Asynchronous cast, pinned at RPC API version 1.21.
    self._test_share_api('share_server_migration_start',
                         rpc_method='cast',
                         version='1.21',
                         share_server=self.fake_share_server,
                         dest_host=self.fake_host,
                         writable=True,
                         nondisruptive=False,
                         preserve_snapshots=True,
                         new_share_network_id='fake_share_network_id')
def test_share_server_migration_check(self):
    # Synchronous call (compatibility result returned), version 1.21.
    self._test_share_api('share_server_migration_check',
                         rpc_method='call',
                         version='1.21',
                         share_server_id=self.fake_share_server['id'],
                         dest_host=self.fake_host,
                         writable=True,
                         nondisruptive=False,
                         preserve_snapshots=True,
                         new_share_network_id='fake_net_id')
def test_share_server_migration_cancel(self):
    # Asynchronous cast, pinned at RPC API version 1.21.
    self._test_share_api('share_server_migration_cancel',
                         rpc_method='cast',
                         version='1.21',
                         dest_host=self.fake_host,
                         share_server=self.fake_share_server,
                         dest_share_server=self.fake_share_server)
def test_share_server_migration_get_progress(self):
    # Synchronous call (progress must be returned), version 1.21.
    self._test_share_api('share_server_migration_get_progress',
                         rpc_method='call',
                         version='1.21',
                         dest_host=self.fake_host,
                         share_server=self.fake_share_server,
                         dest_share_server=self.fake_share_server)
def test_share_server_migration_complete(self):
    # Asynchronous cast, pinned at RPC API version 1.21.
    self._test_share_api('share_server_migration_complete',
                         rpc_method='cast',
                         version='1.21',
                         dest_host=self.fake_host,
                         share_server=self.fake_share_server,
                         dest_share_server=self.fake_share_server)
def test_update_access_for_share_instances(self):
    # Asynchronous cast, pinned at RPC API version 1.21.
    self._test_share_api(
        'update_access_for_instances',
        rpc_method='cast',
        version='1.21',
        dest_host=self.fake_host,
        share_instance_ids=[self.fake_share['instance']['id']],
        share_server_id=self.fake_share_server['id'])
def test_update_share_network_security_service(self):
    # Asynchronous cast, pinned at RPC API version 1.22.
    self._test_share_api(
        'update_share_network_security_service',
        rpc_method='cast',
        version='1.22',
        dest_host=self.fake_host,
        share_network_id='fake_net_id',
        new_security_service_id='fake_sec_service_id',
        current_security_service_id='fake_sec_service_id')
def test_check_update_share_network_security_service(self):
    # Asynchronous cast, pinned at RPC API version 1.22.
    self._test_share_api('check_update_share_network_security_service',
                         rpc_method='cast',
                         version='1.22',
                         dest_host=self.fake_host,
                         share_network_id='fake_net_id',
                         new_security_service_id='fake_sec_service_id',
                         current_security_service_id='fake_sec_service_id')
def test_check_update_share_server_network_allocations(self):
    # Asynchronous cast, pinned at RPC API version 1.23.
    self._test_share_api(
        'check_update_share_server_network_allocations',
        rpc_method='cast',
        version='1.23',
        dest_host=self.fake_host,
        share_network_id='fake_net_id',
        new_share_network_subnet=self.fake_share_network_subnet)
def test_update_share_server_network_allocations(self):
    # Asynchronous cast, pinned at RPC API version 1.23.
    self._test_share_api(
        'update_share_server_network_allocations',
        rpc_method='cast',
        version='1.23',
        dest_host=self.fake_host,
        share_network_id='fake_net_id',
        new_share_network_subnet_id='new_share_network_subnet_id')
| |
# -*- coding: utf-8 -*-
# __author__ = 'dsedad'
from uuid import uuid4
import inspect
from flask_xadmin.xadm_salib import *
from flask import flash, current_app
from flask import redirect
from flask import request
from flask import url_for
from flask import session
from flask_admin.contrib.sqla import ModelView
from flask_admin.helpers import get_redirect_target
from flask_admin.form import FormOpts
from flask_admin.model.base import get_mdict_item_or_list
from flask_security import current_user, logout_user
from flask_admin import Admin, expose, AdminIndexView, BaseView
from sqlalchemy.ext.declarative import AbstractConcreteBase
from flask_admin.contrib.fileadmin import FileAdmin
from flask_xadmin.forms import EditModeForm
from flask_security.utils import get_url
from flask_security.utils import encrypt_password
from wtforms import PasswordField
#from config import XADMIN_ROLE, XADMIN_EDIT_ROLE
# Template paths used by every xadmin view, plus the default list page size.
LIST_TEMPLATE = 'admin/models/custom_list.html'
FILE_LIST_TEMPLATE = 'admin/files/custom_file_list.html'
DETAILS_TEMPLATE = 'admin/models/custom_details.html'
EDIT_TEMPLATE = 'admin/models/custom_edit.html'
CREATE_TEMPLATE = 'admin/models/custom_create.html'
PAGE_SIZE = 10
from flask_admin.contrib.sqla.form import AdminModelConverter
from flask_admin.model.form import converts
from wtforms import PasswordField
from wtforms import widgets
def xadmin_role():
    """Return the role name that grants read access to xadmin.

    Reads 'XADMIN_ROLE' from the app config; on first use with no value
    configured, stores and returns the default 'flask-xadmin'.
    """
    role = current_app.config.get('XADMIN_ROLE')
    if role is None:  # `is None` rather than `== None` (PEP 8)
        role = 'flask-xadmin'
        current_app.config['XADMIN_ROLE'] = role
    return role
def xadmin_edit_role():
    """Return the role name that grants edit access to xadmin.

    Reads 'XADMIN_EDIT_ROLE' from the app config; on first use with no
    value configured, stores and returns the default 'flask-xadmin-edit'.
    """
    role = current_app.config.get('XADMIN_EDIT_ROLE')
    if role is None:  # `is None` rather than `== None` (PEP 8)
        role = 'flask-xadmin-edit'
        current_app.config['XADMIN_EDIT_ROLE'] = role
    return role
def is_user_authenticated():
    """Wrapper for current_user.is_authenticated.

    Older Flask-Login/Flask-Security expose it as a method, newer versions
    as a property. Calling the property's bool raises TypeError, which is
    exactly the case we fall back on (narrowed from a bare ``except:``).
    :return: True when the current user is authenticated.
    """
    try:
        result = current_user.is_authenticated()
    except TypeError:
        # is_authenticated is a property (bool) in newer versions.
        result = current_user.is_authenticated
    return result
def is_super_admin():
    """True when the current user holds the xadmin read role."""
    required_role = xadmin_role()
    return current_user.has_role(required_role)
def is_super_admin_edit():
    """True when the current user holds the xadmin edit role."""
    required_role = xadmin_edit_role()
    return current_user.has_role(required_role)
class CustomPasswordInput(widgets.Input):
    """Password ``<input>`` widget used by CustomPasswordField.

    NOTE(review): ``hide_value`` is stored but never read in the visible
    code, and the base ``__init__`` is not called -- confirm against the
    wtforms PasswordInput behaviour this class replaces.
    """
    # HTML input type rendered by widgets.Input.
    input_type = 'password'

    def __init__(self, hide_value=True):
        self.hide_value = hide_value

    def __call__(self, field, **kwargs):
        # Pure pass-through: rendering is entirely delegated to widgets.Input.
        return super(CustomPasswordInput, self).__call__(field, **kwargs)
class CustomPasswordField(PasswordField):
    """Custom password field; per the original comment it should not hide
    the value.

    NOTE(review): the widget is built with its default ``hide_value=True``;
    confirm whether ``CustomPasswordInput(hide_value=False)`` was intended.
    """
    widget = CustomPasswordInput()
class PasswordAdminModelConverter(AdminModelConverter):
    """Model converter mapping 'Password' column types to CustomPasswordField
    so password values get the custom widget on forms."""
    @converts('Password')
    def conv_Password(self, field_args, **extra):
        # Only set a label when the caller did not provide one.
        field_args.setdefault('label', u'Password')
        return CustomPasswordField(**field_args)
def current_edit_mode():
    """Return the session's edit-mode flag (False when never set)."""
    mode = session.get('xadm_edit_mode', False)
    return mode
def set_edit_mode(mode):
    """Enable or disable session-wide EDIT mode.

    :param mode: truthy to enter edit mode, falsy to leave it.
    :raises Exception: when enabling without the xadmin edit role.
    """
    if mode:
        if not is_super_admin_edit():
            raise Exception(u'Not allowed')
        session['xadm_edit_mode'] = True
    else:
        # pop() with a default replaces the original get-then-pop dance;
        # it is a no-op when the flag was never set.
        session.pop('xadm_edit_mode', None)
class BaseClass(AbstractConcreteBase):
    """Shared access control and template configuration for xadmin views."""
    # Table page size for list views.
    page_size = PAGE_SIZE
    details_modal = False

    list_template = LIST_TEMPLATE
    details_template = DETAILS_TEMPLATE
    edit_template = EDIT_TEMPLATE
    create_template = CREATE_TEMPLATE

    def is_accessible(self):
        if is_user_authenticated():
            return is_super_admin()
        # Explicit False (was an implicit None) for consistency with
        # xEditModeView.is_accessible and xFileAdmin.is_accessible.
        return False

    # If the view is not accessible, redirect to the login page.
    def inaccessible_callback(self, name, **kwargs):
        return redirect(url_for('security.login', next=request.url))
class xModelView(BaseClass, ModelView):
    """Generic model view that auto-derives its configuration (columns,
    filters, search fields, formatters, form fields) from the SQLAlchemy
    model, and transparently encrypts password-typed columns on save."""
    column_display_pk = True
    read_only = False
    # When True, columns whose SA type name is _password_type_name are run
    # through encrypt_password() on change and rendered with the custom
    # password form field.
    encrypt_password_fields = True
    _password_type_name = 'password'

    def on_model_change(self, form, model_obj, is_created):
        """Encrypt every changed password-typed column before persisting."""
        if self.encrypt_password_fields:
            # We need the model class, not the model instance, to inspect
            # column types.
            model = inspect_sa(model_obj).mapper.class_
            keys = sa_type_keys(model, self._password_type_name)  # type name is lowercase
            for k in keys:
                if sa_column_changed(model_obj, k):
                    password_field = getattr(form, k)
                    setattr(model_obj, k, encrypt_password(password_field.data))

    def set_permissions(self, edit_mode):
        """edit_mode == True => allow edit, delete, create (unless the view
        is read_only). Details view is always allowed.
        :return: None
        """
        if not is_super_admin_edit():
            edit_mode = False
        # Both "not in edit mode" and "read-only view" disable writes; the
        # two original duplicate branches are collapsed into one flag.
        writable = bool(edit_mode) and not (hasattr(self, 'read_only') and self.read_only)
        self.can_create = writable
        self.can_edit = writable
        self.can_delete = writable
        self.can_view_details = True

    def doc(self):
        """Return the model's docstring for display in templates."""
        return inspect.getdoc(self.model).strip()

    @expose('/details/', methods=('GET', 'POST'))
    def details_view(self):
        """Render the details page using the edit form layout."""
        return_url = get_redirect_target() or self.get_url('.index_view')
        id = get_mdict_item_or_list(request.args, 'id')
        if id is None:
            return redirect(return_url)
        model = self.get_one(id)
        if model is None:
            return redirect(return_url)
        form = self.edit_form(obj=model)
        form_opts = FormOpts(widget_args=self.form_widget_args,
                             form_rules=self._form_edit_rules)
        self.on_form_prefill(form, id)
        return self.render(self.details_template,
                           model=model,
                           form=form,
                           form_opts=form_opts,
                           return_url=return_url)

    def scaffold_list_filters(self):
        """Filterable columns: plain columns first, then relationships.

        password/guid/largebinary columns are excluded because the custom
        fields used for them break flask-admin's filters.
        """
        res_cols = []   # plain columns
        res_rels = []   # relationships (no column type)
        for c in self.scaffold_list_columns():
            # Hoisted: sa_column_type was originally called twice per column.
            col_type = sa_column_type(self.model, c)
            if col_type is None:
                res_rels.append(c)
            elif col_type not in ('password', 'guid', 'largebinary'):
                res_cols.append(c)
        return res_cols + res_rels

    def get_form_columns(self, directions=None):
        """Form columns = scalar columns + *-to-many relationship keys."""
        if directions is None:
            # Was a mutable default argument; same effective default.
            directions = [MANYTOMANY, ONETOMANY]
        return self.scaffold_list_columns() + sa_relationships_keys(self.model, directions=directions)

    def get_column_searchable_list(self):
        return sa_column_searchable_list(self.model)

    def get_column_list(self):
        return self.scaffold_list_columns()

    def get_column_list_filters(self):
        return self.scaffold_list_filters()

    def get_column_descriptions(self):
        return sa_column_descriptions(self.model)

    def get_column_formatters(self):
        return gen_href_formatter(self.model)

    def get_column_details_list(self):
        return self.get_form_columns(directions=[MANYTOMANY, ONETOMANY])

    def __init__(self, *args, **kwargs):
        """Derive the view configuration from the model for every attribute
        the subclass did not already set."""
        self.model = kwargs.get('model')
        if not self.model:
            self.model = args[0]
        # getattr defaults added so a subclass that deleted an attribute
        # cannot raise AttributeError here.
        if not getattr(self, "column_formatters", None):
            self.column_formatters = dict(self.get_column_formatters())
        if not getattr(self, "column_descriptions", None):
            self.column_descriptions = self.get_column_descriptions()
        if not getattr(self, "column_filters", None):
            self.column_filters = self.get_column_list_filters()
        if not getattr(self, "column_list", None):
            self.column_list = self.get_column_list()
        if not getattr(self, "column_searchable_list", None):
            self.column_searchable_list = self.get_column_searchable_list()
        if not getattr(self, "form_columns", None):
            self.form_columns = self.get_form_columns(directions=[MANYTOMANY, ONETOMANY])
        if not getattr(self, "column_details_list", None):
            self.column_details_list = self.get_column_details_list()
        if self.encrypt_password_fields:
            self.model_form_converter = PasswordAdminModelConverter
        super(xModelView, self).__init__(*args, **kwargs)
class xAdminIndexView(AdminIndexView):
    """Admin index view that adds a logout endpoint which drops edit mode."""
    @expose('/')
    def index(self):
        # Override kept only so @expose registers the route on this class.
        return super(xAdminIndexView, self).index()

    @expose('/logout/')
    def logout_view(self):
        # Leave edit mode before logging out so the session flag cannot
        # survive into a later login.
        set_edit_mode(False)
        logout_user()
        return redirect(url_for('.index'))
class xEditModeView(BaseView):
    """Hidden view implementing an explicit enter/leave of EDIT mode."""

    def is_accessible(self):
        if is_user_authenticated():
            return is_super_admin()
        return False

    def is_visible(self):
        # Never shown in the admin menu; reached through direct links only.
        return False

    # If the view is not accessible, redirect to the login page.
    def inaccessible_callback(self, name, **kwargs):
        return redirect(url_for('security.login', next=request.url))

    @expose('/', methods=('GET', 'POST'))
    def change_mode(self):
        """Enter EDIT mode once the confirmation form validates, then
        redirect to the form's `next` target."""
        form = EditModeForm()
        if form.validate_on_submit():
            set_edit_mode(True)
            flash(u'You are in EDIT mode. Be wise and careful!')
            return redirect(form.next.data)
        form.next.data = get_url(request.args.get('next')) or '/'
        return self.render('admin/edit_mode.html', edit_mode_form=form)

    @expose('/leave_edit', methods=['GET'])
    def leave_edit(self):
        """Leave EDIT mode (best effort) and return to the referrer."""
        try:
            set_edit_mode(False)
        except Exception:
            # Best effort: leaving edit mode must never fail the request
            # (narrowed from a bare ``except:``).
            pass
        flash(u"You've left EDIT mode.")
        return redirect(request.referrer or '/')
# Custom base file admin class
class xFileAdmin(FileAdmin):
list_template = FILE_LIST_TEMPLATE
read_only = False
def doc(self):
return ""
def is_accessible(self):
if is_user_authenticated():
return is_super_admin()
return False
# if view is not accessible redirect to login page
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('security.login', next=request.url))
def set_permissions(self, edit_mode):
"""
edit_mode == True => allow edit, delete, create. Otherwise prevent edit, delete, create.
:return:
"""
if not (edit_mode):
self.can_download = True
self.can_mkdir = False
self.can_delete_dirs = False
self.can_delete = False
self.can_rename = False
self.can_upload = False
else:
if (hasattr(self, 'read_only') and self.read_only):
self.can_download = True
self.can_mkdir = False
self.can_delete_dirs = False
self.can_delete = False
self.can_rename = False
self.can_upload = False
else:
self.can_download = True
self.can_mkdir = True
self.can_delete_dirs = True
self.can_delete = True
self.can_rename = True
self.can_upload = True
# return dict(edit_mode=True)
| |
#!/usr/bin/env python
import mock
import os
import socket
import threading
import time
import unittest
from uhppote_rfid import ControllerSocket, SocketConnectionException, SocketTransmitException
class TestControllerSocket(unittest.TestCase):
    """Tests UHPPOTE socket transmission by emulating the control board's
    server on localhost.

    Fixes applied: deprecated ``assertEquals`` -> ``assertEqual``; Python-2-only
    ``except X, e`` -> ``except X as e`` (valid on 2.6+ and 3.x); the
    ``self.sockt`` typo in tearDown; locals named ``socket`` no longer shadow
    the ``socket`` module.
    """

    def setUp(self):
        """Run a server locally on port 60000 to listen for connections and
        respond accordingly."""
        self.server = socket.socket()
        self.server.bind(('127.0.0.1', 60000))
        self.server.listen(1)
        self.socket = ControllerSocket('127.0.0.1')

    def tearDown(self):
        """Cleanly shut down the test suite's server."""
        self.socket.close()
        self.socket = None  # was: self.sockt (typo left a dangling reference)
        self.server.close()
        self.server = None

    # --- Socket.__init__ ---------------------------------------------------

    def test_constructor_NegativePort_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('127.0.0.1', -1)

    def test_constructor_ZeroPort_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('127.0.0.1', 0)

    def test_constructor_LargePort_Exception(self):
        # One past the maximum valid TCP port number.
        with self.assertRaises(ValueError):
            ControllerSocket('127.0.0.1', 65535 + 1)

    def test_constructor_BlankPort_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('127.0.0.1', '')

    def test_constructor_NonIntStringPort_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('127.0.0.1', 'ab')

    def test_constructor_FloatPort_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('127.0.0.1', 1.1)

    def test_constructor_ByteArrayPort_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('127.0.0.1', bytearray([0, 5, 2]))

    def test_constructor_EmptyHost_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('')

    def test_constructor_IntegerHost_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket(55)

    def test_constructor_ByteArrayHost_TooLongException(self):
        # An IPv4 byte-array host must be exactly 4 octets.
        with self.assertRaises(ValueError):
            ControllerSocket(bytearray([127, 0, 0, 1, 5]))

    def test_constructor_ByteArrayHost_TooShortException(self):
        with self.assertRaises(ValueError):
            ControllerSocket(bytearray([127, 0, 0]))

    def test_constructor_NegativeIP01_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('-1.0.0.0')

    def test_constructor_NegativeIP02_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('0.-1.0.0')

    def test_constructor_NegativeIP03_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('0.0.-3.0')

    def test_constructor_NegativeIP04_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('0.0.0.-1')

    def test_constructor_TooLongHost_Exception(self):
        # Hostname longer than the maximum allowed length.
        with self.assertRaises(ValueError):
            ControllerSocket('longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.long')

    def test_constructor_BadChar_Exception(self):
        with self.assertRaises(ValueError):
            ControllerSocket('Hello*World')

    def test_constructor_DefaultPort_Valid(self):
        self.assertEqual(self.socket.getPort(), 60000)

    def test_constructor_IntegerPort_Valid(self):
        sock = ControllerSocket("127.0.0.1", 59)
        self.assertEqual(sock.getPort(), 59)

    def test_constructor_StringIntegerPort_Valid(self):
        sock = ControllerSocket("127.0.0.1", '128')
        self.assertEqual(sock.getPort(), 128)

    def test_constructor_StringHost_Valid(self):
        self.assertEqual(self.socket.getHost(), "127.0.0.1")

    def test_constructor_ByteArrayHost_Valid(self):
        sock = ControllerSocket(bytearray([127, 0, 0, 1]))
        self.assertEqual(sock.getHost(), "127.0.0.1")

    def test_constructor_DotAtEndHost_Valid(self):
        # A trailing dot (FQDN root) is stripped from the stored host.
        sock = ControllerSocket("localhost.")
        self.assertEqual(sock.getHost(), "localhost")

    # --- Socket.connect ----------------------------------------------------

    def test_connect_ZeroAttempts_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.connect(0)

    def test_connect_NegativeAttempts_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.connect(-1)

    def test_connect_DefaultAttemptsFail_Exception(self):
        sock = ControllerSocket('badhost')
        with self.assertRaises(SocketConnectionException):
            sock.connect()

    def test_connect_ConnectLocal_Success(self):
        try:
            self.socket.connect()
        except SocketConnectionException as e:
            self.fail("Unexpected SocketConnectionException raised: %s" % str(e))

    # --- Socket.close ------------------------------------------------------

    def test_close_CloseInactive_Success(self):
        try:
            self.socket.close()
        except Exception as e:
            self.fail("Unexpected Exception raised: %s" % str(e))

    def test_close_CloseActive_Success(self):
        self.socket.connect()
        try:
            self.socket.close()
        except Exception as e:
            self.fail("Unexpected Exception raised: %s" % str(e))

    def test_close_ClosedNotConnected_Success(self):
        self.assertFalse(self.socket.isConnected())
        self.socket.connect()
        self.assertTrue(self.socket.isConnected())
        self.socket.close()
        self.assertFalse(self.socket.isConnected())

    # --- Socket.send -------------------------------------------------------

    def test_send_Integer_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send(42)

    def test_send_Float_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send(4.2)

    def test_send_Complex_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send(complex(4, 2))

    def test_send_Tuple_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send((4, 2))

    def test_send_List_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send([4, 2])

    def test_send_Dict_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send({
                'a': 4,
                'b': 2,
            })

    def test_send_Set_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send(set([4, 2]))

    def test_send_FrozenSet_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send(frozenset([4, 2]))

    def test_send_EmptyString_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send('')

    def test_send_EmptyByteArray_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.send(bytearray())

    def test_send_EmptyBytes_Exception(self):
        # NOTE(review): bytes('') is Python-2-only spelling; kept because
        # the file targets Python 2 (b'' would be the portable form).
        with self.assertRaises(ValueError):
            self.socket.send(bytes(''))

    def test_send_ClosedSocket_Exception(self):
        self.socket.close()
        with self.assertRaises(SocketConnectionException):
            self.socket.send('hello')

    def test_send_Interrupt_Exception(self):
        # A send() returning 0 means the peer interrupted the transmission.
        with mock.patch('uhppote_rfid.controller_socket.socket'):
            mockSocket = ControllerSocket('127.0.0.1')
            mockSocket.socket.send.return_value = 0
            mockSocket.connect()
            with self.assertRaises(SocketTransmitException):
                mockSocket.send('hello')

    def test_send_String_Valid(self):
        data = 'Hello World'
        with mock.patch('uhppote_rfid.controller_socket.socket'):
            mockSocket = ControllerSocket('127.0.0.1')
            mockSocket.socket.send.return_value = len(data)
            mockSocket.connect()
            mockSocket.send(data)
            mockSocket.socket.send.assert_called_with(data)

    def test_send_ByteArray_Valid(self):
        data = bytearray(['h', 'e', 'l', 'l', 'o'])
        with mock.patch('uhppote_rfid.controller_socket.socket'):
            mockSocket = ControllerSocket('127.0.0.1')
            mockSocket.socket.send.return_value = len(data)
            mockSocket.connect()
            mockSocket.send(data)
            mockSocket.socket.send.assert_called_with(data)

    def test_send_Bytes_Valid(self):
        # NOTE(review): on Python 2, bytes is str, so bytes([10, 20, 30, 40])
        # is the literal string '[10, 20, 30, 40]' -- confirm intent.
        data = bytes([10, 20, 30, 40])
        with mock.patch('uhppote_rfid.controller_socket.socket'):
            mockSocket = ControllerSocket('127.0.0.1')
            mockSocket.socket.send.return_value = len(data)
            mockSocket.connect()
            mockSocket.send(data)
            mockSocket.socket.send.assert_called_with(data)

    # --- Socket.receive ----------------------------------------------------

    def test_receive_NegativeLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive(-1)

    def test_receive_ZeroLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive(0)

    def test_receive_NotMultipleOf8_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive(50)

    def test_receive_FloatLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive(8.8)

    def test_receive_ComplexLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive(complex(4, 2))

    def test_receive_TupleLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive((4, 2))

    def test_receive_ListLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive([4])

    def test_receive_DictLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive({
                'a': 1
            })

    def test_receive_SetLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive(set([4, 2]))

    def test_receive_FrozenSetLength_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive(frozenset([4, 2]))

    def test_receive_StringEmpty_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive('')

    def test_receive_StringAlpha_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive('a')

    def test_receive_StringZero_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive('0')

    def test_receive_StringNegative_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive('-1')

    def test_receive_StringNotMultipleOf8_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive('50')

    def test_receive_StringFloat_Exception(self):
        with self.assertRaises(ValueError):
            self.socket.receive('8.8')

    def test_receive_StringSize_Valid(self):
        pass

    def test_receive_ClosedSocket_Exception(self):
        self.socket.close()
        with self.assertRaises(SocketConnectionException):
            self.socket.receive()

    def test_receive_Cutoff_Exception(self):
        # An empty recv() result means the peer closed mid-transmission.
        with mock.patch('uhppote_rfid.controller_socket.socket'):
            mockSocket = ControllerSocket('127.0.0.1')
            mockSocket.socket.recv.return_value = ''
            mockSocket.connect()
            with self.assertRaises(SocketTransmitException):
                mockSocket.receive()

    def test_receive_DefaultLength_Valid(self):
        # Default receive length is 64 bytes: eight 8-byte chunks.
        arr = [1, 2, 3, 4, 5, 6, 7, 8]
        with mock.patch('uhppote_rfid.controller_socket.socket'):
            mockSocket = ControllerSocket('127.0.0.1')
            mockSocket.socket.recv.return_value = bytearray(arr)
            data = bytearray()
            for i in range(0, 8):
                data.extend(arr)
            mockSocket.connect()
            self.assertEqual(mockSocket.receive(), data)

    def test_receive_SetLength_Valid(self):
        data = bytearray([1, 2, 3, 4, 5, 6, 7, 8])
        with mock.patch('uhppote_rfid.controller_socket.socket'):
            mockSocket = ControllerSocket('127.0.0.1')
            mockSocket.socket.recv.return_value = data
            mockSocket.connect()
            self.assertEqual(mockSocket.receive(len(data)), data)
if __name__ == '__main__':
unittest.main()
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .Banners import *
from .DataModel import *
import sys
import pyperclip
class SearchWindow(QtWidgets.QDialog):
    """Modal search dialog: collects text (or a hex byte string) and runs it
    through a Searchable, centering itself over its parent widget."""
    def __init__(self, parent, plugin, searchable):
        super(SearchWindow, self).__init__(parent)
        self.searchable = searchable
        self.parent = parent
        self.plugin = plugin
        # Keep a handle to the base show(); self.show is overridden below.
        self.oshow = super(SearchWindow, self).show
        root = os.path.dirname(os.path.realpath(__file__))
        # Load the dialog layout from the .ui file next to this module.
        self.ui = loadUi(os.path.join(root, 'search.ui'), baseinstance=self)
        self.ui.setWindowTitle('Search')
        self._lastText = ''
        self.initUI()

    def show(self):
        # TODO: remember position? resize plugin windows when parent resize?
        width = self.ui.size().width() + 15
        height = self.ui.size().height() + 15
        # Center the dialog over the parent and restore the last search text.
        self.move((self.parent.width() - width) // 2, (self.parent.height() - height) // 2)
        self.ui.lineEdit.setText(self._lastText)
        self.ui.lineEdit.selectAll()
        self.oshow()

    def initUI(self):
        """One-time widget setup: fixed size, '/' shortcut, search button."""
        self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        # '/' closes the dialog (the same key that opens it elsewhere).
        shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("/"), self, self.close, self.close)
        self.ui.pushButton.clicked.connect(self.onClicked)
        width = self.ui.size().width() + 15
        height = self.ui.size().height() + 15
        self.setFixedSize(width, height)

    def onClicked(self):
        """Validate the input (hex mode: space-separated byte pairs), run the
        search and warn when the pattern is malformed or not found."""
        text = self.ui.lineEdit.text()
        text = str(text)
        hexstr = '0123456789abcdefABCDEF'
        if self.ui.checkHex.isChecked():
            T = text.split(' ')
            oldtext = text
            text = ''
            for t in T:
                # Every token must be exactly two hex digits.
                if len(t) != 2:
                    reply = QtWidgets.QMessageBox.warning(self, 'Qiew', "Hex string with errors.",
                                                          QtWidgets.QMessageBox.Ok)
                    self.close()
                    return
                if t[0] in hexstr and t[1] in hexstr:
                    o = int(t, 16)
                    text += chr(o)
                else:
                    reply = QtWidgets.QMessageBox.warning(self, 'Qiew', "Hex string with errors.",
                                                          QtWidgets.QMessageBox.Ok)
                    self.close()
                    return
            # Remember the raw (un-decoded) hex string for the next show().
            self._lastText = oldtext
        else:
            self._lastText = text
        if not self.ui.checkHex.isChecked():
            text = text.encode('utf-8')
        idx = self.searchable.search(text)
        if idx == -1:
            reply = QtWidgets.QMessageBox.warning(self, 'Qiew', "Nothing found.", QtWidgets.QMessageBox.Ok)
        # Redraw the parent view (the search may have moved the cursor).
        self.parent.viewMode.draw(refresh=True)
        self.close()
class Observable(object):
    """Minimal subject in an observer pattern: handlers are notified of
    view-mode changes via their changeViewMode() hook."""

    def __init__(self):
        self.Callbacks = []

    def addHandler(self, h):
        """Register *h*; duplicate registrations are ignored."""
        if h in self.Callbacks:
            return
        self.Callbacks.append(h)

    def notify(self, viewMode):
        """Push *viewMode* to every registered handler."""
        for handler in self.Callbacks:
            handler.changeViewMode(viewMode)
class Observer(object):
    """Base class for objects that need to track the active view mode."""

    def changeViewMode(self, viewMode):
        # Remember the currently active view mode for subclasses to use.
        self._viewMode = viewMode
class Searchable(Observer):
    """Incremental forward/backward text search over a DataModel's raw data.

    Matches are highlighted and scrolled into view through the current view
    mode. Fix: the Python-2-only ``string.find``/``string.rfind`` module
    functions (``string`` was not even imported here) are replaced with the
    equivalent ``str.find``/``str.rfind`` methods, which behave identically
    on both Python 2 and 3.
    """

    def __init__(self, dataModel, viewMode):
        self._viewMode = viewMode
        self._dataModel = dataModel
        self._lastIdx = -1      # offset of the most recent hit
        self._lastText = ''     # most recent search text

    def next(self, start=None):
        """Search forward for the last text, from *start* or just past the
        previous hit."""
        data = self._dataModel.getData()
        text = self._lastText
        idx = start if start else self._lastIdx + 1
        if idx > -1:
            self._search(data, text, idx)

    @property
    def lastText(self):
        return self._lastText

    def previous(self, start=None):
        """Search backward for the last text, from *start* or the previous
        hit."""
        data = self._dataModel.getData()
        text = self._lastText
        idx = start if start else self._lastIdx
        if idx > -1:
            self._search(data, text, idx, previous=True)

    def _search(self, data, text, start, previous=False):
        """Locate *text* in *data* from offset *start*; highlight and jump to
        the match. Returns the match offset, or -1 when not found."""
        self._lastText = text
        if text == '':
            return -1
        # Also match the NUL-interleaved form (UTF-16-LE style text).
        text1 = '\0'.join(text)
        if not previous:
            idx1 = data.find(text, start)
            idx2 = data.find(text1, start)
            idx = idx1
            if idx1 == -1:
                idx = idx2
            elif idx2 != -1 and idx2 < idx1:
                idx = idx2  # prefer whichever variant occurs first
        else:
            idx1 = data.rfind(text, 0, start)
            idx2 = data.rfind(text1, 0, start)
            idx = idx1
            if idx1 == -1:
                idx = idx2
            elif idx2 != -1 and idx2 > idx1:
                idx = idx2  # prefer whichever variant occurs last
        if idx > -1:
            self._lastIdx = idx
            self._viewMode.selector.addSelection(
                (idx, idx + len(text),
                 QtGui.QBrush(QtGui.QColor(125, 0, 100)), 0.8),
                type=TextSelection.SelectionType.NORMAL)
            self._viewMode.goTo(idx)
        return idx

    def search(self, text):
        """Search for *text* from the beginning of the data."""
        data = self._dataModel.getData()
        return self._search(data, text, 0)
class binWidget(QtWidgets.QWidget, Observable):
    """Top-level binary-viewer widget.

    Hosts the data model's view modes (switchable with 'v'), the address
    banners, and the search window.  Acts as the Observable of the
    view-mode observer pattern: the Searchable and the Banners are
    registered as handlers and are notified whenever the active view
    mode changes.
    """

    # Qt signal for external scroll synchronization.
    scrolled = QtCore.pyqtSignal(int, name='scroll')

    def __init__(self, parent, source, title):
        # *source* is the data model backing every view mode; *title* is
        # kept for the owner (presumably a tab caption -- confirm).
        super(binWidget, self).__init__()
        Observable.__init__(self)
        self.parent = parent
        self.title = title
        self.active = False
        # offset for text window
        # self.data = mapped
        self.dataOffset = 0
        self.dataModel = source
        self.cursor = Cursor(0, 0)
        # Default color/font theme shared by the views and the banners.
        self.themes = {
            'font': QtGui.QFont('Monaco', 9, QtGui.QFont.Light),
            'background': QtGui.QColor(0x00, 0x2b, 0x36),
            'background_cursor': QtGui.QColor(255, 255, 0),
            'selection': QtGui.QColor(125, 255, 0),
            'pen': QtGui.QColor(0xb5, 0x89, 0x00)
        }
        # Instantiate every view mode the data model offers; the first one
        # becomes the active view.
        self.multipleViewModes = []
        for view_mode in self.dataModel.GetViews():
            v = view_mode(self.themes, self.size().width(), self.size().height(), self.dataModel, self.cursor, self)
            textDecorator = HighlightASCII(TextDecorator(v))
            v.setTransformationEngine(textDecorator)
            self.multipleViewModes.append(v)
        self.viewMode = self.multipleViewModes[0]
        self.Banners = Banners()
        self.Banners.add(FileAddrBanner(self.themes, self.dataModel, self.viewMode))
        self.Banners.add(TopBanner(self.themes, self.dataModel, self.viewMode))
        self.Banners.add(BottomBanner(self.themes, self.dataModel, self.viewMode))
        self.offsetWindow_h = 0
        self.offsetWindow_v = 0
        self.searchable = Searchable(self.dataModel, self.viewMode)
        self.initUI()
        self.searchWindow = SearchWindow(self, None, self.searchable)
        # Search and banners must track view-mode switches.
        self.addHandler(self.searchable)
        self.addHandler(self.Banners)
        self.notify(self.viewMode)

    def enable(self):
        """Allow eventFilter to process key events for this widget."""
        self.active = True

    def disable(self):
        """Ignore key events until enable() is called again."""
        self.active = False

    def scroll_from_outside(self, i):
        """Slot for the external scroll signal; only schedules a repaint."""
        # print 'slot-signal ' + str(i)
        # self.scroll_pdown = True
        self.update()

    def initUI(self):
        """Basic Qt widget setup: focus policy, minimum size, focus."""
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setMinimumSize(1, 30)
        self.activateWindow()
        self.setFocus()

    def switchViewMode(self):
        """Rotate to the next view mode and notify all observers."""
        self.multipleViewModes = self.multipleViewModes[1:] + [self.multipleViewModes[0]]
        self.viewMode = self.multipleViewModes[0]
        # notify observers
        self.notify(self.viewMode)

    def _resize(self):
        """Resize banners, then the view, subtracting the banners' space."""
        self.Banners.resize(self.size().width() - self.offsetWindow_h, self.size().height() - self.offsetWindow_v)
        # compute space occupied by banners
        offsetLeft = self.offsetWindow_h + self.Banners.getLeftOffset()
        offsetBottom = self.offsetWindow_v + self.Banners.getBottomOffset() + self.Banners.getTopOffset()
        # resize window, subtract space occupied by banners
        self.viewMode.resize(self.size().width() - offsetLeft, self.size().height() - offsetBottom)

    # event handlers
    def resizeEvent(self, e):
        self._resize()

    def paintEvent(self, e):
        """Blit the active view's cached pixmap, then draw the banners."""
        qp = QtGui.QPainter()
        qp.begin(self)
        qp.setOpacity(1)
        offsetLeft = self.offsetWindow_h + self.Banners.getLeftOffset()
        offsetBottom = self.offsetWindow_v + self.Banners.getTopOffset()
        # self.viewMode.draw2(qp, refresh=True)
        # start = time()
        qp.drawPixmap(offsetLeft, offsetBottom, self.viewMode.getPixmap())
        # print 'Draw ' + str(time() - start)
        self.Banners.draw(qp, self.offsetWindow_h, self.offsetWindow_v, self.size().height())
        qp.end()

    def eventFilter(self, watched, event):
        """Dispatch key events to global shortcuts and the active view.

        Returns True only when a KeyPress was consumed by the view mode.
        NOTE(review): the ``del pyperclip`` statements below make
        ``pyperclip`` local to this function, so the earlier
        ``pyperclip.copy``/``paste`` calls would raise UnboundLocalError
        unless an ``import pyperclip`` was dropped from this chunk --
        confirm against the full file.
        """
        if not self.active:
            return False
        if event.type() == QtCore.QEvent.KeyRelease:
            key = event.key()
            modifiers = event.modifiers()
            if self.viewMode.handleKeyEvent(modifiers, key, event=event):
                self.update()
        if event.type() == QtCore.QEvent.KeyPress:
            # TODO: should we accept only certain keys ?
            key = event.key()
            modifiers = event.modifiers()
            # F2: toggle edit mode (when the view supports editing).
            if key == QtCore.Qt.Key_F2:
                if self.viewMode.isEditable():
                    if self.viewMode.isInEditMode():
                        self.viewMode.setEditMode(False)
                    else:
                        self.viewMode.setEditMode(True)
                    self.viewMode.draw(refresh=False)
            # switch view mode
            if key == QtCore.Qt.Key_V:
                print('SWITCH VIEW')
                # Preserve the absolute cursor position across the switch.
                offs = self.viewMode.getCursorOffsetInPage()
                base = self.viewMode.getDataModel().getOffset()
                self.switchViewMode()
                self._resize()
                self.viewMode.goTo(base + offs)
                self.update()
            if key == QtCore.Qt.Key_S:
                print('OPEN SOURCE')
                self.parent.openSourceWindow(self.dataModel.current_class)
            # Ctrl+Insert: copy current selection to clipboard as hex text.
            if event.modifiers() & QtCore.Qt.ControlModifier:
                if key == QtCore.Qt.Key_Insert:
                    if self.viewMode.selector.getCurrentSelection():
                        a, b = self.viewMode.selector.getCurrentSelection()
                        # print a, b
                        hx = ''
                        for s in self.dataModel.getStream(a, b):
                            hx += '{:02x}'.format(s)
                        pyperclip.copy(hx)
                        del pyperclip
                        # print pyperclip.paste()
                        # print 'coppied'
            # Shift+Insert: paste clipboard hex text into the data model.
            if event.modifiers() & QtCore.Qt.ShiftModifier:
                if key == QtCore.Qt.Key_Insert:
                    import re
                    hx = pyperclip.paste()
                    # print hx
                    L = re.findall(r'.{1,2}', hx, re.DOTALL)
                    array = ''
                    for s in L:
                        array += chr(int(s, 16))
                    # print 'write '
                    # print 'write'
                    # print array
                    self.dataModel.write(0, array)
                    self.viewMode.draw(True)
                    del pyperclip
                    # print array
            if key == QtCore.Qt.Key_F4:
                self.unp = WUnpack(self, None)
                self.unp.show()
            if key == QtCore.Qt.Key_F10:
                # Flush pending edits before opening the headers window.
                self.dataModel.flush()
                self.w = WHeaders(self, None)
                self.w.show()
            # Search shortcuts are disabled while editing.
            if not self.viewMode.isInEditMode():
                if key == QtCore.Qt.Key_Slash:
                    self.searchWindow.show()
                if key == QtCore.Qt.Key_N:
                    self.searchable.next(self.viewMode.getCursorAbsolutePosition() + 1)
                if key == QtCore.Qt.Key_B:
                    self.searchable.previous(self.viewMode.getCursorAbsolutePosition())
            # handle keys to view plugin
            if self.viewMode.handleKeyEvent(modifiers, key, event=event):
                event.accept()
                self.update()
                return True
        return False

    def setTextViewport(self, qp):
        """Restrict *qp* to the text area, honoring the window offsets."""
        qp.setViewport(self.offsetWindow_h, self.offsetWindow_v, self.size().width(), self.size().height())
        qp.setWindow(0, 0, self.size().width(), self.size().height())

    def needsSave(self):
        """True when the data model has unflushed modifications."""
        return self.dataModel.isDirty()

    def save(self):
        """Flush pending modifications to the backing store."""
        return self.dataModel.flush()
| |
# kytten/frame.py
# Copyright (C) 2009 by Conrad "Lynx" Wong
# Classes which wrap one Widget.
# Wrapper: a base class for Widgets which contain one other Widget.
# Frame: positions its contained Widget within a graphic, which it stretches
# to cover the Widget's area, or the space within which it is contained.
# TitleFrame: like Frame, but has a title region on top as well.
from widgets import Widget, Control, Graphic, Label
from layout import HorizontalLayout, VerticalLayout, GetRelativePoint
from layout import VALIGN_BOTTOM, HALIGN_LEFT, HALIGN_CENTER, HALIGN_RIGHT
from layout import ANCHOR_CENTER
class Wrapper(Widget):
    """
    Base class for Widgets that enclose exactly one other Widget.

    The plain Wrapper adds no decoration of its own; subclasses build on
    it, e.g. Panel places the wrapped Widget on a panel graphic and
    Scrollable adds scrollbars so it can be panned.
    """

    def __init__(self, content=None,
                 is_expandable=False, anchor=ANCHOR_CENTER, offset=(0, 0)):
        """
        Wrap the given Widget.

        @param content The Widget to be wrapped.
        """
        Widget.__init__(self)
        self.content = content
        self.expandable = is_expandable
        self.anchor = anchor
        self.content_offset = offset

    def _get_controls(self):
        """Delegates Control discovery to the wrapped Widget."""
        return self.content._get_controls()

    def delete(self):
        """Deletes the wrapped Widget's graphics, then our own."""
        content = self.content
        if content is not None:
            content.delete()
        Widget.delete(self)

    def expand(self, width, height):
        """Grow to the given size, expanding the content if it allows it."""
        if self.content.is_expandable():
            self.content.expand(width, height)
        self.width, self.height = width, height

    def is_expandable(self):
        """Whether this Wrapper was created as expandable."""
        return self.expandable

    def layout(self, x, y):
        """
        Assign a new position and re-anchor the content inside it.

        @param x X coordinate of the Wrapper's lower left corner
        @param y Y coordinate of the Wrapper's lower left corner
        """
        Widget.layout(self, x, y)
        if self.content is not None:
            cx, cy = GetRelativePoint(
                self, self.anchor,
                self.content, self.anchor, self.content_offset)
            self.content.layout(cx, cy)

    def set(self, dialog, content):
        """
        Replace the wrapped Widget and request a relayout.

        @param dialog The Dialog which contains the Wrapper
        @param content The new Widget to be wrapped
        """
        if self.content is not None:
            self.content.delete()
        self.content = content
        dialog.set_needs_layout()

    def size(self, dialog):
        """
        Size the Wrapper snugly around its content (0x0 when empty).

        @param dialog The Dialog which contains the Wrapper
        """
        if dialog is None:
            return
        Widget.size(self, dialog)
        if self.content is None:
            self.width = self.height = 0
        else:
            self.content.size(dialog)
            self.width, self.height = self.content.width, self.content.height

    def teardown(self):
        """Tears down the content and detaches from it."""
        self.content.teardown()
        self.content = None
        Widget.teardown(self)
class Frame(Wrapper):
    """
    Draws an untitled frame graphic stretched around the wrapped content.
    """

    def __init__(self, content=None, path=['frame'], image_name='image',
                 is_expandable=False, anchor=ANCHOR_CENTER,
                 use_bg_group=False):
        """
        Create a Frame surrounding a widget or layout.
        """
        Wrapper.__init__(self, content,
                         is_expandable=is_expandable, anchor=anchor)
        self.frame = None
        self.path = path
        self.image_name = image_name
        self.use_bg_group = use_bg_group

    def delete(self):
        """
        Drop the frame graphic, then the wrapped content's graphics.
        """
        if self.frame is not None:
            self.frame.delete()
            self.frame = None
        Wrapper.delete(self)

    def expand(self, width, height):
        """Grow to the given size; the content gets the frame's interior."""
        if self.content.is_expandable():
            inner_w, inner_h = \
                self.frame.get_content_size(width, height)
            self.content.expand(inner_w, inner_h)
        self.width, self.height = width, height

    def layout(self, x, y):
        """
        Position the Frame and its content.

        @param x X coordinate of lower left corner
        @param y Y coordinate of lower left corner
        """
        self.x, self.y = x, y
        self.frame.update(x, y, self.width, self.height)
        # The frame graphic may allocate more room than the content needs
        # (repeating-texture constraints), so always center the content
        # within the actual interior region.
        ix, iy, iw, ih = self.frame.get_content_region()
        interior = Widget(iw, ih)
        interior.x, interior.y = ix, iy
        cx, cy = GetRelativePoint(interior, self.anchor,
                                  self.content, self.anchor,
                                  self.content_offset)
        self.content.layout(cx, cy)

    def size(self, dialog):
        """
        Determine the minimum size of the Frame; lazily creates the frame
        graphic on first sizing.

        @param dialog Dialog which contains the Frame
        """
        if dialog is None:
            return
        Wrapper.size(self, dialog)
        if self.frame is None:
            group = dialog.bg_group if self.use_bg_group else dialog.panel_group
            template = dialog.theme[self.path][self.image_name]
            self.frame = template.generate(
                dialog.theme[self.path]['gui_color'],
                dialog.batch,
                group)
        self.width, self.height = self.frame.get_needed_size(
            self.content.width, self.content.height)
class TitleFrame(VerticalLayout):
    """A Frame with a title bar stacked above its content frame."""

    def __init__(self, title, content):
        # The title bar: left cap, centered label, right cap.
        title_bar = HorizontalLayout([
            Graphic(path=["titlebar", "left"], is_expandable=True),
            Frame(Label(title, path=["titlebar"]),
                  path=["titlebar", "center"]),
            Graphic(path=["titlebar", "right"], is_expandable=True),
        ], align=VALIGN_BOTTOM, padding=0)
        # The framed body sits directly below the title bar.
        body = Frame(content, path=["titlebar", "frame"], is_expandable=True)
        VerticalLayout.__init__(self, content=[title_bar, body], padding=0)
class SectionHeader(HorizontalLayout):
    """
    A horizontal divider with an embedded title label; the rule segments
    on either side stretch according to the requested alignment.
    """

    def __init__(self, title, align=HALIGN_CENTER):
        # Only the side(s) opposite the label's alignment stretch.
        if align == HALIGN_LEFT:
            stretch_left, stretch_right = False, True
        elif align == HALIGN_CENTER:
            stretch_left, stretch_right = True, True
        else:  # HALIGN_RIGHT
            stretch_left, stretch_right = True, False
        HorizontalLayout.__init__(self, content=[
            Graphic(path=["section", "left"], is_expandable=stretch_left),
            Frame(Label(title, path=["section"]),
                  path=['section', 'center'],
                  use_bg_group=True),
            Graphic(path=["section", "right"], is_expandable=stretch_right),
        ], align=VALIGN_BOTTOM, padding=0)
class FoldingSection(Control, VerticalLayout):
    """
    A section header that folds/unfolds its content when clicked; a small
    'book' graphic in the header reflects the open/closed state.
    """

    def __init__(self, title, content=None, is_open=True, align=HALIGN_CENTER):
        Control.__init__(self)
        # Only the side(s) opposite the label's alignment stretch.
        if align == HALIGN_LEFT:
            stretch_left, stretch_right = False, True
        elif align == HALIGN_CENTER:
            stretch_left, stretch_right = True, True
        else:  # HALIGN_RIGHT
            stretch_left, stretch_right = True, False
        self.is_open = is_open
        self.folding_content = content
        self.book = Graphic(self._get_image_path())
        self.header = HorizontalLayout([
            Graphic(path=["section", "left"], is_expandable=stretch_left),
            Frame(HorizontalLayout([
                self.book,
                Label(title, path=["section"]),
            ]), path=["section", "center"],
                use_bg_group=True),
            Graphic(path=["section", "right"], is_expandable=stretch_right),
        ], align=VALIGN_BOTTOM, padding=0)
        children = [self.header]
        if self.is_open:
            children.append(content)
        VerticalLayout.__init__(self, content=children, align=align)

    def _get_controls(self):
        # Report the header rectangle as this Control's clickable region,
        # in addition to whatever the layout already exposes.
        header = self.header
        own = (self, header.x, header.x + header.width,
               header.y + header.height, header.y)
        return VerticalLayout._get_controls(self) + [own]

    def _get_image_path(self):
        """Theme path of the book graphic matching the current state."""
        return ["section", "opened" if self.is_open else "closed"]

    def hit_test(self, x, y):
        # Only the header reacts to clicks.
        return self.header.hit_test(x, y)

    def on_mouse_press(self, x, y, button, modifiers):
        """Toggle the folded state and swap the book graphic."""
        self.is_open = not self.is_open
        self.book.delete()
        self.book.path = self._get_image_path()
        if self.is_open:
            self.add(self.folding_content)
        else:
            self.remove(self.folding_content)
            self.folding_content.delete()

    def teardown(self):
        self.folding_content.teardown()
        self.folding_content = None
        VerticalLayout.teardown(self)
| |
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import warnings
class TestConfig(object):
@classmethod
def setup_class(cls):
from copy import deepcopy
cls.cf = pd.core.config
cls.gc = deepcopy(getattr(cls.cf, '_global_config'))
cls.do = deepcopy(getattr(cls.cf, '_deprecated_options'))
cls.ro = deepcopy(getattr(cls.cf, '_registered_options'))
def setup_method(self, method):
setattr(self.cf, '_global_config', {})
setattr(self.cf, 'options', self.cf.DictWrapper(
self.cf._global_config))
setattr(self.cf, '_deprecated_options', {})
setattr(self.cf, '_registered_options', {})
# Our test fixture in conftest.py sets "chained_assignment"
# to "raise" only after all test methods have been setup.
# However, after this setup, there is no longer any
# "chained_assignment" option, so re-register it.
self.cf.register_option('chained_assignment', 'raise')
def teardown_method(self, method):
setattr(self.cf, '_global_config', self.gc)
setattr(self.cf, '_deprecated_options', self.do)
setattr(self.cf, '_registered_options', self.ro)
def test_api(self):
# the pandas object exposes the user API
assert hasattr(pd, 'get_option')
assert hasattr(pd, 'set_option')
assert hasattr(pd, 'reset_option')
assert hasattr(pd, 'describe_option')
def test_is_one_of_factory(self):
v = self.cf.is_one_of_factory([None, 12])
v(12)
v(None)
pytest.raises(ValueError, v, 1.1)
def test_register_option(self):
self.cf.register_option('a', 1, 'doc')
# can't register an already registered option
pytest.raises(KeyError, self.cf.register_option, 'a', 1, 'doc')
# can't register an already registered option
pytest.raises(KeyError, self.cf.register_option, 'a.b.c.d1', 1,
'doc')
pytest.raises(KeyError, self.cf.register_option, 'a.b.c.d2', 1,
'doc')
# no python keywords
pytest.raises(ValueError, self.cf.register_option, 'for', 0)
pytest.raises(ValueError, self.cf.register_option, 'a.for.b', 0)
# must be valid identifier (ensure attribute access works)
pytest.raises(ValueError, self.cf.register_option,
'Oh my Goddess!', 0)
# we can register options several levels deep
# without predefining the intermediate steps
# and we can define differently named options
# in the same namespace
self.cf.register_option('k.b.c.d1', 1, 'doc')
self.cf.register_option('k.b.c.d2', 1, 'doc')
def test_describe_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b', 1, 'doc2')
self.cf.deprecate_option('b')
self.cf.register_option('c.d.e1', 1, 'doc3')
self.cf.register_option('c.d.e2', 1, 'doc4')
self.cf.register_option('f', 1)
self.cf.register_option('g.h', 1)
self.cf.register_option('k', 2)
self.cf.deprecate_option('g.h', rkey="k")
self.cf.register_option('l', "foo")
# non-existent keys raise KeyError
pytest.raises(KeyError, self.cf.describe_option, 'no.such.key')
# we can get the description for any key we registered
assert 'doc' in self.cf.describe_option('a', _print_desc=False)
assert 'doc2' in self.cf.describe_option('b', _print_desc=False)
assert 'precated' in self.cf.describe_option('b', _print_desc=False)
assert 'doc3' in self.cf.describe_option('c.d.e1', _print_desc=False)
assert 'doc4' in self.cf.describe_option('c.d.e2', _print_desc=False)
# if no doc is specified we get a default message
# saying "description not available"
assert 'vailable' in self.cf.describe_option('f', _print_desc=False)
assert 'vailable' in self.cf.describe_option('g.h', _print_desc=False)
assert 'precated' in self.cf.describe_option('g.h', _print_desc=False)
assert 'k' in self.cf.describe_option('g.h', _print_desc=False)
# default is reported
assert 'foo' in self.cf.describe_option('l', _print_desc=False)
# current value is reported
assert 'bar' not in self.cf.describe_option('l', _print_desc=False)
self.cf.set_option("l", "bar")
assert 'bar' in self.cf.describe_option('l', _print_desc=False)
def test_case_insensitive(self):
self.cf.register_option('KanBAN', 1, 'doc')
assert 'doc' in self.cf.describe_option('kanbaN', _print_desc=False)
assert self.cf.get_option('kanBaN') == 1
self.cf.set_option('KanBan', 2)
assert self.cf.get_option('kAnBaN') == 2
# gets of non-existent keys fail
pytest.raises(KeyError, self.cf.get_option, 'no_such_option')
self.cf.deprecate_option('KanBan')
assert self.cf._is_deprecated('kAnBaN')
def test_get_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
# gets of existing keys succeed
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
assert self.cf.get_option('b.b') is None
# gets of non-existent keys fail
pytest.raises(KeyError, self.cf.get_option, 'no_such_option')
def test_set_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
assert self.cf.get_option('b.b') is None
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
self.cf.set_option('b.b', 1.1)
assert self.cf.get_option('a') == 2
assert self.cf.get_option('b.c') == 'wurld'
assert self.cf.get_option('b.b') == 1.1
pytest.raises(KeyError, self.cf.set_option, 'no.such.key', None)
def test_set_option_empty_args(self):
pytest.raises(ValueError, self.cf.set_option)
def test_set_option_uneven_args(self):
pytest.raises(ValueError, self.cf.set_option, 'a.b', 2, 'b.c')
def test_set_option_invalid_single_argument_type(self):
pytest.raises(ValueError, self.cf.set_option, 2)
def test_set_option_multiple(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
assert self.cf.get_option('b.b') is None
self.cf.set_option('a', '2', 'b.c', None, 'b.b', 10.0)
assert self.cf.get_option('a') == '2'
assert self.cf.get_option('b.c') is None
assert self.cf.get_option('b.b') == 10.0
def test_validation(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_text)
pytest.raises(ValueError, self.cf.register_option, 'a.b.c.d2',
'NO', 'doc', validator=self.cf.is_int)
self.cf.set_option('a', 2) # int is_int
self.cf.set_option('b.c', 'wurld') # str is_str
pytest.raises(
ValueError, self.cf.set_option, 'a', None) # None not is_int
pytest.raises(ValueError, self.cf.set_option, 'a', 'ab')
pytest.raises(ValueError, self.cf.set_option, 'b.c', 1)
validator = self.cf.is_one_of_factory([None, self.cf.is_callable])
self.cf.register_option('b', lambda: None, 'doc',
validator=validator)
self.cf.set_option('b', '%.1f'.format) # Formatter is callable
self.cf.set_option('b', None) # Formatter is none (default)
pytest.raises(ValueError, self.cf.set_option, 'b', '%.1f')
def test_reset_option(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_str)
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
assert self.cf.get_option('a') == 2
assert self.cf.get_option('b.c') == 'wurld'
self.cf.reset_option('a')
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'wurld'
self.cf.reset_option('b.c')
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
def test_reset_option_all(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_str)
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
assert self.cf.get_option('a') == 2
assert self.cf.get_option('b.c') == 'wurld'
self.cf.reset_option("all")
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
def test_deprecate_option(self):
# we can deprecate non-existent options
self.cf.deprecate_option('foo')
assert self.cf._is_deprecated('foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
try:
self.cf.get_option('foo')
except KeyError:
pass
else:
self.fail("Nonexistent option didn't raise KeyError")
assert len(w) == 1 # should have raised one warning
assert 'deprecated' in str(w[-1]) # we get the default message
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('foo', 'hullo', 'doc2')
self.cf.deprecate_option('a', removal_ver='nifty_ver')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.get_option('a')
assert len(w) == 1 # should have raised one warning
assert 'eprecated' in str(w[-1]) # we get the default message
assert 'nifty_ver' in str(w[-1]) # with the removal_ver quoted
pytest.raises(
KeyError, self.cf.deprecate_option, 'a') # can't depr. twice
self.cf.deprecate_option('b.c', 'zounds!')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.get_option('b.c')
assert len(w) == 1 # should have raised one warning
assert 'zounds!' in str(w[-1]) # we get the custom message
# test rerouting keys
self.cf.register_option('d.a', 'foo', 'doc2')
self.cf.register_option('d.dep', 'bar', 'doc2')
assert self.cf.get_option('d.a') == 'foo'
assert self.cf.get_option('d.dep') == 'bar'
self.cf.deprecate_option('d.dep', rkey='d.a') # reroute d.dep to d.a
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.cf.get_option('d.dep') == 'foo'
assert len(w) == 1 # should have raised one warning
assert 'eprecated' in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.set_option('d.dep', 'baz') # should overwrite "d.a"
assert len(w) == 1 # should have raised one warning
assert 'eprecated' in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.cf.get_option('d.dep') == 'baz'
assert len(w) == 1 # should have raised one warning
assert 'eprecated' in str(w[-1]) # we get the custom message
def test_config_prefix(self):
with self.cf.config_prefix("base"):
self.cf.register_option('a', 1, "doc1")
self.cf.register_option('b', 2, "doc2")
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b') == 2
self.cf.set_option('a', 3)
self.cf.set_option('b', 4)
assert self.cf.get_option('a') == 3
assert self.cf.get_option('b') == 4
assert self.cf.get_option('base.a') == 3
assert self.cf.get_option('base.b') == 4
assert 'doc1' in self.cf.describe_option('base.a', _print_desc=False)
assert 'doc2' in self.cf.describe_option('base.b', _print_desc=False)
self.cf.reset_option('base.a')
self.cf.reset_option('base.b')
with self.cf.config_prefix("base"):
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b') == 2
def test_callback(self):
k = [None]
v = [None]
def callback(key):
k.append(key)
v.append(self.cf.get_option(key))
self.cf.register_option('d.a', 'foo', cb=callback)
self.cf.register_option('d.b', 'foo', cb=callback)
del k[-1], v[-1]
self.cf.set_option("d.a", "fooz")
assert k[-1] == "d.a"
assert v[-1] == "fooz"
del k[-1], v[-1]
self.cf.set_option("d.b", "boo")
assert k[-1] == "d.b"
assert v[-1] == "boo"
del k[-1], v[-1]
self.cf.reset_option("d.b")
assert k[-1] == "d.b"
def test_set_ContextManager(self):
def eq(val):
assert self.cf.get_option("a") == val
self.cf.register_option('a', 0)
eq(0)
with self.cf.option_context("a", 15):
eq(15)
with self.cf.option_context("a", 25):
eq(25)
eq(15)
eq(0)
self.cf.set_option("a", 17)
eq(17)
def test_attribute_access(self):
holder = []
def f():
options.b = 1
def f2():
options.display = 1
def f3(key):
holder.append(True)
self.cf.register_option('a', 0)
self.cf.register_option('c', 0, cb=f3)
options = self.cf.options
assert options.a == 0
with self.cf.option_context("a", 15):
assert options.a == 15
options.a = 500
assert self.cf.get_option("a") == 500
self.cf.reset_option("a")
assert options.a == self.cf.get_option("a", 0)
pytest.raises(KeyError, f)
pytest.raises(KeyError, f2)
# make sure callback kicks when using this form of setting
options.c = 1
assert len(holder) == 1
def test_option_context_scope(self):
# Ensure that creating a context does not affect the existing
# environment as it is supposed to be used with the `with` statement.
# See https://github.com/pandas-dev/pandas/issues/8514
original_value = 60
context_value = 10
option_name = 'a'
self.cf.register_option(option_name, original_value)
# Ensure creating contexts didn't affect the current context.
ctx = self.cf.option_context(option_name, context_value)
assert self.cf.get_option(option_name) == original_value
# Ensure the correct value is available inside the context.
with ctx:
assert self.cf.get_option(option_name) == context_value
# Ensure the current context is reset
assert self.cf.get_option(option_name) == original_value
def test_dictwrapper_getattr(self):
options = self.cf.options
# GH 19789
pytest.raises(self.cf.OptionError, getattr, options, 'bananas')
assert not hasattr(options, 'bananas')
| |
from datetime import datetime
from io import StringIO
import re
from typing import Dict, List, Union
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
@pytest.fixture
def mix_ab() -> Dict[str, List[Union[int, str]]]:
    """Frame payload mixing an int column with a dotted string column."""
    return {
        "a": list(range(4)),
        "b": list("ab.."),
    }
@pytest.fixture
def mix_abc() -> Dict[str, List[Union[float, str]]]:
    """Frame payload with float, dotted-string, and NaN-bearing columns."""
    return {
        "a": list(range(4)),
        "b": list("ab.."),
        "c": ["a", "b", np.nan, "d"],
    }
class TestDataFrameReplace:
    def test_replace_inplace(self, datetime_frame, float_string_frame):
        """replace(..., inplace=True) of NaN must match fillna.

        ``datetime_frame`` and ``float_string_frame`` are project-level
        pytest fixtures -- presumably a float frame with a DatetimeIndex
        and a mixed float/string frame; verify against conftest.
        """
        # Seed NaNs at both ends of column A (chained assignment --
        # relies on write-through semantics of this pandas version).
        datetime_frame["A"][:5] = np.nan
        datetime_frame["A"][-5:] = np.nan

        tsframe = datetime_frame.copy()
        tsframe.replace(np.nan, 0, inplace=True)
        tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))

        # mixed type
        mf = float_string_frame
        mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan
        mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan

        result = float_string_frame.replace(np.nan, 0)
        expected = float_string_frame.fillna(value=0)
        tm.assert_frame_equal(result, expected)

        # list form should behave the same as the scalar form
        tsframe = datetime_frame.copy()
        tsframe.replace([np.nan], [0], inplace=True)
        tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))
def test_regex_replace_scalar(self, mix_ab):
obj = {"a": list("ab.."), "b": list("efgh")}
dfobj = DataFrame(obj)
dfmix = DataFrame(mix_ab)
# simplest cases
# regex -> value
# obj frame
res = dfobj.replace(r"\s*\.\s*", np.nan, regex=True)
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.replace(r"\s*\.\s*", np.nan, regex=True)
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True)
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True)
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True)
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True)
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1")
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1")
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1")
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
res = dfmix.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1")
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self, mix_ab):
obj = {"a": list("ab.."), "b": list("efgh")}
dfobj = DataFrame(obj)
dfmix = DataFrame(mix_ab)
# simplest cases
# regex -> value
# obj frame
res = dfobj.copy()
res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True)
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.copy()
res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True)
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True)
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True)
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True)
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.copy()
res.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True)
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True)
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True)
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True)
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.copy()
res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True)
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True)
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True)
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True)
tm.assert_frame_equal(dfobj, res.fillna("."))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True)
tm.assert_frame_equal(dfmix, res.fillna("."))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True)
objc = obj.copy()
objc["a"] = ["a", "b", "...", "..."]
expec = DataFrame(objc)
tm.assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True)
mixc = mix_ab.copy()
mixc["b"] = ["a", "b", "...", "..."]
expec = DataFrame(mixc)
tm.assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r"\s*\.\s*", r"e|f|g"]
values = [np.nan, "crap"]
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame(
{
"a": ["a", "b", np.nan, np.nan],
"b": ["crap"] * 3 + ["h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"]
values = [r"\1\1", r"\1_crap"]
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["e_crap", "f_crap", "g_crap", "h"],
"c": ["h", "e_crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r"\s*(\.)\s*", r"e"]
values = [r"\1\1", r"crap"]
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
to_replace_res = [r"\s*(\.)\s*", r"e"]
values = [r"\1\1", r"crap"]
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
    """Same list-of-regexes cases as test_regex_replace_list_obj, but
    mutating a fresh copy via ``inplace=True`` instead of using the
    returned frame."""
    # same as above with inplace=True
    # lists of regexes and values
    obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")}
    dfobj = DataFrame(obj)
    # lists of regexes and values
    # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
    to_replace_res = [r"\s*\.\s*", r"e|f|g"]
    values = [np.nan, "crap"]
    res = dfobj.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame(
        {
            "a": ["a", "b", np.nan, np.nan],
            "b": ["crap"] * 3 + ["h"],
            "c": ["h", "crap", "l", "o"],
        }
    )
    tm.assert_frame_equal(res, expec)
    # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
    to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"]
    values = [r"\1\1", r"\1_crap"]
    res = dfobj.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame(
        {
            "a": ["a", "b", "..", ".."],
            "b": ["e_crap", "f_crap", "g_crap", "h"],
            "c": ["h", "e_crap", "l", "o"],
        }
    )
    tm.assert_frame_equal(res, expec)
    # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
    # or vN)]
    to_replace_res = [r"\s*(\.)\s*", r"e"]
    values = [r"\1\1", r"crap"]
    res = dfobj.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame(
        {
            "a": ["a", "b", "..", ".."],
            "b": ["crap", "f", "g", "h"],
            "c": ["h", "crap", "l", "o"],
        }
    )
    tm.assert_frame_equal(res, expec)
    # patterns passed via the ``regex`` keyword rather than positionally
    to_replace_res = [r"\s*(\.)\s*", r"e"]
    values = [r"\1\1", r"crap"]
    res = dfobj.copy()
    res.replace(value=values, regex=to_replace_res, inplace=True)
    expec = DataFrame(
        {
            "a": ["a", "b", "..", ".."],
            "b": ["crap", "f", "g", "h"],
            "c": ["h", "crap", "l", "o"],
        }
    )
    tm.assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self, mix_ab):
    """List-of-regexes replacement on a frame with mixed dtypes (numeric
    column plus object column) must only touch the string column."""
    # mixed frame to make sure this doesn't break things
    dfmix = DataFrame(mix_ab)
    # lists of regexes and values
    # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
    to_replace_res = [r"\s*\.\s*", r"a"]
    values = [np.nan, "crap"]
    mix2 = {"a": list(range(4)), "b": list("ab.."), "c": list("halo")}
    dfmix2 = DataFrame(mix2)
    res = dfmix2.replace(to_replace_res, values, regex=True)
    expec = DataFrame(
        {
            "a": mix2["a"],
            "b": ["crap", "b", np.nan, np.nan],
            "c": ["h", "crap", "l", "o"],
        }
    )
    tm.assert_frame_equal(res, expec)
    # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
    to_replace_res = [r"\s*(\.)\s*", r"(a|b)"]
    values = [r"\1\1", r"\1_crap"]
    res = dfmix.replace(to_replace_res, values, regex=True)
    expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]})
    tm.assert_frame_equal(res, expec)
    # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
    # or vN)]
    to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
    values = [r"\1\1", r"crap", r"\1_crap"]
    res = dfmix.replace(to_replace_res, values, regex=True)
    expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
    tm.assert_frame_equal(res, expec)
    # same mapping via the ``regex`` keyword
    to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
    values = [r"\1\1", r"crap", r"\1_crap"]
    res = dfmix.replace(regex=to_replace_res, value=values)
    expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
    tm.assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self, mix_ab):
    """Same mixed-dtype list-of-regexes cases, exercised with
    ``inplace=True`` on copies of the fixture frame."""
    dfmix = DataFrame(mix_ab)
    # the same inplace
    # lists of regexes and values
    # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
    to_replace_res = [r"\s*\.\s*", r"a"]
    values = [np.nan, "crap"]
    res = dfmix.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b", np.nan, np.nan]})
    tm.assert_frame_equal(res, expec)
    # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
    to_replace_res = [r"\s*(\.)\s*", r"(a|b)"]
    values = [r"\1\1", r"\1_crap"]
    res = dfmix.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]})
    tm.assert_frame_equal(res, expec)
    # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
    # or vN)]
    to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
    values = [r"\1\1", r"crap", r"\1_crap"]
    res = dfmix.copy()
    res.replace(to_replace_res, values, inplace=True, regex=True)
    expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
    tm.assert_frame_equal(res, expec)
    # patterns passed via the ``regex`` keyword
    to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
    values = [r"\1\1", r"crap", r"\1_crap"]
    res = dfmix.copy()
    res.replace(regex=to_replace_res, value=values, inplace=True)
    expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
    tm.assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self, mix_abc):
    """Column-keyed dict regex replacement ({col: pattern} -> {col: value})
    on a mixed-dtype frame, in both returning and inplace forms."""
    dfmix = DataFrame(mix_abc)
    # dicts
    # single dict {re1: v1}, search the whole frame
    # need test for this...
    # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
    # frame
    res = dfmix.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True)
    res2 = dfmix.copy()
    res2.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True)
    expec = DataFrame(
        {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
    )
    tm.assert_frame_equal(res, expec)
    tm.assert_frame_equal(res2, expec)
    # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
    # whole frame
    res = dfmix.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True)
    res2 = dfmix.copy()
    res2.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True)
    expec = DataFrame(
        {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]}
    )
    tm.assert_frame_equal(res, expec)
    tm.assert_frame_equal(res2, expec)
    # same mapping through the ``regex`` keyword
    res = dfmix.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"})
    res2 = dfmix.copy()
    res2.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True)
    expec = DataFrame(
        {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]}
    )
    tm.assert_frame_equal(res, expec)
    tm.assert_frame_equal(res2, expec)
    # scalar -> dict
    # to_replace regex, {value: value}
    expec = DataFrame(
        {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]}
    )
    res = dfmix.replace("a", {"b": np.nan}, regex=True)
    res2 = dfmix.copy()
    res2.replace("a", {"b": np.nan}, regex=True, inplace=True)
    tm.assert_frame_equal(res, expec)
    tm.assert_frame_equal(res2, expec)
    res = dfmix.replace("a", {"b": np.nan}, regex=True)
    res2 = dfmix.copy()
    res2.replace(regex="a", value={"b": np.nan}, inplace=True)
    expec = DataFrame(
        {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]}
    )
    tm.assert_frame_equal(res, expec)
    tm.assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self, mix_abc):
# nested dicts will not work until this is implemented for Series
dfmix = DataFrame(mix_abc)
res = dfmix.replace({"b": {r"\s*\.\s*": np.nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={"b": {r"\s*\.\s*": np.nan}})
res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True)
expec = DataFrame(
{"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
)
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
tm.assert_frame_equal(res3, expec)
tm.assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_non_first_character(self):
# GH 25259
df = pd.DataFrame({"first": ["abc", "bca", "cab"]})
expected = pd.DataFrame({"first": [".bc", "bc.", "c.b"]})
result = df.replace({"a": "."}, regex=True)
tm.assert_frame_equal(result, expected)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({"Type": ["Q", "T", "Q", "Q", "T"], "tmp": 2})
expected = DataFrame({"Type": [0, 1, 0, 0, 1], "tmp": 2})
result = df.replace({"Type": {"Q": 0, "T": 1}})
tm.assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self, mix_abc):
    """A list of regexes mapped to a single scalar value, in returning,
    inplace and ``regex``-keyword forms."""
    df = DataFrame(mix_abc)
    expec = DataFrame(
        {
            "a": mix_abc["a"],
            "b": np.array([np.nan] * 4),
            "c": [np.nan, np.nan, np.nan, "d"],
        }
    )
    res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True)
    res2 = df.copy()
    res3 = df.copy()
    res2.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True)
    res3.replace(regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True)
    tm.assert_frame_equal(res, expec)
    tm.assert_frame_equal(res2, expec)
    tm.assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self, mix_abc):
# what happens when you try to replace a numeric value with a regex?
df = DataFrame(mix_abc)
res = df.replace(r"\s*\.\s*", 0, regex=True)
res2 = df.copy()
res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r"\s*\.\s*", value=0, inplace=True)
expec = DataFrame({"a": mix_abc["a"], "b": ["a", "b", 0, 0], "c": mix_abc["c"]})
tm.assert_frame_equal(res, expec)
tm.assert_frame_equal(res2, expec)
tm.assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self, mix_abc):
    """A list of regexes mapped to a single numeric value; matching cells
    in object columns become the number."""
    df = DataFrame(mix_abc)
    res = df.replace([r"\s*\.\s*", "b"], 0, regex=True)
    res2 = df.copy()
    res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True)
    res3 = df.copy()
    res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True)
    expec = DataFrame(
        {"a": mix_abc["a"], "b": ["a", 0, 0, 0], "c": ["a", 0, np.nan, "d"]}
    )
    tm.assert_frame_equal(res, expec)
    tm.assert_frame_equal(res2, expec)
    tm.assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self, mix_abc):
    """Series-valued to_replace/value behave like the equivalent
    column-keyed dicts."""
    df = DataFrame(mix_abc)
    s1 = Series({"b": r"\s*\.\s*"})
    s2 = Series({"b": np.nan})
    res = df.replace(s1, s2, regex=True)
    res2 = df.copy()
    res2.replace(s1, s2, inplace=True, regex=True)
    res3 = df.copy()
    res3.replace(regex=s1, value=s2, inplace=True)
    expec = DataFrame(
        {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
    )
    tm.assert_frame_equal(res, expec)
    tm.assert_frame_equal(res2, expec)
    tm.assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self, mix_abc):
df = DataFrame(mix_abc)
expec = DataFrame({"a": ["a", 1, 2, 3], "b": mix_abc["b"], "c": mix_abc["c"]})
res = df.replace(0, "a")
tm.assert_frame_equal(res, expec)
assert res.a.dtype == np.object_
@pytest.mark.parametrize("metachar", ["[]", "()", r"\d", r"\w", r"\s"])
def test_replace_regex_metachar(self, metachar):
    """Regex metacharacters used as dict keys are treated as patterns,
    not escaped literals."""
    df = DataFrame({"a": [metachar, "else"]})
    result = df.replace({"a": {metachar: "paren"}})
    expected = DataFrame({"a": ["paren", "else"]})
    tm.assert_frame_equal(result, expected)
def test_replace(self, datetime_frame):
datetime_frame["A"][:5] = np.nan
datetime_frame["A"][-5:] = np.nan
zero_filled = datetime_frame.replace(np.nan, -1e8)
tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8))
tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame)
datetime_frame["A"][:5] = np.nan
datetime_frame["A"][-5:] = np.nan
datetime_frame["B"][:5] = -1e8
# empty
df = DataFrame(index=["a", "b"])
tm.assert_frame_equal(df, df.replace(5, 7))
# GH 11698
# test for mixed data types.
df = pd.DataFrame(
[("-", pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))]
)
df1 = df.replace("-", np.nan)
expected_df = pd.DataFrame(
[(np.nan, pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))]
)
tm.assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r".", r"e"]
values = [np.nan, "crap"]
res = dfobj.replace(to_replace_res, values)
expec = DataFrame(
{
"a": ["a", "b", np.nan, np.nan],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r".", r"f"]
values = [r"..", r"crap"]
res = dfobj.replace(to_replace_res, values)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["e", "crap", "g", "h"],
"c": ["h", "e", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
def test_replace_with_empty_list(self):
    """GH 21977: an empty list to_replace is a no-op; GH 19266: a list
    *value* for a scalar key raises ValueError."""
    # GH 21977
    s = pd.Series([["a", "b"], [], np.nan, [1]])
    df = pd.DataFrame({"col": s})
    expected = df
    result = df.replace([], np.nan)
    tm.assert_frame_equal(result, expected)
    # GH 19266
    with pytest.raises(ValueError, match="cannot assign mismatch"):
        df.replace({np.nan: []})
    with pytest.raises(ValueError, match="cannot assign mismatch"):
        df.replace({np.nan: ["dummy", "alt"]})
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}})
result = df.replace(0, {"zero": 0.5, "one": 1.0})
expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 2.0, "b": 1.0}})
tm.assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
tm.assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}})
s = Series({"zero": 0.0, "one": 2.0})
result = df.replace(s, {"zero": 0.5, "one": 1.0})
expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 1.0, "b": 0.0}})
tm.assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
tm.assert_frame_equal(result, expected)
def test_replace_convert(self):
    """GH 3907: full-frame dict replacement converts object columns to
    the natural dtype of the replacement values (int64 here)."""
    # gh 3907
    df = DataFrame([["foo", "bar", "bah"], ["bar", "foo", "bah"]])
    m = {"foo": 1, "bar": 2, "bah": 3}
    rep = df.replace(m)
    expec = Series([np.int64] * 3)
    res = rep.dtypes
    tm.assert_series_equal(expec, res)
def test_replace_mixed(self, float_string_frame):
    """Replacement on mixed-dtype frames: NaN round-trips, int -> float
    upcasting (whole-block and block-splitting cases), and object
    upcasting when the replacement is a string."""
    mf = float_string_frame
    mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan
    mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan
    result = float_string_frame.replace(np.nan, -18)
    expected = float_string_frame.fillna(value=-18)
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(result.replace(-18, np.nan), float_string_frame)
    result = float_string_frame.replace(np.nan, -1e8)
    expected = float_string_frame.fillna(value=-1e8)
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(result.replace(-1e8, np.nan), float_string_frame)
    # int block upcasting
    df = DataFrame(
        {
            "A": Series([1.0, 2.0], dtype="float64"),
            "B": Series([0, 1], dtype="int64"),
        }
    )
    expected = DataFrame(
        {
            "A": Series([1.0, 2.0], dtype="float64"),
            "B": Series([0.5, 1], dtype="float64"),
        }
    )
    result = df.replace(0, 0.5)
    tm.assert_frame_equal(result, expected)
    df.replace(0, 0.5, inplace=True)
    tm.assert_frame_equal(df, expected)
    # int block splitting
    df = DataFrame(
        {
            "A": Series([1.0, 2.0], dtype="float64"),
            "B": Series([0, 1], dtype="int64"),
            "C": Series([1, 2], dtype="int64"),
        }
    )
    expected = DataFrame(
        {
            "A": Series([1.0, 2.0], dtype="float64"),
            "B": Series([0.5, 1], dtype="float64"),
            "C": Series([1, 2], dtype="int64"),
        }
    )
    result = df.replace(0, 0.5)
    tm.assert_frame_equal(result, expected)
    # to object block upcasting
    df = DataFrame(
        {
            "A": Series([1.0, 2.0], dtype="float64"),
            "B": Series([0, 1], dtype="int64"),
        }
    )
    expected = DataFrame(
        {
            "A": Series([1, "foo"], dtype="object"),
            "B": Series([0, 1], dtype="int64"),
        }
    )
    result = df.replace(2, "foo")
    tm.assert_frame_equal(result, expected)
    expected = DataFrame(
        {
            "A": Series(["foo", "bar"], dtype="object"),
            "B": Series([0, "foo"], dtype="object"),
        }
    )
    result = df.replace([1, 2], ["foo", "bar"])
    tm.assert_frame_equal(result, expected)
    # test case from
    df = DataFrame(
        {"A": Series([3, 0], dtype="int64"), "B": Series([0, 3], dtype="int64")}
    )
    result = df.replace(3, df.mean().to_dict())
    expected = df.copy().astype("float64")
    m = df.mean()
    expected.iloc[0, 0] = m[0]
    expected.iloc[1, 1] = m[1]
    tm.assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({"col": range(1, 5)})
expected = DataFrame({"col": ["a", 2, 3, "b"]})
result = df.replace({"col": {1: "a", 4: "b"}})
tm.assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: "a", 4: "b"})
tm.assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({"col": range(1, 5)})
expected = DataFrame({"col": ["a", 2, 3, "b"]})
result = df.replace({-1: "-", 1: "a", 4: "b"})
tm.assert_frame_equal(expected, result)
result = df.replace({"col": {-1: "-", 1: "a", 4: "b"}})
tm.assert_frame_equal(expected, result)
def test_replace_value_is_none(self, datetime_frame):
    """to_replace given as a dict with value=None (default) uses the dict
    values as replacements; fixture cells are restored at the end."""
    orig_value = datetime_frame.iloc[0, 0]
    orig2 = datetime_frame.iloc[1, 0]
    datetime_frame.iloc[0, 0] = np.nan
    datetime_frame.iloc[1, 0] = 1
    result = datetime_frame.replace(to_replace={np.nan: 0})
    # replacing over the transpose must agree with replacing directly
    expected = datetime_frame.T.replace(to_replace={np.nan: 0}).T
    tm.assert_frame_equal(result, expected)
    result = datetime_frame.replace(to_replace={np.nan: 0, 1: -1e8})
    tsframe = datetime_frame.copy()
    tsframe.iloc[0, 0] = 0
    tsframe.iloc[1, 0] = -1e8
    expected = tsframe
    tm.assert_frame_equal(expected, result)
    # restore the (function-scoped) fixture's original cells
    datetime_frame.iloc[0, 0] = orig_value
    datetime_frame.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self, datetime_frame):
    """replace/fillna round-trip on a float32 copy of the fixture frame."""
    # dtypes
    tsframe = datetime_frame.copy().astype(np.float32)
    # NOTE(review): chained setitem (tsframe["A"][:5] = ...) may not write
    # through under copy-on-write — confirm against the pandas version in use.
    tsframe["A"][:5] = np.nan
    tsframe["A"][-5:] = np.nan
    zero_filled = tsframe.replace(np.nan, -1e8)
    tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
    tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe)
    tsframe["A"][:5] = np.nan
    tsframe["A"][-5:] = np.nan
    tsframe["B"][:5] = -1e8
    b = tsframe["B"]
    b[b == -1e8] = np.nan
    tsframe["B"] = b
    result = tsframe.fillna(method="bfill")
    # NOTE(review): this compares fillna(method="bfill") with itself, so it
    # can never fail — presumably meant to exercise replace(method="bfill").
    tm.assert_frame_equal(result, tsframe.fillna(method="bfill"))
@pytest.mark.parametrize(
    "frame, to_replace, value, expected",
    [
        (DataFrame({"ints": [1, 2, 3]}), 1, 0, DataFrame({"ints": [0, 2, 3]})),
        (
            DataFrame({"ints": [1, 2, 3]}, dtype=np.int32),
            1,
            0,
            DataFrame({"ints": [0, 2, 3]}, dtype=np.int32),
        ),
        (
            DataFrame({"ints": [1, 2, 3]}, dtype=np.int16),
            1,
            0,
            DataFrame({"ints": [0, 2, 3]}, dtype=np.int16),
        ),
        (
            DataFrame({"bools": [True, False, True]}),
            False,
            True,
            DataFrame({"bools": [True, True, True]}),
        ),
        (
            DataFrame({"complex": [1j, 2j, 3j]}),
            1j,
            0,
            DataFrame({"complex": [0j, 2j, 3j]}),
        ),
        (
            DataFrame(
                {
                    "datetime64": Index(
                        [
                            datetime(2018, 5, 28),
                            datetime(2018, 7, 28),
                            datetime(2018, 5, 28),
                        ]
                    )
                }
            ),
            datetime(2018, 5, 28),
            datetime(2018, 7, 28),
            DataFrame({"datetime64": Index([datetime(2018, 7, 28)] * 3)}),
        ),
        # GH 20380
        (
            DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["foo"]}),
            "foo",
            "bar",
            DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["bar"]}),
        ),
        (
            DataFrame(
                {
                    "A": date_range("20130101", periods=3, tz="US/Eastern"),
                    "B": [0, np.nan, 2],
                }
            ),
            Timestamp("20130102", tz="US/Eastern"),
            Timestamp("20130104", tz="US/Eastern"),
            DataFrame(
                {
                    "A": [
                        Timestamp("20130101", tz="US/Eastern"),
                        Timestamp("20130104", tz="US/Eastern"),
                        Timestamp("20130103", tz="US/Eastern"),
                    ],
                    "B": [0, np.nan, 2],
                }
            ),
        ),
    ],
)
def test_replace_dtypes(self, frame, to_replace, value, expected):
    """Scalar replacement preserves (or coerces) the dtype as expected
    across int/bool/complex/datetime/tz-aware frames."""
    # call the method directly — the previous getattr(frame, "replace")
    # indirection added nothing
    result = frame.replace(to_replace, value)
    tm.assert_frame_equal(result, expected)
def test_replace_input_formats_listlike(self):
    """dict/dict, scalar/dict and list/list input combinations; mismatched
    list lengths raise ValueError."""
    # both dicts
    to_rep = {"A": np.nan, "B": 0, "C": ""}
    values = {"A": 0, "B": -1, "C": "missing"}
    df = DataFrame(
        {"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
    )
    filled = df.replace(to_rep, values)
    # per-column replace must agree with the frame-level dict form
    expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()}
    tm.assert_frame_equal(filled, DataFrame(expected))
    result = df.replace([0, 2, 5], [5, 2, 0])
    expected = DataFrame(
        {"A": [np.nan, 5, np.inf], "B": [5, 2, 0], "C": ["", "asdf", "fd"]}
    )
    tm.assert_frame_equal(result, expected)
    # scalar to dict
    values = {"A": 0, "B": -1, "C": "missing"}
    df = DataFrame(
        {"A": [np.nan, 0, np.nan], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
    )
    filled = df.replace(np.nan, values)
    expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()}
    tm.assert_frame_equal(filled, DataFrame(expected))
    # list to list
    to_rep = [np.nan, 0, ""]
    values = [-2, -1, "missing"]
    result = df.replace(to_rep, values)
    expected = df.copy()
    for i in range(len(to_rep)):
        expected.replace(to_rep[i], values[i], inplace=True)
    tm.assert_frame_equal(result, expected)
    msg = r"Replacement lists must match in length\. Expecting 3 got 2"
    with pytest.raises(ValueError, match=msg):
        df.replace(to_rep, values[1:])
def test_replace_input_formats_scalar(self):
    """dict/scalar and list/scalar input combinations; a list *value* for
    a dict to_replace raises TypeError."""
    df = DataFrame(
        {"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
    )
    # dict to scalar
    to_rep = {"A": np.nan, "B": 0, "C": ""}
    filled = df.replace(to_rep, 0)
    # per-column replace must agree with the frame-level dict form
    expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()}
    tm.assert_frame_equal(filled, DataFrame(expected))
    msg = "value argument must be scalar, dict, or Series"
    with pytest.raises(TypeError, match=msg):
        df.replace(to_rep, [np.nan, 0, ""])
    # list to scalar
    to_rep = [np.nan, 0, ""]
    result = df.replace(to_rep, -1)
    expected = df.copy()
    for i in range(len(to_rep)):
        expected.replace(to_rep[i], -1, inplace=True)
    tm.assert_frame_equal(result, expected)
def test_replace_limit(self):
    # TODO: placeholder — the ``limit`` argument of DataFrame.replace is
    # not exercised yet; implement the test or remove the stub.
    pass
def test_replace_dict_no_regex(self):
answer = Series(
{
0: "Strongly Agree",
1: "Agree",
2: "Neutral",
3: "Disagree",
4: "Strongly Disagree",
}
)
weights = {
"Agree": 4,
"Disagree": 2,
"Neutral": 3,
"Strongly Agree": 5,
"Strongly Disagree": 1,
}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series(
{
0: "Strongly Agree",
1: "Agree",
2: "Neutral",
3: "Disagree",
4: "Strongly Disagree",
}
)
weights = Series(
{
"Agree": 4,
"Disagree": 2,
"Neutral": 3,
"Strongly Agree": 5,
"Strongly Disagree": 1,
}
)
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[np.nan, 1]))
res1 = df.replace(to_replace={np.nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0])
expected = DataFrame({"A": [0, -1e8]})
tm.assert_frame_equal(res1, res2)
tm.assert_frame_equal(res2, res3)
tm.assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
    """A regex-looking dict key is treated as a literal value unless
    regex=True, so nothing in this frame changes."""
    raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
    df = pd.read_csv(StringIO(raw), sep=r"\s+")
    res = df.replace({r"\D": 1})
    tm.assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({"a": [True, False], "b": list("ab")})
result = df.replace(True, "a")
expected = DataFrame({"a": ["a", False], "b": df.b})
tm.assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace("asdf", "fdsa")
tm.assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
tm.assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
    """Mixing bool and str keys in one replacement dict raises TypeError
    (bool and str cannot be compared)."""
    df = DataFrame({0: [True, False], 1: [False, True]})
    with pytest.raises(TypeError, match="Cannot compare types .+"):
        df.replace({"asdf": "asdb", True: "yes"})
def test_replace_truthy(self):
df = DataFrame({"a": [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
tm.assert_frame_equal(r, e)
def test_nested_dict_overlapping_keys_replace_int(self):
# GH 27660 keep behaviour consistent for simple dictionary and
# nested dictionary replacement
df = DataFrame({"a": list(range(1, 5))})
result = df.replace({"a": dict(zip(range(1, 5), range(2, 6)))})
expected = df.replace(dict(zip(range(1, 5), range(2, 6))))
tm.assert_frame_equal(result, expected)
def test_nested_dict_overlapping_keys_replace_str(self):
# GH 27660
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({"a": astr})
result = df.replace(dict(zip(astr, bstr)))
expected = df.replace({"a": dict(zip(astr, bstr))})
tm.assert_frame_equal(result, expected)
def test_replace_swapping_bug(self):
df = pd.DataFrame({"a": [True, False, True]})
res = df.replace({"a": {True: "Y", False: "N"}})
expect = pd.DataFrame({"a": ["Y", "N", "Y"]})
tm.assert_frame_equal(res, expect)
df = pd.DataFrame({"a": [0, 1, 0]})
res = df.replace({"a": {0: "Y", 1: "N"}})
expect = pd.DataFrame({"a": ["Y", "N", "Y"]})
tm.assert_frame_equal(res, expect)
def test_replace_period(self):
    """Replacing strings with Period values keeps the result as object
    dtype (object -> PeriodDtype conversion is not supported)."""
    d = {
        "fname": {
            "out_augmented_AUG_2011.json": pd.Period(year=2011, month=8, freq="M"),
            "out_augmented_JAN_2011.json": pd.Period(year=2011, month=1, freq="M"),
            "out_augmented_MAY_2012.json": pd.Period(year=2012, month=5, freq="M"),
            "out_augmented_SUBSIDY_WEEK.json": pd.Period(
                year=2011, month=4, freq="M"
            ),
            "out_augmented_AUG_2012.json": pd.Period(year=2012, month=8, freq="M"),
            "out_augmented_MAY_2011.json": pd.Period(year=2011, month=5, freq="M"),
            "out_augmented_SEP_2013.json": pd.Period(year=2013, month=9, freq="M"),
        }
    }
    df = pd.DataFrame(
        [
            "out_augmented_AUG_2012.json",
            "out_augmented_SEP_2013.json",
            "out_augmented_SUBSIDY_WEEK.json",
            "out_augmented_MAY_2012.json",
            "out_augmented_MAY_2011.json",
            "out_augmented_AUG_2011.json",
            "out_augmented_JAN_2011.json",
        ],
        columns=["fname"],
    )
    # sanity check: every frame value has a mapping entry
    assert set(df.fname.values) == set(d["fname"].keys())
    # We don't support converting object -> specialized EA in
    # replace yet.
    expected = DataFrame(
        {"fname": [d["fname"][k] for k in df.fname.values]}, dtype=object
    )
    result = df.replace(d)
    tm.assert_frame_equal(result, expected)
def test_replace_datetime(self):
    """Replacing strings with Timestamps converts the column to
    datetime64."""
    d = {
        "fname": {
            "out_augmented_AUG_2011.json": pd.Timestamp("2011-08"),
            "out_augmented_JAN_2011.json": pd.Timestamp("2011-01"),
            "out_augmented_MAY_2012.json": pd.Timestamp("2012-05"),
            "out_augmented_SUBSIDY_WEEK.json": pd.Timestamp("2011-04"),
            "out_augmented_AUG_2012.json": pd.Timestamp("2012-08"),
            "out_augmented_MAY_2011.json": pd.Timestamp("2011-05"),
            "out_augmented_SEP_2013.json": pd.Timestamp("2013-09"),
        }
    }
    df = pd.DataFrame(
        [
            "out_augmented_AUG_2012.json",
            "out_augmented_SEP_2013.json",
            "out_augmented_SUBSIDY_WEEK.json",
            "out_augmented_MAY_2012.json",
            "out_augmented_MAY_2011.json",
            "out_augmented_AUG_2011.json",
            "out_augmented_JAN_2011.json",
        ],
        columns=["fname"],
    )
    # sanity check: every frame value has a mapping entry
    assert set(df.fname.values) == set(d["fname"].keys())
    expected = DataFrame({"fname": [d["fname"][k] for k in df.fname.values]})
    result = df.replace(d)
    tm.assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
    """GH 11326: replace on datetime64[ns, tz] columns — NaN fill,
    same-tz Timestamp swap, NaT replacement, and coercion to object when
    the replacement has a different (or no) timezone."""
    # GH 11326
    # behaving poorly when presented with a datetime64[ns, tz]
    df = DataFrame(
        {
            "A": date_range("20130101", periods=3, tz="US/Eastern"),
            "B": [0, np.nan, 2],
        }
    )
    result = df.replace(np.nan, 1)
    expected = DataFrame(
        {
            "A": date_range("20130101", periods=3, tz="US/Eastern"),
            "B": Series([0, 1, 2], dtype="float64"),
        }
    )
    tm.assert_frame_equal(result, expected)
    result = df.fillna(1)
    tm.assert_frame_equal(result, expected)
    result = df.replace(0, np.nan)
    expected = DataFrame(
        {
            "A": date_range("20130101", periods=3, tz="US/Eastern"),
            "B": [np.nan, np.nan, 2],
        }
    )
    tm.assert_frame_equal(result, expected)
    # same-timezone Timestamp -> Timestamp keeps the tz-aware dtype
    result = df.replace(
        Timestamp("20130102", tz="US/Eastern"),
        Timestamp("20130104", tz="US/Eastern"),
    )
    expected = DataFrame(
        {
            "A": [
                Timestamp("20130101", tz="US/Eastern"),
                Timestamp("20130104", tz="US/Eastern"),
                Timestamp("20130103", tz="US/Eastern"),
            ],
            "B": [0, np.nan, 2],
        }
    )
    tm.assert_frame_equal(result, expected)
    result = df.copy()
    result.iloc[1, 0] = np.nan
    result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Eastern"))
    tm.assert_frame_equal(result, expected)
    # coerce to object
    result = df.copy()
    result.iloc[1, 0] = np.nan
    result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific"))
    expected = DataFrame(
        {
            "A": [
                Timestamp("20130101", tz="US/Eastern"),
                Timestamp("20130104", tz="US/Pacific"),
                Timestamp("20130103", tz="US/Eastern"),
            ],
            "B": [0, np.nan, 2],
        }
    )
    tm.assert_frame_equal(result, expected)
    result = df.copy()
    result.iloc[1, 0] = np.nan
    result = result.replace({"A": np.nan}, Timestamp("20130104"))
    expected = DataFrame(
        {
            "A": [
                Timestamp("20130101", tz="US/Eastern"),
                Timestamp("20130104"),
                Timestamp("20130103", tz="US/Eastern"),
            ],
            "B": [0, np.nan, 2],
        }
    )
    tm.assert_frame_equal(result, expected)
def test_replace_with_empty_dictlike(self, mix_abc):
# GH 15289
df = DataFrame(mix_abc)
tm.assert_frame_equal(df, df.replace({}))
tm.assert_frame_equal(df, df.replace(Series([], dtype=object)))
tm.assert_frame_equal(df, df.replace({"b": {}}))
tm.assert_frame_equal(df, df.replace(Series({"b": {}})))
@pytest.mark.parametrize(
    "to_replace, method, expected",
    [
        (0, "bfill", {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}),
        (
            np.nan,
            "bfill",
            {"A": [0, 1, 2], "B": [5.0, 7.0, 7.0], "C": ["a", "b", "c"]},
        ),
        ("d", "ffill", {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}),
        (
            [0, 2],
            "bfill",
            {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]},
        ),
        (
            [1, 2],
            "pad",
            {"A": [0, 0, 0], "B": [5, np.nan, 7], "C": ["a", "b", "c"]},
        ),
        (
            (1, 2),
            "bfill",
            {"A": [0, 2, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]},
        ),
        (
            ["b", "c"],
            "ffill",
            {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "a", "a"]},
        ),
    ],
)
def test_replace_method(self, to_replace, method, expected):
    """GH 19632: value=None with a fill ``method`` fills matched cells
    from their neighbours instead of a fixed value."""
    # GH 19632
    df = DataFrame({"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]})
    result = df.replace(to_replace=to_replace, value=None, method=method)
    expected = DataFrame(expected)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "replace_dict, final_data",
    [({"a": 1, "b": 1}, [[3, 3], [2, 2]]), ({"a": 1, "b": 2}, [[3, 1], [2, 3]])],
)
def test_categorical_replace_with_dict(self, replace_dict, final_data):
    """GH 26988: dict replace on categorical columns updates the
    categories; non-inplace must leave the original untouched."""
    # GH 26988
    df = DataFrame([[1, 1], [2, 2]], columns=["a", "b"], dtype="category")
    final_data = np.array(final_data)
    a = pd.Categorical(final_data[:, 0], categories=[3, 2])
    # the resulting categories of "b" depend on which value was replaced
    excat = [3, 2] if replace_dict["b"] == 1 else [1, 3]
    b = pd.Categorical(final_data[:, 1], categories=excat)
    expected = DataFrame({"a": a, "b": b})
    result = df.replace(replace_dict, 3)
    tm.assert_frame_equal(result, expected)
    with pytest.raises(AssertionError):
        # ensure non-inplace call does not affect original
        tm.assert_frame_equal(df, expected)
    df.replace(replace_dict, 3, inplace=True)
    tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
    "df, to_replace, exp",
    [
        (
            {"col1": [1, 2, 3], "col2": [4, 5, 6]},
            {4: 5, 5: 6, 6: 7},
            {"col1": [1, 2, 3], "col2": [5, 6, 7]},
        ),
        (
            {"col1": [1, 2, 3], "col2": ["4", "5", "6"]},
            {"4": "5", "5": "6", "6": "7"},
            {"col1": [1, 2, 3], "col2": ["5", "6", "7"]},
        ),
    ],
)
def test_replace_commutative(self, df, to_replace, exp):
    """GH 16051: chained replacements (4->5, 5->6, ...) apply to the
    original values only, for numeric and string keys alike."""
    # GH 16051
    # DataFrame.replace() overwrites when values are non-numeric
    # also added to data frame whilst issue was for series
    df = pd.DataFrame(df)
    expected = pd.DataFrame(exp)
    result = df.replace(to_replace)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "replacer",
    [
        pd.Timestamp("20170827"),
        np.int8(1),
        np.int16(1),
        np.float32(1),
        np.float64(1),
    ],
)
def test_replace_replacer_dtype(self, replacer):
    """GH 26632: the result dtype follows the replacement value's dtype,
    whatever numpy/pandas scalar type it is."""
    # GH26632
    df = pd.DataFrame(["a"])
    result = df.replace({"a": replacer, "b": replacer})
    expected = pd.DataFrame([replacer])
    tm.assert_frame_equal(result, expected)
def test_replace_after_convert_dtypes(self):
# GH31517
df = pd.DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64")
result = df.replace(1, 10)
expected = pd.DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
def test_replace_invalid_to_replace(self):
    """GH 18634: replace() must reject a callable with a TypeError."""
    frame = pd.DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]})
    pattern = (
        r"Expecting 'to_replace' to be either a scalar, array-like, "
        r"dict or None, got invalid type.*"
    )
    with pytest.raises(TypeError, match=pattern):
        frame.replace(lambda x: x.strip())
@pytest.mark.parametrize("dtype", ["float", "float64", "int64", "Int64", "boolean"])
@pytest.mark.parametrize("value", [np.nan, pd.NA])
def test_replace_no_replacement_dtypes(self, dtype, value):
# https://github.com/pandas-dev/pandas/issues/32988
df = pd.DataFrame(np.eye(2), dtype=dtype)
result = df.replace(to_replace=[None, -np.inf, np.inf], value=value)
tm.assert_frame_equal(result, df)
@pytest.mark.parametrize("replacement", [np.nan, 5])
def test_replace_with_duplicate_columns(self, replacement):
# GH 24798
result = pd.DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [7, 8, 9]})
result.columns = list("AAB")
expected = pd.DataFrame(
{"A": [1, 2, 3], "A1": [4, 5, 6], "B": [replacement, 8, 9]}
)
expected.columns = list("AAB")
result["B"] = result["B"].replace(7, replacement)
tm.assert_frame_equal(result, expected)
| |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 10:13:41 2017
@author: Niv Haim (Weizmann Institute of Science)
"""
import numpy as np
import numba as nb
from numpy.linalg import norm
from sim.utils import REASON_NONE, OrbitalParameters, get_dt0
# numba type specification for the SimState jitclass: (attribute, type)
# pairs. NOTE: the module-level helpers make_copy() and get_state_dict()
# iterate this list to enumerate every state attribute, so it must stay
# exhaustive and in sync with what the simulation code assigns.
spec = [
    # main arrays
    ('X', nb.double[:, :]),
    ('V', nb.double[:, :]),
    ('DT', nb.double[:]),
    ('T', nb.double[:]),
    # last steps arrays
    ('Xlast', nb.double[:, :]),
    ('Vlast', nb.double[:, :]),
    ('DTlast', nb.double[:]),
    ('Tlast', nb.double[:]),
    # closest approaches arrays
    ('Xca', nb.double[:, :]),
    ('Vca', nb.double[:, :]),
    ('Tca', nb.double[:]),
    ('Ica', nb.int64[:]),
    ('Jzeffca', nb.double[:]),
    # specific points arrays
    ('dE_max_x', nb.double[:]),
    ('dE_max_v', nb.double[:]),
    ('jz_eff_min_x', nb.double[:]),
    ('jz_eff_min_v', nb.double[:]),
    # indexes / flags
    ('i', nb.int64),
    ('idx', nb.int64),
    ('caidx', nb.int64),
    ('dE_max_i', nb.int64),
    ('nP', nb.int64),
    ('save_every_P_i', nb.int64),
    # parameters discovered during run
    ('steps_per_P', nb.int64),
    ('fin_reason', nb.int64),
    ('closest_approach_r', nb.double),
    ('dE_max', nb.double),
    ('jz_eff', nb.double),
    ('jz_eff_crossings', nb.int64),
    ('jz_eff_n', nb.int64),
    ('jz_eff_min', nb.double),
    ('jz_eff_mean', nb.double),
    ('jz_eff_M2', nb.double),
    # configuration variables
    # physical
    ('G', nb.double),
    ('m1', nb.double),
    ('m2', nb.double),
    ('m3', nb.double),
    ('a', nb.double),
    ('e', nb.double),
    ('M0_in', nb.double),
    ('M0_out', nb.double),
    ('inclination', nb.double),
    ('Omega', nb.double),
    ('omega', nb.double),
    ('rper_over_a', nb.double),
    ('eper', nb.double),
    # simulation
    ('dt00', nb.double),
    ('max_periods', nb.int64),
    ('save_every', nb.int64),
    ('dump_every', nb.int64),
    ('save_every_P', nb.int64),
    ('samples_per_Pcirc', nb.int64),
    ('save_last', nb.int64),
    ('rmax', nb.double),
    ('ca_saveall', nb.double),
    # computed from configuration variables
    ('f_in', nb.double),
    ('f_out', nb.double),
    ('E0', nb.double),
    ('U_init', nb.double),
    ('P_in', nb.double),
    ('P_out', nb.double),
    ('jz_eff0', nb.double),
    ('dt0', nb.double),
]
@nb.jitclass(spec)
class SimState(object):
    """Flat pre-allocated container for all simulation state (numba jitclass).

    Instances are operated on by the module-level helper functions below
    (inject_config_params, initialize_state, make_copy, chop_arrays,
    get_state_dict) rather than by methods, keeping the jitclass minimal.
    """

    def __init__(self, vsize, save_last):
        # define all arrays
        # Main trajectory storage: 9 rows per sample (presumably the three
        # bodies m1/m2/m3 x 3 spatial components — confirm against the
        # integrator), `vsize` samples.
        self.X = np.empty((9, vsize), dtype=np.double)
        self.V = np.empty((9, vsize), dtype=np.double)
        self.DT = np.empty(vsize, dtype=np.double)
        self.T = np.empty(vsize, dtype=np.double)
        # Ring buffers of the last `save_last` steps.
        self.Xlast = np.empty((9, save_last), dtype=np.double)
        self.Vlast = np.empty((9, save_last), dtype=np.double)
        self.DTlast = np.empty(save_last, dtype=np.double)
        self.Tlast = np.empty(save_last, dtype=np.double)
        # Closest-approach buffers: fixed capacity of 100000 events.
        self.Xca = np.empty((9, 100000), dtype=np.double)
        self.Vca = np.empty((9, 100000), dtype=np.double)
        self.Tca = np.empty(100000, dtype=np.double)
        self.Ica = np.empty(100000, dtype=np.int64)
        self.Jzeffca = np.empty(100000, dtype=np.double)
        # Snapshots at specific points of interest (zero-initialized).
        self.dE_max_x = np.zeros(9, dtype=np.double)
        self.dE_max_v = np.zeros(9, dtype=np.double)
        self.jz_eff_min_x = np.zeros(9, dtype=np.double)
        self.jz_eff_min_v = np.zeros(9, dtype=np.double)
def inject_config_params(s, G, m1, m2, m3, a, e, M0_in,
                         M0_out, inclination, Omega, omega, rper_over_a, eper,
                         dt00, max_periods, dump_every, save_every, save_every_P,
                         samples_per_Pcirc, save_last, rmax, ca_saveall):
    """Copy the configuration arguments verbatim onto the SimState ``s``.

    Pure attribute assignment; no validation or derived quantities here
    (those are computed later by initialize_state).
    """
    # dump args to properties
    s.G = G
    s.m1 = m1
    s.m2 = m2
    s.m3 = m3
    s.a = a
    s.e = e
    s.M0_in = M0_in
    s.M0_out = M0_out
    s.inclination = inclination
    s.Omega = Omega
    s.omega = omega
    s.rper_over_a = rper_over_a
    s.eper = eper
    s.dt00 = dt00
    s.max_periods = max_periods
    s.dump_every = dump_every
    s.save_every = save_every
    s.save_every_P = save_every_P
    s.samples_per_Pcirc = samples_per_Pcirc
    s.save_last = save_last
    s.rmax = rmax
    s.ca_saveall = ca_saveall
def initialize_state(s):
    """Initialize a freshly-constructed SimState for the start of a run.

    Should be called only the first time SimState is created.

    Derives the orbital elements via OrbitalParameters, seeds the
    integration bookkeeping (indices, extrema trackers, jz_eff running
    statistics) and writes the initial sample into the "last" and
    closest-approach buffers.

    s: SimState object
    """
    # compute orbital parameters from config params
    op = OrbitalParameters(G=s.G, m1=s.m1, m2=s.m2, m3=s.m3, e=s.e, a=s.a, M0_in=s.M0_in,
                           rper_over_a=s.rper_over_a, eper=s.eper, M0_out=s.M0_out,
                           inclination=s.inclination, Omega=s.Omega, omega=s.omega)
    # set orbital params
    s.f_in = op.f_in
    s.f_out = op.f_out
    s.E0 = op.E0
    s.jz_eff0 = op.jz_eff
    s.P_in = op.P_in
    s.P_out = op.P_out
    # set dt0 and U_init
    s.dt0 = get_dt0(s.G, s.m1, s.m2, op.a_in0, s.samples_per_Pcirc, s.dt00)
    s.U_init = - s.G * s.m1 * s.m2 / norm(op.a_in0)
    # initialize integration flags
    s.fin_reason = REASON_NONE
    # Fix: use np.inf instead of np.infty — the ``infty`` alias was
    # removed in NumPy 2.0, while ``np.inf`` exists in every version.
    s.closest_approach_r = np.inf
    s.save_every_P_i = 1
    s.i = s.idx = s.dE_max_i = 0
    s.nP = s.steps_per_P = s.dE_max = 0
    # jz_eff running statistics (n/mean/M2 look like Welford accumulators
    # — confirm against the update code)
    s.jz_eff = op.jz_eff
    s.jz_eff_crossings = 0
    s.jz_eff_min = np.inf
    s.jz_eff_mean = op.jz_eff
    s.jz_eff_n = 1
    s.jz_eff_M2 = 0
    # set initial simulation params
    s.Xlast[:, 0] = op.x0
    s.Vlast[:, 0] = op.v0
    s.DTlast[0] = 0
    s.Tlast[0] = 0
    # set first ca values
    s.Xca[:, 0] = op.x0
    s.Vca[:, 0] = op.v0
    s.Tca[0] = 0
    s.Ica[0] = 0
    s.Jzeffca[0] = op.jz_eff
    s.caidx = 1
def make_copy(s):
    """Return a new SimState carrying copies of every attribute of ``s``.

    Array-typed spec members are deep-copied so the duplicate's storage
    is detached from the source; scalar members are assigned directly.
    """
    capacity = max(s.idx, s.caidx)
    duplicate = SimState(capacity, s.save_last)
    for attr_name, attr_type in spec:
        value = getattr(s, attr_name)
        if isinstance(attr_type, nb.types.npytypes.Array):
            value = value.copy()
        setattr(duplicate, attr_name, value)
    return duplicate
def chop_arrays(s):
    """Trim the storage arrays of ``s`` down to their used prefixes.

    ``s.idx`` bounds the main trajectory arrays and ``s.caidx`` bounds
    the closest-approach arrays; everything past those indexes is
    unwritten scratch space.
    """
    used = s.idx
    s.X = s.X[:, :used]
    s.V = s.V[:, :used]
    s.DT = s.DT[:used]
    s.T = s.T[:used]
    ca_used = s.caidx
    s.Ica = s.Ica[:ca_used]
    s.Xca = s.Xca[:, :ca_used]
    s.Vca = s.Vca[:, :ca_used]
    s.Tca = s.Tca[:ca_used]
    s.Jzeffca = s.Jzeffca[:ca_used]
def get_state_dict(s):
    """Collect every spec-listed attribute of ``s`` into a plain dict.

    Note: nothing is written to disk here (despite the historical
    wording); the caller is responsible for persisting the result.
    Side effect: chops the storage arrays of ``s`` in place first.
    """
    chop_arrays(s)
    # # fix (shift) lasts arrays
    # s.Xlast = np.roll(s.Xlast, - s.i % s.save_last, axis=1)
    # s.Vlast = np.roll(s.Vlast, - s.i % s.save_last, axis=1)
    # s.Tlast = np.roll(s.Tlast, - s.i % s.save_last)
    # dump state variables into dictionary
    state_d = {}
    for name, _ in spec:
        state_d[name] = getattr(s, name)
    return state_d
| |
# Copyright 2012 Pinterest.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import six
from pymemcache.client.base import Client
from pymemcache.exceptions import (
MemcacheIllegalInputError,
MemcacheClientError
)
@pytest.mark.integration()
def test_get_set(client_class, host, port, socket_module):
    """Round-trip set/get, noreply variants and get_many (live server)."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    assert client.get('key') is None
    client.set(b'key', b'value', noreply=False)
    assert client.get(b'key') == b'value'
    client.set(b'key2', b'value2', noreply=True)
    assert client.get(b'key2') == b'value2'
    many = client.get_many([b'key', b'key2'])
    assert many == {b'key': b'value', b'key2': b'value2'}
    assert client.get_many([]) == {}
@pytest.mark.integration()
def test_add_replace(client_class, host, port, socket_module):
    """add only stores missing keys; replace only stores existing ones."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    # add succeeds on a missing key and is then a no-op
    assert client.add(b'key', b'value', noreply=False) is True
    assert client.get(b'key') == b'value'
    assert client.add(b'key', b'value2', noreply=False) is False
    assert client.get(b'key') == b'value'
    # replace fails on a missing key, succeeds on an existing one
    assert client.replace(b'key1', b'value1', noreply=False) is False
    assert client.get(b'key1') is None
    assert client.replace(b'key', b'value2', noreply=False) is True
    assert client.get(b'key') == b'value2'
@pytest.mark.integration()
def test_append_prepend(client_class, host, port, socket_module):
    """append/prepend require the key to already exist."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    assert client.append(b'key', b'value', noreply=False) is False
    assert client.get(b'key') is None
    assert client.set(b'key', b'value', noreply=False) is True
    assert client.append(b'key', b'after', noreply=False) is True
    assert client.get(b'key') == b'valueafter'
    assert client.prepend(b'key1', b'value', noreply=False) is False
    assert client.get(b'key1') is None
    assert client.prepend(b'key', b'before', noreply=False) is True
    assert client.get(b'key') == b'beforevalueafter'
@pytest.mark.integration()
def test_cas(client_class, host, port, socket_module):
    """cas: None for a missing key, False on a stale token, True otherwise."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    assert client.cas(b'key', b'value', b'1', noreply=False) is None
    assert client.set(b'key', b'value', noreply=False) is True
    assert client.cas(b'key', b'value', b'1', noreply=False) is False
    value, token = client.gets(b'key')
    assert value == b'value'
    assert client.cas(b'key', b'value1', token, noreply=False) is True
    # the token is now stale, so a second cas with it must fail
    assert client.cas(b'key', b'value2', token, noreply=False) is False
@pytest.mark.integration()
def test_gets(client_class, host, port, socket_module):
    """gets returns (None, None) for a miss and (value, token) for a hit."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    assert client.gets(b'key') == (None, None)
    assert client.set(b'key', b'value', noreply=False) is True
    assert client.gets(b'key')[0] == b'value'
@pytest.mark.integration()
def test_delete(client_class, host, port, socket_module):
    """delete returns False for a missing key and True after a set.

    Fix: this test carried a one-off ``@pytest.mark.delete()`` marker,
    unlike every sibling test's ``integration`` marker, so it was
    silently excluded from ``-m integration`` runs (and the unregistered
    marker warns under strict-markers).
    """
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    result = client.delete(b'key', noreply=False)
    assert result is False
    result = client.get(b'key')
    assert result is None
    result = client.set(b'key', b'value', noreply=False)
    assert result is True
    result = client.delete(b'key', noreply=False)
    assert result is True
    result = client.get(b'key')
    assert result is None
@pytest.mark.integration()
def test_incr_decr(client_class, host, port, socket_module):
    """incr/decr arithmetic plus error handling for non-numeric deltas.

    Fix: this test instantiated ``Client`` directly, ignoring the
    ``client_class`` fixture that every sibling test uses, so the
    parametrized client implementations were never exercised here.
    """
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    result = client.incr(b'key', 1, noreply=False)
    assert result is None
    result = client.set(b'key', b'0', noreply=False)
    assert result is True
    result = client.incr(b'key', 1, noreply=False)
    assert result == 1

    def _bad_int():
        client.incr(b'key', b'foobar')

    with pytest.raises(MemcacheClientError):
        _bad_int()
    result = client.decr(b'key1', 1, noreply=False)
    assert result is None
    result = client.decr(b'key', 1, noreply=False)
    assert result == 0
    result = client.get(b'key')
    assert result == b'0'
@pytest.mark.integration()
def test_misc(client_class, host, port, socket_module):
    """Placeholder integration test: only verifies flush_all succeeds.

    Fix: used ``Client`` directly instead of the ``client_class``
    fixture, ignoring the parametrized client implementation.
    """
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
@pytest.mark.integration()
def test_serialization_deserialization(host, port, socket_module):
    """A JSON serializer/deserializer pair must round-trip a dict."""
    def _serialize(key, value):
        # flag 1 marks JSON-encoded payloads
        return json.dumps(value).encode('ascii'), 1

    def _deserialize(key, value, flags):
        if flags == 1:
            return json.loads(value.decode('ascii'))
        return value

    client = Client((host, port), serializer=_serialize,
                    deserializer=_deserialize, socket_module=socket_module)
    client.flush_all()
    payload = {'a': 'b', 'c': ['d']}
    client.set(b'key', payload)
    assert client.get(b'key') == payload
@pytest.mark.integration()
def test_errors(client_class, host, port, socket_module):
    """Illegal keys and values must raise the appropriate client errors."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()

    with pytest.raises(MemcacheIllegalInputError):
        client.set(b'key with spaces', b'value', noreply=False)

    with pytest.raises(MemcacheIllegalInputError):
        client.set(b'\r\nflush_all', b'value', noreply=False)

    with pytest.raises(MemcacheClientError):
        client.set(b'x' * 1024, b'value', noreply=False)

    with pytest.raises(MemcacheClientError):
        client.set(six.u('\u0FFF'), b'value', noreply=False)

    with pytest.raises(MemcacheClientError):
        client.get(six.u('\u0FFF'))

    with pytest.raises(MemcacheClientError):
        client.set(b'key', six.u('\u0FFF'), noreply=False)
| |
#! /usr/bin/env python
#
# Copyright (c) 2015 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
## Tool to automatically download an image into the HAPS board and boot it
#
from __future__ import print_function
from util import error
import os
import subprocess
import threading
import Queue
import serial
import termios
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.FT232H as FT232H
# haps_monitor class "monitor" status values
HAPS_MONITOR_TIMEOUT = 0
HAPS_MONITOR_STOP = 1
HAPS_MONITOR_PASS = 2
HAPS_MONITOR_FAIL = 3
# HAPS character timeout (1 second wait on characters, in 0.1 sec units)
# NOTE(review): appears unused in this file — verify before removing.
HAPS_CHAR_TIMEOUT = 10
# HAPS boot timeout (~30 sec in character timeout counts)
HAPS_BOOT_TIMEOUT_COUNT = 30
# Names of the scratch JLink commander scripts written by
# create_jlink_scripts() and consumed by the jtag_*_phase functions.
JLINK_RESET_SCRIPT = "cmd-jlink-start-1"  # "cmd-jlink-start-1"
JLINK_POST_RESET_SCRIPT = "cmd-jlink-start-2"  # "cmd-jlink-start-2"
# e-Fuse settings
# Default e-Fuse register values (all zero). Keys correspond to the
# registers programmed in create_jlink_scripts(); CMS*, SCR and
# JTAG_CONTROL are declared but not written there.
efuses = {
    "VID": 0x00000000,
    "PID": 0x00000000,
    "SN0": 0x00000000,
    "SN1": 0x00000000,
    "IMS0": 0x00000000,
    "IMS1": 0x00000000,
    "IMS2": 0x00000000,
    "IMS3": 0x00000000,
    "IMS4": 0x00000000,
    "IMS5": 0x00000000,
    "IMS6": 0x00000000,
    "IMS7": 0x00000000,
    "IMS8": 0x00000000,
    "CMS0": 0x00000000,
    "CMS1": 0x00000000,
    "CMS2": 0x00000000,
    "CMS3": 0x00000000,
    "CMS4": 0x00000000,
    "CMS5": 0x00000000,
    "CMS6": 0x00000000,
    "SCR": 0x00000000,
    "JTAG_CONTROL": 0x00000000,
    "ECCERROR": 0x00000000}
# AdaFruit FT232H GPIO pins.
# Pins 0 to 7 = D0 to D7.
# Pins 8 to 15 = C0 to C7.
#
# |<----- MPSSE ----->|
# Pin Signal GPIO UART SPI I2C
# --- ------ ---- ---- --- ---
# J1.1 +5V - - - -
# J1.2 Gnd - - - -
# J1.3 D0 0 TxD ClkOut SCL
# J1.4 D1 1 RxD MOSI \_ SDA
# J1.5 D2 2 RTS# MISO /
# J1.6 D3 3 CTS# SelOut
# J1.7 D4 4 DTR#
# J1.8 D5 5 DSR#
# J1.9 D6 6 DCD#
# J1.10 D7 7 RI#
#
# J2.1 C0 8
# J2.2 C1 9
# J2.3 C2 10
# J2.4 C3 11
# J2.5 C4 12
# J2.6 C5 13
# J2.7 C6 14
# J2.8 C7* 15
# J2.9 C8** - - - -
# J2.10 C9** - - - -
#
# * C7 connected to voltage divider
# ** C8, C9 drive red, green LEDs respectively
# The daughterboard reset line has a pull-up to 3v3. The "operate" position
# of switch DW1.4 is "ON" which shorts it to ground (i.e., "Run" = Low,
# "Reset" = high). Even though the FT232H can nominally drive the IO to 3v3,
# it would be better to instead simply tristate the IO and let the pull-up
# do the work.
# Daughterboard reset GPIO.
# Note: To simplify wiring, we use an IO pin on the connector having a
# ground pin
SPIROM_RESET_GPIO = 0
# Global to note that the Adafruit GPIO adapter has been initialized
# (set by init_adafruit_ft232h()).
adafruit_initialized = False
# Reset mechanisms: prompt the operator (MANUAL) or drive the FT232H GPIO.
RESET_MANUAL = 0
RESET_FT232H = 1
reset_mode = RESET_FT232H
# FT232H device handle, created lazily by init_adafruit_ft232h().
ft232h = None
def create_jlink_scripts(script_path, binfile, efuses):
    """Write the two scratch JLink commander scripts.

    The first (JLINK_RESET_SCRIPT) is run while the daughterboard is held
    in reset; the second (JLINK_POST_RESET_SCRIPT) halts the CPU, loads
    ``binfile``, programs the e-Fuse shadow registers and restarts the
    Cortex.
    """
    reset_lines = [
        "w4 0xE000EDFC 0x01000001\n",
        "w4 0x40000100 0x1\n",
        "q\n",
    ]
    with open(os.path.join(script_path, JLINK_RESET_SCRIPT), "w") as fd:
        fd.writelines(reset_lines)

    post_reset_lines = [
        "halt\n",
        "loadbin {0:s} 0x00000000\n".format(binfile),
        "w4 0xE000EDFC 0x01000000\n",
        # Set ARA_VID / ARA_PID:
        "w4 0x40000700 0x{0:08x}\n".format(efuses["VID"]),
        "w4 0x40000704 0x{0:08x}\n".format(efuses["PID"]),
        # Set Serial No (SN0, SN1):
        "w4 0x40084300 0x{0:08x}\n".format(efuses["SN0"]),
        "w4 0x40084304 0x{0:08x}\n".format(efuses["SN1"]),
        # Set IMS (IMS0..IMS8):
        "w4 0x40084100 0x{0:08x}\n".format(efuses["IMS0"]),
        "w4 0x40084104 0x{0:08x}\n".format(efuses["IMS1"]),
        "w4 0x40084108 0x{0:08x}\n".format(efuses["IMS2"]),
        "w4 0x4008410C 0x{0:08x}\n".format(efuses["IMS3"]),
        "w4 0x40084110 0x{0:08x}\n".format(efuses["IMS4"]),
        "w4 0x40084114 0x{0:08x}\n".format(efuses["IMS5"]),
        "w4 0x40084118 0x{0:08x}\n".format(efuses["IMS6"]),
        "w4 0x4008411c 0x{0:08x}\n".format(efuses["IMS7"]),
        "w4 0x40084120 0x{0:08x}\n".format(efuses["IMS8"]),
        # Note: CMS, SCR and JTAG_CONTROL not used
        "w4 0x400004c4 0x{0:08x}\n".format(efuses["ECCERROR"]),
        # Pulse the Cortex reset
        "w4 0x40000000 0x1\n",
        "w4 0x40000100 0x1\n",
        "q\n",
    ]
    with open(os.path.join(script_path, JLINK_POST_RESET_SCRIPT), "w") as fd:
        fd.writelines(post_reset_lines)
def remove_jlink_scripts(script_path):
    """Delete the scratch JLink scripts, if present, from script_path."""
    for script_name in (JLINK_RESET_SCRIPT, JLINK_POST_RESET_SCRIPT):
        target = os.path.join(script_path, script_name)
        if os.path.isfile(target):
            os.remove(target)
def haps_board_ready(chipit_name):
    """Wait for the HAPS board to finish initializing.

    Monitors the ChipIT TTY and returns when the "HAPS62>" prompt is
    seen. Actively pokes for the prompt (writes CRLF) and retries once
    after ~30 s of silence.

    chipit_name: serial device name of the ChipIT supervisor TTY
    Returns True when synchronized, False if not.
    """
    have_prompt = False
    issued_boot_msg = False
    with serial.Serial(chipit_name, 230400, serial.EIGHTBITS,
                       serial.PARITY_NONE, serial.STOPBITS_ONE, 1) as chipit:
        # Scan TTY for the "HAPS62>" prompt
        num_timeouts = 0
        num_attempts = 0
        buffer = ""
        try:
            while (not have_prompt) and (num_attempts < 2):
                # Poke HAPS.
                # If it's already booted, it'll issue a prompt which we'll
                # capture immediately. If not, the poke gets lost in the
                # aether while the HAPS boots up. The boot sequence ends in
                # the HAPS prompt
                chipit.write("\r\n")
                # Look for the prompt, waiting through the bootup sequence
                # as needed
                while not have_prompt:
                    ch = chipit.read(1)
                    if ch:
                        buffer += ch
                        num_timeouts = 0
                        if "HAPS62>" in buffer:
                            have_prompt = True
                            break
                        if ch == "\n":
                            # We've already checked for the prompt, so just
                            # purge the buffer
                            buffer = ""
                            if not issued_boot_msg:
                                print("Waiting for HAPS...")
                                issued_boot_msg = True
                    else:
                        # Read timed out
                        num_timeouts += 1
                        if num_timeouts > HAPS_BOOT_TIMEOUT_COUNT:
                            print("No response from HAPS, retrying...")
                            # set up for the next attempt
                            print("Please ensure the HAPS board is powered")
                            num_attempts += 1
                            num_timeouts = 0
                            break
        except IOError:
            pass
    return have_prompt
def init_adafruit_ft232h():
    """One-time setup of the AdaFruit FT232H breakout used for reset control.

    Side effects: disables the built-in FTDI serial driver, grabs the
    first FT232H device found (module-global ``ft232h``), drives the
    SPIROM reset GPIO low ("Run"), and sets ``adafruit_initialized``.
    """
    global ft232h, adafruit_initialized
    if not adafruit_initialized:
        # Temporarily disable the built-in FTDI serial driver on Mac & Linux
        # platforms.
        FT232H.use_FT232H()
        # Create an FT232H object that grabs the first available FT232H device
        # found.
        ft232h = FT232H.FT232H()
        # The daughterboard reset line has a pull-up to 3v3. The "operate"
        # position of switch DW1.4 is "ON" which shorts it to ground (i.e.,
        # "Run" = Low, "Reset" = high). Even though the FT232H can nominally
        # drive the IO to 3v3, it would be better to instead simply tristate
        # the IO and let the pull-up do the work.
        # For initialization, we'll drive it low.
        ft232h.setup(SPIROM_RESET_GPIO, GPIO.OUT)
        ft232h.output(SPIROM_RESET_GPIO, GPIO.LOW)
        # Note that we're now initialized
        adafruit_initialized = True
def reset_spirom_daughterboard_adafruit_ft232h(apply_reset):
    """Apply or remove the SPIROM daughterboard reset via the FT232H GPIO.

    apply_reset: True tristates the pin so the board's pull-up drives the
    line high ("Reset"); False drives the pin low ("Run").
    Initializes the FT232H adapter on first use.
    """
    global ft232h, adafruit_initialized
    if not adafruit_initialized:
        init_adafruit_ft232h()
    if apply_reset:
        # For "Reset", configure as input and let daughterboard pull-up
        # drive the line high.
        ft232h.setup(SPIROM_RESET_GPIO, GPIO.IN)
    else:
        # For "Run", configure as an output and drive low
        ft232h.setup(SPIROM_RESET_GPIO, GPIO.OUT)
        ft232h.output(SPIROM_RESET_GPIO, GPIO.LOW)
def reset_spirom_daughterboard_manual(apply_reset):
    """Apply or remove the daughterboard reset by prompting the operator.

    Blocks until the user confirms the DW1.4 switch position with Return.
    """
    position = "OFF" if apply_reset else "ON"
    raw_input("set DW1.4 to the '%s' position and press Return" % position)
def reset_spirom_daughterboard(apply_reset, reset_mode):
    """Route the daughterboard reset request to the configured mechanism.

    Raises ValueError for an unrecognized reset_mode.
    """
    handlers = {
        RESET_MANUAL: reset_spirom_daughterboard_manual,
        RESET_FT232H: reset_spirom_daughterboard_adafruit_ft232h,
    }
    if reset_mode not in handlers:
        raise ValueError("unknown daughterboard reset mode:", reset_mode)
    handlers[reset_mode](apply_reset)
def jtag_reset_phase(jlink_serial_no, script_path, reset_mode):
    """Apply the reset and run the "during-reset" JTAG script
    (JLINK_RESET_SCRIPT).

    Notes:
    1. Current version of JLinkExe doesn't return non-zero status on
       error, so "check_call" is there for future releases.
    2. We use "check_output" to hide the debug spew from JLinkExe, but
       otherwise have no need for it.
    """
    reset_spirom_daughterboard(True, reset_mode)
    subprocess.check_output(["JLinkExe", "-SelectEmuBySN", jlink_serial_no,
                             "-CommanderScript",
                             os.path.join(script_path, JLINK_RESET_SCRIPT)])
def jtag_post_reset_phase(jlink_serial_no, script_path, reset_mode):
    """Release the daughterboard reset and run the "post-reset" JTAG
    script (JLINK_POST_RESET_SCRIPT).

    Parses the JLinkExe output for halt and download failures and raises
    IOError on any of them (JLinkExe itself doesn't return non-zero
    status on error, so output parsing is the only detection available).
    """
    reset_spirom_daughterboard(False, reset_mode)
    spew = subprocess.check_output(["JLinkExe", "-SelectEmuBySN",
                                    jlink_serial_no, "-CommanderScript",
                                    os.path.join(script_path,
                                                 JLINK_POST_RESET_SCRIPT)])
    # Check the JLinkExe debug spew for errors
    if "WARNING: CPU could not be halted" in spew:
        raise IOError("CPU could not be halted")
    for line in spew.splitlines():
        if line.startswith("Downloading file ["):
            # idiom fix: "x not in y" instead of "not x in y"
            if "]...O.K." not in line:
                if "Could not find emulator with USB serial number" in spew:
                    error("Couldn't find j-Link unit", jlink_serial_no,
                          "- is it plugged in?")
                raise IOError("Unable to download [" + line.partition("[")[2])
def download_and_boot_haps(chipit_tty, script_path, jlink_sn, reset_mode,
                           bootrom_image_pathname, efuses):
    """ Wait for HAPS board readiness, then download and run a BootRom image.
    chipit_tty: typically "/dev/ttyUSBx"
    script_path: The path to where the JLink scripts will be written
    jlink_sn: The serial number of the JLink JTAG module (found on the bottom)
    reset_mode: RESET_MANUAL or RESET_FT232H
    bootrom_image_pathname: absolute or relative pathname to the BootRom.bin
    file ("~" is not allowed)
    efuses: A list of eFuse names and values to write (see the global "efuses")
    Raises ValueError or IOError on failure, as appropriate
    """
    if '~' in bootrom_image_pathname:
        # Fix: corrected typo in the user-facing message ("pathanme")
        raise ValueError("BootRom pathname cannot contain '~'")
    # Wait for the HAPS board to finish initializing
    if haps_board_ready(chipit_tty):
        # Create (scratch) JLink scripts from the efuse list and
        # bootrom_image file. (Required because JLink doesn't support
        # symbolic substitution in its script files)
        create_jlink_scripts(script_path, bootrom_image_pathname, efuses)
        # Go through the JTAG download and boot sequence
        jtag_reset_phase(jlink_sn, script_path, reset_mode)
        jtag_post_reset_phase(jlink_sn, script_path, reset_mode)
        # Clean up the scratch JLink scripts
        remove_jlink_scripts(script_path)
    else:
        raise IOError("HAPS board unresponsive")
class WorkerThread(threading.Thread):
    """ A worker thread to read the daughterboard dbgserial in the background

    Output is done by placing captured lines (sans newline) into the
    Queue passed in as result_q.
    Ask the thread to stop by calling its join() method.
    """
    def __init__(self, dbgser_tty_name, result_q, stop_strings=None):
        """
        dbgser_tty_name: TTY device to read (e.g. "/dev/ttyUSBx")
        result_q: queue receiving each captured line
        stop_strings: stored for callers; not consulted by run() itself
        """
        super(WorkerThread, self).__init__()
        self.dbgser_tty_name = dbgser_tty_name
        self.result_q = result_q
        self.stop_strings = stop_strings
        self.stoprequest = threading.Event()

    def run(self):
        """Read dbgserial a character at a time, pushing lines to result_q."""
        if os.name != "posix":
            # Fix: an unreachable "return" used to follow this raise; the
            # raise alone is the intended non-POSIX guard.
            raise ValueError("Can only be run on Posix systems")
        buffer = ""
        # While PySerial would be preferable and more machine-independant,
        # it does not support echo suppression
        with open(self.dbgser_tty_name, 'r+') as dbgser:
            # Config the debug serial port: 115200 baud, non-canonical,
            # no echo, 1-second (VTIME=10) non-blocking reads.
            oldattrs = termios.tcgetattr(dbgser)
            newattrs = termios.tcgetattr(dbgser)
            newattrs[4] = termios.B115200  # ispeed
            newattrs[5] = termios.B115200  # ospeed
            newattrs[3] = newattrs[3] & ~termios.ICANON & ~termios.ECHO
            newattrs[6][termios.VMIN] = 0
            newattrs[6][termios.VTIME] = 10
            termios.tcsetattr(dbgser, termios.TCSANOW, newattrs)
            # As long as we weren't asked to stop, try to capture dbgserial
            # output and push each line up the result queue.
            try:
                while not self.stoprequest.is_set():
                    ch = dbgser.read(1)
                    if ch:
                        if ch == "\n":
                            # Push the line (sans newline) into our queue
                            # and clear the buffer for the next line.
                            self.result_q.put(buffer)
                            buffer = ""
                        elif ch != "\r":
                            buffer += ch
            except IOError:
                pass
            finally:
                # Restore previous settings
                termios.tcsetattr(dbgser, termios.TCSAFLUSH, oldattrs)
                # Flush any partial buffer
                if buffer:
                    self.result_q.put(buffer)

    def join(self, timeout=None):
        # Automatically stop our selves when the client joins to us
        self.stoprequest.set()
        super(WorkerThread, self).join(timeout)
def download_and_boot_haps_capture(chipit_tty, script_path, jlink_sn,
                                   reset_mode, bootrom_image_pathname, efuses,
                                   dbgser_tty_name, timeout,
                                   pass_strings, fail_strings, stop_strings):
    """Wait for HAPS board, then download/run a BootRom image, capturing output
    This is a superset of "download_and_boot_haps" that captures the debug
    serial output
    Arguments:
    chipit_tty:
        The TTY used by the HAPS board ChipIT supervisor (typically
        "/dev/ttyUSBx")
    script_path:
        The path to where the JLink scripts will be written
    jlink_sn:
        The serial number of the JLink JTAG module (found on the bottom
        of the JLink module)
    bootrom_image_pathname:
        Absolute or relative pathname to the BootRom.bin file ("~" is
        not allowed)
    efuses:
        A list of eFuse names and values to write (see the global
        "efuses")
    dbgser_tty_name:
        The TTY used by the daugherboard debug serial output
    timeout:
        How long, in seconds, to wait before concluding that serial
        output from the BootRom has ceased.
    pass_strings:
        NOTE(review): accepted but never consulted in this function —
        confirm whether capture should also stop on pass strings.
    fail_strings:
        (optional) List of failure strings; capture stops on the first
        line containing any of them.
    stop_strings:
        (optional) List of strings to look for in the debug spew. If any
        are encountered, capture stops. (The stop string is retained/
        outputed)
    Returns: A list of the debug spew, one line per entry.
    """
    # Start the debug serial reader background thread
    result_q = Queue.Queue()
    dbgser_monitor = WorkerThread(dbgser_tty_name, result_q)
    dbgser_monitor.start()
    # Download and launch the test image
    download_and_boot_haps(chipit_tty, script_path, jlink_sn, reset_mode,
                           bootrom_image_pathname, efuses)
    # Harvest the debug serial until we see a stop string or it times out.
    stop = False
    capture = []
    while not stop:
        # Use a blocking 'get' from the queue
        try:
            result = result_q.get(True, timeout)
        except Queue.Empty:
            stop = True
        else:
            # Display/capture the line of debug spew
            capture.append(result)
            # Check for landmarks in the debug spew
            if fail_strings:
                # Stop on any failure string
                for term in fail_strings:
                    if term in result:
                        stop = True
                        break
            if stop_strings:
                # Stop on any stop string
                for term in stop_strings:
                    if term in result:
                        stop = True
                        break
    # Stop our worker thread (join() sets its stop event first)
    dbgser_monitor.join()
    return capture
class haps_capture_monitor(object):
""" Encapsulation of a HAPS download-boot-monitor-stop cycle
"""
def __init__(self, chipit_tty, script_path, jlink_sn, reset_mode,
             bootrom_image_pathname, efuses, dbgser_tty_name, timeout,
             fail_strings, stop_strings):
    """Wait for HAPS board, then download/run a BootRom image
    Use "haps_capture_monitor.monitor" to monitor the debug spew
    Arguments:
    chipit_tty:
        The TTY used by the HAPS board ChipIT supervisor (typically
        "/dev/ttyUSBx")
    script_path:
        The path to where the scratch JLink scripts will be written
    jlink_sn:
        The serial number of the JLink JTAG module (found on the bottom
        of the JLink module)
    reset_mode:
        RESET_MANUAL or RESET_FT232H
    bootrom_image_pathname:
        Absolute or relative pathname to the BootRom.bin file ("~" is
        not allowed)
    efuses:
        A list of eFuse names and values to write (see the global
        "efuses")
    dbgser_tty_name:
        The TTY used by the daugherboard debug serial output
    timeout:
        How long, in seconds, to wait before concluding that serial
        output from the BootRom has ceased. (This will result in a
        "t"imeout status.)
    fail_strings:
        List of failure strings to look for in the debug spew. If any
        are encountered, capture stops.
    stop_strings:
        List of strings to look for in the debug spew. If any
        are encountered, capture stops.
    """
    self.chipit_tty = chipit_tty
    self.script_path = script_path
    self.jlink_sn = jlink_sn
    self.reset_mode = reset_mode
    self.bootrom_image_pathname = bootrom_image_pathname
    self.efuses = efuses
    self.dbgser_tty_name = dbgser_tty_name
    self.timeout = timeout
    self.fail_strings = fail_strings
    self.stop_strings = stop_strings
    # placeholders; real values are assigned immediately below
    self.result_q = None
    self.dbgser_monitor = None
    # Start the debug serial reader background thread
    self.result_q = Queue.Queue()
    self.dbgser_monitor = WorkerThread(self.dbgser_tty_name, self.result_q)
    self.dbgser_monitor.start()
    # Download and launch the test image
    download_and_boot_haps(self.chipit_tty, self.script_path,
                           self.jlink_sn, self.reset_mode,
                           self.bootrom_image_pathname, self.efuses)
def __del__(self):
""" Stop our worker thread """
# Stop our worker thread
if self.dbgser_monitor:
self.dbgser_monitor.join()
self.dbgser_monitor = None
self.result_q = None
def __enter__(self):
""" Compatability with 'with' statement """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" Compatability with 'with' statement """
self.__del__()
def monitor(self, pass_strings=None):
"""Capture output from HAPS board until encountering a landmark string
Parameters:
pass_strings An optional set of landmark strings to check for a
"test passed" condition. Since the norm for pass strings is
that all must be present for the test run to succeed, the
caller typically removes strings from this list as we encounter
them. Since the fail and stop strings operate on first-
occurrance, they can be treated as static for the life of the
test and are cached in the class.
Returns: On encountering any landmark string, returns a 3-element
tuple consisting of:
- The reason the capture stopped (timeout, stop, pass or fail)
- The index into the appropriate xxx_strings array for the matched
string. (This will be zero in the case of a test timeout)
- A list of the debug spew captured thus far, one line per entry.
"""
# Harvest the debug serial until we see a landmark string or it
# times out.
stop = False
capture = []
status = HAPS_MONITOR_TIMEOUT
index = 0
while not stop:
# Use a blocking 'get' from the queue
try:
result = self.result_q.get(True, self.timeout)
except Queue.Empty:
# Timeout - test died "silently"
stop = True
else:
# Save the line of debug spew
capture.append(result)
# Check for landmarks in the debug spew
if pass_strings:
# Stop on a pass string
for index, term in enumerate(pass_strings):
if term in result:
status = HAPS_MONITOR_PASS
stop = True
break
elif self.fail_strings and not stop:
# Stop on any failure string
for index, term in enumerate(self.fail_strings):
if term in result:
status = HAPS_MONITOR_FAIL
stop = True
break
elif self.stop_strings and not stop:
# Stop on any stop string
for index, term in enumerate(self.stop_strings):
if term in result:
status = HAPS_MONITOR_STOP
stop = True
break
return [status, index, capture]
| |
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cinder import context
from cinder.i18n import _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vmax_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class EMCVMAXFCDriver(driver.FibreChannelDriver):
    """EMC FC Drivers for VMAX using SMI-S.

    Version history:
        1.0.0 - Initial driver
        1.1.0 - Multiple pools and thick/thin provisioning,
                performance enhancement.
        2.0.0 - Add driver requirement functions
        2.1.0 - Add consistency group functions
    """

    VERSION = "2.1.0"

    def __init__(self, *args, **kwargs):
        super(EMCVMAXFCDriver, self).__init__(*args, **kwargs)
        # All protocol-independent work is delegated to the shared
        # EMCVMAXCommon helper, configured here for the FC protocol.
        self.common = emc_vmax_common.EMCVMAXCommon(
            'FC',
            configuration=self.configuration)
        # May be None when no FC SAN lookup service is configured
        # (the pre-zoned case - see _build_initiator_target_map).
        self.zonemanager_lookup_service = fczm_utils.create_lookup_service()

    def check_for_setup_error(self):
        pass

    def create_volume(self, volume):
        """Creates a EMC(VMAX/VNX) volume."""
        volpath = self.common.create_volume(volume)
        model_update = {}
        # Persist the backend path so later operations can locate the device.
        volume['provider_location'] = six.text_type(volpath)
        model_update['provider_location'] = volume['provider_location']
        return model_update

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        volpath = self.common.create_volume_from_snapshot(volume, snapshot)
        model_update = {}
        volume['provider_location'] = six.text_type(volpath)
        model_update['provider_location'] = volume['provider_location']
        return model_update

    def create_cloned_volume(self, volume, src_vref):
        """Creates a cloned volume."""
        volpath = self.common.create_cloned_volume(volume, src_vref)
        model_update = {}
        volume['provider_location'] = six.text_type(volpath)
        model_update['provider_location'] = volume['provider_location']
        return model_update

    def delete_volume(self, volume):
        """Deletes an EMC volume."""
        self.common.delete_volume(volume)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        ctxt = context.get_admin_context()
        # The parent volume id is embedded in the snapshot's volume_name,
        # which is expected to look like "<prefix>-<volume id>"; everything
        # after the first '-' is treated as the id.
        volumename = snapshot['volume_name']
        index = volumename.index('-')
        volumeid = volumename[index + 1:]
        volume = self.db.volume_get(ctxt, volumeid)
        volpath = self.common.create_snapshot(snapshot, volume)
        model_update = {}
        snapshot['provider_location'] = six.text_type(volpath)
        model_update['provider_location'] = snapshot['provider_location']
        return model_update

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        ctxt = context.get_admin_context()
        # Same "<prefix>-<volume id>" parsing as in create_snapshot.
        volumename = snapshot['volume_name']
        index = volumename.index('-')
        volumeid = volumename[index + 1:]
        volume = self.db.volume_get(ctxt, volumeid)
        self.common.delete_snapshot(snapshot, volume)

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        pass

    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        pass

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        pass

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        pass

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        Assign any created volume to a compute node/host so that it can be
        used from that host.

        The driver returns a driver_volume_type of 'fibre_channel'.
        The target_wwn can be a single entry or a list of wwns that
        correspond to the list of remote wwn(s) that will export the volume.
        Example return values:
            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': '1234567890123',
                }
            }
            or
            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': ['1234567890123', '0987654321321'],
                }
            }
        """
        device_info = self.common.initialize_connection(
            volume, connector)
        device_number = device_info['hostlunid']
        storage_system = device_info['storagesystem']
        target_wwns, init_targ_map = self._build_initiator_target_map(
            storage_system, volume, connector)
        data = {'driver_volume_type': 'fibre_channel',
                'data': {'target_lun': device_number,
                         'target_discovered': True,
                         'target_wwn': target_wwns,
                         'initiator_target_map': init_targ_map}}
        LOG.debug("Return FC data for zone addition: %(data)s.",
                  {'data': data})
        return data

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector.

        Return empty data if other volumes are in the same zone.
        The FibreChannel ZoneManager doesn't remove zones
        if there isn't an initiator_target_map in the
        return of terminate_connection.

        :param volume: the volume object
        :param connector: the connector object
        :returns: dict -- the target_wwns and initiator_target_map if the
            zone is to be removed, otherwise empty
        """
        # NOTE(review): this initial value is always overwritten below.
        data = {}
        # NOTE(review): provider_location is a stringified dict and eval()
        # on it is unsafe if the value could ever be attacker-influenced;
        # ast.literal_eval would be the safe equivalent. Flagged only.
        loc = volume['provider_location']
        name = eval(loc)
        storage_system = name['keybindings']['SystemName']
        LOG.debug("Start FC detach process for volume: %(volume)s.",
                  {'volume': volume['name']})
        mvInstanceName = self.common.get_masking_view_by_volume(
            volume, connector)
        data = {'driver_volume_type': 'fibre_channel',
                'data': {}}
        if mvInstanceName is not None:
            portGroupInstanceName = (
                self.common.get_port_group_from_masking_view(
                    mvInstanceName))
            LOG.debug("Found port group: %(portGroup)s "
                      "in masking view %(maskingView)s.",
                      {'portGroup': portGroupInstanceName,
                       'maskingView': mvInstanceName})
            self.common.terminate_connection(volume, connector)
            # Only ask the zone manager to tear the zone down if no other
            # masking views still use this port group.
            LOG.debug("Looking for masking views still associated with "
                      "Port Group %s.", portGroupInstanceName)
            mvInstances = self.common.get_masking_views_by_port_group(
                portGroupInstanceName)
            if len(mvInstances) > 0:
                LOG.debug("Found %(numViews)lu MaskingViews.",
                          {'numViews': len(mvInstances)})
            else:  # No views found.
                target_wwns, init_targ_map = self._build_initiator_target_map(
                    storage_system, volume, connector)
                LOG.debug("No MaskingViews were found. Deleting zone.")
                data = {'driver_volume_type': 'fibre_channel',
                        'data': {'target_wwn': target_wwns,
                                 'initiator_target_map': init_targ_map}}
                LOG.debug("Return FC data for zone removal: %(data)s.",
                          {'data': data})
        else:
            LOG.warn(_LW("Volume %(volume)s is not in any masking view."),
                     {'volume': volume['name']})
        return data

    def _build_initiator_target_map(self, storage_system, volume, connector):
        """Build the target_wwns and the initiator target map."""
        target_wwns = []
        init_targ_map = {}
        initiator_wwns = connector['wwpns']
        if self.zonemanager_lookup_service:
            # Fabric-assisted case: ask the lookup service which targets are
            # actually reachable from each initiator.
            fc_targets = self.common.get_target_wwns_from_masking_view(
                storage_system, volume, connector)
            mapping = (
                self.zonemanager_lookup_service.
                get_device_mapping_from_network(initiator_wwns, fc_targets))
            for entry in mapping:
                map_d = mapping[entry]
                target_wwns.extend(map_d['target_port_wwn_list'])
                for initiator in map_d['initiator_port_wwn_list']:
                    init_targ_map[initiator] = map_d['target_port_wwn_list']
        else:  # No lookup service, pre-zoned case.
            # Every initiator is assumed to reach every target.
            target_wwns = self.common.get_target_wwns(storage_system,
                                                      connector)
            for initiator in initiator_wwns:
                init_targ_map[initiator] = target_wwns
        # De-duplicate the target list before returning it.
        return list(set(target_wwns)), init_targ_map

    def extend_volume(self, volume, new_size):
        """Extend an existing volume."""
        self.common.extend_volume(volume, new_size)

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        :param refresh: boolean -- If True, run update the stats first.
        :returns: dict -- the stats dict
        """
        if refresh:
            self.update_volume_stats()
        return self._stats

    def update_volume_stats(self):
        """Retrieve stats info from volume group."""
        LOG.debug("Updating volume stats")
        data = self.common.update_volume_stats()
        data['storage_protocol'] = 'FC'
        data['driver_version'] = self.VERSION
        self._stats = data

    def migrate_volume(self, ctxt, volume, host):
        """Migrate a volume from one Volume Backend to another.

        :param ctxt: context
        :param volume: the volume object including the volume_type_id
        :param host: the host dict holding the relevant target(destination)
            information
        :returns: boolean -- Always returns True
        :returns: dict -- Empty dict {}
        """
        return self.common.migrate_volume(ctxt, volume, host)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Migrate volume to another host using retype.

        :param ctxt: context
        :param volume: the volume object including the volume_type_id
        :param new_type: the new volume type.
        :param diff: Unused parameter.
        :param host: the host dict holding the relevant
            target(destination) information
        :returns: boolean -- True if retype succeeded, False if error
        """
        return self.common.retype(ctxt, volume, new_type, diff, host)

    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        self.common.create_consistencygroup(context, group)

    def delete_consistencygroup(self, context, group):
        """Deletes a consistency group."""
        volumes = self.db.volume_get_all_by_group(context, group['id'])
        return self.common.delete_consistencygroup(
            context, group, volumes)

    def create_cgsnapshot(self, context, cgsnapshot):
        """Creates a cgsnapshot."""
        return self.common.create_cgsnapshot(context, cgsnapshot, self.db)

    def delete_cgsnapshot(self, context, cgsnapshot):
        """Deletes a cgsnapshot."""
        return self.common.delete_cgsnapshot(context, cgsnapshot, self.db)
| |
#!/usr/bin/python3
#
# getISOTimeStamp
# secondsSinceEpoch - an arbitrary number of seconds since the Unix epoch
# returns an ISO8601 formatted timestamp in UTC
# example: 2017-01-01T02:25:00+00:00
#
def getISOTimeStamp(secondsSinceEpoch):
    """Return an ISO8601-formatted UTC timestamp (no microseconds).

    secondsSinceEpoch - an arbitrary number of seconds since the Unix epoch
    Example result: 2017-01-01T02:25:00+00:00
    """
    stamp = datetime.fromtimestamp(secondsSinceEpoch, tz=timezone.utc)
    return stamp.replace(microsecond=0).isoformat()
#
# getIndex
# haystack - an array
# returns a random value from an array
#
def getIndex(haystack):
    """Return a random valid index into *haystack*.

    Note: despite the docs above, this returns an index, not a value.
    randrange(n) is equivalent to randint(0, n - 1) and consumes the RNG
    identically, so seeded sequences are unchanged.
    """
    return random.randrange(len(haystack))
#
# createProxyLine
# ts - a timestamp
# u - a username
# ip - an IP address
# s - a site domain (ex: www.google.com)
# returns a JSON-formatted log entry of the "proxy" type
#
def createProxyLine(ts, u, ip, s):
    """Return one JSON-formatted "proxy" log entry, newline-terminated.

    ts - a timestamp
    u  - a username
    ip - an IP address
    s  - a site domain (ex: www.google.com)
    """
    return (
        f'{{"timestamp":"{ts}","ip_address":"{ip}",'
        f'"username":"{u}","site":"{s}"}}\n'
    )
#
# createProxyLogs
# ts_time - timestamp (in seconds) of the first log entry;
#   this increases by one second each iteration of the loop
# end_time - timestamp (in seconds) of the last log entry
# consistent_logs - if this is true, every entry for
#   a given username will always use the same IP
# this function creates one log entry per second between
# ts_time and end_time and batch writes them, 10000 at a time,
# to a file called proxy.log
#
def createProxyLogs(ts_time, end_time, users, sites, user_ips, consistent_logs=False):
    """Generate one "proxy" log entry per second into proxy.log.

    ts_time  - timestamp (in seconds) of the first log entry; advanced by
               one second per loop iteration
    end_time - timestamp (in seconds) of the last log entry
    users / sites / user_ips - pools to draw random values from
    consistent_logs - if True, a given username always uses the IP at the
               same offset in user_ips (parallel arrays)

    Entries are batch-written 10000 at a time.

    Fixes over the original:
    - the file is now closed via a with-block;
    - the trailing partial batch is flushed unconditionally after the loop
      (the original relied on float equality `ts_time == end_time`, which
      can silently drop the last batch when the times don't align exactly).
    """
    line_count = 0
    line_batch = ""
    with open('proxy.log', 'w') as proxy_out:
        while ts_time <= end_time:
            iso_ts = getISOTimeStamp(ts_time)
            user_offset = getIndex(users)
            user = users[user_offset]
            if consistent_logs:
                # Parallel arrays: same offset -> same IP for this user.
                ip = user_ips[user_offset]
            else:
                ip = user_ips[getIndex(user_ips)]
            site = sites[getIndex(sites)]
            line_batch += createProxyLine(iso_ts, user, ip, site)
            line_count += 1
            if line_count % 10000 == 0:
                proxy_out.write(line_batch)
                line_batch = ""
            ts_time += 1
        # Flush whatever is left of the final (partial) batch.
        if line_batch:
            proxy_out.write(line_batch)
    print("Number of proxy logs written: " + str(line_count))
#
# createDNSLine
# ts - a timestamp
# i_dns - the internal DNS server making the query
# e_dns - the external DNS server that responded
# query - the domain requested (ex: www.google.com)
# answer - the response to the DNS query
# returns a JSON-formatted log entry of the "dns" type
#
def createDNSLine(ts, i_dns, e_dns, query, answer):
    """Return one JSON-formatted "dns" log entry, newline-terminated.

    ts     - a timestamp
    i_dns  - the internal DNS server making the query
    e_dns  - the external DNS server that responded
    query  - the domain requested
    answer - the response to the DNS query
    """
    return (
        f'{{"timestamp":"{ts}","client_ip":"{i_dns}","dns_server":"{e_dns}",'
        f'"query":"{query}","answer":"{answer}"}}\n'
    )
def createDNSLogs(ts_time, end_time, i_dns, e_dns, sites, bbc_uk, bbc_com, google, cnn):
    """Generate one "dns" log entry per second into dns.log.

    ts_time / end_time - first/last entry timestamps (seconds)
    i_dns / e_dns      - pools of internal client / external server IPs
    sites              - pool of queried domains
    bbc_uk, bbc_com, google, cnn - answer pools per domain family

    Fixes over the original:
    - the unknown-query branch printed the undefined name `domain`
      (NameError); it now prints the actual `query`;
    - the file is closed via a with-block and the trailing partial batch is
      flushed after the loop;
    - index selection uses the shared getIndex() helper for consistency
      with createProxyLogs.
    """
    line_count = 0
    line_batch = ""
    with open('dns.log', 'w') as dns_out:
        while ts_time <= end_time:
            i_dns_ip = i_dns[getIndex(i_dns)]
            e_dns_ip = e_dns[getIndex(e_dns)]
            query = sites[getIndex(sites)]
            # Pick an answer pool matching the queried domain family.
            if 'bbc.com' in query:
                response = bbc_com[getIndex(bbc_com)]
            elif 'bbc.co.uk' in query:
                response = bbc_uk[getIndex(bbc_uk)]
            elif 'cnn' in query:
                response = cnn[getIndex(cnn)]
            elif 'google' in query:
                response = google[getIndex(google)]
            else:
                # BUG FIX: was `domain` (undefined) -> NameError.
                print('unknown query: ' + query)
                exit()
            iso_ts = getISOTimeStamp(ts_time)
            line_batch += createDNSLine(iso_ts, i_dns_ip, e_dns_ip, query, response)
            line_count += 1
            if line_count % 10000 == 0:
                dns_out.write(line_batch)
                line_batch = ""
            ts_time += 1
        # Flush whatever is left of the final (partial) batch.
        if line_batch:
            dns_out.write(line_batch)
    print("Number of DNS lines written: " + str(line_count))
def createDHCPLogs(start_time, end_time):
    """Placeholder for DHCP log generation.

    Currently only announces itself and the requested time window.
    """
    print("In createDHCPLogs")
    print(f"Start time: {getISOTimeStamp(start_time)}")
    print(f"End time: {getISOTimeStamp(end_time)}")
import argparse
from datetime import datetime, timezone
from time import time
import random
#
# all of the possible arguments
#
parser = argparse.ArgumentParser()
parser.add_argument('--days', dest='days', help='How many days back to start generating logs; the default is fourteen days', default='14')
parser.add_argument('--dhcp', dest='dhcplogs', help='If set, generate something that looks like DHCP logs', action='store_true')
parser.add_argument('--dns', dest='dnslogs', help='If set, generate something that looks like DNS logs', action='store_true')
parser.add_argument('--proxy', dest='proxylogs', help='If set, generate something that looks like proxy logs', action='store_true')
parser.add_argument('--all', dest='alllogs', help='If set, generate all log types', action='store_true')
parser.add_argument('--consistent', dest='consistentlogs', help='If set, users and IPs may or may not be consistent...', action='store_true')
args=parser.parse_args()
# Unpack parsed arguments into module-level flags.
num_days = int(args.days)
dhcp_logs = args.dhcplogs
dns_logs = args.dnslogs
proxy_logs = args.proxylogs
all_logs = args.alllogs
consistent_logs = args.consistentlogs
#
# the users for proxy logs
#
users = ["alvin", "simon", "theodore", "piper", "prue", "phoebe", "paige"]
#
# the sites for proxy and DNS logs
#
sites = ["www.bbc.co.uk", "www.bbc.com", "www.google.com", "www.cnn.com"]
#
# the IPs for proxy and DHCP logs
# NOTE: parallel to `users` - when --consistent is set, users[i] always
# maps to user_ips[i] in createProxyLogs.
#
user_ips = ["10.10.10.1", "10.10.10.3", "10.10.10.8", "10.10.10.21", "10.10.20.2", "10.10.20.5", "10.10.20.13"]
#
# server IPs for passive DNS/DHCP logs
#
i_dns = ["172.16.1.1", "172.24.1.1"]
e_dns = ["8.8.8.8", "208.67.222.222", "208.67.220.220"]
dhcp = ["10.10.10.251", "10.10.20.251"]
#
# site IPs for passive DNS logs
#
bbc_uk = ["212.58.244.70", "212.58.246.94"]
bbc_com = ["151.101.56.81", "151.101.32.81"]
google = ["216.58.193.132"]
cnn = ["151.101.1.67", "151.101.193.67", "151.101.65.67", "151.101.129.67"]
#
# time setup: generate logs covering the last `num_days` days, one entry
# per second, ending at the current time.
#
curr_time = time()
iso_curr_time = getISOTimeStamp(curr_time)
time_offset = num_days * 24 * 60 * 60
start_time = curr_time - time_offset
iso_start_time = getISOTimeStamp(start_time)
print()
print('Generating logs for ' + str(num_days) + ' days')
print('Log start time: ' + iso_start_time)
print('Log end time: ' + iso_curr_time)
print()
# Dispatch to the requested generators (--all enables every type).
if(dhcp_logs or all_logs):
    createDHCPLogs(start_time, curr_time)
if(dns_logs or all_logs):
    createDNSLogs(start_time, curr_time, i_dns, e_dns, sites, bbc_uk, bbc_com, google, cnn)
if(proxy_logs or all_logs):
    createProxyLogs(start_time, curr_time, users, sites, user_ips, consistent_logs)
print()
print('Finished writing logs, exiting')
print()
exit()
| |
from mock import MagicMock
from nose.tools import assert_raises
from paradrop.confd import wireless
from paradrop.confd.wireless import ConfigWifiIface, HostapdConfGenerator
def test_get_cipher_list():
    """get_cipher_list maps uci encryption modes to hostapd cipher lists."""
    both = ["TKIP", "CCMP"]
    expected = {}
    # These mode families all default to CCMP when no cipher suffix is given.
    for mode in ["psk2", "psk", "psk-mixed", "wpa2", "wpa-mixed"]:
        expected[mode + "+tkip+ccmp"] = both
        expected[mode + "+tkip+aes"] = both
        expected[mode + "+tkip"] = ["TKIP"]
        expected[mode + "+ccmp"] = ["CCMP"]
        expected[mode + "+aes"] = ["CCMP"]
        expected[mode] = ["CCMP"]
    # Bare "wpa" is the odd one out: it defaults to TKIP.
    expected["wpa+tkip+ccmp"] = both
    expected["wpa+tkip+aes"] = both
    expected["wpa+tkip"] = ["TKIP"]
    expected["wpa+ccmp"] = ["CCMP"]
    expected["wpa+aes"] = ["CCMP"]
    expected["wpa"] = ["TKIP"]
    for mode, ciphers in expected.items():
        assert wireless.get_cipher_list(mode) == ciphers
def test_ConfigWifiIface_apply():
    """
    Test the ConfigWifiIface apply method
    """
    device = MagicMock()
    device.name = "phy0"
    net_iface = MagicMock()
    net_iface.config_ifname = "wlan0"
    allConfigs = {
        ("wireless", "wifi-device", "phy0"): device,
        ("network", "interface", "lan"): net_iface
    }
    config = ConfigWifiIface()
    config.manager = MagicMock()
    config.makeHostapdConf = MagicMock()
    # Configure the wifi-iface section under test.
    for attr, value in [("device", "phy0"), ("mode", "ap"),
                        ("ssid", "Paradrop"), ("network", "lan"),
                        ("encryption", "psk2"), ("key", "password")]:
        setattr(config, attr, value)
    # Should produce commands without raising.
    commands = config.apply(allConfigs)
def test_HostapdConfGenerator_getMainOptions():
    """getMainOptions emits the core hostapd settings for an AP interface."""
    iface = MagicMock()
    iface.ssid = "Paradrop"
    iface.maxassoc = 200
    iface.wmm = True
    iface._ifname = "wlan0"

    device = MagicMock()
    device.country = "US"
    device.hwmode = "11a"
    device.channel = 36
    device.beacon_int = 100
    device.rts = -1
    device.frag = -1

    net_iface = MagicMock()
    net_iface.type = "bridge"
    net_iface.config_ifname = "br-lan"

    generator = HostapdConfGenerator(iface, device, net_iface)
    options = generator.getMainOptions()
    print(options)

    expected = [
        ("interface", "wlan0"),
        ("bridge", "br-lan"),
        ("ssid", "Paradrop"),
        ("country_code", "US"),
        ("ieee80211d", 1),
        ("hw_mode", "a"),
        ("beacon_int", 100),
        ("max_num_sta", 200),
        ("rts_threshold", -1),
        ("fragm_threshold", -1),
        ("wmm_enabled", 1),
    ]
    for item in expected:
        assert item in options
def test_HostapdConfGenerator_get11nOptions():
    """get11nOptions emits 802.11n settings and derives HT40+/- from channel."""
    device = MagicMock()
    device.enable11n = True
    device.htmode = "HT40+"
    device.short_gi_20 = True
    device.short_gi_40 = True
    device.tx_stbc = 1
    device.rx_stbc = 2
    device.dsss_cck_40 = True
    device.require_mode = "n"

    generator = HostapdConfGenerator(MagicMock(), device, MagicMock())

    opts = dict(generator.get11nOptions())
    print(opts)
    assert "HT40+" in opts['ht_capab']
    assert opts['ieee80211n'] == 1
    assert opts['require_ht'] == 1

    # Enabling 11ac (VHT80) and using channel 40 should configure HT40- mode
    # for 11n clients.
    device.htmode = "VHT80"
    device.channel = 40
    opts = dict(generator.get11nOptions())
    print(opts)
    assert "HT40-" in opts['ht_capab']

    # Enabling 11ac (VHT80) and using channel 36 should configure HT40+ mode
    # for 11n clients.
    device.htmode = "VHT80"
    device.channel = 36
    opts = dict(generator.get11nOptions())
    print(opts)
    assert "HT40+" in opts['ht_capab']
def test_HostapdConfGenerator_get11acOptions():
    """get11acOptions maps VHT modes to hostapd channel-width settings."""
    device = MagicMock()
    device.enable11ac = True
    device.htmode = "VHT40"
    device.short_gi_80 = True
    device.short_gi_160 = True
    device.tx_stbc = 1
    device.rx_stbc = 2
    device.require_mode = "ac"
    device.channel = 36

    generator = HostapdConfGenerator(MagicMock(), device, MagicMock())

    # 40 MHz channel (VHT40).
    options = generator.get11acOptions()
    print(options)
    assert ("ieee80211ac", 1) in options
    assert ("vht_capab", "[RXLDPC][SHORT-GI-80][SHORT-GI-160][TX-STBC-2BY1][RX-ANTENNA-PATTERN][TX-ANTENNA-PATTERN][RX-STBC-12]") in options
    assert ("vht_oper_chwidth", 0) in options
    assert ("vht_oper_centr_freq_seg0_idx", 38) in options

    # Try 80 MHz channel.
    device.htmode = "VHT80"
    options = generator.get11acOptions()
    assert ("vht_oper_chwidth", 1) in options
    assert ("vht_oper_centr_freq_seg0_idx", 42) in options

    # Try 160 MHz channel.
    device.htmode = "VHT160"
    options = generator.get11acOptions()
    assert ("vht_oper_chwidth", 2) in options
    assert ("vht_oper_centr_freq_seg0_idx", 50) in options
def test_HostapdConfGenerator_getRadiusOptions():
    """getRadiusOptions runs cleanly with auth/acct servers configured."""
    iface = ConfigWifiIface()
    for attr, value in [("ssid", "Paradrop"), ("maxassoc", 200),
                        ("wmm", True), ("ifname", "wlan0"),
                        ("auth_server", "10.42.0.1"),
                        ("auth_secret", "secret"),
                        ("acct_server", "10.42.0.1"),
                        ("acct_secret", "secret")]:
        setattr(iface, attr, value)

    device = MagicMock()
    device.country = "US"
    device.hwmode = "11a"
    device.channel = 36
    device.beacon_int = 100
    device.rts = -1
    device.frag = -1

    net_iface = MagicMock()
    net_iface.type = "bridge"
    net_iface.config_ifname = "br-lan"

    generator = HostapdConfGenerator(iface, device, net_iface)
    options = generator.getRadiusOptions()
    print(options)
def test_HostapdConfGenerator_get11rOptions():
    """get11rOptions requires nasid and emits one entry per r0kh/r1kh line."""
    iface = ConfigWifiIface()
    iface.r0kh = [
        "02:01:02:03:04:05 r0kh-1.example.com 000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f",
        "02:01:02:03:04:06 r0kh-2.example.com 00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"
    ]
    iface.r1kh = [
        "00:00:00:00:00:00 00:00:00:00:00:00 00112233445566778899aabbccddeeff"
    ]

    generator = HostapdConfGenerator(iface, MagicMock(), MagicMock())

    # Should raise an exception because nasid is not set.
    assert_raises(Exception, generator.get11rOptions)

    iface.nasid = "ap.example.com"
    options = generator.get11rOptions()

    # There should be two r0kh entries and one r1kh entry.
    r0_count = len([opt for opt in options if opt[0] == 'r0kh'])
    r1_count = len([opt for opt in options if opt[0] == 'r1kh'])
    assert r0_count == 2
    assert r1_count == 1
| |
"""
Copyright 2006-2008 SpringSource (http://springsource.com), All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import Cookie
import logging
import re
import pickle
import types
from springpython.context import ApplicationContextAware
from springpython.aop import utils
from springpython.security import AccessDeniedException
from springpython.security import AuthenticationException
from springpython.security.context import SecurityContext
from springpython.security.context import SecurityContextHolder
from springpython.security.intercept import AbstractSecurityInterceptor
from springpython.security.intercept import ObjectDefinitionSource
from springpython.security.providers import UsernamePasswordAuthenticationToken
logger = logging.getLogger("springpython.security.web")
class Filter(object):
    """This is the interface definition of a filter. It must process a request/response."""

    def doNextFilter(self, environ, start_response):
        # Advance the per-request filter chain (a generator that
        # FilterChainProxy stores in the WSGI environ) and invoke whatever
        # comes next. NOTE: generator .next() is Python 2 syntax.
        results = None
        try:
            nextFilter = environ["SPRINGPYTHON_FILTER_CHAIN"].next()
            if isinstance(nextFilter, tuple):
                # A (callable, args) pair - call it with its bundled argument.
                func = nextFilter[0]
                args = nextFilter[1]
                results = func(args)
            else:
                # A plain WSGI callable: filter or the wrapped application.
                results = nextFilter(environ, start_response)
        except StopIteration:
            # End of the chain - nothing left to invoke.
            pass
        # Apparently, passing back a generator trips up CherryPy and causes it to skip
        # the filters. If a generator is detected, convert it to a standard array.
        if type(results) == types.GeneratorType:
            results = [line for line in results]
        return results
class FilterChain(object):
    """
    Ordered collection of WSGI filters, rebuilt per request so different URL
    patterns can get different filter combinations.
    In order to link in 3rd party WSGI middleware, see MiddlewareFilter.
    """
    def __init__(self):
        # Filters run in insertion order.
        self.chain = []

    def addFilter(self, filter):
        """Append a filter to the end of the chain."""
        self.chain.append(filter)

    def getFilterChain(self):
        """Return an iterator over the filters, in insertion order."""
        return iter(self.chain)
class FilterChainProxy(Filter, ApplicationContextAware):
    """
    This acts as filter, and delegates to a chain of filters. Each time a web page is called, it dynamically
    assembles a FilterChain, and then iterates over it. This is different than the conventional style of
    wrapping applications for WSGI, because each URL pattern might have a different chained combination
    of the WSGI filters.
    Because most middleware objects define the wrapped application using __init__, Spring provides
    the MiddlewareFilter, to help wrap any middleware object so that it can participate in a
    FilterChain.
    """

    def __init__(self, filterInvocationDefinitionSource=None):
        """This class must be application-context aware in case it is instantiated inside an IoC container."""
        ApplicationContextAware.__init__(self)
        # List of (urlPattern, [filters]) pairs; first matching pattern wins.
        if filterInvocationDefinitionSource is None:
            self.filterInvocationDefinitionSource = []
        else:
            self.filterInvocationDefinitionSource = filterInvocationDefinitionSource
        self.logger = logging.getLogger("springpython.security.web.FilterChainProxy")
        # The wrapped WSGI application; appended as the final "filter".
        self.application = None

    def __call__(self, environ, start_response):
        """This will route all requests/responses through the chain of filters."""
        filterChain = FilterChain()
        for urlPattern, chainOfFilters in self.filterInvocationDefinitionSource:
            if re.compile(urlPattern).match(environ["PATH_INFO"].lower()):
                self.logger.debug("We had a match of %s against %s" % (environ["PATH_INFO"], urlPattern))
                for filter in chainOfFilters:
                    # Entries may be object names in the application context
                    # or filter instances; fall back to using the entry
                    # directly when context lookup isn't possible.
                    # NOTE: Python 2 except syntax.
                    try:
                        filterChain.addFilter(self.app_context.get_object(filter))
                    except AttributeError, e:
                        filterChain.addFilter(filter)
                # Only the first matching URL pattern is used.
                break
        # Put the actual application on the end of the chain.
        if self.application:
            filterChain.addFilter(self.application)
        # Expose the chain to Filter.doNextFilter via the WSGI environ.
        environ["SPRINGPYTHON_FILTER_CHAIN"] = filterChain.getFilterChain()
        return self.doNextFilter(environ, start_response)
class SessionStrategy(object):
    """
    This is an interface definition in defining access to session data. There may be many
    ways to implement session data. This makes the mechanism pluggable.
    """

    def getHttpSession(self, environ):
        """Return the HTTP session for this request, or None. Subclasses must override."""
        raise NotImplementedError()

    def setHttpSession(self, key, value):
        """Store *value* under *key* in the HTTP session. Subclasses must override."""
        raise NotImplementedError()
class HttpSessionContextIntegrationFilter(Filter):
    """
    This filter is meant to pull security context information from the HttpSession, and store it in the
    SecurityContextHolder. Then on the response, copy and SecurityContext information back into the HttpSession.
    """
    # Key to the SecurityContext data stored in an HttpSession dictionary.
    SPRINGPYTHON_SECURITY_CONTEXT_KEY = "SPRINGPYTHON_SECURITY_CONTEXT_KEY"
    # Class name used
    # Factory class for new contexts; replaceable via setContext().
    context = SecurityContext
    def __init__(self, sessionStrategy=None):
        # Pluggable session accessor (see the SessionStrategy interface).
        self.sessionStrategy = sessionStrategy
        self.logger = logging.getLogger("springpython.security.web.HttpSessionContextIntegrationFilter")
    def __call__(self, environ, start_response):
        """This filter copies SecurityContext information back and forth between the HttpSession and the SecurityContextHolder."""
        httpSession = self.sessionStrategy.getHttpSession(environ)
        contextWhenChainProceeded = None
        if httpSession is not None:
            contextFromSessionObject = None
            if self.SPRINGPYTHON_SECURITY_CONTEXT_KEY in httpSession:
                # NOTE(review): pickle.loads on session-stored data — safe only
                # if the session store is fully trusted; unpickling untrusted
                # bytes can execute arbitrary code.
                contextFromSessionObject = pickle.loads(httpSession[self.SPRINGPYTHON_SECURITY_CONTEXT_KEY])
            if contextFromSessionObject is not None:
                if isinstance(contextFromSessionObject, SecurityContext):
                    self.logger.debug("Obtained from SPRINGPYTHON_SECURITY_CONTEXT_KEY a valid SecurityContext and set "
                                      + "to SecurityContextHolder: '%s'" % contextFromSessionObject)
                    SecurityContextHolder.setContext(contextFromSessionObject)
                else:
                    # The session held something other than a SecurityContext;
                    # discard it and start with a fresh, empty context.
                    self.logger.warn("SPRINGPYTHON_SECURITY_CONTEXT_KEY did not contain a SecurityContext but contained: '%s'" % contextFromSessionObject
                                     + "'; are you improperly modifying the HttpSession directly (you should always use "
                                     + "SecurityContextHolder) or using the HttpSession attribute reserved for this class? "
                                     + "- new SecurityContext instance associated with SecurityContextHolder")
                    SecurityContextHolder.setContext(self.generateNewContext())
            else:
                self.logger.debug("HttpSession returned null object for SPRINGPYTHON_SECURITY_CONTEXT_KEY " +
                                  "- new SecurityContext instance associated with SecurityContextHolder")
                SecurityContextHolder.setContext(self.generateNewContext())
        else:
            self.logger.debug("No HttpSession currently exists - new SecurityContext instance associated with SecurityContextHolder")
            SecurityContextHolder.setContext(self.generateNewContext())
        self.logger.debug("Setting contextWhenChainProceeded to %s" % SecurityContextHolder.getContext())
        # NOTE(review): contextWhenChainProceeded is assigned but never read
        # afterwards — apparently diagnostic only; confirm before removing.
        contextWhenChainProceeded = str(SecurityContextHolder.getContext())
        results = self.doNextFilter(environ, start_response)
        # Response side: persist the (possibly updated) context back into the
        # session, then clear the holder so nothing leaks across requests.
        self.sessionStrategy.setHttpSession(self.SPRINGPYTHON_SECURITY_CONTEXT_KEY,
                                            pickle.dumps(SecurityContextHolder.getContext()))
        self.logger.debug("SecurityContext stored to HttpSession: '%s'" % SecurityContextHolder.getContext())
        SecurityContextHolder.clearContext()
        self.logger.debug("SecurityContextHolder cleared out, as request processing completed")
        return results
    def setContext(self, clazz):
        """This is a factory setter. The context parameter is used to create new security context objects."""
        self.context = clazz
    def generateNewContext(self):
        """This is a factory method that instantiates the assigned class, and populates it with an empty token."""
        context = self.context()
        context.authentication = UsernamePasswordAuthenticationToken()
        return context
    def saveContext(self):
        # Persist the current context explicitly (same write as in __call__).
        self.sessionStrategy.setHttpSession(self.SPRINGPYTHON_SECURITY_CONTEXT_KEY,
                                            pickle.dumps(SecurityContextHolder.getContext()))
class RedirectStrategy(object):
    """
    Sends users to another page. Instead of an HTTP status-code redirect it
    returns a standard meta-refresh forwarding message to the browser; not the
    most efficient approach, but it guarantees the entire WSGI stack is
    processed on both request and response.
    """
    def redirect(self, url):
        """Return a 0-second meta-refresh redirect to *url*."""
        return '<META HTTP-EQUIV="Refresh" CONTENT="0; URL=%s">' % url
class AuthenticationProcessingFilter(Filter):
    """
    This filter utilizes the authentication manager to make sure the requesting person is authenticated.
    It expects the SecurityContextHolder to be populated when it runs, so it is always good to preceed it
    with the HttpSessionContextIntegrationFilter.
    """
    def __init__(self, auth_manager=None, alwaysReauthenticate=False):
        # auth_manager performs the actual credential check; when
        # alwaysReauthenticate is True, credentials are re-verified on every
        # request even if the current token is already authenticated.
        self.auth_manager = auth_manager
        self.alwaysReauthenticate = alwaysReauthenticate
        self.logger = logging.getLogger("springpython.security.web.AuthenticationProcessingFilter")
    def __call__(self, environ, start_response):
        """
        Check if the user is trying to access the login url. Then see if they are already authenticated (and
        alwaysReauthenticate is disabled). Finally, try to authenticate the user. If successful, stored credentials
        in SecurityContextHolder. Otherwise, redirect to the login page.
        """
        # If the user is already authenticated, skip this filter.
        if not self.alwaysReauthenticate and SecurityContextHolder.getContext().authentication.isAuthenticated():
            self.logger.debug("You are not required to reauthenticate everytime, and appear to already be authenticted, access GRANTED.")
            return self.doNextFilter(environ, start_response)
        try:
            # Authenticate existing credentials using the authentication manager.
            token = SecurityContextHolder.getContext().authentication
            self.logger.debug("Trying to authenticate %s using the authentication manager" % token)
            SecurityContextHolder.getContext().authentication = self.auth_manager.authenticate(token)
            self.logger.debug("%s was successfully authenticated, access GRANTED." % token.username)
        except AuthenticationException, e:
            # Re-raised so ExceptionTranslationFilter (further up the chain)
            # can route the user to the login entry point.
            self.logger.debug("Authentication failure, access DENIED.")
            raise
        return self.doNextFilter(environ, start_response)
    def logout(self):
        # Replace the current token with an empty, unauthenticated one.
        SecurityContextHolder.getContext().authentication = UsernamePasswordAuthenticationToken()
class FilterInvocation:
    """
    Carries the objects tied to one filtered WSGI request (currently just
    ``environ``). This is the web-application equivalent to MethodInvocation.
    """
    def __init__(self, environ):
        self.environ = environ
    def requestUrl(self):
        """Return the path portion of the requested URL."""
        return self.environ["PATH_INFO"]
class AbstractFilterInvocationDefinitionSource(ObjectDefinitionSource):
    """
    Base ObjectDefinitionSource for the web tier: resolves the security
    attributes for a FilterInvocation by its request URL.
    """
    def get_attributes(self, obj):
        """Return the attributes for *obj*'s URL; *obj* must behave like a FilterInvocation."""
        try:
            return self.lookupAttributes(obj.requestUrl())
        except AttributeError:
            raise TypeError("obj must be a FilterInvocation")
    def lookupAttributes(self, url):
        """Subclasses map a URL onto its security attributes here."""
        raise NotImplementedError()
class RegExpBasedFilterInvocationDefinitionMap(AbstractFilterInvocationDefinitionSource):
    """
    Maintains a list of ObjectDefinitionSource's associated with different HTTP request URL regular expression patterns.
    Regular expressions are used to match a HTTP request URL against a ConfigAttributeDefinition. Registration order is
    significant: the FIRST pattern that matches a given URL wins and later patterns are never consulted, so register the
    most specific expressions first and the most general ones last.
    """
    def __init__(self, obj_def_source):
        self.obj_def_source = obj_def_source
    def lookupAttributes(self, url):
        """Return the attributes of the first pattern matching *url*, or None."""
        if not self.obj_def_source:
            return None
        for pattern, attributes in self.obj_def_source:
            if re.match(pattern, url):
                return attributes
        return None
class FilterSecurityInterceptor(Filter, AbstractSecurityInterceptor):
    """
    Performs security handling of HTTP resources via a filter implementation.
    The ObjectDefinitionSource required by this security interceptor is of type AbstractFilterInvocationDefinitionSource.
    Refer to AbstractSecurityInterceptor for details on the workflow.
    """
    # Key to the FilterSecurityInterceptor's token data stored in an HttpSession dictionary.
    SPRINGPYTHON_FILTER_SECURITY_INTERCEPTOR_KEY = "SPRINGPYTHON_FILTER_SECURITY_INTERCEPTOR_KEY"
    def __init__(self, auth_manager = None, access_decision_mgr = None, obj_def_source = None, sessionStrategy=None):
        Filter.__init__(self)
        AbstractSecurityInterceptor.__init__(self, auth_manager, access_decision_mgr, obj_def_source)
        self.sessionStrategy = sessionStrategy
        self.obj_def_source = obj_def_source
    def __setattr__(self, name, value):
        # obj_def_source assignments are transparently wrapped so URL lookups
        # go through the regular-expression map.
        if name == "obj_def_source" and value is not None:
            self.__dict__[name] = RegExpBasedFilterInvocationDefinitionMap(value)
        else:
            self.__dict__[name] = value
    def obtain_obj_def_source(self):
        return self.obj_def_source
    def __call__(self, environ, start_response):
        """Authorize the request, run the rest of the chain, then perform after-invocation handling."""
        httpSession = self.sessionStrategy.getHttpSession(environ)
        self.logger.debug("Trying to check if you are authorized for this.")
        fi = FilterInvocation(environ)
        token = self.before_invocation(fi)
        if httpSession is not None:
            httpSession[self.SPRINGPYTHON_FILTER_SECURITY_INTERCEPTOR_KEY] = token
        # BUG FIX: the original returned doNextFilter() immediately when a
        # session existed (skipping after_invocation entirely) and otherwise
        # fell through to `return results` with `results` never assigned,
        # raising NameError. The chain must always be invoked and its result
        # captured before the after-invocation step runs.
        results = self.doNextFilter(environ, start_response)
        if httpSession is not None and self.SPRINGPYTHON_FILTER_SECURITY_INTERCEPTOR_KEY in httpSession:
            token = httpSession[self.SPRINGPYTHON_FILTER_SECURITY_INTERCEPTOR_KEY]
        self.after_invocation(token, None)
        return results
class ExceptionTranslationFilter(Filter):
"""
Handles any AccessDeniedException and AuthenticationException thrown within the filter chain.
This filter is necessary because it provides the bridge between Python exceptions and HTTP responses.
It is solely concerned with maintaining the user interface. This filter does not do any actual security enforcement.
If an AuthenticationException is detected, the filter will launch the authenticationEntryPoint. This allows common
handling of authentication failures originating from any subclass of AuthenticationProcessingFilter.
If an AccessDeniedException is detected, the filter will launch the accessDeniedHandler. This allows common
handling of access failures originating from any subclass of AbstractSecurityInterceptor.
"""
def __init__(self, authenticationEntryPoint=None, accessDeniedHandler=None, redirectStrategy=None):
Filter.__init__(self)
self.authenticationEntryPoint = authenticationEntryPoint
self.accessDeniedHandler = accessDeniedHandler
self.logger = logging.getLogger("springpython.security.web.ExceptionTranslationFilter")
def __call__(self, environ, start_response):
try:
return self.doNextFilter(environ, start_response)
except AuthenticationException, e:
self.logger.debug("AuthenticationException => %s, redirecting through authenticationEntryPoint" % e)
return self.authenticationEntryPoint(environ, start_response)
except AccessDeniedException, e:
self.logger.debug("AccessDeniedException => %s, redirect through accessDeniedHandler" % e)
return self.accessDeniedHandler(environ, start_response)
class AuthenticationProcessingFilterEntryPoint(Filter):
    """Knows where the login form lives and commences a redirect to it."""
    def __init__(self, loginFormUrl=None, redirectStrategy=None):
        Filter.__init__(self)
        # Location of the login form and the strategy used to get there.
        self.loginFormUrl = loginFormUrl
        self.redirectStrategy = redirectStrategy
        self.logger = logging.getLogger("springpython.security.web.AuthenticationProcessingFilterEntryPoint")
    def __call__(self, environ, start_response):
        target = self.loginFormUrl
        self.logger.debug("Redirecting to login page %s" % target)
        return self.redirectStrategy.redirect(target)
class AccessDeniedHandler(Filter):
    """Interface used by ExceptionTranslationFilter to deal with an AccessDeniedException."""
    def __init__(self):
        Filter.__init__(self)
class SimpleAccessDeniedHandler(AccessDeniedHandler):
    """Default AccessDeniedHandler: bounce the user to a configured error page."""
    def __init__(self, errorPage=None, redirectStrategy=None):
        AccessDeniedHandler.__init__(self)
        # Page shown on denial, plus the strategy used to send the user there.
        self.errorPage = errorPage
        self.redirectStrategy = redirectStrategy
        self.logger = logging.getLogger("springpython.security.web.SimpleAccessDeniedHandler")
    def __call__(self, environ, start_response):
        target = self.errorPage
        self.logger.debug("Redirecting to error page %s" % target)
        return self.redirectStrategy.redirect(target)
class MiddlewareFilter(Filter):
    """
    This filter allows you to wrap any WSGI-compatible middleware and use it as a Spring Python filter.
    This is primary because lots of middleware objects requires the wrapped WSGI app to be included
    in the __init__ method. Spring's IoC container currently doesn't support constructor arguments.
    """
    def __init__(self, clazz = None, appAttribute = None):
        Filter.__init__(self)
        # clazz: dotted name of the middleware class (see __setattr__ hook).
        # appAttribute: name of the middleware attribute that holds the
        # wrapped WSGI app; it is re-pointed at the next filter per request.
        self.clazz = clazz
        self.appAttribute = appAttribute
    def __setattr__(self, name, value):
        # Assigning clazz eagerly instantiates the middleware with a None app;
        # the real downstream app is injected on each request in __call__.
        if name == "clazz" and value is not None:
            self.__dict__[name] = value
            # NOTE(review): assumes utils.getClass resolves a dotted class
            # name and the middleware accepts a single (app) constructor
            # argument — confirm for each wrapped middleware.
            self.middleware = utils.getClass(value)(None)
        else:
            self.__dict__[name] = value
    def __call__(self, environ, start_response):
        # Point the middleware at the next filter in the chain, then delegate.
        setattr(self.middleware, self.appAttribute, environ["SPRINGPYTHON_FILTER_CHAIN"].next())
        return self.middleware(environ, start_response)
| |
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.utils import importlib
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateResponseMixin
from socialregistration import signals
from socialregistration.settings import SESSION_KEY
import urlparse
class CommonMixin(TemplateResponseMixin):
    """
    Shared view helpers: importing backends by dotted path, authenticating
    and signing users in, and computing safe post-login redirects.
    """
    def import_attribute(self, path):
        """Import and return the attribute named by dotted *path*."""
        module_path = '.'.join(path.split('.')[:-1])
        attr_name = path.split('.')[-1]
        return getattr(importlib.import_module(module_path), attr_name)
    def get_next(self, request):
        """
        Return the URL to redirect to after login / signup.
        Precedence: session, then GET, then POST, then LOGIN_REDIRECT_URL.
        Off-site targets are rejected in favour of LOGIN_REDIRECT_URL.
        """
        if 'next' in request.session:
            next = request.session['next']
            del request.session['next']
        elif 'next' in request.GET:
            next = request.GET.get('next')
        elif 'next' in request.POST:
            next = request.POST.get('next')
        else:
            next = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
        # Refuse redirects pointing at a different host (open-redirect guard).
        netloc = urlparse.urlparse(next)[1]
        if netloc and netloc != request.get_host():
            next = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
        return next
    def authenticate(self, **kwargs):
        """Run *kwargs* through every configured authentication backend."""
        return authenticate(**kwargs)
    def login(self, request, user):
        """Sign *user* in on *request*."""
        return login(request, user)
    def inactive_response(self):
        """Respond to a login attempt by an inactive account."""
        inactive_url = getattr(settings, 'LOGIN_INACTIVE_REDIRECT_URL', '')
        if not inactive_url:
            return self.render_to_response({'error': _("This user account is marked as inactive.")})
        return HttpResponseRedirect(inactive_url)
    def redirect(self, request):
        """Redirect back to the ``next`` session/request variable."""
        return HttpResponseRedirect(self.get_next(request))
class ClientMixin(object):
    """
    Interface to the API client required by views such as
    ``OAuthRedirectView``.
    """
    #: The client class we'll be working with; subclasses must set this.
    client = None
    def get_client(self):
        """Return ``self.client``; raise ``AttributeError`` when unset."""
        if self.client is None:
            raise AttributeError('`self.client` is `None`')
        return self.client
class ProfileMixin(object):
    """
    Interface to the social-profile model required by views such as
    ``SetupCallback``.
    """
    #: The profile model that we'll be working with; subclasses must set this.
    profile = None
    def get_lookup_kwargs(self, request, client):
        """Return the kwargs used to look up a profile object."""
        raise NotImplementedError
    def get_model(self):
        """Return ``self.profile``; raise ``AttributeError`` when unset."""
        if self.profile is None:
            raise AttributeError('`self.profile` is `None`')
        return self.profile
    def create_user(self):
        """Return a new, empty (unsaved) user instance."""
        return User()
    def create_profile(self, user, save=False, **kwargs):
        """
        Build a profile instance for *user*.
        :param user: A user object
        :param save: If this is set, the profile is saved to DB straight away
        :type save: bool
        """
        profile = self.get_model()(user=user, **kwargs)
        if save:
            profile.save()
        return profile
    def get_profile(self, **kwargs):
        """Fetch a single profile matching *kwargs*."""
        return self.get_model().objects.get(**kwargs)
    def get_or_create_profile(self, user, save=False, **kwargs):
        """
        Fetch *user*'s profile, creating one when none exists yet.
        Returns a ``(profile, created)`` tuple.
        :param user: A user object
        :param save: If set, a newly created profile will be saved.
        :type save: bool
        """
        model = self.get_model()
        try:
            return model.objects.get(user=user, **kwargs), False
        except model.DoesNotExist:
            return self.create_profile(user, save=save, **kwargs), True
class SessionMixin(object):
    """
    Carries the user, profile and client objects across the two signup views
    via the session: storage, retrieval and cleanup of those values.
    """
    def store_profile(self, request, profile):
        """Stash *profile* in the session."""
        request.session['%sprofile' % SESSION_KEY] = profile
    def store_user(self, request, user):
        """Stash *user* in the session."""
        request.session['%suser' % SESSION_KEY] = user
    def store_client(self, request, client):
        """Stash *client* in the session."""
        request.session['%sclient' % SESSION_KEY] = client
    def get_session_data(self, request):
        """Return the stored ``(user, profile, client)`` tuple."""
        session = request.session
        return (session['%suser' % SESSION_KEY],
            session['%sprofile' % SESSION_KEY],
            session['%sclient' % SESSION_KEY])
    def delete_session_data(self, request):
        """Drop any stored user/profile/client entries from the session."""
        for suffix in ('user', 'profile', 'client'):
            try:
                del request.session['%s%s' % (SESSION_KEY, suffix)]
            except KeyError:
                pass
class SignalMixin(object):
    """
    Emits the signals other parts of the code listen for when users sign in
    or connect a new social account.
    """
    def send_login_signal(self, request, user, profile, client):
        """
        Notify listeners that a user logged in. Send this only when the user
        was *not* already logged into Django.
        """
        signals.login.send(sender=profile.__class__,
            user=user, profile=profile, client=client, request=request)
    def send_connect_signal(self, request, user, profile, client):
        """
        Notify listeners that a user connected a social profile to his Django
        account. Send this *only* when a new social connection was created.
        """
        signals.connect.send(sender=profile.__class__,
            user=user, profile=profile, client=client, request=request)
class SocialRegistration(CommonMixin, ClientMixin, ProfileMixin, SessionMixin,
    SignalMixin):
    """Convenience base class rolling all of the mixins into one."""
| |
# -*- coding: utf-8 -*-
"""
Project
@author: Michael Howden (michael@sahanafoundation.org)
@date-created: 2010-08-25
Project Tracking
project_project and project_task moved from 05_org.py
"""
application = "project"
# All project definitions are only loaded when the module is enabled.
if deployment_settings.has_module("project"):
    #==============================================================================
    # Projects:
    #   the projects which each organization is engaged in
    #
    # Display labels (wrapped in T()) for the integer codes stored in
    # project_project.status; used by the status Field below and by
    # shn_project_rheader / shn_project_search_location.
    project_project_status_opts = {
        1: T("active"),
        2: T("completed"),
        99: T("inactive")
    }
resourcename = "project"
tablename = application + "_" + resourcename
table = db.define_table(tablename,
Field("code"),
Field("name"),
organisation_id(),
location_id(),
cluster_id(),
Field("status", "integer",
requires = IS_IN_SET(project_project_status_opts, zero=None),
# default = 99,
label = T("Project Status"),
represent = lambda opt: project_project_status_opts.get(opt, UNKNOWN_OPT)),
Field("description", "text"),
Field("beneficiaries", "integer"), #@todo: change this field name to total_bnf
Field("start_date", "date"),
Field("end_date", "date"),
Field("funded", "boolean"),
donor_id(),
Field("budgeted_cost", "double"),
migrate=migrate, *s3_meta_fields())
#@todo: Fix the widget for this before displaying - should donor be component?
table.donor_id.readable = table.donor_id.writable = False
# Field settings
table.code.requires = [IS_NOT_EMPTY(error_message=T("Please fill this!")),
IS_NOT_IN_DB(db, "project_project.code")]
table.start_date.requires = IS_NULL_OR(IS_DATE())
table.end_date.requires = IS_NULL_OR(IS_DATE())
table.budgeted_cost.requires = IS_NULL_OR(IS_FLOAT_IN_RANGE(0, 999999999))
# Project Resource called from multiple controllers
# - so we define strings in the model
table.code.label = T("Code")
table.name.label = T("Title")
table.start_date.label = T("Start date")
table.end_date.label = T("End date")
table.description.label = T("Description")
table.beneficiaries.label = T("Total Beneficiaries")
table.status.label = T("Status")
ADD_PROJECT = T("Add Project")
s3.crud_strings[tablename] = Storage(
title_create = ADD_PROJECT,
title_display = T("Project Details"),
title_list = T("List Projects"),
title_update = T("Edit Project"),
title_search = T("Search Projects"),
subtitle_create = T("Add New Project"),
subtitle_list = T("Projects"),
label_list_button = T("List Projects"),
label_create_button = ADD_PROJECT,
label_delete_button = T("Delete Project"),
msg_record_created = T("Project added"),
msg_record_modified = T("Project updated"),
msg_record_deleted = T("Project deleted"),
msg_list_empty = T("No Projects currently registered"))
# Reusable field
project_id = S3ReusableField("project_id", db.project_project, sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "project_project.id", "%(code)s")),
represent = lambda id: (id and [db.project_project[id].code] or [NONE])[0],
comment = DIV(A(ADD_PROJECT, _class="colorbox", _href=URL(r=request, c="org", f="project", args="create", vars=dict(format="popup")), _target="top", _title=ADD_PROJECT),
DIV( _class="tooltip", _title=ADD_PROJECT + "|" + T("Add new project."))),
label = "Project",
ondelete = "RESTRICT"
)
# Projects as component of Orgs & Locations
s3xrc.model.add_component(application, resourcename,
multiple=True,
#joinby=dict(project_organisation="organisation_id", gis_location="location_id"),
joinby=dict(org_organisation="organisation_id"))
s3xrc.model.configure(table,
#listadd=False,
main="code",
list_fields=["id",
"organisation_id",
"location_id",
"cluster_id",
"code",
"name",
"status",
"start_date",
"end_date",
"budgeted_cost"])
# -----------------------------------------------------------------------------
# shn_project_search_location:
# form function to search projects by location
#
    def shn_project_search_location(xrequest, **attr):
        """
        Interactive form to search projects by location (HTML only).

        Plugged into the REST controller as the custom "search_location"
        method (see s3xrc.model.set_method below).
        """
        if attr is None:
            attr = {}
        # Permission gate: anonymous/unauthorized users are bounced to login
        # with a _next that returns them to this search afterwards.
        if not shn_has_permission("read", db.project_project):
            session.error = UNAUTHORISED
            redirect(URL(r=request, c="default", f="user", args="login", vars={"_next":URL(r=request, args="search_location", vars=request.vars)}))
        if xrequest.representation == "html":
            # Check for redirection
            if request.vars._next:
                next = str.lower(request.vars._next)
            else:
                # "[id]" is substituted with each matched record's id below.
                next = URL(r=request, c="org", f="project", args="[id]")
            # Custom view
            response.view = "%s/project_search.html" % xrequest.prefix
            # Title and subtitle
            title = T("Search for a Project")
            subtitle = T("Matching Records")
            # Select form: one option per undeleted location, cached for 1 hour.
            l_opts = [OPTION(_value="")]
            l_opts += [OPTION(location.name, _value=location.id)
                    for location in db(db.gis_location.deleted == False).select(db.gis_location.ALL, cache=(cache.ram, 3600))]
            form = FORM(TABLE(
                    TR(T("Location: "),
                    SELECT(_name="location", *l_opts, **dict(name="location", requires=IS_NULL_OR(IS_IN_DB(db, "gis_location.id"))))),
                    TR("", INPUT(_type="submit", _value=T("Search")))
                    ))
            output = dict(title=title, subtitle=subtitle, form=form, vars=form.vars)
            # Accept action
            items = None
            if form.accepts(request.vars, session):
                table = db.project_project
                query = (table.deleted == False)
                if form.vars.location is None:
                    # No location chosen: list every undeleted project.
                    results = db(query).select(table.ALL)
                else:
                    query = query & (table.location_id == form.vars.location)
                    results = db(query).select(table.ALL)
                if results and len(results):
                    records = []
                    for result in results:
                        # next was lower-cased above, so "[id]" appears
                        # URL-encoded as %5bid%5d.
                        href = next.replace("%5bid%5d", "%s" % result.id)
                        records.append(TR(
                            A(result.name, _href=href),
                            result.start_date or NONE,
                            result.end_date or NONE,
                            result.description or NONE,
                            result.status and project_project_status_opts[result.status] or "unknown",
                            ))
                    # NOTE(review): the header declares 10 columns but each row
                    # only renders 5 cells — apparently out of sync; confirm
                    # the intended table layout.
                    items=DIV(TABLE(THEAD(TR(
                        TH("ID"),
                        TH("Organization"),
                        TH("Location"),
                        TH("Sector(s)"),
                        TH("Code"),
                        TH("Name"),
                        TH("Status"),
                        TH("Start date"),
                        TH("End date"),
                        TH("Budgeted Cost"))),
                        TBODY(records), _id="list", _class="display"))
                else:
                    items = T(NONE)
                # Fall back to the generic create label when no project-specific
                # CRUD strings are registered.
                try:
                    label_create_button = s3.crud_strings["project_project"].label_create_button
                except:
                    label_create_button = s3.crud_strings.label_create_button
                add_btn = A(label_create_button, _href=URL(r=request, f="project", args="create"), _class="action-btn")
                output.update(dict(items=items, add_btn=add_btn))
            return output
        else:
            # Only the HTML representation is supported for this method.
            session.error = BADFORMAT
            redirect(URL(r=request))
# Plug into REST controller
s3xrc.model.set_method(application, "project", method="search_location", action=shn_project_search_location )
# -----------------------------------------------------------------------------
    def shn_project_rheader(jr, tabs=[]):
        """
        Resource header for project pages (HTML representation only).

        NOTE(review): mutable default ``tabs=[]`` — harmless here because it is
        only read (passed through to shn_rheader_tabs), but ``tabs=None`` with
        an in-function default would be safer.
        """
        if jr.representation == "html":
            rheader_tabs = shn_rheader_tabs(jr, tabs)
            if jr.name == "project":
                _next = jr.here()
                _same = jr.same()
                project = jr.record
                sectors = TABLE()
                if project.cluster_id:
                    # @ToDo: Fix for list: type
                    # cluster_id is apparently stored as a pipe-delimited string
                    # ("|1|2|"); [1:-1] drops the empty leading/trailing pieces
                    # before each cluster name is looked up — confirm format.
                    _sectors = re.split("\|", project.cluster_id)[1:-1]
                    for sector in _sectors:
                        sectors.append(TR(db(db.org_cluster.id == sector).select(db.org_cluster.name, limitby=(0, 1)).first().name))
                if project:
                    rheader = DIV(TABLE(
                        TR(
                            TH(T("Code") + ": "),
                            project.code,
                            TH(A(T("Clear Selection"),
                                _href=URL(r=request, f="project", args="clear", vars={"_next": _same})))
                            ),
                        TR(
                            TH(T("Name") + ": "),
                            project.name,
                            TH(T("Location") + ": "),
                            location_id.location_id.represent(project.location_id),
                            ),
                        TR(
                            TH(T("Status") + ": "),
                            "%s" % project_project_status_opts[project.status],
                            TH(T("Cluster(s)") + ": "),
                            sectors,
                            #TH(A(T("Edit Project"),
                            #    _href=URL(r=request, f="project", args=[jr.id, "update"], vars={"_next": _next})))
                            )
                        ), rheader_tabs)
                    return rheader
        # Non-HTML representations (and non-project resources) get no rheader.
        return None
#==============================================================================
# Activity Type
#
resourcename = "activity_type"
tablename = "%s_%s" % (application, resourcename)
table = db.define_table(tablename,
Field("name", length=128, notnull=True, unique=True),
migrate=migrate, *s3_meta_fields())
ADD_ACTIVITY_TYPE = T("Add Activity Type")
    def activity_type_comment():
        """
        Return an 'Add Activity Type' popup link for members of group 1
        (presumably the admin group — confirm), otherwise None.
        """
        if auth.has_membership(auth.id_group(1)):
            return DIV(A(ADD_ACTIVITY_TYPE,
                         _class="colorbox",
                         _href=URL(r=request, c="project", f="activity_type", args="create", vars=dict(format="popup")),
                         _target="top",
                         _title=ADD_ACTIVITY_TYPE
                         )
                       )
        else:
            return None
activity_type_id = S3ReusableField("activity_type_id", db.project_activity_type, sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "project_activity_type.id","%(name)s", sort=True)),
represent = lambda id: shn_get_db_field_value(db = db,
table = "project_activity_type",
field = "name",
look_up = id),
label = T("Activity Type"),
comment = activity_type_comment(),
ondelete = "RESTRICT"
)
#==============================================================================
# Activity
#
opt_bnf_type = { 1: T("Individuals"),
2: T("Families/HH")
}
resourcename = "activity"
tablename = "%s_%s" % (application, resourcename)
table = db.define_table(tablename,
Field("name"),
organisation_id("donor_id",
label = T("Funding Organization"),
comment = DIV(A(ADD_ORGANIZATION,
_class="colorbox",
_href=organisation_popup_url,
_target="top",
_title=ADD_ORGANIZATION),
DIV(DIV(_class="tooltip",
_title=ADD_ORGANIZATION + "|" + T("The Organization which is funding this Activity."))))
),
organisation_id(),
cluster_id(),
#cluster_subsector_id(),
#Field("quantity"),
#Field("unit"), # Change to link to supply
Field("start_date","date"),
Field("end_date","date"),
location_id(),
#shelter_id(),
Field("total_bnf","integer"),
#Field("bnf_type","integer"),
#Field("bnf_date","date"),
#Field("total_bnf_target","integer"),
#Field("male","integer"),
#Field("female","integer"),
#Field("child_2","integer"),
#Field("child_5","integer"),
#Field("child_15","integer"),
#Field("cba_women","integer"),
#Field("pl_women","integer"),
person_id(),
comments(),
migrate=migrate, *s3_meta_fields())
table.name.label = T("Short Description")
table.total_bnf.label = T("Total Beneficiaries")
#table.bnf_type.label = T("Beneficiary Type")
#table.bnf_date.label = T("Date of Latest Information on Beneficiaries Reached")
#table.total_bnf_target.label = T("Total # of Target Beneficiaries")
#table.child_2.label = T("Children (< 2 years)")
#table.child_5.label = T("Children (2-5 years)")
#table.child_15.label = T("Children (5-15 years)")
#table.cba_women.label = T("CBA Women")
#table.cba_women.comment = DIV( _class="tooltip", _title= T("Women of Child Bearing Age"))
#table.pl_women.label = T("PL Women")
#table.pl_women.comment = DIV( _class="tooltip", _title= T("Women who are Pregnant or in Labour"))
table.person_id.label = T("Contact Person")
#table.comments.comment = T("(Constraints Only)")
for field in table:
if field.type == "integer":
field.requires = IS_NULL_OR( IS_INT_IN_RANGE(0,99999999) )
#table.bnf_type.requires = IS_NULL_OR(IS_IN_SET(opt_bnf_type))
#table.bnf_type.represent = lambda opt: opt_bnf_type.get(opt, NONE)
# CRUD Strings
ADD_ACTIVITY = T("Add Activity")
LIST_ACTIVITIES = T("List Activities")
s3.crud_strings[tablename] = Storage(title_create = ADD_ACTIVITY,
title_display = T("Activity Details"),
title_list = LIST_ACTIVITIES,
title_update = T("Edit Activity"),
title_search = T("Search Activities"),
subtitle_create = T("Add New Activity"),
subtitle_list = T("Activities"),
label_list_button = LIST_ACTIVITIES,
label_create_button = ADD_ACTIVITY,
msg_record_created = T("Activity Added"),
msg_record_modified = T("Activity Updated"),
msg_record_deleted = T("Activity Deleted"),
msg_list_empty = T("No Activities Found")
)
activity_id = S3ReusableField( "activity_id", db.project_activity, sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "project_activity.id","%(name)s", sort=True)),
represent = lambda id: shn_get_db_field_value(db = db,
table = "project_activity",
field = "name",
look_up = id),
label = T("Activity"),
comment = DIV(A(ADD_ACTIVITY,
_class="colorbox",
_href=URL(r=request, c="project", f="activity", args="create", vars=dict(format="popup")),
_target="top",
_title=ADD_ACTIVITY
)
),
ondelete = "RESTRICT"
)
# Activities as component of Orgs
s3xrc.model.add_component(application, resourcename,
multiple=True,
joinby=dict(org_organisation="organisation_id"))
#==============================================================================
# project_task:
# a task within a project/activity
#
project_task_status_opts = {
1: T("new"),
2: T("assigned"),
3: T("completed"),
4: T("postponed"),
5: T("feedback"),
6: T("cancelled"),
99: T("unspecified")
}
project_task_priority_opts = {
4: T("normal"),
1: T("immediately"),
2: T("urgent"),
3: T("high"),
5: T("low")
}
resourcename = "task"
tablename = application + "_" + resourcename
table = db.define_table(tablename,
Field("priority", "integer",
requires = IS_IN_SET(project_task_priority_opts, zero=None),
# default = 4,
label = T("Priority"),
represent = lambda opt: project_task_priority_opts.get(opt, UNKNOWN_OPT)),
Field("subject", length=80, notnull=True),
Field("description", "text"),
project_id(),
office_id(),
person_id(),
Field("status", "integer",
requires = IS_IN_SET(project_task_status_opts, zero=None),
# default = 1,
label = T("Status"),
represent = lambda opt: project_task_status_opts.get(opt, UNKNOWN_OPT)),
migrate=migrate, *s3_meta_fields())
# Task Resource called from multiple controllers
# - so we define strings in the model
table.subject.requires = IS_NOT_EMPTY()
table.subject.label = T("Subject")
table.person_id.label = T("Assigned to")
def shn_project_task_onvalidation(form):
""" Task form validation """
if str(form.vars.status) == "2" and not form.vars.person_id:
form.errors.person_id = T("Select a person in charge for status 'assigned'")
return False
# CRUD Strings
ADD_TASK = T("Add Task")
LIST_TASKS = T("List Tasks")
s3.crud_strings[tablename] = Storage(
title_create = ADD_TASK,
title_display = T("Task Details"),
title_list = LIST_TASKS,
title_update = T("Edit Task"),
title_search = T("Search Tasks"),
subtitle_create = T("Add New Task"),
subtitle_list = T("Tasks"),
label_list_button = LIST_TASKS,
label_create_button = ADD_TASK,
msg_record_created = T("Task added"),
msg_record_modified = T("Task updated"),
msg_record_deleted = T("Task deleted"),
msg_list_empty = T("No tasks currently registered"))
# Task as Component of Project, Office, (Organisation to come? via Project? Can't rely on that as multi-Org projects)
s3xrc.model.add_component(application, resourcename,
multiple=True,
joinby=dict(project_project="project_id",
project_office="office_id"))
s3xrc.model.configure(table,
listadd=False,
onvalidation = lambda form: shn_project_task_onvalidation(form),
list_fields=["id",
"project_id",
"office_id",
"priority",
"subject",
"person_id",
"status"],
main="subject", extra="description")
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that x and y are within machine epsilon of each other.

  Args:
    x: Floating-point `Tensor`
    y: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  if not message:
    message = ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")
  if data is None:
    data = [message,
            "Condition x ~= y did not hold element-wise: x = ", x.name, x,
            "y = ", y.name, y]
  if x.dtype.is_integer:
    # Integer dtypes admit no rounding error, so exact equality is the test.
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)
  with ops.name_scope(name, "assert_close", [x, y, data]):
    tolerance = np.finfo(x.dtype.as_numpy_dtype).eps
    abs_diff = math_ops.abs(x - y)
    all_close = math_ops.reduce_all(math_ops.less_equal(abs_diff, tolerance))
    return control_flow_ops.Assert(all_close, data, summarize=summarize)
def assert_integer_form(
    x, data=None, summarize=None, message=None,
    int_dtype=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
      implies the smallest possible signed int will be used for casting.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
  """
  with ops.name_scope(name, values=[x, data]):
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_integer:
      # Already an integer dtype: trivially integer-formed.
      return control_flow_ops.no_op()
    if not message:
      message = "{} has non-integer components".format(x.op.name)
    if int_dtype is None:
      # Pick the smallest signed int that can hold the float's exact ints.
      float_to_int = {
          dtypes.float16: dtypes.int16,
          dtypes.float32: dtypes.int32,
          dtypes.float64: dtypes.int64,
      }
      try:
        int_dtype = float_to_int[x.dtype.base_dtype]
      except KeyError:
        raise TypeError("Unrecognized type {}".format(x.dtype.name))
    # x is unchanged by a float -> int -> float round-trip iff it is integral.
    round_tripped = math_ops.cast(math_ops.cast(x, int_dtype), x.dtype)
    return check_ops.assert_equal(
        x, round_tripped,
        data=data, summarize=summarize, message=message, name=name)
def assert_symmetric(matrix):
  """Returns `matrix`, gated on an assertion that it equals its transpose."""
  transposed = array_ops.matrix_transpose(matrix)
  symmetry_check = check_ops.assert_equal(matrix, transposed)
  return control_flow_ops.with_dependencies([symmetry_check], matrix)
def embed_check_nonnegative_integer_form(
    x, name="embed_check_nonnegative_integer_form"):
  """Assert x is a non-negative tensor, and optionally of integers."""
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    checks = [
        check_ops.assert_non_negative(
            x, message="'{}' must be non-negative.".format(x.op.name)),
    ]
    if not x.dtype.is_integer:
      # Float-typed input must additionally round-trip through an int cast.
      checks.append(assert_integer_form(
          x,
          message="'{}' cannot contain fractional components.".format(
              x.op.name)))
    return control_flow_ops.with_dependencies(checks, x)
def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `bool` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")

  # We can't simply do math_ops.equal(a.shape, b.shape): static shape
  # inference may break the equality comparison between shape(a) and
  # shape(b) inside math_ops.equal, so compare the dynamic shapes instead.
  def all_shapes_equal():
    ab = array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0)
    ba = array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)
    return math_ops.reduce_all(math_ops.equal(ab, ba))

  # Differing ranks means the shapes cannot match; short-circuit to False.
  ranks_match = math_ops.equal(array_ops.rank(a), array_ops.rank(b))
  return control_flow_ops.cond(
      ranks_match, all_shapes_equal, lambda: constant_op.constant(False))
def get_logits_and_probs(logits=None,
                         probs=None,
                         multidimensional=False,
                         validate_args=False,
                         name="get_logits_and_probs"):
  """Converts logit to probabilities (or vice-versa), and returns both.

  Args:
    logits: Floating-point `Tensor` representing log-odds.
    probs: Floating-point `Tensor` representing probabilities.
    multidimensional: Python `bool`, default `False`.
      If `True`, represents whether the last dimension of `logits` or `probs`,
      a `[N1, N2, ... k]` dimensional tensor, representing the
      logit or probability of `shape[-1]` classes.
    validate_args: Python `bool`, default `False`. When `True`, either assert
      `0 <= probs <= 1` (if not `multidimensional`) or that the last dimension
      of `probs` sums to one.
    name: A name for this operation (optional).

  Returns:
    logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
      `1`, then the corresponding entry in the returned logit will be `-Inf` and
      `Inf` respectively.

  Raises:
    ValueError: if neither `probs` nor `logits` were passed in, or both were.
  """
  with ops.name_scope(name, values=[probs, logits]):
    # Exactly one of logits/probs must be supplied.
    if (probs is None) == (logits is None):
      raise ValueError("Must pass probs or logits, but not both.")
    if probs is None:
      logits = ops.convert_to_tensor(logits, name="logits")
      if not logits.dtype.is_floating:
        # Fixed error-message grammar ("must having" -> "must have").
        raise TypeError("logits must have floating type.")
      # We can early return since we constructed probs and therefore know
      # they're valid.
      if multidimensional:
        if validate_args:
          logits = embed_check_categorical_event_shape(logits)
        return logits, nn.softmax(logits, name="probs")
      return logits, math_ops.sigmoid(logits, name="probs")
    probs = ops.convert_to_tensor(probs, name="probs")
    if not probs.dtype.is_floating:
      # Fixed error-message grammar ("must having" -> "must have").
      raise TypeError("probs must have floating type.")
    if validate_args:
      with ops.name_scope("validate_probs"):
        one = constant_op.constant(1., probs.dtype)
        dependencies = [check_ops.assert_non_negative(probs)]
        if multidimensional:
          probs = embed_check_categorical_event_shape(probs)
          dependencies += [assert_close(math_ops.reduce_sum(probs, -1), one,
                                        message="probs does not sum to 1.")]
        else:
          dependencies += [check_ops.assert_less_equal(
              probs, one, message="probs has components greater than 1.")]
        probs = control_flow_ops.with_dependencies(dependencies, probs)
    with ops.name_scope("logits"):
      if multidimensional:
        # Here we don't compute the multidimensional case, in a manner
        # consistent with respect to the unidimensional case. We do so
        # following the TF convention. Typically, you might expect to see
        # logits = log(probs) - log(probs[pivot]). A side-effect of
        # being consistent with the TF approach is that the unidimensional case
        # implicitly handles the second dimension but the multidimensional case
        # explicitly keeps the pivot dimension.
        return math_ops.log(probs), probs
      return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs
def _is_known_unsigned_by_dtype(dt):
  """Helper returning True if dtype is known to be unsigned."""
  # bool is treated as unsigned: its representable integers are {0, 1}.
  return dt.base_dtype in (dtypes.bool, dtypes.uint8, dtypes.uint16)
def _is_known_signed_by_dtype(dt):
  """Helper returning True if dtype is known to be signed."""
  signed_dtypes = (dtypes.float16, dtypes.float32, dtypes.float64,
                   dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64)
  return dt.base_dtype in signed_dtypes
def _is_known_dtype(dt):
  """Helper returning True if dtype is known, i.e., signed or unsigned."""
  return _is_known_signed_by_dtype(dt) or _is_known_unsigned_by_dtype(dt)
def _largest_integer_by_dtype(dt):
  """Helper returning the largest integer exactly representable by dtype."""
  if not _is_known_dtype(dt):
    raise TypeError("Unrecognized dtype: {}".format(dt.name))
  if dt.is_floating:
    # Largest integer a float holds exactly: 2**(1 + mantissa bits).
    mantissa_bits = np.finfo(dt.as_numpy_dtype).nmant
    return int(2**(mantissa_bits + 1))
  elif dt.is_integer:
    return np.iinfo(dt.as_numpy_dtype).max
  elif dt.base_dtype == dtypes.bool:
    return int(1)
  # Unreachable once _is_known_dtype passed; kept for completeness.
  raise TypeError("Unrecognized dtype: {}".format(dt.name))
def _smallest_integer_by_dtype(dt):
  """Helper returning the smallest integer exactly representable by dtype."""
  if not _is_known_dtype(dt):
    raise TypeError("Unrecognized dtype: {}".format(dt.name))
  # Unsigned types (and bool) bottom out at zero; otherwise the exactly
  # representable range is symmetric around zero.
  if _is_known_unsigned_by_dtype(dt):
    return 0
  return -_largest_integer_by_dtype(dt)
def _is_integer_like_by_dtype(dt):
  """Helper returning True if dtype.is_integer or dtype is `bool`."""
  if not _is_known_dtype(dt):
    raise TypeError("Unrecognized dtype: {}".format(dt.name))
  return dt.base_dtype == dtypes.bool or dt.is_integer
def embed_check_categorical_event_shape(
    categorical_param,
    name="embed_check_categorical_event_shape"):
  """Embeds checks that categorical distributions don't have too many classes.

  A categorical-type distribution is one which, e.g., returns the class label
  rather than a one-hot encoding. E.g., `Categorical(probs)`.

  Since distributions output samples in the same dtype as the parameters, we
  must ensure that casting doesn't lose precision. That is, the
  `parameter.dtype` implies a maximum number of classes. However, since shape is
  `int32` and categorical variables are presumed to be indexes into a `Tensor`,
  we must also ensure that the number of classes is no larger than the largest
  possible `int32` index, i.e., `2**31-1`.

  In other words the number of classes, `K`, must satisfy the following
  condition:

  ```python
  K <= min(
      int(2**31 - 1),  # Largest float as an index.
      {
          dtypes.float16: int(2**11),   # Largest int as a float16.
          dtypes.float32: int(2**24),
          dtypes.float64: int(2**53),
      }.get(categorical_param.dtype.base_dtype, 0))
  ```

  Args:
    categorical_param: Floating-point `Tensor` representing parameters of
      distribution over categories. The rightmost shape is presumed to be the
      number of categories.
    name: A name for this operation (optional).

  Returns:
    categorical_param: Input `Tensor` with appropriate assertions embedded.

  Raises:
    TypeError: if `categorical_param` has an unknown `dtype`.
    ValueError: if we can statically identify `categorical_param` as being too
      large (for being closed under int32/float casting).
  """
  with ops.name_scope(name, values=[categorical_param]):
    x = ops.convert_to_tensor(categorical_param, name="categorical_param")
    # The size must not exceed both of:
    # - The largest possible int32 (since categorical values are presumed to be
    #   indexes into a Tensor).
    # - The largest possible integer exactly representable under the given
    #   floating-point dtype (since we need to cast to/from).
    #
    # The chosen floating-point thresholds are 2**(1 + mantissa_bits).
    # For more details, see:
    # https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
    x_dtype = x.dtype.base_dtype
    max_event_size = (_largest_integer_by_dtype(x_dtype)
                      if x_dtype.is_floating else 0)
    # Fixed: compare with `==`, not `is`. Identity comparison against an int
    # literal is implementation-defined (and a SyntaxWarning on CPython 3.8+).
    if max_event_size == 0:
      raise TypeError("Unable to validate size of unrecognized dtype "
                      "({}).".format(x_dtype.name))
    try:
      x_shape_static = x.get_shape().with_rank_at_least(1)
    except ValueError:
      raise ValueError("A categorical-distribution parameter must have "
                       "at least 1 dimension.")
    if x_shape_static[-1].value is not None:
      # Event size is statically known: validate eagerly at graph-build time.
      event_size = x_shape_static[-1].value
      if event_size < 2:
        raise ValueError("A categorical-distribution parameter must have at "
                         "least 2 events.")
      if event_size > max_event_size:
        raise ValueError(
            "Number of classes exceeds `dtype` precision, i.e., "
            "{} implies shape ({}) cannot exceed {}.".format(
                x_dtype.name, event_size, max_event_size))
      return x
    else:
      # Event size only known at graph-run time: embed runtime assertions.
      event_size = array_ops.shape(x, name="x_shape")[-1]
      return control_flow_ops.with_dependencies([
          check_ops.assert_rank_at_least(
              x, 1, message=("A categorical-distribution parameter must have "
                             "at least 1 dimension.")),
          check_ops.assert_greater_equal(
              array_ops.shape(x)[-1], 2,
              message=("A categorical-distribution parameter must have at "
                       "least 2 events.")),
          check_ops.assert_less_equal(
              event_size, max_event_size,
              message="Number of classes exceeds `dtype` precision, "
                      "i.e., {} dtype cannot exceed {} shape.".format(
                          x_dtype.name, max_event_size)),
      ], x)
def embed_check_integer_casting_closed(
    x,
    target_dtype,
    assert_nonnegative=True,
    name="embed_check_casting_closed"):
  """Ensures integers remain unaffected despite casting to/from int/float types.

  Example integer-types: `uint8`, `int32`, `bool`.
  Example floating-types: `float32`, `float64`.

  The largest possible integer representable by an IEEE754 floating-point is
  `2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
  `2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
  integer-form values can be cast to some other type without loss of precision.

  The smallest representable integer is the negative of the largest
  representable integer, except for types: `uint8`, `uint16`, `bool`. For these
  types, the smallest representable integer is `0`.

  Args:
    x: `Tensor` representing integer-form values.
    target_dtype: TF `dtype` under which `x` should have identical values.
    assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
    name: A name for this operation (optional).

  Returns:
    x: Input `Tensor` with appropriate assertions embedded.

  Raises:
    TypeError: if `x` is neither integer- nor floating-type.
    TypeError: if `target_dtype` is neither integer- nor floating-type.
    TypeError: if neither `x` nor `target_dtype` are integer-type.
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    # Static (graph-build-time) type validation: both endpoints of the cast
    # must be integer-like or floating, and at least one must be integer-like.
    if (not _is_integer_like_by_dtype(x.dtype)
        and not x.dtype.is_floating):
      raise TypeError("{}.dtype must be floating- or "
                      "integer-type.".format(x.dtype.name))
    if (not _is_integer_like_by_dtype(target_dtype)
        and not target_dtype.is_floating):
      raise TypeError("target_dtype ({}) must be floating- or "
                      "integer-type.".format(target_dtype.name))
    if (not _is_integer_like_by_dtype(x.dtype)
        and not _is_integer_like_by_dtype(target_dtype)):
      raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) "
                      "must be integer-type.".format(
                          x.op.name, x.dtype.name, target_dtype.name))

    # Runtime value assertions, accumulated then embedded as dependencies.
    assertions = []
    if assert_nonnegative:
      assertions += [
          check_ops.assert_non_negative(
              x, message="Elements must be non-negative."),
      ]

    if x.dtype.is_floating:
      # Being here means _is_integer_like_by_dtype(target_dtype) = True.
      # Since this check implies the magnitude check below, we need only it.
      assertions += [
          assert_integer_form(
              x, int_dtype=target_dtype,
              message="Elements must be {}-equivalent.".format(
                  target_dtype.name)),
      ]
    else:
      # x is integer-like; only magnitude (range) checks may be needed.
      if (_largest_integer_by_dtype(x.dtype)
          > _largest_integer_by_dtype(target_dtype)):
        # Cast may lose integer precision.
        assertions += [
            check_ops.assert_less_equal(
                x, _largest_integer_by_dtype(target_dtype),
                message=("Elements cannot exceed {}.".format(
                    _largest_integer_by_dtype(target_dtype)))),
        ]
      # The lower-bound check is redundant when non-negativity is asserted.
      if (not assert_nonnegative and
          (_smallest_integer_by_dtype(x.dtype)
           < _smallest_integer_by_dtype(target_dtype))):
        assertions += [
            check_ops.assert_greater_equal(
                x, _smallest_integer_by_dtype(target_dtype),
                message=("Elements cannot be smaller than {}.".format(
                    _smallest_integer_by_dtype(target_dtype)))),
        ]

    if not assertions:
      # Nothing to verify at runtime; return the input unmodified.
      return x
    return control_flow_ops.with_dependencies(assertions, x)
def log_combinations(n, counts, name="log_combinations"):
  """Multinomial coefficient.

  Given `n` and `counts`, where `counts` has last dimension `k`, we compute
  the multinomial coefficient as:

  ```n! / sum_i n_i!```

  where `i` runs over all `k` classes.

  Args:
    n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
      outcomes.
    counts: Floating-point `Tensor` broadcastable with `n`. This represents
      counts in `k` classes, where `k` is the last dimension of the tensor.
    name: A name for this operation (optional).

  Returns:
    `Tensor` representing the multinomial coefficient between `n` and `counts`.
  """
  # E.g. if counts = [1, 2] this is 3 choose 2; in general the number of ways
  # the counts could have arrived is (sum counts)! / prod(counts_i!), which we
  # evaluate in log-space via lgamma. The reduction runs over the last
  # ("distribution") dimension of counts, and n a priori equals sum(counts).
  with ops.name_scope(name, values=[n, counts]):
    n = ops.convert_to_tensor(n, name="n")
    counts = ops.convert_to_tensor(counts, name="counts")
    log_n_factorial = math_ops.lgamma(n + 1)
    log_counts_factorials = math_ops.lgamma(counts + 1)
    sum_log_counts_factorials = math_ops.reduce_sum(
        log_counts_factorials, axis=[-1])
    return log_n_factorial - sum_log_counts_factorials
def matrix_diag_transform(matrix, transform=None, name=None):
  """Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.

  Create a trainable covariance defined by a Cholesky factor:

  ```python
  # Transform network layer into 2 x 2 array.
  matrix_values = tf.contrib.layers.fully_connected(activations, 4)
  matrix = tf.reshape(matrix_values, (batch_size, 2, 2))

  # Make the diagonal positive. If the upper triangle was zero, this would be a
  # valid Cholesky factor.
  chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)

  # LinearOperatorTriL ignores the upper triangle.
  operator = LinearOperatorTriL(chol)
  ```

  Example of heteroskedastic 2-D linear regression.

  ```python
  # Get a trainable Cholesky factor.
  matrix_values = tf.contrib.layers.fully_connected(activations, 4)
  matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
  chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)

  # Get a trainable mean.
  mu = tf.contrib.layers.fully_connected(activations, 2)

  # This is a fully trainable multivariate normal!
  dist = tf.contrib.distributions.MVNCholesky(mu, chol)

  # Standard log loss. Minimizing this will "train" mu and chol, and then dist
  # will be a distribution predicting labels as multivariate Gaussians.
  loss = -1 * tf.reduce_mean(dist.log_prob(labels))
  ```

  Args:
    matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
      equal.
    transform: Element-wise function mapping `Tensors` to `Tensors`. To
      be applied to the diagonal of `matrix`. If `None`, `matrix` is returned
      unchanged. Defaults to `None`.
    name: A name to give created ops.
      Defaults to "matrix_diag_transform".

  Returns:
    A `Tensor` with same shape and `dtype` as `matrix`.
  """
  with ops.name_scope(name, "matrix_diag_transform", [matrix]):
    matrix = ops.convert_to_tensor(matrix, name="matrix")
    if transform is None:
      # No transform requested: hand the input straight back.
      return matrix
    # Swap in the transformed diagonal; off-diagonal entries are untouched.
    new_diag = transform(array_ops.matrix_diag_part(matrix))
    return array_ops.matrix_set_diag(matrix, new_diag)
def rotate_transpose(x, shift, name="rotate_transpose"):
  """Circularly moves dims left or right.

  Effectively identical to:

  ```python
  numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
  ```

  When the rank of `x` or the value of `shift` is not statically known, the
  permutation is computed with additional graph-runtime ops instead of being
  resolved at graph-build time.

  Example:

  ```python
  x = ... # Tensor of shape [1, 2, 3, 4].
  rotate_transpose(x, -1)  # result shape: [2, 3, 4, 1]
  rotate_transpose(x, -2)  # result shape: [3, 4, 1, 2]
  rotate_transpose(x,  1)  # result shape: [4, 1, 2, 3]
  rotate_transpose(x,  2)  # result shape: [3, 4, 1, 2]
  rotate_transpose(x, 7) == rotate_transpose(x, 3)
  rotate_transpose(x, -7) == rotate_transpose(x, -3)
  ```

  Args:
    x: `Tensor`.
    shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
      transpose right (shift>0).
    name: Python `str`. The name to give this op.

  Returns:
    rotated_x: Input `Tensor` with dimensions circularly rotated by shift.

  Raises:
    TypeError: if shift is not integer type.
  """
  with ops.name_scope(name, values=[x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    shift = ops.convert_to_tensor(shift, name="shift")
    # We do not assign back to preserve constant-ness.
    check_ops.assert_integer(shift)
    shift_value_static = tensor_util.constant_value(shift)
    ndims = x.get_shape().ndims
    if ndims is not None and shift_value_static is not None:
      # Static path: both rank and shift are known at graph-build time, so
      # compute the permutation in numpy and emit a single transpose op.
      if ndims < 2: return x
      # Reduce shift modulo rank, preserving its sign.
      shift_value_static = np.sign(shift_value_static) * (
          abs(shift_value_static) % ndims)
      if shift_value_static == 0: return x
      perm = np.roll(np.arange(ndims), shift_value_static)
      return array_ops.transpose(x, perm=perm)
    else:
      # Dynamic path: rank and/or shift only known at graph-run time.
      # Consider if we always had a positive shift, and some specified
      # direction.
      # When shifting left we want the new array:
      #   last(x, n-shift) + first(x, shift)
      # and if shifting right then we want:
      #   last(x, shift) + first(x, n-shift)
      # Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
      # Also, we can encode direction and shift as one: direction * shift.
      # Combining these facts, we have:
      #   a = cond(shift<0, -shift, n-shift)
      #   last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
      # Finally, we transform shift by modulo length so it can be specified
      # independently from the array upon which it operates (like python).
      ndims = array_ops.rank(x)
      shift = array_ops.where(math_ops.less(shift, 0),
                              math_ops.mod(-shift, ndims),
                              ndims - math_ops.mod(shift, ndims))
      first = math_ops.range(0, shift)
      last = math_ops.range(shift, ndims)
      perm = array_ops.concat([last, first], 0)
      return array_ops.transpose(x, perm=perm)
def pick_vector(cond,
                true_vector,
                false_vector,
                name="pick_vector"):
  """Picks possibly different length row `Tensor`s based on condition.

  Value `Tensor`s should have exactly one dimension.

  If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
  `false_vector` is immediately returned. I.e., no graph nodes are created and
  no validation happens.

  Args:
    cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
    true_vector: `Tensor` of one dimension. Returned when cond is `True`.
    false_vector: `Tensor` of one dimension. Returned when cond is `False`.
    name: Python `str`. The name to give this op.

  Example:

  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))
  # result is tensor: [10, 11].
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))
  # result is tensor: [15, 16, 17].
  ```

  Returns:
    true_or_false_vector: `Tensor`.

  Raises:
    TypeError: if `cond.dtype != tf.bool`
    TypeError: if `cond` is not a constant and
      `true_vector.dtype != false_vector.dtype`
  """
  with ops.name_scope(name, values=(cond, true_vector, false_vector)):
    cond = ops.convert_to_tensor(cond, name="cond")
    if cond.dtype != dtypes.bool:
      raise TypeError("%s.dtype=%s which is not %s" %
                      (cond.name, cond.dtype, dtypes.bool))
    static_cond = tensor_util.constant_value(cond)
    if static_cond is not None:
      # Condition known at graph-build time: pick eagerly, create no nodes.
      return true_vector if static_cond else false_vector
    true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
    false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
    if true_vector.dtype != false_vector.dtype:
      raise TypeError(
          "%s.dtype=%s does not match %s.dtype=%s"
          % (true_vector.name, true_vector.dtype,
             false_vector.name, false_vector.dtype))
    # Concatenate both candidates and slice out whichever half cond selects.
    num_true = array_ops.shape(true_vector)[0]
    joined = array_ops.concat([true_vector, false_vector], 0)
    begin = [array_ops.where(cond, 0, num_true)]
    size = [array_ops.where(cond, num_true, -1)]
    return array_ops.slice(joined, begin, size)
def gen_new_seed(seed, salt):
  """Generate a new seed, from the given seed and salt."""
  if seed is None:
    return None
  salted = "{}{}".format(seed, salt).encode("utf-8")
  digest = hashlib.md5(salted).hexdigest()
  # Fold the first 8 hex digits into a non-negative 31-bit integer.
  return int(digest[:8], 16) & 0x7FFFFFFF
def fill_lower_triangular(x, validate_args=False, name="fill_lower_triangular"):
  """Creates a (batch of) lower triangular matrix from a vector of inputs.

  If `x.get_shape()` is `[b1, b2, ..., bK, d]` then the output shape is `[b1,
  b2, ..., bK, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
  `n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))`.

  Although the non-batch complexity is O(n**2), large constants and sub-optimal
  vectorization means the complexity of this function is 5x slower than zeroing
  out the upper triangular, i.e., `tf.matrix_band_part(X, -1, 0)`. This
  function becomes competitive only when several matmul/cholesky/etc ops can be
  ellided in constructing the input. Example: wiring a fully connected layer as
  a covariance matrix; this function reduces the final layer by 2x and possibly
  reduces the network arch complexity considerably. In most cases it is better
  to simply build a full matrix and zero out the upper triangular elements,
  e.g., `tril = tf.matrix_band_part(full, -1, 0)`, rather than directly
  construct a lower triangular.

  Warning: This Op is intended for convenience, not efficiency.

  Example:

  ```python
  fill_lower_triangular([1, 2, 3, 4, 5, 6])
  # Returns: [[1, 0, 0],
  #           [2, 3, 0],
  #           [4, 5, 6]]
  ```

  For comparison, a pure numpy version of this function can be found in
  `distribution_util_test.py`, function `_fill_lower_triangular`.

  Args:
    x: `Tensor` representing lower triangular elements.
    validate_args: Python `bool`, default `False`. Whether to ensure the shape
      of `x` can be mapped to a lower triangular matrix (controls non-static
      checks only).
    name: Python `str`. The name to give this op.

  Returns:
    tril: `Tensor` with lower triangular elements filled from `x`.

  Raises:
    ValueError: if shape if `x` has static shape which cannot be mapped to a
      lower triangular matrix.
  """
  # TODO(jvdillon): Replace this code with dedicated op when it exists.
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    if (x.get_shape().ndims is not None and
        x.get_shape()[-1].value is not None):
      # Static path: d is known, so n and the sanity check resolve eagerly.
      d = x.get_shape()[-1].value
      # d = n(n+1)/2 implies n is:
      n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))
      # Use floor division so the comparison below is exact int-to-int
      # (true division would produce a float under `from __future__ import
      # division`).
      d_inferred = n * (n + 1) // 2
      if d != d_inferred:
        raise ValueError("Input cannot be mapped to a lower triangular; "
                         "n*(n+1)/2 = %d != %d" % (d_inferred, d))
      final_shape = x.get_shape()[:-1].concatenate(
          tensor_shape.TensorShape([n, n]))
    else:
      # Dynamic path: compute n from the runtime value of d.
      d = math_ops.cast(array_ops.shape(x)[-1], dtype=dtypes.float32)
      # d = n(n+1)/2 implies n is:
      # Fixed: use math_ops.sqrt. The previous code called `dtypes.sqrt`,
      # but the `dtypes` module has no `sqrt`, so this branch always raised
      # AttributeError at graph construction.
      n = math_ops.cast(0.5 * (math_ops.sqrt(1. + 8. * d) - 1.),
                        dtype=dtypes.int32)
      if validate_args:
        # NOTE(review): `n * (n + 1) / 2` is int32 true division and `d` is
        # float32; confirm the dtypes line up for assert_equal in this path.
        is_valid_input_shape = check_ops.assert_equal(
            n * (n + 1) / 2, d,
            message="Input cannot be mapped to a lower triangular.")
        n = control_flow_ops.with_dependencies([is_valid_input_shape], n)
      final_shape = x.get_shape()[:-1].concatenate(
          tensor_shape.TensorShape([None, None]))

    def tril_ids(n):
      """Internal helper to create vector of linear indices into y."""
      # Build the ids statically; chose 512 because it implies 1MiB.
      if not tensor_util.is_tensor(n) and n <= 512:
        ids = np.arange(n**2, dtype=np.int32)
        rows = (ids / n).astype(np.int32)  # Implicit floor.
        # We need to stop incrementing the index when we encounter
        # upper-triangular elements. The idea here is to compute the
        # lower-right number of zeros then by "symmetry" subtract this from the
        # total number of zeros, n(n-1)/2.
        # Then we note that: n(n-1)/2 - (n-r)*(n-r-1)/2 = r(2n-r-1)/2
        offset = (rows * (2 * n - rows - 1) / 2).astype(np.int32)
        # We could also zero out when (rows < cols) == (rows < ids-n*rows).
        # mask = (ids <= (n + 1) * rows).astype(np.int32)
      else:
        ids = math_ops.range(n**2)
        rows = math_ops.cast(ids / n, dtype=dtypes.int32)
        offset = math_ops.cast(rows * (2 * n - rows - 1) / 2,
                               dtype=dtypes.int32)
      return ids - offset

    # Special-case non-batch case.
    if x.get_shape().ndims == 1:
      y = array_ops.gather(x, array_ops.reshape(tril_ids(n), [n, n]))
      y = array_ops.matrix_band_part(y, -1, 0)
      y.set_shape(y.get_shape().merge_with(final_shape))
      return y

    # Make ids for each batch dim.
    if (x.get_shape().ndims is not None and
        x.get_shape()[:-1].is_fully_defined()):
      batch_shape = np.asarray(x.get_shape()[:-1].as_list(), dtype=np.int32)
      m = np.prod(batch_shape).astype(np.int32)
    else:
      batch_shape = array_ops.shape(x)[:-1]
      m = math_ops.reduce_prod(array_ops.shape(x)[:-1])
    batch_ids = math_ops.range(m)

    # Assemble the tril_ids into batch,tril_id pairs.
    idx = array_ops.stack([
        array_ops.tile(array_ops.expand_dims(batch_ids, 1), [1, n * n]),
        array_ops.tile(array_ops.expand_dims(tril_ids(n), 0), [m, 1])
    ])
    idx = array_ops.transpose(idx, [1, 2, 0])

    # Gather up, reshape, and return.
    y = array_ops.reshape(x, [-1, d])
    y = array_ops.gather_nd(y, idx)
    y = array_ops.reshape(y, array_ops.concat([batch_shape, [n, n]], 0))
    y = array_ops.matrix_band_part(y, -1, 0)
    y.set_shape(y.get_shape().merge_with(final_shape))
    return y
def tridiag(below=None, diag=None, above=None, name=None):
  """Creates a matrix with values set above, below, and on the diagonal.

  Example:

  ```python
  tridiag(below=[1., 2., 3.],
          diag=[4., 5., 6., 7.],
          above=[8., 9., 10.])
  # ==> array([[  4.,   8.,   0.,   0.],
  #            [  1.,   5.,   9.,   0.],
  #            [  0.,   2.,   6.,  10.],
  #            [  0.,   0.,   3.,   7.]], dtype=float32)
  ```

  Warning: This Op is intended for convenience, not efficiency.

  Args:
    below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
      diagonal part. `None` is logically equivalent to `below = 0`.
    diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
      part. `None` is logically equivalent to `diag = 0`.
    above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
      diagonal part. `None` is logically equivalent to `above = 0`.
    name: Python `str`. The name to give this op.

  Returns:
    tridiag: `Tensor` with values set above, below and on the diagonal.

  Raises:
    ValueError: if all inputs are `None`.
  """

  def _pad(x):
    """Prepends and appends a zero to every vector in a batch of vectors."""
    zeros_shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)
    zeros = array_ops.zeros(zeros_shape, dtype=x.dtype)
    return array_ops.concat([zeros, x, zeros], axis=-1)

  def _add(*tensors):
    """Adds the given Tensors elementwise, skipping `None` entries."""
    total = None
    for t in tensors:
      if t is None:
        continue
      total = t if total is None else total + t
    if total is None:
      raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
    return total

  with ops.name_scope(name, "tridiag", [below, diag, above]):
    # Each band is embedded into a full [..., d, d] matrix by padding the
    # vector with a zero, diagonalizing, and slicing off one row and column.
    if below is not None:
      below = ops.convert_to_tensor(below, name="below")
      below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]
    if diag is not None:
      diag = ops.convert_to_tensor(diag, name="diag")
      diag = array_ops.matrix_diag(diag)
    if above is not None:
      above = ops.convert_to_tensor(above, name="above")
      above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]
    # TODO(jvdillon): Consider using scatter_nd instead of creating three full
    # matrices.
    return _add(below, diag, above)
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/ops/softplus_op_test.py
# once TF core is accepting new ops.
def softplus_inverse(x, name=None):
  """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).

  Mathematically this op is equivalent to:

  ```none
  softplus_inverse = log(exp(x) - 1.)
  ```

  The implementation below uses the algebraically equivalent but more
  numerically stable form `log(1 - exp(-x)) + x`, with explicit clamping of
  very small and very large inputs (see inline comments).

  Args:
    x: `Tensor`. Non-negative (not enforced), floating-point.
    name: A name for the operation (optional).

  Returns:
    `Tensor`. Has the same type/shape as input `x`.
  """
  with ops.name_scope(name, "softplus_inverse", values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    # We begin by deriving a more numerically stable softplus_inverse:
    # x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
    # ==> exp{x} = 1 + exp{y}                                (1)
    # ==> y = Log[exp{x} - 1]                                (2)
    #       = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
    #       = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
    #       = Log[1 - exp{-x}] + x                           (3)
    # (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
    # For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
    # be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
    #
    # In addition to the numerically stable derivation above, we clamp
    # small/large values to be congruent with the logic in:
    # tensorflow/core/kernels/softplus_op.h
    #
    # Finally, we set the input to one whenever the input is too large or too
    # small. This ensures that no unchosen codepath is +/- inf. This is
    # necessary to ensure the gradient doesn't get NaNs. Recall that the
    # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
    # thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
    # to overwrite `x` with ones only when we will never actually use this
    # value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
    threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
    is_too_small = math_ops.less(x, np.exp(threshold))
    is_too_large = math_ops.greater(x, -threshold)
    # Surrogate values used in the clamped regions (see derivation above).
    too_small_value = math_ops.log(x)
    too_large_value = x
    # This `where` will ultimately be a NOP because we won't select this
    # codepath whenever we used the surrogate `ones_like`.
    x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),
                        array_ops.ones_like(x), x)
    y = x + math_ops.log(-math_ops.expm1(-x))  # == log(expm1(x))
    return array_ops.where(is_too_small, too_small_value,
                           array_ops.where(is_too_large, too_large_value, y))
# TODO(b/35290280): Add unit-tests.
def dimension_size(x, axis):
  """Returns the size of dimension `axis` of `x`.

  Prefers the statically known size; only falls back to a dynamic shape op
  when the rank or the requested dimension is unknown at graph time.
  """
  # tf.gather is not "constant-in, constant-out", so consult the static
  # shape first and build a shape op only when necessary.
  static_shape = x.get_shape()
  if static_shape.ndims is not None:
    static_dim = static_shape[axis].value
    if static_dim is not None:
      return static_dim
  return array_ops.shape(x)[axis]
class AppendDocstring(object):
  """Helper class to promote private subclass docstring to public counterpart.

  Example:

  ```python
  class TransformedDistribution(Distribution):
    @distribution_util.AppendDocstring(
      additional_note="A special note!",
      kwargs_dict={"foo": "An extra arg."})
    def _prob(self, y, foo=None):
      pass
  ```

  In this case, the `AppendDocstring` decorator appends the `additional_note`
  to the docstring of `prob` (not `_prob`) and adds a new `kwargs` section
  with each dictionary item as a bullet-point.

  For a more detailed example, see `TransformedDistribution`.
  """

  def __init__(self, additional_note="", kwargs_dict=None):
    """Initializes the AppendDocstring object.

    Args:
      additional_note: Python string added as additional docstring to public
        version of function.
      kwargs_dict: Python string/string dictionary representing
        specific kwargs expanded from the **kwargs input.

    Raises:
      ValueError: if kwargs_dict.key contains whitespace.
      ValueError: if kwargs_dict.value contains newlines.
    """
    self._additional_note = additional_note
    if kwargs_dict:
      bullets = []
      # Deterministic (sorted) ordering keeps the generated docs stable.
      for name in sorted(kwargs_dict):
        if any(ch.isspace() for ch in name):
          raise ValueError(
              "Parameter name \"%s\" contains whitespace." % name)
        description = kwargs_dict[name].lstrip()
        if "\n" in description:
          raise ValueError(
              "Parameter description for \"%s\" contains newlines." % name)
        bullets.append("* `%s`: %s" % (name, description))
      self._additional_note += ("\n\n##### `kwargs`:\n\n" +
                                "\n".join(bullets))

  def __call__(self, fn):
    @functools.wraps(fn)
    def _fn(*args, **kwargs):
      return fn(*args, **kwargs)
    # Append to the wrapped docstring, or use the note verbatim if the
    # wrapped function had no docstring at all.
    _fn.__doc__ = (self._additional_note if _fn.__doc__ is None
                   else _fn.__doc__ + "\n%s" % self._additional_note)
    return _fn
| |
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state, deprecated
from ..utils.extmath import logsumexp
from .. import cluster
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds to
        a single data point.
    means : array_like, shape (n_components, n_features)
        List of n_features-dimensional mean vectors for n_components
        Gaussians. Each row corresponds to a single mean vector.
    covars : array_like
        List of n_components covariance parameters for each Gaussian. The
        shape depends on `covariance_type`:
            (n_components, n_features)              if 'spherical',
            (n_features, n_features)                if 'tied',
            (n_components, n_features)              if 'diag',
            (n_components, n_features, n_features)  if 'full'
    covariance_type : string
        Type of the covariance parameters. Must be one of
        'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Array containing the log probabilities of each data point in
        X under each of the n_components multivariate Gaussian distributions.
    """
    # Dispatch to the implementation matching the covariance
    # parameterization; an unknown type surfaces as a KeyError.
    density_funcs = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }
    log_density = density_funcs[covariance_type]
    return log_density(X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Generate random samples from a Gaussian distribution.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.
    covar : array_like, optional
        Covariance of the distribution. The shape depends on
        `covariance_type`:
            scalar if 'spherical',
            (n_features) if 'diag',
            (n_features, n_features) if 'tied', or 'full'
    covariance_type : string, optional
        Type of the covariance parameters. Must be one of
        'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
    n_samples : int, optional
        Number of samples to generate. Defaults to 1.
    random_state : RandomState or an int seed, optional
        Source of randomness.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated sample
    """
    random_state = check_random_state(random_state)
    n_features = len(mean)
    # Draw standard-normal samples first, then color them below.
    samples = random_state.randn(n_features, n_samples)
    if n_samples == 1:
        samples.shape = (n_features,)
    if covariance_type == 'spherical':
        samples *= np.sqrt(covar)
    elif covariance_type == 'diag':
        samples = np.dot(np.diag(np.sqrt(covar)), samples)
    else:
        # 'tied' / 'full': scale by a matrix square root of covar (via SVD)
        from scipy import linalg
        U, s, V = linalg.svd(covar)
        sqrt_covar = np.dot(U, np.dot(np.diag(np.sqrt(s)), V))
        samples = np.dot(sqrt_covar, samples)
    return (samples.T + mean).T
class GMM(BaseEstimator):
    """Gaussian Mixture Model

    Representation of a Gaussian mixture model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a GMM distribution.
    Initializes parameters such that every mixture component has zero
    mean and identity covariance.

    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string (read-only), optional
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    rng : numpy.random object, optional
        Must support the full numpy random number generator API.
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.
    thresh : float, optional
        Convergence threshold.

    Attributes
    ----------
    covariance_type : string
        String describing the type of covariance parameters used by the GMM.
        Must be one of 'spherical', 'tied', 'diag', 'full'.
    `weights_` : array, shape (`n_components`,)
        Mixing weights for each mixture component.
    `means_` : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.
    `covars_` : array
        Covariance parameters for each mixture component. The shape
        depends on `covariance_type`::
            (n_components,)                        if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'
    `converged_` : bool
        True when convergence was reached in fit(), False otherwise.

    See Also
    --------
    DPGMM : Infinite gaussian mixture model, using the dirichlet
        process, fit with a variational algorithm
    VBGMM : Finite gaussian mixture model fit with a variational
        algorithm, better for situations where there might be too little
        data to get a good estimate of the covariance matrix.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import mixture
    >>> np.random.seed(1)
    >>> g = mixture.GMM(n_components=2)
    >>> # Generate random observations with two modes centered on 0
    >>> # and 10 to use for training.
    >>> obs = np.concatenate((np.random.randn(100, 1),
    ...                       10 + np.random.randn(300, 1)))
    >>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type=None, min_covar=0.001, n_components=2,
        random_state=None, thresh=0.01)
    >>> np.round(g.weights_, 2)
    array([ 0.75, 0.25])
    >>> np.round(g.means_, 2)
    array([[ 10.05],
           [ 0.06]])
    >>> np.round(g.covars_, 2) #doctest: +SKIP
    array([[[ 1.02]],
           [[ 0.96]]])
    >>> g.predict([[0], [2], [9], [10]])
    array([1, 1, 0, 0])
    >>> np.round(g.score([[0], [2], [9], [10]]), 2)
    array([-2.19, -4.58, -1.75, -1.21])
    >>> # Refit the model on new data (initial parameters remain the
    >>> # same), this time with an even split between the two modes.
    >>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type=None, min_covar=0.001, n_components=2,
        random_state=None, thresh=0.01)
    >>> np.round(g.weights_, 2)
    array([ 0.5, 0.5])
    """
    def __init__(self, n_components=1, covariance_type='diag',
                 random_state=None, thresh=1e-2, min_covar=1e-3):
        self.n_components = n_components
        self._covariance_type = covariance_type
        self.thresh = thresh
        self.min_covar = min_covar
        self.random_state = random_state
        if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('bad covariance_type: ' + str(covariance_type))
        # start from uniform mixing weights
        self.weights_ = np.ones(self.n_components) / self.n_components
        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)
        self.converged_ = False
    def _get_covars(self):
        """Covariance parameters for each mixture component.

        Always returned as a list of full (n_features, n_features)
        matrices, whatever the internal storage. The internal shape
        depends on `cvtype`::
            (`n_states`, `n_features`)                if 'spherical',
            (`n_features`, `n_features`)              if 'tied',
            (`n_states`, `n_features`)                if 'diag',
            (`n_states`, `n_features`, `n_features`)  if 'full'
        """
        if self._covariance_type == 'full':
            return self.covars_
        elif self._covariance_type == 'diag':
            return [np.diag(cov) for cov in self.covars_]
        elif self._covariance_type == 'tied':
            return [self.covars_] * self.n_components
        elif self._covariance_type == 'spherical':
            return [np.diag(cov) for cov in self.covars_]
    def _set_covars(self, covars):
        """Provide values for covariance (validated before storing)."""
        covars = np.asarray(covars)
        _validate_covars(covars, self._covariance_type, self.n_components)
        self.covars_ = covars
    def eval(self, X):
        """Evaluate the model on data

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob: array_like, shape (n_samples,)
            Log probabilities of each data point in X
        responsibilities: array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('the shape of X is not compatible with self')
        # lpr[i, k] = log(weights_[k] * N(X[i]; means_[k], covars_[k]))
        lpr = (log_multivariate_normal_density(
                X, self.means_, self.covars_, self._covariance_type)
               + np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        # normalize in log-space, then exponentiate to get posteriors
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
    @deprecated("""will be removed in v0.12;
            use the score or predict method instead, depending on the question""")
    def decode(self, X):
        """Find most likely mixture components for each point in X.

        DEPRECATED IN VERSION 0.10; WILL BE REMOVED IN VERSION 0.12
        use the score or predict method instead, depending on the question.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprobs : array_like, shape (n_samples,)
            Log probability of each point in `obs` under the model.
        components : array_like, shape (n_samples,)
            Index of the most likely mixture components for each observation
        """
        logprob, posteriors = self.eval(X)
        return logprob, posteriors.argmax(axis=1)
    def score(self, X):
        """Compute the log probability under the model.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        """
        logprob, _ = self.eval(X)
        return logprob
    def predict(self, X):
        """Predict label for data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = (n_samples,)
        """
        logprob, responsibilities = self.eval(X)
        # hard assignment: component with the highest responsibility
        return responsibilities.argmax(axis=1)
    def predict_proba(self, X):
        """Predict posterior probability of data under each Gaussian
        in the model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        responsibilities : array-like, shape = (n_samples, n_components)
            Returns the probability of the sample for each Gaussian
            (state) in the model.
        """
        logprob, responsibilities = self.eval(X)
        return responsibilities
    @deprecated("""will be removed in v0.12;
            use the score or predict method instead, depending on the question""")
    def rvs(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        DEPRECATED IN VERSION 0.11; WILL BE REMOVED IN VERSION 0.12
        use sample instead
        """
        return self.sample(n_samples, random_state)
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.

        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        weight_cdf = np.cumsum(self.weights_)
        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        # (inverse-CDF sampling over the mixing weights)
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        for comp in xrange(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                if self._covariance_type == 'tied':
                    cv = self.covars_
                elif self._covariance_type == 'spherical':
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self._covariance_type,
                    num_comp_in_X, random_state=random_state).T
        return X
    def fit(self, X, n_iter=100, n_init=1, thresh=1e-2, params='wmc',
            init_params='wmc'):
        """Estimate model parameters with the expectation-maximization
        algorithm.

        An initialization step is performed before entering the em
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string ''. Likewise, if you
        would like just to do an initialization, call this method with
        n_iter=0.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        n_iter : int, optional
            Number of EM iterations to perform.
        n_init : int, optional
            number of initializations to perform. the best results is kept
        params : string, optional
            Controls which parameters are updated in the training
            process. Can contain any combination of 'w' for weights,
            'm' for means, and 'c' for covars. Defaults to 'wmc'.
        init_params : string, optional
            Controls which parameters are updated in the initialization
            process. Can contain any combination of 'w' for weights,
            'm' for means, and 'c' for covars. Defaults to 'wmc'.
        """
        ## initialization step
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.shape[0] < self.n_components:
            raise ValueError(
                'GMM estimation with %s components, but got only %s samples' %
                (self.n_components, X.shape[0]))
        max_log_prob = - np.infty
        if n_init < 1:
            raise ValueError('GMM estimation requires at least one run')
        for _ in range(n_init):
            # initialize means via k-means, weights uniformly and covars
            # from the empirical covariance of the data
            if 'm' in init_params or not hasattr(self, 'means_'):
                self.means_ = cluster.KMeans(
                    k=self.n_components).fit(X).cluster_centers_
            if 'w' in init_params or not hasattr(self, 'weights_'):
                self.weights_ = np.tile(1.0 / self.n_components,
                                        self.n_components)
            if 'c' in init_params or not hasattr(self, 'covars_'):
                cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
                if not cv.shape:
                    cv.shape = (1, 1)
                self.covars_ = \
                    distribute_covar_matrix_to_match_covariance_type(
                        cv, self._covariance_type, self.n_components)
            # EM algorithms
            log_likelihood = []
            # reset self.converged_ to False
            self.converged_ = False
            for i in xrange(n_iter):
                # Expectation step
                curr_log_likelihood, responsibilities = self.eval(X)
                log_likelihood.append(curr_log_likelihood.sum())
                # Check for convergence.
                if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
                        self.thresh:
                    self.converged_ = True
                    break
                # Maximization step
                self._do_mstep(X, responsibilities, params, self.min_covar)
            # if the results are better, keep them (best of n_init runs)
            if n_iter:
                if log_likelihood[-1] > max_log_prob:
                    max_log_prob = log_likelihood[-1]
                    best_params = {'weights': self.weights_,
                                   'means': self.means_,
                                   'covars': self.covars_}
        if n_iter:
            # restore the parameters of the best run
            self.covars_ = best_params['covars']
            self.means_ = best_params['means']
            self.weights_ = best_params['weights']
        return self
    def _do_mstep(self, X, responsibilities, params, min_covar=0):
        """ Perform the Mstep of the EM algorithm and return the class
        weights.
        """
        weights = responsibilities.sum(axis=0)
        weighted_X_sum = np.dot(responsibilities.T, X)
        # 10 * EPS guards against division by zero for empty components
        inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
        if 'w' in params:
            self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
        if 'm' in params:
            self.means_ = weighted_X_sum * inverse_weights
        if 'c' in params:
            covar_mstep_func = _covar_mstep_funcs[self._covariance_type]
            self.covars_ = covar_mstep_func(
                self, X, responsibilities, weighted_X_sum, inverse_weights,
                min_covar)
        return weights
    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        ndim = self.means_.shape[1]
        if self._covariance_type == 'full':
            cov_params = self.n_components * ndim * (ndim + 1) / 2.
        elif self._covariance_type == 'diag':
            cov_params = self.n_components * ndim
        elif self._covariance_type == 'tied':
            cov_params = ndim * (ndim + 1) / 2.
        elif self._covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = ndim * self.n_components
        # weights contribute n_components - 1 (they sum to one)
        return int(cov_params + mean_params + self.n_components - 1)
    def bic(self, X):
        """Bayesian information criterion for the current model fit
        and the proposed data

        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)

        Returns
        -------
        bic: float (the lower the better)
        """
        return (- 2 * self.score(X).sum() +
                self._n_parameters() * np.log(X.shape[0]))
    def aic(self, X):
        """Akaike information criterion for the current model fit
        and the proposed data

        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)

        Returns
        -------
        aic: float (the lower the better)
        """
        return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
    """Compute Gaussian log-density at X for a spherical model.

    `covars` holds one scalar variance per component, given either as a 1-D
    array of shape (n_components,) or already broadcast to a 2-D array of
    shape (n_components, 1) or (n_components, n_features). The variances are
    expanded to per-feature diagonal covariances and the computation is
    delegated to the diagonal implementation.
    """
    cv = covars.copy()
    if cv.ndim == 1:
        cv = cv[:, np.newaxis]
    # Bug fix: inspect the promoted array `cv` instead of the original
    # `covars`; for 1-D input, `covars.shape[1]` raised IndexError.
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
from scipy import linalg
n_samples, n_dim = X.shape
icv = linalg.pinv(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
+ np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(X, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
from scipy import linalg
import itertools
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(itertools.izip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probabily stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + \
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape"
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template.

    Expands the single covariance matrix `tied_cv` into the storage layout
    expected by `covariance_type` for `n_components` components.
    """
    if covariance_type == 'spherical':
        # one scalar variance per component, replicated across features
        return np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
                       (n_components, 1))
    if covariance_type == 'tied':
        return tied_cv
    if covariance_type == 'diag':
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
    """M-step for spherical covariances.

    Each component gets a single variance — the mean of its per-feature
    diagonal update — replicated across all features.
    """
    diag_cv = _covar_mstep_diag(*args)
    per_component = diag_cv.mean(axis=1)[:, np.newaxis]
    return np.tile(per_component, (1, diag_cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases."""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    # Underflow errors in doing post * X.T are not important; silence them
    # once instead of re-issuing np.seterr on every iteration.
    np.seterr(under='ignore')
    # Bug fix: use the builtin `range` instead of Python-2-only `xrange`.
    for c in range(gmm.n_components):
        post = responsibilities[:, c]
        # 10 * EPS guards against division by zero for empty components
        avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
        mu = gmm.means_[c][np.newaxis]
        cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
# Dispatch table mapping `covariance_type` to its covariance M-step
# implementation; consumed by GMM._do_mstep.
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
| |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import ddt
import uuid
from iso8601 import iso8601
import mock
from oslo_versionedobjects import fields
from sqlalchemy import sql
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import objects
from cinder.objects import fields as c_fields
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_objects
from cinder.tests.unit import objects as test_objects
class TestCinderObjectVersionHistory(test_objects.BaseObjectsTestCase):
    def test_add(self):
        """Added versions are recorded and become the current version."""
        history = test_objects.obj_base.CinderObjectVersionsHistory()
        versions_10 = {'Backup': '2.0'}
        versions_11 = {'Backup': '2.1'}
        history.add('1.0', versions_10)
        history.add('1.1', versions_11)
        # We have 3 elements because we have the liberty version by default
        self.assertEqual(2 + 1, len(history))
        expected_10 = history['liberty'].copy()
        expected_10.update(versions_10)
        expected_11 = history['liberty'].copy()
        expected_11.update(versions_11)
        self.assertEqual('1.1', history.get_current())
        self.assertEqual(expected_11, history.get_current_versions())
        self.assertEqual(expected_10, history['1.0'])

    def test_add_existing(self):
        """Re-adding an already present version raises ProgrammingError."""
        history = test_objects.obj_base.CinderObjectVersionsHistory()
        history.add('1.0', {'Backup': '1.0'})
        self.assertRaises(exception.ProgrammingError,
                          history.add, '1.0', {'Backup': '1.0'})
class TestCinderObject(test_objects.BaseObjectsTestCase):
    """Tests methods from CinderObject."""
    def setUp(self):
        super(TestCinderObject, self).setUp()
        # Start from an object with no pending changes so each test sees
        # exactly the changes it makes.
        self.obj = fake_objects.ChildObject(
            scheduled_at=None,
            uuid=uuid.uuid4(),
            text='text')
        self.obj.obj_reset_changes()
    def test_cinder_obj_get_changes_no_changes(self):
        self.assertDictEqual({}, self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_other_changes(self):
        self.obj.text = 'text2'
        self.assertDictEqual({'text': 'text2'},
                             self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_datetime_no_tz(self):
        # naive datetimes must be reported unchanged
        now = datetime.datetime.utcnow()
        self.obj.scheduled_at = now
        self.assertDictEqual({'scheduled_at': now},
                             self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_datetime_tz_utc(self):
        # tz-aware datetimes are normalized to naive UTC in the changes dict
        now_tz = iso8601.parse_date('2015-06-26T22:00:01Z')
        now = now_tz.replace(tzinfo=None)
        self.obj.scheduled_at = now_tz
        self.assertDictEqual({'scheduled_at': now},
                             self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_datetime_tz_non_utc_positive(self):
        # a +01:00 offset is subtracted to get the naive UTC equivalent
        now_tz = iso8601.parse_date('2015-06-26T22:00:01+01')
        now = now_tz.replace(tzinfo=None) - datetime.timedelta(hours=1)
        self.obj.scheduled_at = now_tz
        self.assertDictEqual({'scheduled_at': now},
                             self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_datetime_tz_non_utc_negative(self):
        # a -05:00 offset is added to get the naive UTC equivalent
        now_tz = iso8601.parse_date('2015-06-26T10:00:01-05')
        now = now_tz.replace(tzinfo=None) + datetime.timedelta(hours=5)
        self.obj.scheduled_at = now_tz
        self.assertDictEqual({'scheduled_at': now},
                             self.obj.cinder_obj_get_changes())
    @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id')
    def test_refresh(self, get_by_id):
        # refresh() should replace field values with the freshly loaded ones
        @objects.base.CinderObjectRegistry.register_if(False)
        class MyTestObject(objects.base.CinderObject,
                           objects.base.CinderObjectDictCompat,
                           objects.base.CinderComparableObject,
                           objects.base.CinderPersistentObject):
            fields = {'id': fields.UUIDField(),
                      'name': fields.StringField()}
        test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo')
        refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar')
        get_by_id.return_value = refresh_obj
        test_obj.refresh()
        self._compare(self, refresh_obj, test_obj)
    @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id')
    def test_refresh_readonly(self, get_by_id_mock):
        # read-only fields must also be updated by refresh()
        @objects.base.CinderObjectRegistry.register_if(False)
        class MyTestObject(objects.base.CinderObject,
                           objects.base.CinderObjectDictCompat,
                           objects.base.CinderComparableObject,
                           objects.base.CinderPersistentObject):
            fields = {'id': fields.UUIDField(),
                      'name': fields.StringField(read_only=True)}
        test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo')
        refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar')
        get_by_id_mock.return_value = refresh_obj
        test_obj.refresh()
        self._compare(self, refresh_obj, test_obj)
    def test_refresh_no_id_field(self):
        # without an 'id' field, refresh() has no key to reload by
        @objects.base.CinderObjectRegistry.register_if(False)
        class MyTestObjectNoId(objects.base.CinderObject,
                               objects.base.CinderObjectDictCompat,
                               objects.base.CinderComparableObject,
                               objects.base.CinderPersistentObject):
            fields = {'uuid': fields.UUIDField()}
        test_obj = MyTestObjectNoId(uuid=fake.OBJECT_ID, name='foo')
        self.assertRaises(NotImplementedError, test_obj.refresh)
    @mock.patch('cinder.objects.base.objects', mock.Mock())
    def test_cls_init(self):
        """Test that class init method gets called on registration."""
        @objects.base.CinderObjectRegistry.register
        class MyTestObject(objects.base.CinderObject,
                           objects.base.CinderPersistentObject):
            cinder_ovo_cls_init = mock.Mock()
        MyTestObject.cinder_ovo_cls_init.assert_called_once_with()
class TestCinderComparableObject(test_objects.BaseObjectsTestCase):
    def test_comparable_objects(self):
        """Equality follows field values; foreign types never compare equal."""
        @objects.base.CinderObjectRegistry.register
        class MyComparableObj(objects.base.CinderObject,
                              objects.base.CinderObjectDictCompat,
                              objects.base.CinderComparableObject):
            fields = {'foo': fields.Field(fields.Integer())}

        class NonVersionedObject(object):
            pass

        same_a = MyComparableObj(foo=1)
        same_b = MyComparableObj(foo=1)
        different = MyComparableObj(foo=2)
        foreign = NonVersionedObject()
        self.assertTrue(same_a == same_b)
        self.assertFalse(same_a == different)
        self.assertFalse(same_a == foreign)
        self.assertIsNotNone(same_a)
@ddt.ddt
class TestCinderObjectConditionalUpdate(test.TestCase):
    def setUp(self):
        super(TestCinderObjectConditionalUpdate, self).setUp()
        # conditional_update exercises the DB layer, so use an admin context
        self.context = context.get_admin_context()
def _create_volume(self):
vol = {
'display_description': 'Test Desc',
'size': 1,
'status': 'available',
'availability_zone': 'az',
'host': 'dummy',
'attach_status': c_fields.VolumeAttachStatus.DETACHED,
}
volume = objects.Volume(context=self.context, **vol)
volume.create()
return volume
    def _create_snapshot(self, volume):
        # Create and persist a snapshot attached to the given volume.
        snapshot = objects.Snapshot(context=self.context, volume_id=volume.id)
        snapshot.create()
        return snapshot
    def _check_volume(self, volume, status, size, reload=False, dirty_keys=(),
                      **kwargs):
        """Assert volume state.

        With reload=True the volume is re-read from the DB (checking
        persisted state); otherwise the in-memory object is checked.
        `dirty_keys` is the expected list of unsaved (dirty) fields, and any
        extra kwargs are asserted as attribute values on the volume.
        """
        if reload:
            volume = objects.Volume.get_by_id(self.context, volume.id)
        self.assertEqual(status, volume.status)
        self.assertEqual(size, volume.size)
        dirty = volume.cinder_obj_get_changes()
        self.assertEqual(list(dirty_keys), list(dirty.keys()))
        for key, value in kwargs.items():
            self.assertEqual(value, getattr(volume, key))
    def test_conditional_update_non_iterable_expected(self):
        # Scalar (non-iterable) expected conditions, including None matching.
        volume = self._create_volume()
        # We also check that we can check for None values
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting', 'size': 2},
            {'status': 'available', 'migration_status': None}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 2)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 2, True)
    def test_conditional_update_non_iterable_expected_model_field(self):
        # Values may reference model fields (evaluated in the DB), e.g.
        # copying the current status into previous_status atomically.
        volume = self._create_volume()
        # We also check that we can check for None values
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting', 'size': 2,
             'previous_status': volume.model.status},
            {'status': 'available', 'migration_status': None}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 2, previous_status='available')
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 2, True,
                           previous_status='available')
def test_conditional_update_non_iterable_expected_save_all(self):
volume = self._create_volume()
volume.size += 1
# We also check that we can check for not None values
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available', 'availability_zone': volume.Not(None)},
save_all=True))
# Check that the object in memory has been updated and that the size
# is not a dirty key
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
def test_conditional_update_non_iterable_expected_dont_save_all(self):
volume = self._create_volume()
volume.size += 1
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available'}, save_all=False))
# Check that the object in memory has been updated with the new status
# but that size has not been saved and is a dirty key
self._check_volume(volume, 'deleting', 2, False, ['size'])
# Check that the volume in the DB also has been updated but not the
# size
self._check_volume(volume, 'deleting', 1, True)
def test_conditional_update_fail_non_iterable_expected_save_all(self):
volume = self._create_volume()
volume.size += 1
self.assertFalse(volume.conditional_update(
{'status': 'available'},
{'status': 'deleting'}, save_all=True))
# Check that the object in memory has not been updated and that the
# size is still a dirty key
self._check_volume(volume, 'available', 2, False, ['size'])
# Check that the volume in the DB hasn't been updated
self._check_volume(volume, 'available', 1, True)
def test_default_conditional_update_non_iterable_expected(self):
volume = self._create_volume()
self.assertTrue(volume.conditional_update({'status': 'deleting'}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 1, True)
def test_default_conditional_fail_update_non_iterable_expected(self):
volume_in_db = self._create_volume()
volume = objects.Volume.get_by_id(self.context, volume_in_db.id)
volume_in_db.size += 1
volume_in_db.save()
# This will fail because size in DB is different
self.assertFalse(volume.conditional_update({'status': 'deleting'}))
# Check that the object in memory has not been updated
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed the status but has
# the size we changed before the conditional update
self._check_volume(volume_in_db, 'available', 2, True)
def test_default_conditional_update_non_iterable_expected_with_dirty(self):
volume_in_db = self._create_volume()
volume = objects.Volume.get_by_id(self.context, volume_in_db.id)
volume_in_db.size += 1
volume_in_db.save()
volume.size = 33
# This will fail because even though we have excluded the size from
# the default condition when we dirtied it in the volume object, we
# still have the last update timestamp that will be included in the
# condition
self.assertFalse(volume.conditional_update({'status': 'deleting'}))
# Check that the object in memory has not been updated
self._check_volume(volume, 'available', 33, False, ['size'])
# Check that the volume in the DB hasn't changed the status but has
# the size we changed before the conditional update
self._check_volume(volume_in_db, 'available', 2, True)
def test_conditional_update_negated_non_iterable_expected(self):
volume = self._create_volume()
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 2},
{'status': db.Not('in-use'), 'size': db.Not(2)}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
def test_conditional_update_non_iterable_expected_filter(self):
# Volume we want to change
volume = self._create_volume()
# Another volume that has no snapshots
volume2 = self._create_volume()
# A volume with snapshots
volume3 = self._create_volume()
self._create_snapshot(volume3)
# Update only it it has no snapshot
filters = (~sql.exists().where(
models.Snapshot.volume_id == models.Volume.id),)
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 2},
{'status': 'available'},
filters))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
# Check that the other volumes in the DB haven't changed
self._check_volume(volume2, 'available', 1, True)
self._check_volume(volume3, 'available', 1, True)
def test_conditional_update_iterable_expected(self):
volume = self._create_volume()
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 20},
{'status': ('error', 'available'), 'size': range(10)}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 20)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 20, True)
def test_conditional_update_negated_iterable_expected(self):
volume = self._create_volume()
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 20},
{'status': db.Not(('creating', 'in-use')), 'size': range(10)}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 20)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 20, True)
def test_conditional_update_fail_non_iterable_expected(self):
volume = self._create_volume()
self.assertFalse(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available', 'size': 2}))
# Check that the object in memory hasn't changed
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_fail_negated_non_iterable_expected(self):
volume = self._create_volume()
result = volume.conditional_update({'status': 'deleting'},
{'status': db.Not('in-use'),
'size': 2})
self.assertFalse(result)
# Check that the object in memory hasn't changed
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_fail_iterable_expected(self):
volume = self._create_volume()
self.assertFalse(volume.conditional_update(
{'status': 'available'},
{'status': ('error', 'creating'), 'size': range(2, 10)}))
# Check that the object in memory hasn't changed
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_fail_negated_iterable_expected(self):
volume = self._create_volume()
self.assertFalse(volume.conditional_update(
{'status': 'error'},
{'status': db.Not(('available', 'in-use')), 'size': range(2, 10)}))
# Check that the object in memory hasn't changed
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_fail_non_iterable_expected_filter(self):
# Volume we want to change
volume = self._create_volume()
self._create_snapshot(volume)
# A volume that has no snapshots
volume2 = self._create_volume()
# Another volume with snapshots
volume3 = self._create_volume()
self._create_snapshot(volume3)
# Update only it it has no snapshot
filters = (~sql.exists().where(
models.Snapshot.volume_id == models.Volume.id),)
self.assertFalse(volume.conditional_update(
{'status': 'deleting', 'size': 2},
{'status': 'available'},
filters))
# Check that the object in memory hasn't been updated
self._check_volume(volume, 'available', 1)
# Check that no volume in the DB also has been updated
self._check_volume(volume, 'available', 1, True)
self._check_volume(volume2, 'available', 1, True)
self._check_volume(volume3, 'available', 1, True)
def test_conditional_update_non_iterable_case_value(self):
# Volume we want to change and has snapshots
volume = self._create_volume()
self._create_snapshot(volume)
# Filter that checks if a volume has snapshots
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
# We want the updated value to depend on whether it has snapshots or
# not
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
self.assertTrue(volume.conditional_update({'status': case_values},
{'status': 'available'}))
# Check that the object in memory has been updated
self._check_volume(volume, 'has-snapshot', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'has-snapshot', 1, True)
def test_conditional_update_non_iterable_case_value_else(self):
# Volume we want to change
volume = self._create_volume()
# Filter that checks if a volume has snapshots
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
# We want the updated value to depend on whether it has snapshots or
# not
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
self.assertTrue(volume.conditional_update({'status': case_values},
{'status': 'available'}))
# Check that the object in memory has been updated
self._check_volume(volume, 'no-snapshot', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'no-snapshot', 1, True)
def test_conditional_update_non_iterable_case_value_fail(self):
# Volume we want to change doesn't have snapshots
volume = self._create_volume()
# Filter that checks if a volume has snapshots
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
# We want the updated value to depend on whether it has snapshots or
# not
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
# We won't update because volume status is available
self.assertFalse(volume.conditional_update({'status': case_values},
{'status': 'deleting'}))
# Check that the object in memory has not been updated
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB also hasn't been updated either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_iterable_with_none_expected(self):
volume = self._create_volume()
# We also check that we can check for None values in an iterable
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': (None, 'available'),
'migration_status': (None, 'finished')}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 1, True)
def test_conditional_update_iterable_with_not_none_expected(self):
volume = self._create_volume()
# We also check that we can check for None values in a negated iterable
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': volume.Not((None, 'in-use'))}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 1, True)
def test_conditional_update_iterable_with_not_includes_null(self):
volume = self._create_volume()
# We also check that negation includes None values by default like we
# do in Python and not like MySQL does
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available',
'migration_status': volume.Not(('migrating', 'error'))}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 1, True)
def test_conditional_update_iterable_with_not_includes_null_fails(self):
volume = self._create_volume()
# We also check that negation excludes None values if we ask it to
self.assertFalse(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available',
'migration_status': volume.Not(('migrating', 'error'),
auto_none=False)}))
# Check that the object in memory has not been updated
self._check_volume(volume, 'available', 1, False)
# Check that the volume in the DB hasn't been updated
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_use_operation_in_value(self):
volume = self._create_volume()
expected_size = volume.size + 1
# We also check that using fields in requested changes will work as
# expected
self.assertTrue(volume.conditional_update(
{'status': 'deleting',
'size': volume.model.size + 1},
{'status': 'available'}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', expected_size, False)
# Check that the volume in the DB has also been updated
self._check_volume(volume, 'deleting', expected_size, True)
def test_conditional_update_auto_order(self):
volume = self._create_volume()
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
values = {'status': 'deleting',
'previous_status': volume.model.status,
'migration_status': case_values}
with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
update = model_query.return_value.filter.return_value.update
update.return_value = 0
self.assertFalse(volume.conditional_update(
values, {'status': 'available'}))
# We check that we are passing values to update to SQLAlchemy in the
# right order
self.assertEqual(1, update.call_count)
self.assertListEqual(
[('previous_status', volume.model.status),
('migration_status', mock.ANY),
('status', 'deleting')],
list(update.call_args[0][0]))
self.assertDictEqual(
{'synchronize_session': False,
'update_args': {'preserve_parameter_order': True}},
update.call_args[1])
def test_conditional_update_force_order(self):
volume = self._create_volume()
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
values = {'status': 'deleting',
'previous_status': volume.model.status,
'migration_status': case_values}
order = ['status']
with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
update = model_query.return_value.filter.return_value.update
update.return_value = 0
self.assertFalse(volume.conditional_update(
values, {'status': 'available'}, order=order))
# We check that we are passing values to update to SQLAlchemy in the
# right order
self.assertEqual(1, update.call_count)
self.assertListEqual(
[('status', 'deleting'),
('previous_status', volume.model.status),
('migration_status', mock.ANY)],
list(update.call_args[0][0]))
self.assertDictEqual(
{'synchronize_session': False,
'update_args': {'preserve_parameter_order': True}},
update.call_args[1])
def test_conditional_update_no_order(self):
volume = self._create_volume()
values = {'status': 'deleting',
'previous_status': 'available',
'migration_status': None}
with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
update = model_query.return_value.filter.return_value.update
update.return_value = 0
self.assertFalse(volume.conditional_update(
values, {'status': 'available'}))
# Check that arguments passed to SQLAlchemy's update are correct (order
# is not relevant).
self.assertEqual(1, update.call_count)
arg = update.call_args[0][0]
self.assertIsInstance(arg, dict)
self.assertEqual(set(values.keys()), set(arg.keys()))
def test_conditional_update_multitable_fail(self):
volume = self._create_volume()
self.assertRaises(exception.ProgrammingError,
volume.conditional_update,
{'status': 'deleting',
objects.Snapshot.model.status: 'available'},
{'status': 'available'})
def test_conditional_update_multitable_fail_fields_different_models(self):
volume = self._create_volume()
self.assertRaises(exception.ProgrammingError,
volume.conditional_update,
{objects.Backup.model.status: 'available',
objects.Snapshot.model.status: 'available'})
def test_conditional_update_not_multitable(self):
volume = self._create_volume()
with mock.patch('cinder.db.sqlalchemy.api._create_facade_lazily') as m:
res = volume.conditional_update(
{objects.Volume.model.status: 'deleting',
objects.Volume.model.size: 12}, reflect_changes=False)
self.assertTrue(res)
self.assertTrue(m.called)
@ddt.data(('available', 'error', None),
('error', 'rolling_back', [{'fake_filter': 'faked'}]))
@ddt.unpack
@mock.patch('cinder.objects.base.'
'CinderPersistentObject.conditional_update')
def test_update_status_where(self, value, expected, filters, mock_update):
volume = self._create_volume()
if filters:
volume.update_single_status_where(value, expected, filters)
mock_update.assert_called_with({'status': value},
{'status': expected},
filters)
else:
volume.update_single_status_where(value, expected)
mock_update.assert_called_with({'status': value},
{'status': expected},
())
class TestCinderDictObject(test_objects.BaseObjectsTestCase):
    """Tests for the dict-compat mixin (CinderObjectDictCompat)."""
    @objects.base.CinderObjectRegistry.register_if(False)
    class TestDictObject(objects.base.CinderObjectDictCompat,
                         objects.base.CinderObject):
        # 'foo' is exposed through obj_extra_fields, so dict access must
        # see it even though it is a property rather than a field.
        obj_extra_fields = ['foo']
        fields = {
            'abc': fields.StringField(nullable=True),
            'def': fields.IntegerField(nullable=True),
        }
        @property
        def foo(self):
            return 42
    def test_dict_objects(self):
        """get()/in must reflect set fields and extra fields, not unset ones."""
        obj = self.TestDictObject()
        self.assertNotIn('non_existing', obj)
        # Unset field: get() falls back to the provided default.
        self.assertEqual('val', obj.get('abc', 'val'))
        self.assertNotIn('abc', obj)
        obj.abc = 'val2'
        self.assertEqual('val2', obj.get('abc', 'val'))
        # Extra fields are readable regardless of the supplied default.
        self.assertEqual(42, obj.get('foo'))
        self.assertEqual(42, obj.get('foo', None))
        self.assertIn('foo', obj)
        self.assertIn('abc', obj)
        self.assertNotIn('def', obj)
@mock.patch('cinder.objects.base.OBJ_VERSIONS', fake_objects.MyHistory())
class TestCinderObjectSerializer(test_objects.BaseObjectsTestCase):
    """Tests for version-capped serialization (backports) of objects.

    The object version history is patched to a fake one so the tests can
    pin the serializer to specific versions and observe which objects get
    backported (and that a debug log line is emitted for each backport).
    """
    BACKPORT_MSG = ('Backporting %(obj_name)s from version %(src_vers)s to '
                    'version %(dst_vers)s')
    def setUp(self):
        super(TestCinderObjectSerializer, self).setUp()
        # A child object nested inside a parent, nested inside a list, so
        # backporting can be checked at every level of the tree.
        self.obj = fake_objects.ChildObject(scheduled_at=None,
                                            uuid=uuid.uuid4(),
                                            text='text',
                                            integer=1)
        self.parent = fake_objects.ParentObject(uuid=uuid.uuid4(),
                                                child=self.obj,
                                                scheduled_at=None)
        self.parent_list = fake_objects.ParentObjectList(objects=[self.parent])
    def test_serialize_init_current_has_no_manifest(self):
        """Test that pinned to current version we have no manifest."""
        serializer = objects.base.CinderObjectSerializer('1.6')
        # Serializer should not have a manifest
        self.assertIsNone(serializer.manifest)
    def test_serialize_init_no_cap_has_no_manifest(self):
        """Test that without cap we have no manifest."""
        serializer = objects.base.CinderObjectSerializer()
        # Serializer should not have a manifest
        self.assertIsNone(serializer.manifest)
    def test_serialize_init_pinned_has_manifest(self):
        """Test that pinned to older version we have manifest."""
        objs_version = '1.5'
        serializer = objects.base.CinderObjectSerializer(objs_version)
        # Serializer should have the right manifest
        self.assertDictEqual(fake_objects.MyHistory()[objs_version],
                             serializer.manifest)
    def test_serialize_entity_unknown_version(self):
        """Test that bad cap version will prevent serializer creation."""
        self.assertRaises(exception.CappedVersionUnknown,
                          objects.base.CinderObjectSerializer, '0.9')
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_basic_no_backport(self, log_debug_mock):
        """Test single element serializer with no backport."""
        serializer = objects.base.CinderObjectSerializer('1.6')
        primitive = serializer.serialize_entity(self.context, self.obj)
        self.assertEqual('1.2', primitive['versioned_object.version'])
        data = primitive['versioned_object.data']
        self.assertEqual(1, data['integer'])
        self.assertEqual('text', data['text'])
        log_debug_mock.assert_not_called()
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_basic_backport(self, log_debug_mock):
        """Test single element serializer with backport."""
        serializer = objects.base.CinderObjectSerializer('1.5')
        primitive = serializer.serialize_entity(self.context, self.obj)
        self.assertEqual('1.1', primitive['versioned_object.version'])
        data = primitive['versioned_object.data']
        # The 'integer' field was added in 1.2, so a 1.1 backport drops it.
        self.assertNotIn('integer', data)
        self.assertEqual('text', data['text'])
        log_debug_mock.assert_called_once_with(self.BACKPORT_MSG,
                                               {'obj_name': 'ChildObject',
                                                'src_vers': '1.2',
                                                'dst_vers': '1.1'})
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_full_no_backport(self, log_debug_mock):
        """Test related elements serialization with no backport."""
        serializer = objects.base.CinderObjectSerializer('1.6')
        primitive = serializer.serialize_entity(self.context, self.parent_list)
        self.assertEqual('1.1', primitive['versioned_object.version'])
        parent = primitive['versioned_object.data']['objects'][0]
        self.assertEqual('1.1', parent['versioned_object.version'])
        child = parent['versioned_object.data']['child']
        self.assertEqual('1.2', child['versioned_object.version'])
        log_debug_mock.assert_not_called()
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_full_backport_last_children(self,
                                                          log_debug_mock):
        """Test related elements serialization with backport of the last child.
        Test that using the manifest we properly backport a child object even
        when all its parents have not changed their version.
        """
        serializer = objects.base.CinderObjectSerializer('1.5')
        primitive = serializer.serialize_entity(self.context, self.parent_list)
        self.assertEqual('1.1', primitive['versioned_object.version'])
        parent = primitive['versioned_object.data']['objects'][0]
        self.assertEqual('1.1', parent['versioned_object.version'])
        # Only the child has been backported
        child = parent['versioned_object.data']['child']
        self.assertEqual('1.1', child['versioned_object.version'])
        # Check that the backport has been properly done
        data = child['versioned_object.data']
        self.assertNotIn('integer', data)
        self.assertEqual('text', data['text'])
        log_debug_mock.assert_called_once_with(self.BACKPORT_MSG,
                                               {'obj_name': 'ChildObject',
                                                'src_vers': '1.2',
                                                'dst_vers': '1.1'})
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_full_backport(self, log_debug_mock):
        """Test backport of the whole tree of related elements."""
        serializer = objects.base.CinderObjectSerializer('1.3')
        primitive = serializer.serialize_entity(self.context, self.parent_list)
        # List has been backported
        self.assertEqual('1.0', primitive['versioned_object.version'])
        parent = primitive['versioned_object.data']['objects'][0]
        # Parent has been backported as well
        self.assertEqual('1.0', parent['versioned_object.version'])
        # And the backport has been properly done
        data = parent['versioned_object.data']
        self.assertNotIn('scheduled_at', data)
        # And child as well
        child = parent['versioned_object.data']['child']
        self.assertEqual('1.1', child['versioned_object.version'])
        # Check that the backport has been properly done
        data = child['versioned_object.data']
        self.assertNotIn('integer', data)
        self.assertEqual('text', data['text'])
        log_debug_mock.assert_has_calls([
            mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObjectList',
                                          'src_vers': '1.1',
                                          'dst_vers': '1.0'}),
            mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObject',
                                          'src_vers': '1.1',
                                          'dst_vers': '1.0'}),
            mock.call(self.BACKPORT_MSG, {'obj_name': 'ChildObject',
                                          'src_vers': '1.2',
                                          'dst_vers': '1.1'})])
| |
# -*- coding: utf-8 -*-
from integration import BaseMongoTest
from tavi.documents import Document, EmbeddedDocument
from tavi import fields
class Address(EmbeddedDocument):
    """Embedded mailing address for an Order (stored inline, not its own
    collection)."""
    street = fields.StringField("street")
    city = fields.StringField("city")
    state = fields.StringField("state")
    postal_code = fields.StringField("postal_code")
class OrderLine(EmbeddedDocument):
    """A single line item embedded in an Order's order_lines list.

    created_at / last_modified_at are set by the framework on save
    (as exercised by the timestamp assertions in OrderTest below).
    """
    quantity = fields.IntegerField("quantity")
    total_price = fields.FloatField("total_price")
    created_at = fields.DateTimeField("created_at")
    last_modified_at = fields.DateTimeField("last_modified_at")
class Order(Document):
    """Top-level document persisted to the 'orders' collection.

    Combines a scalar field set with one embedded document (address),
    a list of embedded documents (order_lines) and a plain array
    (discount_codes).
    """
    name = fields.StringField("name")
    address = fields.EmbeddedField("address", Address)
    email = fields.StringField("email")
    pay_type = fields.StringField("pay_type")
    order_lines = fields.ListField("order_lines", OrderLine)
    discount_codes = fields.ArrayField("discount_codes")
class OrderTest(BaseMongoTest):
    """Integration tests: persist Order documents (with embedded address,
    order lines and array fields) through a real MongoDB connection and
    verify the stored documents via raw collection queries."""
    def test_initialize_with_attributes(self):
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard"
        )
        self.assertEqual("John Doe", order.name)
        self.assertEqual("jdoe@example.com", order.email)
        self.assertEqual("Mastercard", order.pay_type)
    def test_address_is_embedded_document(self):
        address = Address(
            street="123 Elm St.",
            city="Anywhere",
            state="NJ",
            postal_code="00000"
        )
        order = Order(address=address)
        self.assertEqual("123 Elm St.", order.address.street)
        self.assertEqual("Anywhere", order.address.city)
        self.assertEqual("NJ", order.address.state)
        self.assertEqual("00000", order.address.postal_code)
    def test_insert_with_address(self):
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard"
        )
        order.address = Address()
        order.address.street = "123 Elm St."
        order.address.city = "Anywhere"
        order.address.state = "NJ"
        order.address.postal_code = "00000"
        assert order.save(), order.errors.full_messages
        # Verify the stored document directly through the raw collection.
        orders = list(self.db.orders.find())
        self.assertEqual(1, len(orders))
        self.assertEqual("123 Elm St.", orders[0]["address"]["street"])
        self.assertEqual("Anywhere", orders[0]["address"]["city"])
        self.assertEqual("NJ", orders[0]["address"]["state"])
        self.assertEqual("00000", orders[0]["address"]["postal_code"])
    def test_update_with_address(self):
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard"
        )
        order.address = Address()
        order.address.street = "123 Elm St."
        order.address.city = "Anywhere"
        order.address.state = "NJ"
        order.address.postal_code = "00000"
        assert order.save(), order.errors.full_messages
        # Mutating the embedded document and re-saving must update in place,
        # not insert a second document.
        order.address.street = "1313 Mockingbird Lane"
        assert order.save(), order.errors.full_messages
        orders = list(self.db.orders.find())
        self.assertEqual(1, len(orders))
        self.assertEqual(
            "1313 Mockingbird Lane",
            orders[0]["address"]["street"]
        )
    def test_insert_with_order_lines(self):
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard"
        )
        line_a = OrderLine(quantity=1, total_price=19.99)
        line_b = OrderLine(quantity=3, total_price=39.99)
        order.order_lines.append(line_a)
        order.order_lines.append(line_b)
        assert order.save(), order.errors.full_messages
        orders = list(self.db.orders.find())
        lines = orders[0]["order_lines"]
        self.assertEqual(1, len(orders))
        self.assertEqual(2, len(lines))
        self.assertEqual(1, lines[0]["quantity"])
        self.assertEqual(19.99, lines[0]["total_price"])
        # On first insert both timestamps are set to the same instant.
        self.assertIsNotNone(lines[0]["created_at"])
        self.assertEqual(lines[0]["created_at"], lines[0]["last_modified_at"])
        self.assertEqual(3, lines[1]["quantity"])
        self.assertEqual(39.99, lines[1]["total_price"])
        self.assertIsNotNone(lines[1]["created_at"])
        self.assertEqual(lines[1]["created_at"], lines[1]["last_modified_at"])
    def test_update_with_order_lines(self):
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard"
        )
        line_a = OrderLine(quantity=1, total_price=19.99)
        line_b = OrderLine(quantity=3, total_price=39.99)
        order.order_lines.append(line_a)
        order.order_lines.append(line_b)
        assert order.save(), order.errors.full_messages
        orders = list(self.db.orders.find())
        self.assertEqual(1, len(orders))
        self.assertEqual(2, len(orders[0]["order_lines"]))
        order.order_lines[0].quantity = 42
        assert order.save(), order.errors.full_messages
        orders = list(self.db.orders.find())
        lines = orders[0]["order_lines"]
        self.assertEqual(1, len(orders))
        self.assertEqual(2, len(lines))
        self.assertEqual(42, lines[0]["quantity"])
        self.assertEqual(19.99, lines[0]["total_price"])
        self.assertIsNotNone(lines[0]["created_at"])
        # After an update the timestamps may differ slightly; allow up to a
        # second of drift between creation and last modification.
        self.assertAlmostEqual(
            0,
            (lines[0]["created_at"] -
             lines[0]["last_modified_at"]).total_seconds(),
            delta=1
        )
        self.assertEqual(3, lines[1]["quantity"])
        self.assertEqual(39.99, lines[1]["total_price"])
        self.assertIsNotNone(lines[1]["created_at"])
        self.assertAlmostEqual(
            0,
            (lines[1]["created_at"] -
             lines[1]["last_modified_at"]).total_seconds(),
            delta=1
        )
    def test_query_order_lines(self):
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard"
        )
        line_a = OrderLine(quantity=1, total_price=19.99)
        line_b = OrderLine(quantity=3, total_price=39.99)
        order.order_lines.append(line_a)
        order.order_lines.append(line_b)
        assert order.save(), order.errors.full_messages
        # Reading back through the ODM must rebuild typed OrderLine objects.
        db_orders = Order.find_all()
        self.assertEqual(1, len(db_orders))
        db_lines = db_orders[0].order_lines
        self.assertEqual(1, db_lines[0].quantity)
        self.assertEqual(19.99, db_lines[0].total_price)
    def test_update_with_multiple_order_lines(self):
        # NOTE(review): near-duplicate of test_update_with_order_lines above;
        # the only difference is asserting the timestamps are *not* equal
        # after update rather than within a one-second delta.
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard"
        )
        line_a = OrderLine(quantity=1, total_price=19.99)
        line_b = OrderLine(quantity=3, total_price=39.99)
        order.order_lines.append(line_a)
        order.order_lines.append(line_b)
        assert order.save(), order.errors.full_messages
        orders = list(self.db.orders.find())
        self.assertEqual(1, len(orders))
        self.assertEqual(2, len(orders[0]["order_lines"]))
        order.order_lines[0].quantity = 42
        assert order.save(), order.errors.full_messages
        orders = list(self.db.orders.find())
        lines = orders[0]["order_lines"]
        self.assertEqual(1, len(orders))
        self.assertEqual(2, len(lines))
        self.assertEqual(42, lines[0]["quantity"])
        self.assertEqual(19.99, lines[0]["total_price"])
        self.assertNotEqual(
            lines[0]["created_at"],
            lines[0]["last_modified_at"]
        )
        self.assertEqual(3, lines[1]["quantity"])
        self.assertEqual(39.99, lines[1]["total_price"])
        self.assertNotEqual(
            lines[1]["created_at"],
            lines[1]["last_modified_at"]
        )
    def test_query_discount_codes(self):
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard",
            discount_codes=["HelloInternet"],
        )
        assert order.save(), order.errors.full_messages
        db_orders = Order.find_all()
        self.assertEqual(1, len(db_orders))
        db_codes = db_orders[0].discount_codes
        self.assertEqual(db_codes[0], "HelloInternet")
    def test_update_discount_codes(self):
        order = Order(
            name="John Doe",
            email="jdoe@example.com",
            pay_type="Mastercard",
            discount_codes=["HelloInternet", "rosebud"],
        )
        assert order.save(), order.errors.full_messages
        orders = list(self.db.orders.find())
        self.assertEqual(1, len(orders))
        self.assertEqual(2, len(orders[0]["discount_codes"]))
        # In-place element assignment on the array field must persist.
        order.discount_codes[1] = "weaknesspays"
        assert order.save(), order.errors.full_messages
        orders = list(self.db.orders.find())
        codes = orders[0]["discount_codes"]
        self.assertEqual(1, len(orders))
        self.assertEqual(2, len(codes))
        self.assertEqual("weaknesspays", codes[1])
| |
class Node(object):
    """A singly linked list node: a value ``x`` and a ``next`` pointer."""
    def __init__(self, x, nxt):
        self.x = x
        self.next = nxt
    def __repr__(self):
        # Show only the value; following .next could loop forever on the
        # cyclic lists used throughout this module.
        return 'Node(%r)' % (self.x,)
# From problem 8.5.
def find_cycle(n):
    """Return the node where the list's cycle begins, or None if acyclic.

    Floyd's tortoise/hare: a fast and a slow pointer collide iff there is
    a cycle.  The collision point is used to measure the cycle length k,
    and two pointers started k apart from the head then meet exactly at
    the cycle's entry node.
    """
    # Empty and single-node acyclic lists cannot contain a cycle.  (The
    # original dereferenced fast.next without this guard and raised
    # AttributeError on a one-node list or a None head.)
    if n is None or n.next is None:
        return None
    # fast and slow pointers will collide if there is a cycle
    slow = n
    fast = n.next
    while slow != fast:
        # Check before stepping, so we never dereference None.
        if fast is None or fast.next is None:
            return None
        slow = slow.next
        fast = fast.next.next
    # find the length of the cycle
    k = 1
    p = slow.next
    while p != slow:
        p = p.next
        k += 1
    # now set off two pointers k apart and stop when they collide
    behind = n
    ahead = n
    for _ in range(k):
        ahead = ahead.next
    while behind != ahead:
        behind = behind.next
        ahead = ahead.next
    return ahead
def traverse_list(n):
    """
    Walk to the end of the linked list starting at n.

    Returns a (last_node, steps) pair, where steps is the number of hops
    taken from n to reach the final node (0 for a one-node list).
    """
    node, hops = n, 0
    while node.next is not None:
        node, hops = node.next, hops + 1
    return node, hops
def last_node(n):
    """Return the final node of the linked list starting at n."""
    return traverse_list(n)[0]
# From problem 8.6.
def lists_overlap_nocycles(l1, l2):
    """Return the first node shared by two acyclic lists, or None.

    Two acyclic lists overlap iff they end at the same node.  When they
    do, advancing the longer list's head by the length difference puts
    both heads the same distance from the end; stepping them in lockstep
    then finds the first common node.
    """
    t1, n1 = traverse_list(l1)
    t2, n2 = traverse_list(l2)
    if t1 != t2:
        return None
    # ensure l1 is the shorter.  BUG FIX: the original swapped t1/t2 here
    # instead of the list heads l1/l2, so whenever l1 was the longer list
    # the loop below advanced the wrong (shorter) list and walked off its
    # end.
    if n1 > n2:
        l1, l2 = l2, l1
        n1, n2 = n2, n1
    # step l2 until it's as far away from the end as l1
    while n2 > n1:
        l2 = l2.next
        n2 -= 1
    # step them together until they collide
    while l1 != l2:
        l1 = l1.next
        l2 = l2.next
    return l1
def lists_overlap(l1, l2):
    """Return a node where the lists headed by l1 and l2 overlap, or None.

    Handles every combination of cyclic/acyclic inputs (problem 8.6 with
    cycles allowed).  Which shared node is returned depends on the case: the
    first shared node when the merge happens before/at the cycle entry, or
    some node on the shared cycle otherwise.
    """
    c1 = find_cycle(l1)
    c2 = find_cycle(l2)
    # neither is cyclic then defer to the non-cyclic solution
    if c1 is None and c2 is None:
        return lists_overlap_nocycles(l1, l2)
    # if one is cyclic but not the other, they don't overlap
    if c1 is None and c2 is not None:
        return None
    if c1 is not None and c2 is None:
        return None
    # here we know both are cyclic, but they may still be completely disjoint
    # if they collide in the tail, they'll have the same base of the cycle
    if c1 == c2:
        # Measure each head's distance to the common cycle entry...
        s1 = 0
        p1 = l1
        while p1 != c1:
            s1 += 1
            p1 = p1.next
        s2 = 0
        p2 = l2
        while p2 != c2:
            s2 += 1
            p2 = p2.next
        # ...then align the heads at equal distance from the entry and step
        # them together until they meet at the first shared node.
        if s1 > s2:
            l1, l2 = l2, l1
            s1, s2 = s2, s1
        while s2 > s1:
            l2 = l2.next
            s2 -= 1
        while l1 != l2:
            l1 = l1.next
            l2 = l2.next
        return l1
    # otherwise they collide on the cycle itself
    # Walk c1's cycle once; if c2's entry appears on it the lists share the
    # cycle (any shared node is acceptable), otherwise the cycles are disjoint.
    r = c1.next
    while r is not c1:
        if r == c2:
            return r
        r = r.next
    return None
def to_list(n):
    """
    to_list collects the payloads of the linked list `n` into a Python list.
    """
    def _payloads(node):
        while node is not None:
            yield node.x
            node = node.next
    return list(_payloads(n))
def from_list(L):
    """
    from_list builds a linked list from the given sequence.

    Returns the head Node, or None when L is empty.
    """
    n = None
    # Build back-to-front so each new node links directly to its successor.
    # reversed() replaces the Python-2-only xrange index loop and works on
    # both Python 2 and 3.
    for item in reversed(L):
        n = Node(x=item, nxt=n)
    return n
def display(n):
    """
    display prints a view of the linked list.
    """
    # print() with a single argument behaves identically under Python 2's
    # print statement and Python 3's print function.
    print(' -> '.join(map(str, to_list(n))))
def test():
    """Exercise lists_overlap across the cyclic/acyclic combinations."""
    # no cycle case
    l1 = from_list(range(10))
    l2 = from_list(range(50, 55))
    e2 = last_node(l2)
    e2.next = l1.next.next.next.next
    c = lists_overlap(l1, l2)
    assert c is not None
    assert 4 == c.x
    # one has a cycle
    l1 = from_list(range(10))
    l2 = from_list(range(10, 20))
    last_node(l2).next = l2.next.next
    assert lists_overlap(l1, l2) is None
    # overlap in the tail
    l1 = from_list(range(10))
    last_node(l1).next = l1.next.next.next.next
    l2 = from_list(range(20, 30))
    last_node(l2).next = l1.next.next
    c = lists_overlap(l1, l2)
    assert c is not None
    assert 2 == c.x
    # overlap in the cycle
    l1 = from_list(range(10))
    e1 = last_node(l1)
    e1.next = l1.next.next.next.next
    l2 = from_list(range(30, 37))
    last_node(l2).next = e1
    c = lists_overlap(l1, l2)
    assert c is not None
    assert 9 == c.x
    # disjoint cyclic
    l1 = from_list(range(10))
    last_node(l1).next = l1.next.next
    l2 = from_list(range(10, 33))
    last_node(l2).next = l2.next.next.next.next
    assert lists_overlap(l1, l2) is None
    # print() form is valid on both Python 2 and Python 3.
    print('pass')
def main():
    # Entry point: run the self-checks.
    test()


if __name__ == '__main__':
    main()
| |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import argparse
import logging
import os
import posixpath
import shutil
import sys
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
from functools import reduce
from typing import Any, List, Optional, Tuple, cast
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.engine.rules import rule
from pants.fs.archive import archiver_for_path
from pants.net.http.fetcher import Fetcher
from pants.option.global_options import GlobalOptions
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_file
from pants.util.dirutil import chmod_plus_x, safe_concurrent_creation, safe_open
from pants.util.memo import memoized_classproperty, memoized_method, memoized_property
from pants.util.ordered_set import OrderedSet
from pants.util.osutil import (
SUPPORTED_PLATFORM_NORMALIZED_NAMES,
get_closest_mac_host_platform_pair,
)
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class HostPlatform:
    """Describes a platform to resolve binaries for. Determines the binary's location on disk.

    :class:`BinaryToolUrlGenerator` instances receive this to generate download urls.
    """

    # OS name, e.g. "darwin" or "linux"; None for the empty platform.
    os_name: Optional[str]
    # Architecture (linux) or OS version (darwin); None for the empty platform.
    arch_or_version: Optional[str]

    @memoized_classproperty
    def empty(cls):
        # Sentinel platform with no os/arch information.
        return cls(None, None)

    def binary_path_components(self):
        """These strings are used as consecutive components of the path where a binary is fetched.

        This is also used in generating urls from --binaries-baseurls in PantsHosted.
        """
        return [self.os_name, self.arch_or_version]
class BinaryToolUrlGenerator(ABC):
    """Encapsulates the selection of urls to download for some binary tool.

    :API: public

    :class:`BinaryTool` subclasses can return an instance of a class mixing this in to
    get_external_url_generator(self) to download their file or archive from some specified url or set
    of urls.
    """

    @abstractmethod
    def generate_urls(self, version, host_platform) -> List[str]:
        """Return a list of urls to download some binary tool from given a version and platform.

        Each url is tried in order to resolve the binary -- if the list of urls is empty, or downloading
        from each of the urls fails, Pants will raise an exception when the binary tool is fetched which
        should describe why the urls failed to work.

        :param str version: version string for the requested binary (e.g. '2.0.1').
        :param host_platform: description of the platform to fetch binaries for.
        :type host_platform: :class:`HostPlatform`
        :returns: a list of urls to download the binary tool from.
        :rtype: list
        """
        pass
class PantsHosted(BinaryToolUrlGenerator):
    """Given a binary request and --binaries-baseurls, generate urls to download the binary from.

    This url generator is used if get_external_url_generator(self) is not overridden by a BinaryTool
    subclass, or if --allow-external-binary-tool-downloads is False.

    NB: "pants-hosted" is referring to the organization of the urls being specific to pants. It also
    happens that most binaries are downloaded from S3 hosting at binaries.pantsbuild.org by default --
    but setting --binaries-baseurls to anything else will only download binaries from the baseurls
    given, not from binaries.pantsbuild.org.
    """

    class NoBaseUrlsError(ValueError):
        """Raised when constructed with an empty collection of baseurls."""

    def __init__(self, binary_request, baseurls):
        super().__init__()
        self._binary_request = binary_request
        if not baseurls:
            message = (
                "Error constructing pants-hosted urls for the {} binary: no baseurls were provided."
                .format(binary_request.name)
            )
            raise self.NoBaseUrlsError(message)
        self._baseurls = baseurls

    def generate_urls(self, version, host_platform):
        """Append the file's download path to each of --binaries-baseurls.

        This assumes that the urls in --binaries-baseurls point somewhere that mirrors Pants's
        organization of the downloaded binaries on disk. Each url is tried in order until a request
        succeeds.
        """
        relative_path = self._binary_request.get_download_path(host_platform)
        urls = []
        for baseurl in self._baseurls:
            urls.append(posixpath.join(baseurl, relative_path))
        return urls
# TODO: Deprecate passing in an explicit supportdir? Seems like we should be able to
# organize our binary hosting so that it's not needed. It's also used to calculate the binary
# download location, though.
@dataclass(frozen=True)
class BinaryRequest:
    """Describes a request for a binary to download."""

    supportdir: Any
    version: Any
    name: Any
    platform_dependent: Any
    external_url_generator: Optional[Any]
    archiver: Optional[Any]

    def _full_name(self):
        # Archived binaries carry the archiver's extension in their on-disk name.
        if not self.archiver:
            return self.name
        return "{}.{}".format(self.name, self.archiver.extension)

    def get_download_path(self, host_platform):
        """Return the relative path under which this binary is stored/fetched."""
        components = [self.supportdir]
        if self.platform_dependent:
            # TODO(John Sirois): finish doc of the path structure expected under base_path.
            components += host_platform.binary_path_components()
        components += [self.version, self._full_name()]
        return os.path.join(*components)
@dataclass(frozen=True)
class BinaryFetchRequest:
    """Describes a request to download a file.

    :raises: :class:`BinaryFetchRequest.NoDownloadUrlsError` if constructed with no urls.
    """

    # Path (relative to the bootstrap dir) where the file should be written.
    download_path: Any
    # Candidate urls to try, in order.
    urls: Tuple

    def __post_init__(self):
        # Fail fast: a fetch request with no candidate urls can never succeed.
        if not self.urls:
            # BUG FIX: instances have no `__name__` attribute (the original
            # `self.__name__` raised AttributeError instead of the intended
            # error); use the class's name instead.
            raise self.NoDownloadUrlsError(
                f"No urls were provided to {type(self).__name__}: {self!r}."
            )

    @memoized_property
    def file_name(self):
        # Basename of the download path, used to identify the file in logs/errors.
        return os.path.basename(self.download_path)

    class NoDownloadUrlsError(ValueError):
        pass
class BinaryToolFetcher:
    """Downloads binary tool files from candidate urls into a bootstrap directory,
    reusing previously downloaded copies when present."""

    @classmethod
    def _default_http_fetcher(cls):
        """Return a fetcher that resolves local file paths against the build root.

        Currently this is used everywhere except in testing.
        """
        return Fetcher(get_buildroot())

    def __init__(self, bootstrap_dir, timeout_secs, fetcher=None, ignore_cached_download=False):
        """
        :param str bootstrap_dir: The root directory where Pants downloads binaries to.
        :param int timeout_secs: The number of seconds to wait before timing out on a request for some
                                 url.
        :param fetcher: object to fetch urls with, overridden in testing.
        :type fetcher: :class:`pants.net.http.fetcher.Fetcher`
        :param bool ignore_cached_download: whether to fetch a binary even if it already exists on disk.
        """
        self._bootstrap_dir = bootstrap_dir
        self._timeout_secs = timeout_secs
        self._fetcher = fetcher or self._default_http_fetcher()
        self._ignore_cached_download = ignore_cached_download

    class BinaryNotFound(TaskError):
        """Raised when the binary could not be fetched from any of the given urls."""

        def __init__(self, name, accumulated_errors):
            super(BinaryToolFetcher.BinaryNotFound, self).__init__(
                "Failed to fetch {name} binary from any source: ({error_msgs})".format(
                    name=name, error_msgs=", ".join(accumulated_errors)
                )
            )

    @contextmanager
    def _select_binary_stream(self, name, urls):
        """Download a file from a list of urls, yielding a stream after downloading the file.

        URLs are tried in order until they succeed.

        :raises: :class:`BinaryToolFetcher.BinaryNotFound` if requests to all the given urls fail.
        """
        downloaded_successfully = False
        accumulated_errors = []
        for url in OrderedSet(urls):  # De-dup URLS: we only want to try each URL once.
            logger.info(
                "Attempting to fetch {name} binary from: {url} ...".format(name=name, url=url)
            )
            try:
                with temporary_file() as dest:
                    logger.debug(
                        "in BinaryToolFetcher: url={}, timeout_secs={}".format(
                            url, self._timeout_secs
                        )
                    )
                    self._fetcher.download(
                        url,
                        listener=Fetcher.ProgressListener(),
                        path_or_fd=dest,
                        timeout_secs=self._timeout_secs,
                    )
                    logger.info("Fetched {name} binary from: {url} .".format(name=name, url=url))
                    downloaded_successfully = True
                    # Rewind so the caller reads the downloaded file from the start.
                    dest.seek(0)
                    yield dest
                    # Stop trying further urls once one has succeeded.
                    break
            except (IOError, Fetcher.Error, ValueError) as e:
                # Record the failure and fall through to the next candidate url.
                accumulated_errors.append(
                    "Failed to fetch binary from {url}: {error}".format(url=url, error=e)
                )
        if not downloaded_successfully:
            raise self.BinaryNotFound(name, accumulated_errors)

    def _do_fetch(self, download_path, file_name, urls):
        # safe_concurrent_creation writes to a scratch path and only exposes the
        # result at download_path on success, so partial downloads are not visible.
        with safe_concurrent_creation(download_path) as downloadpath:
            with self._select_binary_stream(file_name, urls) as binary_tool_stream:
                with safe_open(downloadpath, "wb") as bootstrapped_binary:
                    shutil.copyfileobj(binary_tool_stream, bootstrapped_binary)

    def fetch_binary(self, fetch_request):
        """Fulfill a binary fetch request.

        :param fetch_request: a :class:`BinaryFetchRequest` naming the path and urls.
        :returns: the absolute path of the (possibly previously cached) downloaded file.
        """
        bootstrap_dir = os.path.realpath(os.path.expanduser(self._bootstrap_dir))
        bootstrapped_binary_path = os.path.join(bootstrap_dir, fetch_request.download_path)
        logger.debug("bootstrapped_binary_path: {}".format(bootstrapped_binary_path))
        file_name = fetch_request.file_name
        urls = fetch_request.urls
        # Skip the network when a cached copy exists, unless told to ignore the cache.
        if self._ignore_cached_download or not os.path.exists(bootstrapped_binary_path):
            self._do_fetch(bootstrapped_binary_path, file_name, urls)
        logger.debug(
            "Selected {binary} binary bootstrapped to: {path}".format(
                binary=file_name, path=bootstrapped_binary_path
            )
        )
        return bootstrapped_binary_path
class BinaryUtil:
    """Wraps utility methods for finding binary executables."""

    class Factory(Subsystem):
        """
        :API: public
        """

        # N.B. `BinaryUtil` sources all of its options from bootstrap options, so that
        # `BinaryUtil` instances can be created prior to `Subsystem` bootstrapping. So
        # this options scope is unused, but required to remain a `Subsystem`.
        options_scope = "binaries"

        @classmethod
        def create(cls) -> "BinaryUtil":
            # NB: create is a class method to ~force binary fetch location to be global.
            return cast(BinaryUtil, cls._create_for_cls(BinaryUtil))

        @classmethod
        def _create_for_cls(cls, binary_util_cls):
            """Instantiate `binary_util_cls` from the global bootstrap options."""
            # NB: We read global bootstrap options, but through our own scoped options instance.
            options = cls.global_instance().get_options()
            binary_tool_fetcher = BinaryToolFetcher(
                bootstrap_dir=options.pants_bootstrapdir,
                timeout_secs=options.binaries_fetch_timeout_secs,
            )
            return binary_util_cls(
                baseurls=options.binaries_baseurls,
                binary_tool_fetcher=binary_tool_fetcher,
                path_by_id=options.binaries_path_by_id,
                allow_external_binary_tool_downloads=options.allow_external_binary_tool_downloads,
            )

    class MissingMachineInfo(TaskError):
        """Indicates that pants was unable to map this machine's OS to a binary path prefix."""

        pass

    class NoBaseUrlsError(TaskError):
        """Indicates that no URLs were specified in pants.toml."""

        pass

    class BinaryResolutionError(TaskError):
        """Raised to wrap other exceptions raised in the select() method to provide context."""

        def __init__(self, binary_request, base_exception):
            super(BinaryUtil.BinaryResolutionError, self).__init__(
                "Error resolving binary request {}: {}".format(binary_request, base_exception),
                base_exception,
            )

    def __init__(
        self,
        baseurls,
        binary_tool_fetcher,
        path_by_id=None,
        allow_external_binary_tool_downloads=True,
        uname_func=None,
    ):
        """Creates a BinaryUtil with the given settings to define binary lookup behavior.

        This constructor is primarily used for testing. Production code will usually initialize
        an instance using the BinaryUtil.Factory.create() method.

        :param baseurls: URL prefixes which represent repositories of binaries.
        :type baseurls: list of string
        :param int timeout_secs: Timeout in seconds for url reads.
        :param string bootstrapdir: Directory to use for caching binaries. Uses this directory to
          search for binaries in, or download binaries to if needed.
        :param dict path_by_id: Additional mapping from (sysname, id) -> (os, arch) for tool
          directory naming
        :param bool allow_external_binary_tool_downloads: If False, use --binaries-baseurls to download
                                                          all binaries, regardless of whether an
                                                          external_url_generator field is provided.
        :param function uname_func: method to use to emulate os.uname() in testing
        """
        self._baseurls = baseurls
        self._binary_tool_fetcher = binary_tool_fetcher
        # Start from the platforms Pants ships knowledge of, then layer on any
        # user-supplied --binaries-path-by-id entries (keys/values normalized to tuples).
        self._path_by_id = SUPPORTED_PLATFORM_NORMALIZED_NAMES.copy()
        if path_by_id:
            self._path_by_id.update((tuple(k), tuple(v)) for k, v in path_by_id.items())
        self._allow_external_binary_tool_downloads = allow_external_binary_tool_downloads
        self._uname_func = uname_func or os.uname

    # Maps a lowercased uname sysname to a function producing the (os, id) key
    # used to look up the host platform in `_path_by_id`.
    _ID_BY_OS = {
        "darwin": lambda release, machine: ("darwin", release.split(".")[0]),
        "linux": lambda release, machine: ("linux", machine),
    }

    # TODO: we create a HostPlatform in this class instead of in the constructor because we don't want
    # to fail until a binary is requested. The HostPlatform should be a parameter that gets lazily
    # resolved by the v2 engine.
    @memoized_method
    def host_platform(self, uname=None):
        """Resolve the current host's :class:`HostPlatform`.

        :param uname: optional os.uname()-style tuple overriding self._uname_func (testing).
        :raises: :class:`BinaryUtil.MissingMachineInfo` if the platform cannot be resolved.
        """
        uname_result = uname if uname else self._uname_func()
        sysname, _, release, _, machine = uname_result
        os_id_key = sysname.lower()
        try:
            os_id_fun = self._ID_BY_OS[os_id_key]
            os_id_tuple = os_id_fun(release, machine)
        except KeyError:
            # TODO: test this!
            raise self.MissingMachineInfo(
                "Pants could not resolve binaries for the current host: platform '{}' was not recognized. "
                "Recognized platforms are: [{}].".format(
                    os_id_key, ", ".join(sorted(self._ID_BY_OS.keys()))
                )
            )
        try:
            os_name, arch_or_version = self._path_by_id[os_id_tuple]
            return HostPlatform(os_name, arch_or_version)
        except KeyError:
            # In the case of MacOS, arch_or_version represents a version, and newer releases
            # can run binaries built for older releases.
            # It's better to allow that as a fallback, than for Pants to be broken on each new version
            # of MacOS until we get around to adding binaries for that new version, and modifying config
            # appropriately.
            # If some future version of MacOS cannot run binaries built for a previous
            # release, then we're no worse off than we were before (except that the error will be
            # less obvious), and we can fix it by pushing appropriate binaries and modifying
            # SUPPORTED_PLATFORM_NORMALIZED_NAMES appropriately. This is only likely to happen with a
            # major architecture change, so we'll have plenty of warning.
            if os_id_tuple[0] == "darwin":
                os_name, version = get_closest_mac_host_platform_pair(os_id_tuple[1])
                if os_name is not None and version is not None:
                    return HostPlatform(os_name, version)
            # We fail early here because we need the host_platform to identify where to download
            # binaries to.
            raise self.MissingMachineInfo(
                "Pants could not resolve binaries for the current host. Update --binaries-path-by-id to "
                "find binaries for the current host platform {}.\n"
                "--binaries-path-by-id was: {}.".format(os_id_tuple, self._path_by_id)
            )

    def _get_download_path(self, binary_request):
        # Relative path (under the bootstrap dir) where the requested binary lives.
        return binary_request.get_download_path(self.host_platform())

    def get_url_generator(self, binary_request):
        """Choose the url generator for a request: the request's own external generator
        when external downloads are allowed, otherwise the pants-hosted generator backed
        by --binaries-baseurls.

        :raises: :class:`BinaryUtil.NoBaseUrlsError` if pants-hosted urls are needed but
                 no baseurls were configured.
        """
        external_url_generator = binary_request.external_url_generator
        logger.debug(
            "self._allow_external_binary_tool_downloads: {}".format(
                self._allow_external_binary_tool_downloads
            )
        )
        logger.debug("external_url_generator: {}".format(external_url_generator))
        if external_url_generator and self._allow_external_binary_tool_downloads:
            url_generator = external_url_generator
        else:
            if not self._baseurls:
                raise self.NoBaseUrlsError("--binaries-baseurls is empty.")
            url_generator = PantsHosted(binary_request=binary_request, baseurls=self._baseurls)
        return url_generator

    def _get_urls(self, url_generator, binary_request):
        # Delegate to the generator with the request's version and the resolved host platform.
        return url_generator.generate_urls(binary_request.version, self.host_platform())

    def select(self, binary_request):
        """Fetches a file, unpacking it if necessary.

        :param binary_request: the :class:`BinaryRequest` describing what to fetch.
        :returns: the path to the downloaded file, or to the directory it was unpacked
                  into when the request specifies an archiver.
        :raises: :class:`BinaryUtil.BinaryResolutionError` wrapping any failure above.
        """
        logger.debug("binary_request: {!r}".format(binary_request))
        try:
            download_path = self._get_download_path(binary_request)
        except self.MissingMachineInfo as e:
            raise self.BinaryResolutionError(binary_request, e)
        try:
            url_generator = self.get_url_generator(binary_request)
        except self.NoBaseUrlsError as e:
            raise self.BinaryResolutionError(binary_request, e)
        urls = self._get_urls(url_generator, binary_request)
        if not isinstance(urls, list):
            # TODO: add test for this error!
            raise self.BinaryResolutionError(
                binary_request, TypeError("urls must be a list: was '{}'.".format(urls))
            )
        fetch_request = BinaryFetchRequest(download_path=download_path, urls=tuple(urls))
        logger.debug("fetch_request: {!r}".format(fetch_request))
        try:
            downloaded_file = self._binary_tool_fetcher.fetch_binary(fetch_request)
        except BinaryToolFetcher.BinaryNotFound as e:
            raise self.BinaryResolutionError(binary_request, e)
        # NB: we mark the downloaded file executable if it is not an archive.
        archiver = binary_request.archiver
        if archiver is None:
            chmod_plus_x(downloaded_file)
            return downloaded_file
        download_dir = os.path.dirname(downloaded_file)
        # Use the 'name' given in the request as the directory name to extract to.
        unpacked_dirname = os.path.join(download_dir, binary_request.name)
        if not os.path.isdir(unpacked_dirname):
            logger.info("Extracting {} to {} .".format(downloaded_file, unpacked_dirname))
            archiver.extract(downloaded_file, unpacked_dirname, concurrency_safe=True)
        return unpacked_dirname

    def _make_deprecated_binary_request(self, supportdir, version, name):
        # Platform-dependent request with no external urls and no archive handling.
        return BinaryRequest(
            supportdir=supportdir,
            version=version,
            name=name,
            platform_dependent=True,
            external_url_generator=None,
            archiver=None,
        )

    def select_binary(self, supportdir, version, name):
        """Fetch a platform-dependent binary (legacy calling convention)."""
        binary_request = self._make_deprecated_binary_request(supportdir, version, name)
        return self.select(binary_request)

    def _make_deprecated_script_request(self, supportdir, version, name):
        # Platform-independent request (e.g. a script) with no archive handling.
        return BinaryRequest(
            supportdir=supportdir,
            version=version,
            name=name,
            platform_dependent=False,
            external_url_generator=None,
            archiver=None,
        )

    def select_script(self, supportdir, version, name):
        """Fetch a platform-independent script (legacy calling convention)."""
        binary_request = self._make_deprecated_script_request(supportdir, version, name)
        return self.select(binary_request)
def _create_bootstrap_binary_arg_parser():
parser = argparse.ArgumentParser(
description="""\
Helper for download_binary.sh to use BinaryUtil to download the appropriate binaries.
Downloads the specified binary at the specified version if it's not already present.
Outputs an absolute path to the binary, whether fetched or already present, to stdout.
If the file ends in ".tar.gz", untars the file and outputs the directory to which the files were
untar'd. Otherwise, makes the file executable.
If a binary tool with the requested name, version, and filename does not exist, the
script will exit with an error and print a message to stderr.
See binary_util.py for more information.
"""
)
parser.add_argument(
"util_name", help="Subdirectory for the requested tool in the pants hosted binary schema."
)
parser.add_argument("version", help="Version of the requested binary tool to download.")
parser.add_argument(
"filename",
nargs="?",
default=None,
help="Filename to download. Defaults to the value provided for `util_name`.",
)
return parser
def select(argv):
    """Resolve and fetch the binary described by the command line in `argv`.

    :param argv: sys.argv-style list: [prog, util_name, version, [filename]].
    :returns: the path to the fetched binary, or to the unpacked directory for archives.
    """
    # Parse positional arguments to the script.
    args = _create_bootstrap_binary_arg_parser().parse_args(argv[1:])
    # Resolve bootstrap options with a fake empty command line.
    options_bootstrapper = OptionsBootstrapper.create(args=[argv[0]])
    subsystems = (GlobalOptions, BinaryUtil.Factory)
    known_scope_infos = reduce(set.union, (ss.known_scope_infos() for ss in subsystems), set())
    options = options_bootstrapper.get_full_options(known_scope_infos)
    # Initialize Subsystems.
    Subsystem.set_options(options)
    # If the filename provided ends in a known archive extension (such as ".tar.gz"), then we get the
    # appropriate Archiver to pass to BinaryUtil.
    archiver_for_current_binary = None
    filename = args.filename or args.util_name
    try:
        archiver_for_current_binary = archiver_for_path(filename)
        # BinaryRequest requires the `name` field to be provided without an extension, as it appends the
        # archiver's extension if one is provided, so we have to remove it here.
        filename = filename[: -(len(archiver_for_current_binary.extension) + 1)]
    except ValueError:
        # Not a recognized archive extension: fetch the file as-is.
        pass
    binary_util = BinaryUtil.Factory.create()
    binary_request = BinaryRequest(
        supportdir="bin/{}".format(args.util_name),
        version=args.version,
        name=filename,
        platform_dependent=True,
        external_url_generator=None,
        archiver=archiver_for_current_binary,
    )
    return binary_util.select(binary_request)
if __name__ == "__main__":
    # Script entry point: print the resolved path (or unpacked dir) to stdout.
    print(select(sys.argv))
@rule
def provide_binary_util() -> BinaryUtil:
    """Engine rule exposing a BinaryUtil configured from global bootstrap options."""
    return BinaryUtil.Factory.create()
def rules():
    """Return the engine rules provided by this module."""
    return [provide_binary_util]
| |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import web_idl
from . import name_style
from .blink_v8_bridge import blink_class_name
from .blink_v8_bridge import blink_type_info
from .blink_v8_bridge import make_v8_to_blink_value
from .blink_v8_bridge import native_value_tag
from .blink_v8_bridge import v8_bridge_class_name
from .code_node import EmptyNode
from .code_node import FormatNode
from .code_node import ListNode
from .code_node import SequenceNode
from .code_node import SymbolDefinitionNode
from .code_node import SymbolNode
from .code_node import SymbolScopeNode
from .code_node import TextNode
from .code_node_cxx import CxxBlockNode
from .code_node_cxx import CxxClassDefNode
from .code_node_cxx import CxxFuncDeclNode
from .code_node_cxx import CxxFuncDefNode
from .code_node_cxx import CxxNamespaceNode
from .code_node_cxx import CxxSwitchNode
from .code_node_cxx import CxxUnlikelyIfNode
from .codegen_accumulator import CodeGenAccumulator
from .codegen_context import CodeGenContext
from .codegen_format import format_template as _format
from .codegen_utils import collect_forward_decls_and_include_headers
from .codegen_utils import component_export
from .codegen_utils import component_export_header
from .codegen_utils import enclose_with_header_guard
from .codegen_utils import make_copyright_header
from .codegen_utils import make_forward_declarations
from .codegen_utils import make_header_include_directives
from .codegen_utils import write_code_node_to_file
from .mako_renderer import MakoRenderer
from .package_initializer import package_initializer
from .path_manager import PathManager
from .task_queue import TaskQueue
class _UnionMember(object):
    """
    _UnionMember represents the properties that the code generator directly
    needs while web_idl.Union represents properties of IDL union independent
    from ECMAScript binding. _UnionMember is specific to not only ECMAScript
    binding but also Blink implementation of IDL union and its flattened member
    types.
    """

    def __init__(self, base_name):
        assert isinstance(base_name, str)
        self._base_name = base_name
        # True only for the special null member (see _UnionMemberImpl).
        self._is_null = False
        # Do not apply |name_style| in order to respect the original name
        # (Web spec'ed name) as much as possible.
        self._content_type = "k{}".format(self._base_name)  # ContentType enum entry
        self._api_pred = "Is{}".format(self._base_name)  # predicate API name
        self._api_get = "GetAs{}".format(self._base_name)  # getter API name
        self._api_set = "Set"  # setter API name
        self._var_name = name_style.member_var("member", self._base_name)
        # Filled in by subclasses as appropriate.
        self._idl_type = None
        self._type_info = None
        self._typedef_aliases = ()

    @property
    def is_null(self):
        # Whether this member represents the union's nullable "null" state.
        return self._is_null

    def content_type(self, with_enum_name=True):
        """Return this member's ContentType enum entry, optionally qualified
        with the enum name."""
        if with_enum_name:
            return "ContentType::{}".format(self._content_type)
        else:
            return self._content_type

    @property
    def api_pred(self):
        # Name of the "IsX()" predicate member function.
        return self._api_pred

    @property
    def api_get(self):
        # Name of the "GetAsX()" accessor member function.
        return self._api_get

    @property
    def api_set(self):
        # Name of the setter member function.
        return self._api_set

    @property
    def var_name(self):
        # Name of the data member that stores this member's value.
        return self._var_name

    @property
    def idl_type(self):
        # The web_idl.IdlType of this member, or None (null member / subunion).
        return self._idl_type

    @property
    def type_info(self):
        # Blink type info for this member, or None for the null member.
        return self._type_info

    @property
    def typedef_aliases(self):
        # Tuple of _UnionMemberAlias entries that alias this member.
        return self._typedef_aliases
class _UnionMemberImpl(_UnionMember):
    """
    Represents a flattened member type of an union type or the special null
    type, which represents that the union type includes a nullable type.

    For example, either of (A? or B) or (A or B?) is represented as a list of
    [_UnionMemberImpl(A), _UnionMemberImpl(B), _UnionMemberImpl(null)].
    """

    def __init__(self, union, idl_type):
        """
        Args:
            union: The web_idl.Union this member belongs to.
            idl_type: The flattened member type, or None for the null member.
        """
        assert isinstance(union, web_idl.Union)
        assert idl_type is None or isinstance(idl_type, web_idl.IdlType)
        if idl_type is None:
            base_name = "Null"
        else:
            base_name = idl_type.type_name_with_extended_attribute_key_values
        _UnionMember.__init__(self, base_name=base_name)
        self._is_null = idl_type is None
        if not self._is_null:
            self._idl_type = idl_type
            self._type_info = blink_type_info(idl_type)
            # Typedefs in the union that resolve to this member's type become
            # aliases of this member.
            self._typedef_aliases = tuple([
                _UnionMemberAlias(impl=self, typedef=typedef)
                for typedef in union.typedef_members
                if typedef.idl_type == idl_type
            ])
class _UnionMemberSubunion(_UnionMember):
    """
    Represents a subset of flattened member types in an union type as
    'subunion'.

    For example, given an union type X = (A or B or C) with the following use
    cases,
      ((A or B) or C)
      (A or (B or C))
    subunions of the union type X are represented as
    [_UnionMemberSubunion(A or B), _UnionMemberSubunion(B or C)].
    """

    def __init__(self, union, subunion):
        """
        Args:
            union: The web_idl.Union that contains `subunion`.
            subunion: The web_idl.Union representing the subset of members.
        """
        assert isinstance(union, web_idl.Union)
        assert isinstance(subunion, web_idl.Union)
        _UnionMember.__init__(self, base_name=blink_class_name(subunion))
        self._type_info = blink_type_info(subunion.idl_types[0])
        # Typedefs that alias the subunion become aliases of this member.
        self._typedef_aliases = tuple(
            map(lambda typedef: _UnionMemberAlias(impl=self, typedef=typedef),
                subunion.aliasing_typedefs))
        self._blink_class_name = blink_class_name(subunion)

    @property
    def blink_class_name(self):
        # The Blink class name generated for the subunion.
        return self._blink_class_name
class _UnionMemberAlias(_UnionMember):
    """
    Represents a typedef'ed aliases to a flattened member type or subunion of
    an union type.

    For example, given the following Web IDL fragments,
      typedef (A or B) T1;
      typedef B T2;
      (T1 or C)
    _UnionMemberAlias(T1) represents an alias to _UnionMemberSubunion(A or B)
    and _UnionMemberAlias(T2) represents an alias to _UnionMemberImpl(B).
    """

    def __init__(self, impl, typedef):
        """
        Args:
            impl: The aliased member (_UnionMemberImpl or _UnionMemberSubunion).
            typedef: The web_idl.Typedef introducing the alias.
        """
        assert isinstance(impl, (_UnionMemberImpl, _UnionMemberSubunion))
        assert isinstance(typedef, web_idl.Typedef)
        _UnionMember.__init__(self, base_name=blink_class_name(typedef))
        # An alias shares the aliased member's storage and type info.
        self._var_name = impl.var_name
        self._type_info = impl.type_info
def create_union_members(union):
    """Create a _UnionMemberImpl for every flattened member type of `union`,
    appending the special null member when the union includes a nullable
    type."""
    assert isinstance(union, web_idl.Union)
    members = [
        _UnionMemberImpl(union, member_type)
        for member_type in union.flattened_member_types
    ]
    if union.does_include_nullable_type:
        members.append(_UnionMemberImpl(union, idl_type=None))
    return tuple(members)
def make_check_assignment_value(cg_context, union_member, assignment_value):
    """Return a DCHECK TextNode guarding `assignment_value` for the given
    union member, or None when no check applies."""
    assert isinstance(cg_context, CodeGenContext)
    assert isinstance(union_member, _UnionMember)
    assert isinstance(assignment_value, str)
    idl_type = union_member.idl_type
    if idl_type and idl_type.is_object:
        return TextNode("DCHECK({}.IsObject());".format(assignment_value))
    if union_member.type_info.is_gc_type:
        return TextNode("DCHECK({});".format(assignment_value))
    return None
def make_content_type_enum_class_def(cg_context):
    """Generate the `enum class ContentType` definition listing every union
    member, with typedef aliases pinned to their member's enum value."""
    assert isinstance(cg_context, CodeGenContext)
    entries = []
    for member in cg_context.union_members:
        member_entry = member.content_type(with_enum_name=False)
        entries.append(member_entry)
        # Each typedef alias shares the underlying member's enum value.
        entries.extend(
            "{} = {}".format(alias.content_type(with_enum_name=False),
                             member_entry)
            for alias in member.typedef_aliases)
    return ListNode([
        TextNode("// The type of the content value of this IDL union."),
        TextNode("enum class ContentType {"),
        ListNode(map(TextNode, entries), separator=", "),
        TextNode("};"),
    ])
def make_factory_methods(cg_context):
    """Generates the static `Create` factory method of the union class.

    The generated function converts a v8::Value into the IDL union following
    the Web IDL ES-to-union conversion algorithm
    (https://webidl.spec.whatwg.org/#es-union).

    Returns a (declaration, definition) pair of code nodes.
    """
    assert isinstance(cg_context, CodeGenContext)

    S = SymbolNode
    T = TextNode
    F = FormatNode

    func_decl = CxxFuncDeclNode(name="Create",
                                arg_decls=[
                                    "v8::Isolate* isolate",
                                    "v8::Local<v8::Value> v8_value",
                                    "ExceptionState& exception_state",
                                ],
                                return_type="${class_name}*",
                                static=True)

    func_def = CxxFuncDefNode(name="Create",
                              arg_decls=[
                                  "v8::Isolate* isolate",
                                  "v8::Local<v8::Value> v8_value",
                                  "ExceptionState& exception_state",
                              ],
                              return_type="${class_name}*",
                              class_name="${class_name}")
    func_def.set_base_template_vars(cg_context.template_bindings())
    body = func_def.body
    body.add_template_vars({
        "isolate": "isolate",
        "v8_value": "v8_value",
        "exception_state": "exception_state",
    })

    # Create an instance from v8::Value based on the conversion algorithm.
    #
    # 3.2.24. Union types
    # https://webidl.spec.whatwg.org/#es-union
    union_members = cg_context.union_members
    member = None  # Will be a found member in union_members.

    def find_by_member(test):
        # Returns the first member for which `test(member)` holds, or None.
        for member in union_members:
            if test(member):
                return member
        return None

    def find_by_type(test):
        # Returns the first member whose IDL type satisfies `test`, or None.
        for member in union_members:
            if member.idl_type and test(member.idl_type):
                return member
        return None

    def dispatch_if(cond_text, value_symbol=None, target_node=body):
        # Emits "if (<cond_text>) { return MakeGarbageCollected<...>(...); }"
        # for the member currently bound to the enclosing-scope `member`.
        # `cond_text is True` emits an unconditional block instead.
        assert isinstance(cond_text, str) or cond_text is True
        assert value_symbol is None or isinstance(value_symbol, SymbolNode)
        assert isinstance(target_node, SequenceNode)
        if member.type_info and member.type_info.is_move_effective:
            text = ("return MakeGarbageCollected<${class_name}>"
                    "(std::move(${blink_value}));")
        else:
            text = ("return MakeGarbageCollected<${class_name}>"
                    "(${blink_value});")
        scope_node = SymbolScopeNode([T(text)])
        if not value_symbol:
            value_symbol = make_v8_to_blink_value(
                "blink_value",
                "${v8_value}",
                member.idl_type,
                error_exit_return_statement="return nullptr;")
        scope_node.register_code_symbol(value_symbol)
        if cond_text is True:
            target_node.append(CxxBlockNode(body=scope_node))
        else:
            target_node.append(
                CxxUnlikelyIfNode(cond=cond_text, body=scope_node))

    # 2. If the union type includes a nullable type and V is null or undefined,
    # ...
    member = find_by_member(lambda m: m.is_null)
    if member:
        dispatch_if("${v8_value}->IsNullOrUndefined()",
                    S("blink_value", "auto&& ${blink_value} = nullptr;"))

    # 4. If V is null or undefined, then:
    # 4.1. If types includes a dictionary type, ...
    member = find_by_type(lambda t: t.is_dictionary)
    if member:
        if member.idl_type.type_definition_object.has_required_member:
            dispatch_if("${v8_value}->IsNullOrUndefined()")
        else:
            dispatch_if(
                "${v8_value}->IsNullOrUndefined()",
                # Shortcut to reduce the binary size
                S("blink_value", (_format(
                    "auto&& ${blink_value} = {}::Create(${isolate});",
                    blink_class_name(
                        member.idl_type.type_definition_object)))))

    # 5. If V is a platform object, then:
    # 5.1. If types includes an interface type that V implements, ...
    interface_members = filter(
        lambda member: member.idl_type and member.idl_type.is_interface,
        union_members)
    interface_members = sorted(
        interface_members,
        key=lambda member: (len(member.idl_type.type_definition_object.
                                inclusive_inherited_interfaces), member.
                            idl_type.type_definition_object.identifier),
        reverse=True)
    # Attempt to match from most derived to least derived.
    for member in interface_members:
        v8_bridge_name = v8_bridge_class_name(
            member.idl_type.type_definition_object)
        dispatch_if(
            _format("{}::HasInstance(${isolate}, ${v8_value})",
                    v8_bridge_name),
            # Shortcut to reduce the binary size
            S("blink_value", (_format(
                "auto&& ${blink_value} = "
                "{}::ToWrappableUnsafe(${v8_value}.As<v8::Object>());",
                v8_bridge_name))))

    # 6. If Type(V) is Object and V has an [[ArrayBufferData]] internal slot,
    # then:
    # 6.1. If types includes ArrayBuffer, ...
    member = find_by_type(lambda t: t.is_array_buffer)
    if member:
        dispatch_if("${v8_value}->IsArrayBuffer() || "
                    "${v8_value}->IsSharedArrayBuffer()")

    # V8 specific optimization: ArrayBufferView
    member = find_by_type(lambda t: t.is_array_buffer_view)
    if member:
        dispatch_if("${v8_value}->IsArrayBufferView()")

    # 7. If Type(V) is Object and V has a [[DataView]] internal slot, then:
    # 7.1. If types includes DataView, ...
    member = find_by_type(lambda t: t.is_data_view)
    if member:
        dispatch_if("${v8_value}->IsDataView()")

    # 8. If Type(V) is Object and V has a [[TypedArrayName]] internal slot,
    # then:
    # 8.1. If types includes a typed array type whose name is the value of V's
    # [[TypedArrayName]] internal slot, ...
    typed_array_types = ("Int8Array", "Int16Array", "Int32Array",
                         "BigInt64Array", "Uint8Array", "Uint16Array",
                         "Uint32Array", "BigUint64Array", "Uint8ClampedArray",
                         "Float32Array", "Float64Array")
    for typed_array_type in typed_array_types:
        member = find_by_type(lambda t: t.keyword_typename == typed_array_type)
        if member:
            dispatch_if(_format("${v8_value}->Is{}()", typed_array_type))

    # 9. If IsCallable(V) is true, then:
    # 9.1. If types includes a callback function type, ...
    member = find_by_type(lambda t: t.is_callback_function)
    if member:
        dispatch_if(
            "${v8_value}->IsFunction()",
            # Shortcut to reduce the binary size
            S("blink_value", (_format(
                "auto&& ${blink_value} = "
                "{}::Create(${v8_value}.As<v8::Function>());",
                blink_class_name(member.idl_type.type_definition_object)))))

    # 10. If Type(V) is Object, then:
    # 10.1. If types includes a sequence type, ...
    # 10.2. If types includes a frozen array type, ...
    member = find_by_type(lambda t: t.is_sequence or t.is_frozen_array)
    if member:
        # TODO(crbug.com/715122): Excessive optimization
        dispatch_if("${v8_value}->IsArray()")
        # Create an IDL sequence from an iterable object.
        scope_node = SymbolScopeNode()
        body.append(
            CxxUnlikelyIfNode(cond="${v8_value}->IsObject()", body=scope_node))
        scope_node.extend([
            T("ScriptIterator script_iterator = ScriptIterator::FromIterable("
              "${isolate}, ${v8_value}.As<v8::Object>(), "
              "${exception_state});"),
            CxxUnlikelyIfNode(
                cond="UNLIKELY(${exception_state}.HadException())",
                body=T("return nullptr;")),
        ])

        def blink_value_from_iterator(union_member):
            # Builds a symbol-definition constructor that materializes the
            # IDL sequence from the already-obtained ScriptIterator.
            def symbol_definition_constructor(symbol_node):
                node = SymbolDefinitionNode(symbol_node)
                node.extend([
                    F(("auto&& ${blink_value} = "
                       "bindings::CreateIDLSequenceFromIterator<{}>("
                       "${isolate}, std::move(script_iterator), "
                       "${exception_state});"),
                      native_value_tag(
                          union_member.idl_type.unwrap().element_type)),
                    CxxUnlikelyIfNode(
                        cond="UNLIKELY(${exception_state}.HadException())",
                        body=T("return nullptr;")),
                ])
                return node

            return symbol_definition_constructor

        dispatch_if(
            "!script_iterator.IsNull()",
            S("blink_value",
              definition_constructor=blink_value_from_iterator(member)),
            target_node=scope_node)

    # 10. If Type(V) is Object, then:
    # 10.3. If types includes a dictionary type, ...
    # 10.4. If types includes a record type, ...
    member = find_by_type(lambda t: t.is_dictionary or t.is_record)
    if member:
        dispatch_if("${v8_value}->IsObject()")

    # 10. If Type(V) is Object, then:
    # 10.5. If types includes a callback interface type, ...
    member = find_by_type(lambda t: t.is_callback_interface)
    if member:
        dispatch_if(
            "${v8_value}->IsObject()",
            # Shortcut to reduce the binary size
            #
            # Note: the generated expression must close both the cast and the
            # Create call; a missing ")" here produced uncompilable output.
            S("blink_value", (_format(
                "auto&& ${blink_value} = "
                "{}::Create(${v8_value}.As<v8::Object>());",
                blink_class_name(member.idl_type.type_definition_object)))))

    # 10. If Type(V) is Object, then:
    # 10.6. If types includes object, ...
    member = find_by_type(lambda t: t.is_object)
    if member:
        dispatch_if(
            "${v8_value}->IsObject()",
            # Shortcut to reduce the binary size
            S("blink_value",
              (_format("auto&& ${blink_value} = "
                       "ScriptValue(${isolate}, ${v8_value});"))))

    # 11. If Type(V) is Boolean, then:
    # 11.1. If types includes boolean, ...
    member = find_by_type(lambda t: t.is_boolean)
    if member:
        dispatch_if(
            "${v8_value}->IsBoolean()",
            # Shortcut to reduce the binary size
            S("blink_value", ("auto&& ${blink_value} = "
                              "${v8_value}.As<v8::Boolean>()->Value();")))

    # 12. If Type(V) is Number, then:
    # 12.1. If types includes a numeric type, ...
    member = find_by_type(lambda t: t.is_numeric)
    if member:
        dispatch_if("${v8_value}->IsNumber()")

    # 14. If types includes a string type, ...
    # 16. If types includes a numeric type, ...
    # 17. If types includes boolean, ...
    member = (find_by_type(lambda t: t.is_enumeration or t.is_string)
              or find_by_type(lambda t: t.is_numeric)
              or find_by_type(lambda t: t.is_boolean))
    if member:
        dispatch_if(True)
    else:
        # 19. Throw a TypeError.
        body.append(
            T("ThrowTypeErrorNotOfType"
              "(${exception_state}, UnionNameInIDL());"))
        body.append(T("return nullptr;"))

    return func_decl, func_def
def make_constructors(cg_context):
    """Generates one explicit converting constructor per union member.

    Returns a (declarations, definitions) pair of list nodes.  Constructors
    appended only to `decls` as CxxFuncDefNode carry their body inline (in
    the header); move-enabled members additionally get an out-of-line
    copy/move constructor pair in `defs`.
    """
    assert isinstance(cg_context, CodeGenContext)

    decls = ListNode()
    defs = ListNode()

    for member in cg_context.union_members:
        if member.is_null:
            # Constructor from std::nullptr_t: only sets the content-type
            # tag; there is no member variable to initialize.
            func_def = CxxFuncDefNode(name=cg_context.class_name,
                                      arg_decls=["std::nullptr_t"],
                                      return_type="",
                                      explicit=True,
                                      member_initializer_list=[
                                          "content_type_({})".format(
                                              member.content_type()),
                                      ])
            decls.append(func_def)
        elif member.type_info.is_move_effective:
            # Copy constructor: declared in the header, defined out of line.
            func_decl = CxxFuncDeclNode(
                name=cg_context.class_name,
                arg_decls=["{} value".format(member.type_info.member_ref_t)],
                return_type="",
                explicit=True)
            func_def = CxxFuncDefNode(
                name=cg_context.class_name,
                arg_decls=["{} value".format(member.type_info.member_ref_t)],
                return_type="",
                class_name=cg_context.class_name,
                member_initializer_list=[
                    "content_type_({})".format(member.content_type()),
                    "{}(value)".format(member.var_name),
                ])
            func_def.set_base_template_vars(cg_context.template_bindings())
            # Body only runs the (DCHECK-style) assignment-value check.
            func_def.body.append(
                make_check_assignment_value(cg_context, member, "value"))
            decls.append(func_decl)
            defs.append(func_def)
            defs.append(EmptyNode())
            # Move constructor: same shape, takes `value_t&&` and moves it
            # into the member variable.
            func_decl = CxxFuncDeclNode(
                name=cg_context.class_name,
                arg_decls=["{}&& value".format(member.type_info.value_t)],
                return_type="",
                explicit=True)
            func_def = CxxFuncDefNode(
                name=cg_context.class_name,
                arg_decls=["{}&& value".format(member.type_info.value_t)],
                return_type="",
                class_name=cg_context.class_name,
                member_initializer_list=[
                    "content_type_({})".format(member.content_type()),
                    "{}(std::move(value))".format(member.var_name),
                ])
            func_def.set_base_template_vars(cg_context.template_bindings())
            func_def.body.append(
                make_check_assignment_value(cg_context, member, "value"))
            decls.append(func_decl)
            defs.append(func_def)
            defs.append(EmptyNode())
        else:
            # Copy-only member: a single constructor, emitted inline in the
            # header (appended to `decls` only).
            func_def = CxxFuncDefNode(
                name=cg_context.class_name,
                arg_decls=["{} value".format(member.type_info.member_ref_t)],
                return_type="",
                explicit=True,
                member_initializer_list=[
                    "content_type_({})".format(member.content_type()),
                    "{}(value)".format(member.var_name),
                ])
            func_def.body.append(
                make_check_assignment_value(cg_context, member, "value"))
            decls.append(func_def)

    return decls, defs
def make_accessor_functions(cg_context):
    """Generates the public accessor API of the union class.

    For each union member (and each typedef alias of a member) this emits
    predicate / getter / setter functions, whose names come from the
    member's `api_pred` / `api_get` / `api_set` properties.  Accessors are
    also generated for subunions nested inside this union.

    Returns a (declarations, definitions) pair of list nodes.
    """
    assert isinstance(cg_context, CodeGenContext)
    T = TextNode
    F = FormatNode

    decls = ListNode()
    defs = ListNode()

    # GetContentType(): exposes the tag of the currently-held member.
    func_def = CxxFuncDefNode(name="GetContentType",
                              arg_decls=[],
                              return_type="ContentType",
                              const=True)
    func_def.set_base_template_vars(cg_context.template_bindings())
    func_def.body.append(T("return content_type_;"))
    decls.extend([
        T("// Returns the type of the content value."),
        func_def,
        EmptyNode(),
    ])

    def make_api_pred(member):
        # Predicate: true iff `member` is the active union member.
        func_def = CxxFuncDefNode(name=member.api_pred,
                                  arg_decls=[],
                                  return_type="bool",
                                  const=True)
        func_def.set_base_template_vars(cg_context.template_bindings())
        func_def.body.append(
            F("return content_type_ == {};", member.content_type()))
        return func_def, None

    def make_api_get(member):
        # Getter: returns the member value; DCHECKs the active tag first.
        func_def = CxxFuncDefNode(name=member.api_get,
                                  arg_decls=[],
                                  return_type=member.type_info.member_ref_t,
                                  const=True)
        func_def.set_base_template_vars(cg_context.template_bindings())
        func_def.body.extend([
            F("DCHECK_EQ(content_type_, {});", member.content_type()),
            F("return {};", member.var_name),
        ])
        return func_def, None

    def make_api_set(member):
        # Setter (copy-only members): clears the union, then assigns.
        func_def = CxxFuncDefNode(
            name=member.api_set,
            arg_decls=["{} value".format(member.type_info.member_ref_t)],
            return_type="void")
        func_def.set_base_template_vars(cg_context.template_bindings())
        func_def.body.extend([
            make_check_assignment_value(cg_context, member, "value"),
            T("Clear();"),
            F("{} = value;", member.var_name),
            F("content_type_ = {};", member.content_type()),
        ])
        return func_def, None

    def make_api_set_copy_and_move(member):
        # Setters for move-enabled members: an out-of-line copy/move pair.
        copy_func_decl = CxxFuncDeclNode(
            name=member.api_set,
            arg_decls=["{} value".format(member.type_info.member_ref_t)],
            return_type="void")
        copy_func_def = CxxFuncDefNode(
            name=member.api_set,
            arg_decls=["{} value".format(member.type_info.member_ref_t)],
            return_type="void",
            class_name=cg_context.class_name)
        copy_func_def.set_base_template_vars(cg_context.template_bindings())
        copy_func_def.body.extend([
            make_check_assignment_value(cg_context, member, "value"),
            T("Clear();"),
            F("{} = value;", member.var_name),
            F("content_type_ = {};", member.content_type()),
        ])
        move_func_decl = CxxFuncDeclNode(
            name=member.api_set,
            arg_decls=["{}&& value".format(member.type_info.value_t)],
            return_type="void")
        move_func_def = CxxFuncDefNode(
            name=member.api_set,
            arg_decls=["{}&& value".format(member.type_info.value_t)],
            return_type="void",
            class_name=cg_context.class_name)
        move_func_def.set_base_template_vars(cg_context.template_bindings())
        move_func_def.body.extend([
            make_check_assignment_value(cg_context, member, "value"),
            T("Clear();"),
            F("{} = std::move(value);", member.var_name),
            F("content_type_ = {};", member.content_type()),
        ])
        decls = ListNode([copy_func_decl, move_func_decl])
        defs = ListNode([copy_func_def, EmptyNode(), move_func_def])
        return decls, defs

    def make_api_set_null(member):
        # Setter for the null member: clears and retags, no value to store.
        func_def = CxxFuncDefNode(name=member.api_set,
                                  arg_decls=["std::nullptr_t"],
                                  return_type="void")
        func_def.set_base_template_vars(cg_context.template_bindings())
        func_def.body.extend([
            T("Clear();"),
            F("content_type_ = {};", member.content_type()),
        ])
        return func_def, None

    def make_api_subunion_pred(subunion, subunion_members):
        # Predicate for a nested union: true iff the active member is any
        # of the subunion's members.
        func_def = CxxFuncDefNode(name=subunion.api_pred,
                                  arg_decls=[],
                                  return_type="bool",
                                  const=True)
        func_def.set_base_template_vars(cg_context.template_bindings())
        expr = " || ".join(
            map(
                lambda member: "content_type_ == {}".format(
                    member.content_type()), subunion_members))
        func_def.body.append(F("return {};", expr))
        return func_def, None

    def make_api_subunion_get(subunion, subunion_members):
        # Getter for a nested union: builds a fresh subunion object wrapping
        # the currently-held member value (switch over content_type_).
        func_decl = CxxFuncDeclNode(name=subunion.api_get,
                                    arg_decls=[],
                                    return_type=subunion.type_info.value_t,
                                    const=True)
        func_def = CxxFuncDefNode(name=subunion.api_get,
                                  arg_decls=[],
                                  return_type=subunion.type_info.value_t,
                                  const=True,
                                  class_name=cg_context.class_name)
        func_def.set_base_template_vars(cg_context.template_bindings())
        node = CxxSwitchNode(cond="content_type_")
        node.append(case=None,
                    body=[T("NOTREACHED();"),
                          T("return nullptr;")],
                    should_add_break=False)
        for member in subunion_members:
            node.append(case=member.content_type(),
                        body=F("return MakeGarbageCollected<{}>({}());",
                               subunion.blink_class_name, member.api_get),
                        should_add_break=False)
        func_def.body.append(node)
        return func_decl, func_def

    def make_api_subunion_set(subunion, subunion_members):
        # Setter for a nested union: dispatches on the subunion's own
        # content type and forwards to the matching member setter.
        func_decl = CxxFuncDeclNode(
            name=subunion.api_set,
            arg_decls=["{} value".format(subunion.type_info.const_ref_t)],
            return_type="void")
        func_def = CxxFuncDefNode(
            name=subunion.api_set,
            arg_decls=["{} value".format(subunion.type_info.const_ref_t)],
            return_type="void",
            class_name=cg_context.class_name)
        func_def.set_base_template_vars(cg_context.template_bindings())
        node = CxxSwitchNode(cond="value->GetContentType()")
        for member in subunion_members:
            node.append(case=F("{}::{}", subunion.blink_class_name,
                               member.content_type()),
                        body=F("Set(value->{}());", member.api_get))
        func_def.body.append(node)
        return func_decl, func_def

    def make_api_subunion_alias_pred(subunion, alias):
        # Alias predicate: forwards to the subunion's predicate.
        func_def = CxxFuncDefNode(name=alias.api_pred,
                                  arg_decls=[],
                                  return_type="bool",
                                  const=True)
        func_def.set_base_template_vars(cg_context.template_bindings())
        func_def.body.append(F("return {}();", subunion.api_pred))
        return func_def, None

    def make_api_subunion_alias_get(subunion, alias):
        # Alias getter: forwards to the subunion's getter.
        func_def = CxxFuncDefNode(name=alias.api_get,
                                  arg_decls=[],
                                  return_type=alias.type_info.value_t,
                                  const=True)
        func_def.set_base_template_vars(cg_context.template_bindings())
        func_def.body.append(F("return {}();", subunion.api_get))
        return func_def, None

    def add(func_decl, func_def):
        # Appends a (decl, def) pair; `func_def` may be None for inline
        # functions (ListNode tolerates None / EmptyNode filler).
        decls.append(func_decl)
        defs.append(func_def)
        defs.append(EmptyNode())

    # Accessors to member types of the union type
    for member in cg_context.union_members:
        if member.is_null:
            add(*make_api_pred(member))
            add(*make_api_set_null(member))
        else:
            add(*make_api_pred(member))
            add(*make_api_get(member))
            if member.type_info.is_move_effective:
                add(*make_api_set_copy_and_move(member))
            else:
                add(*make_api_set(member))
        for alias in member.typedef_aliases:
            add(*make_api_pred(alias))
            add(*make_api_get(alias))
        decls.append(EmptyNode())

    # Accessors to subunions in the union type
    for subunion in cg_context.union.union_members:
        subunion_members = create_union_members(subunion)
        subunion = _UnionMemberSubunion(cg_context.union, subunion)
        add(*make_api_subunion_pred(subunion, subunion_members))
        add(*make_api_subunion_get(subunion, subunion_members))
        add(*make_api_subunion_set(subunion, subunion_members))
        for alias in subunion.typedef_aliases:
            add(*make_api_subunion_alias_pred(subunion, alias))
            add(*make_api_subunion_alias_get(subunion, alias))
        decls.append(EmptyNode())

    return decls, defs
def make_tov8value_function(cg_context):
    """Generates the `ToV8Value` override that converts the union back to a
    v8::Value by switching on the active content type.

    Returns a (declaration, definition) pair of code nodes.
    """
    assert isinstance(cg_context, CodeGenContext)

    func_decl = CxxFuncDeclNode(name="ToV8Value",
                                arg_decls=["ScriptState* script_state"],
                                return_type="v8::MaybeLocal<v8::Value>",
                                const=True,
                                override=True)
    func_def = CxxFuncDefNode(name="ToV8Value",
                              arg_decls=["ScriptState* script_state"],
                              return_type="v8::MaybeLocal<v8::Value>",
                              class_name=cg_context.class_name,
                              const=True)
    func_def.set_base_template_vars(cg_context.template_bindings())
    body = func_def.body
    body.add_template_vars({"script_state": "script_state"})

    switch_node = CxxSwitchNode(cond="content_type_")
    for member in cg_context.union_members:
        if member.is_null:
            case_text = "return v8::Null(${script_state}->GetIsolate());"
        else:
            case_text = _format(
                "return ToV8Traits<{}>::ToV8(${script_state}, {});",
                native_value_tag(member.idl_type), member.var_name)
        switch_node.append(case=member.content_type(),
                           body=TextNode(case_text),
                           should_add_break=False)

    body.extend([
        switch_node,
        EmptyNode(),
        # Every case returns; reaching past the switch is a logic error.
        TextNode("NOTREACHED();"),
        TextNode("return v8::MaybeLocal<v8::Value>();"),
    ])
    return func_decl, func_def
def make_trace_function(cg_context):
    """Generates the `Trace(Visitor*)` override that traces every non-null
    member variable and then delegates to the base class.

    Returns a (declaration, definition) pair of code nodes.
    """
    assert isinstance(cg_context, CodeGenContext)

    func_decl = CxxFuncDeclNode(name="Trace",
                                arg_decls=["Visitor* visitor"],
                                return_type="void",
                                const=True,
                                override=True)
    func_def = CxxFuncDefNode(name="Trace",
                              arg_decls=["Visitor* visitor"],
                              return_type="void",
                              class_name=cg_context.class_name,
                              const=True)
    func_def.set_base_template_vars(cg_context.template_bindings())

    # One TraceIfNeeded line per member that actually stores a value.
    func_def.body.extend([
        TextNode("TraceIfNeeded<{}>::Trace(visitor, {});".format(
            member.type_info.member_t, member.var_name))
        for member in cg_context.union_members if not member.is_null
    ])
    func_def.body.append(TextNode("${base_class_name}::Trace(visitor);"))
    return func_decl, func_def
def make_clear_function(cg_context):
    """Generates the private `Clear()` helper that resets every member
    variable which has a clear expression.

    Returns a (declaration, definition) pair of code nodes.
    """
    assert isinstance(cg_context, CodeGenContext)

    func_decl = CxxFuncDeclNode(name="Clear", arg_decls=[], return_type="void")
    func_def = CxxFuncDefNode(name="Clear",
                              arg_decls=[],
                              return_type="void",
                              class_name=cg_context.class_name)
    func_def.set_base_template_vars(cg_context.template_bindings())

    for member in cg_context.union_members:
        if member.is_null:
            continue
        # Members whose type needs no explicit reset yield a falsy expr.
        expr = member.type_info.clear_member_var_expr(member.var_name)
        if expr:
            func_def.body.append(TextNode("{};".format(expr)))
    return func_decl, func_def
def make_name_function(cg_context):
    """Generates the constexpr `UnionNameInIDL()` helper returning the
    union's human-readable IDL spelling, e.g. "(long or DOMString)?".

    Returns (definition, None) since the function is defined inline.
    """
    assert isinstance(cg_context, CodeGenContext)

    func_def = CxxFuncDefNode(name="UnionNameInIDL",
                              arg_decls=[],
                              return_type="const char*",
                              static=True,
                              constexpr=True)
    func_def.set_base_template_vars(cg_context.template_bindings())

    # Member type names are sorted to make the output deterministic.
    type_names = sorted(idl_type.syntactic_form
                        for idl_type in cg_context.union.flattened_member_types)
    nullable_mark = ("?"
                     if cg_context.union.does_include_nullable_type else "")
    func_def.body.append(
        TextNode("return \"({}){}\";".format(" or ".join(type_names),
                                             nullable_mark)))
    return func_def, None
def make_member_vars_def(cg_context):
    """Generates the private data members: the `content_type_` tag plus one
    storage variable per non-null union member."""
    assert isinstance(cg_context, CodeGenContext)

    member_vars = ListNode()
    member_vars.extend([
        TextNode("ContentType content_type_;"),
        EmptyNode(),
    ])

    for member in cg_context.union_members:
        if member.is_null:
            continue
        if member.idl_type.is_enumeration:
            # Since the IDL enumeration class is not default constructible,
            # construct the IDL enumeration with 0th enum value. Note that
            # this is necessary only for compilation, and the value must never
            # be used due to the guard by `content_type_`.
            pattern = "{} {}{{static_cast<{}::Enum>(0)}};"
        else:
            pattern = "{} {};"
        member_vars.append(
            FormatNode(pattern, member.type_info.member_t, member.var_name,
                       member.type_info.value_t))

    return member_vars
def generate_union(union_identifier):
    """Generates the .h/.cc pair implementing one IDL union type.

    Looks the union up in the Web IDL database, builds all code parts
    (factory, constructors, accessors, ToV8Value/Trace/Clear, member
    variables), assembles header and source trees, and writes both files.
    """
    assert isinstance(union_identifier, web_idl.Identifier)

    web_idl_database = package_initializer().web_idl_database()
    union = web_idl_database.find(union_identifier)

    path_manager = PathManager(union)
    # Unions are generated into a single component; api == impl here.
    assert path_manager.api_component == path_manager.impl_component
    api_component = path_manager.api_component
    for_testing = union.code_generator_info.for_testing

    # Class names
    class_name = blink_class_name(union)

    cg_context = CodeGenContext(union=union,
                                union_members=create_union_members(union),
                                class_name=class_name,
                                base_class_name="bindings::UnionBase")

    # Filepaths
    header_path = path_manager.api_path(ext="h")
    source_path = path_manager.api_path(ext="cc")

    # Root nodes
    header_node = ListNode(tail="\n")
    header_node.set_accumulator(CodeGenAccumulator())
    header_node.set_renderer(MakoRenderer())
    source_node = ListNode(tail="\n")
    source_node.set_accumulator(CodeGenAccumulator())
    source_node.set_renderer(MakoRenderer())

    # Namespaces
    header_blink_ns = CxxNamespaceNode(name_style.namespace("blink"))
    source_blink_ns = CxxNamespaceNode(name_style.namespace("blink"))

    # Class definition
    class_def = CxxClassDefNode(cg_context.class_name,
                                base_class_names=["bindings::UnionBase"],
                                final=True,
                                export=component_export(
                                    api_component, for_testing))
    class_def.set_base_template_vars(cg_context.template_bindings())

    # Implementation parts
    content_type_enum_class_def = make_content_type_enum_class_def(cg_context)
    factory_decls, factory_defs = make_factory_methods(cg_context)
    ctor_decls, ctor_defs = make_constructors(cg_context)
    accessor_decls, accessor_defs = make_accessor_functions(cg_context)
    tov8value_func_decls, tov8value_func_defs = make_tov8value_function(
        cg_context)
    trace_func_decls, trace_func_defs = make_trace_function(cg_context)
    clear_func_decls, clear_func_defs = make_clear_function(cg_context)
    name_func_decls, name_func_defs = make_name_function(cg_context)
    member_vars_def = make_member_vars_def(cg_context)

    # Header part (copyright, include directives, and forward declarations)
    header_node.extend([
        make_copyright_header(),
        EmptyNode(),
        enclose_with_header_guard(
            ListNode([
                make_header_include_directives(header_node.accumulator),
                EmptyNode(),
                header_blink_ns,
            ]), name_style.header_guard(header_path)),
    ])
    header_blink_ns.body.extend([
        make_forward_declarations(header_node.accumulator),
        EmptyNode(),
    ])
    source_node.extend([
        make_copyright_header(),
        EmptyNode(),
        TextNode("#include \"{}\"".format(header_path)),
        EmptyNode(),
        make_header_include_directives(source_node.accumulator),
        EmptyNode(),
        source_blink_ns,
    ])
    source_blink_ns.body.extend([
        make_forward_declarations(source_node.accumulator),
        EmptyNode(),
    ])

    # Assemble the parts.
    header_node.accumulator.add_class_decls([
        "ExceptionState",
    ])
    header_node.accumulator.add_include_headers([
        component_export_header(api_component, for_testing),
        "third_party/blink/renderer/platform/bindings/union_base.h",
    ])
    source_node.accumulator.add_include_headers([
        "third_party/blink/renderer/bindings/core/v8/generated_code_helper.h",
        "third_party/blink/renderer/bindings/core/v8/native_value_traits_impl.h",
        "third_party/blink/renderer/bindings/core/v8/to_v8_traits.h",
        "third_party/blink/renderer/platform/bindings/exception_state.h",
    ])
    # Nested subunions: forward-declared in the header, included in the .cc.
    header_node.accumulator.add_class_decls(
        map(blink_class_name, union.union_members))
    source_node.accumulator.add_include_headers(
        map(lambda subunion: PathManager(subunion).api_path(ext="h"),
            union.union_members))
    # Interface members need their generated headers in the .cc.
    source_node.accumulator.add_include_headers([
        PathManager(idl_type.type_definition_object).api_path(ext="h")
        for idl_type in union.flattened_member_types if idl_type.is_interface
    ])
    (header_forward_decls, header_include_headers, source_forward_decls,
     source_include_headers) = collect_forward_decls_and_include_headers(
         union.flattened_member_types)
    header_node.accumulator.add_class_decls(header_forward_decls)
    header_node.accumulator.add_include_headers(header_include_headers)
    source_node.accumulator.add_class_decls(source_forward_decls)
    source_node.accumulator.add_include_headers(source_include_headers)

    # The append order below fixes the layout of the generated files:
    # public API first, then private helpers and member variables.
    header_blink_ns.body.append(class_def)
    header_blink_ns.body.append(EmptyNode())

    class_def.public_section.append(content_type_enum_class_def)
    class_def.public_section.append(EmptyNode())

    class_def.public_section.append(factory_decls)
    class_def.public_section.append(EmptyNode())
    source_blink_ns.body.append(factory_defs)
    source_blink_ns.body.append(EmptyNode())

    class_def.public_section.append(ctor_decls)
    class_def.public_section.append(EmptyNode())
    source_blink_ns.body.append(ctor_defs)
    source_blink_ns.body.append(EmptyNode())

    class_def.public_section.append(accessor_decls)
    class_def.public_section.append(EmptyNode())
    source_blink_ns.body.append(accessor_defs)
    source_blink_ns.body.append(EmptyNode())

    class_def.public_section.append(tov8value_func_decls)
    class_def.public_section.append(EmptyNode())
    source_blink_ns.body.append(tov8value_func_defs)
    source_blink_ns.body.append(EmptyNode())

    class_def.public_section.append(trace_func_decls)
    class_def.public_section.append(EmptyNode())
    source_blink_ns.body.append(trace_func_defs)
    source_blink_ns.body.append(EmptyNode())

    class_def.private_section.append(clear_func_decls)
    class_def.private_section.append(EmptyNode())
    source_blink_ns.body.append(clear_func_defs)
    source_blink_ns.body.append(EmptyNode())

    class_def.private_section.append(name_func_decls)
    class_def.private_section.append(EmptyNode())
    source_blink_ns.body.append(name_func_defs)
    source_blink_ns.body.append(EmptyNode())

    class_def.private_section.append(member_vars_def)
    class_def.private_section.append(EmptyNode())

    # Write down to the files.
    write_code_node_to_file(header_node, path_manager.gen_path_to(header_path))
    write_code_node_to_file(source_node, path_manager.gen_path_to(source_path))
def generate_unions(task_queue):
    """Posts one `generate_union` task per IDL union in the database."""
    assert isinstance(task_queue, TaskQueue)

    database = package_initializer().web_idl_database()
    for union in database.union_types:
        task_queue.post_task(generate_union, union.identifier)
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import urllib
import urlparse
from tempest import config
from tempest.services.object_storage import base
CONF = config.CONF
class ObjectClient(base.ObjectStorageClient):
    """REST client for Swift object operations (Python 2 codebase).

    Each method returns the (response, body) pair from the underlying REST
    client and verifies the expected HTTP status via `expected_success`.
    """

    def create_object(self, container, object_name, data,
                      params=None, metadata=None, headers=None):
        """Create storage object.

        :param params: optional query-string parameters (dict).
        :param metadata: optional dict copied verbatim into request headers.
        :param headers: request headers; defaults to `self.get_headers()`.
        """
        if headers is None:
            headers = self.get_headers()
        if not data:
            # Zero-byte PUT still needs an explicit content length.
            headers['content-length'] = '0'
        if metadata:
            for key in metadata:
                headers[str(key)] = metadata[key]
        url = "%s/%s" % (str(container), str(object_name))
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.put(url, data, headers)
        self.expected_success(201, resp.status)
        return resp, body

    def update_object(self, container, object_name, data):
        """Upload data to replace current storage object."""
        # A plain re-PUT; create_object already checks for 201.
        resp, body = self.create_object(container, object_name, data)
        self.expected_success(201, resp.status)
        return resp, body

    def delete_object(self, container, object_name, params=None):
        """Delete storage object."""
        url = "%s/%s" % (str(container), str(object_name))
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.delete(url, headers={})
        self.expected_success([200, 204], resp.status)
        return resp, body

    def update_object_metadata(self, container, object_name, metadata,
                               metadata_prefix='X-Object-Meta-'):
        """Add, remove, or change X-Object-Meta metadata for storage object."""
        headers = {}
        for key in metadata:
            headers["%s%s" % (str(metadata_prefix), str(key))] = metadata[key]

        url = "%s/%s" % (str(container), str(object_name))
        resp, body = self.post(url, None, headers=headers)
        self.expected_success(202, resp.status)
        return resp, body

    def list_object_metadata(self, container, object_name):
        """List all storage object X-Object-Meta- metadata."""
        # Swift returns object metadata in the HEAD response headers.
        url = "%s/%s" % (str(container), str(object_name))
        resp, body = self.head(url)
        self.expected_success(200, resp.status)
        return resp, body

    def get_object(self, container, object_name, metadata=None):
        """Retrieve object's data.

        :param metadata: optional request headers (e.g. Range), despite the
            parameter name; 206 is accepted for partial-content responses.
        """
        headers = {}
        if metadata:
            for key in metadata:
                headers[str(key)] = metadata[key]

        url = "{0}/{1}".format(container, object_name)
        resp, body = self.get(url, headers=headers)
        self.expected_success([200, 206], resp.status)
        return resp, body

    def copy_object_in_same_container(self, container, src_object_name,
                                      dest_object_name, metadata=None):
        """Copy storage object's data to the new object using PUT."""
        # Server-side copy: PUT to the destination with X-Copy-From set.
        url = "{0}/{1}".format(container, dest_object_name)
        headers = {}
        headers['X-Copy-From'] = "%s/%s" % (str(container),
                                            str(src_object_name))
        headers['content-length'] = '0'
        if metadata:
            for key in metadata:
                headers[str(key)] = metadata[key]

        resp, body = self.put(url, None, headers=headers)
        self.expected_success(201, resp.status)
        return resp, body

    def copy_object_across_containers(self, src_container, src_object_name,
                                      dst_container, dst_object_name,
                                      metadata=None):
        """Copy storage object's data to the new object using PUT."""
        url = "{0}/{1}".format(dst_container, dst_object_name)
        headers = {}
        headers['X-Copy-From'] = "%s/%s" % (str(src_container),
                                            str(src_object_name))
        headers['content-length'] = '0'
        if metadata:
            for key in metadata:
                headers[str(key)] = metadata[key]

        resp, body = self.put(url, None, headers=headers)
        self.expected_success(201, resp.status)
        return resp, body

    def copy_object_2d_way(self, container, src_object_name, dest_object_name,
                           metadata=None):
        """Copy storage object's data to the new object using COPY."""
        # Alternative server-side copy: COPY on the source with Destination.
        url = "{0}/{1}".format(container, src_object_name)
        headers = {}
        headers['Destination'] = "%s/%s" % (str(container),
                                            str(dest_object_name))
        if metadata:
            for key in metadata:
                headers[str(key)] = metadata[key]

        resp, body = self.copy(url, headers=headers)
        self.expected_success(201, resp.status)
        return resp, body

    def create_object_segments(self, container, object_name, segment, data):
        """Creates object segments."""
        url = "{0}/{1}/{2}".format(container, object_name, segment)
        resp, body = self.put(url, data)
        self.expected_success(201, resp.status)
        return resp, body

    def put_object_with_chunk(self, container, name, contents, chunk_size):
        """
        Put an object with Transfer-Encoding header

        Uses a raw httplib connection (via put_object_connection) instead of
        the REST client so the chunked transfer encoding is preserved.
        """
        if self.base_url is None:
            self._set_auth()

        headers = {'Transfer-Encoding': 'chunked'}
        if self.token:
            headers['X-Auth-Token'] = self.token

        conn = put_object_connection(self.base_url, container, name, contents,
                                     chunk_size, headers)

        resp = conn.getresponse()
        body = resp.read()

        resp_headers = {}
        for header, value in resp.getheaders():
            resp_headers[header.lower()] = value

        self._error_checker('PUT', None, headers, contents, resp, body)
        self.expected_success(201, resp.status)
        return resp.status, resp.reason, resp_headers

    def create_object_continue(self, container, object_name,
                               data, metadata=None):
        """Create storage object."""
        headers = {}
        if metadata:
            for key in metadata:
                headers[str(key)] = metadata[key]

        if not data:
            headers['content-length'] = '0'

        if self.base_url is None:
            self._set_auth()
        headers['X-Auth-Token'] = self.token

        conn = put_object_connection(self.base_url, str(container),
                                     str(object_name), data, None, headers)

        # Read only the status line (e.g. for "100 Continue" handling);
        # relies on httplib internals (response_class, _read_status).
        response = conn.response_class(conn.sock,
                                       strict=conn.strict,
                                       method=conn._method)
        version, status, reason = response._read_status()
        resp = {'version': version,
                'status': str(status),
                'reason': reason}

        return resp
def put_object_connection(base_url, container, name, contents=None,
                          chunk_size=65536, headers=None, query_string=None):
    """
    Helper function to make connection to put object with httplib

    :param base_url: base_url of an object client
    :param container: container name that the object is in
    :param name: object name to put
    :param contents: a string or a file like object to read object data
                     from; if None, a zero-byte put will be done
    :param chunk_size: chunk size of data to write; it defaults to 65536;
                       used only if the contents object has a 'read'
                       method, eg. file-like objects, ignored otherwise
    :param headers: additional headers to include in the request, if any
    :param query_string: if set will be appended with '?' to generated path
    """
    parsed = urlparse.urlparse(base_url)
    if parsed.scheme == 'https':
        conn = httplib.HTTPSConnection(parsed.netloc)
    else:
        conn = httplib.HTTPConnection(parsed.netloc)
    path = str(parsed.path) + "/"
    path += "%s/%s" % (str(container), str(name))
    if query_string:
        path += '?' + query_string
    # copy the caller's headers so we never mutate the original dict
    if headers:
        headers = dict(headers)
    else:
        headers = {}
    if hasattr(contents, 'read'):
        conn.putrequest('PUT', path)
        for header, value in headers.iteritems():
            conn.putheader(header, value)
        if 'Content-Length' not in headers:
            # no length known: stream with chunked transfer-encoding
            if 'Transfer-Encoding' not in headers:
                conn.putheader('Transfer-Encoding', 'chunked')
            conn.endheaders()
            chunk = contents.read(chunk_size)
            while chunk:
                conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
                chunk = contents.read(chunk_size)
            conn.send('0\r\n\r\n')
        else:
            conn.endheaders()
            # Bug fix: header values may be strings; the original code used
            # the raw value ('left > 0' on a str under Python 2) and then
            # crashed on 'left -= len(chunk)'. Convert to int first.
            left = int(headers['Content-Length'])
            while left > 0:
                size = min(chunk_size, left)
                chunk = contents.read(size)
                if not chunk:
                    # premature EOF from the source: stop instead of
                    # looping forever on zero-length reads
                    break
                conn.send(chunk)
                left -= len(chunk)
    else:
        # plain string (or None for a zero-byte put): one-shot request
        conn.request('PUT', path, contents, headers)
    return conn
| |
"""The HTTP api to control the cloud integration."""
import asyncio
from functools import wraps
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import (
RequestDataValidator)
from homeassistant.components import websocket_api
from homeassistant.components.alexa import smart_home as alexa_sh
from homeassistant.components.google_assistant import smart_home as google_sh
from . import auth_api
from .const import (
DOMAIN, REQUEST_TIMEOUT, PREF_ENABLE_ALEXA, PREF_ENABLE_GOOGLE,
PREF_GOOGLE_ALLOW_UNLOCK)
from .iot import STATE_DISCONNECTED, STATE_CONNECTED
_LOGGER = logging.getLogger(__name__)
# Websocket command types and their message schemas. Each command is
# registered in async_setup with the matching handler below.

# Report cloud connection/login status.
WS_TYPE_STATUS = 'cloud/status'
SCHEMA_WS_STATUS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
    vol.Required('type'): WS_TYPE_STATUS,
})

# Update stored cloud preferences (all fields optional).
WS_TYPE_UPDATE_PREFS = 'cloud/update_prefs'
SCHEMA_WS_UPDATE_PREFS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
    vol.Required('type'): WS_TYPE_UPDATE_PREFS,
    vol.Optional(PREF_ENABLE_GOOGLE): bool,
    vol.Optional(PREF_ENABLE_ALEXA): bool,
    vol.Optional(PREF_GOOGLE_ALLOW_UNLOCK): bool,
})

# Fetch subscription info from the cloud.
WS_TYPE_SUBSCRIPTION = 'cloud/subscription'
SCHEMA_WS_SUBSCRIPTION = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
    vol.Required('type'): WS_TYPE_SUBSCRIPTION,
})

# Create a cloudhook for a local webhook id.
WS_TYPE_HOOK_CREATE = 'cloud/cloudhook/create'
SCHEMA_WS_HOOK_CREATE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
    vol.Required('type'): WS_TYPE_HOOK_CREATE,
    vol.Required('webhook_id'): str
})

# Delete the cloudhook for a local webhook id.
WS_TYPE_HOOK_DELETE = 'cloud/cloudhook/delete'
SCHEMA_WS_HOOK_DELETE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
    vol.Required('type'): WS_TYPE_HOOK_DELETE,
    vol.Required('webhook_id'): str
})
async def async_setup(hass):
    """Initialize the HTTP API."""
    # websocket commands: (type, handler, schema) — registered in order
    ws_commands = (
        (WS_TYPE_STATUS, websocket_cloud_status, SCHEMA_WS_STATUS),
        (WS_TYPE_SUBSCRIPTION, websocket_subscription, SCHEMA_WS_SUBSCRIPTION),
        (WS_TYPE_UPDATE_PREFS, websocket_update_prefs, SCHEMA_WS_UPDATE_PREFS),
        (WS_TYPE_HOOK_CREATE, websocket_hook_create, SCHEMA_WS_HOOK_CREATE),
        (WS_TYPE_HOOK_DELETE, websocket_hook_delete, SCHEMA_WS_HOOK_DELETE),
    )
    for ws_type, handler, schema in ws_commands:
        hass.components.websocket_api.async_register_command(
            ws_type, handler, schema)

    # REST views for the cloud account lifecycle
    for view in (GoogleActionsSyncView, CloudLoginView, CloudLogoutView,
                 CloudRegisterView, CloudResendConfirmView,
                 CloudForgotPasswordView):
        hass.http.register_view(view)
# Map of known cloud/auth exceptions to (HTTP status, user-facing message).
# Exceptions not listed here are reported as a generic 502 by the
# _handle_cloud_errors decorator.
_CLOUD_ERRORS = {
    auth_api.UserNotFound: (400, "User does not exist."),
    auth_api.UserNotConfirmed: (400, 'Email not confirmed.'),
    auth_api.Unauthenticated: (401, 'Authentication failed.'),
    auth_api.PasswordChangeRequired: (400, 'Password change required.'),
    asyncio.TimeoutError: (502, 'Unable to reach the Home Assistant cloud.')
}
def _handle_cloud_errors(handler):
    """Webview decorator to handle auth errors."""
    @wraps(handler)
    async def error_handler(view, request, *args, **kwargs):
        """Handle exceptions that raise from the wrapped request handler."""
        try:
            return await handler(view, request, *args, **kwargs)
        except (auth_api.CloudError, asyncio.TimeoutError) as err:
            # Known errors map to a specific status/message pair;
            # anything else is surfaced as a 502.
            status, msg = _CLOUD_ERRORS.get(
                err.__class__, (502, 'Unexpected error: {}'.format(err)))
            return view.json_message(
                msg, status_code=status,
                message_code=err.__class__.__name__)

    return error_handler
class GoogleActionsSyncView(HomeAssistantView):
    """Trigger a Google Actions Smart Home Sync."""

    url = '/api/cloud/google_actions/sync'
    name = 'api:cloud:google_actions/sync'

    @_handle_cloud_errors
    async def post(self, request):
        """Trigger a Google Actions sync."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]
        websession = hass.helpers.aiohttp_client.async_get_clientsession()

        # Validate the auth token first (presumably refreshes it if
        # needed — see auth_api.check_token).
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            await hass.async_add_job(auth_api.check_token, cloud)

        # Ask the cloud to trigger the SYNC, then mirror its HTTP status
        # back to our caller.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            req = await websession.post(
                cloud.google_actions_sync_url, headers={
                    'authorization': cloud.id_token
                })

        return self.json({}, status_code=req.status)
class CloudLoginView(HomeAssistantView):
    """Login to Home Assistant cloud."""

    url = '/api/cloud/login'
    name = 'api:cloud:login'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('email'): str,
        vol.Required('password'): str,
    }))
    async def post(self, request, data):
        """Handle login request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]

        # auth_api.login is run via async_add_job (presumably blocking),
        # bounded by the request timeout.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            await hass.async_add_job(auth_api.login, cloud, data['email'],
                                     data['password'])

        # Fire-and-forget: establish the IoT connection in the background.
        hass.async_add_job(cloud.iot.connect)
        return self.json({'success': True})
class CloudLogoutView(HomeAssistantView):
    """Log out of the Home Assistant cloud."""

    url = '/api/cloud/logout'
    name = 'api:cloud:logout'

    @_handle_cloud_errors
    async def post(self, request):
        """Handle logout request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]

        # Bound the logout round-trip by the standard request timeout.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            await cloud.logout()

        return self.json_message('ok')
class CloudRegisterView(HomeAssistantView):
    """Register on the Home Assistant cloud."""

    url = '/api/cloud/register'
    name = 'api:cloud:register'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('email'): str,
        # minimum password length enforced here, before hitting the cloud
        vol.Required('password'): vol.All(str, vol.Length(min=6)),
    }))
    async def post(self, request, data):
        """Handle registration request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]

        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            await hass.async_add_job(
                auth_api.register, cloud, data['email'], data['password'])

        return self.json_message('ok')
class CloudResendConfirmView(HomeAssistantView):
    """Resend email confirmation code."""

    url = '/api/cloud/resend_confirm'
    name = 'api:cloud:resend_confirm'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('email'): str,
    }))
    async def post(self, request, data):
        """Handle resending confirm email code request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]

        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            await hass.async_add_job(
                auth_api.resend_email_confirm, cloud, data['email'])

        return self.json_message('ok')
class CloudForgotPasswordView(HomeAssistantView):
    """View to start Forgot Password flow."""

    url = '/api/cloud/forgot_password'
    name = 'api:cloud:forgot_password'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('email'): str,
    }))
    async def post(self, request, data):
        """Handle forgot password request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]

        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            await hass.async_add_job(
                auth_api.forgot_password, cloud, data['email'])

        return self.json_message('ok')
@callback
def websocket_cloud_status(hass, connection, msg):
    """Handle request for account info.

    Async friendly.
    """
    account = _account_data(hass.data[DOMAIN])
    connection.send_message(
        websocket_api.result_message(msg['id'], account))
def _require_cloud_login(handler):
    """Websocket decorator that requires cloud to be logged in."""
    @wraps(handler)
    def with_cloud_auth(hass, connection, msg):
        """Require to be logged into the cloud."""
        if hass.data[DOMAIN].is_logged_in:
            handler(hass, connection, msg)
            return
        # Not logged in: reject the command with an error message.
        connection.send_message(websocket_api.error_message(
            msg['id'], 'not_logged_in',
            'You need to be logged in to the cloud.'))

    return with_cloud_auth
def _handle_aiohttp_errors(handler):
    """Websocket decorator that handles aiohttp errors.

    Can only wrap async handlers.
    """
    @wraps(handler)
    async def with_error_handling(hass, connection, msg):
        """Handle aiohttp errors."""
        error = None
        try:
            await handler(hass, connection, msg)
        except asyncio.TimeoutError:
            error = ('timeout', 'Command timed out.')
        except aiohttp.ClientError:
            error = ('unknown', 'Error making request.')

        if error is not None:
            connection.send_message(
                websocket_api.error_message(msg['id'], error[0], error[1]))

    return with_error_handling
@_require_cloud_login
@websocket_api.async_response
async def websocket_subscription(hass, connection, msg):
    """Handle request for subscription info.

    Fetches subscription data from the cloud and, when the account turns
    out to be subscribed while the local connection is down, refreshes the
    access token and reconnects.
    """
    cloud = hass.data[DOMAIN]

    with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
        response = await cloud.fetch_subscription_info()

    if response.status != 200:
        connection.send_message(websocket_api.error_message(
            msg['id'], 'request_failed', 'Failed to request subscription'))
        # Bug fix: stop here. Previously the handler fell through, parsed
        # the failed response body and sent a second (result) message.
        return

    data = await response.json()

    # Check if a user is subscribed but local info is outdated
    # In that case, let's refresh and reconnect
    if data.get('provider') and cloud.iot.state != STATE_CONNECTED:
        _LOGGER.debug(
            "Found disconnected account with valid subscription, connecting")
        await hass.async_add_executor_job(
            auth_api.renew_access_token, cloud)

        # Cancel reconnect in progress
        if cloud.iot.state != STATE_DISCONNECTED:
            await cloud.iot.disconnect()

        hass.async_create_task(cloud.iot.connect())

    connection.send_message(websocket_api.result_message(msg['id'], data))
@_require_cloud_login
@websocket_api.async_response
async def websocket_update_prefs(hass, connection, msg):
    """Handle request to update cloud preferences."""
    cloud = hass.data[DOMAIN]

    # Everything except the websocket envelope fields is a preference.
    prefs = {key: value for key, value in msg.items()
             if key not in ('id', 'type')}
    await cloud.prefs.async_update(**prefs)

    connection.send_message(websocket_api.result_message(msg['id']))
@_require_cloud_login
@websocket_api.async_response
@_handle_aiohttp_errors
async def websocket_hook_create(hass, connection, msg):
    """Handle request to create a cloudhook."""
    cloudhooks = hass.data[DOMAIN].cloudhooks
    hook = await cloudhooks.async_create(msg['webhook_id'])
    connection.send_message(websocket_api.result_message(msg['id'], hook))
@_require_cloud_login
@websocket_api.async_response
@_handle_aiohttp_errors
async def websocket_hook_delete(hass, connection, msg):
    """Handle request to delete a cloudhook.

    Wrapped in _handle_aiohttp_errors for consistency with
    websocket_hook_create: deleting a hook also talks to the cloud and can
    fail with timeouts or client errors, which would otherwise go unanswered.
    """
    cloud = hass.data[DOMAIN]
    await cloud.cloudhooks.async_delete(msg['webhook_id'])
    connection.send_message(websocket_api.result_message(msg['id']))
def _account_data(cloud):
    """Generate the auth data JSON response."""
    if not cloud.is_logged_in:
        # Minimal payload when there is no account to describe.
        return {'logged_in': False, 'cloud': STATE_DISCONNECTED}

    return {
        'logged_in': True,
        'email': cloud.claims['email'],
        'cloud': cloud.iot.state,
        'prefs': cloud.prefs.as_dict(),
        'google_entities': cloud.google_actions_user_conf['filter'].config,
        'google_domains': list(google_sh.DOMAIN_TO_GOOGLE_TYPES),
        'alexa_entities': cloud.alexa_config.should_expose.config,
        'alexa_domains': list(alexa_sh.ENTITY_ADAPTERS),
    }
| |
# Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import netaddr
import uuid
import shutil
from . import base
from . import actorbase
from . import table
from . import st
from .utils import utc_millisec
from .utils import RESERVED_ATTRIBUTES
# Module-level logger for this node.
LOG = logging.getLogger(__name__)
# Segment-tree level used for whitelist entries: ranges covered at this
# level are skipped by _calc_ipranges (see the WL_LEVEL checks there).
WL_LEVEL = st.MAX_LEVEL
class MWUpdate(object):
    """A contiguous IPv4 range plus the ids of the contributions covering it.

    Equality and hashing are based on the range only (start/end), so set
    operations on MWUpdate instances compare geometry, not contributors.
    """

    def __init__(self, start, end, uuids):
        self.start = start
        self.end = end
        self.uuids = set(uuids)

        s = netaddr.IPAddress(start)
        e = netaddr.IPAddress(end)

        self._indicator = '%s-%s' % (s, e)

    def indicator(self):
        """Return the textual 'a.b.c.d-e.f.g.h' form of the range."""
        return self._indicator

    def __repr__(self):
        return 'MWUpdate('+self._indicator+', %r)' % self.uuids

    def __hash__(self):
        # consistent with __eq__: _indicator is derived from start/end
        return hash(self._indicator)

    def __eq__(self, other):
        return self.start == other.start and \
            self.end == other.end

    def __ne__(self, other):
        # Bug fix: under Python 2, defining __eq__ alone leaves '!=' as an
        # identity comparison; make inequality consistent with equality.
        return not self.__eq__(other)
class AggregateIPv4FT(actorbase.ActorBaseFT):
    """Aggregate IPv4 indicators from multiple sources into disjoint ranges.

    Contributions (single IPs, CIDRs and start-end ranges) from upstream
    nodes are stored in a table keyed by (indicator, source) and mirrored
    into a 32-bit segment tree (st.ST). On every update/withdraw the set of
    non-overlapping output ranges around the touched interval is recomputed
    and the difference is emitted downstream. Sources whose name matches one
    of 'whitelist_prefixes' are stored at WL_LEVEL, which suppresses
    emission of the ranges they cover (see _calc_ipranges).
    """

    def __init__(self, name, chassis, config):
        self.active_requests = []

        super(AggregateIPv4FT, self).__init__(name, chassis, config)

    def configure(self):
        """Read node config: whitelist prefixes and optional list merging."""
        super(AggregateIPv4FT, self).configure()

        self.whitelist_prefixes = self.config.get('whitelist_prefixes', [])
        self.enable_list_merge = self.config.get('enable_list_merge', False)

    def _initialize_tables(self, truncate=False):
        # Indicator table (indexed by contribution uuid) plus the 32-bit
        # segment tree used for the range arithmetic.
        self.table = table.Table(
            self.name,
            bloom_filter_bits=10,
            truncate=truncate
        )
        self.table.create_index('_id')
        self.st = st.ST(self.name+'_st', 32, truncate=truncate)

    def initialize(self):
        self._initialize_tables()

    def rebuild(self):
        self._initialize_tables(truncate=True)

    def reset(self):
        self._initialize_tables(truncate=True)

    def _indicator_key(self, indicator, source):
        # NUL byte separates indicator and source in the table key.
        return indicator+'\x00'+source

    def _calc_indicator_value(self, uuids, additional_uuid=None, additional_value=None):
        """Merge the values of the contributions identified by *uuids*.

        additional_uuid/additional_value let the caller supply a value for a
        contribution no longer stored in the table (used during withdraws).
        RESERVED_ATTRIBUTES are merged with their dedicated merge functions;
        other attributes are overwritten (or list-extended when
        enable_list_merge is set and both sides are lists).
        """
        mv = {'sources': []}
        for uuid_ in uuids:
            if uuid_ == additional_uuid:
                v = additional_value
            else:
                # uuid_ = str(uuid.UUID(bytes=uuid_))
                k, v = next(
                    self.table.query('_id', from_key=uuid_, to_key=uuid_,
                                     include_value=True),
                    (None, None)
                )

                if k is None:
                    LOG.error("Unable to find key associated with uuid: %s", uuid_)
                    # NOTE(review): v is None here, so the loop below would
                    # raise a TypeError — presumably this path is treated as
                    # unreachable; confirm.

            for vk in v:
                if vk in mv and vk in RESERVED_ATTRIBUTES:
                    mv[vk] = RESERVED_ATTRIBUTES[vk](mv[vk], v[vk])
                else:
                    if self.enable_list_merge and vk in mv and isinstance(mv[vk], list):
                        if not isinstance(v[vk], list):
                            mv[vk] = v[vk]
                        else:
                            mv[vk].extend(v[vk])
                    else:
                        mv[vk] = v[vk]

        return mv

    def _merge_values(self, origin, ov, nv):
        # Keep identity/creation metadata from the old value; every other
        # attribute comes from the new value.
        result = {'sources': []}

        result['_added'] = ov['_added']
        result['_id'] = ov['_id']

        for k in nv.keys():
            result[k] = nv[k]

        return result

    def _add_indicator(self, origin, indicator, value):
        """Store/refresh (indicator, origin); return (stored value, is_new)."""
        added = False
        now = utc_millisec()

        ik = self._indicator_key(indicator, origin)
        v = self.table.get(ik)

        if v is None:
            # first time we see this (indicator, origin) pair
            v = {
                '_id': str(uuid.uuid4()),
                '_added': now
            }
            added = True
            self.statistics['added'] += 1

        v = self._merge_values(origin, v, value)
        v['_updated'] = now

        self.table.put(ik, v)

        return v, added

    def _calc_ipranges(self, start, end):
        """Calc IP Ranges overlapping the range between start and end

        Args:
            start (int): start of the range
            end (int): end of the range

        Returns:
            set: set of ranges
        """
        result = set()

        # collect the endpoint between start and end
        eps = set()
        for epaddr, _, _, _ in self.st.query_endpoints(start=start, stop=end):
            eps.add(epaddr)
        eps = sorted(eps)

        if len(eps) == 0:
            return result

        # walk thru the endpoints, tracking last endpoint
        # current level, active segments and segments levels
        oep = None
        oeplevel = -1
        live_ids = set()
        slevels = {}
        for epaddr in eps:
            # for each endpoint we track which segments are starting
            # and which ones are ending with that specific endpoint
            end_ids = set()
            start_ids = set()
            eplevel = 0
            for cuuid, clevel, cstart, cend in self.st.cover(epaddr):
                slevels[cuuid] = clevel
                if clevel > eplevel:
                    eplevel = clevel
                if cstart == epaddr:
                    start_ids.add(cuuid)
                if cend == epaddr:
                    end_ids.add(cuuid)
                if cend != epaddr and cstart != epaddr:
                    # segment spans this endpoint without starting/ending
                    # here; it can only be unknown on the first endpoint
                    if cuuid not in live_ids:
                        assert epaddr == eps[0]
                        live_ids.add(cuuid)

            assert len(end_ids) + len(start_ids) > 0

            if len(start_ids) != 0:
                # close the range accumulated so far (unless whitelisted)
                if oep is not None and oep != epaddr and len(live_ids) != 0:
                    if oeplevel != WL_LEVEL:
                        result.add(MWUpdate(oep, epaddr-1,
                                            live_ids))
                oep = epaddr
                oeplevel = eplevel
                live_ids = live_ids | start_ids

            if len(end_ids) != 0:
                # segments end at this endpoint (inclusive)
                if oep is not None and len(live_ids) != 0:
                    if eplevel < WL_LEVEL:
                        result.add(MWUpdate(oep, epaddr, live_ids))
                oep = epaddr+1
                live_ids = live_ids - end_ids
                oeplevel = eplevel
                if len(live_ids) != 0:
                    # recompute the level from the segments still alive
                    oeplevel = max([slevels[id_] for id_ in live_ids])

        return result

    def _range_from_indicator(self, indicator):
        """Parse 'a-b', CIDR or single IP into an inclusive (start, end) pair.

        Returns (None, None) when the result is outside the IPv4 space.
        """
        if '-' in indicator:
            start, end = map(
                lambda x: int(netaddr.IPAddress(x)),
                indicator.split('-', 1)
            )
        elif '/' in indicator:
            ipnet = netaddr.IPNetwork(indicator)
            start = int(ipnet.ip)
            end = start+ipnet.size-1
        else:
            start = int(netaddr.IPAddress(indicator))
            end = start

        if (not (start >= 0 and start <= 0xFFFFFFFF)) or \
           (not (end >= 0 and end <= 0xFFFFFFFF)):
            LOG.error('%s - {%s} invalid IPv4 indicator',
                      self.name, indicator)
            return None, None

        return start, end

    def _endpoints_from_range(self, start, end):
        """Return last endpoint before range and first endpoint after range

        Args:
            start (int): range start
            end (int): range stop

        Returns:
            tuple: (last endpoint before, first endpoint after)
        """
        rangestart = next(
            self.st.query_endpoints(start=0, stop=max(start-1, 0),
                                    reverse=True),
            None
        )
        if rangestart is not None:
            rangestart = rangestart[0]
        LOG.debug('%s - range start: %s', self.name, rangestart)

        rangestop = next(
            self.st.query_endpoints(reverse=False,
                                    start=min(end+1, self.st.max_endpoint),
                                    stop=self.st.max_endpoint,
                                    include_start=False),
            None
        )
        if rangestop is not None:
            rangestop = rangestop[0]
        LOG.debug('%s - range stop: %s', self.name, rangestop)

        return rangestart, rangestop

    @base._counting('update.processed')
    def filtered_update(self, source=None, indicator=None, value=None):
        """Handle an update from upstream; re-emit affected output ranges."""
        vtype = value.get('type', None)
        if vtype != 'IPv4':
            self.statistics['update.ignored'] += 1
            return

        v, newindicator = self._add_indicator(source, indicator, value)

        start, end = self._range_from_indicator(indicator)
        if start is None or end is None:
            return

        # whitelisted sources go into the segment tree at WL_LEVEL
        level = 1
        for p in self.whitelist_prefixes:
            if source.startswith(p):
                level = WL_LEVEL
                break

        LOG.debug("%s - update: indicator: (%s) %s %s level: %s",
                  self.name, indicator, start, end, level)

        rangestart, rangestop = self._endpoints_from_range(start, end)

        rangesb = set(self._calc_ipranges(rangestart, rangestop))
        LOG.debug('%s - ranges before update: %s', self.name, rangesb)

        if not newindicator and level != WL_LEVEL:
            # geometry unchanged: only the merged values need refreshing
            for u in rangesb:
                self.emit_update(
                    u.indicator(),
                    self._calc_indicator_value(u.uuids)
                )
            return

        uuidbytes = v['_id']
        self.st.put(uuidbytes, start, end, level=level)

        rangesa = set(self._calc_ipranges(rangestart, rangestop))
        LOG.debug('%s - ranges after update: %s', self.name, rangesa)

        # diff before/after and emit updates/withdraws accordingly
        added = rangesa-rangesb
        LOG.debug("%s - IP ranges added: %s", self.name, added)
        removed = rangesb-rangesa
        LOG.debug("%s - IP ranges removed: %s", self.name, removed)

        for u in added:
            self.emit_update(
                u.indicator(),
                self._calc_indicator_value(u.uuids)
            )
        for u in rangesa - added:
            for ou in rangesb:
                # same range but a different contributor set: refresh value
                if u == ou and len(u.uuids ^ ou.uuids) != 0:
                    LOG.debug("IP range updated: %s", repr(u))
                    self.emit_update(
                        u.indicator(),
                        self._calc_indicator_value(u.uuids)
                    )
        for u in removed:
            self.emit_withdraw(
                u.indicator(),
                value=self._calc_indicator_value(u.uuids)
            )

    @base._counting('withdraw.processed')
    def filtered_withdraw(self, source=None, indicator=None, value=None):
        """Handle a withdraw from upstream; re-emit affected output ranges."""
        LOG.debug("%s - withdraw from %s - %s", self.name, source, indicator)

        if value is not None and value.get('type', None) != 'IPv4':
            self.statistics['withdraw.ignored'] += 1
            return

        ik = self._indicator_key(indicator, source)
        v = self.table.get(ik)
        LOG.debug("%s - v: %s", self.name, v)
        if v is None:
            # nothing stored for this (indicator, source) pair
            return

        self.table.delete(ik)
        self.statistics['removed'] += 1

        start, end = self._range_from_indicator(indicator)
        if start is None or end is None:
            return

        # must match the level used in filtered_update for this source
        level = 1
        for p in self.whitelist_prefixes:
            if source.startswith(p):
                level = WL_LEVEL
                break

        rangestart, rangestop = self._endpoints_from_range(start, end)

        rangesb = set(self._calc_ipranges(rangestart, rangestop))
        LOG.debug("ranges before: %s", rangesb)

        uuidbytes = v['_id']
        self.st.delete(uuidbytes, start, end, level=level)

        rangesa = set(self._calc_ipranges(rangestart, rangestop))
        LOG.debug("ranges after: %s", rangesa)

        added = rangesa-rangesb
        LOG.debug("IP ranges added: %s", added)
        removed = rangesb-rangesa
        LOG.debug("IP ranges removed: %s", removed)

        for u in added:
            self.emit_update(
                u.indicator(),
                self._calc_indicator_value(u.uuids)
            )
        for u in rangesa - added:
            for ou in rangesb:
                if u == ou and len(u.uuids ^ ou.uuids) != 0:
                    LOG.debug("IP range updated: %s", repr(u))
                    self.emit_update(
                        u.indicator(),
                        self._calc_indicator_value(u.uuids)
                    )
        for u in removed:
            # the deleted contribution is gone from the table; pass it
            # explicitly so its attributes still appear in the withdraw value
            self.emit_withdraw(
                u.indicator(),
                value=self._calc_indicator_value(
                    u.uuids,
                    additional_uuid=v['_id'],
                    additional_value=v
                )
            )

    def _send_indicators(self, source=None, from_key=None, to_key=None):
        # default to the full IPv4 space
        if from_key is None:
            from_key = 0
        if to_key is None:
            to_key = 0xFFFFFFFF

        result = self._calc_ipranges(from_key, to_key)
        for u in result:
            self.do_rpc(
                source,
                "update",
                indicator=u.indicator(),
                value=self._calc_indicator_value(u.uuids)
            )

    def get(self, source=None, indicator=None):
        """RPC: return the aggregated value covering a single IPv4 address."""
        # Python 2: accept both byte and unicode strings
        if not type(indicator) in [str, unicode]:
            raise ValueError("Invalid indicator type")

        indicator = int(netaddr.IPAddress(indicator))

        result = self._calc_ipranges(indicator, indicator)
        if len(result) == 0:
            return None

        u = result.pop()
        return self._calc_indicator_value(u.uuids)

    def get_all(self, source=None):
        """RPC: push every aggregated range to *source* via 'update' RPCs."""
        self._send_indicators(source=source)
        return 'OK'

    def get_range(self, source=None, index=None, from_key=None, to_key=None):
        """RPC: push aggregated ranges between from_key and to_key to *source*."""
        if index is not None:
            # range queries by secondary index are not supported here
            raise ValueError('Index not found')

        if from_key is not None:
            from_key = int(netaddr.IPAddress(from_key))
        if to_key is not None:
            to_key = int(netaddr.IPAddress(to_key))

        self._send_indicators(
            source=source,
            from_key=from_key,
            to_key=to_key
        )

        return 'OK'

    def length(self, source=None):
        return self.table.num_indicators

    def stop(self):
        super(AggregateIPv4FT, self).stop()

        # stop any in-flight request greenlets before closing the table
        for g in self.active_requests:
            g.kill()
        self.active_requests = []

        self.table.close()

        LOG.info("%s - # indicators: %d", self.name, self.table.num_indicators)

    @staticmethod
    def gc(name, config=None):
        """Remove the on-disk state (table and segment tree) of a node."""
        actorbase.ActorBaseFT.gc(name, config=config)
        shutil.rmtree(name, ignore_errors=True)
        shutil.rmtree('{}_st'.format(name), ignore_errors=True)
| |
# modmail_ticketmanager
# Python script developed on behalf on a need on /r/civcraft.
# Goal is to actively monitor the modmail queue on a particular subreddit
# and if a new modmail (or update) comes through, to push that into a
# Request Tracker (ticket manager) instance.
#
# Dependencies:
# Python - 2.6.8 or higher
# PRAW - You will need to install this yourself. (pip install)
# RequestTracker instance - You will need to install and configure this yourself. (see http://requesttracker.wikia.com/wiki/DebianSqueezeInstallGuide )
# Sqlite - You will need to install this yourself. (apt-get)
# argparse - You will need to install this yourself. (pip install)
# RequestTracker python helper rtkit (pip install)
# mysql-connector- pip install mysql-connector-python
#
# To use, change the relevant items in the #Definitions section. You should not change
# anything below that line. When ran, this will put itself into a loop with waits in
# between and process modmails during its runtime. This makes use of the fact that
# the latest changed modmail thread will be the first one to come through. Using that
# fact, we don't have to pull down all possible modmail threads each time - just enough
# until we get to a thread that we have already fully processed. If we get to this thread
# then we are done for that iteration and can sleep for a bit.
#
# Note: This order-by behavior we are exploiting is NOT defined, so while likely to continue
# in the future, we still have to get all messages every once in a while. By default, that's
# every 30 minutes (configurable). When that comes up, we will process all modmail messages
# with the newest reply up to 8 days in the past (configurable).
#
# We keep track of items we have already processed by storing it in a sqlite database that you
# define the name of. It is expected that you will handle backing up this item on an intermittent
# basis. Woe unto those who choose not to do so as duplication in your ticket managing software
# is the possible ramification if you do not.
#
# It has not been tested what happens if you get a modmail update to something you mark DELETED
# in the ticket management software, but since this deletion is soft deletion and not hard
# it is expected that it will still be fine.
# Definitions - Change the items below.
debug = False #If set to True then will output a very large amount of data allowing you to debug what is going on.
# Reddit
redditUsername = ''
redditPassword = ''
redditSleepIntervalInSecondsBetweenRequests = 60
# The MinutesBetweenExtendedValidationMode and MaximumAmountOfDaysToAllowLookbackForMissingReplies are pretty tightly coupled concepts.
# Since we process things from newest to oldest, we are using a shortcut that lets us know when to quit (when we hit the first message
# that is already 100% processed we can end for now). This is great for speeding up processing but horrible when you realize that
# reddit fails quite a lot. This means we could process the newest message but we haven't processed messages after that leaving
# those replies to be 'lost.' This means every-so-often we need to look back over a good chunk of messages to make sure we have
# everything we should. We would automatically 'find' the messages if anything replied to the chain but if nothing does
# they will be picked up in the extended validation. Given the default for this is ~30 minutes, the lookback being a single day
# would suffice. We are setting the default to 8 to allow a downtime interval. This process can recover from downtime up to
# ~7 ish days with this setting. If we have a downtime event > 7 days then you either need to change this variable or accept
# that some replies could be missing until someone touches that thread again. Downtime > a week should be rare one would hope.
redditMinutesBetweenExtendedValidationMode = 30
redditMaximumAmountOfDaysToAllowLookbackForMissingReplies = 8
redditSubredditToMonitor = '' # in text, like civcraft
# Explicit limiter on the number of modmails to pull. This is the max you will ever get - you won't even see threads if they
# exist beyond this limit. Change this if you feel the need. This is not -replies- in a thread but the master / root threads.
# A larger number will let you track more threads initially but this will slow you down for each processing cycle. This should
# be set just high enough for your uses and needs to be set by whoever owns a subreddit.
redditMaximumNumberOfRootThreadsToLookBack = 5000
redditAbsoluteOldestModmailRootNodeDateToConsider = 1420070400 # Epoch notation for Jan 01 2015.
# If you want to pull in tons and tons of history you could make this 0.
# Sqlite Information
sqliteDatabaseFilename = 'ModMailTicketManager.sqlite' # If this doesn't exist, it creates.
sqliteDatabaseTablename = 'HandledTickets' # TableName you wish to use for handled tickets. We will create it.
# Request Tracker
requestTrackerRestApiUrl = 'http://192.168.25.129/rt/REST/1.0/' # Pretty much your url + /Rest/1.0/
# General Queue
requestTrackerQueueToPostTo = 1 # Tools -> Configuration -> Queues -> Select, whichever queue you wish for general messages.
# Optional Author-Specific Queue
# If you wish certain root-authors to go to certain queues (automoderator for example), set the mapping up here.
# Same as always though - if the script user doesn't have permission to go there, you will have a bad time.
requestTrackerOptionalAuthorToQueueMapping = [['automoderator',1],['different_user_goes_here',1]] # This example has automod posts going to queue 1. Can accept multiple author/queue tuples.
# Request Tracker - User to use to post.
requestTrackerUsername = ''
requestTrackerPassword = ''
# Section on auto-transition of tickets
requestTrackerShouldWeTransitionTicketsOnReply = True
requestTrackerTicketStatesThatWeShouldTransition = ['resolved','others_go_here'] # Lower case here please! I am not doing case comparisons.
requestTrackerTicketStateWeShouldTransitionTo = 'open'
# Request Tracker -> Modmail replies Section.
# This deals with what you have to do to allow request tracker to push modmail replies back into Reddit.
# To enable this you need to add a Custom Field of type 'Fill in one text area' that applies to 'Tickets' that is Enabled.
# Once done, edit this custom field and change 'Applies To' to apply it to the different queues you wish reddit replies to come from.
# Do note that the custom field description is unused in request tracker - the name is what matters.
# Do note: Make absolutely 100% sure that the modmail request tracker bot has access to 'Modify Custom Field Values' in the queues tickets
# it will operate in. Failure to do so breaks the process because once we process a reply we set the custom field to empty-string to note that
# there isn't something queued up for posting. Also needs "Modify Ticket" privilege obviously.
# Request Tracker Bug: Make the custom field just simple text. No colons, etc. Seriously, there's a bug in request tracker. It will not work correctly in
# all api calls if you choose not to follow this. Buyer beware.
requestTrackerAllowModmailRepliesToBeSentToReddit = False # Change to True if you wish to allow replies.
requestTrackerCustomFieldForRedditReplies = 'New Reddit Modmail Reply' # Must be set to the -exact- custom field Name.
requestTrackerRedditModmailReply = 'Reply from the ModMail group:\n\n{Content}' # Change to whatever you would like. {Content} token is replaced with your message.
# Tokenized data used for choosing what is shown in the ticketing system for the initial ticket creation comment and replies.
# Allowed tokens for the following area (case matters!)
# {Author} = person in reddit who posted this.
# {ModmailMessageUrl} = URL for modmail message if you need to jump to it.
# {Content} = data that the user actually posted into modmail.
# {Subject} - only valid for initial creation comment/subject, as only the root message in a thread has that.
requestTrackerInitialTicketCreationSubject = 'Modmail - {Author} - {Subject}'
requestTrackerInitialTicketCreationComment = 'Post from {Author}\nResponse URL: {ModmailMessageUrl}\nContents:\n{Content}'
requestTrackerThreadReply = 'Post from {Author}\nContents:\n{Content}'
reddit_account_info_Field = 'MC Accounts' # The field where it should populate mc accounts to the reddit account.
# Set to true to allow users who have their accounts authenticated to auto populate.
autoPopulateFields = False
# Mysql information, be sure to point this to the location of the tables of the bukkit plugin; leave if you won't use this feature.
mysql_username = ''
mysql_password = ''
mysql_host = 'localhost'
mysql_dbname = ''
# End Definitions - Do not modify files below this line.
# Request Tracker Specific
# https://github.com/z4r/python-rtkit#comment-on-a-ticket-with-attachments
from rtkit.resource import RTResource
from rtkit.authenticators import CookieAuthenticator
from rtkit.errors import RTResourceError
# Switched from BasicAuthenticator to CookieAuthenticator due to issues with basic auth.
# http://stackoverflow.com/questions/17890098/how-to-create-a-ticket-in-rt-using-python-rtkit
resource = RTResource(requestTrackerRestApiUrl, requestTrackerUsername, requestTrackerPassword, CookieAuthenticator)
# other
import argparse
import logging
import praw
import time
import sqlite3
import sys, traceback
from datetime import datetime
from datetime import timedelta
import unicodedata # normalize unicode strings.
import mysql.connector
# Identifies this bot to the reddit API (praw requires a descriptive user agent).
prawUserAgent = 'ModMailTicketCreator v0.01 by /u/Pentom'
# Command line argument parsing
arg_parser = argparse.ArgumentParser(description='Modmail / RequestTracker ticket daemon')
arg_parser.add_argument('-l', '--logfile', help='The log file to store output in addition to stdout')
# Shared MySQL connection; None until check_mysql_connection() opens it.
cnx = None
# Parameterized lookup of MC account names linked to a reddit user.
# Fix: the original query was missing the '=' between the column and the
# %(reddit_name)s placeholder ('where reddit_name %(reddit_name)s'), which is
# invalid SQL and could never execute.
get_mc_users_from_redditaccount = 'select name from redditrelations where reddit_name = %(reddit_name)s'
def logException():
    """Write the active exception's formatted traceback and line number to the debug log."""
    _, _, tb = sys.exc_info()
    lines = [
        '*** print_exc:',
        traceback.format_exc(),
        '*** tb_lineno: {0}'.format(tb.tb_lineno),
    ]
    log.debug('\n'.join(lines))
def setupLogger(log_level=logging.INFO, log_file=None):
    """Configure the module-level `log` with a stdout handler and, optionally, a file handler.

    log_level -- logging level applied to the logger and its handlers.
    log_file  -- path appended to for persistent logs; on open failure the
                 error is printed and logging continues on stdout alone.
    """
    global log
    logfmt = logging.Formatter('[%(asctime)s] %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(log_level)
    stdout_handler.setFormatter(logfmt)
    file_handler = None
    if log_file:
        try:
            # Fix: use logging.FileHandler (append mode by default) instead of
            # wrapping a manually opened, never-closed handle in StreamHandler.
            file_handler = logging.FileHandler(log_file)
            file_handler.setLevel(log_level)
            file_handler.setFormatter(logfmt)
        except Exception as ex:
            # Fix: the original called logException() here, but `log` is not
            # assigned until below, so that call raised NameError.
            print('UNABLE TO OPEN LOG {0}: {1}'.format(log_file, str(ex)))
            file_handler = None
    log = logging.getLogger('script')
    log.addHandler(stdout_handler)
    if file_handler:
        log.addHandler(file_handler)
    log.setLevel(log_level)
def init():
    """Create the sqlite tracking table and its unique index, then schedule
    the first extended-validation pass."""
    global sqlConn
    global sqlCursor
    global nextExtendedValidationInterval
    sqlConn = None
    sqlCursor = None
    # Epoch seconds at which the next extended-validation scan becomes due.
    delta = (datetime.now() + timedelta(minutes=redditMinutesBetweenExtendedValidationMode) - datetime(1970, 1, 1))
    nextExtendedValidationInterval = delta.days * 86400 + delta.seconds
    table_ddl = (
        'CREATE TABLE IF NOT EXISTS ' + sqliteDatabaseTablename +
        '(CommentId TEXT PRIMARY KEY, ParentCommentId TEXT, TicketId INTEGER, '
        'CHECK((ParentCommentId is null and TicketId is not null) OR '
        '(ParentCommentId is not null and TicketId is null)));'
    )
    openSqlConnections()
    sqlCursor.execute(table_ddl)
    closeSqlConnections()
    index_ddl = (
        'CREATE UNIQUE INDEX IF NOT EXISTS UQ_' + sqliteDatabaseTablename +
        '_ParentCommentId_CommentId ON ' + sqliteDatabaseTablename +
        '(ParentCommentId, CommentId);'
    )
    openSqlConnections()
    sqlCursor.execute(index_ddl)
    closeSqlConnections()
    setGlobalVariablesForExtendedValidationMode()
def openSqlConnections():
    """Lazily open the module-wide sqlite connection and cursor if absent."""
    global sqlConn
    global sqlCursor
    if sqlConn is None:
        sqlConn = sqlite3.connect(sqliteDatabaseFilename)
    if sqlCursor is None:
        sqlCursor = sqlConn.cursor()
def closeSqlConnections():
    """Commit pending work and tear down the sqlite cursor/connection if open."""
    global sqlConn
    global sqlCursor
    if sqlConn is not None:
        sqlConn.commit()
        sqlCursor.close()
        sqlConn.close()
        sqlCursor = None
        sqlConn = None
def processModMail():
    """Poll the subreddit's modmail and mirror new threads/replies into Request Tracker.

    Logs and swallows errors (reddit fails routinely) so the daemon loop keeps
    running; never raises to the caller.
    """
    global nextExtendedValidationInterval
    try:
        r = praw.Reddit(user_agent=prawUserAgent)
        r.login(redditUsername, redditPassword)
        inExtendedValidationMode = False
        # Periodically re-scan older threads to pick up replies we may have missed.
        period = (datetime.now() - datetime(1970, 1, 1))
        if (nextExtendedValidationInterval < (period.days * 86400 + period.seconds)):
            log.info('Processing in ExtendedValidationMode')
            setGlobalVariablesForExtendedValidationMode()
            period = (datetime.now() + timedelta(minutes=redditMinutesBetweenExtendedValidationMode) - datetime(1970, 1, 1))
            nextExtendedValidationInterval = period.days * 86400 + period.seconds
            inExtendedValidationMode = True
        log.debug('Logged into Reddit.')
        sub = r.get_subreddit(redditSubredditToMonitor)
        for mail in sub.get_mod_mail(limit=redditMaximumNumberOfRootThreadsToLookBack):
            # Each root message decides whether older threads still need a look;
            # once it says stop, everything older is already recorded.
            shouldContinueProcessing = processModMailRootMessage(debug, mail, inExtendedValidationMode)
            if not shouldContinueProcessing:
                break
    except Exception:
        # Fix: sys.exc_traceback is a deprecated Python 2 alias (removed in
        # Python 3); take the traceback from sys.exc_info() instead. Also
        # narrowed the bare `except:` so SystemExit/KeyboardInterrupt propagate.
        exc_type, _, exc_tb = sys.exc_info()
        error = str(datetime.utcnow()) + ' - Error when attempting to review modmail on line number ' + str(exc_tb.tb_lineno) + '. Exception: ' + str(exc_type)
        log.error(error)
        logException()
        closeSqlConnections()  # commit any open sqlite work; safe due to order of operations.
def shouldAnyMoreMessagesBeProcessed(wasMessageAlreadyFullyInSystem, newestMessageEpochTimeUtc, inExtendedValidationMode):
    """Decide whether the modmail scan should keep walking older threads.

    Stops (returns False) when the thread is older than the absolute cutoff,
    or when it is already fully recorded and either we are not in extended
    validation mode, or it is older than the extended-validation lookback
    window. Otherwise returns True.
    """
    # Guard-clause form of the original elif ladder; same checks, same order.
    if newestMessageEpochTimeUtc < redditAbsoluteOldestModmailRootNodeDateToConsider:
        log.debug("shouldAnyMoreMessagesBeProcessed: Negative! Message is older than the oldest message root node to consider.")
        return False
    if wasMessageAlreadyFullyInSystem and not inExtendedValidationMode:
        log.debug("shouldAnyMoreMessagesBeProcessed: Negative! Message is already fully in our system and not in extended validation mode.")
        return False
    if wasMessageAlreadyFullyInSystem and inExtendedValidationMode and newestMessageEpochTimeUtc < extendedValidationModeOldDatePeriod:
        log.debug("shouldAnyMoreMessagesBeProcessed: Negative! Message is already fully in our system and even though in inExtendedValidationMode the newest reply is older than our extended validation age (redditMaximumAmountOfDaysToAllowLookbackForMissingReplies).")
        return False
    return True
# UTC vs Local date-time issue here I think.
# TODO: Fix if we care. For now, just push it out one more date.
# (no issue will be more than 12 hours so +24 and 'who cares for now')
def setGlobalVariablesForExtendedValidationMode():
    """Recompute the epoch-seconds cutoff: replies older than this are skipped
    even when an extended-validation pass re-checks recorded threads."""
    global extendedValidationModeOldDatePeriod
    cutoff = datetime.now() - timedelta(days=redditMaximumAmountOfDaysToAllowLookbackForMissingReplies)
    span = cutoff - datetime(1970, 1, 1)
    extendedValidationModeOldDatePeriod = span.days * 86400 + span.seconds
def processModMailRootMessage(debug, mail, inExtendedValidationMode):
    """Mirror one modmail root message (and its replies) into Request Tracker.

    debug -- when truthy, emit extra debug log lines.
    mail -- praw modmail message object (root of a thread).
    inExtendedValidationMode -- whether this pass re-checks older threads.

    Returns True if older threads should still be examined, False to stop.
    """
    shouldContinueProcessingMail = True
    alreadyProcessedAllItems = True
    weCreatedModmailRootMessage = False
    #Helping debug output.
    firstTime = True
    debugText = ''
    if firstTime and debug:
        firstTime = False
        log.debug('Found at least one item in modmail.')
    # created_utc arrives as a float-like value; round to whole epoch seconds.
    rootAge = int(round(float(str(mail.created_utc))))
    # NOTE(review): the `unicode` checks imply Python 2; non-ASCII content is
    # NFKD-folded to ASCII before storage/logging.
    rootAuthor = str(unicodedata.normalize('NFKD', mail.author).encode('ascii','ignore')) if type(mail.author) is unicode else str(mail.author)
    rootSubject = str(unicodedata.normalize('NFKD', mail.subject).encode('ascii','ignore')) if type(mail.subject) is unicode else str(mail.subject)
    rootBody = str(unicodedata.normalize('NFKD', mail.body).encode('ascii','ignore')) if type(mail.body) is unicode else str(mail.body)
    rootMessageId = str(mail.id) # Base 36, contains alphanumeric
    rootResponseUrl = 'https://www.reddit.com/message/messages/' + rootMessageId
    rootReplies = mail.replies
    # Early out - If this is reddit, just quit.
    if rootAuthor.lower() == 'reddit' or rootSubject.lower() == 'moderator added' or rootSubject.lower() == 'moderator invited':
        return True # Get out and ignore this message.
    queueIdToCreateTicketsIn = requestTrackerQueueToPostTo # Default Queue
    # Certain authors can be routed to a dedicated RT queue via config.
    for authorQueueMapping in requestTrackerOptionalAuthorToQueueMapping:
        if rootAuthor.lower() == authorQueueMapping[0].lower():
            log.debug('Found a matching author-queue mapping, redirecting user to specified queue for ticket creation if needed')
            queueIdToCreateTicketsIn = authorQueueMapping[1]
            break
    # track the newest age value amongst root and replies.
    messageNewestAge = rootAge
    log.debug('Checking if core message is handled yet. Subject: ' + rootSubject)
    # Has the current parent item been handled yet?
    ticketId = getTicketIdForAlreadyProcessedRootMessage(rootMessageId)
    #If we dont find it, we need to add it in.
    if ticketId == None:
        alreadyProcessedAllItems = False # There is at least one thing that we didnt find.
        weCreatedModmailRootMessage = True
        log.debug('Core message not found in system. Processing.')
        ticketId = createTicket(rootAuthor, rootSubject, rootBody, rootResponseUrl, queueIdToCreateTicketsIn)
        handle_pushing_player_accounts(ticketId, rootAuthor)
        log.debug('Added ticket to ticket system - ticket id: {0}'.format(ticketId))
        if ticketId < 1:
            raise LookupError('Did not get back appropriate ticket id to store from ticket system')
        # Record the root only after the ticket exists, so a crash in between
        # re-creates rather than orphans.
        noteTheFactWeProcessedAMessageId(rootMessageId, None, ticketId)
    else:
        log.debug('Core message found in system already.')
    log.debug('Checking children that may exist.')
    # At this point, variable ticketId is the appropriate integer ticket number where the parent is already at.
    # Now that we have handled the parent, check for each of the children within this root parent.
    messageReplyReturn = handleMessageReplies(debug, ticketId, rootMessageId, rootReplies, messageNewestAge, rootResponseUrl)
    allRepliesHandled = messageReplyReturn['foundAllItems']
    messageNewestAge = messageReplyReturn['messageNewestAge']
    alreadyProcessedAllItems = alreadyProcessedAllItems and allRepliesHandled
    # If we have any replies and we didnt just create this modmail root message,
    # then we need to assume the ticket could be closed. Do we need to open it?
    if not weCreatedModmailRootMessage and messageReplyReturn['foundReplyBySomeoneOtherThanTicketManager'] and requestTrackerShouldWeTransitionTicketsOnReply:
        transitionTicketToExpectedState(ticketId)
    shouldContinueProcessingMail = shouldAnyMoreMessagesBeProcessed(alreadyProcessedAllItems, messageNewestAge, inExtendedValidationMode)
    return shouldContinueProcessingMail
def handle_pushing_player_accounts(ticketId, author):
    """Populate the ticket's MC-accounts custom field with the author's linked accounts.

    No-op unless autoPopulateFields is enabled. Exits the process on failure,
    because a half-updated ticket could trigger a never-ending reply loop.
    """
    if not autoPopulateFields:
        return
    accounts = ''
    for account in getRequiredAccountInfo(author):
        accounts += account + ' '
    content = {
        'content': {
            # Fix: the original concatenated the function object itself
            # ('CF.{' + handle_pushing_player_accounts + '}'), which is a
            # TypeError at runtime; the custom field name is the config value
            # reddit_account_info_Field.
            'CF.{' + reddit_account_info_Field + '}': accounts
        }
    }
    try:
        response = resource.post(path='ticket/' + str(ticketId) + '/edit', payload=content,)
        if response.status_int != 200:
            raise LookupError('Was unable to update expected ticket, we should defensively exit here.')
    except Exception:
        # Fix: sys.exc_traceback is the deprecated Py2 alias; use sys.exc_info().
        exc_type, _, exc_tb = sys.exc_info()
        log.error('Error when attempting to update the ticket on line number {0}. Exception: {1}'.format(exc_tb.tb_lineno, exc_type))
        logException()
        closeSqlConnections()  # commit any open sqlite work before bailing out.
        sys.exit(1)
def getRequiredAccountInfo(author):
    """Return the list of MC account names linked to the given reddit author.

    Reads from the shared MySQL connection `cnx` using the parameterized
    get_mc_users_from_redditaccount query.
    """
    names = []
    cursor = None
    try:
        cursor = cnx.cursor()
        # Fix: the original called execute() with no parameters, so the
        # %(reddit_name)s placeholder was never bound to `author`.
        cursor.execute(get_mc_users_from_redditaccount, {'reddit_name': author})
        # Fix: fetchmany() returns only the first batch; fetchall() gets every
        # row. Rows are tuples, so take the first column rather than appending
        # the tuple itself (downstream concatenates these with strings).
        for row in cursor.fetchall():
            names.append(row[0])
    finally:
        # Fix: guard the cursor — if cnx.cursor() raised, `cursor` was unbound
        # and the original finally block raised NameError.
        if cursor is not None:
            cursor.close()
        cnx.commit()
    return names
def getTicketData(ticketId):
    """Fetch a ticket from Request Tracker and return its parsed attributes.

    Returns a list of dicts (one per parsed record, attribute name -> value);
    an empty list on any failure.
    """
    try:
        getTicketStatusUrl = 'ticket/' + str(ticketId)
        response = resource.get(path=getTicketStatusUrl)
        responseObj = []
        # Each parsed record is a sequence of (name, value) pairs.
        for ticket in response.parsed:
            responseObj.append({})
            for attribute in ticket:
                responseObj[len(responseObj)-1][attribute[0]] = attribute[1]
        return responseObj
    except RTResourceError:
        log.error('Failed to get ticket information for ticket id {0}.'.format(ticketId))
        logException()
        return []
    except Exception:
        # Fix: sys.exc_traceback is the deprecated Python 2 alias (removed in
        # Python 3); take the traceback from sys.exc_info() instead.
        exc_type, _, exc_tb = sys.exc_info()
        log.error('Error when attempting to getTicketData on line number {0}. Exception: {1}'.format(exc_tb.tb_lineno, exc_type))
        logException()
        return []
def setTicketStateTo(ticketId, newState):
    """Set the RT ticket's Status field to newState.

    Errors are logged and swallowed; the daemon carries on.
    """
    try:
        content = {
            'content': {
                'Status': newState,
            }
        }
        responseUrl = 'ticket/' + str(ticketId) + '/edit'
        response = resource.post(path=responseUrl, payload=content,)
    except Exception:
        # Fix: sys.exc_traceback is the deprecated Py2 alias; use sys.exc_info().
        exc_type, _, exc_tb = sys.exc_info()
        log.error('Error when attempting to setTicketStateTo (transitioning the ticket) on line number {0}. Exception: {1}'.format(exc_tb.tb_lineno, exc_type))
        logException()
def transitionTicketToExpectedState(ticketId):
    """Re-open a ticket when a new modmail reply arrives, if its current
    status is one of the configured transition-from states.

    Errors are logged and swallowed.
    """
    try:
        ticketData = getTicketData(ticketId)
        if len(ticketData) > 0:
            currentTicketStatus = ticketData[0]['Status']
            # Only statuses listed in the config are transitioned.
            if currentTicketStatus.lower() in requestTrackerTicketStatesThatWeShouldTransition:
                setTicketStateTo(ticketId, requestTrackerTicketStateWeShouldTransitionTo)
    except Exception:
        # Fix: sys.exc_traceback is the deprecated Py2 alias; use sys.exc_info().
        exc_type, _, exc_tb = sys.exc_info()
        log.error('Error when attempting to transitionTicketToExpectedState on line number {0}. Exception: {1}'.format(exc_tb.tb_lineno, exc_type))
        logException()
def noteTheFactWeProcessedAMessageId(messageId, parentMessageId, ticketId):
    """Record a processed modmail message in sqlite.

    Root messages (parentMessageId None) store their ticket id; replies store
    their parent id and a null ticket id, matching the table CHECK constraint.
    """
    openSqlConnections()
    if parentMessageId is None:
        statement = 'INSERT INTO ' + sqliteDatabaseTablename + '(ParentCommentId, CommentId, TicketId) values (null, ?, ?);'
        sqlCursor.execute(statement, (messageId, ticketId))
    else:
        statement = 'INSERT INTO ' + sqliteDatabaseTablename + '(ParentCommentId, CommentId, TicketId) values (?, ?, null);'
        sqlCursor.execute(statement, (parentMessageId, messageId))
    sqlConn.commit()
    closeSqlConnections()
def getHasReplyBeenProcessed(rootMessageId, replyMessageId):
    """Return True if this reply is already recorded under its root message."""
    openSqlConnections()
    statement = 'select 1 from ' + sqliteDatabaseTablename + ' where ParentCommentId = ? and CommentId = ?;'
    sqlCursor.execute(statement, (rootMessageId, replyMessageId))
    # A missing row means the reply still needs to be mirrored into RT.
    found = sqlCursor.fetchone() is not None
    closeSqlConnections()
    return found
def getTicketIdForAlreadyProcessedRootMessage(rootMessageId):
    """Return the RT ticket id recorded for a root modmail message, or None
    when the root has not been mirrored yet."""
    openSqlConnections()
    statement = 'select TicketId from ' + sqliteDatabaseTablename + ' where ParentCommentId is null and CommentId = ?;'
    sqlCursor.execute(statement, (rootMessageId,))  # a one-element sequence is required
    row = sqlCursor.fetchone()
    ticketId = row[0] if row is not None else None
    closeSqlConnections()
    return ticketId
# In reply object
# out - Object with two properties that denote if we already processed all items and the newest message age.
def handleMessageReplies(debug, ticketId, rootMessageId, replies, messageNewestAge, rootResponseUrl):
    """Mirror any unprocessed replies of one modmail thread into RT comments.

    Returns a dict with:
      foundAllItems -- True if every reply was already recorded.
      messageNewestAge -- newest epoch-seconds value seen among the replies.
      foundReplyBySomeoneOtherThanTicketManager -- True when an unprocessed
        reply came from someone other than the bot's reddit account.
    """
    firstTimeWithReply = True
    messageReplyReturn = {'foundAllItems':True, 'messageNewestAge':messageNewestAge, 'foundReplyBySomeoneOtherThanTicketManager':False}
    for reply in replies:
        if debug and firstTimeWithReply:
            firstTimeWithReply = False
            log.debug('Found at least one reply to core message.')
        # NOTE(review): the `unicode` checks imply Python 2; non-ASCII content
        # is NFKD-folded to ASCII before storage/logging.
        replyAuthor = str(unicodedata.normalize('NFKD', reply.author).encode('ascii','ignore')) if type(reply.author) is unicode else str(reply.author)
        replyBody = str(unicodedata.normalize('NFKD', reply.body).encode('ascii','ignore')) if type(reply.body) is unicode else str(reply.body)
        replyMessageId = str(reply.id) # Base 36, contains alphanumeric
        replyAge = int(round(float(str(reply.created_utc))))
        # Track the newest reply timestamp across the whole thread.
        if replyAge > messageReplyReturn['messageNewestAge']:
            if debug:
                debugText = 'Found a message component with a newer age. Old lowest-age = ' + str(messageReplyReturn['messageNewestAge']) + ', New lowest-age = ' + str(replyAge)
                log.debug(debugText)
            messageReplyReturn['messageNewestAge'] = replyAge
        log.debug('Checking if message reply is handled yet. Body: ' + replyBody)
        # Has the current child item been handled yet?
        alreadyProcessed = getHasReplyBeenProcessed(rootMessageId, replyMessageId)
        if not alreadyProcessed:
            messageReplyReturn['foundAllItems'] = False # There is at least one thing that we didnt find.
            # Replies by the bot itself should not re-open tickets.
            if replyAuthor.lower() != redditUsername.lower():
                messageReplyReturn['foundReplyBySomeoneOtherThanTicketManager'] = True
            log.debug('Reply message not found in system. Processing.')
            log.debug('Updating ticket found in our system: {0}'.format(ticketId))
            addTicketComment(ticketId, replyAuthor, replyBody, rootResponseUrl)
            # Record only after the RT comment succeeds, so a crash re-posts
            # rather than silently dropping the reply.
            noteTheFactWeProcessedAMessageId(replyMessageId, rootMessageId, None)
        else:
            log.debug('Reply message already found in system.')
    return messageReplyReturn
# no error handling, let errors bubble up.
# in - message information
# out integer ticket id.
def createTicket(author, subject, body, modmailMessageUrl, rtQueueId):
    """Create a new RT ticket from a modmail root message; return its integer id."""
    # Substitute tokens in the same order the templates were originally
    # expanded: Author, Subject, ModmailMessageUrl, Content.
    tokens = (
        ('{Author}', author),
        ('{Subject}', subject),
        ('{ModmailMessageUrl}', modmailMessageUrl),
        ('{Content}', body),
    )
    postedSubject = requestTrackerInitialTicketCreationSubject
    postedBody = requestTrackerInitialTicketCreationComment
    for token, value in tokens:
        postedSubject = postedSubject.replace(token, value)
        postedBody = postedBody.replace(token, value)
    content = {
        'content': {
            'Queue': rtQueueId,
            'Subject': postedSubject,
            'Text': postedBody,
        }
    }
    log.debug('Creating core ticket for queue: ' + str(rtQueueId))
    response = resource.post(path='ticket/new', payload=content,)
    # A failed create makes the parsing below raise, bubbling up to the caller.
    strTicket = (response.parsed[0][0][1]).split('/')[1]
    return int(strTicket)
# no error handling, let errors bubble up.
# in - message information
# out None
def addTicketComment(ticketId, author, body, modmailMessageUrl):
    """Append a modmail reply as a comment on an existing RT ticket."""
    text = (requestTrackerThreadReply
            .replace("{Author}", author)
            .replace("{ModmailMessageUrl}", modmailMessageUrl)
            .replace("{Content}", body))
    payload = {
        'content': {
            'Action': 'comment',
            'Text': text,
        }
    }
    response = resource.post(path='ticket/' + str(ticketId) + '/comment', payload=payload,)
    # Anything but HTTP 200 means the ticket could not be found/updated.
    if response.status_int != 200:
        raise LookupError('Was unable to find/update expected ticket.')
def processRequestTrackerRepliesToModMail():
    """Find RT tickets whose reddit-reply custom field is set and post each
    pending reply back to the originating modmail thread.

    SystemExit (raised by lower layers after unrecoverable ticket errors) is
    honored after committing sqlite work; all other errors are logged and
    swallowed so the daemon keeps running.
    """
    try:
        log.debug('Processing Request Tracker Replies to ModMail.')
        # TicketSQL: non-empty value in the configured custom field.
        queryText = '\'CF.{' + requestTrackerCustomFieldForRedditReplies.replace(" ", "%20") + '}\'>\'\''
        fullQuery = 'search/ticket?query=' + queryText + '&orderby=-LastUpdated&format=l'
        response = resource.get(path=fullQuery)
        responseObj = []
        for ticket in response.parsed:
            responseObj.append({})
            for attribute in ticket:
                responseObj[len(responseObj)-1][attribute[0]] = attribute[1]
        if len(responseObj) > 0:
            r = praw.Reddit(user_agent=prawUserAgent)
            r.login(redditUsername, redditPassword)
            cfAttr = 'CF.{' + requestTrackerCustomFieldForRedditReplies + '}'
            # for each items with a reply, handle said ticket reply.
            for ticket in responseObj:
                strTicket = ticket['id'].split('/')[1]
                ticketId = int(strTicket)
                reply = ticket[cfAttr]
                processTicketModmailReply(ticketId, reply, r)
    except SystemExit:
        closeSqlConnections()  # commit any open sqlite work, then honor the exit.
        sys.exit(1)
    except Exception:
        # Fix: sys.exc_traceback is the deprecated Python 2 alias (removed in
        # Python 3); take the traceback from sys.exc_info() instead.
        exc_type, _, exc_tb = sys.exc_info()
        log.error('Error when attempting to process modmail replies on line number {0}. Exception: {1}'.format(exc_tb.tb_lineno, exc_type))
        logException()
        closeSqlConnections()
def processTicketModmailReply(ticketId, replyText, prawContext):
    """Post one ticket's pending reply into its modmail thread, then clear the field."""
    redditUrl = getRedditPostUrlFromTicketId(ticketId)
    if redditUrl is None:
        log.warning('Could not find reddit post url for ticket id ' + str(ticketId) + '.')
        return
    # Reddit/RT glitches can leave a reply delivered but unacknowledged; skip
    # the post when the ticket history shows it already went out.
    if not checkIfAlreadyHandledModmailReply(ticketId, redditUrl, replyText):
        postRedditModmailReply(redditUrl, replyText, prawContext)
    removeModmailReplyFromTicket(ticketId)
# Due to the way modmail/request tracker work together, and reddits rampant failures,
# its possible that we make a post to reddit that is accepted by reddit but the request
# times out before it can acknowledge - so we don't note that it was accepted. We should try
# to work around this by checking if the modmail reply was accepted into the ticket system
# manually before posting again. This won't guarantee non-duplication but will significantly
# help such.
# Note - this is a 'nice to have' so if we have an issue with this call, we can assume that it hasnt got
# a reply - just to keep this train moving.
def checkIfAlreadyHandledModmailReply(ticketId, modmailMessageUrl, replyText):
    """Best-effort check of the RT ticket history for an already-posted reddit reply.

    Returns True only when the history shows the matching reply comment was
    recorded after the custom field was set; False on any doubt or error.
    """
    isAlreadyHandled = False
    try:
        response = resource.get(path='ticket/' + str(ticketId) + '/history?format=l')
        responseObj = []
        for ticket in response.parsed:
            responseObj.append({})
            for attribute in ticket:
                responseObj[len(responseObj)-1][attribute[0]] = attribute[1]
        # Rebuild the exact comment the bot would have posted. The first
        # {Content} substitution layers in the reply template (which itself
        # contains a {Content} token); the second fills in the actual reply.
        fullReplyText = requestTrackerThreadReply.replace("{Author}", redditUsername).replace("{ModmailMessageUrl}", modmailMessageUrl).replace("{Content}", requestTrackerRedditModmailReply).replace("{Content}", replyText)
        idForSettingModmailResponse = -1
        for change in responseObj:
            if change['Type'] == 'CustomField' and change['OldValue'] == '' and requestTrackerCustomFieldForRedditReplies in change['Description'] and replyText == change['NewValue']:
                idForSettingModmailResponse = int(change['id'])
        # Did we find the area where we set a modmail response? Not guaranteed if someones been monkeying with the tokens.
        if idForSettingModmailResponse > -1:
            # We found it, so lets go ahead and check to see if this has been handled so far!
            for change in responseObj:
                if int(change['id']) > idForSettingModmailResponse and change['Type'] == 'Comment' and change['Content'].lower() == fullReplyText.lower():
                    isAlreadyHandled = True
    except Exception:
        # Fix: sys.exc_traceback is the deprecated Python 2 alias (removed in
        # Python 3); take the traceback from sys.exc_info() instead.
        exc_type, _, exc_tb = sys.exc_info()
        log.error('Error when attempting to checkIfAlreadyHandledModmailReply on line number {0}. Exception: {1}'.format(exc_tb.tb_lineno, exc_type))
        logException()
        return False
    return isAlreadyHandled
# No error handling, let errors fail this call and bubble up.
def postRedditModmailReply(redditUrl, replyText, prawContext):
    """Reply to every message at the given modmail url with the templated text."""
    log.debug('Sending modmail reply to redditurl ' + redditUrl + ': ' + replyText)
    text = requestTrackerRedditModmailReply.replace("{Content}", replyText)
    for message in prawContext.get_content(url=redditUrl):
        message.reply(text)
def removeModmailReplyFromTicket(ticketId):
    """Clear the reddit-reply custom field on a ticket after its reply is posted.

    Exits the process on failure: leaving the field set would re-post the same
    reply to reddit on every pass, and noone wants that.
    """
    log.debug('Removing modmail reply attribute from ticket ' + str(ticketId) + '.')
    content = {
        'content': {
            'CF.{' + requestTrackerCustomFieldForRedditReplies + '}': ''
        }
    }
    try:
        response = resource.post(path='ticket/' + str(ticketId) + '/edit', payload=content,)
        if response.status_int != 200:
            raise LookupError('Was unable to update expected ticket, we should defensively exit here.')
    except Exception:
        # Fix: sys.exc_traceback is the deprecated Py2 alias; use sys.exc_info().
        exc_type, _, exc_tb = sys.exc_info()
        log.error('Error when attempting to update the ticket on line number {0}. Exception: {1}'.format(exc_tb.tb_lineno, exc_type))
        logException()
        closeSqlConnections()  # commit any open sqlite work before bailing out.
        sys.exit(1)
def getRedditPostUrlFromTicketId(ticketId):
    """Return the modmail root-message url recorded for an RT ticket, or None."""
    url = None
    openSqlConnections()
    statement = 'select CommentId from ' + sqliteDatabaseTablename + ' where ParentCommentId is null and TicketId = ?;'
    sqlCursor.execute(statement, (ticketId,))  # a one-element sequence is required
    row = sqlCursor.fetchone()
    if row is not None:
        log.debug('Found CommentId for ticketId')
        url = 'https://www.reddit.com/message/messages/' + str(row[0])
        log.debug('Reddit main modmail reply url is \'' + url + '\'')
    closeSqlConnections()
    return url
def check_mysql_connection():
    """Open the shared MySQL connection; return True on success, False otherwise.

    On success the connection is stored in the module-level `cnx` used by
    getRequiredAccountInfo().
    """
    # Fix: the original assigned to a *local* cnx, so the module-level
    # connection stayed None and every later lookup failed.
    global cnx
    try:
        cnx = mysql.connector.connect(user=mysql_username, password=mysql_password,
                                      host=mysql_host,
                                      database=mysql_dbname)
        return True
    except mysql.connector.Error:
        # Fix: the original called log(...) — `log` is a Logger object, which
        # is not callable; use log.error().
        log.error('Mysql connection could not be established')
        return False
def mainloop():
    """Daemon loop: process modmail (and pending RT replies, if enabled),
    sleep, repeat forever."""
    while True:
        log.debug('Waking... Processing modmail.')
        processModMail()
        # Only push RT replies back to reddit when the feature is enabled.
        if requestTrackerAllowModmailRepliesToBeSentToReddit:
            processRequestTrackerRepliesToModMail()
        log.debug('Modmail processed. Sleeping...')
        time.sleep(redditSleepIntervalInSecondsBetweenRequests) # sleep x seconds and do it again.
if __name__ == '__main__':
    args = arg_parser.parse_args()
    log_level = logging.INFO
    # `debug` is expected to be defined in the configuration section above.
    if debug:
        log_level = logging.DEBUG
    # Fix: configure logging BEFORE check_mysql_connection(), which logs on
    # failure — the original called it while `log` was still undefined.
    setupLogger(log_level=log_level, log_file=args.logfile)
    if not check_mysql_connection():
        autoPopulateFields = False # So the script can continue without this.
        log.debug('The Mysql failed to connect to the server.')
    init()
    mainloop()
| |
import functools
import mock
import warnings
try:
import urlparse
except ImportError: # Python 3
from urllib import parse as urlparse
from django.core.urlresolvers import NoReverseMatch, set_urlconf
from django.template import Context, Template
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from subdomains.middleware import (SubdomainMiddleware,
SubdomainURLRoutingMiddleware)
from subdomains.utils import reverse, urljoin
def prefix_values(dictionary, prefix):
    """Return a copy of *dictionary* with every value rewritten as 'prefix.value'."""
    return {key: '%s.%s' % (prefix, value) for key, value in dictionary.items()}
class SubdomainTestMixin(object):
    """Shared test fixture: points the current Site at DOMAIN and applies
    subdomain routing settings for the duration of each test run."""
    # Hostname used throughout the tests; the Site row is updated in setUp.
    DOMAIN = 'example.com'
    # Dotted package path under which the test URLconf modules live.
    URL_MODULE_PATH = 'subdomains.tests.urls'

    def setUp(self):
        super(SubdomainTestMixin, self).setUp()
        from django.contrib.sites.models import Site
        self.site = Site.objects.get_current()
        self.site.domain = self.DOMAIN
        self.site.save()

    @override_settings(
        DEFAULT_URL_SCHEME='http',
        ROOT_URLCONF='%s.application' % URL_MODULE_PATH,
        SUBDOMAIN_URLCONFS=prefix_values({
            None: 'marketing',
            'api': 'api',
            'www': 'marketing',
        }, prefix=URL_MODULE_PATH),
        MIDDLEWARE_CLASSES=(
            'django.middleware.common.CommonMiddleware',
            'subdomains.middleware.SubdomainURLRoutingMiddleware',
        ))
    def run(self, *args, **kwargs):
        # Decorating run() (rather than each test method) applies the settings
        # to setUp/tearDown as well as the test itself.
        super(SubdomainTestMixin, self).run(*args, **kwargs)

    def get_path_to_urlconf(self, name):
        """
        Returns the full path to the given urlconf.
        """
        return '.'.join((self.URL_MODULE_PATH, name))

    def get_host_for_subdomain(self, subdomain=None):
        """
        Returns the hostname for the provided subdomain.
        """
        if subdomain is not None:
            host = '%s.%s' % (subdomain, self.site.domain)
        else:
            host = '%s' % self.site.domain
        return host
class SubdomainMiddlewareTestCase(SubdomainTestMixin, TestCase):
    """Tests for SubdomainMiddleware's extraction of request.subdomain."""

    def setUp(self):
        super(SubdomainMiddlewareTestCase, self).setUp()
        self.middleware = SubdomainMiddleware()

    def test_subdomain_attribute(self):
        def subdomain(subdomain):
            """
            Returns the subdomain associated with the request by the middleware
            for the given subdomain.
            """
            host = self.get_host_for_subdomain(subdomain)
            request = RequestFactory().get('/', HTTP_HOST=host)
            self.middleware.process_request(request)
            return request.subdomain

        self.assertEqual(subdomain(None), None)
        self.assertEqual(subdomain('www'), 'www')
        # Multi-level subdomains are preserved verbatim, dots included.
        self.assertEqual(subdomain('www.subdomain'), 'www.subdomain')
        self.assertEqual(subdomain('subdomain'), 'subdomain')
        self.assertEqual(subdomain('another.subdomain'), 'another.subdomain')

    def test_www_domain(self):
        def host(host):
            """
            Returns the subdomain for the provided HTTP Host.
            """
            request = RequestFactory().get('/', HTTP_HOST=host)
            self.middleware.process_request(request)
            return request.subdomain

        # Make the site's canonical domain itself start with 'www.'.
        self.site.domain = 'www.%s' % self.DOMAIN
        self.site.save()
        with override_settings(REMOVE_WWW_FROM_DOMAIN=False):
            self.assertEqual(host('www.%s' % self.DOMAIN), None)
            # Squelch the subdomain warning for cleaner test output, since we
            # already know that this is an invalid subdomain.
            with warnings.catch_warnings(record=True) as warnlist:
                self.assertEqual(host('www.subdomain.%s' % self.DOMAIN), None)
                self.assertEqual(host('subdomain.%s' % self.DOMAIN), None)
            # Trick pyflakes into not warning us about variable usage.
            del warnlist
            self.assertEqual(host('subdomain.www.%s' % self.DOMAIN),
                'subdomain')
            self.assertEqual(host('www.subdomain.www.%s' % self.DOMAIN),
                'www.subdomain')
        with override_settings(REMOVE_WWW_FROM_DOMAIN=True):
            self.assertEqual(host('www.%s' % self.DOMAIN), 'www')
            self.assertEqual(host('subdomain.%s' % self.DOMAIN), 'subdomain')
            self.assertEqual(host('subdomain.www.%s' % self.DOMAIN),
                'subdomain.www')

    def test_case_insensitive_subdomain(self):
        # Host headers are case-insensitive; the extracted subdomain should be
        # normalized to lowercase regardless of which part was uppercased.
        host = 'WWW.%s' % self.DOMAIN
        request = RequestFactory().get('/', HTTP_HOST=host)
        self.middleware.process_request(request)
        self.assertEqual(request.subdomain, 'www')
        host = 'www.%s' % self.DOMAIN.upper()
        request = RequestFactory().get('/', HTTP_HOST=host)
        self.middleware.process_request(request)
        self.assertEqual(request.subdomain, 'www')
class SubdomainURLRoutingTestCase(SubdomainTestMixin, TestCase):
    """Tests for SubdomainURLRoutingMiddleware's per-subdomain URLconf selection."""

    def setUp(self):
        super(SubdomainURLRoutingTestCase, self).setUp()
        self.middleware = SubdomainURLRoutingMiddleware()

    def test_url_routing(self):
        def urlconf(subdomain):
            """
            Returns the URLconf associated with this request.
            """
            host = self.get_host_for_subdomain(subdomain)
            request = RequestFactory().get('/', HTTP_HOST=host)
            self.middleware.process_request(request)
            return getattr(request, 'urlconf', None)

        self.assertEqual(urlconf(None), self.get_path_to_urlconf('marketing'))
        self.assertEqual(urlconf('www'), self.get_path_to_urlconf('marketing'))
        self.assertEqual(urlconf('api'), self.get_path_to_urlconf('api'))
        # Falls through to the actual ROOT_URLCONF.
        self.assertEqual(urlconf('subdomain'), None)

    def test_appends_slash(self):
        # APPEND_SLASH redirects should fire for every subdomain's URLconf.
        for subdomain in (None, 'api', 'wildcard'):
            host = self.get_host_for_subdomain(subdomain)
            path = '/example' # No trailing slash.
            response = self.client.get(path, HTTP_HOST=host)
            self.assertEqual(response.status_code, 301)
            # Whether the response's Location header contains the URL prefix
            # here doesn't actually matter, since it will be considered
            # relative to the request URL, which *did* include the HTTP Host
            # header. To pave over inconsistencies between Django versions, we
            # normalize them both to be prefixed with the requested host. (If a
            # *different* base host is returned in the Location header, this
            # should override our default base and error.)
            normalize = functools.partial(
                urlparse.urljoin,
                'http://%s/' % (host,),
            )
            self.assertEqual(
                normalize(response['Location']),
                normalize(path + '/'),
            )
class SubdomainURLReverseTestCase(SubdomainTestMixin, TestCase):
    """Tests for subdomains.utils.urljoin and subdomain-aware reverse()."""

    def test_url_join(self):
        self.assertEqual(urljoin(self.DOMAIN), 'http://%s' % self.DOMAIN)
        self.assertEqual(urljoin(self.DOMAIN, scheme='https'),
            'https://%s' % self.DOMAIN)
        # DEFAULT_URL_SCHEME supplies the scheme when none is passed.
        with override_settings(DEFAULT_URL_SCHEME='https'):
            self.assertEqual(urljoin(self.DOMAIN), 'https://%s' % self.DOMAIN)
        self.assertEqual(urljoin(self.DOMAIN, path='/example/'),
            'http://%s/example/' % self.DOMAIN)

    def test_implicit_reverse(self):
        # Uses settings.SUBDOMAIN_URLCONFS[None], if it exists.
        # Otherwise would perform the same behavior as `test_wildcard_reverse`.
        self.assertEqual(reverse('home'), 'http://%s/' % self.DOMAIN)

    def test_explicit_reverse(self):
        # Uses explicitly provided settings.SUBDOMAIN_URLCONF[subdomain]
        self.assertEqual(reverse('home', subdomain='api'),
            'http://api.%s/' % self.DOMAIN)
        self.assertEqual(reverse('view', subdomain='api'),
            'http://api.%s/view/' % self.DOMAIN)

    def test_wildcard_reverse(self):
        # Falls through to settings.ROOT_URLCONF
        subdomain = 'wildcard'
        self.assertEqual(reverse('home', subdomain),
            'http://%s.%s/' % (subdomain, self.DOMAIN))
        self.assertEqual(reverse('view', subdomain),
            'http://%s.%s/view/' % (subdomain, self.DOMAIN))

    def test_reverse_subdomain_mismatch(self):
        # NOTE(review): 'view' reverses fine with a subdomain (see above) but
        # raises without one — presumably absent from the default URLconf.
        self.assertRaises(NoReverseMatch, lambda: reverse('view'))

    def test_reverse_invalid_urlconf_argument(self):
        # reverse() derives the URLconf from the subdomain; an explicit
        # urlconf argument is rejected.
        self.assertRaises(TypeError,
            lambda: reverse('home',
                urlconf=self.get_path_to_urlconf('marketing')))

    def test_using_not_default_urlconf(self):
        # Ensure that changing the currently active URLconf to something other
        # than the default still resolves wildcard subdomains correctly.
        set_urlconf(self.get_path_to_urlconf('api'))
        subdomain = 'wildcard'
        # This will raise NoReverseMatch if we're using the wrong URLconf for
        # the provided subdomain.
        self.assertEqual(reverse('application', subdomain=subdomain),
            'http://%s.%s/application/' % (subdomain, self.DOMAIN))
class SubdomainTemplateTagTestCase(SubdomainTestMixin, TestCase):
    """Tests for the ``{% url %}`` override in the subdomainurls tag library."""

    def make_template(self, template):
        return Template('{% load subdomainurls %}' + template)

    def _render(self, template, context):
        # Small private helper: render with a Context and strip whitespace.
        return template.render(Context(context)).strip()

    def test_without_subdomain(self):
        template = self.make_template('{% url view %}')
        rendered = self._render(template, {'view': 'home'})
        self.assertEqual(rendered, 'http://%s/' % self.DOMAIN)

    def test_with_subdomain(self):
        template = self.make_template('{% url view subdomain=subdomain %}')
        for sub in ('www', 'api', 'wildcard'):
            rendered = self._render(template,
                                    {'view': 'home', 'subdomain': sub})
            self.assertEqual(rendered,
                             'http://%s.%s/' % (sub, self.DOMAIN))

    def test_no_reverse(self):
        template = self.make_template('{% url view subdomain=subdomain %}')
        context = Context({'view': '__invalid__'})
        with self.assertRaises(NoReverseMatch):
            template.render(context)

    def test_implied_subdomain_from_request(self):
        # When no subdomain is passed explicitly, the tag reads it from
        # ``request.subdomain`` in the rendering context.
        template = self.make_template('{% url view %}')

        def render_for(sub):
            request = mock.Mock()
            request.subdomain = sub
            return self._render(template,
                                {'view': 'home', 'request': request})

        self.assertEqual(render_for(None), 'http://%s/' % self.DOMAIN)
        for sub in ('www', 'api', 'wildcard'):
            self.assertEqual(render_for(sub),
                             'http://%s.%s/' % (sub, self.DOMAIN))
| |
from pprint import pprint
from django.conf import settings
from apps.reader.models import MUserStory
from apps.rss_feeds.models import Feed, MStory, MFeedPage
from apps.rss_feeds.models import MFeedIcon, FeedIcon
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
import mongoengine, pymongo
import sys
from mongoengine.queryset import OperationError
from utils import json_functions as json
# Mongo connection parameters come from Django settings. Connecting at
# import time is a deliberate side effect: every bootstrap function below
# shares this module-level `db` handle.
MONGO_DB = settings.MONGO_DB
db = mongoengine.connect(MONGO_DB['NAME'], host=MONGO_DB['HOST'], port=MONGO_DB['PORT'])
def bootstrap_stories():
    """One-off migration: copy relational ``Story`` rows into the MStory
    MongoDB collection, feed by feed.

    NOTE(review): ``Story`` is not imported in this module -- presumably a
    legacy Django model; this script only runs in an environment (e.g. a
    Django shell) where it is available.
    """
    print "Mongo DB stories: %s" % MStory.objects().count()
    # Destructive drop is disabled; uncomment to wipe the collection first.
    # db.stories.drop()
    print "Dropped! Mongo DB stories: %s" % MStory.objects().count()
    print "Stories: %s" % Story.objects.all().count()
    pprint(db.stories.index_information())
    # Migrate the most active feeds first.
    feeds = Feed.objects.all().order_by('-average_stories_per_month')
    feed_count = feeds.count()
    i = 0
    for feed in feeds:
        i += 1
        print "%s/%s: %s (%s stories)" % (i, feed_count,
                                          feed, Story.objects.filter(story_feed=feed).count())
        sys.stdout.flush()
        stories = Story.objects.filter(story_feed=feed).values()
        for story in stories:
            # story['story_tags'] = [tag.name for tag in Tag.objects.filter(story=story['id'])]
            try:
                story['story_tags'] = json.decode(story['story_tags'])
            except:
                # Best effort: skip stories whose tags fail to decode.
                continue
            # Drop relational-only columns before handing the dict to mongoengine.
            del story['id']
            del story['story_author_id']
            try:
                MStory(**story).save()
            except:
                # Best effort: duplicates / validation errors are ignored.
                continue
    print "\nMongo DB stories: %s" % MStory.objects().count()
def bootstrap_userstories():
    """One-off migration: copy relational ``UserStory`` read-state rows into
    the MUserStory MongoDB collection.

    NOTE(review): ``UserStory`` and ``Story`` are not imported here; legacy
    models are assumed to be available in the execution context.
    """
    print "Mongo DB userstories: %s" % MUserStory.objects().count()
    # Destructive drop is disabled; uncomment to wipe the collection first.
    # db.userstories.drop()
    print "Dropped! Mongo DB userstories: %s" % MUserStory.objects().count()
    print "UserStories: %s" % UserStory.objects.all().count()
    pprint(db.userstories.index_information())
    userstories = UserStory.objects.all().values()
    for userstory in userstories:
        try:
            story = Story.objects.get(pk=userstory['story_id'])
        except Story.DoesNotExist:
            continue
        try:
            # Re-link to the already-migrated Mongo story by (feed, guid).
            userstory['story'] = MStory.objects(story_feed_id=story.story_feed.pk, story_guid=story.story_guid)[0]
        except:
            # '!' marks a story that was not found in Mongo.
            print '!',
            continue
        print '.',
        # Drop relational-only columns before constructing the document.
        del userstory['id']
        del userstory['opinion']
        del userstory['story_id']
        try:
            MUserStory(**userstory).save()
        except:
            print '\n\n!\n\n'
            continue
    print "\nMongo DB userstories: %s" % MUserStory.objects().count()
def bootstrap_classifiers():
    """One-off migration: copy each relational classifier table into its
    matching Mongo classifier collection, denormalizing author/tag names.

    NOTE(review): ``ClassifierTitle``/``ClassifierAuthor``/``ClassifierFeed``/
    ``ClassifierTag`` (and ``StoryAuthor``, ``Tag``) are not imported in this
    module; only the Mongo counterparts are. Legacy models are assumed to be
    importable in the execution context.
    """
    for sql_classifier, mongo_classifier in ((ClassifierTitle, MClassifierTitle),
                                             (ClassifierAuthor, MClassifierAuthor),
                                             (ClassifierFeed, MClassifierFeed),
                                             (ClassifierTag, MClassifierTag)):
        collection = mongo_classifier.meta['collection']
        print "Mongo DB classifiers: %s - %s" % (collection, mongo_classifier.objects().count())
        # Destructive drop is disabled; uncomment to wipe the collection first.
        # db[collection].drop()
        print "Dropped! Mongo DB classifiers: %s - %s" % (collection, mongo_classifier.objects().count())
        print "%s: %s" % (sql_classifier._meta.object_name, sql_classifier.objects.all().count())
        pprint(db[collection].index_information())
        for userclassifier in sql_classifier.objects.all().values():
            del userclassifier['id']
            # Replace foreign keys with the denormalized display value,
            # which is what the Mongo documents store.
            if sql_classifier._meta.object_name == 'ClassifierAuthor':
                author = StoryAuthor.objects.get(pk=userclassifier['author_id'])
                userclassifier['author'] = author.author_name
                del userclassifier['author_id']
            if sql_classifier._meta.object_name == 'ClassifierTag':
                tag = Tag.objects.get(pk=userclassifier['tag_id'])
                userclassifier['tag'] = tag.name
                del userclassifier['tag_id']
            print '.',
            try:
                mongo_classifier(**userclassifier).save()
            except:
                # Best effort: log a loud marker and keep going.
                print '\n\n!\n\n'
                continue
        print "\nMongo DB classifiers: %s - %s" % (collection, mongo_classifier.objects().count())
def bootstrap_feedpages():
    """One-off migration: copy relational ``FeedPage`` rows into the
    MFeedPage MongoDB collection, skipping feeds already migrated.

    NOTE(review): ``FeedPage`` is not imported in this module; the legacy
    model is assumed to be available in the execution context.
    """
    print "Mongo DB feed_pages: %s" % MFeedPage.objects().count()
    # Destructive drop is disabled; uncomment to wipe the collection first.
    # db.feed_pages.drop()
    print "Dropped! Mongo DB feed_pages: %s" % MFeedPage.objects().count()
    print "FeedPages: %s" % FeedPage.objects.count()
    pprint(db.feed_pages.index_information())
    feeds = Feed.objects.all().order_by('-average_stories_per_month')
    feed_count = feeds.count()
    i = 0
    for feed in feeds:
        i += 1
        print "%s/%s: %s" % (i, feed_count, feed,)
        sys.stdout.flush()
        # Idempotence: only migrate feeds with no Mongo page document yet.
        if not MFeedPage.objects(feed_id=feed.pk):
            feed_page = FeedPage.objects.filter(feed=feed).values()
            if feed_page:
                del feed_page[0]['id']
                feed_page[0]['feed_id'] = feed.pk
                try:
                    MFeedPage(**feed_page[0]).save()
                except:
                    print '\n\n!\n\n'
                    continue
    print "\nMongo DB feed_pages: %s" % MFeedPage.objects().count()
def bootstrap_feedicons():
    """One-off migration: copy relational ``FeedIcon`` rows into the
    MFeedIcon MongoDB collection.

    WARNING: unlike the other bootstrap functions, the collection drop here
    is NOT commented out -- running this wipes `feed_icons` first.
    """
    print "Mongo DB feed_icons: %s" % MFeedIcon.objects().count()
    db.feed_icons.drop()
    print "Dropped! Mongo DB feed_icons: %s" % MFeedIcon.objects().count()
    print "FeedIcons: %s" % FeedIcon.objects.count()
    pprint(db.feed_icons.index_information())
    feeds = Feed.objects.all().order_by('-average_stories_per_month')
    feed_count = feeds.count()
    i = 0
    for feed in feeds:
        i += 1
        print "%s/%s: %s" % (i, feed_count, feed,)
        sys.stdout.flush()
        # Idempotence: only migrate feeds with no Mongo icon document yet.
        if not MFeedIcon.objects(feed_id=feed.pk):
            feed_icon = FeedIcon.objects.filter(feed=feed).values()
            if feed_icon:
                try:
                    MFeedIcon(**feed_icon[0]).save()
                except:
                    print '\n\n!\n\n'
                    continue
    print "\nMongo DB feed_icons: %s" % MFeedIcon.objects().count()
def compress_stories():
    """Re-save every MStory, feed by feed, printing overall percent progress.

    NOTE(review): the re-save is presumably what applies compression (via
    logic in ``MStory.save()``); nothing in this function compresses
    directly -- confirm against the model.
    """
    count = MStory.objects().count()
    print "Mongo DB stories: %s" % count
    p = 0.0    # last percentage printed
    i = 0      # stories processed so far (float-incremented below)
    feeds = Feed.objects.all().order_by('-average_stories_per_month')
    feed_count = feeds.count()
    f = 0
    for feed in feeds:
        f += 1
        print "%s/%s: %s" % (f, feed_count, feed,)
        sys.stdout.flush()
        for story in MStory.objects(story_feed_id=feed.pk):
            i += 1.0
            # Print progress only when the rounded percentage changes.
            if round(i / count * 100) != p:
                p = round(i / count * 100)
                print '%s%%' % p
            story.save()
def reindex_stories():
    """Re-key MStory documents whose ``_id`` is a string (the old guid-based
    key): move the guid into ``story_guid``, assign a fresh ObjectId, save
    the new document, and remove the old one.

    NOTE(review): the local ``db`` (a raw pymongo handle) shadows the
    module-level mongoengine connection of the same name.
    """
    db = pymongo.Connection().newsblur
    count = MStory.objects().count()
    print "Mongo DB stories: %s" % count
    p = 0.0    # last percentage printed
    i = 0      # stories processed so far
    feeds = Feed.objects.all().order_by('-average_stories_per_month')
    feed_count = feeds.count()
    f = 0
    for feed in feeds:
        f += 1
        print "%s/%s: %s" % (f, feed_count, feed,)
        sys.stdout.flush()
        for story in MStory.objects(story_feed_id=feed.pk):
            i += 1.0
            if round(i / count * 100) != p:
                p = round(i / count * 100)
                print '%s%%' % p
            # Old-style documents used the unicode story guid as their _id.
            if isinstance(story.id, unicode):
                story.story_guid = story.id
                story.id = pymongo.objectid.ObjectId()
                try:
                    story.save()
                except OperationError, e:
                    print " ***> OperationError: %s" % e
                # BUG(review): ``except e:`` does not catch by type -- it
                # evaluates the (possibly unbound) name ``e``; almost
                # certainly ``except Exception, e:`` was intended.
                except e:
                    print ' ***> Unknown Error: %s' % e
                # Drop the old guid-keyed document now that the ObjectId
                # copy exists.
                db.stories.remove({"_id": story.story_guid})
if __name__ == '__main__':
    # Migration steps are run one at a time by hand: uncomment the step
    # you need and re-run. Only the icon bootstrap is currently active.
    # bootstrap_stories()
    # bootstrap_userstories()
    # bootstrap_classifiers()
    # bootstrap_feedpages()
    # compress_stories()
    # reindex_stories()
    bootstrap_feedicons()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""GCS file system implementation for accessing files on GCS."""
# pytype: skip-file
from typing import BinaryIO # pylint: disable=unused-import
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressedFile
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
from apache_beam.io.gcp import gcsio
__all__ = ['GCSFileSystem']
class GCSFileSystem(FileSystem):
  """A GCS ``FileSystem`` implementation for accessing files on GCS.

  All paths handled here carry the ``gs://`` prefix; every operation is
  delegated to :class:`~apache_beam.io.gcp.gcsio.GcsIO`.
  """
  CHUNK_SIZE = gcsio.MAX_BATCH_OPERATION_SIZE  # Chunk size in batch operations
  GCS_PREFIX = 'gs://'

  @classmethod
  def scheme(cls):
    """URI scheme for the FileSystem
    """
    return 'gs'

  def join(self, basepath, *paths):
    """Join two or more pathname components for the filesystem

    Args:
      basepath: string path of the first component of the path
      paths: path components to be added

    Returns: full path after combining all the passed components

    Raises:
      ValueError: if ``basepath`` does not start with ``gs://``.
    """
    if not basepath.startswith(GCSFileSystem.GCS_PREFIX):
      raise ValueError('Basepath %r must be GCS path.' % basepath)
    path = basepath
    for p in paths:
      # Normalize to exactly one '/' between each pair of components.
      path = path.rstrip('/') + '/' + p.lstrip('/')
    return path

  def split(self, path):
    """Splits the given path into two parts.

    Splits the path into a pair (head, tail) such that tail contains the last
    component of the path and head contains everything up to that.

    Head will include the GCS prefix ('gs://').

    Args:
      path: path as a string

    Returns:
      a pair of path components as strings.

    Raises:
      ValueError: if ``path`` does not start with ``gs://``.
    """
    path = path.strip()
    if not path.startswith(GCSFileSystem.GCS_PREFIX):
      raise ValueError('Path %r must be GCS path.' % path)

    prefix_len = len(GCSFileSystem.GCS_PREFIX)
    # Search for the last '/' only past the scheme prefix, so the '//' in
    # 'gs://' is never mistaken for a separator.
    last_sep = path[prefix_len:].rfind('/')
    if last_sep >= 0:
      last_sep += prefix_len

    if last_sep > 0:
      return (path[:last_sep], path[last_sep + 1:])
    elif last_sep < 0:
      # No separator after the prefix: the whole string is the head.
      return (path, '')
    else:
      raise ValueError('Invalid path: %s' % path)

  def mkdirs(self, path):
    """Recursively create directories for the provided path.

    Args:
      path: string path of the directory structure that should be created

    Raises:
      IOError: if leaf directory already exists.
    """
    # GCS is a flat object store; directories need not be created.
    pass

  def has_dirs(self):
    """Whether this FileSystem supports directories."""
    return False

  def _list(self, dir_or_prefix):
    """List files in a location.

    Listing is non-recursive, for filesystems that support directories.

    Args:
      dir_or_prefix: (string) A directory or location prefix (for filesystems
        that don't have directories).

    Returns:
      Generator of ``FileMetadata`` objects.

    Raises:
      ``BeamIOError``: if listing fails, but not if no files were found.
    """
    try:
      for path, size in gcsio.GcsIO().list_prefix(dir_or_prefix).items():
        yield FileMetadata(path, size)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("List operation failed", {dir_or_prefix: e})

  def _path_open(
      self,
      path,
      mode,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Helper functions to open a file in the provided mode.

    Wraps the raw GCS stream in a ``CompressedFile`` unless the resolved
    compression type is UNCOMPRESSED.
    """
    compression_type = FileSystem._get_compression_type(path, compression_type)
    mime_type = CompressionTypes.mime_type(compression_type, mime_type)
    raw_file = gcsio.GcsIO().open(path, mode, mime_type=mime_type)
    if compression_type == CompressionTypes.UNCOMPRESSED:
      return raw_file
    return CompressedFile(raw_file, compression_type=compression_type)

  def create(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    # type: (...) -> BinaryIO

    """Returns a write channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'wb', mime_type, compression_type)

  def open(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    # type: (...) -> BinaryIO

    """Returns a read channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'rb', mime_type, compression_type)

  def copy(self, source_file_names, destination_file_names):
    """Recursively copy the file tree from the source to the destination

    Args:
      source_file_names: list of source file objects that needs to be copied
      destination_file_names: list of destination of the new object

    Raises:
      ``BeamIOError``: if any of the copy operations fail
    """
    err_msg = (
        "source_file_names and destination_file_names should "
        "be equal in length")
    assert len(source_file_names) == len(destination_file_names), err_msg

    def _copy_path(source, destination):
      """Recursively copy the file tree from the source to the destination
      """
      if not destination.startswith(GCSFileSystem.GCS_PREFIX):
        raise ValueError('Destination %r must be GCS path.' % destination)
      # Use copy_tree if the path ends with / as it is a directory
      if source.endswith('/'):
        gcsio.GcsIO().copytree(source, destination)
      else:
        gcsio.GcsIO().copy(source, destination)

    # Collect per-pair failures and surface them all at once.
    exceptions = {}
    for source, destination in zip(source_file_names, destination_file_names):
      try:
        _copy_path(source, destination)
      except Exception as e:  # pylint: disable=broad-except
        exceptions[(source, destination)] = e

    if exceptions:
      raise BeamIOError("Copy operation failed", exceptions)

  def rename(self, source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.
    Source and destination lists should be of the same size.

    Implemented as batched copy-then-delete: a file is deleted only if its
    copy succeeded, so a failed copy leaves the source intact.

    Args:
      source_file_names: List of file paths that need to be moved
      destination_file_names: List of destination_file_names for the files

    Raises:
      ``BeamIOError``: if any of the rename operations fail
    """
    err_msg = (
        "source_file_names and destination_file_names should "
        "be equal in length")
    assert len(source_file_names) == len(destination_file_names), err_msg

    # Split the work into batches of at most CHUNK_SIZE pairs.
    gcs_batches = []
    gcs_current_batch = []
    for src, dest in zip(source_file_names, destination_file_names):
      gcs_current_batch.append((src, dest))
      if len(gcs_current_batch) == self.CHUNK_SIZE:
        gcs_batches.append(gcs_current_batch)
        gcs_current_batch = []
    if gcs_current_batch:
      gcs_batches.append(gcs_current_batch)

    # Execute GCS renames if any and return exceptions.
    exceptions = {}
    for batch in gcs_batches:
      copy_statuses = gcsio.GcsIO().copy_batch(batch)
      copy_succeeded = []
      for src, dest, exception in copy_statuses:
        if exception:
          exceptions[(src, dest)] = exception
        else:
          copy_succeeded.append((src, dest))
      delete_batch = [src for src, dest in copy_succeeded]
      delete_statuses = gcsio.GcsIO().delete_batch(delete_batch)
      # delete_statuses preserves the order of delete_batch, so index i
      # pairs each delete result with its copy destination.
      for i, (src, exception) in enumerate(delete_statuses):
        dest = copy_succeeded[i][1]
        if exception:
          exceptions[(src, dest)] = exception

    if exceptions:
      raise BeamIOError("Rename operation failed", exceptions)

  def exists(self, path):
    """Check if the provided path exists on the FileSystem.

    Args:
      path: string path that needs to be checked.

    Returns: boolean flag indicating if path exists
    """
    return gcsio.GcsIO().exists(path)

  def size(self, path):
    """Get size of path on the FileSystem.

    Args:
      path: string path in question.

    Returns: int size of path according to the FileSystem.

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    return gcsio.GcsIO().size(path)

  def last_updated(self, path):
    """Get UNIX Epoch time in seconds on the FileSystem.

    Args:
      path: string path of file.

    Returns: float UNIX Epoch time

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    return gcsio.GcsIO().last_updated(path)

  def checksum(self, path):
    """Fetch checksum metadata of a file on the
    :class:`~apache_beam.io.filesystem.FileSystem`.

    Args:
      path: string path of a file.

    Returns: string containing checksum

    Raises:
      ``BeamIOError``: if path isn't a file or doesn't exist.
    """
    try:
      return gcsio.GcsIO().checksum(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("Checksum operation failed", {path: e})

  def delete(self, paths):
    """Deletes files or directories at the provided paths.
    Directories will be deleted recursively.

    Args:
      paths: list of paths that give the file objects to be deleted
    """
    def _delete_path(path):
      """Recursively delete the file or directory at the provided path.
      """
      # Trailing slash marks a "directory": expand it to a wildcard match.
      if path.endswith('/'):
        path_to_use = path + '*'
      else:
        path_to_use = path
      match_result = self.match([path_to_use])[0]
      statuses = gcsio.GcsIO().delete_batch(
          [m.path for m in match_result.metadata_list])
      # Raise the first failure; the caller maps it back to `path`.
      failures = [e for (_, e) in statuses if e is not None]
      if failures:
        raise failures[0]

    exceptions = {}
    for path in paths:
      try:
        _delete_path(path)
      except Exception as e:  # pylint: disable=broad-except
        exceptions[path] = e

    if exceptions:
      raise BeamIOError("Delete operation failed", exceptions)
| |
"""
Note: this code is a Theano translation of the linesearch implemented in
scipy.optimize.linesearch
See :
https://github.com/scipy/scipy/blob/master/scipy/optimize/linesearch.py
"""
import theano
import theano.tensor as TT
from theano.ifelse import ifelse
from theano.sandbox.scan import scan
import numpy
# Symbolic scalar constants shared by all the line-search routines below.
one = TT.constant(numpy.asarray(1, dtype=theano.config.floatX))
zero = TT.constant(numpy.asarray(0, dtype=theano.config.floatX))
nan = TT.constant(numpy.asarray(numpy.nan, dtype=theano.config.floatX))
# int8 "booleans" used as branch values inside lazy_or / lazy_and.
true = TT.constant(numpy.asarray(1, dtype='int8'))
false = TT.constant(numpy.asarray(0, dtype='int8'))
def lazy_or(name='none', *args):
    """Short-circuiting symbolic OR over ``args`` built from nested ifelse.

    The first condition that evaluates truthy yields ``true`` without
    evaluating the remaining conditions. ``name`` (which must be passed
    first, before the conditions) prefixes the name of each generated
    ifelse node.
    """
    # Fold right-to-left: the innermost value is the last condition itself,
    # each earlier condition wraps it in a lazily evaluated ifelse.
    result = args[-1]
    for position in range(len(args) - 2, -1, -1):
        remaining = len(args) - position
        result = ifelse(args[position], true, result,
                        name=name + str(remaining))
    return result
def lazy_and(name='node', *args):
    """Short-circuiting symbolic AND over ``args`` built from nested ifelse.

    The first condition equal to zero yields ``false`` without evaluating
    the remaining conditions. ``name`` (passed first, before the
    conditions) prefixes the name of each generated ifelse node.
    """
    # Fold right-to-left, mirroring the recursive formulation: each
    # condition short-circuits to `false` when it is zero.
    result = args[-1]
    for position in range(len(args) - 2, -1, -1):
        remaining = len(args) - position
        result = ifelse(TT.eq(args[position], zero), false, result,
                        name=name + str(remaining))
    return result
def my_not(arg):
    """Symbolic logical negation: 1 where ``arg`` equals zero, else 0."""
    negated = TT.eq(arg, zero)
    return negated
def constant(value):
    """Wrap ``value`` as a theano constant using the configured floatX dtype."""
    as_array = numpy.asarray(value, dtype=theano.config.floatX)
    return TT.constant(as_array)
def scalar_armijo_search(phi, phi0, derphi0, c1=constant(1e-4),
                         n_iters=10, profile=0):
    """
    Symbolic backtracking line search enforcing the Armijo condition
    ``phi(a) <= phi0 + c1 * a * derphi0``.

    Mirrors ``scipy.optimize.linesearch.scalar_search_armijo``: try step
    size 1, then the quadratic-interpolation minimizer, then iterate cubic
    interpolation inside a theano ``scan`` until the condition holds.

    Parameters
    ----------
    phi : callable
        Builds the symbolic objective value at a given step size.
    phi0 : theano scalar
        Value of phi at 0.
    derphi0 : theano scalar
        Directional derivative of phi at 0 (negative for descent).
    c1 : theano constant
        Armijo sufficient-decrease parameter.
    n_iters : int
        Maximum number of cubic-interpolation iterations in the scan.
    profile : flag
        Passed through to ``scan``.

    Returns
    -------
    (a_opt, score)
        Symbolic best step size and phi(a_opt).
    """
    # First trial: the full step alpha0 = 1.
    alpha0 = one
    phi_a0 = phi(alpha0)
    # Second trial: minimizer of the quadratic interpolant through
    # (0, phi0), (alpha0, phi_a0) with slope derphi0 at 0.
    alpha1 = -(derphi0) * alpha0 ** 2 / 2.0 /\
        (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = phi(alpha1)

    # NOTE(review): csol1 omits an ``alpha0 *`` factor relative to the
    # general Armijo test; since alpha0 == 1 the value is identical here.
    csol1 = phi_a0 <= phi0 + c1 * derphi0
    csol2 = phi_a1 <= phi0 + c1 * alpha1 * derphi0

    def armijo(alpha0, alpha1, phi_a0, phi_a1):
        # One cubic-interpolation refinement step: fit a cubic through the
        # two most recent trial points and jump to its minimizer.
        factor = alpha0 ** 2 * alpha1 ** 2 * (alpha1 - alpha0)
        a = alpha0 ** 2 * (phi_a1 - phi0 - derphi0 * alpha1) - \
            alpha1 ** 2 * (phi_a0 - phi0 - derphi0 * alpha0)
        a = a / factor
        b = -alpha0 ** 3 * (phi_a1 - phi0 - derphi0 * alpha1) + \
            alpha1 ** 3 * (phi_a0 - phi0 - derphi0 * alpha0)
        b = b / factor

        alpha2 = (-b + TT.sqrt(abs(b ** 2 - 3 * a * derphi0))) / (3.0 * a)
        phi_a2 = phi(alpha2)

        # Stop when Armijo holds, or when alpha2 has gone non-finite.
        end_condition = phi_a2 <= phi0 + c1 * alpha2 * derphi0
        end_condition = TT.bitwise_or(
            TT.isnan(alpha2), end_condition)
        end_condition = TT.bitwise_or(
            TT.isinf(alpha2), end_condition)
        # Guard against steps that shrink too slowly or too fast: fall back
        # to bisection of alpha1 in that case.
        alpha2 = TT.switch(
            TT.bitwise_or(alpha1 - alpha2 > alpha1 / constant(2.),
                          one - alpha2 / alpha1 < 0.96),
            alpha1 / constant(2.),
            alpha2)
        return [alpha1, alpha2, phi_a1, phi_a2], \
            theano.scan_module.until(end_condition)

    # scan states: (alpha_{k-1}, alpha_k, phi(alpha_{k-1}), phi(alpha_k)).
    states = []
    states += [TT.unbroadcast(TT.shape_padleft(alpha0), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(alpha1), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(phi_a0), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(phi_a1), 0)]
    # print 'armijo'
    rvals, _ = scan(
        armijo,
        states=states,
        n_steps=n_iters,
        name='armijo',
        mode=theano.Mode(linker='cvm'),
        profile=profile)

    sol_scan = rvals[1][0]
    # Prefer the earliest trial that already satisfied Armijo; otherwise
    # use the scan's final iterate.
    a_opt = ifelse(csol1, one,
                   ifelse(csol2, alpha1,
                          sol_scan))
    score = ifelse(csol1, phi_a0,
                   ifelse(csol2, phi_a1,
                          rvals[2][0]))
    return a_opt, score
def scalar_search_wolfe2(phi,
                         derphi,
                         phi0=None,
                         old_phi0=None,
                         derphi0=None,
                         n_iters=20,
                         c1=1e-4,
                         c2=0.9,
                         profile=False):
    """
    Find alpha that satisfies strong Wolfe conditions.

    alpha > 0 is assumed to be a descent direction.

    Parameters
    ----------
    phi : callable f(x)
        Objective scalar function.
    derphi : callable f'(x)
        Objective function derivative (can be None)
    phi0 : float, optional
        Value of phi at s=0
    old_phi0 : float, optional
        Value of phi at previous point
    derphi0 : float, optional
        Value of derphi at s=0
    n_iters : int
        Maximum number of bracketing iterations in the outer scan.
    c1 : float
        Parameter for Armijo condition rule.
    c2 : float
        Parameter for curvature condition rule.
    profile : flag (boolean)
        True if you want printouts of profiling information

    Returns
    -------
    alpha_star : float
        Best alpha
    phi_star : WRITEME
        phi at alpha_star
    phi0 : WRITEME
        phi at 0
    derphi_star : WRITEME
        derphi at alpha_star

    Notes
    -----
    Uses the line search algorithm to enforce strong Wolfe
    conditions. See Wright and Nocedal, 'Numerical Optimization',
    1999, pg. 59-60.

    For the zoom phase it uses an algorithm by [...].
    """
    # Fill in missing initial values symbolically.
    if phi0 is None:
        phi0 = phi(zero)
    else:
        phi0 = phi0

    if derphi0 is None and derphi is not None:
        derphi0 = derphi(zero)
    else:
        derphi0 = derphi0

    alpha0 = zero
    alpha0.name = 'alpha0'
    # Initial step guess: reuse the previous function decrease when known
    # (same heuristic as scipy), otherwise start from 1.
    if old_phi0 is not None:
        alpha1 = TT.minimum(one,
                            numpy.asarray(1.01, dtype=theano.config.floatX) *
                            numpy.asarray(2, dtype=theano.config.floatX) * \
                            (phi0 - old_phi0) / derphi0)
    else:
        old_phi0 = nan
        alpha1 = one

    alpha1 = TT.switch(alpha1 < zero, one, alpha1)
    alpha1.name = 'alpha1'

    # This shouldn't happen. Perhaps the increment has slipped below
    # machine precision?  For now, set the return variables skip the
    # useless while loop, and raise warnflag=2 due to possible imprecision.
    phi0 = TT.switch(TT.eq(alpha1, zero), old_phi0, phi0)
    # I need a lazyif for alpha1 == 0 !!!
    phi_a1 = ifelse(TT.eq(alpha1, zero), phi0,
                    phi(alpha1), name='phi_a1')
    phi_a1.name = 'phi_a1'

    phi_a0 = phi0
    phi_a0.name = 'phi_a0'
    derphi_a0 = derphi0
    derphi_a0.name = 'derphi_a0'
    # Make sure variables are tensors otherwise strange things happen
    c1 = TT.as_tensor_variable(c1)
    c2 = TT.as_tensor_variable(c2)
    maxiter = n_iters

    def while_search(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, i_t,
                     alpha_star, phi_star, derphi_star):
        # One bracketing step (Wright & Nocedal alg. 3.5): either zoom into
        # [alpha0, alpha1] (cond1), accept alpha1 (cond2), zoom into
        # [alpha1, alpha0] (cond3), or double the step and continue.
        derphi_a1 = derphi(alpha1)
        cond1 = TT.bitwise_or(phi_a1 > phi0 + c1 * alpha1 * derphi0,
                              TT.bitwise_and(phi_a1 >= phi_a0, i_t > zero))
        cond2 = abs(derphi_a1) <= -c2 * derphi0
        cond3 = derphi_a1 >= zero
        alpha_star_c1, phi_star_c1, derphi_star_c1 = \
            _zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0,
                  phi, derphi, phi0, derphi0, c1, c2,
                  profile=profile)
        alpha_star_c3, phi_star_c3, derphi_star_c3 = \
            _zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi,
                  derphi, phi0, derphi0, c1, c2,
                  profile=profile)
        nw_alpha1 = alpha1 * numpy.asarray(2, dtype=theano.config.floatX)
        nw_phi = phi(nw_alpha1)
        alpha_star, phi_star, derphi_star = \
            ifelse(cond1,
                   (alpha_star_c1, phi_star_c1, derphi_star_c1),
                   ifelse(cond2,
                          (alpha1, phi_a1, derphi_a1),
                          ifelse(cond3,
                                 (alpha_star_c3, phi_star_c3, derphi_star_c3),
                                 (nw_alpha1, nw_phi, nan),
                                 name='alphastar_c3'),
                          name='alphastar_c2'),
                   name='alphastar_c1')

        return ([alpha1,
                 nw_alpha1,
                 phi_a1,
                 ifelse(lazy_or('allconds',
                                cond1,
                                cond2,
                                cond3),
                        phi_a1,
                        nw_phi,
                        name='nwphi1'),
                 ifelse(cond1, derphi_a0, derphi_a1, name='derphi'),
                 i_t + one,
                 alpha_star,
                 phi_star,
                 derphi_star],
                theano.scan_module.scan_utils.until(
                    lazy_or('until_cond_',
                            TT.eq(nw_alpha1, zero),
                            cond1,
                            cond2,
                            cond3)))
    # Scan state layout (each padded to rank 1 for the scan interface):
    # previous alpha, current alpha, previous phi, current phi, previous
    # derphi, iteration counter, then the three accumulated results.
    states = []
    states += [TT.unbroadcast(TT.shape_padleft(alpha0), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(alpha1), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(phi_a0), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(phi_a1), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(derphi_a0), 0)]
    # i_t
    states += [TT.unbroadcast(TT.shape_padleft(zero), 0)]
    # alpha_star
    states += [TT.unbroadcast(TT.shape_padleft(zero), 0)]
    # phi_star
    states += [TT.unbroadcast(TT.shape_padleft(zero), 0)]
    # derphi_star
    states += [TT.unbroadcast(TT.shape_padleft(zero), 0)]
    # print 'while_search'
    outs, updates = scan(while_search,
                         states=states,
                         n_steps=maxiter,
                         name='while_search',
                         mode=theano.Mode(linker='cvm_nogc'),
                         profile=profile)
    # print 'done_while_search'
    out3 = outs[-3][0]
    out2 = outs[-2][0]
    out1 = outs[-1][0]
    alpha_star, phi_star, derphi_star = \
        ifelse(TT.eq(alpha1, zero),
               (nan, phi0, nan),
               (out3, out2, out1), name='main_alphastar')
    return alpha_star, phi_star, phi0, derphi_star
def _cubicmin(a, fa, fpa, b, fb, c, fc):
    """
    Finds the minimizer for a cubic polynomial that goes through the
    points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.

    If no minimizer can be found return None

    (Symbolically: returns nan in the degenerate cases, since theano
    graphs cannot return None.)

    Parameters
    ----------
    a : WRITEME
    fa : WRITEME
    fpa : WRITEME
    b : WRITEME
    fb : WRITEME
    c : WRITEME
    fc : WRITEME

    Returns
    -------
    WRITEME
    """
    # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
    a.name = 'a'
    fa.name = 'fa'
    fpa.name = 'fpa'
    fb.name = 'fb'
    fc.name = 'fc'

    C = fpa
    D = fa
    db = b - a
    dc = c - a
    # Solve the 2x2 linear system for A and B by Cramer's rule; the d1_*
    # terms are the entries of the (scaled) inverse matrix.
    denom = (db * dc) ** 2 * (db - dc)
    d1_00 = dc ** 2
    d1_01 = -db ** 2
    d1_10 = -dc ** 3
    d1_11 = db ** 3
    t1_0 = fb - fa - C * db
    t1_1 = fc - fa - C * dc
    A = d1_00 * t1_0 + d1_01 * t1_1
    B = d1_10 * t1_0 + d1_11 * t1_1
    A /= denom
    B /= denom

    radical = B * B - 3 * A * C
    radical.name = 'radical'
    db.name = 'db'
    dc.name = 'dc'
    b.name = 'b'
    c.name = 'c'
    A.name = 'A'
    #cond = TT.bitwise_or(radical < zero,
    #                     TT.bitwise_or(TT.eq(db,zero),
    #                                   TT.bitwise_or(TT.eq(dc,zero),
    #                                                 TT.bitwise_or(TT.eq(b, c),
    #                                                               TT.eq(A, zero)))))
    # Degenerate cases: complex roots, coincident sample points, or a
    # vanishing cubic coefficient.
    cond = lazy_or('cubicmin',
                   radical < zero,
                   TT.eq(db, zero),
                   TT.eq(dc, zero),
                   TT.eq(b, c),
                   TT.eq(A, zero))
    # Note: `lazy if` would make more sense, but it is not
    # implemented in C right now
    xmin = TT.switch(cond, constant(numpy.nan),
                     a + (-B + TT.sqrt(radical)) / (3 * A))
    return xmin
def _quadmin(a, fa, fpa, b, fb):
    """
    Finds the minimizer for a quadratic polynomial that goes through
    the points (a,fa), (b,fb) with derivative at a of fpa.

    Returns nan when the interpolant is degenerate (``b == a``) or has no
    minimum (``B <= 0``), mirroring scipy's ``_quadmin`` which returns
    None in those cases.

    Parameters
    ----------
    a : WRITEME
    fa : WRITEME
    fpa : WRITEME
    b : WRITEME
    fb : WRITEME

    Returns
    -------
    WRITEME
    """
    # f(x) = B*(x-a)^2 + C*(x-a) + D
    D = fa
    C = fpa
    db = b - a * one
    B = (fb - D - C * db) / (db * db)
    # Note : `lazy if` would make more sense, but it is not
    # implemented in C right now
    # BUG FIX: lazy_or's signature is lazy_or(name, *conditions); the old
    # call lazy_or(TT.eq(db, zero), B <= zero) consumed the db == 0 guard
    # as the *name* argument, silently reducing the test to just B <= 0.
    xmin = TT.switch(lazy_or('quadmin', TT.eq(db, zero), B <= zero),
                     nan,
                     a - C /
                     (numpy.asarray(2, dtype=theano.config.floatX) * B))
    return xmin
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
phi, derphi, phi0, derphi0, c1, c2,
n_iters=10,
profile=False):
"""
WRITEME
Part of the optimization algorithm in `scalar_search_wolfe2`.
Parameters
----------
a_lo : float
Step size
a_hi : float
Step size
phi_lo : float
Value of f at a_lo
phi_hi : float
Value of f at a_hi
derphi_lo : float
Value of derivative at a_lo
phi : callable
Generates computational graph
derphi : callable
Generates computational graph
phi0 : float
Value of f at 0
derphi0 : float
Value of the derivative at 0
c1 : float
Wolfe parameter
c2 : float
Wolfe parameter
profile : bool
True if you want printouts of profiling information
"""
# Function reprensenting the computations of one step of the while loop
def while_zoom(phi_rec, a_rec, a_lo, a_hi, phi_hi,
phi_lo, derphi_lo, a_star, val_star, valprime):
# interpolate to find a trial step length between a_lo and
# a_hi Need to choose interpolation here. Use cubic
# interpolation and then if the result is within delta *
# dalpha or outside of the interval bounded by a_lo or a_hi
# then use quadratic interpolation, if the result is still too
# close, then use bisection
dalpha = a_hi - a_lo
a = TT.switch(dalpha < zero, a_hi, a_lo)
b = TT.switch(dalpha < zero, a_lo, a_hi)
# minimizer of cubic interpolant
# (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
#
# if the result is too close to the end points (or out of the
# interval) then use quadratic interpolation with phi_lo,
# derphi_lo and phi_hi if the result is stil too close to the
# end points (or out of the interval) then use bisection
# cubic interpolation
cchk = delta1 * dalpha
a_j_cubic = _cubicmin(a_lo, phi_lo, derphi_lo,
a_hi, phi_hi, a_rec, phi_rec)
# quadric interpolation
qchk = delta2 * dalpha
a_j_quad = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
cond_q = lazy_or('condq',
TT.isnan(a_j_quad),
a_j_quad > b - qchk,
a_j_quad < a + qchk)
a_j_quad = TT.switch(cond_q, a_lo +
numpy.asarray(0.5, dtype=theano.config.floatX) * \
dalpha, a_j_quad)
# pick between the two ..
cond_c = lazy_or('condc',
TT.isnan(a_j_cubic),
TT.bitwise_or(a_j_cubic > b - cchk,
a_j_cubic < a + cchk))
# this lazy if actually decides if we need to run the quadric
# interpolation
a_j = TT.switch(cond_c, a_j_quad, a_j_cubic)
#a_j = ifelse(cond_c, a_j_quad, a_j_cubic)
# Check new value of a_j
phi_aj = phi(a_j)
derphi_aj = derphi(a_j)
stop = lazy_and('stop',
TT.bitwise_and(phi_aj <= phi0 + c1 * a_j * derphi0,
phi_aj < phi_lo),
abs(derphi_aj) <= -c2 * derphi0)
cond1 = TT.bitwise_or(phi_aj > phi0 + c1 * a_j * derphi0,
phi_aj >= phi_lo)
cond2 = derphi_aj * (a_hi - a_lo) >= zero
# Switches just make more sense here because they have a C
# implementation and they get composed
phi_rec = ifelse(cond1,
phi_hi,
TT.switch(cond2, phi_hi, phi_lo),
name='phi_rec')
a_rec = ifelse(cond1,
a_hi,
TT.switch(cond2, a_hi, a_lo),
name='a_rec')
a_hi = ifelse(cond1, a_j,
TT.switch(cond2, a_lo, a_hi),
name='a_hi')
phi_hi = ifelse(cond1, phi_aj,
TT.switch(cond2, phi_lo, phi_hi),
name='phi_hi')
a_lo = TT.switch(cond1, a_lo, a_j)
phi_lo = TT.switch(cond1, phi_lo, phi_aj)
derphi_lo = ifelse(cond1, derphi_lo, derphi_aj, name='derphi_lo')
a_star = a_j
val_star = phi_aj
valprime = ifelse(cond1, nan,
TT.switch(cond2, derphi_aj, nan), name='valprime')
return ([phi_rec,
a_rec,
a_lo,
a_hi,
phi_hi,
phi_lo,
derphi_lo,
a_star,
val_star,
valprime],
theano.scan_module.scan_utils.until(stop))
maxiter = n_iters
# cubic interpolant check
delta1 = TT.constant(numpy.asarray(0.2,
dtype=theano.config.floatX))
# quadratic interpolant check
delta2 = TT.constant(numpy.asarray(0.1,
dtype=theano.config.floatX))
phi_rec = phi0
a_rec = zero
# Initial iteration
dalpha = a_hi - a_lo
a = TT.switch(dalpha < zero, a_hi, a_lo)
b = TT.switch(dalpha < zero, a_lo, a_hi)
#a = ifelse(dalpha < 0, a_hi, a_lo)
#b = ifelse(dalpha < 0, a_lo, a_hi)
# minimizer of cubic interpolant
# (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
#
# if the result is too close to the end points (or out of the
# interval) then use quadratic interpolation with phi_lo,
# derphi_lo and phi_hi if the result is stil too close to the
# end points (or out of the interval) then use bisection
# quadric interpolation
qchk = delta2 * dalpha
a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
cond_q = lazy_or('mcond_q',
TT.isnan(a_j),
TT.bitwise_or(a_j > b - qchk,
a_j < a + qchk))
a_j = TT.switch(cond_q, a_lo +
numpy.asarray(0.5, dtype=theano.config.floatX) * \
dalpha, a_j)
# Check new value of a_j
phi_aj = phi(a_j)
derphi_aj = derphi(a_j)
cond1 = TT.bitwise_or(phi_aj > phi0 + c1 * a_j * derphi0,
phi_aj >= phi_lo)
cond2 = derphi_aj * (a_hi - a_lo) >= zero
# Switches just make more sense here because they have a C
# implementation and they get composed
phi_rec = ifelse(cond1,
phi_hi,
TT.switch(cond2, phi_hi, phi_lo),
name='mphirec')
a_rec = ifelse(cond1,
a_hi,
TT.switch(cond2, a_hi, a_lo),
name='marec')
a_hi = ifelse(cond1,
a_j,
TT.switch(cond2, a_lo, a_hi),
name='mahi')
phi_hi = ifelse(cond1,
phi_aj,
TT.switch(cond2, phi_lo, phi_hi),
name='mphihi')
onlyif = lazy_and('only_if',
TT.bitwise_and(phi_aj <= phi0 + c1 * a_j * derphi0,
phi_aj < phi_lo),
abs(derphi_aj) <= -c2 * derphi0)
a_lo = TT.switch(cond1, a_lo, a_j)
phi_lo = TT.switch(cond1, phi_lo, phi_aj)
derphi_lo = ifelse(cond1, derphi_lo, derphi_aj, name='derphi_lo_main')
phi_rec.name = 'phi_rec'
a_rec.name = 'a_rec'
a_lo.name = 'a_lo'
a_hi.name = 'a_hi'
phi_hi.name = 'phi_hi'
phi_lo.name = 'phi_lo'
derphi_lo.name = 'derphi_lo'
vderphi_aj = ifelse(cond1, nan, TT.switch(cond2, derphi_aj, nan),
name='vderphi_aj')
states = []
states += [TT.unbroadcast(TT.shape_padleft(phi_rec), 0)]
states += [TT.unbroadcast(TT.shape_padleft(a_rec), 0)]
states += [TT.unbroadcast(TT.shape_padleft(a_lo), 0)]
states += [TT.unbroadcast(TT.shape_padleft(a_hi), 0)]
states += [TT.unbroadcast(TT.shape_padleft(phi_hi), 0)]
states += [TT.unbroadcast(TT.shape_padleft(phi_lo), 0)]
states += [TT.unbroadcast(TT.shape_padleft(derphi_lo), 0)]
states += [TT.unbroadcast(TT.shape_padleft(zero), 0)]
states += [TT.unbroadcast(TT.shape_padleft(zero), 0)]
states += [TT.unbroadcast(TT.shape_padleft(zero), 0)]
# print'while_zoom'
outs, updates = scan(while_zoom,
states=states,
n_steps=maxiter,
name='while_zoom',
mode=theano.Mode(linker='cvm_nogc'),
profile=profile)
# print 'done_while'
a_star = ifelse(onlyif, a_j, outs[7][0], name='astar')
val_star = ifelse(onlyif, phi_aj, outs[8][0], name='valstar')
valprime = ifelse(onlyif, vderphi_aj, outs[9][0], name='valprime')
## WARNING !! I ignore updates given by scan which I should not do !!!
return a_star, val_star, valprime
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import curses
import sys
from gotask import GoTask
class Ui:
    """Curses front end for Google Tasks.

    Each ``build_*`` method draws one screen and runs its own blocking
    key loop; screens call each other directly, so the UI only exits
    through quit().
    """

    def __init__(self):
        # GoTask wraps the Google Tasks API calls used by every screen.
        self.gotask = GoTask()
        self.screen = curses.initscr()
        curses.start_color()
        # Pair 1: highlighted selection, pair 2: key legend lines,
        # pair 3: completed-task status line.
        curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
        curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
        self.screen.keypad(1)

    def build_tasklists(self, list_id=None, start=0, offset=(2, 4)):
        """Ui for displaying all user's tasklists.

        list_id -- when given, pre-select the tasklist with this id
                   (used when returning from another screen)
        start   -- index of the initially highlighted tasklist
        offset  -- (y, x) top-left drawing offset
        """
        curses.curs_set(0)
        curses.noecho()
        self.tasklists = self.gotask.list_tasklists()
        nb_tasklists = len(self.tasklists)
        if list_id is not None:
            # Re-highlight the list we came from (or just created).
            for i in range(nb_tasklists):
                if list_id == self.tasklists[i]['id']:
                    start = i
        opt = start
        offset_y, offset_x = offset
        select = -1
        # 'select' never changes: the loop only ends via quit() or by
        # recursing into another build_* screen.
        while select < 0:
            self.screen.clear()
            self.screen.addstr(offset_y, offset_x, 'Term - Google Task')
            self.screen.addstr(offset_y+2, offset_x, 'Please select one tasklist for more details...', curses.A_BOLD)
            for i in range(nb_tasklists):
                if i == opt:
                    self.screen.addstr(offset_y+i+4, offset_x, '-> ' + str(i+1) + '. ' + self.tasklists[i]['title'], curses.color_pair(1))
                else:
                    self.screen.addstr(offset_y+i+4, offset_x+3, str(i+1) + '. ' + self.tasklists[i]['title'])
            self.screen.addstr(offset_y+nb_tasklists+4, offset_x,
                    '(<Enter>: watch list, <r>: refresh list, <u>: update list name, <d>: delete list, <n>: new list..., <q>: quit)', curses.color_pair(2))
            self.screen.refresh()
            q = self.screen.getch()
            if q == curses.KEY_UP or q == ord('k'):  # KEY_UP or 'k' on vi/vim mode
                opt = (opt - 1) % nb_tasklists
            elif q == curses.KEY_DOWN or q == ord('j'):  # KEY_DOWN or 'j' on vi/vim mode
                opt = (opt + 1) % nb_tasklists
            elif q == ord('\n'):  # Watch a tasklist
                self.build_tasks(opt)
            elif q == ord('n'):  # New a tasklist
                self.new_tasklist(opt)
            elif q == ord('u'):  # Update the selected tasklist name
                self.rename_tasklist(opt)
            elif q == ord('r'):  # Refresh lists
                self.build_tasklists()
            elif q == ord('d'):  # Delete the selected tasklist
                tasklist_id = self.tasklists[opt]['id']
                self.gotask.del_tasklist(tasklist_id)
                self.build_tasklists()
            elif q == ord('q'):
                self.quit()
        curses.endwin()

    def build_tasks(self, list_num_selected, start=0, offset=(2, 4)):
        """Ui for displaying all tasks of the selected tasklist.

        list_num_selected -- index into self.tasklists
        start             -- index of the initially highlighted task
        offset            -- (y, x) top-left drawing offset
        """
        offset_y, offset_x = offset
        select = -1
        curses.curs_set(0)
        curses.noecho()
        tasklist_id = self.tasklists[list_num_selected]['id']
        tasklist_title = self.tasklists[list_num_selected]['title']
        tasks = self.gotask.list_tasks(tasklist_id)
        nb_tasks = len(tasks)
        opt = start
        # 'select' never changes: the loop only ends via quit() or by
        # recursing into another build_* screen.
        while select < 0:
            self.screen.clear()
            self.screen.addstr(offset_y, offset_x, 'Term - Google Task')
            self.screen.addstr(offset_y + 2, offset_x, 'Tasks of tasklist - ' + tasklist_title, curses.A_BOLD)
            if nb_tasks == 0:
                self.screen.addstr(offset_y + 4, offset_x, 'Sorry. The list is empty')
                self.screen.addstr(offset_y + 5, offset_x, '(<n>: new task, <b>: back to lists, <q>: quit)', curses.color_pair(2))
            else:
                delta = 0
                for i in range(nb_tasks):
                    # Short flags appended after the task title.
                    info = ''
                    if 'due' in tasks[i]:
                        info = info + ' [due]'
                    if 'notes' in tasks[i]:
                        info = info + ' [notes]'
                    info = info + ' [' + tasks[i]['status'] + ']'
                    # 'delta' indents a task under its parent: one level
                    # below a top-level parent, two below a sub-task.
                    if 'parent' in tasks[i]:
                        if tasks[i]['parent'] == tasks[i-1]['id'] and 'parent' not in tasks[i-1]:
                            delta = 1
                        if tasks[i]['parent'] != tasks[i-1]['id'] and 'parent' in tasks[i-1] and tasks[i]['parent'] != tasks[i-1]['parent']:
                            delta = 1
                        if tasks[i]['parent'] == tasks[i-1]['id'] and 'parent' in tasks[i-1]:
                            delta = 2
                    else:
                        delta = 0
                    if i == opt:
                        self.screen.addstr(offset_y + i + 4, offset_x + delta*2, '-> ' + str(i+1) + '. ' + tasks[i]['title'] + info, curses.color_pair(1))
                    else:
                        self.screen.addstr(offset_y + i + 4, offset_x + delta*2 + 3, str(i+1) + '. ' + tasks[i]['title'] + info)
                self.screen.addstr(offset_y + nb_tasks + 4, offset_x,
                        '(<Enter>: watch task, <n>: new task, <m>: mark as completed, <u>: unmark as completed, <w>: move up task, <s>: move down task, <e>: edit task, <c>: clear task, <d>: delete task, <b>: back to lists, <q>: quit)', curses.color_pair(2))
            self.screen.refresh()
            q = self.screen.getch()
            if nb_tasks > 0:
                if q == curses.KEY_DOWN or q == ord('j'):
                    opt = (opt + 1) % nb_tasks
                elif q == curses.KEY_UP or q == ord('k'):
                    opt = (opt - 1) % nb_tasks
                elif q == ord('\n'):  # Watch the selected task
                    self.build_task(tasks[opt], opt, list_num_selected)
                elif q == ord('d'):  # Delete the selected task
                    self.gotask.del_task(tasklist_id, tasks[opt]['id'])
                    self.build_tasks(list_num_selected)
                elif q == ord('m'):  # Mark as completed
                    self.gotask.complete_task(tasklist_id, tasks[opt])
                    self.build_tasks(list_num_selected, opt)
                elif q == ord('u'):  # Unmark as completed
                    self.gotask.uncomplete_task(tasklist_id, tasks[opt])
                    self.build_tasks(list_num_selected, opt)
                elif q == ord('w'):
                    self.move_up_task(tasks, opt, list_num_selected)
                elif q == ord('s'):
                    self.move_down_task(tasks, opt, list_num_selected)
                elif q == ord('e'):
                    # BUG FIX: this branch previously tested ord('u') a
                    # second time, so the <e> key advertised in the
                    # legend could never reach edit_task.
                    self.edit_task(tasks[opt], opt, list_num_selected)
                elif q == ord('c'):  # Clear completed tasks
                    self.gotask.clear_task(tasklist_id)
                    self.build_tasks(list_num_selected)
            # <n>, <b> and <q> must also work when the list is empty,
            # hence a separate 'if' outside the nb_tasks > 0 guard.
            if q == ord('n'):
                self.new_task(list_num_selected)
            elif q == ord('b'):
                self.build_tasklists(None, list_num_selected)
            elif q == ord('q'):
                self.quit()

    def move_up_task(self, tasks, task_num_selected, list_num_selected):
        """Move the selected task one position up in its tasklist."""
        if task_num_selected == 0:
            # Already at the top; nothing to do.
            pass
        elif task_num_selected == 1:
            # Moving to the first position: no predecessor to anchor on.
            self.gotask.move_task(self.tasklists[list_num_selected]['id'], tasks[task_num_selected]['id'])
            self.build_tasks(list_num_selected)
        else:
            # Re-insert the task after the one two slots above it.
            self.gotask.move_task(self.tasklists[list_num_selected]['id'], tasks[task_num_selected]['id'], tasks[task_num_selected-2]['id'])
            self.build_tasks(list_num_selected, task_num_selected-1)

    def move_down_task(self, tasks, task_num_selected, list_num_selected):
        """Move the selected task one position down in its tasklist."""
        if task_num_selected == len(tasks) - 1:
            # Already at the bottom; nothing to do.
            pass
        else:
            self.gotask.move_task(self.tasklists[list_num_selected]['id'], tasks[task_num_selected]['id'], tasks[task_num_selected+1]['id'])
            self.build_tasks(list_num_selected, task_num_selected+1)

    def build_task(self, task, task_num_selected, list_num_selected, offset=(2, 4)):
        """Ui for displaying the task details."""
        offset_y, offset_x = offset
        key = -1
        # 'temp' holds display strings with '<empty>' placeholders so the
        # underlying task dict is never modified.
        temp = dict()
        # 'key' never changes: the loop only ends via quit()/recursion.
        while key < 0:
            self.screen.clear()
            self.screen.addstr(offset_y, offset_x, 'Term - Google Task')
            self.screen.addstr(offset_y + 2, offset_x, 'Task details', curses.A_BOLD)
            if task['title'] == '':
                temp['title'] = '<empty>'
            else:
                temp['title'] = task['title']
            if 'due' not in task:
                temp['due'] = '<empty>'
            else:
                temp['due'] = task['due']
            if 'notes' not in task:
                temp['notes'] = '<empty>'
            else:
                temp['notes'] = task['notes']
            self.screen.addstr(offset_y + 4, offset_x, 'Title: ' + temp['title'])
            self.screen.addstr(offset_y + 5, offset_x, 'Due to: ' + temp['due'])
            self.screen.addstr(offset_y + 6, offset_x, 'Notes: ' + temp['notes'])
            if task['status'] == 'completed':
                self.screen.addstr(offset_y + 7, offset_x, 'Status: ' + task['status'] + ' (' + task['completed'] + ')', curses.color_pair(3))
            else:
                self.screen.addstr(offset_y + 7, offset_x, 'Status: ' + task['status'])
            self.screen.addstr(offset_y + 8, offset_x, '(<m>: mark as completed, <u>: unmark as completed, <e>: edit task, <b>: back to list, <q>: quit)', curses.color_pair(2))
            self.screen.refresh()
            q = self.screen.getch()
            if q == ord('m'):  # Mark as completed
                tasklist_id = self.tasklists[list_num_selected]['id']
                task = self.gotask.complete_task(tasklist_id, task)
                self.build_task(task, task_num_selected, list_num_selected)
            elif q == ord('u'):  # Unmark as completed
                tasklist_id = self.tasklists[list_num_selected]['id']
                task = self.gotask.uncomplete_task(tasklist_id, task)
                self.build_task(task, task_num_selected, list_num_selected)
            elif q == ord('e'):
                self.edit_task(task, task_num_selected, list_num_selected)
            elif q == ord('b'):
                self.build_tasks(list_num_selected, task_num_selected)
            elif q == ord('q'):
                self.quit()

    def rename_tasklist(self, select, offset=(2, 4)):
        """Ui for renaming a tasklist.

        An empty input leaves the title unchanged.
        """
        offset_y, offset_x = offset
        # Echo typed characters and show the cursor while reading input.
        curses.curs_set(1)
        curses.echo()
        self.screen.clear()
        self.screen.addstr(offset_y, offset_x, 'Term - Google Task')
        self.screen.addstr(offset_y+2, offset_x, 'Please rename the tasklist. Press <Enter> to return.', curses.A_BOLD)
        self.screen.addstr(offset_y+4, offset_x, 'old list name: ' + self.tasklists[select]['title'])
        self.screen.addstr(offset_y+5, offset_x, 'new list name: ')
        self.screen.refresh()
        new_title = self.screen.getstr()
        if new_title != '':
            self.gotask.rename_tasklist(self.tasklists[select], new_title)
        self.build_tasklists(None, select)

    def new_tasklist(self, select, offset=(2, 4)):
        """Ui for creating a new tasklist.

        An empty input creates nothing.
        """
        offset_y, offset_x = offset
        curses.curs_set(1)
        curses.echo()
        self.screen.clear()
        self.screen.addstr(offset_y, offset_x, 'Term - Google Task')
        self.screen.addstr(offset_y + 2, offset_x, 'Please give a name to the new list. Press <Enter> to return.', curses.A_BOLD)
        self.screen.addstr(offset_y + 4, offset_x, 'new list name: ')
        self.screen.refresh()
        new_title = self.screen.getstr()
        new_tasklist_id = None
        if new_title != '':
            new_tasklist_id = self.gotask.new_tasklist(new_title)
        # Pass the new id so the created list gets highlighted.
        self.build_tasklists(new_tasklist_id, select)

    def new_task(self, list_num_selected, offset=(2, 4)):
        """Ui for creating a new task (title, optional due date, notes)."""
        offset_y, offset_x = offset
        curses.curs_set(1)
        curses.echo()
        # 'opt' steps through the three input fields one per redraw.
        opt = 0
        title = ''
        due_to = ''
        notes = ''
        while opt < 3:
            self.screen.clear()
            self.screen.addstr(offset_y, offset_x, 'Term - Google Task')
            self.screen.addstr(offset_y + 2, offset_x, 'Create a new task', curses.A_BOLD)
            self.screen.addstr(offset_y + 4, offset_x, 'Title: ' + title)
            self.screen.addstr(offset_y + 5, offset_x, 'Due to (YYYY-MM-DD): ' + due_to)
            self.screen.addstr(offset_y + 6, offset_x, 'Notes: ' + notes)
            self.screen.refresh()
            if opt == 0:
                title = self.screen.getstr(offset_y + 4, offset_x + 7)
            elif opt == 1:
                due_to = self.screen.getstr(offset_y + 5, offset_x + 21)
            elif opt == 2:
                notes = self.screen.getstr(offset_y + 6, offset_x + 7)
            opt += 1
        tasklist_id = self.tasklists[list_num_selected]['id']
        task = dict()
        task['title'] = title
        if due_to != '':
            # The API expects an RFC 3339 timestamp; noon UTC is used.
            task['due'] = due_to + 'T12:00:00.000Z'
        if notes != '':
            task['notes'] = notes
        self.gotask.new_task(tasklist_id, task)
        self.build_tasks(list_num_selected)

    def edit_task(self, task, task_num_selected, list_num_selected, offset=(2, 4)):
        """Ui for updating a task.

        Shows the current values, then reads replacement title, due date
        and notes. Empty due/notes inputs remove those fields.
        """
        offset_y, offset_x = offset
        curses.curs_set(1)
        curses.echo()
        # 'temp' holds display strings with '<empty>' placeholders.
        temp = dict()
        # 'opt' steps through the three input fields one per redraw.
        opt = 0
        title = ''
        due_to = ''
        notes = ''
        while opt < 3:
            self.screen.clear()
            self.screen.addstr(offset_y, offset_x, 'Term - Google Task')
            self.screen.addstr(offset_y + 2, offset_x, 'Modify task', curses.A_BOLD)
            if task['title'] == '':
                temp['title'] = '<empty>'
            else:
                temp['title'] = task['title']
            if 'due' not in task:
                temp['due'] = '<empty>'
            else:
                temp['due'] = task['due']
            if 'notes' not in task:
                temp['notes'] = '<empty>'
            else:
                temp['notes'] = task['notes']
            self.screen.addstr(offset_y + 4, offset_x, 'Title: ' + temp['title'])
            self.screen.addstr(offset_y + 5, offset_x, 'Due to: ' + temp['due'])
            self.screen.addstr(offset_y + 6, offset_x, 'Notes: ' + temp['notes'])
            self.screen.addstr(offset_y + 8, offset_x, 'Title: ' + title)
            self.screen.addstr(offset_y + 9, offset_x, 'Due to (YYYY-MM-DD): ' + due_to)
            self.screen.addstr(offset_y + 10, offset_x, 'Notes: ' + notes)
            self.screen.refresh()
            if opt == 0:
                title = self.screen.getstr(offset_y + 8, offset_x + 7)
            elif opt == 1:
                due_to = self.screen.getstr(offset_y + 9, offset_x + 21)
            elif opt == 2:
                notes = self.screen.getstr(offset_y + 10, offset_x + 7)
            opt += 1
        tasklist_id = self.tasklists[list_num_selected]['id']
        task['title'] = title
        if due_to != '':
            # The API expects an RFC 3339 timestamp; noon UTC is used.
            task['due'] = due_to + 'T12:00:00.000Z'
        else:
            task.pop('due', None)
        if notes != '':
            task['notes'] = notes
        else:
            task.pop('notes', None)
        self.gotask.update_task(tasklist_id, task)
        self.build_task(task, task_num_selected, list_num_selected)

    def quit(self):
        """Restore the terminal and exit the program."""
        curses.endwin()
        sys.exit(0)
if __name__ == '__main__':
    # Entry point: open the task-list overview screen.
    Ui().build_tasklists()
| |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2013-2015 thewizardplusplus <thewizardplusplus@yandex.ru>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Add-on metadata. Blender's add-on manager reads this dict, so it must
# remain a plain literal assignment named bl_info.
bl_info = {
    "name": "Export to Anna Object (*.ao)",
    "author": "thewizardplusplus",
    "version": (1, 0, 0),
    "blender": (2, 67, 0),  # minimum supported Blender version
    "location": "File > Export > Anna Object (*.ao)",
    "description": "Export mesh objects to Anna Object (*.ao)",
    "warning": "",
    "wiki_url": "",
    "tracker_url": "",
    "category": "Import-Export"}
import bpy
import mathutils
from bpy_extras.io_utils import ExportHelper
from bpy.props import *
import time
import platform
import math
class InvalidObjectDataError(Exception):
    """Raised when a mesh object cannot be exported (wrong rotation
    order, missing material/uv-data/texture, bad transparency type)."""
class ErrorDialogOperator(bpy.types.Operator):
    bl_idname = "error.dialog"
    bl_label = "Error!"
    # Text shown in the popup; callers set it via
    # bpy.ops.error.dialog("INVOKE_DEFAULT", message = ...).
    message = StringProperty()
    def execute(self, context):
        # Echo the message into Blender's report/info log as well.
        self.report({ "INFO" }, self.message)
        return { "FINISHED" }
    def invoke(self, context, event):
        # Show the operator as a popup instead of executing immediately.
        return context.window_manager.invoke_popup(self, width = 320, height = \
            240)
    def draw(self, context):
        # Popup layout: an error icon header followed by the message.
        layout = self.layout
        layout.label(text = "Error!", icon = "ERROR")
        layout.label(self.message)
class Key:
    """One animation key: a single transformation (position, rotation or
    scale) of one mesh at one frame."""

    # Class-level defaults; every instance overwrites them in __init__.
    index_of_mesh = None
    frame = None
    type = None
    transformation = None

    def __init__(self, index_of_mesh, frame, type, transformation):
        """Store the key data verbatim."""
        self.index_of_mesh, self.frame = index_of_mesh, frame
        self.type, self.transformation = type, transformation
class AnnaObjectExport(bpy.types.Operator, ExportHelper):
    """ Export mesh objects to Anna Object (*.ao). """

    bl_idname = "export_mesh.ao"
    bl_label = "Anna Object (*.ao)"
    filename_ext = ".ao"
    selected_only = BoolProperty(name = "Selected only", description = \
        "Export only selected objects", default = False)
    export_animation = BoolProperty(name = "Export animation", description = \
        "Export animation data", default = True)

    def execute(self, context):
        """Run the export and report the elapsed time on the console."""
        print("\nStart export mesh objects to Anna Object (*.ao)...")
        start_time = time.time()
        filepath = bpy.path.ensure_ext(self.filepath, self.filename_ext)
        exported = self._export(context, filepath)
        if exported:
            print("Export finished in {0} seconds to \"{1}\".\n".format(
                time.time() - start_time, filepath))
        else:
            print("Export failed.\n")
        return { "FINISHED" }

    def invoke(self, context, event):
        """Open the file selector; execute() runs once a file is chosen."""
        # The original method carried dead `elif True/elif False`
        # alternatives (search popup, redo popup, direct execute); only
        # this file-selector path was ever reachable.
        context.window_manager.fileselect_add(self)
        return { "RUNNING_MODAL" }

    def _export(self, context, filepath):
        """Build the complete *.ao description and write it to filepath.

        Returns True on success; returns False (after showing an error
        popup) when an object fails validation in _getObjectData().
        """
        description = ""
        try:
            scene = context.scene
            current_frame = scene.frame_current
            # Sample the base transforms at the scene's first frame.
            scene.frame_set(scene.frame_start)
            animation_keys = []
            number_of_object = 0
            if not self.properties.selected_only:
                objects = bpy.data.objects
            else:
                objects = context.selected_objects
            for object in [object for object in objects
                    if object.type == "MESH"]:
                object_description, object_animation_keys = \
                    self._getObjectData(context, object, number_of_object)
                description += object_description
                animation_keys.extend(object_animation_keys)
                number_of_object += 1
            # Restore the frame the user was on before the export.
            scene.frame_set(current_frame)
            begin_of_description = "object:\n"
            begin_of_description += "\tmeshes:\n"
            begin_of_description += "\t\tnumber: " + str(number_of_object) \
                + "\n"
            description = begin_of_description + description
            end_of_description = "\tanimation_keys:\n"
            end_of_description += "\t\tnumber: " + str(len(animation_keys)) \
                + "\n"
            for key in animation_keys:
                end_of_description += "\t\tkey:\n"
                end_of_description += "\t\t\tindex_of_mesh: " + str(key. \
                    index_of_mesh) + "\n"
                end_of_description += "\t\t\tframe: " + str(key.frame) + "\n"
                end_of_description += "\t\t\ttype: " + str(key.type) + "\n"
                end_of_description += "\t\t\ttransformation: " + str(key. \
                    transformation.x) + " " + str(key.transformation.y) + \
                    " " + str(key.transformation.z) + "\n"
            description += end_of_description
        except InvalidObjectDataError as exception:
            # Log the raw message, then show a cleaned-up popup version.
            message = str(exception)
            print(message)
            message = message.replace("\tError: ", "")
            message = message.capitalize()
            bpy.ops.error.dialog("INVOKE_DEFAULT", message = message)
            return False
        # Context manager closes the file even if the write raises
        # (the original left the handle open on failure).
        with open(filepath, "w") as file:
            file.write(description)
        return True

    def _getObjectData(self, context, object, number_of_object):
        """Serialize one mesh object.

        Returns a (description, animation_keys) tuple: the textual mesh
        section and a list of Key objects (empty unless animation export
        is enabled and the object has animation data).

        Raises InvalidObjectDataError when the object fails validation.
        """
        name = object.name
        # The format stores Euler angles and assumes XYZ order.
        if object.rotation_euler.order != "XYZ":
            raise InvalidObjectDataError("\tError: invalid rotation order of " \
                + "the object \"" + name + "\" (must be \"XYZ\").")
        material = object.active_material
        if not material:
            raise InvalidObjectDataError("\tError: the object \"" + name + \
                "\" hasn't material.")
        use_transparency = material.use_transparency
        transparency_method = material.transparency_method
        if use_transparency and (transparency_method != "MASK" and \
                transparency_method != "Z_TRANSPARENCY"):
            raise InvalidObjectDataError("\tError: the object \"" + name + \
                "\" has invalid transparency type (must by \"Mask\" or \"Z " + \
                "Transparency\").")
        mesh = object.data
        if not mesh.uv_textures:
            raise InvalidObjectDataError("\tError: the object \"" + name + \
                "\" hasn't uv-data.")
        object_transformations = object.matrix_world
        description = ""
        description += "\t\tmesh:\n"
        position = object_transformations.to_translation()
        description += "\t\t\tposition: " + str(position.x) + " " + str( \
            position.y) + " " + str(position.z) + "\n"
        # Rotation is exported in degrees.
        rotation = mathutils.Vector((0.0, 0.0, 0.0))
        euler_angles = object_transformations.to_euler("XYZ")
        rotation.x = math.degrees(euler_angles.x)
        rotation.y = math.degrees(euler_angles.y)
        rotation.z = math.degrees(euler_angles.z)
        description += "\t\t\trotation: " + str(rotation.x) + " " + str( \
            rotation.y) + " " + str(rotation.z) + "\n"
        scale = object_transformations.to_scale()
        description += "\t\t\tscale: " + str(scale.x) + " " + str(scale.y) + \
            " " + str(scale.z) + "\n"
        # Only the first file-based image texture is exported.
        texture_slots = material.texture_slots
        image_textures = [texture_slots[key].texture for key in texture_slots. \
            keys() if texture_slots[key].texture.type == "IMAGE"]
        image_files = [texture.image.filepath for texture in image_textures if \
            getattr(texture.image, "source", "") == "FILE"]
        if not image_files:
            raise InvalidObjectDataError("\tError: the object \"" + name + \
                "\" hasn't image texture.")
        # Strip Blender's relative-path prefix from the texture path.
        texture = image_files[0].replace("//", "")
        description += "\t\t\tmaterial:\n"
        description += "\t\t\t\ttexture: " + texture + "\n"
        description += "\t\t\t\ttransparency_type: " + ("NONE" if not \
            use_transparency else "ALPHA_TEST" if transparency_method == \
            "MASK" else "BLENDING") + "\n"
        description += "\t\t\t\tallow_ambient_light: " + str(material.ambient \
            == 1.0).lower() + "\n"
        description += "\t\t\t\tallow_fog: " + str(material.use_mist).lower() \
            + "\n"
        vertices = []
        for polygon in mesh.polygons:
            positions = []
            for position in [mesh.vertices[index] for index in polygon. \
                    vertices]:
                positions.append(position.co)
            uvs = []
            for uv in [mesh.uv_layers.active.data[index] for index in polygon. \
                    loop_indices]:
                uvs.append(uv.uv)
            zipped = list(zip(positions, uvs))
            if len(zipped) == 4:
                # Triangulate quads: 0-1-2 and 2-3-0.
                zipped = [zipped[0], zipped[1], zipped[2], zipped[2], zipped[ \
                    3], zipped[0]]
            vertices.extend(zipped)
        description += "\t\t\tvertices:\n"
        description += "\t\t\t\tnumber: " + str(len(vertices)) + "\n"
        for vertex in vertices:
            description += "\t\t\t\tvertex:\n"
            description += "\t\t\t\t\tposition: " + str(vertex[0].x) + " " + \
                str(vertex[0].y) + " " + str(vertex[0].z) + "\n"
            description += "\t\t\t\t\tuv: " + str(vertex[1].x) + " " + str( \
                vertex[1].y) + "\n"
        animation_keys = []
        if self.properties.export_animation and object.animation_data is not \
                None:
            scene = context.scene
            # NOTE(review): range() stops before scene.frame_end, so the
            # last frame is never sampled -- confirm this is intended.
            for frame in range(scene.frame_start, scene.frame_end):
                scene.frame_set(frame)
                object_transformations = object.matrix_world
                key = Key(number_of_object, frame, "POSITION", \
                    object_transformations.to_translation())
                animation_keys.append(key)
                rotation = mathutils.Vector((0.0, 0.0, 0.0))
                euler_angles = object_transformations.to_euler("XYZ")
                rotation.x = math.degrees(euler_angles.x)
                rotation.y = math.degrees(euler_angles.y)
                rotation.z = math.degrees(euler_angles.z)
                key = Key(number_of_object, frame, "ROTATION", rotation)
                animation_keys.append(key)
                key = Key(number_of_object, frame, "SCALE", \
                    object_transformations.to_scale())
                animation_keys.append(key)
        return description, animation_keys
def menu_func(self, context):
    """Add the exporter entry to Blender's File > Export menu."""
    label = "Anna Object " + "(*.ao)"
    self.layout.operator(AnnaObjectExport.bl_idname, text = label)
def register():
    """Register the add-on's classes and hook the export-menu entry."""
    bpy.utils.register_class(ErrorDialogOperator)
    # register_module picks up the remaining classes defined here.
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
    """Undo register(), in the reverse order of registration."""
    bpy.types.INFO_MT_file_export.remove(menu_func)
    bpy.utils.unregister_module(__name__)
    bpy.utils.unregister_class(ErrorDialogOperator)
# Allow running the script directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| |
# -*- coding: utf-8 -*-
# Opcodes in Csound 6.05 from
# csound --list-opcodes
# except
# cggoto <http://www.csounds.com/manual/html/cggoto.html>
# cigoto <http://www.csounds.com/manual/html/cigoto.html>
# cingoto (undocumented)
# ckgoto <http://www.csounds.com/manual/html/ckgoto.html>
# cngoto <http://www.csounds.com/manual/html/cngoto.html>
# endin <http://www.csounds.com/manual/html/endin.html>
# endop <http://www.csounds.com/manual/html/endop.html>
# goto <http://www.csounds.com/manual/html/goto.html>
# igoto <http://www.csounds.com/manual/html/igoto.html>
# instr <http://www.csounds.com/manual/html/instr.html>
# kgoto <http://www.csounds.com/manual/html/kgoto.html>
# loop_ge <http://www.csounds.com/manual/html/loop_ge.html>
# loop_gt <http://www.csounds.com/manual/html/loop_gt.html>
# loop_le <http://www.csounds.com/manual/html/loop_le.html>
# loop_lt <http://www.csounds.com/manual/html/loop_lt.html>
# opcode <http://www.csounds.com/manual/html/opcode.html>
# return <http://www.csounds.com/manual/html/return.html>
# rigoto <http://www.csounds.com/manual/html/rigoto.html>
# tigoto <http://www.csounds.com/manual/html/tigoto.html>
# timout <http://www.csounds.com/manual/html/timout.html>
# which are treated as keywords; the scoreline opcodes
# scoreline <http://www.csounds.com/manual/html/scoreline.html>
# scoreline_i <http://www.csounds.com/manual/html/scoreline_i.html>
# which allow Csound Score highlighting; the pyrun opcodes
# <http://www.csounds.com/manual/html/pyrun.html>
# pylrun
# pylruni
# pylrunt
# pyrun
# pyruni
# pyrunt
# which allow Python highlighting; and the Lua opcodes
# lua_exec <http://www.csounds.com/manual/html/lua_exec.html>
# lua_opdef <http://www.csounds.com/manual/html/lua_opdef.html>
# which allow Lua highlighting.
OPCODES = set((
'ATSadd',
'ATSaddnz',
'ATSbufread',
'ATScross',
'ATSinfo',
'ATSinterpread',
'ATSpartialtap',
'ATSread',
'ATSreadnz',
'ATSsinnoi',
'FLbox',
'FLbutBank',
'FLbutton',
'FLcloseButton',
'FLcolor',
'FLcolor2',
'FLcount',
'FLexecButton',
'FLgetsnap',
'FLgroup',
'FLgroupEnd',
'FLgroup_end',
'FLhide',
'FLhvsBox',
'FLhvsBoxSetValue',
'FLjoy',
'FLkeyIn',
'FLknob',
'FLlabel',
'FLloadsnap',
'FLmouse',
'FLpack',
'FLpackEnd',
'FLpack_end',
'FLpanel',
'FLpanelEnd',
'FLpanel_end',
'FLprintk',
'FLprintk2',
'FLroller',
'FLrun',
'FLsavesnap',
'FLscroll',
'FLscrollEnd',
'FLscroll_end',
'FLsetAlign',
'FLsetBox',
'FLsetColor',
'FLsetColor2',
'FLsetFont',
'FLsetPosition',
'FLsetSize',
'FLsetSnapGroup',
'FLsetText',
'FLsetTextColor',
'FLsetTextSize',
'FLsetTextType',
'FLsetVal',
'FLsetVal_i',
'FLsetVali',
'FLsetsnap',
'FLshow',
'FLslidBnk',
'FLslidBnk2',
'FLslidBnk2Set',
'FLslidBnk2Setk',
'FLslidBnkGetHandle',
'FLslidBnkSet',
'FLslidBnkSetk',
'FLslider',
'FLtabs',
'FLtabsEnd',
'FLtabs_end',
'FLtext',
'FLupdate',
'FLvalue',
'FLvkeybd',
'FLvslidBnk',
'FLvslidBnk2',
'FLxyin',
'MixerClear',
'MixerGetLevel',
'MixerReceive',
'MixerSend',
'MixerSetLevel',
'MixerSetLevel_i',
'OSCinit',
'OSClisten',
'OSCsend',
'a',
'abs',
'active',
'adsr',
'adsyn',
'adsynt',
'adsynt2',
'aftouch',
'alpass',
'alwayson',
'ampdb',
'ampdbfs',
'ampmidi',
'ampmidid',
'areson',
'aresonk',
'array',
'atone',
'atonek',
'atonex',
'babo',
'balance',
'bamboo',
'barmodel',
'bbcutm',
'bbcuts',
'betarand',
'bexprnd',
'bformdec',
'bformdec1',
'bformenc',
'bformenc1',
'binit',
'biquad',
'biquada',
'birnd',
'bqrez',
'buchla',
'butbp',
'butbr',
'buthp',
'butlp',
'butterbp',
'butterbr',
'butterhp',
'butterlp',
'button',
'buzz',
'c2r',
'cabasa',
'cauchy',
'cauchyi',
'ceil',
'cell',
'cent',
'centroid',
'ceps',
#'cggoto',
'chanctrl',
'changed',
'chani',
'chano',
'chebyshevpoly',
'checkbox',
'chn_S',
'chn_a',
'chn_k',
'chnclear',
'chnexport',
'chnget',
'chnmix',
'chnparams',
'chnset',
'chuap',
#'cigoto',
#'cingoto',
#'ckgoto',
'clear',
'clfilt',
'clip',
'clockoff',
'clockon',
'cmplxprod',
#'cngoto',
'comb',
'combinv',
'compilecsd',
'compileorc',
'compilestr',
'compress',
'connect',
'control',
'convle',
'convolve',
'copy2ftab',
'copy2ttab',
'copya2ftab',
'copyf2array',
'cos',
'cosh',
'cosinv',
'cosseg',
'cossegb',
'cossegr',
'cps2pch',
'cpsmidi',
'cpsmidib',
'cpsmidinn',
'cpsoct',
'cpspch',
'cpstmid',
'cpstun',
'cpstuni',
'cpsxpch',
'cpuprc',
'cross2',
'crossfm',
'crossfmi',
'crossfmpm',
'crossfmpmi',
'crosspm',
'crosspmi',
'crunch',
'ctlchn',
'ctrl14',
'ctrl21',
'ctrl7',
'ctrlinit',
'cuserrnd',
'dam',
'date',
'dates',
'db',
'dbamp',
'dbfsamp',
'dcblock',
'dcblock2',
'dconv',
'delay',
'delay1',
'delayk',
'delayr',
'delayw',
'deltap',
'deltap3',
'deltapi',
'deltapn',
'deltapx',
'deltapxw',
'denorm',
'diff',
'diskgrain',
'diskin',
'diskin2',
'dispfft',
'display',
'distort',
'distort1',
'divz',
'doppler',
'downsamp',
'dripwater',
'dumpk',
'dumpk2',
'dumpk3',
'dumpk4',
'duserrnd',
'dust',
'dust2',
#'endin',
#'endop',
'envlpx',
'envlpxr',
'ephasor',
'eqfil',
'evalstr',
'event',
'event_i',
'exciter',
'exitnow',
'exp',
'expcurve',
'expon',
'exprand',
'exprandi',
'expseg',
'expsega',
'expsegb',
'expsegba',
'expsegr',
'fareylen',
'fareyleni',
'faustaudio',
'faustcompile',
'faustctl',
'faustgen',
'fft',
'fftinv',
'ficlose',
'filebit',
'filelen',
'filenchnls',
'filepeak',
'filesr',
'filevalid',
'fillarray',
'filter2',
'fin',
'fini',
'fink',
'fiopen',
'flanger',
'flashtxt',
'flooper',
'flooper2',
'floor',
'fluidAllOut',
'fluidCCi',
'fluidCCk',
'fluidControl',
'fluidEngine',
'fluidLoad',
'fluidNote',
'fluidOut',
'fluidProgramSelect',
'fluidSetInterpMethod',
'fmb3',
'fmbell',
'fmmetal',
'fmpercfl',
'fmrhode',
'fmvoice',
'fmwurlie',
'fof',
'fof2',
'fofilter',
'fog',
'fold',
'follow',
'follow2',
'foscil',
'foscili',
'fout',
'fouti',
'foutir',
'foutk',
'fprintks',
'fprints',
'frac',
'fractalnoise',
'freeverb',
'ftchnls',
'ftconv',
'ftcps',
'ftfree',
'ftgen',
'ftgenonce',
'ftgentmp',
'ftlen',
'ftload',
'ftloadk',
'ftlptim',
'ftmorf',
'ftresize',
'ftresizei',
'ftsave',
'ftsavek',
'ftsr',
'gain',
'gainslider',
'gauss',
'gaussi',
'gausstrig',
'gbuzz',
'genarray',
'genarray_i',
'gendy',
'gendyc',
'gendyx',
'getcfg',
'getcol',
'getrow',
'gogobel',
#'goto',
'grain',
'grain2',
'grain3',
'granule',
'guiro',
'harmon',
'harmon2',
'harmon3',
'harmon4',
'hdf5read',
'hdf5write',
'hilbert',
'hrtfearly',
'hrtfer',
'hrtfmove',
'hrtfmove2',
'hrtfreverb',
'hrtfstat',
'hsboscil',
'hvs1',
'hvs2',
'hvs3',
'i',
'iceps',
#'igoto',
'ihold',
'imagecreate',
'imagefree',
'imagegetpixel',
'imageload',
'imagesave',
'imagesetpixel',
'imagesize',
'in',
'in32',
'inch',
'inh',
'init',
'initc14',
'initc21',
'initc7',
'inleta',
'inletf',
'inletk',
'inletkid',
'inletv',
'ino',
'inq',
'inrg',
'ins',
'insglobal',
'insremot',
#'instr',
'int',
'integ',
'interp',
'invalue',
'inx',
'inz',
'jitter',
'jitter2',
'jspline',
'k',
#'kgoto',
'ktableseg',
'lenarray',
'lentab',
'lfo',
'limit',
'line',
'linen',
'linenr',
'lineto',
'linrand',
'linseg',
'linsegb',
'linsegr',
'locsend',
'locsig',
'log',
'log10',
'log2',
'logbtwo',
'logcurve',
#'loop_ge',
#'loop_gt',
#'loop_le',
#'loop_lt',
'loopseg',
'loopsegp',
'looptseg',
'loopxseg',
'lorenz',
'loscil',
'loscil3',
'loscilx',
'lowpass2',
'lowres',
'lowresx',
'lpf18',
'lpform',
'lpfreson',
'lphasor',
'lpinterp',
'lposcil',
'lposcil3',
'lposcila',
'lposcilsa',
'lposcilsa2',
'lpread',
'lpreson',
'lpshold',
'lpsholdp',
'lpslot',
#'lua_exec',
'lua_ikopcall',
#'lua_opdef',
'mac',
'maca',
'madsr',
'mags',
'mandel',
'mandol',
'maparray',
'maparray_i',
'marimba',
'massign',
'max',
'max_k',
'maxabs',
'maxabsaccum',
'maxaccum',
'maxalloc',
'maxarray',
'maxtab',
'mclock',
'mdelay',
'median',
'mediank',
'metro',
'midglobal',
'midic14',
'midic21',
'midic7',
'midichannelaftertouch',
'midichn',
'midicontrolchange',
'midictrl',
'mididefault',
'midifilestatus',
'midiin',
'midinoteoff',
'midinoteoncps',
'midinoteonkey',
'midinoteonoct',
'midinoteonpch',
'midion',
'midion2',
'midiout',
'midipgm',
'midipitchbend',
'midipolyaftertouch',
'midiprogramchange',
'miditempo',
'midremot',
'min',
'minabs',
'minabsaccum',
'minaccum',
'minarray',
'mincer',
'mintab',
'mirror',
'mode',
'modmatrix',
'monitor',
'moog',
'moogladder',
'moogvcf',
'moogvcf2',
'moscil',
'mp3bitrate',
'mp3in',
'mp3len',
'mp3nchnls',
'mp3sr',
'mpulse',
'mrtmsg',
'multitap',
'mute',
'mxadsr',
'nestedap',
'nlalp',
'nlfilt',
'nlfilt2',
'noise',
'noteoff',
'noteon',
'noteondur',
'noteondur2',
'notnum',
'nreverb',
'nrpn',
'nsamp',
'nstance',
'nstrnum',
'ntrpol',
'octave',
'octcps',
'octmidi',
'octmidib',
'octmidinn',
'octpch',
#'opcode',
'oscbnk',
'oscil',
'oscil1',
'oscil1i',
'oscil3',
'oscili',
'oscilikt',
'osciliktp',
'oscilikts',
'osciln',
'oscils',
'oscilx',
'out',
'out32',
'outc',
'outch',
'outh',
'outiat',
'outic',
'outic14',
'outipat',
'outipb',
'outipc',
'outkat',
'outkc',
'outkc14',
'outkpat',
'outkpb',
'outkpc',
'outleta',
'outletf',
'outletk',
'outletkid',
'outletv',
'outo',
'outq',
'outq1',
'outq2',
'outq3',
'outq4',
'outrg',
'outs',
'outs1',
'outs2',
'outvalue',
'outx',
'outz',
'p',
'pan',
'pan2',
'pareq',
'partials',
'partikkel',
'partikkelget',
'partikkelset',
'partikkelsync',
'passign',
'pcauchy',
'pchbend',
'pchmidi',
'pchmidib',
'pchmidinn',
'pchoct',
'pconvolve',
'pcount',
'pdclip',
'pdhalf',
'pdhalfy',
'peak',
'pgmassign',
'pgmchn',
'phaser1',
'phaser2',
'phasor',
'phasorbnk',
'phs',
'pindex',
'pinker',
'pinkish',
'pitch',
'pitchac',
'pitchamdf',
'planet',
'platerev',
'plltrack',
'pluck',
'poisson',
'pol2rect',
'polyaft',
'polynomial',
'pop',
'pop_f',
'port',
'portk',
'poscil',
'poscil3',
'pow',
'powershape',
'powoftwo',
'prealloc',
'prepiano',
'print',
'print_type',
'printf',
'printf_i',
'printk',
'printk2',
'printks',
'printks2',
'prints',
'product',
'pset',
'ptable',
'ptable3',
'ptablei',
'ptableiw',
'ptablew',
'ptrack',
'push',
'push_f',
'puts',
'pvadd',
'pvbufread',
'pvcross',
'pvinterp',
'pvoc',
'pvread',
'pvs2array',
'pvs2tab',
'pvsadsyn',
'pvsanal',
'pvsarp',
'pvsbandp',
'pvsbandr',
'pvsbin',
'pvsblur',
'pvsbuffer',
'pvsbufread',
'pvsbufread2',
'pvscale',
'pvscent',
'pvsceps',
'pvscross',
'pvsdemix',
'pvsdiskin',
'pvsdisp',
'pvsenvftw',
'pvsfilter',
'pvsfread',
'pvsfreeze',
'pvsfromarray',
'pvsftr',
'pvsftw',
'pvsfwrite',
'pvsgain',
'pvsgendy',
'pvshift',
'pvsifd',
'pvsin',
'pvsinfo',
'pvsinit',
'pvslock',
'pvsmaska',
'pvsmix',
'pvsmooth',
'pvsmorph',
'pvsosc',
'pvsout',
'pvspitch',
'pvstanal',
'pvstencil',
'pvsvoc',
'pvswarp',
'pvsynth',
'pwd',
'pyassign',
'pyassigni',
'pyassignt',
'pycall',
'pycall1',
'pycall1i',
'pycall1t',
'pycall2',
'pycall2i',
'pycall2t',
'pycall3',
'pycall3i',
'pycall3t',
'pycall4',
'pycall4i',
'pycall4t',
'pycall5',
'pycall5i',
'pycall5t',
'pycall6',
'pycall6i',
'pycall6t',
'pycall7',
'pycall7i',
'pycall7t',
'pycall8',
'pycall8i',
'pycall8t',
'pycalli',
'pycalln',
'pycallni',
'pycallt',
'pyeval',
'pyevali',
'pyevalt',
'pyexec',
'pyexeci',
'pyexect',
'pyinit',
'pylassign',
'pylassigni',
'pylassignt',
'pylcall',
'pylcall1',
'pylcall1i',
'pylcall1t',
'pylcall2',
'pylcall2i',
'pylcall2t',
'pylcall3',
'pylcall3i',
'pylcall3t',
'pylcall4',
'pylcall4i',
'pylcall4t',
'pylcall5',
'pylcall5i',
'pylcall5t',
'pylcall6',
'pylcall6i',
'pylcall6t',
'pylcall7',
'pylcall7i',
'pylcall7t',
'pylcall8',
'pylcall8i',
'pylcall8t',
'pylcalli',
'pylcalln',
'pylcallni',
'pylcallt',
'pyleval',
'pylevali',
'pylevalt',
'pylexec',
'pylexeci',
'pylexect',
#'pylrun',
#'pylruni',
#'pylrunt',
#'pyrun',
#'pyruni',
#'pyrunt',
'qinf',
'qnan',
'r2c',
'rand',
'randh',
'randi',
'random',
'randomh',
'randomi',
'rbjeq',
'readclock',
'readf',
'readfi',
'readk',
'readk2',
'readk3',
'readk4',
'readks',
'readscore',
'readscratch',
'rect2pol',
'reinit',
'release',
'remoteport',
'remove',
'repluck',
'reson',
'resonk',
'resonr',
'resonx',
'resonxk',
'resony',
'resonz',
'resyn',
#'return',
'reverb',
'reverb2',
'reverbsc',
'rewindscore',
'rezzy',
'rfft',
'rifft',
#'rigoto',
'rireturn',
'rms',
'rnd',
'rnd31',
'round',
'rspline',
'rtclock',
's16b14',
's32b14',
'samphold',
'sandpaper',
'scale',
'scalearray',
'scalet',
'scanhammer',
'scans',
'scantable',
'scanu',
'schedkwhen',
'schedkwhennamed',
'schedule',
'schedwhen',
#'scoreline',
#'scoreline_i',
'seed',
'sekere',
'semitone',
'sense',
'sensekey',
'seqtime',
'seqtime2',
'serialBegin',
'serialEnd',
'serialFlush',
'serialPrint',
'serialRead',
'serialWrite',
'serialWrite_i',
'setcol',
'setctrl',
'setksmps',
'setrow',
'setscorepos',
'sfilist',
'sfinstr',
'sfinstr3',
'sfinstr3m',
'sfinstrm',
'sfload',
'sflooper',
'sfpassign',
'sfplay',
'sfplay3',
'sfplay3m',
'sfplaym',
'sfplist',
'sfpreset',
'shaker',
'shiftin',
'shiftout',
'signalflowgraph',
'signum',
'sin',
'sinh',
'sininv',
'sinsyn',
'sleighbells',
'slicearray',
'slider16',
'slider16f',
'slider16table',
'slider16tablef',
'slider32',
'slider32f',
'slider32table',
'slider32tablef',
'slider64',
'slider64f',
'slider64table',
'slider64tablef',
'slider8',
'slider8f',
'slider8table',
'slider8tablef',
'sliderKawai',
'sndload',
'sndloop',
'sndwarp',
'sndwarpst',
'sockrecv',
'sockrecvs',
'socksend',
'socksends',
'soundin',
'soundout',
'soundouts',
'space',
'spat3d',
'spat3di',
'spat3dt',
'spdist',
'specaddm',
'specdiff',
'specdisp',
'specfilt',
'spechist',
'specptrk',
'specscal',
'specsum',
'spectrum',
'splitrig',
'sprintf',
'sprintfk',
'spsend',
'sqrt',
'stack',
'statevar',
'stix',
'strcat',
'strcatk',
'strchar',
'strchark',
'strcmp',
'strcmpk',
'strcpy',
'strcpyk',
'strecv',
'streson',
'strfromurl',
'strget',
'strindex',
'strindexk',
'strlen',
'strlenk',
'strlower',
'strlowerk',
'strrindex',
'strrindexk',
'strset',
'strsub',
'strsubk',
'strtod',
'strtodk',
'strtol',
'strtolk',
'strupper',
'strupperk',
'stsend',
'subinstr',
'subinstrinit',
'sum',
'sumarray',
'sumtab',
'svfilter',
'syncgrain',
'syncloop',
'syncphasor',
'system',
'system_i',
'tab',
'tab2pvs',
'tab_i',
'tabgen',
'table',
'table3',
'table3kt',
'tablecopy',
'tablefilter',
'tablefilteri',
'tablegpw',
'tablei',
'tableicopy',
'tableigpw',
'tableikt',
'tableimix',
'tableiw',
'tablekt',
'tablemix',
'tableng',
'tablera',
'tableseg',
'tableshuffle',
'tableshufflei',
'tablew',
'tablewa',
'tablewkt',
'tablexkt',
'tablexseg',
'tabmap',
'tabmap_i',
'tabmorph',
'tabmorpha',
'tabmorphak',
'tabmorphi',
'tabplay',
'tabrec',
'tabslice',
'tabsum',
'tabw',
'tabw_i',
'tambourine',
'tan',
'tanh',
'taninv',
'taninv2',
'tb0',
'tb0_init',
'tb1',
'tb10',
'tb10_init',
'tb11',
'tb11_init',
'tb12',
'tb12_init',
'tb13',
'tb13_init',
'tb14',
'tb14_init',
'tb15',
'tb15_init',
'tb1_init',
'tb2',
'tb2_init',
'tb3',
'tb3_init',
'tb4',
'tb4_init',
'tb5',
'tb5_init',
'tb6',
'tb6_init',
'tb7',
'tb7_init',
'tb8',
'tb8_init',
'tb9',
'tb9_init',
'tbvcf',
'tempest',
'tempo',
'temposcal',
'tempoval',
#'tigoto',
'timedseq',
'timeinstk',
'timeinsts',
'timek',
'times',
#'timout',
'tival',
'tlineto',
'tone',
'tonek',
'tonex',
'tradsyn',
'trandom',
'transeg',
'transegb',
'transegr',
'trcross',
'trfilter',
'trhighest',
'trigger',
'trigseq',
'trirand',
'trlowest',
'trmix',
'trscale',
'trshift',
'trsplit',
'turnoff',
'turnoff2',
'turnon',
'unirand',
'unwrap',
'upsamp',
'urd',
'vactrol',
'vadd',
'vadd_i',
'vaddv',
'vaddv_i',
'vaget',
'valpass',
'vaset',
'vbap',
'vbap16',
'vbap4',
'vbap4move',
'vbap8',
'vbap8move',
'vbapg',
'vbapgmove',
'vbaplsinit',
'vbapmove',
'vbapz',
'vbapzmove',
'vcella',
'vco',
'vco2',
'vco2ft',
'vco2ift',
'vco2init',
'vcomb',
'vcopy',
'vcopy_i',
'vdel_k',
'vdelay',
'vdelay3',
'vdelayk',
'vdelayx',
'vdelayxq',
'vdelayxs',
'vdelayxw',
'vdelayxwq',
'vdelayxws',
'vdivv',
'vdivv_i',
'vecdelay',
'veloc',
'vexp',
'vexp_i',
'vexpseg',
'vexpv',
'vexpv_i',
'vibes',
'vibr',
'vibrato',
'vincr',
'vlimit',
'vlinseg',
'vlowres',
'vmap',
'vmirror',
'vmult',
'vmult_i',
'vmultv',
'vmultv_i',
'voice',
'vosim',
'vphaseseg',
'vport',
'vpow',
'vpow_i',
'vpowv',
'vpowv_i',
'vpvoc',
'vrandh',
'vrandi',
'vsubv',
'vsubv_i',
'vtaba',
'vtabi',
'vtabk',
'vtable1k',
'vtablea',
'vtablei',
'vtablek',
'vtablewa',
'vtablewi',
'vtablewk',
'vtabwa',
'vtabwi',
'vtabwk',
'vwrap',
'waveset',
'weibull',
'wgbow',
'wgbowedbar',
'wgbrass',
'wgclar',
'wgflute',
'wgpluck',
'wgpluck2',
'wguide1',
'wguide2',
'wiiconnect',
'wiidata',
'wiirange',
'wiisend',
'window',
'wrap',
'writescratch',
'wterrain',
'xadsr',
'xin',
'xout',
'xscanmap',
'xscans',
'xscansmap',
'xscanu',
'xtratim',
'xyin',
'zacl',
'zakinit',
'zamod',
'zar',
'zarg',
'zaw',
'zawm',
'zfilter2',
'zir',
'ziw',
'ziwm',
'zkcl',
'zkmod',
'zkr',
'zkw',
'zkwm'
))
| |
"""
sentry.plugins.base
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# Public API of this module.
__all__ = ('Plugin', 'plugins', 'register', 'unregister')
import logging
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from sentry.utils.managers import InstanceManager
from sentry.utils.safe import safe_execute
from threading import local
class Response(object):
    """Pairs a template name with a base context for deferred rendering."""

    def __init__(self, template, context=None):
        self.template = template
        self.context = context

    def respond(self, request, context=None):
        """Render the template and wrap the result in an ``HttpResponse``."""
        return HttpResponse(self.render(request, context))

    def render(self, request, context=None):
        """Render the template with the merged request/instance context."""
        from sentry.web.helpers import render_to_string

        merged = context or {}
        # Instance-level context wins over the per-call context.
        if self.context:
            merged.update(self.context)
        merged.update(csrf(request))
        return render_to_string(self.template, merged, request)
class PluginManager(InstanceManager):
    """Registry of plugin instances, iterated in title order.

    Only enabled plugins are exposed; :meth:`all` filters out the rest.
    """

    def __iter__(self):
        return iter(self.all())

    def __len__(self):
        return sum(1 for i in self.all())

    def all(self):
        """Yield every enabled plugin, sorted by title."""
        for plugin in sorted(super(PluginManager, self).all(), key=lambda x: x.get_title()):
            if not plugin.is_enabled():
                continue
            yield plugin

    def for_project(self, project):
        """Yield plugins enabled for ``project``; errors count as disabled."""
        for plugin in self.all():
            if not safe_execute(plugin.is_enabled, project):
                continue
            yield plugin

    def for_site(self):
        """Yield plugins that expose a site-wide configuration form."""
        for plugin in self.all():
            if not plugin.has_site_conf():
                continue
            yield plugin

    def get(self, slug):
        """Return the enabled plugin with ``slug``; raise ``KeyError`` if absent."""
        for plugin in self.all():
            if plugin.slug == slug:
                return plugin
        raise KeyError(slug)

    def first(self, func_name, *args, **kwargs):
        """Call ``func_name`` on each plugin; return the first non-None result.

        A plugin that raises is logged and skipped rather than aborting the
        chain.
        """
        for plugin in self.all():
            try:
                result = getattr(plugin, func_name)(*args, **kwargs)
            # `except X as e` (instead of the old `except X, e`) is valid on
            # Python 2.6+ and required on Python 3.
            except Exception as e:
                logger = logging.getLogger('sentry.plugins')
                logger.error('Error processing %s() on %r: %s', func_name, plugin.__class__, e, extra={
                    'func_arg': args,
                    'func_kwargs': kwargs,
                }, exc_info=True)
                continue
            if result is not None:
                return result

    def register(self, cls):
        """Register ``cls`` by dotted path; usable as a class decorator."""
        self.add('%s.%s' % (cls.__module__, cls.__name__))
        return cls

    def unregister(self, cls):
        """Remove ``cls`` from the registry; usable as a class decorator."""
        self.remove('%s.%s' % (cls.__module__, cls.__name__))
        return cls
# Process-wide plugin registry plus decorator-style module aliases.
plugins = PluginManager()
register = plugins.register
unregister = plugins.unregister
class PluginMount(type):
    """Metaclass that fills in ``title``/``slug`` defaults on plugin classes."""

    def __new__(cls, name, bases, attrs):
        plugin_cls = type.__new__(cls, name, bases, attrs)
        # The abstract interface itself gets no defaults applied.
        if IPlugin in bases:
            return plugin_cls
        if not plugin_cls.title:
            plugin_cls.title = plugin_cls.__name__
        if not plugin_cls.slug:
            # Derive a URL-safe slug from the title.
            plugin_cls.slug = plugin_cls.title.replace(' ', '-').lower()
        return plugin_cls
class IPlugin(local):
    """
    Plugin interface. Should not be inherited from directly.

    A plugin should be treated as if it were a singleton. The owner does not
    control when or how the plugin gets instantiated, nor is it guaranteed that
    it will happen, or happen more than once.

    >>> from sentry.plugins import Plugin # NOQA
    >>> class MyPlugin(Plugin):
    >>>     title = 'My Plugin'
    >>>
    >>>     def widget(self, request, group, **kwargs):
    >>>         return self.render('myplugin/widget.html')

    All children should allow ``**kwargs`` on all inherited methods.
    """
    # Generic plugin information
    title = None
    slug = None
    description = None
    version = None
    author = None
    author_url = None
    resource_links = ()

    # Configuration specifics
    conf_key = None
    conf_title = None
    project_conf_form = None
    project_conf_template = 'sentry/plugins/project_configuration.html'
    site_conf_form = None
    site_conf_template = 'sentry/plugins/site_configuration.html'

    # Global enabled state
    enabled = True
    can_disable = True

    # Should this plugin be enabled by default for projects?
    project_default_enabled = False

    def _get_option_key(self, key):
        # Namespace option keys per plugin so different plugins cannot collide.
        return '%s:%s' % (self.get_conf_key(), key)

    def is_enabled(self, project=None):
        """
        Returns a boolean representing if this plugin is enabled.

        If ``project`` is passed, it will limit the scope to that project.

        >>> plugin.is_enabled()
        """
        if not self.enabled:
            return False
        # Plugins that cannot be disabled are always on.
        if not self.can_disable:
            return True
        if not self.can_enable_for_projects():
            return True
        if project:
            # A stored per-project setting overrides the class-level default.
            project_enabled = self.get_option('enabled', project)
            if project_enabled is not None:
                return project_enabled
            else:
                return self.project_default_enabled
        return True

    def reset_options(self, project=None, user=None):
        """Remove all stored options in this plugin's keyspace (optionally scoped)."""
        from .helpers import reset_options
        return reset_options(self.get_conf_key(), project, user)

    def get_option(self, key, project=None, user=None):
        """
        Returns the value of an option in your plugins keyspace, or ``None`` if
        one is not present.

        If ``project`` is passed, it will limit the scope to that project's keyspace.

        >>> value = plugin.get_option('my_option')
        """
        from .helpers import get_option
        return get_option(self._get_option_key(key), project, user)

    def set_option(self, key, value, project=None, user=None):
        """
        Updates the value of an option in your plugins keyspace.

        If ``project`` is passed, it will limit the scope to that project's keyspace.

        >>> plugin.set_option('my_option', 'http://example.com')
        """
        from .helpers import set_option
        return set_option(self._get_option_key(key), value, project, user)

    def unset_option(self, key, project=None, user=None):
        """
        Removes an option in your plugins keyspace.

        If ``project`` is passed, it will limit the scope to that project's keyspace.

        >>> plugin.unset_option('my_option')
        """
        from .helpers import unset_option
        return unset_option(self._get_option_key(key), project, user)

    def get_url(self, group):
        """
        Returns the absolute URL to this plugins group action handler.

        >>> plugin.get_url(group)
        """
        return reverse('sentry-group-plugin-action', args=(group.team.slug, group.project.slug, group.pk, self.slug))

    def get_conf_key(self):
        """
        Returns a string representing the configuration keyspace prefix for this plugin.
        """
        if not self.conf_key:
            # Fall back to a snake_cased version of the configuration title.
            return self.get_conf_title().lower().replace(' ', '_')
        return self.conf_key

    def get_conf_title(self):
        """
        Returns a string representing the title to be shown on the configuration page.
        """
        return self.conf_title or self.get_title()

    def has_site_conf(self):
        """Return True if this plugin provides a site-wide configuration form."""
        return self.site_conf_form is not None

    def has_project_conf(self):
        """Return True if this plugin provides a per-project configuration form."""
        return self.project_conf_form is not None

    def can_enable_for_projects(self):
        """
        Returns a boolean describing whether this plugin can be enabled on a per project basis
        """
        return True

    def get_form_initial(self, project=None):
        """Return initial data for the configuration form (default: empty)."""
        return {}

    # Response methods
    def redirect(self, url):
        """
        Returns a redirect response type.
        """
        return HttpResponseRedirect(url)

    def render(self, template, context=None):
        """
        Given a template name, and an optional context (dictionary), returns a
        ready-to-render response.

        Default context includes the plugin instance.

        >>> plugin.render('template.html', {'hello': 'world'})
        """
        if context is None:
            context = {}
        context['plugin'] = self
        return Response(template, context)

    # The following methods are specific to web requests
    def get_title(self):
        """
        Returns the general title for this plugin.

        >>> plugin.get_title()
        """
        return self.title

    def get_description(self):
        """
        Returns the description for this plugin. This is shown on the plugin configuration
        page.

        >>> plugin.get_description()
        """
        return self.description

    def get_resource_links(self):
        """
        Returns a list of tuples pointing to various resources for this plugin.

        >>> def get_resource_links(self):
        >>>     return [
        >>>         ('Documentation', 'http://sentry.readthedocs.org'),
        >>>         ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),
        >>>         ('Source', 'https://github.com/getsentry/sentry'),
        >>>     ]
        """
        return self.resource_links

    def get_view_response(self, request, group):
        """Dispatch to :meth:`view` when this plugin's URL is requested.

        Returns ``None`` (continue to the next handler) when the request is
        not for this plugin or the view produced no response.
        """
        from sentry.models import Event
        from sentry.permissions import can_admin_group
        self.selected = request.path == self.get_url(group)
        if not self.selected:
            return
        response = self.view(request, group)
        if not response:
            return
        if isinstance(response, HttpResponseRedirect):
            return response
        if not isinstance(response, Response):
            raise NotImplementedError('Use self.render() when returning responses.')
        # Fall back to an empty Event so templates can rely on `event`.
        event = group.get_latest_event() or Event()
        event.group = group
        return response.respond(request, {
            'plugin': self,
            'project': group.project,
            'group': group,
            'event': event,
            'can_admin_event': can_admin_group(request.user, group),
        })

    def view(self, request, group, **kwargs):
        """
        Handles the view logic. If no response is given, we continue to the next action provider.

        >>> def view(self, request, group, **kwargs):
        >>>     return self.render('myplugin/about.html')
        """

    def before_events(self, request, group_list, **kwargs):
        """
        Allows preprocessing of groups in the list view.

        This is generally useful if you need to cache lookups
        for something like ``tags`` which would otherwise do
        multiple queries.

        If you use this **at all** you should ensure it's already
        reset on each execution.

        As an example, here's how we might get a reference to ticket ids we were
        storing per event, in an efficient O(1) manner.

        >>> def before_events(self, request, event_list, **kwargs):
        >>>     prefix = self.get_conf_key()
        >>>     GroupMeta.objects.get_value_bulk(event_list, '%s:tid' % prefix)
        """

    def tags(self, request, group, tag_list, **kwargs):
        """
        Modifies the tag list for a grouped message.

        A tag is a string, already marked safe or later escaped, that is shown inline with
        the event.

        This must return ``tag_list``.

        >>> def tags(self, request, group, tag_list, **kwargs):
        >>>     tag_list.append(':(')
        >>>     return tag_list
        """
        return tag_list

    def actions(self, request, group, action_list, **kwargs):
        """
        Modifies the action list for a grouped message.

        An action is a tuple containing two elements:

        ('Action Label', '/uri/to/action/')

        This must return ``action_list``.

        >>> def actions(self, request, group, action_list, **kwargs):
        >>>     action_list.append(('Google', 'http://google.com'))
        >>>     return action_list
        """
        return action_list

    def panels(self, request, group, panel_list, **kwargs):
        """
        Modifies the panel list for a grouped message.

        A panel is a tuple containing two elements:

        ('Panel Label', '/uri/to/panel/')

        This must return ``panel_list``.

        >>> def panels(self, request, group, action_list, **kwargs):
        >>>     panel_list.append((self.get_title(), self.get_url(group)))
        >>>     return panel_list
        """
        return panel_list

    def widget(self, request, group, **kwargs):
        """
        Renders as a widget in the group details sidebar.

        >>> def widget(self, request, group, **kwargs):
        >>>     return self.render('myplugin/widget.html')
        """

    # Server side signals which do not have request context
    def is_rate_limited(self, project, **kwargs):
        """
        Return True if this project (or the system) is over any defined
        quotas.
        """
        return False

    def has_perm(self, user, perm, *objects, **kwargs):
        """
        Given a user, a permission name, and an optional list of objects
        within context, returns an override value for a permission.

        :param user: either an instance of ``AnonymousUser`` or ``User``.
        :param perm: a string, such as "edit_project"
        :param objects: an optional list of objects

        If your plugin does not modify this permission, simply return ``None``.

        For example, has perm might be called like so:

        >>> has_perm(user, 'add_project')

        It also might be called with more context:

        >>> has_perm(user, 'edit_project', project)

        Or with even more context:

        >>> has_perm(user, 'configure_project_plugin', project, plugin)
        """
        return None

    def missing_perm_response(self, request, perm, *args, **objects):
        """
        Given a user, a permission name, and an optional mapping of objects
        within a context, returns a custom response.

        :param user: either an instance of ``AnonymousUser`` or ``User``.
        :param perm: a string, such as "edit_project"
        :param objects: an optional mapping of objects

        If your plugin does not need to override this response, simply return
        ``None``.
        """
        # NOTE(review): the docstring mentions ``user`` but the signature takes
        # ``request`` — presumably the user comes from ``request.user``; confirm
        # against callers.

    def on_alert(self, alert, **kwargs):
        """
        Called when a new alert is generated.

        :param alert: an instance of ``Alert``

        >>> def on_alert(self, alert, **kwargs):
        >>>     print 'New alert!', alert.message
        >>>     print alert.get_absolute_url()
        """

    def post_process(self, group, event, is_new, is_sample, **kwargs):
        """
        Post processes an event after it has been saved.

        :param group: an instance of ``Group``
        :param event: an instance of ``Event``
        :param is_new: a boolean describing if this group is new, or has changed state
        :param is_sample: a boolean describing if this event was stored, or sampled

        >>> def post_process(self, event, **kwargs):
        >>>     print 'New event created:', event.id
        >>>     print group.get_absolute_url()
        """

    def get_tags(self, event, **kwargs):
        """
        Return additional tags to add to this instance.

        Tags should be a list of tuples.

        >>> def get_tags(self, event, **kwargs):
        >>>     return [('tag-name', 'tag-value')]
        """

    def get_filters(self, project=None, **kwargs):
        """
        Provides additional filters to the builtins.

        Must return an iterable.

        >>> def get_filters(self, project, **kwargs):
        >>>     return [MyFilterClass]
        """
        return []

    def get_notification_forms(self, **kwargs):
        """
        Provides additional UserOption forms for the Notification Settings page.

        Must return an iterable.

        >>> def get_notification_forms(self, **kwargs):
        >>>     return [MySettingsForm]
        """
        return []
class Plugin(IPlugin):
    """
    A plugin should be treated as if it were a singleton. The owner does not
    control when or how the plugin gets instantiated, nor is it guaranteed that
    it will happen, or happen more than once.
    """
    # Python 2 metaclass declaration; under Python 3 this would be written as
    # ``class Plugin(IPlugin, metaclass=PluginMount)``.
    __metaclass__ = PluginMount
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from mock import Mock
from preggy import expect
from holmes.config import Config
from holmes.reviewer import Reviewer
from holmes.validators.base import Validator
from tests.fixtures import PageFactory, ReviewFactory
from tests.unit.base import ApiTestCase
class TestBaseValidator(ApiTestCase, unittest.TestCase):
    """Unit tests for ``holmes.validators.base.Validator``."""

    @property
    def sync_cache(self):
        # Synchronous redis connection provided by the ApiTestCase base.
        return self.connect_to_sync_redis()

    def test_can_validate(self):
        expect(Validator(None).validate()).to_be_true()

    def test_can_add_fact(self):
        mock_reviewer = Mock()
        Validator(mock_reviewer).add_fact('test', 10)
        mock_reviewer.add_fact.assert_called_once_with('test', 10)

    def test_can_add_violation(self):
        mock_reviewer = Mock()
        Validator(mock_reviewer).add_violation('test', 'value', 100)
        mock_reviewer.add_violation.assert_called_once_with('test', 'value', 100)

    def test_can_return_reviewer_info(self):
        # Validator should proxy page/config info from its reviewer.
        review = ReviewFactory.create()
        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=review.page.uuid,
            page_url=review.page.url,
            page_score=0.0,
            config=Config(),
            validators=[]
        )
        validator = Validator(reviewer)
        expect(validator.page_uuid).to_equal(review.page.uuid)
        expect(validator.page_url).to_equal(review.page.url)
        expect(validator.config).to_equal(reviewer.config)

    def test_is_absolute(self):
        validator = Validator(None)
        expect(validator.is_absolute('http://globoi.com/index.html')).to_be_true()

    def test_is_relative(self):
        validator = Validator(None)
        expect(validator.is_absolute('/index.html')).to_be_false()

    def test_can_rebase(self):
        page = PageFactory.create(url='http://globoi.com/test/index.html')
        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[]
        )
        validator = Validator(reviewer)
        # Relative paths resolve against the page URL; absolute paths against the root.
        expect(validator.rebase('index.png')).to_equal('http://globoi.com/test/index.png')
        expect(validator.rebase('/index.png')).to_equal('http://globoi.com/index.png')

    def test_will_call_reviewer_enqueue(self):
        page = PageFactory.create()
        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[]
        )
        reviewer.enqueue = Mock()
        validator = Validator(reviewer)
        validator.enqueue('/')
        reviewer.enqueue.assert_called_once_with('/')

    def test_will_call_reviewer_add_fact(self):
        page = PageFactory.create()
        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[]
        )
        reviewer.add_fact = Mock()
        validator = Validator(reviewer)
        validator.add_fact('random.fact', 'value')
        reviewer.add_fact.assert_called_once_with('random.fact', 'value')

    def test_will_call_reviewer_add_violation(self):
        page = PageFactory.create()
        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[]
        )
        reviewer.add_violation = Mock()
        validator = Validator(reviewer)
        validator.add_violation('random.violation', 'random', 0)
        reviewer.add_violation.assert_called_once_with('random.violation', 'random', 0)

    def test_can_encode_content(self):
        validator = Validator(None)
        content = u'random content'
        gziped_content = validator.to_gzip(content)
        # NOTE(review): the 'zip' codec is Python 2 only (zlib round-trip).
        expect(content).to_equal(gziped_content.decode('zip'))

    def test_test_url_method(self):
        validator = Validator(None)
        # 200 with matching final URL passes; 404 and redirects fail.
        expect(validator.test_url('the-url', Mock(status_code=200, url='the-url'))).to_equal(True)
        expect(validator.test_url('the-url', Mock(status_code=404))).to_equal(False)
        expect(validator.test_url('the-url', Mock(status_code=302))).to_equal(False)
        expect(validator.test_url('the-url', Mock(status_code=307))).to_equal(False)
        expect(validator.test_url('the-url-root', Mock(status_code=200, url='the-url-index'))).to_equal(False)
        # First callback fires for broken links (404)...
        callback_1 = Mock()
        callback_2 = Mock()
        expect(validator.test_url('the-url', Mock(status_code=404), callback_1, callback_2)).to_equal(False)
        expect(callback_1.call_count).to_equal(1)
        expect(callback_2.call_count).to_equal(0)
        # ...second callback fires for redirects (302/307).
        callback_1 = Mock()
        callback_2 = Mock()
        expect(validator.test_url('the-url', Mock(status_code=302), callback_1, callback_2)).to_equal(False)
        expect(callback_1.call_count).to_equal(0)
        expect(callback_2.call_count).to_equal(1)
        callback_1 = Mock()
        callback_2 = Mock()
        expect(validator.test_url('the-url', Mock(status_code=307), callback_1, callback_2)).to_equal(False)
        expect(callback_1.call_count).to_equal(0)
        expect(callback_2.call_count).to_equal(1)

    def test_send_url(self):
        # Buffer flushes once it exceeds MAX_ENQUEUE_BUFFER_LENGTH.
        validator = Validator(Mock(config=Mock(MAX_ENQUEUE_BUFFER_LENGTH=1)))
        validator.flush = Mock()
        validator.test_url = Mock(return_value=True)
        expect(len(validator.url_buffer)).to_equal(0)
        validator.send_url('the-url', 0.0, 'the-response')
        expect(len(validator.url_buffer)).to_equal(1)
        expect(validator.flush.call_count).to_equal(0)
        validator.send_url('the-url-2', 0.0, 'the-response-2')
        expect(len(validator.url_buffer)).to_equal(2)
        expect(validator.flush.call_count).to_equal(1)

    def test_flush_method(self):
        # Flushing an empty buffer must not enqueue anything.
        validator = Validator(None)
        validator.enqueue = Mock()
        validator.flush()
        expect(validator.enqueue.call_count).to_equal(0)
        validator = Validator(None)
        validator.url_buffer = [1, 2, 3]
        validator.enqueue = Mock()
        validator.flush()
        validator.enqueue.assert_called_once_with([1, 2, 3])

    def test_not_implemented_methods(self):
        validator = Validator(None)
        self.assertRaises(NotImplementedError, validator.broken_link_violation)
        self.assertRaises(NotImplementedError, validator.moved_link_violation)

    def test_normalize_url_with_valid_url(self):
        validator = Validator(None)
        url = validator.normalize_url('http://globo.com')
        expect(url).to_equal('http://globo.com')

    def test_normalize_url_with_invalid_url(self):
        validator = Validator(None)
        url = validator.normalize_url('http://]globo.com')
        expect(url).to_be_null()

    def test_normalize_url_with_not_absoulte_url(self):  # (sic: "absoulte")
        page = PageFactory.create(url='http://globoi.com/test/index.html')
        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[]
        )
        validator = Validator(reviewer)
        url = validator.normalize_url('/metal.html')
        expect(url).to_equal('http://globoi.com/metal.html')

    def test_can_get_default_violations_values(self):
        config = Config()
        validator = Validator(None)
        values = validator.get_default_violations_values(config)
        expect(values).to_equal({})

    def test_can_get_violation_pref(self):
        page = PageFactory.create()
        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[],
            cache=self.sync_cache
        )
        reviewer.violation_definitions = {
            'page.title.size': {'default_value': 70},
        }
        validator = Validator(reviewer)
        page_title_size = validator.get_violation_pref('page.title.size')
        expect(page_title_size).to_equal(70)
| |
"""Fava's main WSGI application.
When using Fava's WSGI app, make sure to set ``app.config['BEANCOUNT_FILES']``.
To start a simple server::
from fava.application import app
app.config['BEANCOUNT_FILES'] = ['/path/to/file.beancount']
app.run('localhost', 5000)
Attributes:
app: An instance of :class:`flask.Flask`, this is Fava's WSGI application.
"""
import datetime
import functools
import threading
from io import BytesIO
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import flask
import markdown2 # type: ignore
import werkzeug.urls
from beancount import __version__ as beancount_version
from beancount.core.account import ACCOUNT_RE
from beancount.utils.text_utils import replace_numbers # type: ignore
from flask import abort
from flask import Flask
from flask import redirect
from flask import render_template
from flask import render_template_string
from flask import request
from flask import send_file
from flask_babel import Babel # type: ignore
from flask_babel import get_translations
from werkzeug.utils import secure_filename
from fava import __version__ as fava_version
from fava import LANGUAGES
from fava import template_filters
from fava.context import g
from fava.core import FavaLedger
from fava.core.charts import FavaJSONEncoder
from fava.core.documents import is_document_or_import_file
from fava.help import HELP_PAGES
from fava.helpers import FavaAPIException
from fava.json_api import json_api
from fava.serialisation import serialise
from fava.util import next_key
from fava.util import resource_path
from fava.util import send_file_inline
from fava.util import setup_logging
from fava.util import slugify
from fava.util.date import Interval
from fava.util.excel import HAVE_EXCEL
# Absolute path to Fava's bundled static assets.
STATIC_FOLDER = resource_path("static")
setup_logging()
app = Flask(  # pylint: disable=invalid-name
    __name__,
    template_folder=str(resource_path("templates")),
    static_folder=str(STATIC_FOLDER),
)
# JSON endpoints live under a per-ledger prefix, e.g. /<bfile>/api/...
app.register_blueprint(json_api, url_prefix="/<bfile>/api")
app.json_encoder = FavaJSONEncoder  # type: ignore
try:
    jinja_extensions = app.jinja_options.setdefault("extensions", [])
except TypeError:  # Flask <2
    jinja_extensions = app.jinja_options["extensions"]
jinja_extensions.append("jinja2.ext.do")
jinja_extensions.append("jinja2.ext.loopcontrols")
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.config["HAVE_EXCEL"] = HAVE_EXCEL
app.config["ACCOUNT_RE"] = ACCOUNT_RE
# Names of the report pages that the report endpoint may render.
REPORTS = [
    "balance_sheet",
    "commodities",
    "documents",
    "events",
    "editor",
    "errors",
    "holdings",
    "import",
    "income_statement",
    "journal",
    "options",
    "query",
    "statistics",
    "trial_balance",
]
# Serializes the initial loading of the Beancount files across requests.
LOAD_FILE_LOCK = threading.Lock()
def ledger_slug(ledger: FavaLedger) -> str:
    """Generate URL slug for a ledger.

    Prefers the slugified ledger title; falls back to the slugified
    file path when the title yields an empty slug.
    """
    return slugify(ledger.options["title"]) or slugify(ledger.beancount_file_path)
def update_ledger_slugs(ledgers: List[FavaLedger]) -> None:
    """Update the dictionary mapping URL slugs to ledgers.

    Slug collisions are resolved by ``next_key`` so every ledger gets a
    unique key.
    """
    by_slug: Dict[str, FavaLedger] = {}
    for ledger in ledgers:
        by_slug[next_key(ledger_slug(ledger), by_slug)] = ledger
    app.config["LEDGERS"] = by_slug
def _load_file() -> None:
    """Load Beancount files.

    This is run automatically on the first request.
    """
    loaded_ledgers = []
    for filepath in app.config["BEANCOUNT_FILES"]:
        loaded_ledgers.append(FavaLedger(filepath))
    update_ledger_slugs(loaded_ledgers)
def get_locale() -> Optional[str]:
    """Get locale.

    Returns:
        The locale that should be used for Babel. If not given as an option
        to Fava, guess from browser.
    """
    configured = g.ledger.fava_options["language"]
    if configured is None:
        return request.accept_languages.best_match(["en"] + LANGUAGES)
    return configured
# Babel i18n setup; the locale is chosen per request by get_locale().
BABEL = Babel(app)
BABEL.localeselector(get_locale)
# Register Fava's Jinja template filters plus the entry serialiser.
for function in template_filters.FILTERS:
    app.add_template_filter(function)  # type: ignore
app.add_template_filter(serialise)
@app.url_defaults
def _inject_filters(endpoint: str, values: Dict[str, Any]) -> None:
    # Fill in the current ledger slug for endpoints expecting a <bfile> part.
    if "bfile" not in values and app.url_map.is_endpoint_expecting(
        endpoint, "bfile"
    ):
        values["bfile"] = g.beancount_file_slug
    if endpoint in ["static", "index"]:
        return
    # Carry the active display/filter query parameters over to generated
    # URLs so navigation keeps the current filters.
    for name in ["conversion", "interval", "account", "filter", "time"]:
        if name not in values:
            values[name] = request.args.get(name)
def static_url(filename: str) -> str:
    """Return a static url with an mtime query string for cache busting."""
    mtime = 0
    try:
        mtime = int((STATIC_FOLDER / filename).stat().st_mtime)
    except FileNotFoundError:
        pass
    return url_for("static", filename=filename, mtime=mtime)
# LRU-cached flask.url_for — URL building is comparatively expensive and the
# same (endpoint, values) pairs recur on every page render.
CACHED_URL_FOR = functools.lru_cache(2048)(flask.url_for)
def url_for(endpoint: str, **values: Any) -> str:
    """A wrapper around flask.url_for that uses a cache."""
    # Inject the filter parameters before the lookup so that they become
    # part of the cache key.
    _inject_filters(endpoint, values)
    return CACHED_URL_FOR(endpoint, **values)
def url_for_source(**kwargs: Any) -> str:
    """URL to source file (possibly link to external editor)."""
    if not g.ledger.fava_options["use-external-editor"]:
        return url_for("report", report_name="editor", **kwargs)
    file_path = kwargs.get("file_path")
    lineno = kwargs.get("line", 1)
    return f"beancount://{file_path}?lineno={lineno}"
def translations() -> Any:
    """Get translations catalog."""
    # pylint: disable=protected-access
    # Babel exposes no public API for the raw catalog dict, so reach into
    # the Translations object directly.
    return get_translations()._catalog
# Expose these helpers to every Jinja template.
app.add_template_global(static_url, "static_url")  # type: ignore
app.add_template_global(datetime.date.today, "today")
app.add_template_global(url_for, "url_for")  # type: ignore
app.add_template_global(url_for_source, "url_for_source")
app.add_template_global(translations, "translations")
@app.context_processor
def template_context() -> Dict[str, Any]:
    """Inject variables into the template context."""
    return {"ledger": g.ledger}
@app.before_request
def _perform_global_filters() -> None:
    # Apply the global account/filter/time filters before every request.
    ledger = getattr(g, "ledger", None)
    if ledger:
        # check (and possibly reload) source file
        if request.blueprint != "json_api":
            ledger.changed()
        ledger.filter(
            account=request.args.get("account"),
            filter=request.args.get("filter"),
            time=request.args.get("time"),
        )
@app.after_request
def _incognito(response: flask.wrappers.Response) -> flask.wrappers.Response:
    """Replace all numbers with 'X'."""
    if app.config.get("INCOGNITO") and response.content_type.startswith(
        "text/html"
    ):
        # Leave the editor untouched — it must show the real source text.
        is_editor = (
            request.endpoint == "report"
            and request.view_args is not None
            and request.view_args["report_name"] == "editor"
        )
        if not is_editor:
            original_text = response.get_data(as_text=True)
            response.set_data(replace_numbers(original_text))
    return response
@app.url_value_preprocessor
def _pull_beancount_file(
    _: Optional[str], values: Optional[Dict[str, str]]
) -> None:
    # Resolve the <bfile> URL part into g.ledger before the view runs.
    g.beancount_file_slug = values.pop("bfile", None) if values else None
    # Lazily load the Beancount files on first request; the lock prevents
    # concurrent first requests from loading twice.
    with LOAD_FILE_LOCK:
        if not app.config.get("LEDGERS"):
            _load_file()
    if g.beancount_file_slug:
        if g.beancount_file_slug not in app.config["LEDGERS"]:
            # 404 unless the slug matches a freshly recomputed ledger slug
            # (a ledger title may have changed since the mapping was built).
            if not any(
                g.beancount_file_slug == ledger_slug(ledger)
                for ledger in app.config["LEDGERS"].values()
            ):
                abort(404)
            # one of the file slugs changed, update the mapping
            update_ledger_slugs(app.config["LEDGERS"].values())
        g.ledger = app.config["LEDGERS"][g.beancount_file_slug]
        g.conversion = request.args.get("conversion", "at_cost")
        g.interval = Interval.get(request.args.get("interval", "month"))
@app.errorhandler(FavaAPIException)  # type: ignore
def fava_api_exception(error: FavaAPIException) -> str:
    """Handle API errors."""
    content = error.message
    return render_template(
        "_layout.html", page_title="Error", content=content
    )
@app.route("/")
@app.route("/<bfile>/")
def index() -> werkzeug.wrappers.response.Response:
"""Redirect to the Income Statement (of the given or first file)."""
if not g.beancount_file_slug:
g.beancount_file_slug = next(iter(app.config["LEDGERS"]))
index_url = url_for("index")
default_path = app.config["LEDGERS"][g.beancount_file_slug].fava_options[
"default-page"
]
return redirect(f"{index_url}{default_path}")
@app.route("/<bfile>/account/<name>/")
@app.route("/<bfile>/account/<name>/<subreport>/")
def account(name: str, subreport: str = "journal") -> str:
"""The account report."""
if subreport in ["journal", "balances", "changes"]:
return render_template(
"account.html", account_name=name, subreport=subreport
)
return abort(404)
@app.route("/<bfile>/document/", methods=["GET"])
def document() -> Any:
"""Download a document."""
filename = request.args.get("filename")
if filename is None:
return abort(404)
if is_document_or_import_file(filename, g.ledger):
return send_file_inline(filename)
return abort(404)
@app.route("/<bfile>/statement/", methods=["GET"])
def statement() -> Any:
"""Download a statement file."""
entry_hash = request.args.get("entry_hash", "")
key = request.args.get("key", "")
document_path = g.ledger.statement_path(entry_hash, key)
return send_file_inline(document_path)
@app.route("/<bfile>/holdings/by_<aggregation_key>/")
def holdings_by(aggregation_key: str) -> str:
"""The holdings report."""
if aggregation_key in ["account", "currency", "cost_currency"]:
return render_template(
"_layout.html",
active_page="holdings",
aggregation_key=aggregation_key,
)
return abort(404)
@app.route("/<bfile>/_query_result/")
def query_result() -> str:
"""Query shell."""
return render_template("_query_result.html")
@app.route("/<bfile>/<report_name>/")
def report(report_name: str) -> str:
"""Endpoint for most reports."""
if report_name in REPORTS:
return render_template("_layout.html", active_page=report_name)
return abort(404)
@app.route("/<bfile>/extension/<report_name>/")
def extension_report(report_name: str) -> str:
"""Endpoint for extension reports."""
try:
template, extension = g.ledger.extensions.template_and_extension(
report_name
)
content = render_template_string(template, extension=extension)
return render_template(
"_layout.html", content=content, page_title=extension.report_title
)
except LookupError:
return abort(404)
@app.route("/<bfile>/download-query/query_result.<result_format>")
def download_query(result_format: str) -> Any:
"""Download a query result."""
name, data = g.ledger.query_shell.query_to_file(
request.args.get("query_string", ""), result_format
)
filename = f"{secure_filename(name.strip())}.{result_format}"
try:
return send_file(data, as_attachment=True, download_name=filename)
except TypeError: # Flask <2
return send_file(
data, as_attachment=True, attachment_filename=filename
)
@app.route("/<bfile>/download-journal/")
def download_journal() -> Any:
"""Download a Journal file."""
now = datetime.datetime.now().replace(microsecond=0)
filename = f"journal_{now.isoformat()}.beancount"
data = BytesIO(bytes(render_template("beancount_file"), "utf8"))
try:
return send_file(data, as_attachment=True, download_name=filename)
except TypeError: # Flask <2
return send_file(
data, as_attachment=True, attachment_filename=filename
)
@app.route("/<bfile>/help/", defaults={"page_slug": "_index"})
@app.route("/<bfile>/help/<string:page_slug>")
def help_page(page_slug: str) -> str:
"""Fava's included documentation."""
if page_slug not in HELP_PAGES:
abort(404)
html = markdown2.markdown_path(
(resource_path("help") / (page_slug + ".md")),
extras=["fenced-code-blocks", "tables", "header-ids"],
)
return render_template(
"_layout.html",
active_page="help",
page_slug=page_slug,
help_html=render_template_string(
html,
beancount_version=beancount_version,
fava_version=fava_version,
),
HELP_PAGES=HELP_PAGES,
)
@app.route("/jump")
def jump() -> werkzeug.wrappers.response.Response:
"""Redirect back to the referer, replacing some parameters.
This is useful for sidebar links, e.g. a link ``/jump?time=year``
would set the time filter to `year` on the current page.
When accessing ``/jump?param1=abc`` from
``/example/page?param1=123¶m2=456``, this view should redirect to
``/example/page?param1=abc¶m2=456``.
"""
url = werkzeug.urls.url_parse(request.referrer)
qs_dict = url.decode_query()
for key, values in request.args.lists():
if len(values) == 1 and values[0] == "":
try:
del qs_dict[key]
except KeyError:
pass
continue
qs_dict.setlist(key, values)
redirect_url = url.replace(
query=werkzeug.urls.url_encode(qs_dict, sort=True)
)
return redirect(werkzeug.urls.url_unparse(redirect_url))
| |
#!/usr/bin/env python
"""
heatsequer experiment input/output functions
"""
# amnonscript
__version__ = "0.9"
import heatsequer as hs
import numpy as np
import scipy.sparse
import csv
import biom
import os
from pdb import set_trace as XXX
import hashlib
import re
def load(tablename, mapname='map.txt', taxfile='', nameisseq=True,studyname=False,tabletype='biom',normalize=True,addsname='',addmname='',keepzero=False,removefrom=False,removenum=1,mapsampletolowercase=False,sortit=True,useseqnamefortax=True,rawreads=False,usesparse=False):
    """
    Load an experiment - a biom table and a mapping file
    input:
    tablename - the name of the biom table file
    mapname - name of the mapping file
    taxfile - empty ('') to load taxonomy from biom table, non-empty to load
    from rdp output file (web)
    nameisseq - False to keep otu name as sid without hashing it, True to treat otuid as sequence
    addsname - a string to add to each table sample name (or empty to not add)
    addmname - a string to add to each mapping file sample name (or empty to not add)
    studyname - False to assign from table file name, otherwise string to store as study name
    tabletype:
        'biom' - a biom table
        'meta' - a metabolomics table (row per sample, col per metabolite, can contain duplicate metaboliteids)
    normalize - True to normalize to 10k reads per sample, False to not normalize (change to mean 10k reads/sample)
    keepzero : bool
        True (default) to keep samples with 0 reads, False to throw away
    removefrom : string
        if non empty - cut table sample name after (and including) the first occurance of removefrom
    mapsampletolowercase : bool
        True to convert the mapping file sample id to lower case (for EMP data). default=False
    sortit : bool
        True (default) to sort sequences by taxonomy, False to not sort
    useseqnamefortax : bool
        True (default) to use the sequence as taxonomy if no taxonomy supplied, False to use 'unknown'
    rawreads : bool
        True in combination with normalize=False - do not modify read count to mean 10k
    usesparse : bool
        True to use sparse matrix representation, False to use non-sparse (default)
    output:
    an experiment class for the current experiment
    """
    params=locals()
    # NOTE(review): taxfile is currently unused in this function; rdp
    # taxonomy can be attached afterwards via loadrdptax().
    # load the table
    if tabletype=='biom':
        hs.Debug(6,'Loading biom table %s' % tablename)
        table=biom.load_table(tablename)
    elif tabletype=='meta':
        hs.Debug(6,'Loading metabolite table %s' % tablename)
        table=loadmetabuckettable(tablename)
    else:
        hs.Debug(9,'Table type %s not supported' % tabletype)
        return False
    # md5 of the dense data matrix - identifies this exact dataset
    datamd5g=hashlib.md5()
    datamd5g.update(table.matrix_data.todense().A.view(np.uint8))
    datamd5=datamd5g.hexdigest()
    # debug print() replaced by the module's Debug logging
    hs.Debug(6,'data md5 is %s' % datamd5)
    # if need to cut table sample names
    if removefrom:
        idtable={}
        foundids={}
        ids=table.ids(axis='sample')
        if len(set(ids))!=len(ids):
            hs.Debug(8,'non unique ids identified')
        for cid in ids:
            if removefrom in cid:
                fpos=hs.findn(cid,removefrom,removenum)
                if fpos==-1:
                    hs.Debug(6,'Did not find enough %s in %s' % (removefrom,cid))
                    tid=cid
                else:
                    tid=cid[:fpos]
            else:
                hs.Debug(6,'%s not found in sample name %s (removefrom)' % (removefrom,cid))
                tid=cid
            if tid in foundids:
                # duplicate after truncation - disambiguate with a suffix
                hs.Debug(6,'already have id %s' % cid)
                foundids[tid]+=1
                idtable[cid]=tid+'-rep-'+str(foundids[tid])
                hs.Debug(6,'renamed to %s' % idtable[cid])
            else:
                foundids[tid]=1
                idtable[cid]=tid
        hs.Debug(6,'found %d keys %d values' % (len(set(idtable.keys())),len(set(idtable.values()))))
        table=table.update_ids(idtable,axis='sample')
    # if need to add constant string to sample names in table
    if addsname!='':
        idtable={}
        ids=table.ids(axis='sample')
        for cid in ids:
            idtable[cid]=addsname+cid
        table=table.update_ids(idtable,axis='sample')
    smap = {}
    mapsamples = []
    mapmd5=''
    if mapname:
        # if mapping file supplied, load it
        mapsamples,smap,fields,mapmd5=loadmap(mapname,mapsampletolowercase=mapsampletolowercase,addmname=addmname)
    else:
        # no mapping file, so just create the #SampleID field
        hs.Debug(6,'No mapping file supplied - using just sample names')
        tablesamples = table.ids(axis='sample')
        for cid in tablesamples:
            smap[cid]={'#SampleID':cid}
            mapsamples.append(cid)
        fields=['#SampleID']
        mapmd5=''
    # remove table samples not in mapping file
    tablesamples = table.ids(axis='sample')
    hs.Debug(6,'number of samples in table is %d' % len(tablesamples))
    hs.Debug(3,'testing for samples not in mapping file')
    removelist=[]
    for cid in tablesamples:
        if cid not in mapsamples:
            removelist.append(cid)
            hs.Debug(6,'Table sample %s not found in mapping file' % cid)
    hs.Debug(6,'removing %s samples' % len(removelist))
    if len(removelist)>0:
        table=table.filter(removelist,axis='sample',invert=True)
        tablesamples = table.ids(axis='sample')
        hs.Debug(6,'deleted. number of samples in table is now %d' % len(tablesamples))
    # remove samples not in table from mapping file
    hs.Debug(3,'removing samples not in table')
    removemap=[]
    addlist=[]
    for idx,cmap in enumerate(mapsamples):
        if cmap not in tablesamples:
            hs.Debug(2,'Map sample %s not in table' % cmap)
            if not keepzero:
                removemap.append(idx)
                try:
                    del smap[cmap]
                except KeyError:
                    # narrowed from a bare except: del only raises KeyError
                    hs.Debug(8,'Duplicate SampleID %s in mapping file' % cmap)
            else:
                # keepzero: these samples get zero-read columns added below
                addlist.append(cmap)
    if len(removemap)>0:
        hs.Debug(7,'removing %d samples from mapping file' % len(removemap))
        mapsamples=hs.delete(mapsamples,removemap)
    hs.Debug(7,'number of samples in mapping file is now %d' % len(mapsamples))
    # get info about the sequences
    tableseqs = table.ids(axis='observation')
    sids = []
    tax = []
    osnames=[]
    for cid in tableseqs:
        # get the original sample name
        osnames.append(cid)
        # get the sid (hash)
        if nameisseq:
            sids.append(hs.hashseq(cid))
        else:
            sids.append(cid)
        # get the taxonomy string
        ctax=gettaxfromtable(table,cid,useseqname=useseqnamefortax)
        tax.append(ctax)
    hs.Debug(3,'experiment has %d sequences' % len(tax))
    if not studyname:
        studyname=os.path.basename(tablename)
    exp=hs.Experiment()
    exp.datatype=tabletype
    if usesparse:
        exp.data=scipy.sparse.csr_matrix(table.matrix_data)
        exp.sparse=True
        hs.Debug(3,'Sparse matrix initialized')
    else:
        exp.data=table.matrix_data.todense().A
    # check if need to add the 0 read samples to the data
    if len(addlist)>0:
        tablesamples=list(tablesamples)
        tablesamples=tablesamples+addlist
        exp.data=np.hstack([exp.data,np.zeros([np.shape(exp.data)[0],len(addlist)])])
    exp.smap=smap
    exp.samples=tablesamples
    exp.seqs=tableseqs
    for idx,cseq in enumerate(exp.seqs):
        exp.seqdict[cseq]=idx
    exp.sids=sids
    exp.origotunames=osnames
    exp.tax=tax
    exp.tablefilename=tablename
    exp.studyname=studyname
    # BUGFIX: record the mapping file name (was mistakenly set to tablename)
    exp.mapfilename=mapname
    exp.filters = [tablename]
    exp.fields = fields
    exp.datamd5 = datamd5
    exp.mapmd5 = mapmd5
    # add the original number of reads as a field to the experiment
    hs.Debug(3,'Adding origReads')
    colsum=hs.sum(exp.data,axis=0)
    hs.Debug(3,'converting origReads to list')
    exp.origreads=list(colsum)
    hs.Debug(3,'appending the origReads values')
    exp.fields.append('origReads')
    for idx,csamp in enumerate(exp.samples):
        exp.smap[csamp]['origReads']=str(exp.origreads[idx])
    # normalize samples to 10k reads per samples
    hs.Debug(3,'Removing 0 read samples')
    colsum=hs.sum(exp.data,axis=0)
    okreads=np.where(colsum>0)
    if np.size(colsum)-np.size(okreads[0])>0:
        hs.Debug(6,"Samples with 0 reads: %d" % (np.size(colsum)-np.size(okreads[0])))
        if not keepzero:
            exp=hs.reordersamples(exp,okreads[0])
    # recompute after any sample removal
    colsum=hs.sum(exp.data,axis=0)
    if tabletype=='meta':
        # metabolite intensities are never read-normalized
        normalize=False
    if normalize:
        hs.Debug(3,'Normalizing')
        exp.data=10000*hs.divvec(exp.data,colsum)
    else:
        if not rawreads:
            hs.Debug(3,'Normalizing (constant) to mean 10000')
            exp.data=10000*exp.data/np.mean(colsum)
        else:
            hs.Debug(3,'Keeping raw reads. No normalization')
    # calculate the scaling factor
    exp.scalingfactor=np.array(exp.origreads)/np.sum(exp.data,axis=0)
    exp.uniqueid=exp.getexperimentid()
    if sortit:
        exp=hs.sortbacteria(exp,logit=False)
    hs.addcommand(exp,"load",params=params)
    exp.filters.append('loaded table=%s, map=%s' % (tablename,mapname))
    return(exp)
def loadmetabuckettable(filename):
    '''
    load a metabolomics csv bucket table and convert to a biom table in memory
    input:
    filename : str
        the csv bucket table file name
    output:
    table: biom.Table
        a biom table initialized by the bucket table
    '''
    hs.Debug(6,'Loading metabolite table')
    # load the metabolite table and turn it into a biom table
    # 'r' replaces the 'rU' mode removed in Python 3.11; universal newlines
    # are the default in text mode. The with-block guarantees the file is
    # closed even on a parse error.
    with open(filename,'r') as fl:
        head=fl.readline().rstrip('\n')
        # its a csv
        headsplit=head.split(',')
        headsplit=headsplit[1:]
        # look if we have strange entries (like 'x','y','z')
        usepos=[]
        metabolites=[]
        for idx,cmet in enumerate(headsplit):
            if cmet[0].isdigit():
                usepos.append(idx+1)
                metabolites.append("%s-%d" % (cmet,idx))
            else:
                hs.Debug(7,'Metabolite %s (col %d) not numeric!' % (cmet,idx))
        # load sample names
        sampnames=[]
        for cline in fl:
            cline=cline.rstrip('\n')
            cdat=cline.split(',')
            sampnames.append(cdat[0])
    # now load the table data (only the numeric metabolite columns):
    dat=np.genfromtxt(filename,skip_header=1,usecols=usepos,delimiter=',')
    dat=np.transpose(dat)
    # and create the biom table:
    table=biom.table.Table(dat,metabolites,sampnames)
    # and add metabolite name as taxonomy:
    taxdict={}
    for cmet in metabolites:
        taxdict[cmet]={'taxonomy': cmet}
    table.add_metadata(taxdict,axis='observation')
    return table
def gettaxfromtable(table,seq,useseqname=False):
    """
    get the taxonomy for a given sequence in the biom table
    and convert it to a nicer string
    if no taxonomy - use "unknown"
    input:
    table : biom.table
        the biom table containing the sequence and (possibly) the taxonomy
    seq : str
        the sequence to get taxonomy for
    useseqname : bool
        False (default) to use 'unknown' for sequences without taxonomy, True to use sequence as taxonomy for them
    output:
    tax : str
        the nice taxonomy string
    """
    fallback = seq if useseqname else 'unknown'
    md = table.metadata(seq, axis='observation')
    if not md or 'taxonomy' not in md:
        return fallback
    tax = md['taxonomy']
    if isinstance(tax, str):
        return tax
    # taxonomy is a list of per-level strings such as 'k__Bacteria';
    # strip the 'x__' prefixes and join levels with ';'
    pieces = []
    for level in tax:
        if len(level) > 2 and level[2] == '_':
            pieces.append(level[3:])
        else:
            pieces.append(level)
    if not pieces:
        return ''
    return ';'.join(pieces) + ';'
def loadrdptax(expdat,rdpfilename,fastaname=False,threshold=60):
    """
    load rdp taxonomy (the output of download allrank in the rdp classifier website) and add to biom table
    input:
    expdat - the biom table for which the taxonomy was assigned (sequences were saved)
    rdpfilename - name of the saved allrank rdp assignment
    fastaname - name of fasta file used for rdp assignment (if it was not from saveseqsforrdp) or False if sequences are in the header of the fasta
    threshold - the assignment probability threshold under which to not include the assignment (for each level)
    """
    params=locals()
    if fastaname:
        # map fasta headers back to the actual sequences
        seqs,headers=hs.readfastaseqs(fastaname)
        hdict={}
        for idx,chead in enumerate(headers):
            hdict[chead]=seqs[idx]
    with open(rdpfilename,'r') as fl:
        for cline in fl:
            cline=cline.rstrip()
            cdat=cline.split(';')
            # skip header lines
            if len(cdat)<2:
                continue
            # check if sequence in experiment
            cseq=cdat[0]
            if fastaname:
                if cdat[0] in hdict:
                    cseq=hdict[cseq]
                else:
                    hs.Debug(6,'sequence %s not found in fasta file' % cseq)
            if cseq not in expdat.seqdict:
                hs.Debug(6,'sequence %s not found in experiment' % cseq)
                continue
            cpos=expdat.seqdict[cseq]
            ctax=''
            # entries alternate: level name at idx, confidence%% at idx+1
            for idx in np.arange(2,len(cdat),2):
                cp=cdat[idx+1].rstrip('%')
                # BUGFIX: honor the threshold parameter (was hard-coded 60)
                if float(cp)<threshold:
                    break
                ctax+=';'
                ctax+=cdat[idx]
            expdat.tax[cpos]=ctax
    expdat.filters.append("loaded rdp taxonomy from file %s" % rdpfilename)
    hs.addcommand(expdat,"loadrdptax",params=params,replaceparams={'expdat':expdat})
    return(expdat)
def saveseqsforrdp(expdat,outfilename):
    """
    save sequences of an experiment into a fasta file
    with the header identical to the sequence (for easy reloading of rdp taxonomy)
    input:
    expdat : Experiment
    outfilename : string
        name of the output fasta file
    """
    # the with-block guarantees the file is closed even if a write fails
    with open(outfilename,'w') as fl:
        for cseq in expdat.seqs:
            fl.write('>'+cseq+'\n')
            fl.write(cseq+'\n')
    hs.Debug(6,'wrote %d sequences to file %s' % (len(expdat.seqs),outfilename))
def saveseqsfasta(expdat,seqs,filename):
    """
    save bacteria from list seqs to a fasta file
    input:
    expdat - Experiment (for taxonomy/sid annotation) or False for none
    seqs - the sequences to save
    filename - the name of the output fasta file
    """
    with open(filename,'w') as fl:
        for idx,cseq in enumerate(seqs):
            # BUGFIX: default the annotations first — previously ctax/cid
            # were unbound (NameError) when expdat was False
            ctax='NA'
            cid='NA'
            if expdat:
                if cseq in expdat.seqdict:
                    cpos=expdat.seqdict[cseq]
                    ctax=expdat.tax[cpos]
                    cid=expdat.sids[cpos]
            fl.write('>%d-%s-%s\n' % (idx,str(cid),str(ctax)))
            fl.write(cseq+'\n')
def saveexpseqs(expdat,filename):
    """
    save all bacteria from an experiment as a fasta file
    (thin wrapper around saveseqsfasta using all of expdat.seqs)
    input:
    expdat
    filename - the name of the output fasta file
    """
    hs.saveseqsfasta(expdat,expdat.seqs,filename)
def savemap(expdat,filename):
    """
    save the mapping file of an experiment as a tab separated file
    input:
    expdat : Experiment
    filename : str
        name of the output mapping file
    """
    hs.Debug(1,"Saving mapping file %s" % filename)
    # with-block ensures the file is closed even if a field lookup fails
    with open(filename,'w') as mf:
        # header row: #SampleID first, then every other field
        mf.write('#SampleID')
        for cfield in expdat.fields:
            if cfield=='#SampleID':
                continue
            if len(cfield)==0:
                cfield='NA'
            # tabs inside a field name would break the TSV format
            cfield=cfield.replace('\t','_')
            mf.write('\t%s' % cfield)
        mf.write('\n')
        # one row per sample, same field order as the header
        for csamp in expdat.samples:
            if len(csamp)==0:
                continue
            mf.write('%s' % csamp)
            for cfield in expdat.fields:
                if cfield=='#SampleID':
                    continue
                mf.write('\t')
                cfval=expdat.smap[csamp][cfield]
                cfval=cfval.replace('\t','_')
                mf.write(str(cfval))
            mf.write('\n')
def savetsvtable(expdat,filename,logtransform=True):
    """
    save an experiment as a tab separated table, with columns for samples and rows for bacteria
    for jose clemente long babies paper
    input:
    expdat
    filename - name of the output tsv file
    logtransform - True to save the log2 of the reads, False to save the reads
    """
    # NOTE(review): hs.copyexp is handed expdat.data (an array), not the
    # Experiment itself — confirm copyexp accepts raw arrays as well.
    ldat=hs.copyexp(expdat.data)
    if logtransform:
        # clip to 1 so log2 never sees zeros
        ldat[np.where(ldat<1)]=1
        ldat=np.log2(ldat)
    of=open(filename,'w')
    # header: taxonomy, sequence, then one column per sample
    of.write("Taxonomy\tSequence")
    for csamp in expdat.samples:
        of.write("\t%s" % csamp)
    of.write("\n")
    # one row per bacterium with its (possibly log-transformed) reads
    for idx,cseq in enumerate(expdat.seqs):
        of.write("%s\t%s" % (expdat.tax[idx],cseq))
        for cval in ldat[idx,:]:
            of.write("\t%f" % cval)
        of.write("\n")
    of.close()
def loadexptree(expdat,treefilename):
    """
    EXPERIMENTAL
    load a tree file associated with an experiment
    input:
    expdat
    treefilename - the name of the newick tree file (from make_phylogeny.py). note that need to use the sequences as the fasta sequence ids (use -s in the CreateTable)
    output:
    expdat - with a new field - tree
    """
    params=locals()
    # imported lazily so skbio is only required when trees are actually used
    import skbio.tree
    tree=skbio.tree.TreeNode.read(treefilename)
    hs.Debug(4,'Loaded tree')
    expdat.tree=tree
    expdat.filters.append("Load tree %s" % treefilename)
    hs.addcommand(expdat,"loadexptree",params=params,replaceparams={'expdat':expdat})
    return expdat
def saveillitable(expdat,filename):
    """
    create a tsv table file for use with the illi visualization tool
    input:
    expdat
    filename - name of the table file to create
    """
    with open(filename,'w') as fl:
        # header row: one column per bacterium, labelled by its last two
        # non-empty taxonomy levels plus its index
        for idx,ctax in enumerate(expdat.tax):
            levels=[part for part in ctax.split(';') if part!='']
            if len(levels)>2:
                levels=levels[-2:]
            label=''.join(';%s' % part for part in levels)
            fl.write('\t%s-%d' % (label,idx))
        fl.write('\n')
        # one row per sample with the reads of each bacterium
        for sampidx,csamp in enumerate(expdat.samples):
            fl.write('%s' % csamp)
            for idx in range(len(expdat.tax)):
                fl.write('\t%f' % expdat.data[idx,sampidx])
            fl.write('\n')
def savecommands(expdat,filename):
    """
    save the commands used to generate an experiment to a python text file
    input:
    expdat : Experiment
    filename : string
        the filename to save commands to
    """
    # one command per line, in the order they were recorded
    hs.savelisttofile(expdat.commands,filename,delimiter='\n')
    hs.Debug(6,"%d Commands saved to file %s" % (len(expdat.commands),filename))
def saveseqstolinefile(seqs,filename):
    """
    save experiment sequences to a file with 1 sequence per line
    input:
    seqs : list of strings
        The sequences to save
    filename : string
        File to save to
    """
    # with-block guarantees the file is closed even on a write error
    with open(filename,'w') as fl:
        for cseq in seqs:
            fl.write('%s\n' % cseq)
def loadmap(mapfilename,sampidfield='#SampleID',mapsampletolowercase=False,addmname=''):
    """
    load a tab separated mapping file and store the values and samples (from #SampleID field)
    input:
    mapfilename : string
        name of the mapping file
    sampidfield : string
        name of the field containing the sample id (default is #SampleID)
    addmname - a string to add to each mapping file sample name (or empty to not add)
    output:
    mapsamples : list of strings
        the sampleids (in order) - the values in sampidfield
    smap : dict of dict of strings
        indexed by sampleid, then by field name (i.e. 'ENV_MATTER' etc.), values are the actual value of the field for the sample
    fields : list of strings
        names of the mapping file fields (i.e 'ENV_MATTER' etc)
    mapmd5 : string
        the md5 of the map file
    """
    smap = {}
    mapsamples = []
    hs.Debug(6,'Loading mapping file %s' % mapfilename)
    # 'r' replaces the 'rU' mode removed in Python 3.11; the with-block
    # guarantees the file is closed even on a parse error
    with open(mapfilename, 'r') as mapf:
        reader = csv.DictReader(mapf, delimiter='\t')
        fields = reader.fieldnames
        for cline in reader:
            if addmname:
                cline[sampidfield]=cline[sampidfield]+addmname
            cid = cline[sampidfield]
            if mapsampletolowercase:
                cid=cid.lower()
            smap[cid] = cline
            mapsamples.append(cid)
    hs.Debug(6,'number of samples in map is %d' % len(mapsamples))
    mapmd5=getfilemd5(mapfilename)
    return mapsamples,smap,fields,mapmd5
def createbiomtablefromexp(expdat,addtax=True):
    """
    Create a biom table from an experiment
    input:
    expdat : Experiment
    addtax : bool
        True to add taxonomy metadata, False to not add
    output:
    table - the biom table (with taxonomy)
    """
    # init the table
    table=biom.table.Table(expdat.data,expdat.seqs,expdat.samples,type="OTU table")
    if addtax:
        # attach each sequence's taxonomy (split on ';') as observation
        # metadata; empty taxonomy becomes 'NA'
        taxmd={}
        for cseq,ctax in zip(expdat.seqs,expdat.tax):
            taxstr=str(ctax) or 'NA'
            taxmd[cseq]={'taxonomy': taxstr.split(';')}
        table.add_metadata(taxmd,axis='observation')
    return table
def savetobiom(expdat,filename,format='hdf5',addtax=True,useorigreads=True,exporthistory=True,logtransform=False):
    """
    Save an experiment to a biom table
    input:
    expdat : Experiment
    filename : string
        Name of the file to save to
    format : string
        Format of the file ('hdf5','json','txt')
    addtax : bool
        True to add taxonomy metadata, False to not add
    useorigreads : bool
        True (default) to use original number of reads, False to use normalized (sum=10k)
    exporthistory : bool
        True (default) to save also the history (to filename.commands.txt)
    logtransform : bool
        True to log transform the data, false (default) to save original data
    """
    # always export the accompanying mapping file
    savemap(expdat,filename+'.map.txt')
    if exporthistory:
        savecommands(expdat,filename+'.commands.txt')
    hs.Debug(1,'Saving biom table %s' % filename)
    if useorigreads:
        newexp=hs.toorigreads(expdat)
    else:
        newexp=expdat
    # if we need to log transform the reads
    if logtransform:
        # clip low values so log2 is always defined
        lowcutoff=1
        ldat=newexp.data
        ldat[np.where(ldat<lowcutoff)]=lowcutoff
        # BUGFIX: store the transformed data back on the experiment — the
        # np.log2 result was previously bound only to the local name, so
        # the saved table silently contained untransformed reads
        newexp.data=np.log2(ldat)
    tab=createbiomtablefromexp(newexp,addtax=addtax)
    if format=='hdf5':
        with biom.util.biom_open(filename, 'w') as f:
            tab.to_hdf5(f, "heatsequer")
    elif format=='json':
        with open(filename,'w') as f:
            tab.to_json("heatsequer",f)
    elif format=='txt':
        s=tab.to_tsv()
        with open(filename,'w') as f:
            f.write(s)
    else:
        hs.Debug(9,'file format not supported')
        return
    hs.Debug(6,'table saved to file %s' % filename)
    return
def reloadmap(expdat,mapfilename):
    """
    reload the mapping file for a loaded experiment
    input:
    expdat : Experiment
    mapfilename : string
        Name of the mapping file to reload
    output:
    newexp : Experiment
        like expdat but with fields from new map file
    """
    params=locals()
    newexp=hs.copyexp(expdat)
    mapsamples,smap,fields,mapmd5=loadmap(mapfilename)
    newexp.smap=smap
    newexp.fields=fields
    newexp.mapmd5=mapmd5
    # warn about experiment samples missing from the new mapping file
    for csamp in newexp.samples:
        if csamp in mapsamples:
            continue
        hs.Debug(7,'Sample %s not in new map!' % csamp)
    newexp.filters.append('reload map %s' % mapfilename)
    hs.addcommand(newexp,"reloadmapfile",params=params,replaceparams={'expdat':expdat})
    return newexp
def writetaxseq(expdat,filename):
    """
    write a tab delimited table of taxonomy and sequence
    (rows in reverse experiment order)
    used for rapid response summary
    input:
    expdat : Experiment
    filename : name of the tsv output file
    """
    with open(filename,'w') as fl:
        fl.write('Taxonomy\tSequence\n')
        for pos in reversed(range(len(expdat.seqs))):
            fl.write('%s\t%s\n' % (expdat.tax[pos],expdat.seqs[pos]))
def getfilemd5(filename):
    """
    get the md5 of the text file filename
    input:
    filename : str
        name of the file to calculate md5 on
    output:
    flmd5: str
        the md5 of the file filename ('' if it cannot be decoded as utf-8)
    """
    flmd5=hashlib.md5()
    # 'r' replaces the 'rU' mode removed in Python 3.11. The try wraps the
    # whole read loop: decode errors are raised while iterating the file,
    # so the old per-line try around update() could never catch them.
    try:
        with open(filename,'r',encoding='utf-8') as fl:
            for cline in fl:
                flmd5.update(cline.encode('utf-8'))
    except UnicodeDecodeError:
        hs.Debug(6,'map md5 cannot be calculated - utf problems?')
        return ''
    return flmd5.hexdigest()
| |
#
# This file is part of KoreanCodecs.
#
# Copyright(C) 2002-2003 Hye-Shik Chang <perky@FreeBSD.org>.
#
# KoreanCodecs is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# KoreanCodecs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with KoreanCodecs; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: hangul.py,v 1.2 2003/10/15 19:24:53 perky Exp $
#
class UnicodeHangulError(Exception):
    """Raised for invalid hangul input (bad jamo tuples, non-hangul chars)."""

    def __init__(self, msg):
        Exception.__init__(self, msg)
        self.msg = msg

    def __repr__(self):
        # both repr() and str() show just the bare message
        return self.msg

    __str__ = __repr__
# Empty jamo slot marker (e.g. a syllable with no jongseong in split()/join()).
Null = ''
# NOTE: removed the old Python 2 bool-compat shim that lived here. On
# Python 3 `True`/`False` are always builtins, and the fallback statements
# (`True is 1`, `False is 0`) were no-op identity comparisons rather than
# assignments, so the shim had no effect anyway.
class Jaeum:
    """Hangul consonants (jaeum) as compatibility jamo (U+3131..U+314E)."""
    Codes = ('\u3131', '\u3132', '\u3133', '\u3134', '\u3135', '\u3136',
                    #   G        GG       GS       N        NJ       NH
             '\u3137', '\u3138', '\u3139', '\u313a', '\u313b', '\u313c',
                    #   D        DD       L        LG       LM       LB
             '\u313d', '\u313e', '\u313f', '\u3140', '\u3141', '\u3142',
                    #   LS       LT       LP       LH       M        B
             '\u3143', '\u3144', '\u3145', '\u3146', '\u3147', '\u3148',
                    #   BB       BS       S        SS       NG       J
             '\u3149', '\u314a', '\u314b', '\u314c', '\u314d', '\u314e')
                    #   JJ       C        K        T        P        H
    # number of consonant jamo (30)
    Width = len(Codes)
    G, GG, GS, N, NJ, NH, D, DD, L, LG, LM, LB, LS, LT, LP, LH, M, B, \
        BB, BS, S, SS, NG, J, JJ, C, K, T, P, H = Codes
    # the 19 consonants that may start a syllable
    Choseong = [G, GG, N, D, DD, L, M, B, BB, S, SS, NG, J, JJ, C, K, T, P, H]
    # the 28 possible finals; index 0 (Null) means "no final consonant"
    Jongseong = [Null, G, GG, GS, N, NJ, NH, D, L, LG, LM, LB, LS, LT, \
        LP, LH, M, B, BS, S, SS, NG, J, C, K, T, P, H]
    # decomposition of compound consonants into their simple elements
    MultiElement = {
        GG: (G, G), GS: (G, S), NJ: (N, J), NH: (N, H), DD: (D, D),
        LG: (L, G), LM: (L, M), LB: (L, B), LS: (L, S), LT: (L, T),
        LP: (L, P), LH: (L, H), BB: (B, B), BS: (B, S), SS: (S, S),
        JJ: (J, J)
    }
class Moeum:
    """Hangul vowels (moeum) as compatibility jamo (U+314F..U+3163)."""
    Codes = ('\u314f', '\u3150', '\u3151', '\u3152', '\u3153', '\u3154',
                    #   A        AE       YA       YAE      EO       E
             '\u3155', '\u3156', '\u3157', '\u3158', '\u3159', '\u315a',
                    #   YEO      YE       O        WA       WAE      OE
             '\u315b', '\u315c', '\u315d', '\u315e', '\u315f', '\u3160',
                    #   YO       U        WEO      WE       WI       YU
             '\u3161', '\u3162', '\u3163')
                    #   EU       YI       I
    # number of vowel jamo (21)
    Width = len(Codes)
    A, AE, YA, YAE, EO, E, YEO, YE, O, WA, WAE, OE, YO, \
        U, WEO, WE, WI, YU, EU, YI, I = Codes
    # all 21 vowels can be the medial (jungseong) of a syllable
    Jungseong = list(Codes)
    # decomposition of compound vowels into their simple elements
    MultiElement = {
        AE: (A, I), YAE: (YA, I), YE: (YEO, I), WA: (O, A), WAE: (O, A, I),
        OE: (O, I), WEO: (U, EO), WE: (U, E), WI: (U, I), YI: (EU, I)
    }
# Aliases for your convenience
Choseong = Jaeum.Choseong
Jungseong = Moeum.Jungseong
Jongseong = Jaeum.Jongseong
# Export every short jamo name (G, GG, ..., A, AE, ...) at module level.
# Direct globals() assignment replaces the old exec()-based injection:
# identical effect, without generating and executing code strings.
for name, code in list(Jaeum.__dict__.items()) + list(Moeum.__dict__.items()):
    if name.isupper() and len(name) <= 3:
        globals()[name] = code
del name, code
# Unicode Hangul Syllables Characteristics
ZONE = ('\uAC00', '\uD7A3')  # precomposed syllable block U+AC00..U+D7A3
NCHOSEONG = len(Choseong)    # 19 initial consonants
NJUNGSEONG = len(Jungseong)  # 21 medial vowels
NJONGSEONG = len(Jongseong)  # 28 finals (including "none")
# first code point of each conjoining-jamo range (U+11xx block)
JBASE_CHOSEONG = '\u1100'
JBASE_JUNGSEONG = '\u1161'
JBASE_JONGSEONG = '\u11A8'
# filler characters standing in for an absent choseong/jungseong
CHOSEONG_FILLER = '\u115F'
JUNGSEONG_FILLER = '\u1160'
def _ishangul(code):
    """Return True if the single character is a hangul syllable or jamo."""
    if ZONE[0] <= code <= ZONE[1]:
        return True
    return code in Jaeum.Codes or code in Moeum.Codes
# Alternative Suffixes : do not use outside
# Maps a particle to its (after-vowel, after-consonant) pair so the correct
# form can be chosen based on the preceding syllable's final.
ALT_SUFFIXES = {
    '\uc744': ('\ub97c', '\uc744'), # reul, eul
    '\ub97c': ('\ub97c', '\uc744'), # reul, eul
    '\uc740': ('\ub294', '\uc740'), # neun, eun
    '\ub294': ('\ub294', '\uc740'), # neun, eun
    '\uc774': ('\uac00', '\uc774'), # yi, ga
    '\uac00': ('\uac00', '\uc774'), # yi, ga
    '\uc640': ('\uc640', '\uacfc'), # wa, gwa
    '\uacfc': ('\uc640', '\uacfc'), # wa, gwa
}
# Ida-Varitaion Suffixes : do not use outside
IDA_SUFFIXES = {
    '(\uc774)': ('', '\uc774'), # (yi)da
    '(\uc785)': (17, '\uc785'), # (ip)nida
    '(\uc778)': (4, '\uc778'), # (in)-
}
def isJaeum(u):
    """Return True when *u* is a non-empty string of consonant jamos only."""
    return bool(u) and all(ch in Jaeum.Codes for ch in u)
def isMoeum(u):
    """Return True when *u* is a non-empty string of vowel jamos only."""
    return bool(u) and all(ch in Moeum.Codes for ch in u)
def ishangul(u):
    """Return True when *u* is non-empty and every character is hangul
    (a precomposed syllable or a single jamo)."""
    return bool(u) and all(_ishangul(ch) for ch in u)
def join(codes):
    """Join function which makes hangul syllable from jamos.

    *codes* is a (choseong, jungseong, jongseong) triple; a lone jamo
    (missing initial or medial) is returned unchanged.
    """
    if len(codes) != 3:
        raise UnicodeHangulError("needs 3-element tuple")
    cho, jung, jong = codes
    if not (cho and jung):  # single jamo: cannot compose a full syllable
        return cho or jung
    base = (Choseong.index(cho) * NJUNGSEONG + Jungseong.index(jung)) * NJONGSEONG
    return chr(0xac00 + base + Jongseong.index(jong))
def split(code):
    """Split function which splits hangul syllable into jamos.

    A single jamo splits into (jamo, Null, Null) or (Null, jamo, Null);
    a precomposed syllable splits into its three components.
    """
    if len(code) != 1 or not _ishangul(code):
        raise UnicodeHangulError("needs 1 hangul letter")
    if code in Jaeum.Codes:
        return (code, Null, Null)
    if code in Moeum.Codes:
        return (Null, code, Null)
    offset = ord(code) - 0xac00
    rest, jong_index = divmod(offset, NJONGSEONG)
    cho_index, jung_index = divmod(rest, NJUNGSEONG)
    return (Choseong[cho_index], Jungseong[jung_index], Jongseong[jong_index])
def conjoin(s):
    """Compose a string of conjoining jamos (U+11xx block) into
    precomposed hangul syllables; other characters pass through."""
    obuff = []
    ncur = 0
    while ncur < len(s):
        c = s[ncur]
        if JBASE_CHOSEONG <= c <= '\u1112' or c == CHOSEONG_FILLER: # starts with choseong
            # A medial vowel (or filler) must follow for a full syllable.
            if len(s) > ncur+1 and JUNGSEONG_FILLER <= s[ncur+1] <= '\u1175':
                # NOTE(review): for c == CHOSEONG_FILLER this index is out of
                # the Choseong table's range -- confirm fillers are expected here.
                cho = Choseong[ord(c) - ord(JBASE_CHOSEONG)]
                jung = Jungseong[ord(s[ncur+1]) - ord(JBASE_JUNGSEONG)]
                if len(s) > ncur+2 and JBASE_JONGSEONG <= s[ncur+2] <= '\u11C2':
                    # +1 skips the empty final in slot 0 of the table.
                    jong = Jongseong[ord(s[ncur+2]) - ord(JBASE_JONGSEONG) + 1]
                    ncur += 2   # consumed jung + jong; loop tail adds 1 for cho
                else:
                    jong = Null
                    ncur += 1   # consumed jung only
                obuff.append(join([cho, jung, jong]))
            else:
                # Lone initial consonant: emit it as a compatibility jamo.
                obuff.append(join([Choseong[ord(c) - ord(JBASE_CHOSEONG)], Null, Null]))
        elif JBASE_JUNGSEONG <= c <= '\u1175':
            # Lone medial vowel.
            obuff.append(join([Null, Jungseong[ord(c) - ord(JBASE_JUNGSEONG)], Null]))
        else:
            obuff.append(c)
        ncur += 1
    return ''.join(obuff)
def disjoint(s):
    """Decompose precomposed hangul syllables in *s* into conjoining jamos
    (U+11xx block); non-hangul characters pass through unchanged."""
    obuff = []
    for c in s:
        if _ishangul(c):
            cho, jung, jong = split(c)
            if cho:
                obuff.append( chr(ord(JBASE_CHOSEONG) + Choseong.index(cho)) )
            else:
                obuff.append( CHOSEONG_FILLER )
            if jung:
                obuff.append( chr(ord(JBASE_JUNGSEONG) + Jungseong.index(jung)) )
            else:
                obuff.append( JUNGSEONG_FILLER )
            if jong:
                # -1 compensates for the empty final in slot 0 of the table;
                # a syllable with no final simply emits nothing here.
                obuff.append( chr(ord(JBASE_JONGSEONG) + Jongseong.index(jong) - 1) )
        else:
            # NOTE(review): nesting reconstructed from flattened source --
            # this else pairs with the _ishangul() test above.
            obuff.append(c)
    return ''.join(obuff)
def _has_final(c):
# for internal use only
if '\uac00' <= c <= '\ud7a3': # hangul
return 1, (ord(c) - 0xac00) % 28 > 0
else:
return 0, c in '013678.bklmnptLMNRZ'
# Iterator Emulator for ancient versions before 2.1
try:
    iter
except:
    # No builtin iter(): provide a minimal sequence iterator exposing the
    # same __next__ calling convention format() relies on below.
    class iter:
        def __init__(self, obj):
            self.obj = obj
            self.ptr = 0
        def __next__(self):
            # An IndexError past the end doubles as end-of-iteration.
            try:
                return self.obj[self.ptr]
            finally:
                self.ptr += 1
# Nested scope lambda emulation for versions before 2.2
import sys
if sys.hexversion < 0x2020000:
    # Callable object that returns whatever it captured at creation time,
    # standing in for a closure over kwargs.
    class plambda:
        def __init__(self, obj):
            self.obj = obj
        def __call__(self):
            return self.obj
else:
    # Real closures are available; format() falls back to a plain lambda.
    plambda = None
del sys
def format(fmtstr, *args, **kwargs):
    """printf-style formatting with hangul particle (josa) post-processing.

    Behaves like ``fmtstr % args`` / ``fmtstr % kwargs`` but, right after a
    substitution, rewrites a following particle (eul/reul, eun/neun, yi/ga,
    wa/gwa, or an "(x)" ida-variation marker) to the variant that matches
    the final consonant of the substituted text.
    """
    # Pick the argument source: the kwargs dict for named specs, or a
    # sequential iterator over positional args.
    if kwargs:
        argget = lambda: kwargs
        if plambda:  # pre-2.2 interpreters: emulate the closure
            argget = plambda(kwargs)
    else:
        argget = iter(args).__next__
    obuff = []                    # output fragments
    ncur = escape = fmtinpth = 0  # cursor, backslash flag, inside-%(name) flag
    ofmt = fmt = ''               # last substituted text, pending % spec
    while ncur < len(fmtstr):
        c = fmtstr[ncur]
        if escape:
            # Character after a backslash is emitted literally.
            obuff.append(c)
            escape = 0
            ofmt = ''
        elif c == '\\':
            escape = 1
        elif fmt:
            # Inside a % conversion spec: accumulate until the alphabetic
            # conversion character, then substitute.
            fmt += c
            if not fmtinpth and c.isalpha():
                ofmt = fmt % argget()
                obuff.append(ofmt)
                fmt = ''
            elif fmtinpth and c == ')':
                fmtinpth = 0
            elif c == '(':
                fmtinpth = 1
            elif c == '%':
                # NOTE(review): nesting reconstructed from flattened source;
                # this branch makes "%%" emit a literal percent sign.
                obuff.append('%')
        elif c == '%':
            # Start of a new conversion spec.
            fmt += c
            ofmt = ''
        else:
            if ofmt and c in ALT_SUFFIXES:
                # Particle right after a substitution: pick the variant that
                # matches the final-consonant status of the last character.
                obuff.append(ALT_SUFFIXES[c][
                    _has_final(ofmt[-1])[1] and 1 or 0
                ])
            elif ofmt and fmtstr[ncur:ncur+3] in IDA_SUFFIXES:
                # Three-character "(x)" ida-variation suffix.
                sel = IDA_SUFFIXES[fmtstr[ncur:ncur+3]]
                ishan, hasfinal = _has_final(ofmt[-1])
                if hasfinal:
                    obuff.append(sel[1])
                elif ishan:
                    if sel[0]:
                        # Fuse the jongseong offset into the last syllable.
                        obuff[-1] = obuff[-1][:-1] + chr(ord(ofmt[-1]) + sel[0])
                else:
                    obuff.append(sel[0] and sel[1])
                ncur += 2  # skip the two extra marker characters
            else:
                obuff.append(c)
                ofmt = ''
        ncur += 1
    return ''.join(obuff)
| |
import numpy as np
import pandas as pd
import pytest
import pymongo
from csv import DictReader
from datetime import datetime, timedelta
import mongots
# Module-level setup: one shared pymongo client; the test database is
# dropped up front so every run starts from a clean slate.  The tests in
# this module are order-dependent (inserts happen before queries).
mongo_client = pymongo.MongoClient()
mongo_client.drop_database('TestDb')
@pytest.fixture
def client():
    """mongots client wrapping the shared pymongo connection."""
    return mongots.MongoTSClient(mongo_client=mongo_client)
@pytest.fixture
def db(client):
    """The TestDb database (dropped once at module import)."""
    return client.TestDb
@pytest.fixture
def collection(db):
    """The 'temperatures' time-series collection."""
    return db.temperatures
def test_are_mongots_instances(client, db, collection):
    """Fixtures expose the mongots wrapper types."""
    assert isinstance(client, mongots.MongoTSClient)
    assert isinstance(db, mongots.MongoTSDatabase)
    assert isinstance(collection, mongots.MongoTSCollection)
def test_contains_mongo_instances(client, db, collection):
    """Each wrapper holds the underlying pymongo object."""
    assert isinstance(client._client, pymongo.MongoClient)
    assert isinstance(db._database, pymongo.database.Database)
    assert isinstance(collection._collection, pymongo.collection.Collection)
# (value, timestamp, tags) samples: three measurements on 2010-07-23 and
# one on 2010-07-25, all tagged with the same city.
temperatures_in_paris = [
    (35.6, datetime(2010, 7, 23, 13, 45), {'city': 'Paris'}),
    (36.8, datetime(2010, 7, 23, 15), {'city': 'Paris'}),
    (29, datetime(2010, 7, 23, 23, 57), {'city': 'Paris'}),
    (18, datetime(2010, 7, 25, 20), {'city': 'Paris'}),
]
@pytest.mark.parametrize('value, timestamp, tags', temperatures_in_paris)
def test_insert_temperatures_in_paris_one_by_one(
    collection,
    value,
    timestamp,
    tags,
):
    """insert_one reports success for each sample measurement."""
    assert collection.insert_one(value, timestamp, tags=tags)
def test_right_number_of_documents_were_inserted(collection):
    """All four measurements aggregate into a single (yearly) document."""
    assert 1 == collection._collection.count({})
    assert 1 == collection._collection.count({'city': 'Paris'})
def test_yearly_temperatures_in_paris_were_correctly_inserted(collection):
    """Year-level count/sum/sum-of-squares match the four inserts."""
    year_document = collection._collection.find_one({
        'city': 'Paris',
    }, {
        'count': 1,
        'sum': 1,
        'sum2': 1,
    })
    assert 4 == year_document['count']
    assert 35.6 + 36.8 + 29 + 18 == year_document['sum']
    assert 35.6**2 + 36.8**2 + 29**2 + 18**2 == year_document['sum2']
def test_monthly_temperatures_in_paris_were_correctly_inserted(collection):
    """Only July (index 6) aggregates the four inserts; other months empty."""
    months_document = collection._collection.find_one({
        'city': 'Paris',
    }, {
        'months': 1,
    })['months']
    assert 12 == len(months_document)
    expected = {
        6: (4, 35.6 + 36.8 + 29 + 18, 35.6**2 + 36.8**2 + 29**2 + 18**2),
    }
    for month_index, month_document in enumerate(months_document):
        count, total, total2 = expected.get(month_index, (0, 0, 0))
        assert month_document['count'] == count
        assert month_document['sum'] == total
        assert month_document['sum2'] == total2
def test_daily_temperatures_in_paris_were_correctly_inserted(collection):
    """Day index 22 (the 23rd) holds three measures, index 24 one."""
    days_document = collection._collection.find_one({
        'city': 'Paris',
    }, {
        'months': 1,
    })['months'][6]['days']
    assert 31 == len(days_document)
    expected = {
        22: (3, 35.6 + 36.8 + 29, 35.6**2 + 36.8**2 + 29**2),
        24: (1, 18, 18**2),
    }
    for day_index, day_document in enumerate(days_document):
        count, total, total2 = expected.get(day_index, (0, 0, 0))
        assert day_document['count'] == count
        assert day_document['sum'] == total
        assert day_document['sum2'] == total2
def test_hourly_temperatures_in_paris_were_correctly_inserted(collection):
    """On July 23rd a single measure landed in hours 13, 15 and 23."""
    hours_document = collection._collection.find_one({
        'city': 'Paris',
    }, {
        'months': 1,
    })['months'][6]['days'][22]['hours']
    assert 24 == len(hours_document)
    expected = {
        13: (1, 35.6, 35.6**2),
        15: (1, 36.8, 36.8**2),
        23: (1, 29, 29**2),
    }
    for hour_index, hour_document in enumerate(hours_document):
        count, total, total2 = expected.get(hour_index, (0, 0, 0))
        assert hour_document['count'] == count
        assert hour_document['sum'] == total
        assert hour_document['sum2'] == total2
@pytest.fixture
def big_collection(db):
    """Collection receiving one value per day over several months."""
    return db.lotOfValues
def test_insert_constants_per_month_succeeds(big_collection):
    """Insert one value per day from Jan 1st through Aug 31st 2010; the
    value is a per-month constant: (month - 1) * 5."""
    start = datetime(2010, 1, 1)
    end = datetime(2010, 9, 1)
    for offset in range((end - start).days):
        ts = start + timedelta(days=offset)
        assert big_collection.insert_one((ts.month - 1) * 5, ts)
def test_query_retrieve_expected_constant_per_month(big_collection):
    """Monthly aggregation yields per-month count/mean/std/min/max; the
    empty September row yields NaN stats and inf/-inf extrema."""
    df = big_collection.query(
        datetime(2010, 1, 1),
        datetime(2010, 9, 1),
        aggregateby='1m',
    )
    assert (9, 5) == df.shape
    assert list(df['count']) == [31, 28, 31, 30, 31, 30, 31, 31, 0]
    assert list(df['mean'])[:8] == [0, 5, 10, 15, 20, 25, 30, 35]
    assert np.isnan(list(df['mean'])[8])
    assert list(df['std'])[:8] == [0, 0, 0, 0, 0, 0, 0, 0]
    assert np.isnan(list(df['std'])[8])
    assert list(df['min'])[:8] == [0, 5, 10, 15, 20, 25, 30, 35]
    assert list(df['min'])[8] == np.inf
    assert list(df['max'])[:8] == [0, 5, 10, 15, 20, 25, 30, 35]
    assert list(df['max'])[8] == -np.inf
@pytest.fixture
def weather_data_pressure():
    """Load (pressure, timestamp, {'city': ...}) triples from the sample CSV.

    Rows with a missing or malformed datetime, pressure or city are skipped
    on purpose: the fixture is best-effort and the insert test asserts the
    expected surviving row count (6348).
    """
    with open('test/data/weather_data.csv') as file:
        reader = DictReader(file, delimiter=';')
        result = []
        for row in reader:
            try:
                timestamp = datetime.strptime(
                    row['datetime'],
                    '%Y-%m-%d %H:%M:%S'
                )
                pressure = float(row['atmospheric pressure'])
                city = row['city']
                result.append((pressure, timestamp, {'city': city}))
            except (KeyError, TypeError, ValueError):
                # Only parsing/shape errors are expected; anything else
                # should surface instead of being silently swallowed.
                pass
    return result
@pytest.fixture
def pressure_collection(db):
    """Collection receiving the weather-data pressure samples."""
    return db.atmosphericPressure
def test_insert_pressure_succeeds(pressure_collection, weather_data_pressure):
    """All parsed CSV rows insert successfully (and there are 6348 of them)."""
    assert 6348 == len(weather_data_pressure)
    for pressure, timestamp, tags in weather_data_pressure:
        assert pressure_collection.insert_one(pressure, timestamp, tags=tags)
# Each case is (query args, query kwargs, expected frame description).
@pytest.mark.parametrize('args, kwargs, expected', [(
    # 1996 per year
    (datetime(1996, 1, 1), datetime(1996, 12, 31)),
    {'aggregateby': '1y'},
    {
        'index': pd.Index([datetime(1996, 1, 1)], name='datetime'),
        'data': [
            [6348, 1001.0, 1074.2, 1.015427520478e+03, 5.8321529378],
        ],
    },
), (
    # 1996 per month
    (datetime(1996, 1, 1), datetime(1996, 12, 31)),
    {'aggregateby': '1m'},
    {
        'index': pd.Index([
            datetime(1996, 1, 1),
            datetime(1996, 2, 1),
            datetime(1996, 3, 1),
            datetime(1996, 4, 1),
            datetime(1996, 5, 1),
            datetime(1996, 6, 1),
            datetime(1996, 7, 1),
            datetime(1996, 8, 1),
            datetime(1996, 9, 1),
            datetime(1996, 10, 1),
            datetime(1996, 11, 1),
            datetime(1996, 12, 1),
        ], name='datetime'),
        'data': [
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [3431, 1001.0, 1074.2, 1016.739872, 6.253782],
            [2917, 1002.0, 1028.1, 1013.883922, 4.859204],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
        ],
    },
), (
    # 1996 summer per month and per city
    (datetime(1996, 7, 15), datetime(1996, 9, 15)),
    {'aggregateby': '1m', 'groupby': ['city']},
    {
        'index': pd.MultiIndex.from_product([
            [datetime(1996, 7, 1), datetime(1996, 8, 1), datetime(1996, 9, 1)],
            ['istanbul', 'london', 'paris'],
        ], names=['datetime', 'city']),
        'data': [
            [1244, 1001, 1074.2, 1014.047186, 4.207450],
            [780, 1001, 1037.3, 1017.958462, 7.899298],
            [1407, 1006.1, 1028.1, 1018.445060, 5.914784],
            [1063, 1003, 1019, 1012.393979, 2.477956],
            [639, 1002, 1028.1, 1014.007668, 6.711384],
            [1215, 1004.1, 1026.1, 1015.122387, 4.913515],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
            [0, np.inf, -np.inf, np.nan, np.nan],
        ],
    },
), (
    # 1996 July per day and per city
    (datetime(1996, 7, 15), datetime(1996, 7, 20)),
    {'aggregateby': '1d', 'groupby': ['city']},
    {
        'index': pd.MultiIndex.from_product([[
            datetime(1996, 7, 15),
            datetime(1996, 7, 16),
            datetime(1996, 7, 17),
            datetime(1996, 7, 18),
            datetime(1996, 7, 19),
            datetime(1996, 7, 20),
        ], [
            'istanbul',
            'london',
            'paris'
        ]], names=['datetime', 'city']),
        'data': [
            [43, 1013.2, 1017.9, 1015.558140, 1.259980],
            [23, 1027.1, 1032.2, 1029.286957, 1.153936],
            [48, 1024.0, 1027.1, 1025.845833, 0.640949],
            [37, 1009.1, 1074.2, 1013.621622, 10.251780],
            [25, 1030.1, 1033.2, 1032.356000, 0.975738],
            [47, 1025.1, 1028.1, 1026.695745, 1.044939],
            [43, 1010.2, 1016.3, 1012.974419, 1.654043],
            [27, 1027.1, 1037.3, 1029.459259, 2.167138],
            [45, 1022.0, 1026.1, 1024.371111, 1.340584],
            [43, 1015.2, 1016.3, 1015.993023, 0.493396],
            [29, 1025.1, 1028.1, 1026.893103, 0.713385],
            [48, 1023.0, 1024.0, 1023.729167, 0.444390],
            [42, 1016.3, 1017.3, 1016.776190, 0.499433],
            [24, 1025.1, 1028.1, 1026.350000, 0.968246],
            [44, 1022.0, 1025.1, 1023.938636, 0.911596],
            [41, 1017.3, 1019, 1018.141463, 0.693515],
            [25, 1023.0, 1025.1, 1024.372000, 0.831153],
            [45, 1022.0, 1024.0, 1023.022222, 0.614234],
        ],
    },
), (
    # 1996 July 16th, per hours
    (datetime(1996, 7, 16, 11), datetime(1996, 7, 16, 15)),
    {'aggregateby': '1h'},
    {
        'index': pd.Index([
            datetime(1996, 7, 16, 11),
            datetime(1996, 7, 16, 12),
            datetime(1996, 7, 16, 13),
            datetime(1996, 7, 16, 14),
            datetime(1996, 7, 16, 15),
        ], name='datetime'),
        'data': [
            [5, 1012.2, 1033.2, 1022.760, 8.821020],
            [5, 1012.2, 1033.2, 1022.760, 8.821020],
            [4, 1012.2, 1033.2, 1026.425, 8.582067],
            [5, 1011.2, 1033.2, 1021.960, 9.063465],
            [5, 1011.2, 1032.2, 1021.560, 8.708295],
        ],
    },
), (
    # 1996 July 16th, per hours in Paris
    (datetime(1996, 7, 16, 11), datetime(1996, 7, 16, 15)),
    {'aggregateby': '1h', 'tags': {'city': 'paris'}},
    {
        'index': pd.Index([
            datetime(1996, 7, 16, 11),
            datetime(1996, 7, 16, 12),
            datetime(1996, 7, 16, 13),
            datetime(1996, 7, 16, 14),
            datetime(1996, 7, 16, 15),
        ], name='datetime'),
        'data': [
            [2, 1028.1, 1028.1, 1028.1, 0.0],
            [2, 1028.1, 1028.1, 1028.1, 0.0],
            [1, 1027.1, 1027.1, 1027.1, 0.0],
            [2, 1027.1, 1027.1, 1027.1, 0.0],
            [2, 1026.1, 1027.1, 1026.6, 0.5],
        ],
    },
), (
    # 1996 July 16th, per hours everywhere but in Paris
    (datetime(1996, 7, 16, 11), datetime(1996, 7, 16, 15)),
    {'aggregateby': '1h', 'tags': {'city': {'$ne': 'paris'}}},
    {
        'index': pd.Index([
            datetime(1996, 7, 16, 11),
            datetime(1996, 7, 16, 12),
            datetime(1996, 7, 16, 13),
            datetime(1996, 7, 16, 14),
            datetime(1996, 7, 16, 15),
        ], name='datetime'),
        'data': [
            [3, 1012.2, 1033.2, 1019.200000, 9.899495],
            [3, 1012.2, 1033.2, 1019.200000, 9.899495],
            [3, 1012.2, 1033.2, 1026.200000, 9.899495],
            [3, 1011.2, 1033.2, 1018.533333, 10.370899],
            [3, 1011.2, 1032.2, 1018.200000, 9.899495],
        ],
    },
), (
    # 1996 July 16th, per hours and per city in Paris and London
    (datetime(1996, 7, 16, 11), datetime(1996, 7, 16, 13)),
    {
        'aggregateby': '1h',
        'groupby': ['city'],
        'tags': {'city': {'$in': ['paris', 'london']}},
    }, {
        'index': pd.MultiIndex.from_product([[
            datetime(1996, 7, 16, 11),
            datetime(1996, 7, 16, 12),
            datetime(1996, 7, 16, 13),
        ], [
            'london',
            'paris',
        ]], names=['datetime', 'city']),
        'data': [
            [1, 1033.2, 1033.2, 1033.2, 0.0],
            [2, 1028.1, 1028.1, 1028.1, 0.0],
            [1, 1033.2, 1033.2, 1033.2, 0.0],
            [2, 1028.1, 1028.1, 1028.1, 0.0],
            [2, 1033.2, 1033.2, 1033.2, 0.0],
            [1, 1027.1, 1027.1, 1027.1, 0.0],
        ],
    },
), (
    # 1996 July 16th, per 2 hours in Paris and London
    (datetime(1996, 7, 16, 11), datetime(1996, 7, 16, 13)),
    {
        'aggregateby': '2h',
        'tags': {'city': {'$in': ['paris', 'london']}},
    }, {
        'index': pd.Index([
            datetime(1996, 7, 16, 10),
            datetime(1996, 7, 16, 12),
        ], name='datetime'),
        'data': [
            [3, 1028.1, 1033.2, 1029.8, 2.40416305609],
            [6, 1027.1, 1033.2, 1030.48333333, 2.73704016938],
        ],
    },
), (
    # 1996 July 16th, per 2 hours per city in Paris and London
    (datetime(1996, 7, 16, 11), datetime(1996, 7, 16, 13)),
    {
        'aggregateby': '2h',
        'groupby': ['city'],
        'tags': {'city': {'$in': ['paris', 'london']}},
    }, {
        'index': pd.MultiIndex.from_product([[
            datetime(1996, 7, 16, 10),
            datetime(1996, 7, 16, 12),
        ], [
            'london',
            'paris',
        ]], names=['datetime', 'city']),
        'data': [
            [1, 1033.2, 1033.2, 1033.2, 0.0],
            [2, 1028.1, 1028.1, 1028.1, 0.0],
            [3, 1033.2, 1033.2, 1033.2, 0.0],
            [3, 1027.1, 1028.1, 1027.766666, 0.471404],
        ],
    },
), (
    # no data for the selected range
    (datetime(1995, 7, 10), datetime(1995, 8, 10)),
    {'aggregateby': '1d'},
    {
        'index': pd.Index([], name='datetime'),
        'data': [],
    },
), (
    # end date before start date
    (datetime(1996, 7, 10), datetime(1996, 7, 9)),
    {'aggregateby': '1d'},
    {
        'index': pd.Index([], name='datetime'),
        'data': [],
    },
)])
def test_pressure_queries(pressure_collection, args, kwargs, expected):
    """query() returns a frame equal to the hand-computed expectation."""
    actual_df = pressure_collection.query(*args, **kwargs)
    expected_df = pd.DataFrame(
        index=expected['index'],
        columns=['count', 'min', 'max', 'mean', 'std'],
        data=expected['data'],
    )
    pd.testing.assert_frame_equal(actual_df, expected_df)
def test_get_tags(pressure_collection):
    """get_tags lists every tag value present in the collection."""
    assert pressure_collection.get_tags() == {
        'city': ['paris', 'london', 'istanbul'],
    }
def test_get_timerange(pressure_collection):
    """get_timerange returns the first and last inserted timestamps."""
    min_datetime, max_datetime = pressure_collection.get_timerange()
    assert min_datetime == datetime(1996, 7, 1, 1)
    assert max_datetime == datetime(1996, 8, 26, 21, 30)
def test_get_collections(db):
    """get_collections summarises the three collections created above."""
    collections = db.get_collections()
    assert 3 == len(collections)
    assert collections[0] == {
        'collection_name': 'temperatures',
        'count': 4,
        'timerange': (
            datetime(2010, 7, 23, 13, 45),
            datetime(2010, 7, 25, 20),
        ),
        'tags': {'city': ['Paris']},
    }
    assert collections[1] == {
        'collection_name': 'lotOfValues',
        'count': 243,
        'timerange': (
            datetime(2010, 1, 1),
            datetime(2010, 8, 31),
        ),
        'tags': {},
    }
    assert collections[2] == {
        'collection_name': 'atmosphericPressure',
        'count': 6348,
        'timerange': (
            datetime(1996, 7, 1, 1),
            datetime(1996, 8, 26, 21, 30),
        ),
        'tags': {
            'city': ['paris', 'london', 'istanbul'],
        },
    }
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
import socket
import sys
from pypjlink import protocol
class ProjectorError(Exception):
    """Raised when the projector answers a command with a PJLink error."""
    pass
def reverse_dict(d):
    """Return a new dict mapping each value of *d* back to its key."""
    return {value: key for key, value in d.items()}

# PJLink POWR parameter values.
POWER_STATES = {
    'off': '0',
    'on': '1',
    'cooling': '2',
    'warm-up': '3',
}
POWER_STATES_REV = reverse_dict(POWER_STATES)

# PJLink INPT source classes.
SOURCE_TYPES = {
    'RGB': '1',
    'VIDEO': '2',
    'DIGITAL': '3',
    'STORAGE': '4',
    'NETWORK': '5',
}
SOURCE_TYPES_REV = reverse_dict(SOURCE_TYPES)

# AVMT bit flags.
MUTE_VIDEO = 1
MUTE_AUDIO = 2
# AVMT response -> (video muted, audio muted).
MUTE_STATES_REV = {
    '11': (True, False),
    '21': (False, True),
    '31': (True, True),
    '30': (False, False),
}

# ERST digit -> severity.
ERROR_STATES_REV = {
    '0': 'ok',
    '1': 'warning',
    '2': 'error',
}
class Projector(object):
    """PJLink class-1 projector client speaking over a read/write
    text stream (normally a socket file, see from_address)."""

    def __init__(self, f):
        # f: file-like object in text mode with '\r' newlines.
        self.f = f

    @classmethod
    def from_address(cls, address, port=4352):
        """Build a Projector from an IP address (default PJLink port 4352)."""
        sock = socket.socket()
        sock.connect((address, port))
        # in python 3 I need to specify newline, otherwise read hangs
        # in "PJLINK 0\r"
        # I expect socket file to return byte strings in python 2 and
        # unicode strings in python 3
        if sys.version_info.major == 2:
            f = sock.makefile()
        else:
            f = sock.makefile(mode='rw', newline='\r')
        return cls(f)

    def authenticate(self, password=None):
        """Run the PJLink connection handshake.

        Returns None when no authentication is required, True on success,
        False on a failed authentication.  *password* may be a string or a
        zero-argument callable producing one.
        """
        # I'm just implementing the authentication scheme designed in the
        # protocol. Don't take this as any kind of assurance that it's secure.
        data = protocol.read(self.f, 9)
        assert data[:7] == 'PJLINK '
        security = data[7]
        if security == '0':
            # no authentication required
            return None
        data += protocol.read(self.f, 9)
        assert security == '1'
        assert data[8] == ' '
        salt = data[9:17]
        assert data[17] == '\r'
        if password is None:
            raise RuntimeError('projector needs a password')
        if callable(password):
            password = password()
        pass_data = (salt + password).encode('utf-8')
        pass_data_md5 = hashlib.md5(pass_data).hexdigest()
        # we *must* send a command to complete the procedure,
        # so we just get the power state.
        cmd_data = protocol.to_binary('POWR', '?')
        self.f.write(pass_data_md5 + cmd_data)
        self.f.flush()
        # read the response, see if it's a failed auth
        data = protocol.read(self.f, 7)
        if data == 'PJLINK ':
            # should be a failed auth if we get that
            data += protocol.read(self.f, 5)
            assert data == 'PJLINK ERRA\r'
            # it definitely is
            return False
        # good auth, so we should get a reply to the command we sent
        body, param = protocol.parse_response(self.f, data)
        # make sure we got a sensible response back
        assert body == 'POWR'
        if param in protocol.ERRORS:
            raise ProjectorError(protocol.ERRORS[param])
        # but we don't care about the value if we did
        return True

    def get(self, body):
        """Send a query ('?') for *body* and return the reply parameter."""
        success, response = protocol.send_command(self.f, body, '?')
        if not success:
            raise ProjectorError(response)
        return response

    def set(self, body, param):
        """Send a set command and require an 'OK' acknowledgement."""
        success, response = protocol.send_command(self.f, body, param)
        if not success:
            raise ProjectorError(response)
        assert response == 'OK'

    # Power

    def get_power(self):
        """Return 'off', 'on', 'cooling' or 'warm-up'."""
        param = self.get('POWR')
        return POWER_STATES_REV[param]

    def set_power(self, status, force=False):
        # Only 'off'/'on' may be requested; force=True skips the check.
        if not force:
            assert status in ('off', 'on')
        self.set('POWR', POWER_STATES[status])

    # Input

    def get_input(self):
        """Return the active input as (source name, input number)."""
        param = self.get('INPT')
        source, number = param
        source = SOURCE_TYPES_REV[source]
        number = int(number)
        return (source, number)

    def set_input(self, source, number):
        """Select input *number* on source type *source* (e.g. 'RGB', 1)."""
        source = SOURCE_TYPES[source]
        number = str(number)
        assert number in '123456789'
        self.set('INPT', source + number)

    # A/V mute

    def get_mute(self):
        """Return (video muted, audio muted)."""
        param = self.get('AVMT')
        return MUTE_STATES_REV[param]

    def set_mute(self, what, state):
        """Mute/unmute video, audio, or both (MUTE_VIDEO | MUTE_AUDIO)."""
        assert what in (MUTE_VIDEO, MUTE_AUDIO, MUTE_VIDEO | MUTE_AUDIO)
        what = str(what)
        assert what in '123'
        state = '1' if state else '0'
        self.set('AVMT', what + state)

    # Errors

    def get_errors(self):
        """Return {'fan': 'ok'|'warning'|'error', 'lamp': ..., ...}."""
        param = self.get('ERST')
        errors = 'fan lamp temperature cover filter other'.split()
        assert len(param) == len(errors)
        return dict((key, ERROR_STATES_REV[value]) for key, value in zip(errors, param))

    # Lamps

    def get_lamps(self):
        """Return a list of (cumulative hours, is on) per lamp (max 8)."""
        param = self.get('LAMP')
        assert len(param) <= 65
        values = param.split(' ')
        assert len(values) <= 16 and len(values) % 2 == 0
        lamps = []
        for time, state in zip(values[::2], values[1::2]):
            time = int(time)
            state = bool(int(state))
            lamps.append((time, state))
        assert len(lamps) <= 8
        return lamps

    # Input list

    def get_inputs(self):
        """Return the available inputs as (source name, number) tuples."""
        param = self.get('INST')
        assert len(param) <= 95
        values = param.split(' ')
        assert len(values) <= 50
        inputs = []
        for value in values:
            source, number = value
            source = SOURCE_TYPES_REV[source]
            assert number in '123456789'
            number = int(number)
            inputs.append((source, number))
        return inputs

    # Projector info

    def get_name(self):
        """Return the projector's configured name (<= 64 chars)."""
        param = self.get('NAME')
        assert len(param) <= 64
        return param

    def get_manufacturer(self):
        """Return the manufacturer string (<= 32 chars)."""
        param = self.get('INF1')
        assert len(param) <= 32
        # stupidly, this is not defined as utf-8 in the spec. :(
        return param

    def get_product_name(self):
        """Return the product-name string (<= 32 chars)."""
        param = self.get('INF2')
        assert len(param) <= 32
        # stupidly, this is not defined as utf-8 in the spec. :(
        return param

    def get_other_info(self):
        """Return the free-form info string (<= 32 chars)."""
        param = self.get('INFO')
        assert len(param) <= 32
        return param

    # TODO: def get_class(self): self.get('CLSS')
    # once we know that class 2 is, and how to deal with it
| |
__author__ = 'saeedamen' # Saeed Amen / saeed@thalesians.com
#
# Copyright 2015 Thalesians Ltd. - http://www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
LoaderTemplate
Abstract class for various data source loaders.
"""
import abc
import copy
from pythalesians.util.configmanager import ConfigManager
class LoaderTemplate:
    """Abstract base for market-data loaders.

    Subclasses implement load_ticker/kill_session; this base provides the
    translation between library ("Thalesians") tickers/fields and the
    vendor-specific ones, using ConfigManager mappings when available.
    """

    def __init__(self):
        # ConfigManager is optional: without it the translate_* helpers
        # simply pass tickers/fields through untouched.
        self.config = None

        try:
            self.config = ConfigManager()
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            pass

    @abc.abstractmethod
    def load_ticker(self, time_series_request):
        """
        load_ticker - Retrieves market data from external data source

        Parameters
        ----------
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc

        Returns
        -------
        DataFrame
        """
        return

    # to be implemented by subclasses
    @abc.abstractmethod
    def kill_session(self):
        """Close any open vendor session (subclass responsibility)."""
        return

    def construct_vendor_time_series_request(self, time_series_request):
        """
        construct_vendor_time_series_request - creates a TimeSeriesRequest with the vendor tickers

        Parameters
        ----------
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc

        Returns
        -------
        TimeSeriesRequest
        """
        symbols_vendor = self.translate_to_vendor_ticker(time_series_request)
        fields_vendor = self.translate_to_vendor_field(time_series_request)

        # Shallow copy so the caller's request object is left untouched.
        time_series_request_vendor = copy.copy(time_series_request)
        time_series_request_vendor.tickers = symbols_vendor
        time_series_request_vendor.fields = fields_vendor

        return time_series_request_vendor

    def translate_to_vendor_field(self, time_series_request):
        """
        translate_to_vendor_field - Converts all the fields from Thalesians fields to vendor fields

        Parameters
        ----------
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc

        Returns
        -------
        List of Strings
        """
        # Explicit per-request overrides win over configuration lookups.
        if hasattr(time_series_request, 'vendor_fields'):
            return time_series_request.vendor_fields

        source = time_series_request.data_source
        fields_list = time_series_request.fields

        if isinstance(fields_list, str):
            fields_list = [fields_list]

        # No configuration files loaded: pass fields through untouched.
        if self.config is None:
            return fields_list

        return [self.config.convert_library_to_vendor_field(source, field)
                for field in fields_list]

    # translate Thalesians ticker to vendor ticker
    def translate_to_vendor_ticker(self, time_series_request):
        """
        translate_to_vendor_ticker - Converts all the tickers from Thalesians tickers to vendor tickers

        Parameters
        ----------
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc

        Returns
        -------
        List of Strings
        """
        # Explicit per-request overrides win over configuration lookups.
        if hasattr(time_series_request, 'vendor_tickers'):
            return time_series_request.vendor_tickers

        category = time_series_request.category
        source = time_series_request.data_source
        freq = time_series_request.freq
        cut = time_series_request.cut
        tickers_list = time_series_request.tickers

        if isinstance(tickers_list, str):
            tickers_list = [tickers_list]

        # No configuration files loaded: pass tickers through untouched.
        if self.config is None:
            return tickers_list

        return [self.config.convert_library_to_vendor_ticker(category, source, freq, cut, ticker)
                for ticker in tickers_list]

    def translate_from_vendor_field(self, vendor_fields_list, time_series_request):
        """
        translate_from_vendor_field - Converts all the fields from vendor fields to Thalesians fields

        Parameters
        ----------
        vendor_fields_list : str or list of str
            vendor field names to convert
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc

        Returns
        -------
        List of Strings
        """
        data_source = time_series_request.data_source

        if isinstance(vendor_fields_list, str):
            vendor_fields_list = [vendor_fields_list]

        # If the request carries an explicit vendor-to-library mapping, use
        # it; unknown vendor fields are passed through unchanged (this was
        # previously a bare except swallowing the KeyError).
        if hasattr(time_series_request, 'vendor_fields'):
            mapping = dict(zip(time_series_request.vendor_fields,
                               time_series_request.fields))
            return [mapping.get(vendor_field, vendor_field)
                    for vendor_field in vendor_fields_list]

        # Otherwise use the stored configuration files.
        return [self.config.convert_vendor_to_library_field(data_source, vendor_field)
                for vendor_field in vendor_fields_list]

    # translate vendor ticker to Thalesians ticker
    def translate_from_vendor_ticker(self, vendor_tickers_list, time_series_request):
        """
        translate_from_vendor_ticker - Converts all the tickers from vendor tickers to Thalesians tickers

        Parameters
        ----------
        vendor_tickers_list : str or list of str
            vendor tickers to convert
        time_series_request : TimeSeriesRequest
            contains all the various parameters detailing time series start and finish, tickers etc

        Returns
        -------
        List of Strings
        """
        # Explicit mapping on the request wins; an unknown vendor ticker
        # raises KeyError, as before.
        if hasattr(time_series_request, 'vendor_tickers'):
            mapping = dict(zip(time_series_request.vendor_tickers,
                               time_series_request.tickers))
            return [mapping[vendor_ticker] for vendor_ticker in vendor_tickers_list]

        data_source = time_series_request.data_source

        if isinstance(vendor_tickers_list, str):
            vendor_tickers_list = [vendor_tickers_list]

        # No configuration files loaded: pass tickers through untouched.
        if self.config is None:
            return vendor_tickers_list

        return [self.config.convert_vendor_to_library_ticker(data_source, vendor_ticker)
                for vendor_ticker in vendor_tickers_list]
| |
import re
import sys
from struct import Struct
from lib.tarantool_connection import TarantoolConnection
from internal.compat import bytes_to_str, str_to_bytes
# Memcached binary-protocol header: magic, opcode, key length, extras
# length, data type, status/vbucket, total body length, opaque, cas.
HEADER = '!BBHBBHLLQ'
HEADER_STRUCT = Struct(HEADER)
HEADER_SIZE = 24  # packed size of HEADER: 1+1+2+1+1+2+4+4+8

# Skip to the "total body length" field of a packed header.
LENGTH = "!8xL"
LENGTH_STRUCT = Struct(LENGTH)

# 64-bit counter value carried by incr/decr responses.
INDECR = "!Q"
INDECR_STRUCT = Struct(INDECR)

MAGIC = {
    'request' : 0x80,
    'response': 0x81
}

# Operation: ID, header type, extension length, key length, val length
# key/val length: 0 means nothing must be. 'None' means should be presented
COMMANDS = {
    'get'        : [0x00, Struct(HEADER + ''),    0,  None, 0   ],
    'set'        : [0x01, Struct(HEADER + 'LL'),  8,  None, None],
    'add'        : [0x02, Struct(HEADER + 'LL'),  8,  None, None],
    'replace'    : [0x03, Struct(HEADER + 'LL'),  8,  None, None],
    'delete'     : [0x04, Struct(HEADER + ''),    0,  None, 0   ],
    'incr'       : [0x05, Struct(HEADER + 'QQL'), 20, None, 0   ],
    'decr'       : [0x06, Struct(HEADER + 'QQL'), 20, None, 0   ],
    'quit'       : [0x07, Struct(HEADER + ''),    0,  0,    0   ],
    'flush'      : [0x08, Struct(HEADER + 'I'),   4,  0,    0   ],
    'getq'       : [0x09, Struct(HEADER + ''),    0,  None, 0   ],
    'noop'       : [0x0a, Struct(HEADER + ''),    0,  0,    0   ],
    'version'    : [0x0b, Struct(HEADER + ''),    0,  0,    0   ],
    'getk'       : [0x0c, Struct(HEADER + ''),    0,  None, 0   ],
    'getkq'      : [0x0d, Struct(HEADER + ''),    0,  None, 0   ],
    'append'     : [0x0e, Struct(HEADER + ''),    0,  None, None],
    'prepend'    : [0x0f, Struct(HEADER + ''),    0,  None, None],
    'stat'       : [0x10, Struct(HEADER + ''),    0,  None, 0   ],
    'setq'       : [0x11, Struct(HEADER + 'LL'),  8,  None, None],
    'addq'       : [0x12, Struct(HEADER + 'LL'),  8,  None, None],
    'replaceq'   : [0x13, Struct(HEADER + 'LL'),  8,  None, None],
    'deleteq'    : [0x14, Struct(HEADER + ''),    0,  None, 0   ],
    'incrq'      : [0x15, Struct(HEADER + 'QQL'), 20, None, 0   ],
    'decrq'      : [0x16, Struct(HEADER + 'QQL'), 20, None, 0   ],
    'quitq'      : [0x17, Struct(HEADER + ''),    0,  0,    0   ],
    'flushq'     : [0x18, Struct(HEADER + 'I'),   4,  0,    0   ],
    'appendq'    : [0x19, Struct(HEADER + ''),    0,  None, None],
    'prependq'   : [0x1A, Struct(HEADER + ''),    0,  None, None],
    'touch'      : [0x1C, Struct(HEADER + 'L'),   4,  None, 0   ],
    'gat'        : [0x1D, Struct(HEADER + 'L'),   4,  None, 0   ],
    'gatq'       : [0x1E, Struct(HEADER + 'L'),   4,  None, 0   ],
    'gatk'       : [0x23, Struct(HEADER + 'L'),   4,  None, 0   ],
    'gatkq'      : [0x24, Struct(HEADER + 'L'),   4,  None, 0   ],
    'sasl_list'  : [0x20, Struct(HEADER + ''),    0,  0,    0   ],
    'sasl_start' : [0x21, Struct(HEADER + ''),    0,  None, None],
    'sasl_step'  : [0x22, Struct(HEADER + ''),    0,  None, 0   ],
}
def is_indecrq(cmd):
    """Return True when *cmd* is one of the incr/decr opcodes (quiet or not)."""
    return cmd in (COMMANDS['incr'][0], COMMANDS['decr'][0],
                   COMMANDS['incrq'][0], COMMANDS['decrq'][0])
# ID: extension type, extension length, key length, val length
# key/val length: 0 means nothing must be. 'None' means should be presented
ANSWERS = {
    0x00: [Struct('!L'), 4, 0,    None], # get
    0x01: [None,         0, 0,    0   ], # set
    0x02: [None,         0, 0,    0   ], # add
    0x03: [None,         0, 0,    0   ], # replace
    0x04: [None,         0, 0,    0   ], # delete
    0x05: [None,         0, 0,    8   ], # incr
    0x06: [None,         0, 0,    8   ], # decr
    0x07: [None,         0, 0,    0   ], # quit
    0x08: [None,         0, 0,    0   ], # flush
    0x09: [Struct('!L'), 4, 0,    None], # getq
    0x0a: [None,         0, 0,    0   ], # noop
    0x0b: [None,         0, 0,    None], # version
    0x0c: [Struct('!L'), 4, None, None], # getk
    0x0d: [Struct('!L'), 4, None, None], # getkq
    0x0e: [None,         0, 0,    0   ], # append
    0x0f: [None,         0, 0,    0   ], # prepend
    0x10: [None,         0, None, None], # stat
    0x11: [None,         0, 0,    0   ], # setq
    0x12: [None,         0, 0,    0   ], # addq
    0x13: [None,         0, 0,    0   ], # replaceq
    0x14: [None,         0, 0,    0   ], # deleteq
    0x15: [None,         0, 0,    8   ], # incrq
    0x16: [None,         0, 0,    8   ], # decrq
    0x17: [None,         0, 0,    0   ], # quitq
    0x18: [None,         0, 0,    0   ], # flushq
    0x19: [None,         0, 0,    0   ], # appendq
    0x1A: [None,         0, 0,    0   ], # prependq
    0x1C: [None,         0, 0,    0   ], # touch
    0x1D: [Struct('!L'), 4, 0,    None], # gat
    0x1E: [Struct('!L'), 4, 0,    None], # gatq
    0x20: [None,         0, 0,    None], # sasl_list
    0x21: [None,         0, None, None], # sasl_start
    0x22: [None,         0, None, None], # sasl_step
    0x23: [Struct('!L'), 4, None, None], # gatk
    0x24: [Struct('!L'), 4, None, None], # gatkq
}

# Response status codes from the memcached binary protocol.
STATUS = {
    'OK'              : 0x00,
    'KEY_ENOENT'      : 0x01,
    'KEY_EEXISTS'     : 0x02,
    'E2BIG'           : 0x03,
    'EINVAL'          : 0x04,
    'NOT_STORED'      : 0x05,
    'DELTA_BADVAL'    : 0x06,
    'VBUCKET_BADVAL'  : 0x07,
    'AUTH_ERROR'      : 0x20,
    'AUTH_CONTINUE'   : 0x21,
    'UNKNOWN_COMMAND' : 0x81,
    'ENOMEM'          : 0x82,
    'NOT_SUPPORTED'   : 0x83,
    'SERVER_ERROR'    : 0x84,
    'EBUSY'           : 0x85,
    'EAGAIN'          : 0x86,
}

# Data-type field values (only raw bytes are defined).
TYPE = {
    'raw' : 0x00,
}
class MemcachedException(Exception):
    """Signals that a response could not be parsed from the bytes read so far.

    Attributes:
        msg:  description of what went wrong.
        size: how many more bytes are needed to finish parsing.
    """

    def __init__(self, msg, size):
        self.msg, self.size = msg, size
def binary_decorator(fn):
    """Wrap a MemcachedBinaryConnection command method.

    The wrapper pops the 'nosend' keyword, runs the wrapped method, and
    advances the connection's opaque counter.  A non-None result from the
    method is passed straight through.  Otherwise the request becomes the
    latest outstanding one and, unless nosend was requested, the queued
    commands are flushed and the parsed responses returned.
    """
    def wrapper(self, *args, **kwargs):
        deferred = kwargs.pop('nosend', False)
        outcome = fn(self, *args, **kwargs)
        self.opaque += 1
        if outcome is not None:
            return outcome
        self.latest = self.opaque - 1
        if deferred is False:
            self.send()
            return self.read_responses()
        return None
    return wrapper
class MemcachedBinaryConnection(TarantoolConnection):
    """Test client speaking the memcached binary protocol.

    Packed requests are queued in self.commands; the @binary_decorator on
    each command method bumps the opaque request id and, unless the caller
    passes nosend=True, flushes the queue and returns the parsed responses.
    """

    def __init__(self, host, port):
        super(MemcachedBinaryConnection, self).__init__(host, port)
        self.latest = 0       # opaque id of the last request awaiting a reply
        self.opaque = 1       # monotonically increasing per-request id
        self.recv_buffer = ''
        self.cas = {}         # most recent CAS value seen per key
        self.commands = []    # queued, already-packed request bytes
        self.connect()

    def send(self):
        """Flush every queued request to the server in a single write."""
        commands = b''.join(self.commands)
        self.socket.sendall(commands)
        self.commands = []

    def _recv(self, to_read):
        """Read exactly `to_read` bytes from the socket."""
        buf = b""
        while to_read > 0:
            tmp = self.socket.recv(to_read)
            to_read -= len(tmp)
            buf += tmp
        return buf

    def _read_response(self):
        """Read one complete response: fixed header plus its declared body."""
        hdr = self._recv(HEADER_SIZE)
        sz = hdr + self._recv(LENGTH_STRUCT.unpack_from(hdr)[0])
        return sz

    def _read_and_parse_response(self):
        """Parse a single response into a dict, validated against ANSWERS.

        Returns:
            dict with header fields plus optional 'flags', 'key', 'val'.

        Raises:
            MemcachedException: when fewer bytes than required were read.
        """
        cmd = self._read_response()
        to_read = HEADER_SIZE
        if len(cmd) < to_read:
            raise MemcachedException("Need more bytes", to_read - len(cmd))
        (magic, op, key_len, ext_len, dtype,
            status, tot_len, opaque, cas) = HEADER_STRUCT.unpack_from(cmd)
        to_read += tot_len
        val_len = tot_len - key_len - ext_len
        if len(cmd) < to_read:
            raise MemcachedException("Need more bytes", to_read - len(cmd))
        struct, ext_lenv, key_lenv, val_lenv = ANSWERS[op]
        # Multiple checks to be confident in server responses
        assert(magic == MAGIC['response'])
        if status == STATUS['OK']:
            assert(ext_len == ext_lenv)
            assert(((key_lenv is None or key_lenv > 0) and key_len > 0) or key_len == 0)
            assert(((val_lenv is None or val_lenv > 0) and val_len > 0) or val_len == 0)
        else:
            # Error responses always carry a textual message in the value.
            assert(val_len > 0)
        retval = {
            'magic' : magic,
            'op' : op,
            'dtype' : dtype,
            'status' : status,
            'opaque' : opaque,
            'cas' : cas,
        }
        # Extras (currently only the 4-byte flags of get-style replies).
        extra = None
        if struct is not None and status == STATUS['OK']:
            extra = struct.unpack_from(cmd, HEADER_SIZE)
        if extra is not None:
            retval['flags'] = extra[0]
        # Key is only present for commands whose ANSWERS entry allows it.
        key = None
        if key_lenv is None:
            begin = HEADER_SIZE + ext_len
            end = begin + key_len
            key = cmd[begin:end]
        if key is not None:
            retval['key'] = key
        val = None
        if val_lenv is None or val_len > 0:
            begin = HEADER_SIZE + ext_len + key_len
            end = HEADER_SIZE + tot_len
            val = cmd[begin:end]
            # decode result of (incr/decr)(q)
            if is_indecrq(op):
                val = INDECR_STRUCT.unpack_from(val)[0]
            else:
                # Ideally we should lean on 4th bit in flags to
                # decide whether to interpret this value as binary
                # or as text.
                #
                # Unconditional interpreting it as a text is
                # enough for testing purposes, though.
                val = bytes_to_str(val)
            retval['val'] = val
        return retval

    def read_responses(self):
        """Read replies until the one matching self.latest; remember CAS."""
        resp = []
        while True:
            obj = self._read_and_parse_response()
            resp.append(obj)
            if obj['cas'] > 0 and 'key' in obj and obj['key']:
                self.cas[obj['key']] = obj['cas']
            if obj['opaque'] == self.latest:
                break
        return resp

    def append_query(self, cmd, args):
        """Pack one request for command `cmd` and queue it in self.commands."""
        assert(cmd in COMMANDS)
        op, struct, ext_len, key_len, val_len = COMMANDS[cmd]
        key = args.get('key', '')
        val = args.get('val', '')
        # A non-None length in COMMANDS means the field is fixed to absent (0);
        # None means its length is taken from the actual argument.
        key_len = 0 if key_len is not None else len(key)
        val_len = 0 if val_len is not None else len(val)
        tot_len = val_len + key_len + ext_len
        dtype = args.get('type', TYPE['raw'])
        opaque = self.opaque
        cas = args.get('cas', 0)
        extra = args.get('extra', [])
        retval = [
            struct.pack(MAGIC['request'], op, key_len, ext_len, dtype, 0,
                        tot_len, opaque, cas, *extra),
            str_to_bytes(key), str_to_bytes(val)
        ]
        cmd = b''.join(retval)
        self.commands.append(cmd)

    # -- retrieval commands ------------------------------------------------
    @binary_decorator
    def get(self, key):
        self.append_query('get', {
            'key': key,
        })
    @binary_decorator
    def getq(self, key, nosend=False):
        self.append_query('getq', {
            'key': key,
        })
    @binary_decorator
    def getk(self, key, nosend=False):
        self.append_query('getk', {
            'key': key,
        })
    @binary_decorator
    def getkq(self, key, nosend=False):
        self.append_query('getkq', {
            'key': key,
        })

    # -- storage commands --------------------------------------------------
    # cas=-1 means "reuse the last CAS value this client saw for the key".
    @binary_decorator
    def set(self, key, value, expire=0, flags=0, nosend=False, cas=-1):
        if (cas == -1):
            cas = self.cas.get(key, 0)
        self.append_query('set', {
            'key': key,
            'cas': cas,
            'val': value,
            'extra': [flags, expire]
        })
    @binary_decorator
    def setq(self, key, value, expire=0, flags=0, nosend=False, cas=-1):
        if (cas == -1):
            cas = self.cas.get(key, 0)
        self.append_query('setq', {
            'key': key,
            'cas': cas,
            'val': value,
            'extra': [flags, expire]
        })
    @binary_decorator
    def add(self, key, value, expire=0, flags=0, nosend=False, cas=-1):
        if (cas == -1):
            cas = self.cas.get(key, 0)
        self.append_query('add', {
            'key': key,
            'cas': cas,
            'val': value,
            'extra': [flags, expire]
        })
    @binary_decorator
    def addq(self, key, value, expire=0, flags=0, nosend=False, cas=-1):
        if (cas == -1):
            cas = self.cas.get(key, 0)
        self.append_query('addq', {
            'key': key,
            'cas': cas,
            'val': value,
            'extra': [flags, expire]
        })
    @binary_decorator
    def replace(self, key, value, expire=0, flags=0, nosend=False, cas=-1):
        if (cas == -1):
            cas = self.cas.get(key, 0)
        self.append_query('replace', {
            'key': key,
            'cas': cas,
            'val': value,
            'extra': [flags, expire]
        })
    @binary_decorator
    def replaceq(self, key, value, expire=0, flags=0, nosend=False, cas=-1):
        if (cas == -1):
            cas = self.cas.get(key, 0)
        self.append_query('replaceq', {
            'key': key,
            'cas': cas,
            'val': value,
            'extra': [flags, expire]
        })

    # -- deletion / arithmetic --------------------------------------------
    @binary_decorator
    def delete(self, key, nosend=False):
        self.append_query('delete', {
            'key': key,
        })
    @binary_decorator
    def deleteq(self, key, nosend=False):
        self.append_query('deleteq', {
            'key': key,
        })
    @binary_decorator
    def incr(self, key, delta=1, expire=0xFFFFFFFF, initial=0, nosend=False):
        self.append_query('incr', {
            'key': key,
            'extra': [delta, initial, expire]
        })
    @binary_decorator
    def decr(self, key, delta=1, expire=0xFFFFFFFF, initial=0, nosend=False):
        self.append_query('decr', {
            'key': key,
            'extra': [delta, initial, expire]
        })
    @binary_decorator
    def incrq(self, key, delta=1, expire=0xFFFFFFFF, initial=0, nosend=False):
        self.append_query('incrq', {
            'key': key,
            'extra': [delta, initial, expire]
        })
    @binary_decorator
    def decrq(self, key, delta=1, expire=0xFFFFFFFF, initial=0, nosend=False):
        self.append_query('decrq', {
            'key': key,
            'extra': [delta, initial, expire]
        })

    # -- connection / maintenance -----------------------------------------
    @binary_decorator
    def quit(self, nosend=False):
        self.append_query('quit', {})
    @binary_decorator
    def quitq(self, nosend=False):
        self.append_query('quitq', {})
    @binary_decorator
    def flush(self, expire=0, nosend=False):
        self.append_query('flush', {
            'extra': [expire]
        })
    @binary_decorator
    def flushq(self, expire=0, nosend=False):
        self.append_query('flushq', {
            'extra': [expire]
        })
    @binary_decorator
    def noop(self):
        # Sends immediately and returns the responses itself; the non-None
        # return value makes the decorator pass it straight through.
        self.append_query('noop', {})
        self.latest = self.opaque
        self.send()
        return self.read_responses()
    @binary_decorator
    def version(self, nosend=False):
        self.append_query('version', {})

    # -- append/prepend ----------------------------------------------------
    @binary_decorator
    def append(self, key, value, nosend=False):
        self.append_query('append', {
            'key': key,
            'val': value,
        })
    @binary_decorator
    def prepend(self, key, value, nosend=False):
        self.append_query('prepend', {
            'key': key,
            'val': value,
        })
    @binary_decorator
    def appendq(self, key, value, nosend=False):
        self.append_query('appendq', {
            'key': key,
            'val': value,
        })
    @binary_decorator
    def prependq(self, key, value, nosend=False):
        self.append_query('prependq', {
            'key': key,
            'val': value,
        })

    @binary_decorator
    def stat(self, key=''):
        # nosend flag is ignored
        # Returns a dict of stat-name -> value; the stream of stat replies is
        # terminated by a response with empty key and value.
        self.append_query('stat', {
            'key': key,
        })
        self.latest = self.opaque
        self.send()
        ans = {}
        while True:
            rv = self._read_and_parse_response()
            if 'key' in rv and not rv['key'] and \
                    'val' in rv and not rv['val']:
                return ans
            ans[bytes_to_str(rv['key'])] = rv['val']

    # -- touch / get-and-touch ---------------------------------------------
    @binary_decorator
    def touch(self, key, expire, nosend=False):
        self.append_query('touch', {
            'key': key,
            'extra': [expire]
        })
    @binary_decorator
    def gat(self, key, expire, nosend=False):
        self.append_query('gat', {
            'key': key,
            'extra': [expire]
        })
    @binary_decorator
    def gatk(self, key, expire, nosend=False):
        self.append_query('gatk', {
            'key': key,
            'extra': [expire]
        })
    @binary_decorator
    def gatq(self, key, expire, nosend=False):
        # Bug fix: previously sent the non-quiet 'gat' opcode.
        self.append_query('gatq', {
            'key': key,
            'extra': [expire]
        })
    @binary_decorator
    def gatkq(self, key, expire, nosend=False):
        # Bug fix: previously sent the non-quiet 'gatk' opcode.
        self.append_query('gatkq', {
            'key': key,
            'extra': [expire]
        })

    # -- SASL authentication -------------------------------------------------
    @binary_decorator
    def sasl_list(self):
        self.append_query('sasl_list', {})
    @binary_decorator
    def sasl_start(self, mech, value):
        self.append_query('sasl_start', {
            'key': mech,
            'val': value
        })
    @binary_decorator
    def sasl_step(self, value):
        self.append_query('sasl_step', {
            'val': value
        })
# CRLF line terminator used by the memcached text protocol.
MEMCACHED_SEPARATOR = '\r\n'
class MemcachedCommandBuffer(object):
    """Steps through the CRLF-separated lines of a previously sent command
    string, so recv() can pair every command with its reply.

    Once exhausted, read_line() keeps returning None.
    """

    def __init__(self, commands):
        self.buf = commands

    def read_line(self):
        """Pop and return the next line, or None when the buffer is spent."""
        if self.buf is None:
            return None
        pos = self.buf.find(MEMCACHED_SEPARATOR)
        if pos > 0:
            line, self.buf = self.buf[:pos], self.buf[pos + 2:]
        else:
            # No separator beyond position 0: hand back the remainder whole.
            line, self.buf = self.buf, None
        return line
class MemcachedTextConnection(TarantoolConnection):
    """Memcached text-protocol client that echoes request/reply for test logs.

    recv() replays the commands just sent (via MemcachedCommandBuffer) and,
    for each one, consumes the matching reply from the socket, normalizing
    volatile data (cas values, server stats) so test output is reproducible.
    """
    def execute_no_reconnect(self, commands, silent = False):
        # Send the whole batch, then consume the matching replies.
        self.send(commands, silent)
        return self.recv(silent)
    def send(self, commands, silent = False):
        # Remember the raw command text; recv() re-parses it to know which
        # kind of reply to expect for each command.
        self.commands = commands
        self.socket.sendall(str_to_bytes(commands))
        if not silent:
            print("<<" + '-' * 50)
            sys.stdout.write(self.commands.strip() + '\n')
            #sys.stdout.write(self.commands)
    def recv(self, silent = False):
        """Consume one reply per sent command; return the raw reply text."""
        self.recv_buffer = ''
        self.command_buffer = MemcachedCommandBuffer(self.commands)
        # raw_reply keeps the bytes as received; reply is the normalized
        # version used for log comparison.
        self.raw_reply = ''
        self.reply = ''
        while True:
            cmd = self.command_buffer.read_line()
            if cmd == None:
                # end of buffer reached
                break
            # Dispatch on the command verb to the matching reply parser.
            if re.match('set|add|replace|append|prepend|cas', cmd, re.I):
                self.reply_storage(cmd)
            elif re.match('get|gets', cmd, re.I):
                self.reply_retrieval(cmd)
            elif re.match('delete', cmd, re.I):
                self.reply_deletion(cmd)
            elif re.match('incr|decr', cmd, re.I):
                self.reply_incr_decr(cmd)
            elif re.match('stats', cmd, re.I):
                self.reply_stats(cmd)
            elif re.match('flush_all|version|quit', cmd, re.I):
                self.reply_other(cmd)
            elif cmd == '':
                continue
            else:
                self.reply_unknown(cmd)
        if not silent:
            print(">>" + '-'*50)
            sys.stdout.write(self.reply.strip() + '\n')
            #sys.stdout.write(self.reply)
        return self.raw_reply
    def reply_storage(self, cmd):
        # Storage commands occupy two sent lines (command + data block):
        # skip the data line, then read the single-line reply.
        self.command_buffer.read_line()
        self.reply_single_line(cmd)
    def reply_retrieval(self, cmd):
        """Consume VALUE blocks until END or an error line."""
        while True:
            # read reply cmd
            key = self.read_line()
            self.raw_reply += key + MEMCACHED_SEPARATOR
            # Replace the volatile cas number with the literal 'cas' so the
            # normalized reply is reproducible across runs.
            if (re.match('VALUE', key)):
                arr = key.split(' ')
                if (len(arr) > 4):
                    arr[4] = 'cas'
                key = ' '.join(arr)
            # store line in reply buffer
            self.reply += key + MEMCACHED_SEPARATOR
            # check reply type
            if re.match('VALUE', key):
                # Value header received
                key_params = key.split()
                if len(key_params) < 4:
                    continue
                # receive value
                value_len = int(key_params[3])
                while value_len > 0:
                    # Receive value line
                    value = self.read_line()
                    # store value line in reply buffer
                    self.raw_reply += value + MEMCACHED_SEPARATOR
                    self.reply += value + MEMCACHED_SEPARATOR
                    # decrease value len
                    value_len -= len(value)
            elif re.match('END', key):
                break
            elif re.match('ERROR|CLIENT_ERROR|SERVER_ERROR', key):
                break
            else:
                # unknown
                print("error: unknown line: '%s'" % key)
                self.reply += "error: unknown line: '%s'" % key
                break
    def reply_deletion(self, cmd):
        self.reply_single_line(cmd)
    def reply_incr_decr(self, cmd):
        self.reply_single_line(cmd)
    def reply_stats(self, cmd):
        """Consume STAT lines until END, masking machine-specific values."""
        while True:
            # read reply stats
            stat = self.read_line()
            # store stat in reply buffer
            self.raw_reply += stat + MEMCACHED_SEPARATOR
            a = stat.split(' ')
            # Hide values that vary between runs/hosts behind '<var>'.
            if len(a) > 2 and a[1] in ['pid', 'time', 'version', 'pointer_size']:
                self.reply += ' '.join(a[:2]) + ' <var>' + MEMCACHED_SEPARATOR
            else:
                self.reply += stat + MEMCACHED_SEPARATOR
            if re.match('END', stat):
                break
            if re.match('ERROR|CLIENT_ERROR|SERVER_ERROR', stat):
                break
    def reply_other(self, cmd):
        self.reply_single_line(cmd)
    def reply_single_line(self, cmd):
        # Commands sent with the 'noreply' option produce no reply line.
        params = cmd.split()
        if re.match('noreply', params[-1], re.I):
            # Noreply option exist
            noreply = True
        else:
            noreply = False
        if not noreply:
            reply = self.read_line() + MEMCACHED_SEPARATOR
            self.reply += reply
            self.raw_reply += reply
    def reply_unknown(self, line):
        reply = self.read_line() + MEMCACHED_SEPARATOR
        self.raw_reply += reply
        self.reply += reply
    def read_line(self):
        """Read one CRLF-terminated line from the socket, buffering extra."""
        buf = self.recv_buffer
        while True:
            # try to find separator in the exist buffer
            # NOTE(review): `index > 0` never matches an empty reply line
            # (separator at position 0); presumably the server never sends
            # one — confirm.
            index = buf.find(MEMCACHED_SEPARATOR)
            if index > 0:
                break
            data = bytes_to_str(self.socket.recv(1048576))
            if not data:
                return None
            buf += data
        # get line
        line = buf[:index]
        # cut line from receive buffer
        self.recv_buffer = buf[index + 2:]
        return line
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss functions to be used by LayerCollection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
# Dependency imports
import six
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
@six.add_metaclass(abc.ABCMeta)
class LossFunction(object):
  """Abstract base class for loss functions.

  Note that unlike typical loss functions used in neural networks these are
  summed and not averaged across cases in the batch, since this is what the
  users of this class (FisherEstimator and MatrixVectorProductComputer) will
  be expecting. The implication of this is that you may want to
  normalize things like Fisher-vector products by the batch size when you
  use this class. It depends on the use case.
  """

  @abc.abstractproperty
  def targets(self):
    """The targets being predicted by the model.

    Returns:
      None or Tensor of appropriate shape for calling self._evaluate() on.
    """
    pass

  @abc.abstractproperty
  def inputs(self):
    """The inputs to the loss function (excluding the targets)."""
    pass

  def evaluate(self):
    """Evaluate the loss function on the targets."""
    if self.targets is not None:
      # We treat the targets as "constant". It's only the inputs that get
      # "back-propped" through.
      return self._evaluate(tf.stop_gradient(self.targets))
    else:
      raise Exception("Cannot evaluate losses with unspecified targets.")

  @abc.abstractmethod
  def _evaluate(self, targets):
    """Evaluates the negative log probability of the targets.

    Args:
      targets: Tensor that distribution can calculate log_prob() of.

    Returns:
      negative log probability of each target, summed across all targets.
    """
    pass

  @abc.abstractmethod
  def multiply_ggn(self, vector):
    """Right-multiply a vector by the GGN.

    Here the 'GGN' is the GGN matrix (whose definition is slightly flexible)
    of the loss function with respect to its inputs.

    Args:
      vector: The vector to multiply. Must be the same shape(s) as the
        'inputs' property.

    Returns:
      The vector right-multiplied by the GGN. Will be of the same shape(s)
      as the 'inputs' property.
    """
    pass

  @abc.abstractmethod
  def multiply_ggn_factor(self, vector):
    """Right-multiply a vector by a factor B of the GGN.

    Here the 'GGN' is the GGN matrix (whose definition is slightly flexible)
    of the loss function with respect to its inputs. Typically this will be
    block-diagonal across different cases in the batch, since the loss function
    is typically summed across cases.

    Note that B can be any matrix satisfying B * B^T = G where G is the GGN,
    but will agree with the one used in the other methods of this class.

    Args:
      vector: The vector to multiply. Must be of the shape given by the
        'ggn_factor_inner_shape' property.

    Returns:
      The vector right-multiplied by B. Will be of the same shape(s) as the
      'inputs' property.
    """
    pass

  @abc.abstractmethod
  def multiply_ggn_factor_transpose(self, vector):
    """Right-multiply a vector by the transpose of a factor B of the GGN.

    Here the 'GGN' is the GGN matrix (whose definition is slightly flexible)
    of the loss function with respect to its inputs. Typically this will be
    block-diagonal across different cases in the batch, since the loss function
    is typically summed across cases.

    Note that B can be any matrix satisfying B * B^T = G where G is the GGN,
    but will agree with the one used in the other methods of this class.

    Args:
      vector: The vector to multiply. Must be the same shape(s) as the
        'inputs' property.

    Returns:
      The vector right-multiplied by B^T. Will be of the shape given by the
      'ggn_factor_inner_shape' property.
    """
    pass

  @abc.abstractmethod
  def multiply_ggn_factor_replicated_one_hot(self, index):
    """Right-multiply a replicated-one-hot vector by a factor B of the GGN.

    Here the 'GGN' is the GGN matrix (whose definition is slightly flexible)
    of the loss function with respect to its inputs. Typically this will be
    block-diagonal across different cases in the batch, since the loss function
    is typically summed across cases.

    A 'replicated-one-hot' vector means a tensor which, for each slice along the
    batch dimension (assumed to be dimension 0), is 1.0 in the entry
    corresponding to the given index and 0 elsewhere.

    Note that B can be any matrix satisfying B * B^T = G where G is the GGN,
    but will agree with the one used in the other methods of this class.

    Args:
      index: A tuple representing the index of the entry in each slice that
        is 1.0. Note that len(index) must be equal to the number of elements
        of the 'ggn_factor_inner_shape' tensor minus one.

    Returns:
      The vector right-multiplied by B^T. Will be of the same shape(s) as the
      'inputs' property.
    """
    pass

  @abc.abstractproperty
  def ggn_factor_inner_shape(self):
    """The shape of the tensor returned by multiply_ggn_factor."""
    pass

  @abc.abstractproperty
  def ggn_factor_inner_static_shape(self):
    """Static version of ggn_factor_inner_shape."""
    pass

  @property
  def dtype(self):
    # Infer the dtype from the (possibly multiple) input tensors.
    if isinstance(self.inputs, (list, tuple)):
      return self.inputs[0].dtype
    return self.inputs.dtype
@six.add_metaclass(abc.ABCMeta)
class NegativeLogProbLoss(LossFunction):
  """Abstract base class for loss functions that are negative log probs."""

  def __init__(self, seed=None):
    # Seed used by evaluate_on_sample() when the caller supplies none.
    self._default_seed = seed
    super(NegativeLogProbLoss, self).__init__()

  @property
  def inputs(self):
    # For a neg-log-prob loss the inputs are the distribution parameters.
    return self.params

  @abc.abstractproperty
  def params(self):
    """Parameters to the underlying distribution."""
    pass

  @abc.abstractmethod
  def multiply_fisher(self, vector):
    """Right-multiply a vector by the Fisher.

    Args:
      vector: The vector to multiply. Must be the same shape(s) as the
        'inputs' property.

    Returns:
      The vector right-multiplied by the Fisher. Will be of the same shape(s)
      as the 'inputs' property.
    """
    pass

  @abc.abstractmethod
  def multiply_fisher_factor(self, vector):
    """Right-multiply a vector by a factor B of the Fisher.

    Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
    product of gradients) with respect to the parameters of the underlying
    probability distribution (whose log-prob defines the loss). Typically this
    will be block-diagonal across different cases in the batch, since the
    distribution is usually (but not always) conditionally iid across different
    cases.

    Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
    but will agree with the one used in the other methods of this class.

    Args:
      vector: The vector to multiply. Must be of the shape given by the
        'fisher_factor_inner_shape' property.

    Returns:
      The vector right-multiplied by B. Will be of the same shape(s) as the
      'inputs' property.
    """
    pass

  @abc.abstractmethod
  def multiply_fisher_factor_transpose(self, vector):
    """Right-multiply a vector by the transpose of a factor B of the Fisher.

    Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
    product of gradients) with respect to the parameters of the underlying
    probability distribution (whose log-prob defines the loss). Typically this
    will be block-diagonal across different cases in the batch, since the
    distribution is usually (but not always) conditionally iid across different
    cases.

    Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
    but will agree with the one used in the other methods of this class.

    Args:
      vector: The vector to multiply. Must be the same shape(s) as the
        'inputs' property.

    Returns:
      The vector right-multiplied by B^T. Will be of the shape given by the
      'fisher_factor_inner_shape' property.
    """
    pass

  @abc.abstractmethod
  def multiply_fisher_factor_replicated_one_hot(self, index):
    """Right-multiply a replicated-one-hot vector by a factor B of the Fisher.

    Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
    product of gradients) with respect to the parameters of the underlying
    probability distribution (whose log-prob defines the loss). Typically this
    will be block-diagonal across different cases in the batch, since the
    distribution is usually (but not always) conditionally iid across different
    cases.

    A 'replicated-one-hot' vector means a tensor which, for each slice along the
    batch dimension (assumed to be dimension 0), is 1.0 in the entry
    corresponding to the given index and 0 elsewhere.

    Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
    but will agree with the one used in the other methods of this class.

    Args:
      index: A tuple representing the index of the entry in each slice that
        is 1.0. Note that len(index) must be equal to the number of elements
        of the 'fisher_factor_inner_shape' tensor minus one.

    Returns:
      The vector right-multiplied by B. Will be of the same shape(s) as the
      'inputs' property.
    """
    pass

  @abc.abstractproperty
  def fisher_factor_inner_shape(self):
    """The shape of the tensor returned by multiply_fisher_factor."""
    pass

  @abc.abstractproperty
  def fisher_factor_inner_static_shape(self):
    """Static version of fisher_factor_inner_shape."""
    pass

  @abc.abstractmethod
  def sample(self, seed):
    """Sample 'targets' from the underlying distribution."""
    pass

  def evaluate_on_sample(self, seed=None):
    """Evaluates the log probability on a random sample.

    Args:
      seed: int or None. Random seed for this draw from the distribution.

    Returns:
      Log probability of sampled targets, summed across examples.
    """
    if seed is None:
      seed = self._default_seed
    # We treat the targets as "constant". It's only the inputs that get
    # "back-propped" through.
    return self._evaluate(tf.stop_gradient(self.sample(seed)))
class NaturalParamsNegativeLogProbLoss(NegativeLogProbLoss):
  """Base class for neg log prob losses whose inputs are 'natural' parameters.

  We will take the GGN of the loss to be the Fisher associated with the
  distribution, which also happens to be equal to the Hessian for this class
  of loss functions. See here: https://arxiv.org/abs/1412.1193

  'Natural parameters' are defined for exponential-family models. See for
  example: https://en.wikipedia.org/wiki/Exponential_family

  All GGN operations therefore simply delegate to their Fisher counterparts.
  """

  def multiply_ggn(self, vector):
    return self.multiply_fisher(vector)

  def multiply_ggn_factor(self, vector):
    return self.multiply_fisher_factor(vector)

  def multiply_ggn_factor_transpose(self, vector):
    return self.multiply_fisher_factor_transpose(vector)

  def multiply_ggn_factor_replicated_one_hot(self, index):
    return self.multiply_fisher_factor_replicated_one_hot(index)

  @property
  def ggn_factor_inner_shape(self):
    return self.fisher_factor_inner_shape

  @property
  def ggn_factor_inner_static_shape(self):
    # Bug fix: previously delegated to the *dynamic* fisher_factor_inner_shape
    # (a tf.shape Tensor); the static property must mirror the static Fisher
    # shape (a TensorShape), per the contract in LossFunction.
    return self.fisher_factor_inner_static_shape
class DistributionNegativeLogProbLoss(NegativeLogProbLoss):
  """Base class for neg log prob losses that use the TF Distribution classes."""

  def __init__(self, seed=None):
    super(DistributionNegativeLogProbLoss, self).__init__(seed=seed)

  @abc.abstractproperty
  def dist(self):
    """The underlying tfp.distributions.Distribution."""
    pass

  def _evaluate(self, targets):
    # Negative log-likelihood, summed over all targets.
    log_probs = self.dist.log_prob(targets)
    return -tf.reduce_sum(log_probs)

  def sample(self, seed):
    # Sampling is delegated wholesale to the distribution object.
    return self.dist.sample(seed=seed)
class NormalMeanNegativeLogProbLoss(DistributionNegativeLogProbLoss,
                                    NaturalParamsNegativeLogProbLoss):
  """Neg log prob loss for a normal distribution parameterized by a mean vector.

  The covariance is treated as a constant 'var' times the identity, so the
  Fisher with respect to the mean parameter is simply:

     F = (1/var) * I

  See for example https://www.ii.pwr.edu.pl/~tomczak/PDF/[JMT]Fisher_inf.pdf.
  """

  def __init__(self, mean, var=0.5, targets=None, seed=None):
    # The variance is a fixed Python-float hyperparameter, not a tensor.
    assert isinstance(var, float)
    self._mean = mean
    self._var = var
    self._targets = targets
    super(NormalMeanNegativeLogProbLoss, self).__init__(seed=seed)

  @property
  def targets(self):
    return self._targets

  @property
  def dist(self):
    return tfp.distributions.Normal(loc=self._mean, scale=tf.sqrt(self._var))

  @property
  def params(self):
    return self._mean

  def multiply_fisher(self, vector):
    # F = (1/var) * I, so F v is an elementwise rescale of v.
    return (1. / self._var) * vector

  def multiply_fisher_factor(self, vector):
    # B = var^{-1/2} * I satisfies B B^T = F.
    return self._var**-0.5 * vector

  def multiply_fisher_factor_transpose(self, vector):
    # B is symmetric for this loss, so B^T v == B v.
    return self.multiply_fisher_factor(vector)

  def multiply_fisher_factor_replicated_one_hot(self, index):
    assert len(index) == 1, "Length of index was {}".format(len(index))
    batch_ones = tf.ones(tf.shape(self._mean)[:1], dtype=self._mean.dtype)
    ones_slice = tf.expand_dims(batch_ones, axis=-1)
    output_slice = self._var**-0.5 * ones_slice
    return insert_slice_in_zeros(output_slice, 1, int(self._mean.shape[1]),
                                 index[0])

  @property
  def fisher_factor_inner_shape(self):
    return tf.shape(self._mean)

  @property
  def fisher_factor_inner_static_shape(self):
    return self._mean.shape
class NormalMeanVarianceNegativeLogProbLoss(DistributionNegativeLogProbLoss):
  """Negative log prob loss for a normal distribution with mean and variance.

  This class parameterizes a multivariate normal distribution with n independent
  dimensions. Unlike `NormalMeanNegativeLogProbLoss`, this class does not
  assume the variance is held constant. The Fisher Information for n = 1
  is given by,

  F = [[1 / variance,                0],
      [           0, 0.5 / variance^2]]

  where the parameters of the distribution are concatenated into a single
  vector as [mean, variance]. For n > 1, the mean parameter vector is
  concatenated with the variance parameter vector.

  See https://www.ii.pwr.edu.pl/~tomczak/PDF/[JMT]Fisher_inf.pdf for derivation.
  """

  def __init__(self, mean, variance, targets=None, seed=None):
    assert len(mean.shape) == 2, "Expect 2D mean tensor."
    assert len(variance.shape) == 2, "Expect 2D variance tensor."
    self._mean = mean
    self._variance = variance
    self._targets = targets
    super(NormalMeanVarianceNegativeLogProbLoss, self).__init__(seed=seed)

  @property
  def targets(self):
    return self._targets

  @property
  def dist(self):
    return tfp.distributions.Normal(
        loc=self._mean, scale=tf.sqrt(self._variance))

  @property
  def params(self):
    return self._mean, self._variance

  def _concat(self, mean, variance):
    # Packs (mean, variance) parts into the single [mean, variance] vector.
    return tf.concat([mean, variance], axis=-1)

  def _split(self, params):
    # Inverse of _concat: splits [mean, variance] into its two halves.
    return tf.split(params, 2, axis=-1)

  @property
  def _fisher_mean(self):
    # Diagonal of the mean block of F.
    return 1. / self._variance

  @property
  def _fisher_mean_factor(self):
    # Square root of the mean block of F.
    return 1. / tf.sqrt(self._variance)

  @property
  def _fisher_var(self):
    # Diagonal of the variance block of F.
    return 1. / (2 * tf.square(self._variance))

  @property
  def _fisher_var_factor(self):
    # Square root of the variance block of F.
    return 1. / (tf.sqrt(2.) * self._variance)

  def multiply_fisher(self, vecs):
    mean_vec, var_vec = vecs
    return (self._fisher_mean * mean_vec, self._fisher_var * var_vec)

  def multiply_fisher_factor(self, vecs):
    # Input is a single concatenated vector; output is the (mean, var) pair.
    mean_vec, var_vec = self._split(vecs)
    return (self._fisher_mean_factor * mean_vec,
            self._fisher_var_factor * var_vec)

  def multiply_fisher_factor_transpose(self, vecs):
    # Input is the (mean, var) pair; output is a single concatenated vector.
    mean_vec, var_vec = vecs
    return self._concat(self._fisher_mean_factor * mean_vec,
                        self._fisher_var_factor * var_vec)

  def multiply_fisher_factor_replicated_one_hot(self, index):
    assert len(index) == 1, "Length of index was {}".format(len(index))
    index = index[0]

    if index < int(self._mean.shape[-1]):
      # Index corresponds to mean parameter.
      mean_slice = self._fisher_mean_factor[:, index]
      mean_slice = tf.expand_dims(mean_slice, axis=-1)
      mean_output = insert_slice_in_zeros(mean_slice, 1, int(
          self._mean.shape[1]), index)
      var_output = tf.zeros_like(mean_output)
    else:
      index -= int(self._mean.shape[-1])
      # Index corresponds to variance parameter.
      var_slice = self._fisher_var_factor[:, index]
      var_slice = tf.expand_dims(var_slice, axis=-1)
      var_output = insert_slice_in_zeros(var_slice, 1,
                                         int(self._variance.shape[1]), index)
      mean_output = tf.zeros_like(var_output)

    return mean_output, var_output

  @property
  def fisher_factor_inner_shape(self):
    # [batch dims..., 2 * n]: the factor acts on the concatenated params.
    return tf.concat(
        [tf.shape(self._mean)[:-1], 2 * tf.shape(self._mean)[-1:]], axis=0)

  @property
  def fisher_factor_inner_static_shape(self):
    shape = self._mean.shape.as_list()
    # Bug fix: previously built the shape from shape[-1:] (i.e. [n, 2n]);
    # the static shape must mirror the dynamic fisher_factor_inner_shape,
    # which keeps the batch dims and doubles only the last one.
    return tf.TensorShape(shape[:-1] + [2 * shape[-1]])

  # The GGN is not defined for this loss; only Fisher operations exist.
  def multiply_ggn(self, vector):
    raise NotImplementedError()

  def multiply_ggn_factor(self, vector):
    raise NotImplementedError()

  def multiply_ggn_factor_transpose(self, vector):
    raise NotImplementedError()

  def multiply_ggn_factor_replicated_one_hot(self, index):
    raise NotImplementedError()

  @property
  def ggn_factor_inner_shape(self):
    raise NotImplementedError()

  @property
  def ggn_factor_inner_static_shape(self):
    raise NotImplementedError()
class CategoricalLogitsNegativeLogProbLoss(DistributionNegativeLogProbLoss,
                                           NaturalParamsNegativeLogProbLoss):
  """Neg log prob loss for a categorical distribution parameterized by logits.

  For a single case the Fisher of a categorical distribution with respect to
  its natural parameters (the logits) is:

     F = diag(p) - p*p^T

  with p = softmax(logits). It factorizes as F = B * B^T with

     B = diag(q) - p*q^T

  where q is the entry-wise square root of p (using q^T*q = 1).
  """

  def __init__(self, logits, targets=None, seed=None):
    """Instantiates a CategoricalLogitsNegativeLogProbLoss.

    Args:
      logits: Tensor of shape [batch_size, output_size]. Parameters for
        underlying distribution.
      targets: None or Tensor of shape [batch_size]. Each elements contains an
        index in [0, output_size).
      seed: int or None. Default random seed when sampling.
    """
    self._logits = logits
    self._targets = targets
    super(CategoricalLogitsNegativeLogProbLoss, self).__init__(seed=seed)

  @property
  def targets(self):
    return self._targets

  @property
  def dist(self):
    return tfp.distributions.Categorical(logits=self._logits)

  @property
  def _probs(self):
    return self.dist.probs_parameter()

  @property
  def _sqrt_probs(self):
    return tf.sqrt(self._probs)

  @property
  def params(self):
    return self._logits

  def multiply_fisher(self, vector):
    # F v = p*v - p*(p^T v), per case.
    p = self._probs
    return vector * p - p * tf.reduce_sum(
        vector * p, axis=-1, keepdims=True)

  def multiply_fisher_factor(self, vector):
    # B v = q*v - p*(q^T v), with q = sqrt(p).
    p = self._probs
    q = self._sqrt_probs
    return q * vector - p * tf.reduce_sum(
        q * vector, axis=-1, keepdims=True)

  def multiply_fisher_factor_transpose(self, vector):
    # B^T v = q*v - q*(p^T v).
    p = self._probs
    q = self._sqrt_probs
    return q * vector - q * tf.reduce_sum(
        p * vector, axis=-1, keepdims=True)

  def multiply_fisher_factor_replicated_one_hot(self, index):
    assert len(index) == 1, "Length of index was {}".format(len(index))
    p = self._probs
    q = self._sqrt_probs
    # Column `index` of B: q[index] on the diagonal minus p * q[index].
    q_slice = tf.expand_dims(q[:, index[0]], -1)
    padded_slice = insert_slice_in_zeros(q_slice, 1, int(q.shape[1]), index[0])
    return padded_slice - p * q_slice

  @property
  def fisher_factor_inner_shape(self):
    return tf.shape(self._logits)

  @property
  def fisher_factor_inner_static_shape(self):
    return self._logits.shape
class MultiBernoulliNegativeLogProbLoss(DistributionNegativeLogProbLoss,
                                        NaturalParamsNegativeLogProbLoss):
  """Neg log prob loss for multiple Bernoulli distributions param'd by logits.

  Represents N independent Bernoulli distributions where N = len(logits).
  With p = sigmoid(logits), the Fisher Information matrix is the diagonal

     F = diag(p * (1-p))

  and, F being diagonal with positive entries, its factor B is

     B = diag(sqrt(p * (1-p)))
  """

  def __init__(self, logits, targets=None, seed=None):
    self._logits = logits
    self._targets = targets
    super(MultiBernoulliNegativeLogProbLoss, self).__init__(seed=seed)

  @property
  def targets(self):
    return self._targets

  @property
  def dist(self):
    return tfp.distributions.Bernoulli(logits=self._logits)

  @property
  def _probs(self):
    return self.dist.probs_parameter()

  @property
  def params(self):
    return self._logits

  def multiply_fisher(self, vector):
    p = self._probs
    return p * (1 - p) * vector

  def multiply_fisher_factor(self, vector):
    p = self._probs
    return tf.sqrt(p * (1 - p)) * vector

  def multiply_fisher_factor_transpose(self, vector):
    # B is diagonal, hence symmetric: B^T v == B v.
    return self.multiply_fisher_factor(vector)

  def multiply_fisher_factor_replicated_one_hot(self, index):
    assert len(index) == 1, "Length of index was {}".format(len(index))
    p_slice = tf.expand_dims(self._probs[:, index[0]], -1)
    column = tf.sqrt(p_slice * (1 - p_slice))
    return insert_slice_in_zeros(column, 1, int(self._logits.shape[1]),
                                 index[0])

  @property
  def fisher_factor_inner_shape(self):
    return tf.shape(self._logits)

  @property
  def fisher_factor_inner_static_shape(self):
    return self._logits.shape
def insert_slice_in_zeros(slice_to_insert, dim, dim_size, position):
  """Inserts slice into a larger tensor of zeros.

  Forms a new tensor which is the same shape as slice_to_insert, except that
  the dimension given by 'dim' is expanded to the size given by 'dim_size'.
  'position' determines the position (index) at which to insert the slice
  within that dimension.

  Assumes slice_to_insert.shape[dim] = 1.

  Args:
    slice_to_insert: The slice to insert.
    dim: The dimension which to expand with zeros.
    dim_size: The new size of the 'dim' dimension.
    position: The position of 'slice_to_insert' in the new tensor.

  Returns:
    The new tensor.

  Raises:
    ValueError: If the slice's shape at the given dim is not 1.
  """
  slice_shape = slice_to_insert.shape
  if slice_shape[dim] != 1:
    raise ValueError("Expected slice_to_insert.shape to have {} dim of 1, but "
                     "was {}".format(dim, slice_to_insert.shape[dim]))

  # Zero padding before/after the slice along every axis; only 'dim' gets
  # nonzero padding, which places the slice at 'position'.
  # (len() already returns an int; the original wrapped it in a redundant
  # int() conversion.)
  before = [0] * len(slice_shape)
  after = before[:]
  before[dim] = position
  after[dim] = dim_size - position - 1

  return tf.pad(slice_to_insert, list(zip(before, after)))
class OnehotCategoricalLogitsNegativeLogProbLoss(
    CategoricalLogitsNegativeLogProbLoss):
  """Neg log prob loss for a categorical distribution with onehot targets.

  Identical to CategoricalLogitsNegativeLogProbLoss except that the underlying
  distribution is OneHotCategorical as opposed to Categorical.
  """

  @property
  def dist(self):
    """OneHotCategorical distribution parameterized by the stored logits."""
    return tfp.distributions.OneHotCategorical(logits=self._logits)
| |
#!/usr/bin/env python3
import geometry3d as g3d
import montecarlo as mc
from pyassimp import *
from math import *
import random
import matplotlib.pyplot as plt
def visualization(robot, est_pos, step, particles, particles_resampled, weights):
    """Plot one particle-filter step in the x-z plane and save it as a PNG.

    :param robot: true robot pose as (x, y, z, theta)
    :param est_pos: estimated robot pose as (x, y, z, theta)
    :param step: the current step number (used in title and filename)
    :param particles: list of particles before resampling
    :param particles_resampled: list of particles after resampling
    :param weights: particle weights (currently unused by this function)

    NOTE(review): reads the module-level global 'bbox' for the plot limits,
    so it must be called after the world bounding box has been computed.
    """
    plt.figure("Robot in the world", figsize=(10., 10.))
    plt.title('Particle filter, step ' + str(step))
    # draw coordinate grid for plotting (x and z extents of the world bbox)
    grid = [bbox[0][0], bbox[1][0], bbox[0][2], bbox[1][2]]
    plt.axis(grid)
    plt.grid(b=True, which='major', color='0.75', linestyle='--')
    plt.xticks([i for i in range(int(bbox[0][0]), int(bbox[1][0]), 500)])
    plt.yticks([i for i in range(int(bbox[0][2]), int(bbox[1][2]), 500)])
    # draw particles (orange) with heading arrows
    for p in particles:
        #print('P:', p)
        # particle
        circle = plt.Circle((p[0], p[2]), 20., facecolor='#ffb266', edgecolor='#994c00', alpha=0.5)
        plt.gca().add_patch(circle)
        # particle's orientation
        arrow = plt.Arrow(p[0], p[2], 50*cos(p[3]), 50*sin(p[3]), alpha=1., facecolor='#994c00', edgecolor='#994c00')
        plt.gca().add_patch(arrow)
    # draw resampled particles (green)
    for pr in particles_resampled:
        # particle
        circle = plt.Circle((pr[0], pr[2]), 20., facecolor='#66ff66', edgecolor='#009900', alpha=0.5)
        plt.gca().add_patch(circle)
        # particle's orientation
        arrow = plt.Arrow(pr[0], pr[2], 50*cos(pr[3]), 50*sin(pr[3]), alpha=1., facecolor='#006600', edgecolor='#006600')
        plt.gca().add_patch(arrow)
    # fixed landmarks of known locations
    #for lm in landmarks:
    #    circle = plt.Circle((lm[0], lm[1]), 1., facecolor='#cc0000', edgecolor='#330000')
    #    plt.gca().add_patch(circle)
    # robot's location (blue)
    circle = plt.Circle((robot[0], robot[2]), 20., facecolor='#6666ff', edgecolor='#0000cc')
    plt.gca().add_patch(circle)
    # robot's orientation
    arrow = plt.Arrow(robot[0], robot[2], 50*cos(robot[3]), 50*sin(robot[3]), alpha=0.5, facecolor='#000000', edgecolor='#000000')
    plt.gca().add_patch(arrow)
    # estimated robot's location (red)
    circle = plt.Circle((est_pos[0], est_pos[2]), 20., facecolor='#ff0000', edgecolor='#0000cc')
    plt.gca().add_patch(circle)
    # estimated robot's orientation
    arrow = plt.Arrow(est_pos[0], est_pos[2], 50*cos(est_pos[3]), 50*sin(est_pos[3]), alpha=0.5, facecolor='#ff0000', edgecolor='#000000')
    plt.gca().add_patch(arrow)
    #plt.show()
    plt.savefig("figure_" + str(step) + ".png")
    plt.close()
# True robot pose [x, y, z, theta]; x advances by 500 at the end of each step.
location = [1000, 0, 1000, 0]
# Recorded position estimates / errors from earlier runs, kept for reference:
#{ x: 1000, y: 300, z: -4200 } 1200
#{ x: 1500, y: 300, z: -6906.748046875 } 3906.748046875
#{ x: 2000, y: 300, z: -3903.758083014863 } 903.7580830148631
#{ x: 2500, y: 300, z: -3802.8835927798614 } 802.8835927798614
#{ x: 3000, y: 300, z: -4714.183137763199 } 1714.1831377631988
#{ x: 3500, y: 300, z: -4294.633381125961 } 1294.633381125961
#{ x: 4000, y: 300, z: -4851.1315041878415 } 1851.1315041878415
#{ x: 4500, y: 300, z: -6906.748046875 } 3906.748046875
# Central state container; the aliases below share (do not copy) its lists.
event = {
    'triangles': [],
    'particles': [],
    'motion': [0.0, 500.0], # delta_theta, s
    'noise': [30.0, 1.0 * 0.5, 50.0], # bearing, steering, distance
    'measurements': []
}
triangles = event['triangles']
particles = event['particles']
motion = event['motion']
noise = event['noise']
measurements = event['measurements']
# Load the room mesh and flatten every vertex coordinate into 'triangles'
# (9 floats per triangle: three 3-component vertices).
scene = load('obj/room.obj')
assert len(scene.meshes)
for mesh in scene.meshes:
    assert len(mesh.vertices)
    for v in mesh.vertices:
        triangles.extend(v)
release(scene)
# Axis-aligned bounding box of the whole model: bbox[0] = min, bbox[1] = max.
bbox = g3d.bounding_box(triangles)
world_x_size = bbox[1][0] - bbox[0][0]
world_y_size = bbox[1][1] - bbox[0][1]
world_z_size = bbox[1][2] - bbox[0][2]
print('Bounding box:', bbox[0], bbox[1])
print('World size:', world_x_size, world_y_size, world_z_size)
N_PART = 1000       # number of particles
N_BOX = 8           # world is split into N_BOX x N_BOX boxes in the x-z plane
N_SENSORS = 4       # number of range sensors (evenly spread around Y axis)
particles = []
if len(particles) == 0:
    # Seed particles with a Gaussian cloud around the known start location
    # (the commented-out variant sampled uniformly over the whole world).
    for i in range(N_PART):
        #particle = (random.random() * world_x_size + bbox[0][0],
        #            random.random() * 3 + bbox[0][1],
        #            random.random() * world_z_size + bbox[0][2],
        #            random.random() * 2.0 * pi)
        particle = (random.gauss(location[0], 500),
                    random.gauss(location[1], 500),
                    random.gauss(location[2], 500),
                    random.gauss(location[3], pi / 4))
        particles.append(particle)
# Regroup the flat coordinate list into per-triangle tuples, precomputing
# the two edge vectors used by the ray/triangle intersection test.
vertices = []
for i in range(0, len(triangles), 9):
    v0 = triangles[i+0:i+3]
    v1 = triangles[i+3:i+6]
    v2 = triangles[i+6:i+9]
    v = (v0, v1, v2,
         g3d.minus(v1, v0), # Find vectors for two edges sharing vert0
         g3d.minus(v2, v0)
         )
    vertices.append(v)
# Upper bound on a squared sensor distance (world diagonal in x-z).
max_meas = world_x_size **2 + world_z_size ** 2
weights = [0] * N_PART
# Split the world evenly with boxes
box_x_size = world_x_size / N_BOX
box_y_size = world_y_size
box_z_size = world_z_size / N_BOX
box_x_half_size = box_x_size / 2
box_y_half_size = box_y_size / 2
box_z_half_size = box_z_size / 2
boxes = []
# NOTE(review): box centers start at 0, not at bbox[0] -- this assumes the
# model's bounding box has its minimum corner at the origin; verify.
for ix in range(N_BOX):
    for iz in range(N_BOX):
        box = (
            ( # box center
                box_x_size * ix + box_x_half_size,
                box_y_half_size,
                box_z_size * iz + box_z_half_size
            ),
            ( # box half sizes
                box_x_half_size,
                box_y_half_size,
                box_z_half_size
            ),
            [] # placeholder for bounding triangle indexes
        )
        boxes.append(box)
# Calculate list of bounding triangles for each box
print('Total triangles:', len(vertices))
tt = 0
for box in boxes:
    for i, verts in enumerate(vertices):
        #print(i, verts)
        if g3d.triboxoverlap(box[0], box[1], verts):
            #print(' **** in', i)
            box[2].append(i)
    tt += len(box[2])
    print('Triangles in box:', len(box[2]))
print('Total triangles in boxes:', tt)
# Reference sensor direction; the other sensors are rotations of it about Y.
init_dir = [1.0, 0.0, 0.0]
for step in range(8):
    # Simulate measurements
    measurements = []
    for s in range(0, N_SENSORS): # generate sensor directions
        measurements.append({'direction':
            g3d.rotateY(init_dir, 2.0 * pi / N_SENSORS * s)})
    for meas in measurements:
        # Sensor is mounted 300 units above the robot's reference point.
        meas['origin'] = [0.0, 300.0, 0.0]
        min_dist = None
        origin = g3d.plus(location, meas['origin'])
        direction = meas['direction']
        # Reciprocal direction for the slab-based ray/box test; clamp tiny
        # components to avoid division by zero while keeping the sign.
        inv_direction = [
            1.0 / (x if abs(x) > 0.00001 else copysign(0.00001, x)) for x in direction
        ]
        meas['inv_direction'] = inv_direction
        #print('origin:', origin, 'dir:', direction)
        triangles_processed = 0
        for bn, box in enumerate(boxes):
            # build "bounding box" style box
            bb = [
                [
                    box[0][0] - box[1][0],
                    box[0][1] - box[1][1],
                    box[0][2] - box[1][2],
                ],
                [
                    box[0][0] + box[1][0],
                    box[0][1] + box[1][1],
                    box[0][2] + box[1][2],
                ]
            ]
            # Check if measurement ray intersects the box.
            # Only if it is we will check relevant triangles.
            if g3d.boxrayintersectBL(bb, origin, inv_direction):
                #print('X aabb:', bn, bb, 'cnt:', len(box[2]), 'in direction: ', direction, 'origin', origin)
                for vx in box[2]:
                    verts = vertices[vx]
                    triangles_processed += 1
                    x, (t,u,v) = g3d.intersect_triangle(origin,
                                                        direction,
                                                        verts,
                                                        True)
                    # Keep the nearest hit in front of the sensor (t > 0).
                    if x and t > 0:
                        #print(particle, t)
                        if min_dist is None or t < min_dist:
                            min_dist = t
        print('Processed triangles:', triangles_processed)
        meas['distance'] = min_dist
        print('Location/distance:', location, min_dist)
    # Measurement update
    for n, p in enumerate(particles):
        weights[n] = mc.measurement_prob(boxes,
                                         vertices,
                                         p,
                                         measurements,
                                         noise,
                                         max_meas)
    #for ww in weights:
    #    print('W:', ww)
    # Normalization
    sum_weights = sum(weights)
    if not sum_weights == 0:
        # NOTE(review): sum(weights) is recomputed here although sum_weights
        # already holds the same value.
        k = N_PART / sum(weights)
        for n, w in enumerate(weights):
            weights[n] = w * k
    #for i, w in enumerate(weights):
    #    if w > 0:
    #        print('W:', w, particles[i])
    print('Resampling')
    # Resampling (low-variance "resampling wheel")
    p2 = []
    index = int(random.random() * N_PART)
    beta = 0.0
    mw = max(weights)
    for i in range(N_PART):
        beta += random.random() * 2.0 * mw
        while beta > weights[index]:
            beta -= weights[index]
            index = (index + 1) % N_PART
        p2.append(particles[index])
    est_pos = mc.get_position(particles)
    print('Estimated position:', est_pos)
    visualization(location, est_pos, step, particles, p2, weights)
    particles = p2
    print('======== {0} ========'.format(step))
    #for p in particles:
    #    print('P:', p)
    # Motion update (prediction)
    for n, p in enumerate(particles):
        particles[n] = mc.move(p, motion, noise)
    # Move the true robot 500 units along x for the next step.
    location[0] += 500
| |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''pyglet is a cross-platform games and multimedia package.
Detailed documentation is available at http://www.pyglet.org
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 2444 2009-03-22 08:51:01Z Alex.Holkner $'

import os
import sys

# True when building the API docs with epydoc (epydoc sets sys.is_epydoc).
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc

#: The release version of this pyglet installation.
#:
#: Valid only if pyglet was installed from a source or binary distribution
#: (i.e. not in a checked-out copy from SVN).
#:
#: Use setuptools if you need to check for a specific release version, e.g.::
#:
#:    >>> import pyglet
#:    >>> from pkg_resources import parse_version
#:    >>> parse_version(pyglet.version) >= parse_version('1.1')
#:    True
#:
version = '1.1.3'
def _require_ctypes_version(version):
    """Raise ImportError unless the installed ctypes is at least `version`.

    `version` is a dotted string such as '1.0.0'.
    """
    import ctypes
    needed = tuple(int(part) for part in version.split('.'))
    installed = tuple(int(part) for part in ctypes.__version__.split('.'))
    if installed < needed:
        raise ImportError('pyglet requires ctypes %s or later.' % version)
_require_ctypes_version('1.0.0')

# Debug checks are skipped when Python runs optimized (-O) or when the app
# is frozen into a py2exe/py2app bundle.
_enable_optimisations = not __debug__
if getattr(sys, 'frozen', None):
    _enable_optimisations = True
#: Global dict of pyglet options. To change an option from its default, you
#: must import ``pyglet`` before any sub-packages. For example::
#:
#: import pyglet
#: pyglet.options['debug_gl'] = False
#:
#: The default options can be overridden from the OS environment. The
#: corresponding environment variable for each option key is prefaced by
#: ``PYGLET_``. For example, in Bash you can set the ``debug_gl`` option with::
#:
#: PYGLET_DEBUG_GL=True; export PYGLET_DEBUG_GL
#:
#: For options requiring a tuple of values, separate each value with a comma.
#:
#: The non-development options are:
#:
#: audio
#: A sequence of the names of audio modules to attempt to load, in
#: order of preference. Valid driver names are:
#:
#: * directsound, the Windows DirectSound audio module (Windows only)
#: * alsa, the ALSA audio module (Linux only)
#: * openal, the OpenAL audio module
#: * silent, no audio
#: debug_lib
#: If True, prints the path of each dynamic library loaded.
#: debug_gl
#: If True, all calls to OpenGL functions are checked afterwards for
#: errors using ``glGetError``. This will severely impact performance,
#: but provides useful exceptions at the point of failure. By default,
#: this option is enabled if ``__debug__`` is (i.e., if Python was not run
#: with the -O option). It is disabled by default when pyglet is "frozen"
#: within a py2exe or py2app library archive.
#: shadow_window
#: By default, pyglet creates a hidden window with a GL context when
#: pyglet.gl is imported. This allows resources to be loaded before
#: the application window is created, and permits GL objects to be
#: shared between windows even after they've been closed. You can
#: disable the creation of the shadow window by setting this option to
#: False. Recommended for advanced devlopers only.
#:
#: **Since:** pyglet 1.1
#: vsync
#: If set, the `pyglet.window.Window.vsync` property is ignored, and
#: this option overrides it (to either force vsync on or off). If unset,
#: or set to None, the `pyglet.window.Window.vsync` property behaves
#: as documented.
#: xsync
#: If set (the default), pyglet will attempt to synchronise the drawing of
#: double-buffered windows to the border updates of the X11 window
#: manager. This improves the appearance of the window during resize
#: operations. This option only affects double-buffered windows on
#: X11 servers supporting the Xsync extension with a window manager
#: that implements the _NET_WM_SYNC_REQUEST protocol.
#:
#: **Since:** pyglet 1.1
#:
# Default values for every pyglet option; see the comment block above for
# the meaning of each key. Overridden by PYGLET_* environment variables.
options = {
    'audio': ('directsound', 'openal', 'alsa', 'silent'),
    'font': ('gdiplus', 'win32'), # ignored outside win32; win32 is deprecated
    'debug_font': False,
    'debug_gl': not _enable_optimisations,
    'debug_gl_trace': False,
    'debug_gl_trace_args': False,
    'debug_graphics_batch': False,
    'debug_lib': False,
    'debug_media': False,
    'debug_texture': False,
    'debug_trace': False,
    'debug_trace_args': False,
    'debug_trace_depth': 1,
    'debug_trace_flush': True,
    'debug_win32': False,
    'debug_x11': False,
    'graphics_vbo': True,
    'shadow_window': True,
    'vsync': None,
    'xsync': True,
}

# Expected type of each option, used to coerce values read from the
# PYGLET_* environment variables.
_option_types = {
    'audio': tuple,
    'font': tuple,
    'debug_font': bool,
    'debug_gl': bool,
    'debug_gl_trace': bool,
    'debug_gl_trace_args': bool,
    'debug_graphics_batch': bool,
    'debug_lib': bool,
    'debug_media': bool,
    'debug_texture': bool,
    'debug_trace': bool,
    'debug_trace_args': bool,
    'debug_trace_depth': int,
    'debug_trace_flush': bool,
    'debug_win32': bool,
    'debug_x11': bool,
    'graphics_vbo': bool,
    'shadow_window': bool,
    'vsync': bool,
    'xsync': bool,
}
def _read_environment():
    '''Read defaults for options from environment.

    Each option key maps to an environment variable named PYGLET_<KEY>.
    Values are coerced according to _option_types: comma-split for tuples,
    a small set of truthy strings for bools, int() for ints.
    '''
    for key in options:
        env = 'PYGLET_%s' % key.upper()
        try:
            # Original rebuilt the key string here instead of reusing `env`.
            value = os.environ[env]
        except KeyError:
            pass
        else:
            # Coercion lives in the else-clause so that only the environment
            # lookup's KeyError is swallowed, not one from _option_types.
            if _option_types[key] is tuple:
                options[key] = value.split(',')
            elif _option_types[key] is bool:
                options[key] = value in ('true', 'TRUE', 'True', '1')
            elif _option_types[key] is int:
                options[key] = int(value)
_read_environment()

if sys.platform == 'cygwin':
    # This hack pretends that the posix-like ctypes provides windows
    # functionality. COM does not work with this hack, so there is no
    # DirectSound support.
    import ctypes
    ctypes.windll = ctypes.cdll
    ctypes.oledll = ctypes.cdll
    ctypes.WINFUNCTYPE = ctypes.CFUNCTYPE
    ctypes.HRESULT = ctypes.c_long
# Call tracing
# ------------

# Cache of full source path -> abbreviated path used in trace output.
_trace_filename_abbreviations = {}
def _trace_repr(value, size=40):
    """Return repr(value), shortened around '...' when it exceeds `size`."""
    text = repr(value)
    if len(text) <= size:
        return text
    # Keep the head and the tail of the repr around an ellipsis marker.
    return text[:size//2-2] + '...' + text[-size//2-1:]
def _trace_frame(frame, indent):
    # Print one stack frame for the debug_trace profiler output.
    # Calls routed through pyglet.lib's _TraceFunction wrapper are reported
    # as '[ctypes]'; everything else gets an abbreviated 'file:line'.
    from pyglet import lib
    import os
    if frame.f_code is lib._TraceFunction.__call__.func_code:
        is_ctypes = True
        func = frame.f_locals['self']._func
        name = func.__name__
        location = '[ctypes]'
    else:
        is_ctypes = False
        code = frame.f_code
        name = code.co_name
        path = code.co_filename
        line = code.co_firstlineno
        try:
            filename = _trace_filename_abbreviations[path]
        except KeyError:
            # Trim path down
            dir = ''
            path, filename = os.path.split(path)
            # Rebuild the path from its tail until ~30 chars; while/else:
            # the '...' prefix is only added when the loop stops because the
            # abbreviation got long, not because the path ran out.
            while len(dir + filename) < 30:
                filename = os.path.join(dir, filename)
                path, dir = os.path.split(path)
                if not dir:
                    filename = os.path.join('', filename)
                    break
            else:
                filename = os.path.join('...', filename)
            _trace_filename_abbreviations[path] = filename
        location = '(%s:%d)' % (filename, line)
    if indent:
        name = 'Called from %s' % name
    print '%s%s %s' % (indent, name, location)
    if _trace_args:
        if is_ctypes:
            args = [_trace_repr(arg) for arg in frame.f_locals['args']]
            print ' %sargs=(%s)' % (indent, ', '.join(args))
        else:
            for argname in code.co_varnames[:code.co_argcount]:
                try:
                    argvalue = _trace_repr(frame.f_locals[argname])
                    print ' %s%s=%s' % (indent, argname, argvalue)
                except:
                    pass
    if _trace_flush:
        sys.stdout.flush()
def _trace_func(frame, event, arg):
    # sys.setprofile callback: on 'call', print up to _trace_depth frames of
    # caller context; on 'exception', log the first-chance exception.
    if event == 'call':
        indent = ''
        for i in range(_trace_depth):
            _trace_frame(frame, indent)
            indent += ' '
            frame = frame.f_back
            if not frame:
                break
    elif event == 'exception':
        (exception, value, traceback) = arg
        print 'First chance exception raised:', repr(exception)
def _install_trace():
    # Activate the tracing profiler for every subsequent Python call.
    sys.setprofile(_trace_func)
# Cache trace options in module globals: _trace_func runs on every call, so
# these lookups must be as cheap as possible.
_trace_args = options['debug_trace_args']
_trace_depth = options['debug_trace_depth']
_trace_flush = options['debug_trace_flush']
if options['debug_trace']:
    _install_trace()
# Lazy loading
# ------------
class _ModuleProxy(object):
    """Placeholder that imports its pyglet submodule on first attribute use.

    Accessing or setting any attribute triggers the real import, after which
    the loaded module replaces this proxy in the package globals.
    """

    # Loaded module, or None while the import is still pending.
    _module = None

    def __init__(self, name):
        # Write through __dict__ to avoid triggering our own __setattr__.
        self.__dict__['_module_name'] = name

    def _load_module(self):
        # Import the real submodule, cache it on the proxy and publish it in
        # the package namespace so future accesses bypass the proxy.
        # (This logic was previously duplicated in __getattr__/__setattr__.)
        import_name = 'pyglet.%s' % self._module_name
        __import__(import_name)
        module = sys.modules[import_name]
        object.__setattr__(self, '_module', module)
        globals()[self._module_name] = module
        return module

    def __getattr__(self, name):
        try:
            return getattr(self._module, name)
        except AttributeError:
            if self._module is not None:
                # Module is loaded; the attribute genuinely doesn't exist.
                raise
        return getattr(self._load_module(), name)

    def __setattr__(self, name, value):
        try:
            setattr(self._module, name, value)
        except AttributeError:
            if self._module is not None:
                raise
        setattr(self._load_module(), name, value)
# Install the lazy proxies for every public submodule; accessing any
# attribute on one of these triggers the real import.
if not _is_epydoc:
    app = _ModuleProxy('app')
    clock = _ModuleProxy('clock')
    com = _ModuleProxy('com')
    event = _ModuleProxy('event')
    font = _ModuleProxy('font')
    gl = _ModuleProxy('gl')
    graphics = _ModuleProxy('graphics')
    image = _ModuleProxy('image')
    lib = _ModuleProxy('lib')
    media = _ModuleProxy('media')
    resource = _ModuleProxy('resource')
    sprite = _ModuleProxy('sprite')
    text = _ModuleProxy('text')
    window = _ModuleProxy('window')

# Fool py2exe, py2app into including all top-level modules (doesn't understand
# lazy loading)
if False:
    import app
    import clock
    import com
    import event
    import font
    import gl
    import graphics
    import image
    import lib
    import media
    import resource
    import sprite
    import text
    import window

# Hack around some epydoc bug that causes it to think pyglet.window is None.
if _is_epydoc:
    import window
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The passive components to version discovery.
The Discover object in discover.py contains functions that can create objects
on your behalf. These functions are not usable from within the keystoneauth1
library because you will get dependency resolution issues.
The Discover object in this file provides the querying components of Discovery.
This includes functions like url_for which allow you to retrieve URLs and the
raw data specified in version discovery responses.
"""
import logging
import re
from keystoneauth1 import _utils as utils
from keystoneauth1 import exceptions
_LOGGER = logging.getLogger(__name__)
@utils.positional()
def get_version_data(session, url, authenticated=None):
    """Retrieve raw version data from a url.

    :param session: session object used to issue the GET request.
    :param str url: the discovery endpoint to query.
    :param bool authenticated: passed through to session.get; None lets the
        session decide whether to authenticate the request.

    :returns: a list of raw version dicts in the server's own format.
    :raises exceptions.DiscoveryFailure: if the body is not JSON or matches
        none of the known discovery document layouts.
    """
    headers = {'Accept': 'application/json'}

    resp = session.get(url, headers=headers, authenticated=authenticated)

    try:
        body_resp = resp.json()
    except ValueError:
        # Not JSON at all; fall through to the DiscoveryFailure below.
        pass
    else:
        # In the event of querying a root URL we will get back a list of
        # available versions.
        try:
            return body_resp['versions']['values']
        except (KeyError, TypeError):
            pass

        # Most servers don't have a 'values' element so accept a simple
        # versions dict if available.
        try:
            return body_resp['versions']
        except KeyError:
            pass

        # Otherwise if we query an endpoint like /v2.0 then we will get back
        # just the one available version.
        try:
            return [body_resp['version']]
        except KeyError:
            pass

    # Include a short prefix of the body in the error to aid debugging.
    err_text = resp.text[:50] + '...' if len(resp.text) > 50 else resp.text
    raise exceptions.DiscoveryFailure('Invalid Response - Bad version data '
                                      'returned: %s' % err_text)
def normalize_version_number(version):
    """Turn a version representation into a tuple of ints.

    Accepts strings like 'v2.0' or '3', numbers like 2 or 2.1, and iterables
    of number-like components; raises TypeError for anything else.
    """
    # Strip a 'v' prefix ('v2.0' -> '2.0'); non-strings have no lstrip.
    try:
        version = version.lstrip('v')
    except AttributeError:
        pass

    # Numeric scalars (2 or '2') go through float so they gain a '.0'
    # component, guaranteeing at least one decimal point.
    try:
        version = str(float(version))
    except Exception:
        pass

    # A dotted string (possibly the float repr built above) splits on '.'.
    try:
        return tuple(int(piece) for piece in version.split('.'))
    except Exception:
        pass

    # Last attempt: any iterable of number-like components.
    try:
        return tuple(int(piece) for piece in version)
    except Exception:
        pass

    raise TypeError('Invalid version specified: %s' % version)
def version_match(required, candidate):
    """Test that an available version is a suitable match for a required
    version.

    To be suitable a version must be of the same major version as required
    and be at least a match in minor/patch level.

    eg. 3.3 is a match for a required 3.1 but 4.1 is not.

    :param tuple required: the version that must be met.
    :param tuple candidate: the version to test against required.

    :returns: True if candidate is suitable False otherwise.
    :rtype: bool
    """
    # Major versions must agree (v2 never satisfies a v3 request, and vice
    # versa), and the candidate may not be older than the requirement.
    return candidate[0] == required[0] and candidate >= required
class Discover(object):

    # Status strings a server may report for a version, grouped by how
    # raw_version_data filters them.
    CURRENT_STATUSES = ('stable', 'current', 'supported')
    DEPRECATED_STATUSES = ('deprecated',)
    EXPERIMENTAL_STATUSES = ('experimental',)

    @utils.positional()
    def __init__(self, session, url, authenticated=None):
        # Fetch and cache the raw version documents for this endpoint.
        self._data = get_version_data(session, url,
                                      authenticated=authenticated)

    def raw_version_data(self, allow_experimental=False,
                         allow_deprecated=True, allow_unknown=False):
        """Get raw version information from URL.

        Raw data indicates that only minimal validation processing is performed
        on the data, so what is returned here will be the data in the same
        format it was received from the endpoint.

        :param bool allow_experimental: Allow experimental version endpoints.
        :param bool allow_deprecated: Allow deprecated version endpoints.
        :param bool allow_unknown: Allow endpoints with an unrecognised status.

        :returns: The endpoints returned from the server that match the
                  criteria.
        :rtype: list
        """
        versions = []
        for v in self._data:
            try:
                status = v['status']
            except KeyError:
                _LOGGER.warning('Skipping over invalid version data. '
                                'No stability status in version.')
                continue

            status = status.lower()

            if status in self.CURRENT_STATUSES:
                versions.append(v)
            elif status in self.DEPRECATED_STATUSES:
                if allow_deprecated:
                    versions.append(v)
            elif status in self.EXPERIMENTAL_STATUSES:
                if allow_experimental:
                    versions.append(v)
            elif allow_unknown:
                versions.append(v)

        return versions

    def version_data(self, **kwargs):
        """Get normalized version data.

        Return version data in a structured way.

        :returns: A list of version data dictionaries sorted by version number.
                  Each data element in the returned list is a dictionary
                  consisting of at least:

          :version tuple: The normalized version of the endpoint.
          :url str: The url for the endpoint.
          :raw_status str: The status as provided by the server
        :rtype: list(dict)
        """
        data = self.raw_version_data(**kwargs)
        versions = []

        for v in data:
            try:
                version_str = v['id']
            except KeyError:
                _LOGGER.info('Skipping invalid version data. Missing ID.')
                continue

            try:
                links = v['links']
            except KeyError:
                _LOGGER.info('Skipping invalid version data. Missing links')
                continue

            version_number = normalize_version_number(version_str)

            # for/else: the else-branch runs (and the version is skipped)
            # only when no link with rel == 'self' caused a break.
            for link in links:
                try:
                    rel = link['rel']
                    url = link['href']
                except (KeyError, TypeError):
                    _LOGGER.info('Skipping invalid version link. '
                                 'Missing link URL or relationship.')
                    continue

                if rel.lower() == 'self':
                    break
            else:
                _LOGGER.info('Skipping invalid version data. '
                             'Missing link to endpoint.')
                continue

            versions.append({'version': version_number,
                             'url': url,
                             'raw_status': v['status']})

        versions.sort(key=lambda v: v['version'])
        return versions

    def data_for(self, version, **kwargs):
        """Return endpoint data for a version.

        :param tuple version: The version is always a minimum version in the
            same major release as there should be no compatibility issues with
            using a version newer than the one asked for.

        :returns: the endpoint data for a URL that matches the required version
                  (the format is described in version_data) or None if no
                  match.
        :rtype: dict
        """
        version = normalize_version_number(version)
        version_data = self.version_data(**kwargs)

        # Iterate highest-to-lowest so the newest matching version wins.
        for data in reversed(version_data):
            if version_match(version, data['version']):
                return data

        return None

    def url_for(self, version, **kwargs):
        """Get the endpoint url for a version.

        :param tuple version: The version is always a minimum version in the
            same major release as there should be no compatibility issues with
            using a version newer than the one asked for.

        :returns: The url for the specified version or None if no match.
        :rtype: str
        """
        data = self.data_for(version, **kwargs)
        return data['url'] if data else None
class _VersionHacks(object):
    """A container to abstract the list of version hacks.

    This could be done as simply a dictionary but is abstracted like this to
    make for easier testing.
    """

    def __init__(self):
        # Maps service_type -> list of (compiled pattern, replacement) pairs.
        self._discovery_data = {}

    def add_discover_hack(self, service_type, old, new=''):
        """Add a new hack for a service type.

        :param str service_type: The service_type in the catalog.
        :param re.RegexObject old: The pattern to use.
        :param str new: What to replace the pattern with.
        """
        self._discovery_data.setdefault(service_type, []).append((old, new))

    def get_discover_hack(self, service_type, url):
        """Apply the catalog hacks and figure out an unversioned endpoint.

        :param str service_type: the service_type to look up.
        :param str url: The original url that came from a service_catalog.

        :returns: Either the unversioned url or the one from the catalog
                  to try.
        """
        # First pattern that actually substitutes something wins.
        for pattern, replacement in self._discovery_data.get(service_type, []):
            candidate, substitutions = pattern.subn(replacement, url)
            if substitutions:
                return candidate

        return url
# Module-level registry of catalog URL fix-ups, pre-seeded with the
# well-known identity '/v2.0' suffix removal.
_VERSION_HACKS = _VersionHacks()
_VERSION_HACKS.add_discover_hack('identity', re.compile('/v2.0/?$'), '/')
def _get_catalog_discover_hack(service_type, url):
    """Apply the catalog hacks and figure out an unversioned endpoint.

    This function is internal to keystoneauth1. It simply delegates to the
    module-level _VERSION_HACKS registry.

    :param str service_type: the service_type to look up.
    :param str url: The original url that came from a service_catalog.

    :returns: Either the unversioned url or the one from the catalog to try.
    """
    return _VERSION_HACKS.get_discover_hack(service_type, url)
def add_catalog_discover_hack(service_type, old, new):
    """Adds a version removal rule for a particular service.

    Originally deployments of OpenStack would contain a versioned endpoint in
    the catalog for different services. E.g. an identity service might look
    like ``http://localhost:5000/v2.0``. This is a problem when we want to use
    a different version like v3.0 as there is no way to tell where it is
    located. We cannot simply change all service catalogs either so there must
    be a way to handle the older style of catalog.

    This function adds a rule for a given service type that if part of the URL
    matches a given regular expression in *old* then it will be replaced with
    the *new* value. This will replace all instances of old with new. It should
    therefore contain a regex anchor.

    For example the included rule states::

        add_catalog_discover_hack('identity', re.compile('/v2.0/?$'), '/')

    so if the catalog retrieves an *identity* URL that ends with /v2.0 or
    /v2.0/ then it should replace it simply with / to fix the user's catalog.

    :param str service_type: The service type as defined in the catalog that
        the rule will apply to.
    :param re.RegexObject old: The regular expression to search for and replace
        if found.
    :param str new: The new string to replace the pattern with.
    """
    _VERSION_HACKS.add_discover_hack(service_type, old, new)
| |
from ajabsacco.core.facades import loans as loan_facades
from ajabsacco.core.facades import savings as savings_facades
from ajabsacco.core.models import *
from ajabsacco.core.forms import *
from django.shortcuts import get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.template.response import TemplateResponse
from datetime import datetime
from dateutil.relativedelta import relativedelta
@login_required
def dashboard(request):
    """Back-office dashboard: headline figures plus 13-month trend series.

    Members are redirected to their own dashboard. Staff get recent members,
    top savers, delinquent members, loan-product balances and monthly trend
    series (assets, member growth, repayments, portfolio, deposits) covering
    the last 13 calendar months.
    """
    if request.user.get_role_display() == "Member":
        return redirect("member-dashboard")
    members = Member.objects.all().order_by('-date_joined')[:5]
    no_of_members = Member.objects.filter(is_active=True).count()
    trends = dict(
        trend_member_growth=[],
        trend_savings_deposit_trends=[],
        trend_loan_repayment_trends=[],
        trend_loan_portfolio_growth=[],
        trend_asset_growth=[],
    )
    # Walk the last 13 months oldest-first so each series plots
    # left-to-right in chronological order.
    for i in range(13, 0, -1):
        date = timezone.now() - relativedelta(months=i)
        trends['trend_asset_growth'].append("%2d" % (
            loan_facades.asset_balances(in_month_date=date) +
            savings_facades.asset_balances(in_month_date=date))
        )
        trends['trend_member_growth'].append(Member.objects.in_month(date).count())
        # aggregate() yields {'repayments': None} when there are no rows in
        # the month, hence the `or 0` fallback inside the expression.
        repayments = (LoanTransactionEntry.objects.posted().repayments(
        ).in_month(date).aggregate(repayments=Sum('amount')).pop('repayments', 0) or 0)
        trends['trend_loan_repayment_trends'].append("%2d" % repayments)
        trends['trend_loan_portfolio_growth'].append(
            "%2d" % loan_facades.loan_portfolio_control_balance(in_month_date=date))
        savings_deposits = SavingsTransactionEntry.objects.posted(
        ).credits().in_month(date).aggregate(deposits=Sum('amount')).pop('deposits', 0)
        savings_deposits = savings_deposits or 0
        trends['trend_savings_deposit_trends'].append("%2d" % savings_deposits)
    # Per-product posted balance/volume on the loan portfolio control account.
    loan_products = LoanProduct.objects.filter(
        product_account__account_transaction__status=LoanTransactionEntry.POSTED,
        product_account__account_transaction__gl_account_code=loan_codes.LOAN_PORTFOLIO_CONTROL_ACCOUNT
    ).annotate(
        balance=Sum('product_account__account_transaction__ledger_balance_increment'),
        count=Count('product_account__account_transaction__ledger_balance_increment')
    )
    top_savers = Member.objects.filter(
        is_active=True,
        savings_account__account_transaction__status=SavingsTransactionEntry.POSTED,
        savings_account__account_transaction__gl_account_code=savings_codes.DEPOSITS_CONTROL
    ).annotate(
        balance=Sum('savings_account__account_transaction__ledger_balance_increment')
    ).filter(balance__isnull=False).order_by('-balance')[:10]
    delinquent_members = Member.objects.filter(
        is_active=True,
        loan_account__in=LoanAccount.objects.active().late()
    )[:5]
    # Arrears are computed by the loan facade, not stored on the model, so
    # attach them per object for the template.
    for member in delinquent_members:
        member.arrears = loan_facades.member_loan_repayment_due(member)
    context = {
        'members': members,
        'top_savers': top_savers,
        'loan_products': loan_products,
        'members_deposits': savings_facades.member_deposits(),
        'delinquent_loans': loan_facades.number_of_loans_late(),
        'no_of_members': no_of_members,
        'delinquent_members': delinquent_members,
        'gross_loan_portfolio': loan_facades.gross_loan_portfolio(),
        'net_loan_portfolio': loan_facades.net_loan_portfolio(),
        'number_of_active_loan_accounts': loan_facades.number_of_active_accounts(),
        'number_of_active_savings_accounts': savings_facades.number_of_active_accounts()
    }
    context.update(trends)
    return TemplateResponse(request, "backoffice/dashboard.html", context)
@login_required
def member_dashboard(request):
    """Render the logged-in member's dashboard.

    Combines the member's loan and savings ledger entries (newest first)
    with their portfolio, repayment-due and deposit balances.
    """
    member = request.user
    # Both sub-ledgers are queried with the same shape; keep the shared
    # pieces in one place so they cannot drift apart.
    related = ('account', 'account__holder', 'account__holder__member_profile')
    fields = ('transaction_type', 'transaction_id', 'account', 'amount', 'status')
    loan_entries = (
        LoanTransactionEntry.objects
        .select_related(*related)
        .filter(account__holder=member)
        .only(*fields)
        .distinct()
        .order_by('-posting_date')
    )
    savings_entries = (
        SavingsTransactionEntry.objects
        .select_related(*related)
        .filter(account__holder=member)
        .only(*fields)
        .distinct()
        .order_by('-posting_date')
    )
    transactions = list(loan_entries) + list(savings_entries)
    return TemplateResponse(request, "backoffice/member_dashboard.html", {
        'transactions': transactions,
        'member': member,
        'net_loan_portfolio': loan_facades.member_principal_balances(member),
        'repayment_due': loan_facades.member_loan_repayment_due(member),
        'total_deposits': savings_facades.member_deposits_balances(member)
    })
@login_required
def loans_dashboard(request):
    """List loan accounts with their outstanding principal balances.

    Staff see every loan account; members only see their own.
    """
    loans = LoanAccount.objects.all()
    if request.user.get_role_display() == "Member":
        loans = loans.filter(holder=request.user)
    # Balances come from the ledger facade rather than a stored field, so
    # annotate each account object in Python before rendering.
    # (Removed an unused local `from django.db.models import Sum`.)
    for loan in loans:
        loan.balance = loan_facades.loan_account_principal_balance(loan)
    return TemplateResponse(request, "loans/dashboard.html", {
        'loans': loans
    })
@login_required
def accounting_dashboard(request):
    """Placeholder accounting dashboard; reuses the back-office template."""
    context = {}
    return TemplateResponse(request, "backoffice/dashboard.html", context)
@login_required
def reports_dashboard(request):
    """Placeholder reports dashboard; reuses the back-office template."""
    context = {}
    return TemplateResponse(request, "backoffice/dashboard.html", context)
@login_required
def products_dashboard(request):
    """Placeholder products dashboard; reuses the back-office template."""
    context = {}
    return TemplateResponse(request, "backoffice/dashboard.html", context)
@login_required
def transactions_dashboard(request):
    """Show the current user's debit-side ledger entries.

    Pulls debit entries from both the loan and savings sub-ledgers, newest
    first, and renders them as one combined list.
    """
    member = request.user
    loan_transactions = LoanTransactionEntry.objects.select_related(
        'account',
        'account__holder',
        'account__holder__member_profile'
    ).debits().filter(
        account__holder=member
    ).only('transaction_type', 'transaction_id', 'account', 'amount', 'status').distinct().order_by('-posting_date')
    savings_transactions = SavingsTransactionEntry.objects.select_related(
        'account',
        'account__holder',
        'account__holder__member_profile'
    ).debits().filter(
        account__holder=member
    ).only('transaction_type', 'transaction_id', 'account', 'amount', 'status').distinct().order_by('-posting_date')
    transactions = list(loan_transactions) + list(savings_transactions)
    # NOTE: removed an unreachable second `return` that followed this one and
    # referenced an undefined `members` variable (dead code).
    return TemplateResponse(request, "members/member_transactions.html", {
        'transactions': transactions,
        'member': member
    })
@login_required
def savings_dashboard(request):
    """List savings accounts with their current balances.

    Staff see all accounts; members only see their own.
    """
    # (Removed an unused local `from django.db.models import Sum`;
    # `.filter()` with no arguments is spelled `.all()` for clarity.)
    savings_accounts = SavingsAccount.objects.all()
    if request.user.get_role_display() == "Member":
        savings_accounts = savings_accounts.filter(holder=request.user)
    # Balances live in the ledger, not on the account row, so attach them
    # per object via the savings facade before rendering.
    for savings_account in savings_accounts:
        savings_account.balance = savings_facades.savings_account_balance(savings_account)
    return TemplateResponse(request, "savings/dashboard.html", {
        'savings_accounts': savings_accounts
    })
@login_required
def groups_dashboard(request):
    """List all member groups."""
    return TemplateResponse(
        request,
        "groups/group_list.html",
        {'groups': Group.objects.all()},
    )
@login_required
def members_dashboard(request):
    """List active members, optionally filtered by the ``q`` search term.

    The search matches member number or any part of the member's name.
    """
    members = Member.objects.all().exclude(is_active=False)
    q = request.GET.get('q')
    if q is not None:
        name_or_number = (
            Q(member_profile__first_name__icontains=q)
            | Q(member_profile__middle_name__icontains=q)
            | Q(member_profile__surname__icontains=q)
            | Q(member_no__icontains=q)
        )
        members = members.filter(name_or_number)
    return TemplateResponse(request, "members/member_list.html", {
        'members': members
    })
| |
import os
import sys
import tempfile
import unittest
import mock
import numpy
from chainer import cuda
from chainer import link
from chainer import links
from chainer import optimizers
from chainer.serializers import hdf5
from chainer import testing
from chainer.testing import attr
if hdf5._available:
import h5py
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5Serializer(unittest.TestCase):
    """Tests for hdf5.HDF5Serializer writing arrays and scalars to a file."""

    def setUp(self):
        # Create a real temporary HDF5 file; compression level 3 is asserted
        # against both serializer state and dataset options below.
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        self.hdf5file = h5py.File(path, 'w')
        self.serializer = hdf5.HDF5Serializer(self.hdf5file, compression=3)
        self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)

    def tearDown(self):
        # hasattr guards keep teardown safe if setUp failed part-way.
        if hasattr(self, 'hdf5file'):
            self.hdf5file.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_get_item(self):
        # Indexing must return a child serializer scoped to a sub-group that
        # inherits the parent's compression setting.
        child = self.serializer['x']
        self.assertIsInstance(child, hdf5.HDF5Serializer)
        self.assertEqual(child.group.name, '/x')
        self.assertEqual(child.compression, 3)

    def check_serialize(self, data):
        # Calling the serializer writes `data` as dataset 'w' and returns
        # the same object it was given.
        ret = self.serializer('w', data)
        dset = self.hdf5file['w']
        self.assertIsInstance(dset, h5py.Dataset)
        self.assertEqual(dset.shape, data.shape)
        self.assertEqual(dset.size, data.size)
        self.assertEqual(dset.dtype, data.dtype)
        read = numpy.empty((2, 3), dtype=numpy.float32)
        dset.read_direct(read)
        numpy.testing.assert_array_equal(read, cuda.to_cpu(data))
        self.assertEqual(dset.compression_opts, 3)
        self.assertIs(ret, data)

    def test_serialize_cpu(self):
        self.check_serialize(self.data)

    @attr.gpu
    def test_serialize_gpu(self):
        self.check_serialize(cuda.to_gpu(self.data))

    def test_serialize_scalar(self):
        # Scalars become 0-d datasets; compression must NOT be applied to
        # them (compression_opts is None).
        ret = self.serializer('x', 10)
        dset = self.hdf5file['x']
        self.assertIsInstance(dset, h5py.Dataset)
        self.assertEqual(dset.shape, ())
        self.assertEqual(dset.size, 1)
        self.assertEqual(dset.dtype, int)
        read = numpy.empty((), dtype=numpy.int32)
        dset.read_direct(read)
        self.assertEqual(read, 10)
        self.assertEqual(dset.compression_opts, None)
        self.assertIs(ret, 10)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5Deserializer(unittest.TestCase):
    """Tests reading groups, arrays and scalars back via HDF5Deserializer."""

    def setUp(self):
        self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
        handle, filename = tempfile.mkstemp()
        os.close(handle)
        self.temp_file_path = filename
        # Populate the fixture file: empty group 'x', array 'y', scalar 'z'.
        with h5py.File(filename, 'w') as fixture:
            fixture.require_group('x')
            fixture.create_dataset('y', data=self.data)
            fixture.create_dataset('z', data=numpy.asarray(10))
        self.hdf5file = h5py.File(filename, 'r')
        self.deserializer = hdf5.HDF5Deserializer(self.hdf5file)

    def tearDown(self):
        if hasattr(self, 'hdf5file'):
            self.hdf5file.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_get_item(self):
        # Indexing yields a child deserializer scoped to the sub-group.
        sub = self.deserializer['x']
        self.assertIsInstance(sub, hdf5.HDF5Deserializer)
        self.assertEqual(sub.group.name, '/x')

    def check_deserialize(self, y):
        # Deserializing fills `y` in place and returns the same object.
        result = self.deserializer('y', y)
        numpy.testing.assert_array_equal(cuda.to_cpu(y), self.data)
        self.assertIs(result, y)

    def test_deserialize_cpu(self):
        buf = numpy.empty((2, 3), dtype=numpy.float32)
        self.check_deserialize(buf)

    @attr.gpu
    def test_deserialize_gpu(self):
        buf = numpy.empty((2, 3), dtype=numpy.float32)
        self.check_deserialize(cuda.to_gpu(buf))

    def test_deserialize_scalar(self):
        # Scalars are returned rather than written in place: pass 5 in,
        # get the stored value 10 back.
        self.assertEqual(self.deserializer('z', 5), 10)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestSaveHDF5(unittest.TestCase):
    """save_hdf5 must hand an HDF5Serializer to the target's serialize()."""

    def setUp(self):
        handle, self.temp_file_path = tempfile.mkstemp()
        os.close(handle)

    def tearDown(self):
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_save(self):
        target = mock.MagicMock()
        hdf5.save_hdf5(self.temp_file_path, target, compression=3)
        # Exactly one delegation to serialize(), with a serializer carrying
        # the requested compression level.
        self.assertEqual(target.serialize.call_count, 1)
        args, _ = target.serialize.call_args
        serializer = args[0]
        self.assertIsInstance(serializer, hdf5.HDF5Serializer)
        self.assertEqual(serializer.compression, 3)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestLoadHDF5(unittest.TestCase):
    """load_hdf5 must hand an HDF5Deserializer to the target's serialize()."""

    def setUp(self):
        handle, self.temp_file_path = tempfile.mkstemp()
        os.close(handle)
        # Create an empty-but-valid HDF5 file for the loader to open.
        h5py.File(self.temp_file_path, 'w')

    def tearDown(self):
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_load(self):
        target = mock.MagicMock()
        hdf5.load_hdf5(self.temp_file_path, target)
        self.assertEqual(target.serialize.call_count, 1)
        args, _ = target.serialize.call_args
        self.assertIsInstance(args[0], hdf5.HDF5Deserializer)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestGroupHierachy(unittest.TestCase):
    """Verify chains and optimizers serialize into nested HDF5 groups."""

    def setUp(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        # Two-level hierarchy: parent(Wp) -> child(Wc, linear(W, b)).
        child = link.Chain(linear=links.Linear(2, 3))
        child.add_param('Wc', (2, 3))
        self.parent = link.Chain(child=child)
        self.parent.add_param('Wp', (2, 3))
        self.optimizer = optimizers.AdaDelta()
        self.optimizer.setup(self.parent)

    def _save(self, h5, obj, name):
        # Serialize `obj` into a fresh group `name` of the open file.
        group = h5.create_group(name)
        serializer = hdf5.HDF5Serializer(group)
        serializer.save(obj)

    def _load(self, h5, obj, name):
        # Deserialize group `name` of the open file back into `obj`.
        group = h5[name]
        serializer = hdf5.HDF5Deserializer(group)
        serializer.load(obj)

    def tearDown(self):
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def _check_group(self, h5, state):
        # `state` lists the extra top-level keys expected next to 'child'
        # (e.g. optimizer bookkeeping such as 'epoch' and 't').
        self.assertSetEqual(set(h5.keys()),
                            set(('child',) + state))
        self.assertSetEqual(set(h5['child'].keys()),
                            {'linear', 'Wc'})
        self.assertSetEqual(set(h5['child']['linear'].keys()),
                            {'W', 'b'})

    # NOTE: h5py 3.0 removed the implicit default file mode, so 'a' (the
    # historical default, read/write on an existing file) is passed
    # explicitly below; behavior on older h5py is unchanged.
    def test_save_chain(self):
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._save(h5, self.parent, 'test')
            self.assertSetEqual(set(h5.keys()), {'test'})
            self._check_group(h5['test'], ('Wp',))

    def test_save_optimizer(self):
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._save(h5, self.optimizer, 'test')
            self.assertSetEqual(set(h5.keys()), {'test'})
            self._check_group(h5['test'], ('Wp', 'epoch', 't'))

    def test_save_chain2(self):
        hdf5.save_hdf5(self.temp_file_path, self.parent)
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._check_group(h5, ('Wp',))

    def test_save_optimizer2(self):
        hdf5.save_hdf5(self.temp_file_path, self.optimizer)
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._check_group(h5, ('Wp', 'epoch', 't'))

    def test_load_chain(self):
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._save(h5, self.parent, 'test')
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._load(h5, self.parent, 'test')

    def test_load_optimizer(self):
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._save(h5, self.optimizer, 'test')
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._load(h5, self.optimizer, 'test')
# Keep a handle on the genuine import machinery so it can be restored and
# used for every module other than h5py.
original_import = __import__


def no_h5py(name, _globals=None, _locals=None, fromlist=(), level=0):
    """``__import__`` stand-in that simulates h5py being uninstalled."""
    if name == 'h5py':
        raise ImportError()
    return original_import(name, _globals, _locals, fromlist, level)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestNoH5py(unittest.TestCase):
    """The hdf5 serializer API must raise RuntimeError when h5py is absent."""

    def setUp(self):
        # Swap the builtin import hook so any `import h5py` fails.
        # NOTE(review): indexing __builtins__ assumes it is the dict form;
        # in the __main__ module __builtins__ is a module object — confirm
        # this test only runs where the dict form applies.
        __builtins__['__import__'] = no_h5py

    def tearDown(self):
        # Always restore the real import machinery for subsequent tests.
        __builtins__['__import__'] = original_import

    def test_raise(self):
        # Evict the cached serializer modules so the import below re-runs
        # their h5py import logic under the patched __import__.
        del sys.modules['chainer.serializers.hdf5']
        del sys.modules['chainer.serializers.npz']
        del sys.modules['chainer.serializers']
        import chainer.serializers
        # Every HDF5 entry point degrades to a RuntimeError without h5py.
        with self.assertRaises(RuntimeError):
            chainer.serializers.save_hdf5(None, None, None)
        with self.assertRaises(RuntimeError):
            chainer.serializers.load_hdf5(None, None)
        with self.assertRaises(RuntimeError):
            chainer.serializers.HDF5Serializer(None)
        with self.assertRaises(RuntimeError):
            chainer.serializers.HDF5Deserializer(None)
# Let chainer.testing discover and run this module's tests when the file
# is executed directly.
testing.run_module(__name__, __file__)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.