# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009 - Frank Scholz <coherence@beebits.net>
# Copyright 2014 - Hartmut Goebel <h.goebel@crazy-compilers.com>
import time
import pygtk
pygtk.require("2.0")
import gtk
from twisted.internet import reactor
from coherence.base import Coherence
from coherence.upnp.core.utils import means_true
from coherence import log
from ._resources import _geticon
# gtk store defines
TYPE_COLUMN = 0
NAME_COLUMN = 1
UDN_COLUMN = 2
ICON_COLUMN = 3
OBJECT_COLUMN = 4
DEVICE = 0
SERVICE = 1
VARIABLE = 2
ACTION = 3
ARGUMENT = 4
def device_type(object):
if object is None:
return None
return object.get_device_type().split(':')[3].lower()
class DevicesWidget(log.Loggable):
logCategory = 'inspector'
def __init__(self, coherence):
self.coherence = coherence
self.cb_item_dbl_click = None
self.cb_item_left_click = None
self.cb_item_right_click = None
self.cb_resource_chooser = None
self.build_ui()
self.init_controlpoint()
def build_ui(self):
self.window = gtk.ScrolledWindow()
self.window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.icons = {}
self.icons['device'] = _geticon('upnp-device.png')
self.icons['mediaserver'] = _geticon('network-server.png')
self.icons['mediarenderer'] = _geticon('media-renderer.png')
self.icons['binarylight'] = _geticon('network-light.png')
self.icons['dimmablelight'] = self.icons['binarylight']
self.icons['digitalsecuritycamera'] = _geticon('camera-web.png')
self.icons['printer'] = _geticon('printer.png')
self.folder_icon = _geticon('folder.png')
self.service_icon = _geticon('upnp-service.png')
self.action_icon = _geticon('upnp-action.png')
self.action_arg_in_icon = _geticon('upnp-action-arg-in.png')
self.action_arg_out_icon = _geticon('upnp-action-arg-out.png')
self.state_variable_icon = _geticon('upnp-state-variable.png')
self.store = gtk.TreeStore(int, # 0: type
str, # 1: name
str, # 2: device udn
gtk.gdk.Pixbuf,
object
)
self.treeview = gtk.TreeView(self.store)
self.column = gtk.TreeViewColumn('Devices')
self.treeview.append_column(self.column)
# create CellRenderers to render the data
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
self.column.pack_start(icon_cell, False)
self.column.pack_start(text_cell, True)
self.column.set_attributes(text_cell, text=1)
self.column.add_attribute(icon_cell, "pixbuf", 3)
#self.column.set_cell_data_func(self.cellpb, get_icon)
self.treeview.connect("button_press_event", self.button_action)
self.treeview.connect("row-activated", self.activated)
self.treeview.connect("move_cursor", self.moved_cursor)
gtk.binding_entry_add_signal(self.treeview, gtk.keysyms.Left, 0,
"expand-collapse-cursor-row",
bool, False, bool, False, bool, False)
gtk.binding_entry_add_signal(self.treeview, gtk.keysyms.Right, 0,
"expand-collapse-cursor-row",
bool, False, bool, True, bool, False)
selection = self.treeview.get_selection()
selection.set_mode(gtk.SELECTION_SINGLE)
self.window.add(self.treeview)
self.windows = {}
def _build_run_action_box(self, object, id, row_path):
window = gtk.Window()
window.set_default_size(350, 300)
window.set_title('Invoke Action %s' % object.name)
window.connect("delete_event", self.deactivate, id)
def build_label(icon, label):
hbox = gtk.HBox(homogeneous=False, spacing=10)
image = gtk.Image()
image.set_from_pixbuf(icon)
hbox.pack_start(image, False, False, 2)
text = gtk.Label(label)
hbox.pack_start(text, False, False, 2)
return hbox
def build_button(label):
hbox = gtk.HBox(homogeneous=False, spacing=10)
image = gtk.Image()
image.set_from_pixbuf(self.action_icon)
hbox.pack_start(image, False, False, 2)
text = gtk.Label(label)
hbox.pack_start(text, False, False, 2)
button = gtk.Button()
button.set_flags(gtk.CAN_DEFAULT)
button.add(hbox)
return button
def build_arguments(action, direction):
text = gtk.Label("<b>'%s' arguments:</b>" % direction)
text.set_use_markup(True)
hbox = gtk.HBox(homogeneous=False, spacing=10)
hbox.pack_start(text, False, False, 2)
vbox = gtk.VBox(homogeneous=False, spacing=10)
vbox.pack_start(hbox, False, False, 2)
row = 0
if direction == 'in':
arguments = object.get_in_arguments()
else:
arguments = object.get_out_arguments()
table = gtk.Table(rows=len(arguments), columns=2,
homogeneous=False)
entries = {}
for argument in arguments:
variable = action.service.get_state_variable(
argument.state_variable)
name = gtk.Label(argument.name + ':')
name.set_alignment(0, 0)
#hbox = gtk.HBox(homogeneous=False, spacing=2)
#hbox.pack_start(name,False,False,2)
table.attach(name, 0, 1, row, row + 1, gtk.SHRINK)
if variable.data_type == 'boolean':
entry = gtk.CheckButton()
if direction == 'in':
entries[argument.name] = entry.get_active
else:
entry.set_sensitive(False)
entries[argument.name] = (variable.data_type, entry.set_active)
elif variable.data_type == 'string':
if direction == 'in' and len(variable.allowed_values) > 0:
store = gtk.ListStore(str)
for value in variable.allowed_values:
store.append((value, ))
entry = gtk.ComboBox()
text_cell = gtk.CellRendererText()
entry.pack_start(text_cell, True)
entry.set_attributes(text_cell, text=0)
entry.set_model(store)
entry.set_active(0)
entries[argument.name] = (entry.get_active, entry.get_model)
else:
if direction == 'in':
entry = gtk.Entry(max=0)
entries[argument.name] = entry.get_text
else:
entry = gtk.ScrolledWindow()
entry.set_border_width(1)
entry.set_shadow_type(gtk.SHADOW_ETCHED_IN)
entry.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
textview = gtk.TextView()
textview.set_editable(False)
textview.set_wrap_mode(gtk.WRAP_WORD)
entry.add(textview)
entries[argument.name] = ('text', textview)
else:
if direction == 'out':
entry = gtk.Entry(max=0)
entry.set_editable(False)
entries[argument.name] = (variable.data_type, entry.set_text)
else:
adj = gtk.Adjustment(0, 0, 4294967296, 1.0, 50.0, 0.0)
entry = gtk.SpinButton(adj, 0, 0)
entry.set_numeric(True)
entry.set_digits(0)
entries[argument.name] = entry.get_value_as_int
table.attach(entry, 1, 2, row, row + 1, gtk.FILL | gtk.EXPAND, gtk.FILL | gtk.EXPAND)
row += 1
#hbox = gtk.HBox(homogeneous=False, spacing=10)
#hbox.pack_start(table,False,False,2)
#hbox.show()
vbox.pack_start(table, False, False, 2)
return vbox, entries
vbox = gtk.VBox(homogeneous=False, spacing=10)
vbox.pack_start(build_label(self.store[row_path[0]][ICON_COLUMN],
self.store[row_path[0]][NAME_COLUMN]),
False, False, 2)
vbox.pack_start(build_label(self.service_icon,
self.store[row_path[0],
row_path[1]][NAME_COLUMN]),
False, False, 2)
vbox.pack_start(build_label(self.action_icon, object.name),
False, False, 2)
hbox = gtk.HBox(homogeneous=False, spacing=10)
hbox.pack_start(vbox, False, False, 2)
button = build_button('Invoke')
hbox.pack_end(button, False, False, 20)
vbox = gtk.VBox(homogeneous=False, spacing=10)
vbox.pack_start(hbox, False, False, 2)
in_entries = {}
out_entries = {}
if len(object.get_in_arguments()) > 0:
box, in_entries = build_arguments(object, 'in')
vbox.pack_start(box, False, False, 2)
if len(object.get_out_arguments()) > 0:
box, out_entries = build_arguments(object, 'out')
vbox.pack_start(box, False, False, 2)
window.add(vbox)
status_bar = gtk.Statusbar()
context_id = status_bar.get_context_id("Action Statusbar")
vbox.pack_end(status_bar, False, False, 2)
button.connect('clicked', self.call_action, object,
in_entries, out_entries, status_bar)
window.show_all()
self.windows[id] = window
def activated(self, view, row_path, column):
iter = self.store.get_iter(row_path)
if iter:
type, object = self.store.get(iter, TYPE_COLUMN, OBJECT_COLUMN)
if type == ACTION:
id = '@'.join((object.service.device.get_usn(),
object.service.service_type,
object.name))
try:
self.windows[id].show()
except:
self._build_run_action_box(object, id, row_path)
elif type == DEVICE:
devtype = device_type(object)
if devtype == 'mediaserver':
self.mediaserver_browse(None, object)
elif devtype == 'mediarenderer':
self.mediarenderer_control(None, object)
elif devtype == 'internetgatewaydevice':
self.igd_control(None, object)
else:
if view.row_expanded(row_path):
view.collapse_row(row_path)
else:
view.expand_row(row_path, False)
def deactivate(self, window, event, id):
#print "deactivate",id
del self.windows[id]
def button_action(self, widget, event):
x = int(event.x)
y = int(event.y)
path = self.treeview.get_path_at_pos(x, y)
if path == None:
return True
row_path, column, _, _ = path
if event.button == 3:
if self.cb_item_right_click != None:
return self.cb_item_right_click(widget, event)
else:
iter = self.store.get_iter(row_path)
type, object = self.store.get(iter, TYPE_COLUMN, OBJECT_COLUMN)
if type == DEVICE:
menu = gtk.Menu()
item = gtk.CheckMenuItem("Show events")
item.set_sensitive(False)
menu.append(item)
item = gtk.CheckMenuItem("Show log")
item.set_sensitive(False)
menu.append(item)
menu.append(gtk.SeparatorMenuItem())
item = gtk.MenuItem("Extract device and service descriptions...")
item.connect("activate", self.extract_descriptions, object)
menu.append(item)
menu.append(gtk.SeparatorMenuItem())
item = gtk.MenuItem("Test device...")
item.set_sensitive(False)
menu.append(item)
devtype = device_type(object)
if devtype == 'mediaserver':
menu.append(gtk.SeparatorMenuItem())
item = gtk.MenuItem("Browse MediaServer...")
item.connect("activate", self.mediaserver_browse, object)
menu.append(item)
elif devtype == 'mediarenderer':
menu.append(gtk.SeparatorMenuItem())
item = gtk.MenuItem("Control MediaRendererer...")
item.connect("activate", self.mediarenderer_control, object)
menu.append(item)
elif devtype == 'internetgatewaydevice':
menu.append(gtk.SeparatorMenuItem())
item = gtk.MenuItem("control InternetGatewayDevice")
item.connect("activate", self.igd_control, object)
menu.append(item)
menu.show_all()
menu.popup(None, None, None, event.button, event.time)
return True
elif type == SERVICE:
menu = gtk.Menu()
item = gtk.CheckMenuItem("Show events")
item.set_sensitive(False)
menu.append(item)
item = gtk.CheckMenuItem("Show log")
item.set_sensitive(False)
menu.append(item)
menu.show_all()
menu.popup(None, None, None, event.button, event.time)
return True
return False
elif (event.button == 1 and
self.cb_item_left_click != None):
reactor.callLater(0.1, self.cb_item_left_click, widget, event)
return False
return 0
def extract_descriptions(self, widget, device):
print "extract xml descriptions", widget, device
from extract import Extract
id = '@'.join((device.get_usn(), 'DeviceXMlExtract'))
try:
self.windows[id].show()
except:
ui = Extract(device)
self.windows[id] = ui.window
def moved_cursor(self, widget, step, count):
    if self.cb_item_left_click is not None:
        reactor.callLater(0.1, self.cb_item_left_click, widget, None)
    return False
def init_controlpoint(self):
self.coherence.connect(self.device_found,
'Coherence.UPnP.RootDevice.detection_completed')
self.coherence.connect(self.device_removed,
'Coherence.UPnP.RootDevice.removed')
for device in self.coherence.devices:
self.device_found(device)
def call_action(self, widget, action, in_entries, out_entries, status_bar):
self.debug("in_entries %r", in_entries)
self.debug("out_entries %r", out_entries)
context_id = status_bar.get_context_id("Action Statusbar")
status_bar.pop(context_id)
status_bar.push(context_id,
time.strftime("%H:%M:%S") + " - calling " + action.name)
kwargs = {}
for entry, method in in_entries.items():
if isinstance(method, tuple):
kwargs[entry] = unicode(method[1]()[method[0]()][0])
else:
value = method()
if type(value) == bool:
if value == True:
kwargs[entry] = '1'
else:
kwargs[entry] = '0'
else:
kwargs[entry] = unicode(value)
def populate(result, entries):
self.info("result %r", result)
self.info("entries %r", entries)
status_bar.pop(context_id)
status_bar.push(context_id,
time.strftime("%H:%M:%S") + " - ok")
for argument, value in result.items():
type, method = entries[argument]
if type == 'boolean':
value = means_true(value)
if type == 'text':
method.get_buffer().set_text(value)
continue
method(value)
def fail(f):
self.debug(f)
status_bar.pop(context_id)
status_bar.push(context_id,
time.strftime("%H:%M:%S") + " - fail %s" % f.value)
self.info("action %s call %r", action.name, kwargs)
d = action.call(**kwargs)
d.addCallback(populate, out_entries)
d.addErrback(fail)
def device_found(self, device=None, row=None):
self.info(device.get_friendly_name(),
device.get_usn(),
device.get_device_type().split(':')[3].lower(),
device.get_device_type())
name = '%s (%s)' % (device.get_friendly_name(), ':'.join(device.get_device_type().split(':')[3:5]))
item = self.store.append(row, (DEVICE, name, device.get_usn(),
self.icons.get(device.get_device_type().split(':')[3].lower(), self.icons['device']),
device))
for service in device.services:
_, _, _, service_class, version = service.service_type.split(':')
service.subscribe()
service_item = self.store.append(
item,
(SERVICE,
':'.join((service_class, version)),
service.service_type, self.service_icon, service))
variables_item = self.store.append(
service_item, (-1, 'State Variables', '',
self.folder_icon, None))
for variable in service.get_state_variables(0).values():
self.store.append(
variables_item,
(VARIABLE, variable.name, '',
self.state_variable_icon, variable))
for action in sorted(service.get_actions().values(), key=lambda a: a.name):
action_item = self.store.append(
service_item,
(ACTION, action.name, '',
self.action_icon, action))
for argument in action.get_in_arguments():
self.store.append(
action_item,
(ARGUMENT, argument.name, '',
self.action_arg_in_icon, argument))
for argument in action.get_out_arguments():
self.store.append(
action_item, (
ARGUMENT, argument.name, '',
self.action_arg_out_icon, argument))
for embedded_device in device.devices:
self.device_found(embedded_device, row=item)
def device_removed(self, usn=None):
self.info('device_removed %r', usn)
row_count = 0
for row in self.store:
if usn == row[UDN_COLUMN]:
ids = []
for w in self.windows.keys():
if w.startswith(usn):
ids.append(w)
for id in ids:
self.windows[id].destroy()
del self.windows[id]
self.store.remove(self.store.get_iter(row_count))
break
row_count += 1
def mediaserver_browse(self, widget, device):
from mediaserver import MediaServerWidget
id = '@'.join((device.get_usn(), 'MediaServerBrowse'))
try:
self.windows[id].show()
except:
ui = MediaServerWidget(self.coherence, device)
self.windows[id] = ui.window
#ui.cb_item_right_click = self.button_pressed
#ui.window.show_all()
def mediarenderer_control(self, widget, device):
from mediarenderer import MediaRendererWidget
id = '@'.join((device.get_usn(), 'MediaRendererControl'))
try:
self.windows[id].show()
except:
ui = MediaRendererWidget(self.coherence, device)
self.windows[id] = ui.window
def igd_control(self, widget, device):
from igd import IGDWidget
id = '@'.join((device.get_usn(), 'IGDControl'))
try:
self.windows[id].show()
except:
ui = IGDWidget(self.coherence, device)
self.windows[id] = ui.window
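
# ----------------------------------------------------------------------------
# Usage sketch (illustrative only): embed the DevicesWidget in a plain
# top-level window.  Assumptions: a real application installs Twisted's
# gtk2reactor *before* this module is imported so GTK and UPnP traffic share
# one event loop, and the Coherence configuration dict used below is a
# hypothetical minimal one.
def _example_main():
    coherence = Coherence({'logmode': 'warning'})  # assumed minimal config
    devices = DevicesWidget(coherence)
    toplevel = gtk.Window()
    toplevel.set_default_size(350, 500)
    toplevel.set_title('UPnP devices')
    # Stop the reactor (and thereby the GTK loop) when the window is closed.
    toplevel.connect('destroy', lambda w: reactor.stop())
    toplevel.add(devices.window)
    toplevel.show_all()
    reactor.run()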
|
|
# -*- coding: utf-8 -*-
"""
Make HTTP API requests easily using the Gherkin language.
"""
import json
from behave import given, when
from sdklib.http import HttpRequestContext, HttpSdk
from sdklib.http.authorization import BasicAuthentication, X11PathsAuthentication
from sdklib.http.renderers import FormRenderer, JSONRenderer
__all__ = (
'set_default_host', 'set_default_proxy', 'set_url_path', 'set_url_path_with_params',
'set_authorization_basic', 'set_11path_authorization', 'set_headers', 'set_query_parameters',
'set_body_parameters', 'set_form_parameters', 'set_body_files', 'send_http_request',
'send_http_request_with_query_parameters', 'send_http_request_with_form_parameters',
'send_http_request_with_body_parameters'
)
def safe_add_http_request_context_to_behave_context(context):
if not hasattr(context, "http_request_context"):
context.http_request_context = HttpRequestContext()
@given('The API endpoint "{host}"')
def set_default_host(context, host):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.host = host
@given('The API proxy "{host}"')
def set_default_proxy(context, host):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.proxy = host
@given('The API resource "{url_path}"')
def set_url_path(context, url_path):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.url_path = url_path
@given('The parameterized API resource "{url_path_str_format}" with these parameter values')
def set_url_path_with_params(context, url_path_str_format):
"""
Parameters:
+------+--------+
| key | value |
+======+========+
| key1 | value1 |
+------+--------+
| key2 | value2 |
+------+--------+
"""
safe_add_http_request_context_to_behave_context(context)
table_as_json = dict(context.table)
url_path = url_path_str_format % table_as_json
context.http_request_context.url_path = url_path
@given('Authorization-Basic with username "{username}" and password "{password}"')
def set_authorization_basic(context, username, password):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.authentication_instances.append(
BasicAuthentication(username=username, password=password)
)
@given('11Paths-Authorization with application id "{app_id}" and secret "{secret}"')
def set_11path_authorization(context, app_id, secret):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.authentication_instances.append(
X11PathsAuthentication(app_id=app_id, secret=secret)
)
@given('The headers')
def set_headers(context):
"""
Parameters:
+--------------+---------------+
| header_name | header_value |
+==============+===============+
| header1 | value1 |
+--------------+---------------+
| header2 | value2 |
+--------------+---------------+
"""
safe_add_http_request_context_to_behave_context(context)
headers = dict()
for row in context.table:
headers[row["header_name"]] = row["header_value"]
context.http_request_context.headers = headers
@given('The query parameters')
def set_query_parameters(context):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.query_params = get_parameters(context)
@given('The body parameters')
def set_body_parameters(context):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.body_params = get_parameters(context)
@given('The form parameters')
def set_form_parameters(context):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.body_params = get_parameters(context)
context.http_request_context.renderer = FormRenderer()
def get_parameters(context):
"""
Reads parameters from context table
:param context: behave context
:return: dict with parameters names and values
"""
return {row['param_name']: row['param_value'] for row in context.table}
@given('The body files')
def set_body_files(context):
"""
Parameters:
+-------------+--------------+
| param_name | path_to_file |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
files = dict()
for row in context.table:
files[row["param_name"]] = row["path_to_file"]
context.http_request_context.files = files
@given('The default renderer')
def set_default_renderer(context):
    """
    Set default renderer
    :param context: behave context
    """
    safe_add_http_request_context_to_behave_context(context)
    context.http_request_context.renderer = None
@when('I send a HTTP "{method}" request')
def send_http_request(context, method):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.method = method
context.api_response = HttpSdk.http_request_from_context(context.http_request_context)
context.http_request_context.clear()
@when('I send a HTTP "{method}" request with query parameters')
def send_http_request_with_query_parameters(context, method):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
set_query_parameters(context)
send_http_request(context, method)
@when('I send a HTTP "{method}" request with body parameters')
def send_http_request_with_body_parameters(context, method):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
set_body_parameters(context)
send_http_request(context, method)
@when('I send a HTTP "{method}" request with form parameters')
def send_http_request_with_form_parameters(context, method):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
set_form_parameters(context)
send_http_request(context, method)
@when('I send a HTTP "{method}" request with body parameters encoded "{encoding_type}"')
def send_http_request_with_body_parameters_encoded(context, method, encoding_type):
pass
@when('I send a HTTP "{method}" request with this body "{resource_file}"')
def send_http_request_with_body_resource_file(context, method, resource_file):
pass
@when('I send a HTTP "{method}" request with this JSON')
def send_http_request_with_json(context, method):
"""
Parameters:
.. code-block:: json
{
"param1": "value1",
"param2": "value2",
"param3": {
"param31": "value31"
}
}
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.body_params = json.loads(context.text)
context.http_request_context.renderer = JSONRenderer()
send_http_request(context, method)
@when('I send a HTTP "{method}" request with this XML')
def send_http_request_with_xml(context, method):
pass
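
# ----------------------------------------------------------------------------
# Usage sketch: these steps are normally driven by a Gherkin feature file,
# for example (hypothetical endpoint and resource):
#
#   Feature: query an HTTP API
#     Scenario: simple GET
#       Given The API endpoint "http://httpbin.org"
#       And The API resource "/get"
#       When I send a HTTP "GET" request
#
# Because the step functions remain directly callable (the module itself
# relies on this, e.g. send_http_request_with_query_parameters calls
# set_query_parameters), they can also be composed outside behave with a
# minimal stand-in context, as sketched below.
def _example_direct_call():
    class _Context(object):
        pass

    context = _Context()
    set_default_host(context, "http://httpbin.org")  # illustrative host only
    set_url_path(context, "/get")
    send_http_request(context, "GET")
    return context.api_response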
|
|
""" Abiword to HTML Converter for PubTal
Copyright (c) 2003 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
import xml.sax, StringIO, cgi
import logging
#font-weight: bold; font-style: italic; text-decoration: underline, line-through, overline
HTML_StyleMap = {'italic': ('font-style', 'italic'), 'bold': ('font-weight', 'bold')
,'subscript': ('vertical-align', 'sub'), 'superscript': ('vertical-align', 'super')
,'underline': ('text-decoration', 'underline'), 'line-through': ('text-decoration', 'line-through')
,'overline': ('text-decoration', 'overline')}
HTML_StartTagMap = {'text-style': '<span style="%s">', 'Bullet List': '<ul>'
,'Numbered List': '<ol>', 'List Item': '<li>', 'link': '<a href="%s">'
,'Start Bookmark': '<a name="%s">'
,'Start endnote': '<a href="#%s">%s</a>'
,'Endnote Anchor': '<a name="%s" style="vertical-align: super">%s</a>'
,'table': '<table>', 'tablerow': '<tr>', 'tablecell': '<td%s>'
,'p': '<p>', 'h1': '<h1>', 'h2': '<h2>', 'h3': '<h3>', 'h4': '<h4>'
, 'h5': '<h5>', 'Plain Text': '<pre>', 'br': '<br>'}
# Note that we don't have any <br> end tag - it's not used in either HTML or XHTML
HTML_EndTagMap = {'text-style': '</span>', 'Bullet List': '</ul>'
,'Numbered List': '</ol>', 'List Item': '</li>', 'link': '</a>'
,'End Bookmark': '</a>'
,'table': '</table>', 'tablerow': '</tr>', 'tablecell': '</td>'
,'p': '</p>', 'h1': '</h1>', 'h2': '</h2>', 'h3': '</h3>', 'h4': '</h4>'
, 'h5': '</h5>', 'Plain Text': '</pre>'}
class AbiwordToHTMLConverter (xml.sax.handler.ContentHandler, xml.sax.handler.DTDHandler):
""" Convert AbiWord format to HTML or XHTML
"""
def __init__ (self):
xml.sax.handler.ContentHandler.__init__ (self)
self.log = logging.getLogger ("PubTal.AbiwordToHTMLConverter")
def convertContent (self, content):
self.result = StringIO.StringIO()
self.scopeStack = []
self.StartTagMap = HTML_StartTagMap
self.EndTagMap = HTML_EndTagMap
self.StyleMap = HTML_StyleMap
self.ourParser = xml.sax.make_parser()
self.log.debug ("Setting features of parser")
self.ourParser.setFeature (xml.sax.handler.feature_external_ges, 0)
self.ourParser.setFeature (xml.sax.handler.feature_namespaces, 0)
self.ourParser.setContentHandler (self)
# Initialise our state
self.metaData = {}
self.data = []
self.currentAttributes = None
self.statefulMarkup = StatefulMarkup (self.result, self.StartTagMap, self.EndTagMap)
# Dictionary of current text styles (e.g. bold, italic, etc)
self.textStyle = {}
# List of endNotes that we've built up. Tuple of (linkName, linkHTML)
self.endNoteNum = 1
self.endNoteToNumMap = {}
self.endNotes = []
# Parse the content as XML
self.ourParser.parse (content)
def getBody (self):
return self.result.getvalue()
def getFootnotes (self):
return u"".join (self.endNotes)
def getMetadata (self):
return self.metaData
def startElement (self, tag, attributes):
self.log.debug ("Recieved Start Tag: " + tag + " Attributes: " + str (attributes))
self.currentAttributes = attributes
propertiesList = attributes.get ('props', "").split (';')
properties = {}
for prop in propertiesList:
breakPoint = prop.find (':')
properties [prop[0:breakPoint].strip()] = prop [breakPoint + 1:].strip()
self.log.debug ("Character properties: %s" % str (properties))
if (tag == "abiword"):
try:
fileformat = attributes ['fileformat']
except:
msg = ("No fileformat attribute on abiword element!")
self.log.error (msg)
raise AbiwordFormatException (msg)
if (fileformat != "1.1"):
self.log.warn ("Only file format 1.1 has been tested. Content is version %s" % fileformat)
elif (tag == "p"):
self.data = []
self.statefulMarkup.startParagraph (tag, attributes, properties)
elif (tag == "c"):
self.writeStyledText()
if (properties.get ("font-weight", "") == "bold"):
self.textStyle ['bold'] = 1
if (properties.get ("font-style","") == "italic"):
self.textStyle ['italic'] = 1
# This handles superscript and subscript
textPosition = properties.get ("text-position", "")
self.textStyle [textPosition] = 1
# This handles overline, line-through, and underline
textDecoration = properties.get ("text-decoration", "").split (" ")
for decor in textDecoration:
self.textStyle [decor] = 1
elif (tag == "a"):
linkDest = attributes ['xlink:href']
self.result.write (self.StartTagMap ['link'] % cgi.escape (linkDest))
elif (tag == "br"):
# Write out any styled text and re-open SPANs as needed.
self.writeStyledText()
self.result.write (self.StartTagMap ['br'])
elif (tag == "bookmark"):
self.writeStyledText()
self.statefulMarkup.startBookmark (tag, attributes, properties)
elif (tag == "field"):
self.writeStyledText()
# Is this a footnote or endnote?
type = attributes ['type']
id = None
if (type == "footnote_ref"):
id = "footnote-id-%s" % attributes ['footnote-id']
self.endNoteToNumMap [id] = self.endNoteNum
self.result.write (self.StartTagMap ['Start endnote'] % (id, str (self.endNoteNum)))
self.endNoteNum = self.endNoteNum + 1
elif (type == "endnote_ref"):
id = "endnote-id-%s" % attributes ['endnote-id']
self.endNoteToNumMap [id] = self.endNoteNum
self.result.write (self.StartTagMap ['Start endnote'] % (id, str (self.endNoteNum)))
self.endNoteNum += 1
elif (type == "endnote_anchor"):
# The anchor text.
id = "endnote-id-%s" % attributes ['endnote-id']
self.result.write (self.StartTagMap ['Endnote Anchor'] % (id, str (self.endNoteToNumMap[id])))
elif (type == "footnote_anchor"):
# The anchor text for a footnote.
id = "footnote-id-%s" % attributes ['footnote-id']
self.result.write (self.StartTagMap ['Endnote Anchor'] % (id, str (self.endNoteToNumMap[id])))
elif (tag == "foot" or tag == "endnote"):
# Capture the footnote/endnote separately.
self.scopeStack.append ((self.result, self.statefulMarkup))
self.result = StringIO.StringIO()
self.statefulMarkup = StatefulMarkup (self.result, self.StartTagMap, self.EndTagMap)
elif (tag == "table"):
# The beginning of a table can mean the end of a list.
self.statefulMarkup.structureChange()
self.result.write (self.StartTagMap ['table'])
elif (tag == "cell"):
leftAttach = int (properties ['left-attach'])
rightAttach = int (properties ['right-attach'])
bottomAttach = int (properties ['bot-attach'])
topAttach = int (properties ['top-attach'])
width = rightAttach - leftAttach
cellAtts = u""
if (width > 1):
cellAtts += ' colspan="%s"' % str (width)
height = bottomAttach - topAttach
if (height > 1):
cellAtts += ' rowspan="%s"' % str (height)
# Do we have to close a TR?
if (leftAttach == 0):
if (topAttach != 0):
# This isn't the first row, so we need to close a previous one!
self.result.write (self.EndTagMap ['tablerow'])
self.result.write (self.StartTagMap ['tablerow'])
self.result.write (self.StartTagMap ['tablecell'] % cellAtts)
elif (tag == "m"):
# For metadata we want to clear out any previous text we've accumulated.
self.data = []
else:
#self.log.warn ("Unknown start element %s" % tag)
self.statefulMarkup.structureChange()
def endElement (self, tag):
self.log.debug ("Recieved Real End Tag: " + tag)
if (tag == "m"):
keyName = self.currentAttributes ['key']
if (keyName.startswith ("dc.")):
keyName = keyName [3:]
if (keyName == "creator"):
# Used in PubTal to keep things the same as the examples.
keyName = "author"
data = u"".join (self.data)
self.log.debug ("Meta information key=%s value=%s" % (keyName, data))
self.metaData [keyName] = data
elif (tag == "p"):
self.writeStyledText()
self.statefulMarkup.endParagraph (tag)
elif (tag == "c"):
self.writeStyledText()
self.textStyle = {}
elif (tag == "a"):
self.result.write (self.EndTagMap ['link'])
elif (tag == "foot" or tag == "endnote"):
self.endNotes.append (self.result.getvalue())
self.result, self.statefulMarkup = self.scopeStack.pop()
elif (tag == "table"):
self.statefulMarkup.structureChange()
self.result.write (self.EndTagMap ['tablerow'])
self.result.write (self.EndTagMap ['table'])
elif (tag == "cell"):
# Ends of cells can mean the end of a list - best check
self.statefulMarkup.structureChange()
self.result.write (self.EndTagMap ['tablecell'])
elif (tag == "bookmark"):
pass
elif (tag == "field"):
pass
else:
#self.log.warn ("Unknown end element %s" % tag)
self.statefulMarkup.structureChange()
def characters (self, data):
# Accumulate the character data together so that we can merge all the newline events
self.log.debug ("Recieved character data: " + data)
self.data.append (data)
def writeStyledText (self):
if (len (self.data) == 0):
self.log.debug ("No text to write.")
return
styleDictionary = {}
for style in self.textStyle.keys():
styleProperty, styleValue = self.StyleMap.get (style, (None, None))
if (styleProperty is not None):
curPropVal = styleDictionary.get (styleProperty, u"")
if (len (curPropVal) > 0):
curPropVal += ', ' + styleValue
else:
curPropVal = styleValue
styleDictionary [styleProperty] = curPropVal
# Now build the style attribute value.
if (len (styleDictionary) > 0):
styleValueList = []
for property in styleDictionary.keys():
# Get the value for this property
value = styleDictionary [property]
styleValueList.append (property + ": " + value)
self.result.write (self.StartTagMap ['text-style'] % u"; ".join (styleValueList))
# Write out the text
self.result.write (cgi.escape (u"".join (self.data)))
self.data = []
if (len (styleDictionary) > 0):
self.result.write (self.EndTagMap ['text-style'])
class StatefulMarkup:
def __init__ (self, result, startTagMap, endTagMap):
""" The StatefulMarkup class is used to maintain the context for
either the main document or a footnote or endnote.
It handles the complications of lists.
"""
self.log = logging.getLogger ("PubTal.AbiwordToHTMLConverter.StatefulMarkup")
self.result = result
self.StartTagMap = startTagMap
self.EndTagMap = endTagMap
self.paragraphType = None
# List of currently open bookmark (anchor) links.
self.bookmarks = []
# Current stack of lists.
self.listStack = []
def startParagraph (self, tag, attributes, properties):
paragraphType = attributes.get ('style', "")
self.log.debug ("Starting a new paragraph, type %s" % paragraphType)
if (attributes.has_key ('listid')):
# This is a list item.
listStyle = properties.get ('list-style', 'Bullet List')
listLevel = attributes ['level']
if (len (self.listStack) > 0):
# We already have a list opened, so let's compare levels
oldListLevel, oldListType = self.listStack[-1]
if (oldListLevel < listLevel):
# We are growing outwards with this item.
self.result.write (self.StartTagMap [listStyle])
# Add this list to the stack
self.listStack.append ((listLevel, listStyle))
elif (oldListLevel > listLevel):
# We are going down a level!
# Take this opportunity to close out the list item.
self.result.write (self.EndTagMap ['List Item'])
# Close the actual list
self.result.write (self.EndTagMap [oldListType])
# Also close out the containing list item.
# Take this opportunity to close out the list item.
self.result.write (self.EndTagMap ['List Item'])
self.listStack.pop()
else:
# This is an item in an existing list, so close out the last item.
self.result.write (self.EndTagMap ['List Item'])
else:
# This is the first item in a new list!
# Add this list to the stack
self.listStack.append ((listLevel, listStyle))
self.result.write (self.StartTagMap [listStyle])
# This paragraph type is really a list item.
self.paragraphType = "List Item"
else:
# This is not a list item - check for the possibility of an open list
while (len (self.listStack) > 0):
self.log.debug ("We have an open list, but the next P element is not a list item!")
oldListLevel, oldListType = self.listStack.pop()
# Take this opportunity to close out the list item.
self.result.write (self.EndTagMap ['List Item'])
# Close the old list type
self.result.write (self.EndTagMap [oldListType])
if (paragraphType.startswith ("Heading")):
headingLevel = paragraphType [-1:]
self.paragraphType = u"h" + headingLevel
elif (paragraphType == "Plain Text"):
self.paragraphType = "Plain Text"
else:
self.paragraphType = "p"
self.result.write (self.StartTagMap [self.paragraphType])
def endParagraph (self, tag):
self.log.debug ("Closing paragraph of type %s" % self.paragraphType)
while (len (self.bookmarks) > 0):
oldBookmark = self.bookmarks.pop()
self.result.write (self.EndTagMap ['End Bookmark'])
# Don't write out the </li> for lists here - it depends on what follows next!
if (self.paragraphType != 'List Item'):
self.result.write (self.EndTagMap [self.paragraphType] + '\n')
def startBookmark (self, tag, attributes, properties):
# Is this the start, or end of a bookmark?
type = attributes ['type']
name = attributes ['name']
if (type == "end" and name in self.bookmarks):
# Closing a bookmark
self.result.write (self.EndTagMap ['End Bookmark'])
self.bookmarks.remove (name)
elif (type == "start"):
# Opening a new bookmark.
self.result.write (self.StartTagMap ['Start Bookmark'] % name)
self.bookmarks.append (name)
def structureChange (self):
""" Called to indicate that the next tag type was not a paragraph.
Used for when <table> closes a list, etc.
"""
while (len (self.listStack) > 0):
self.log.debug ("We have an open list, but the next P element is not a list item!")
oldListLevel, oldListType = self.listStack.pop()
# Take this opportunity to close out the list item.
self.result.write (self.EndTagMap ['List Item'])
# Close the old list type
self.result.write (self.EndTagMap [oldListType])
class AbiwordFormatException (Exception):
pass
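
# ----------------------------------------------------------------------------
# Usage sketch: feed AbiWord XML to the converter and read back the generated
# HTML body and metadata.  The document string below is a hypothetical,
# minimal fileformat 1.1 fragment, not a real PubTal input file.
def _example_convert():
    sample = StringIO.StringIO(
        '<?xml version="1.0"?>'
        '<abiword fileformat="1.1">'
        '<m key="dc.creator">Jane Doe</m>'
        '<p style="Heading 1"><c>Hello</c></p>'
        '<p><c props="font-weight:bold">World</c></p>'
        '</abiword>')
    converter = AbiwordToHTMLConverter()
    converter.convertContent(sample)
    # Expected (roughly): metadata {'author': u'Jane Doe'} and a body of
    # '<h1>Hello</h1>\n<p><span style="font-weight: bold">World</span></p>\n'
    return converter.getMetadata(), converter.getBody()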
|
|
import unittest
import os
from test.support import TESTFN, import_fresh_module
c_stat = import_fresh_module('stat', fresh=['_stat'])
py_stat = import_fresh_module('stat', blocked=['_stat'])
class TestFilemode:
statmod = None
file_flags = {'SF_APPEND', 'SF_ARCHIVED', 'SF_IMMUTABLE', 'SF_NOUNLINK',
'SF_SNAPSHOT', 'UF_APPEND', 'UF_COMPRESSED', 'UF_HIDDEN',
'UF_IMMUTABLE', 'UF_NODUMP', 'UF_NOUNLINK', 'UF_OPAQUE'}
formats = {'S_IFBLK', 'S_IFCHR', 'S_IFDIR', 'S_IFIFO', 'S_IFLNK',
'S_IFREG', 'S_IFSOCK'}
format_funcs = {'S_ISBLK', 'S_ISCHR', 'S_ISDIR', 'S_ISFIFO', 'S_ISLNK',
'S_ISREG', 'S_ISSOCK'}
stat_struct = {
'ST_MODE': 0,
'ST_INO': 1,
'ST_DEV': 2,
'ST_NLINK': 3,
'ST_UID': 4,
'ST_GID': 5,
'ST_SIZE': 6,
'ST_ATIME': 7,
'ST_MTIME': 8,
'ST_CTIME': 9}
# permission bit values are defined by POSIX
permission_bits = {
'S_ISUID': 0o4000,
'S_ISGID': 0o2000,
'S_ENFMT': 0o2000,
'S_ISVTX': 0o1000,
'S_IRWXU': 0o700,
'S_IRUSR': 0o400,
'S_IREAD': 0o400,
'S_IWUSR': 0o200,
'S_IWRITE': 0o200,
'S_IXUSR': 0o100,
'S_IEXEC': 0o100,
'S_IRWXG': 0o070,
'S_IRGRP': 0o040,
'S_IWGRP': 0o020,
'S_IXGRP': 0o010,
'S_IRWXO': 0o007,
'S_IROTH': 0o004,
'S_IWOTH': 0o002,
'S_IXOTH': 0o001}
def setUp(self):
try:
os.remove(TESTFN)
except OSError:
try:
os.rmdir(TESTFN)
except OSError:
pass
tearDown = setUp
def get_mode(self, fname=TESTFN, lstat=True):
if lstat:
st_mode = os.lstat(fname).st_mode
else:
st_mode = os.stat(fname).st_mode
modestr = self.statmod.filemode(st_mode)
return st_mode, modestr
def assertS_IS(self, name, mode):
# test format, lstrip is for S_IFIFO
fmt = getattr(self.statmod, "S_IF" + name.lstrip("F"))
self.assertEqual(self.statmod.S_IFMT(mode), fmt)
# test that just one function returns true
testname = "S_IS" + name
for funcname in self.format_funcs:
func = getattr(self.statmod, funcname, None)
if func is None:
if funcname == testname:
raise ValueError(funcname)
continue
if funcname == testname:
self.assertTrue(func(mode))
else:
self.assertFalse(func(mode))
def test_mode(self):
with open(TESTFN, 'w'):
pass
if os.name == 'posix':
os.chmod(TESTFN, 0o700)
st_mode, modestr = self.get_mode()
self.assertEqual(modestr, '-rwx------')
self.assertS_IS("REG", st_mode)
self.assertEqual(self.statmod.S_IMODE(st_mode),
self.statmod.S_IRWXU)
os.chmod(TESTFN, 0o070)
st_mode, modestr = self.get_mode()
self.assertEqual(modestr, '----rwx---')
self.assertS_IS("REG", st_mode)
self.assertEqual(self.statmod.S_IMODE(st_mode),
self.statmod.S_IRWXG)
os.chmod(TESTFN, 0o007)
st_mode, modestr = self.get_mode()
self.assertEqual(modestr, '-------rwx')
self.assertS_IS("REG", st_mode)
self.assertEqual(self.statmod.S_IMODE(st_mode),
self.statmod.S_IRWXO)
os.chmod(TESTFN, 0o444)
st_mode, modestr = self.get_mode()
self.assertS_IS("REG", st_mode)
self.assertEqual(modestr, '-r--r--r--')
self.assertEqual(self.statmod.S_IMODE(st_mode), 0o444)
else:
os.chmod(TESTFN, 0o700)
st_mode, modestr = self.get_mode()
self.assertEqual(modestr[:3], '-rw')
self.assertS_IS("REG", st_mode)
self.assertEqual(self.statmod.S_IFMT(st_mode),
self.statmod.S_IFREG)
def test_directory(self):
os.mkdir(TESTFN)
os.chmod(TESTFN, 0o700)
st_mode, modestr = self.get_mode()
self.assertS_IS("DIR", st_mode)
if os.name == 'posix':
self.assertEqual(modestr, 'drwx------')
else:
self.assertEqual(modestr[0], 'd')
@unittest.skipUnless(hasattr(os, 'symlink'), 'os.symlink not available')
def test_link(self):
try:
os.symlink(os.getcwd(), TESTFN)
except (OSError, NotImplementedError) as err:
raise unittest.SkipTest(str(err))
else:
st_mode, modestr = self.get_mode()
self.assertEqual(modestr[0], 'l')
self.assertS_IS("LNK", st_mode)
@unittest.skipUnless(hasattr(os, 'mkfifo'), 'os.mkfifo not available')
def test_fifo(self):
os.mkfifo(TESTFN, 0o700)
st_mode, modestr = self.get_mode()
self.assertEqual(modestr, 'prwx------')
self.assertS_IS("FIFO", st_mode)
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_devices(self):
if os.path.exists(os.devnull):
st_mode, modestr = self.get_mode(os.devnull, lstat=False)
self.assertEqual(modestr[0], 'c')
self.assertS_IS("CHR", st_mode)
# Linux block devices, BSD has no block devices anymore
for blockdev in ("/dev/sda", "/dev/hda"):
if os.path.exists(blockdev):
st_mode, modestr = self.get_mode(blockdev, lstat=False)
self.assertEqual(modestr[0], 'b')
self.assertS_IS("BLK", st_mode)
break
def test_module_attributes(self):
for key, value in self.stat_struct.items():
modvalue = getattr(self.statmod, key)
self.assertEqual(value, modvalue, key)
for key, value in self.permission_bits.items():
modvalue = getattr(self.statmod, key)
self.assertEqual(value, modvalue, key)
for key in self.file_flags:
modvalue = getattr(self.statmod, key)
self.assertIsInstance(modvalue, int)
for key in self.formats:
modvalue = getattr(self.statmod, key)
self.assertIsInstance(modvalue, int)
for key in self.format_funcs:
func = getattr(self.statmod, key)
self.assertTrue(callable(func))
self.assertEqual(func(0), 0)
class TestFilemodeCStat(TestFilemode, unittest.TestCase):
statmod = c_stat
formats = TestFilemode.formats | {'S_IFDOOR', 'S_IFPORT', 'S_IFWHT'}
format_funcs = TestFilemode.format_funcs | {'S_ISDOOR', 'S_ISPORT',
'S_ISWHT'}
class TestFilemodePyStat(TestFilemode, unittest.TestCase):
statmod = py_stat
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestGyp.py: a testing framework for GYP integration tests.
"""
import collections
from contextlib import contextmanager
import itertools
import os
import re
import shutil
import subprocess
import sys
import tempfile
import TestCmd
import TestCommon
from TestCommon import __all__
__all__.extend([
'TestGyp',
])
def remove_debug_line_numbers(contents):
"""Function to remove the line numbers from the debug output
of gyp and thus reduce the extreme fragility of the stdout
comparison tests.
"""
lines = contents.splitlines()
# split each line on ":"
lines = [l.split(":", 3) for l in lines]
# join each line back together while ignoring the
# 3rd column which is the line number
lines = [len(l) > 3 and ":".join(l[3:]) or l for l in lines]
return "\n".join(lines)
def match_modulo_line_numbers(contents_a, contents_b):
"""File contents matcher that ignores line numbers."""
contents_a = remove_debug_line_numbers(contents_a)
contents_b = remove_debug_line_numbers(contents_b)
return TestCommon.match_exact(contents_a, contents_b)
@contextmanager
def LocalEnv(local_env):
"""Context manager to provide a local OS environment."""
old_env = os.environ.copy()
os.environ.update(local_env)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
class TestGypBase(TestCommon.TestCommon):
"""
Class for controlling end-to-end tests of gyp generators.
Instantiating this class will create a temporary directory and
arrange for its destruction (via the TestCmd superclass) and
copy all of the non-gyptest files in the directory hierarchy of the
executing script.
The default behavior is to test the 'gyp' or 'gyp.bat' file in the
current directory. An alternative may be specified explicitly on
instantiation, or by setting the TESTGYP_GYP environment variable.
This class should be subclassed for each supported gyp generator
(format). Various abstract methods below define calling signatures
used by the test scripts to invoke builds on the generated build
configuration and to run executables generated by those builds.
"""
formats = []
build_tool = None
build_tool_list = []
_exe = TestCommon.exe_suffix
_obj = TestCommon.obj_suffix
shobj_ = TestCommon.shobj_prefix
_shobj = TestCommon.shobj_suffix
lib_ = TestCommon.lib_prefix
_lib = TestCommon.lib_suffix
dll_ = TestCommon.dll_prefix
_dll = TestCommon.dll_suffix
# Constants to represent different targets.
ALL = '__all__'
DEFAULT = '__default__'
# Constants for different target types.
EXECUTABLE = '__executable__'
STATIC_LIB = '__static_lib__'
SHARED_LIB = '__shared_lib__'
def __init__(self, gyp=None, *args, **kw):
self.origin_cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
self.extra_args = sys.argv[1:]
if not gyp:
gyp = os.environ.get('TESTGYP_GYP')
if not gyp:
if sys.platform == 'win32':
gyp = 'gyp.bat'
else:
gyp = 'gyp'
self.gyp = os.path.abspath(gyp)
self.no_parallel = False
self.formats = [self.format]
self.initialize_build_tool()
kw.setdefault('match', TestCommon.match_exact)
# Put test output in out/testworkarea by default.
# Use temporary names so there are no collisions.
workdir = os.path.join('out', kw.get('workdir', 'testworkarea'))
# Create work area if it doesn't already exist.
if not os.path.isdir(workdir):
os.makedirs(workdir)
kw['workdir'] = tempfile.mktemp(prefix='testgyp.', dir=workdir)
formats = kw.pop('formats', [])
super(TestGypBase, self).__init__(*args, **kw)
real_format = self.format.split('-')[-1]
excluded_formats = set([f for f in formats if f[0] == '!'])
included_formats = set(formats) - excluded_formats
if ('!'+real_format in excluded_formats or
included_formats and real_format not in included_formats):
msg = 'Invalid test for %r format; skipping test.\n'
self.skip_test(msg % self.format)
self.copy_test_configuration(self.origin_cwd, self.workdir)
self.set_configuration(None)
# Set $HOME so that gyp doesn't read the user's actual
# ~/.gyp/include.gypi file, which may contain variables
# and other settings that would change the output.
os.environ['HOME'] = self.workpath()
# Clear $GYP_DEFINES for the same reason.
if 'GYP_DEFINES' in os.environ:
del os.environ['GYP_DEFINES']
# Override the user's language settings, which could
# otherwise make the output vary from what is expected.
os.environ['LC_ALL'] = 'C'
def built_file_must_exist(self, name, type=None, **kw):
"""
Fails the test if the specified built file name does not exist.
"""
return self.must_exist(self.built_file_path(name, type, **kw))
def built_file_must_not_exist(self, name, type=None, **kw):
"""
Fails the test if the specified built file name exists.
"""
return self.must_not_exist(self.built_file_path(name, type, **kw))
def built_file_must_match(self, name, contents, **kw):
"""
Fails the test if the contents of the specified built file name
do not match the specified contents.
"""
return self.must_match(self.built_file_path(name, **kw), contents)
def built_file_must_not_match(self, name, contents, **kw):
"""
Fails the test if the contents of the specified built file name
match the specified contents.
"""
return self.must_not_match(self.built_file_path(name, **kw), contents)
def built_file_must_not_contain(self, name, contents, **kw):
"""
Fails the test if the specified built file name contains the specified
contents.
"""
return self.must_not_contain(self.built_file_path(name, **kw), contents)
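  # Typical use from a gyptest script (comment only; file names are
  # illustrative):
  #   test.built_file_must_exist('prog', type=test.EXECUTABLE)
  #   test.built_file_must_match('out.txt', 'expected contents\n', chdir='src')
  # Each helper resolves the generator-specific output location through
  # built_file_path() before delegating to the TestCommon assertion.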
def copy_test_configuration(self, source_dir, dest_dir):
"""
Copies the test configuration from the specified source_dir
(the directory in which the test script lives) to the
specified dest_dir (a temporary working directory).
This ignores all files and directories that begin with
the string 'gyptest', and all '.svn' subdirectories.
"""
for root, dirs, files in os.walk(source_dir):
if '.svn' in dirs:
dirs.remove('.svn')
      dirs[:] = [d for d in dirs if not d.startswith('gyptest')]
files = [ f for f in files if not f.startswith('gyptest') ]
for dirname in dirs:
source = os.path.join(root, dirname)
destination = source.replace(source_dir, dest_dir)
os.mkdir(destination)
if sys.platform != 'win32':
shutil.copystat(source, destination)
for filename in files:
source = os.path.join(root, filename)
destination = source.replace(source_dir, dest_dir)
shutil.copy2(source, destination)
def initialize_build_tool(self):
"""
Initializes the .build_tool attribute.
Searches the .build_tool_list for an executable name on the user's
$PATH. The first tool on the list is used as-is if nothing is found
on the current $PATH.
"""
for build_tool in self.build_tool_list:
if not build_tool:
continue
if os.path.isabs(build_tool):
self.build_tool = build_tool
return
build_tool = self.where_is(build_tool)
if build_tool:
self.build_tool = build_tool
return
if self.build_tool_list:
self.build_tool = self.build_tool_list[0]
def relocate(self, source, destination):
"""
Renames (relocates) the specified source (usually a directory)
to the specified destination, creating the destination directory
first if necessary.
Note: Don't use this as a generic "rename" operation. In the
future, "relocating" parts of a GYP tree may affect the state of
the test to modify the behavior of later method calls.
"""
destination_dir = os.path.dirname(destination)
if not os.path.exists(destination_dir):
self.subdir(destination_dir)
os.rename(source, destination)
def report_not_up_to_date(self):
"""
Reports that a build is not up-to-date.
This provides common reporting for formats that have complicated
conditions for checking whether a build is up-to-date. Formats
    that expect exact output from the command (make) can
    just set stdout= when they call the build() method.
"""
print "Build is not up-to-date:"
print self.banner('STDOUT ')
print self.stdout()
stderr = self.stderr()
if stderr:
print self.banner('STDERR ')
print stderr
def run_gyp(self, gyp_file, *args, **kw):
"""
Runs gyp against the specified gyp_file with the specified args.
"""
    # When running gyp and comparing its output, we use a comparator that
    # ignores the line numbers that gyp logs in its debug output.
if kw.pop('ignore_line_numbers', False):
kw.setdefault('match', match_modulo_line_numbers)
# TODO: --depth=. works around Chromium-specific tree climbing.
depth = kw.pop('depth', '.')
run_args = ['--depth='+depth]
    run_args.extend(['--format='+f for f in self.formats])
run_args.append(gyp_file)
if self.no_parallel:
run_args += ['--no-parallel']
# TODO: if extra_args contains a '--build' flag
# we really want that to only apply to the last format (self.format).
run_args.extend(self.extra_args)
# Default xcode_ninja_target_pattern to ^.*$ to fix xcode-ninja tests
xcode_ninja_target_pattern = kw.pop('xcode_ninja_target_pattern', '.*')
run_args.extend(
['-G', 'xcode_ninja_target_pattern=%s' % xcode_ninja_target_pattern])
run_args.extend(args)
return self.run(program=self.gyp, arguments=run_args, **kw)
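  # Typical end-to-end flow in a gyptest script (comment only; file and target
  # names are illustrative):
  #   test = TestGyp.TestGyp()
  #   test.run_gyp('hello.gyp')
  #   test.build('hello.gyp', test.ALL)
  #   test.run_built_executable('hello', stdout='Hello, world!\n')
  #   test.pass_test()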
def run(self, *args, **kw):
"""
Executes a program by calling the superclass .run() method.
This exists to provide a common place to filter out keyword
arguments implemented in this layer, without having to update
the tool-specific subclasses or clutter the tests themselves
with platform-specific code.
"""
if kw.has_key('SYMROOT'):
del kw['SYMROOT']
super(TestGypBase, self).run(*args, **kw)
def set_configuration(self, configuration):
"""
Sets the configuration, to be used for invoking the build
tool and testing potential built output.
"""
self.configuration = configuration
def configuration_dirname(self):
if self.configuration:
return self.configuration.split('|')[0]
else:
return 'Default'
def configuration_buildname(self):
if self.configuration:
return self.configuration
else:
return 'Default'
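  # Example (comment only): after set_configuration('Debug|Win32'),
  # configuration_dirname() returns 'Debug' (used when composing output
  # paths) while configuration_buildname() returns the full 'Debug|Win32'
  # string expected by build tools such as Visual Studio.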
#
# Abstract methods to be defined by format-specific subclasses.
#
def build(self, gyp_file, target=None, **kw):
"""
Runs a build of the specified target against the configuration
generated from the specified gyp_file.
A 'target' argument of None or the special value TestGyp.DEFAULT
specifies the default argument for the underlying build tool.
A 'target' argument of TestGyp.ALL specifies the 'all' target
(if any) of the underlying build tool.
"""
raise NotImplementedError
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type.
"""
raise NotImplementedError
def built_file_basename(self, name, type=None, **kw):
"""
Returns the base name of the specified file name, of the specified type.
A bare=True keyword argument specifies that prefixes and suffixes shouldn't
be applied.
"""
if not kw.get('bare'):
if type == self.EXECUTABLE:
name = name + self._exe
elif type == self.STATIC_LIB:
name = self.lib_ + name + self._lib
elif type == self.SHARED_LIB:
name = self.dll_ + name + self._dll
return name
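  # Example (comment only, relying on TestCommon's platform constants): on
  # Linux, built_file_basename('foo', SHARED_LIB) yields 'libfoo.so' and
  # STATIC_LIB yields 'libfoo.a'; on Windows the same calls yield 'foo.dll'
  # and 'foo.lib'.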
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable program built from a gyp-generated configuration.
The specified name should be independent of any particular generator.
Subclasses should find the output executable in the appropriate
output build directory, tack on any necessary executable suffix, etc.
"""
raise NotImplementedError
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified target is up to date.
The subclass should implement this by calling build()
(or a reasonable equivalent), checking whatever conditions
will tell it the build was an "up to date" null build, and
failing if it isn't.
"""
raise NotImplementedError
class TestGypGypd(TestGypBase):
"""
Subclass for testing the GYP 'gypd' generator (spit out the
internal data structure as pretty-printed Python).
"""
format = 'gypd'
def __init__(self, gyp=None, *args, **kw):
super(TestGypGypd, self).__init__(*args, **kw)
# gypd implies the use of 'golden' files, so parallelizing conflicts as it
# causes ordering changes.
self.no_parallel = True
class TestGypCustom(TestGypBase):
"""
Subclass for testing the GYP with custom generator
"""
def __init__(self, gyp=None, *args, **kw):
self.format = kw.pop("format")
super(TestGypCustom, self).__init__(*args, **kw)
class TestGypCMake(TestGypBase):
"""
Subclass for testing the GYP CMake generator, using cmake's ninja backend.
"""
format = 'cmake'
build_tool_list = ['cmake']
ALL = 'all'
def cmake_build(self, gyp_file, target=None, **kw):
arguments = kw.get('arguments', [])[:]
self.build_tool_list = ['cmake']
self.initialize_build_tool()
chdir = os.path.join(kw.get('chdir', '.'),
'out',
self.configuration_dirname())
kw['chdir'] = chdir
arguments.append('-G')
arguments.append('Ninja')
kw['arguments'] = arguments
stderr = kw.get('stderr', None)
if stderr:
kw['stderr'] = stderr.split('$$$')[0]
self.run(program=self.build_tool, **kw)
def ninja_build(self, gyp_file, target=None, **kw):
arguments = kw.get('arguments', [])[:]
self.build_tool_list = ['ninja']
self.initialize_build_tool()
# Add a -C output/path to the command line.
arguments.append('-C')
arguments.append(os.path.join('out', self.configuration_dirname()))
if target not in (None, self.DEFAULT):
arguments.append(target)
kw['arguments'] = arguments
stderr = kw.get('stderr', None)
if stderr:
stderrs = stderr.split('$$$')
kw['stderr'] = stderrs[1] if len(stderrs) > 1 else ''
return self.run(program=self.build_tool, **kw)
def build(self, gyp_file, target=None, status=0, **kw):
    # Two tools must be run to build: cmake and then ninja.
# Allow cmake to succeed when the overall expectation is to fail.
if status is None:
kw['status'] = None
else:
      if not isinstance(status, collections.Iterable):
        status = (status,)
kw['status'] = list(itertools.chain((0,), status))
self.cmake_build(gyp_file, target, **kw)
kw['status'] = status
self.ninja_build(gyp_file, target, **kw)
def run_built_executable(self, name, *args, **kw):
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
if sys.platform == 'darwin':
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append('out')
result.append(self.configuration_dirname())
if type == self.STATIC_LIB:
if sys.platform != 'darwin':
result.append('obj.target')
elif type == self.SHARED_LIB:
if sys.platform != 'darwin' and sys.platform != 'win32':
result.append('lib.target')
subdir = kw.get('subdir')
if subdir and type != self.SHARED_LIB:
result.append(subdir)
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
def up_to_date(self, gyp_file, target=None, **kw):
result = self.ninja_build(gyp_file, target, **kw)
if not result:
stdout = self.stdout()
if 'ninja: no work to do' not in stdout:
self.report_not_up_to_date()
self.fail_test()
return result
class TestGypMake(TestGypBase):
"""
Subclass for testing the GYP Make generator.
"""
format = 'make'
build_tool_list = ['make']
ALL = 'all'
def build(self, gyp_file, target=None, **kw):
"""
Runs a Make build using the Makefiles generated from the specified
gyp_file.
"""
arguments = kw.get('arguments', [])[:]
if self.configuration:
arguments.append('BUILDTYPE=' + self.configuration)
if target not in (None, self.DEFAULT):
arguments.append(target)
# Sub-directory builds provide per-gyp Makefiles (i.e.
# Makefile.gyp_filename), so use that if there is no Makefile.
chdir = kw.get('chdir', '')
if not os.path.exists(os.path.join(chdir, 'Makefile')):
print "NO Makefile in " + os.path.join(chdir, 'Makefile')
arguments.insert(0, '-f')
arguments.insert(1, os.path.splitext(gyp_file)[0] + '.Makefile')
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Make target is up to date.
"""
if target in (None, self.DEFAULT):
message_target = 'all'
else:
message_target = target
kw['stdout'] = "make: Nothing to be done for `%s'.\n" % message_target
return self.build(gyp_file, target, **kw)
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by Make.
"""
configuration = self.configuration_dirname()
libdir = os.path.join('out', configuration, 'lib')
# TODO(piman): when everything is cross-compile safe, remove lib.target
if sys.platform == 'darwin':
# Mac puts target shared libraries right in the product directory.
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = (
libdir + '.host:' + os.path.join('out', configuration))
else:
os.environ['LD_LIBRARY_PATH'] = libdir + '.host:' + libdir + '.target'
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Make.
Built files are in the subdirectory 'out/{configuration}'.
The default is 'out/Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
A subdir= keyword argument specifies a library subdirectory within
the default 'obj.target'.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
configuration = self.configuration_dirname()
result.extend(['out', configuration])
if type == self.STATIC_LIB and sys.platform != 'darwin':
result.append('obj.target')
elif type == self.SHARED_LIB and sys.platform != 'darwin':
result.append('lib.target')
subdir = kw.get('subdir')
if subdir and type != self.SHARED_LIB:
result.append(subdir)
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
def ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
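# Example (comment only): under cygwin, ConvertToCygpath(r'C:\Program Files')
# shells out to cygpath and returns something like '/cygdrive/c/Program Files';
# on every other platform the path is returned unchanged.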
def FindMSBuildInstallation(msvs_version = 'auto'):
"""Returns path to MSBuild for msvs_version or latest available.
Looks in the registry to find install location of MSBuild.
MSBuild before v4.0 will not build c++ projects, so only use newer versions.
"""
import TestWin
registry = TestWin.Registry()
msvs_to_msbuild = {
'2013': r'12.0',
'2012': r'4.0', # Really v4.0.30319 which comes with .NET 4.5.
'2010': r'4.0'}
msbuild_basekey = r'HKLM\SOFTWARE\Microsoft\MSBuild\ToolsVersions'
if not registry.KeyExists(msbuild_basekey):
print 'Error: could not find MSBuild base registry entry'
return None
msbuild_version = None
if msvs_version in msvs_to_msbuild:
msbuild_test_version = msvs_to_msbuild[msvs_version]
if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
msbuild_version = msbuild_test_version
else:
print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
'but corresponding MSBuild "%s" was not found.' %
             (msvs_version, msbuild_test_version))
if not msbuild_version:
for msvs_version in sorted(msvs_to_msbuild, reverse=True):
msbuild_test_version = msvs_to_msbuild[msvs_version]
if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
msbuild_version = msbuild_test_version
break
if not msbuild_version:
print 'Error: could not find MSBuild registry entry'
return None
msbuild_path = registry.GetValue(msbuild_basekey + '\\' + msbuild_version,
'MSBuildToolsPath')
if not msbuild_path:
print 'Error: could not get MSBuild registry entry value'
return None
return os.path.join(msbuild_path, 'MSBuild.exe')
def FindVisualStudioInstallation():
"""Returns appropriate values for .build_tool and .uses_msbuild fields
of TestGypBase for Visual Studio.
We use the value specified by GYP_MSVS_VERSION. If not specified, we
search %PATH% and %PATHEXT% for a devenv.{exe,bat,...} executable.
Failing that, we search for likely deployment paths.
"""
possible_roots = ['%s:\\Program Files%s' % (chr(drive), suffix)
for drive in range(ord('C'), ord('Z') + 1)
for suffix in ['', ' (x86)']]
possible_paths = {
'2013': r'Microsoft Visual Studio 12.0\Common7\IDE\devenv.com',
'2012': r'Microsoft Visual Studio 11.0\Common7\IDE\devenv.com',
'2010': r'Microsoft Visual Studio 10.0\Common7\IDE\devenv.com',
'2008': r'Microsoft Visual Studio 9.0\Common7\IDE\devenv.com',
'2005': r'Microsoft Visual Studio 8\Common7\IDE\devenv.com'}
possible_roots = [ConvertToCygpath(r) for r in possible_roots]
msvs_version = 'auto'
for flag in (f for f in sys.argv if f.startswith('msvs_version=')):
msvs_version = flag.split('=')[-1]
msvs_version = os.environ.get('GYP_MSVS_VERSION', msvs_version)
if msvs_version in possible_paths:
# Check that the path to the specified GYP_MSVS_VERSION exists.
path = possible_paths[msvs_version]
for r in possible_roots:
build_tool = os.path.join(r, path)
if os.path.exists(build_tool):
uses_msbuild = msvs_version >= '2010'
msbuild_path = FindMSBuildInstallation(msvs_version)
return build_tool, uses_msbuild, msbuild_path
else:
print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
'but corresponding "%s" was not found.' % (msvs_version, path))
# Neither GYP_MSVS_VERSION nor the path help us out. Iterate through
# the choices looking for a match.
for version in sorted(possible_paths, reverse=True):
path = possible_paths[version]
for r in possible_roots:
build_tool = os.path.join(r, path)
if os.path.exists(build_tool):
        uses_msbuild = version >= '2010'
        msbuild_path = FindMSBuildInstallation(version)
return build_tool, uses_msbuild, msbuild_path
print 'Error: could not find devenv'
sys.exit(1)
class TestGypOnMSToolchain(TestGypBase):
"""
Common subclass for testing generators that target the Microsoft Visual
Studio toolchain (cl, link, dumpbin, etc.)
"""
@staticmethod
def _ComputeVsvarsPath(devenv_path):
devenv_dir = os.path.split(devenv_path)[0]
vsvars_path = os.path.join(devenv_path, '../../Tools/vsvars32.bat')
return vsvars_path
def initialize_build_tool(self):
super(TestGypOnMSToolchain, self).initialize_build_tool()
if sys.platform in ('win32', 'cygwin'):
build_tools = FindVisualStudioInstallation()
self.devenv_path, self.uses_msbuild, self.msbuild_path = build_tools
self.vsvars_path = TestGypOnMSToolchain._ComputeVsvarsPath(
self.devenv_path)
def run_dumpbin(self, *dumpbin_args):
"""Run the dumpbin tool with the specified arguments, and capturing and
returning stdout."""
assert sys.platform in ('win32', 'cygwin')
cmd = os.environ.get('COMSPEC', 'cmd.exe')
arguments = [cmd, '/c', self.vsvars_path, '&&', 'dumpbin']
arguments.extend(dumpbin_args)
proc = subprocess.Popen(arguments, stdout=subprocess.PIPE)
output = proc.communicate()[0]
assert not proc.returncode
return output
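  # Example (comment only; the binary name is illustrative):
  #   output = test.run_dumpbin('/headers',
  #                             test.built_file_path('prog.exe', chdir='src'))
  # Routing the call through vsvars32.bat ensures dumpbin is on PATH inside
  # the spawned cmd.exe shell.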
class TestGypNinja(TestGypOnMSToolchain):
"""
Subclass for testing the GYP Ninja generator.
"""
format = 'ninja'
build_tool_list = ['ninja']
ALL = 'all'
DEFAULT = 'all'
def run_gyp(self, gyp_file, *args, **kw):
TestGypBase.run_gyp(self, gyp_file, *args, **kw)
def build(self, gyp_file, target=None, **kw):
arguments = kw.get('arguments', [])[:]
# Add a -C output/path to the command line.
arguments.append('-C')
arguments.append(os.path.join('out', self.configuration_dirname()))
if target is None:
target = 'all'
arguments.append(target)
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def run_built_executable(self, name, *args, **kw):
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
if sys.platform == 'darwin':
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append('out')
result.append(self.configuration_dirname())
if type == self.STATIC_LIB:
if sys.platform != 'darwin':
result.append('obj')
elif type == self.SHARED_LIB:
if sys.platform != 'darwin' and sys.platform != 'win32':
result.append('lib')
subdir = kw.get('subdir')
if subdir and type != self.SHARED_LIB:
result.append(subdir)
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
def up_to_date(self, gyp_file, target=None, **kw):
result = self.build(gyp_file, target, **kw)
if not result:
stdout = self.stdout()
if 'ninja: no work to do' not in stdout:
self.report_not_up_to_date()
self.fail_test()
return result
class TestGypMSVS(TestGypOnMSToolchain):
"""
Subclass for testing the GYP Visual Studio generator.
"""
format = 'msvs'
u = r'=== Build: 0 succeeded, 0 failed, (\d+) up-to-date, 0 skipped ==='
up_to_date_re = re.compile(u, re.M)
# Initial None element will indicate to our .initialize_build_tool()
# method below that 'devenv' was not found on %PATH%.
#
# Note: we must use devenv.com to be able to capture build output.
# Directly executing devenv.exe only sends output to BuildLog.htm.
build_tool_list = [None, 'devenv.com']
def initialize_build_tool(self):
super(TestGypMSVS, self).initialize_build_tool()
self.build_tool = self.devenv_path
def build(self, gyp_file, target=None, rebuild=False, clean=False, **kw):
"""
Runs a Visual Studio build using the configuration generated
from the specified gyp_file.
"""
configuration = self.configuration_buildname()
if clean:
build = '/Clean'
elif rebuild:
build = '/Rebuild'
else:
build = '/Build'
arguments = kw.get('arguments', [])[:]
arguments.extend([gyp_file.replace('.gyp', '.sln'),
build, configuration])
# Note: the Visual Studio generator doesn't add an explicit 'all'
# target, so we just treat it the same as the default.
if target not in (None, self.ALL, self.DEFAULT):
arguments.extend(['/Project', target])
if self.configuration:
arguments.extend(['/ProjectConfig', self.configuration])
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Visual Studio target is up to date.
Beware that VS2010 will behave strangely if you build under
    C:\USERS\yourname\AppData\Local. It will cause needless work. The output
will be "1 succeeded and 0 up to date". MSBuild tracing reveals that:
"Project 'C:\Users\...\AppData\Local\...vcxproj' not up to date because
'C:\PROGRAM FILES (X86)\MICROSOFT VISUAL STUDIO 10.0\VC\BIN\1033\CLUI.DLL'
was modified at 02/21/2011 17:03:30, which is newer than '' which was
modified at 01/01/0001 00:00:00.
The workaround is to specify a workdir when instantiating the test, e.g.
test = TestGyp.TestGyp(workdir='workarea')
"""
result = self.build(gyp_file, target, **kw)
if not result:
stdout = self.stdout()
m = self.up_to_date_re.search(stdout)
up_to_date = m and int(m.group(1)) > 0
if not up_to_date:
self.report_not_up_to_date()
self.fail_test()
return result
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by Visual Studio.
"""
configuration = self.configuration_dirname()
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Visual Studio.
Built files are in a subdirectory that matches the configuration
name. The default is 'Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append(self.configuration_dirname())
if type == self.STATIC_LIB:
result.append('lib')
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
class TestGypMSVSNinja(TestGypNinja):
"""
Subclass for testing the GYP Visual Studio Ninja generator.
"""
format = 'msvs-ninja'
def initialize_build_tool(self):
super(TestGypMSVSNinja, self).initialize_build_tool()
# When using '--build', make sure ninja is first in the format list.
self.formats.insert(0, 'ninja')
def build(self, gyp_file, target=None, rebuild=False, clean=False, **kw):
"""
Runs a Visual Studio build using the configuration generated
from the specified gyp_file.
"""
arguments = kw.get('arguments', [])[:]
if target in (None, self.ALL, self.DEFAULT):
# Note: the Visual Studio generator doesn't add an explicit 'all' target.
# This will build each project. This will work if projects are hermetic,
# but may fail if they are not (a project may run more than once).
# It would be nice to supply an all.metaproj for MSBuild.
arguments.extend([gyp_file.replace('.gyp', '.sln')])
else:
# MSBuild documentation claims that one can specify a sln but then build a
# project target like 'msbuild a.sln /t:proj:target' but this format only
# supports 'Clean', 'Rebuild', and 'Publish' (with none meaning Default).
# This limitation is due to the .sln -> .sln.metaproj conversion.
# The ':' is not special, 'proj:target' is a target in the metaproj.
arguments.extend([target+'.vcxproj'])
if clean:
build = 'Clean'
elif rebuild:
build = 'Rebuild'
else:
build = 'Build'
arguments.extend(['/target:'+build])
configuration = self.configuration_buildname()
config = configuration.split('|')
arguments.extend(['/property:Configuration='+config[0]])
if len(config) > 1:
arguments.extend(['/property:Platform='+config[1]])
arguments.extend(['/property:BuildInParallel=false'])
arguments.extend(['/verbosity:minimal'])
kw['arguments'] = arguments
return self.run(program=self.msbuild_path, **kw)
class TestGypXcode(TestGypBase):
"""
Subclass for testing the GYP Xcode generator.
"""
format = 'xcode'
build_tool_list = ['xcodebuild']
phase_script_execution = ("\n"
"PhaseScriptExecution /\\S+/Script-[0-9A-F]+\\.sh\n"
" cd /\\S+\n"
" /bin/sh -c /\\S+/Script-[0-9A-F]+\\.sh\n"
"(make: Nothing to be done for `all'\\.\n)?")
strip_up_to_date_expressions = [
# Various actions or rules can run even when the overall build target
# is up to date. Strip those phases' GYP-generated output.
re.compile(phase_script_execution, re.S),
# The message from distcc_pump can trail the "BUILD SUCCEEDED"
# message, so strip that, too.
re.compile('__________Shutting down distcc-pump include server\n', re.S),
]
up_to_date_endings = (
'Checking Dependencies...\n** BUILD SUCCEEDED **\n', # Xcode 3.0/3.1
'Check dependencies\n** BUILD SUCCEEDED **\n\n', # Xcode 3.2
'Check dependencies\n\n\n** BUILD SUCCEEDED **\n\n', # Xcode 4.2
'Check dependencies\n\n** BUILD SUCCEEDED **\n\n', # Xcode 5.0
)
def build(self, gyp_file, target=None, **kw):
"""
Runs an xcodebuild using the .xcodeproj generated from the specified
gyp_file.
"""
# Be sure we're working with a copy of 'arguments' since we modify it.
# The caller may not be expecting it to be modified.
arguments = kw.get('arguments', [])[:]
arguments.extend(['-project', gyp_file.replace('.gyp', '.xcodeproj')])
if target == self.ALL:
      arguments.append('-alltargets')
elif target not in (None, self.DEFAULT):
arguments.extend(['-target', target])
if self.configuration:
arguments.extend(['-configuration', self.configuration])
symroot = kw.get('SYMROOT', '$SRCROOT/build')
if symroot:
arguments.append('SYMROOT='+symroot)
kw['arguments'] = arguments
# Work around spurious stderr output from Xcode 4, http://crbug.com/181012
match = kw.pop('match', self.match)
def match_filter_xcode(actual, expected):
if actual:
if not TestCmd.is_List(actual):
actual = actual.split('\n')
if not TestCmd.is_List(expected):
expected = expected.split('\n')
actual = [a for a in actual
if 'No recorder, buildTask: <Xcode3BuildTask:' not in a]
return match(actual, expected)
kw['match'] = match_filter_xcode
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Xcode target is up to date.
"""
result = self.build(gyp_file, target, **kw)
if not result:
output = self.stdout()
for expression in self.strip_up_to_date_expressions:
output = expression.sub('', output)
if not output.endswith(self.up_to_date_endings):
self.report_not_up_to_date()
self.fail_test()
return result
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by xcodebuild.
"""
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = os.path.join('build', configuration)
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Xcode.
Built files are in the subdirectory 'build/{configuration}'.
The default is 'build/Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
configuration = self.configuration_dirname()
result.extend(['build', configuration])
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
class TestGypXcodeNinja(TestGypXcode):
"""
Subclass for testing the GYP Xcode Ninja generator.
"""
format = 'xcode-ninja'
def initialize_build_tool(self):
super(TestGypXcodeNinja, self).initialize_build_tool()
# When using '--build', make sure ninja is first in the format list.
self.formats.insert(0, 'ninja')
def build(self, gyp_file, target=None, **kw):
"""
Runs an xcodebuild using the .xcodeproj generated from the specified
gyp_file.
"""
build_config = self.configuration
if build_config and build_config.endswith(('-iphoneos',
'-iphonesimulator')):
build_config, sdk = self.configuration.split('-')
kw['arguments'] = kw.get('arguments', []) + ['-sdk', sdk]
with self._build_configuration(build_config):
return super(TestGypXcodeNinja, self).build(
gyp_file.replace('.gyp', '.ninja.gyp'), target, **kw)
@contextmanager
def _build_configuration(self, build_config):
config = self.configuration
self.configuration = build_config
try:
yield
finally:
self.configuration = config
def built_file_path(self, name, type=None, **kw):
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append('out')
result.append(self.configuration_dirname())
subdir = kw.get('subdir')
if subdir and type != self.SHARED_LIB:
result.append(subdir)
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
def up_to_date(self, gyp_file, target=None, **kw):
result = self.build(gyp_file, target, **kw)
if not result:
stdout = self.stdout()
if 'ninja: no work to do' not in stdout:
self.report_not_up_to_date()
self.fail_test()
return result
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by xcodebuild + ninja.
"""
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
format_class_list = [
TestGypGypd,
TestGypCMake,
TestGypMake,
TestGypMSVS,
TestGypMSVSNinja,
TestGypNinja,
TestGypXcode,
TestGypXcodeNinja,
]
def TestGyp(*args, **kw):
"""
Returns an appropriate TestGyp* instance for a specified GYP format.
"""
format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))
for format_class in format_class_list:
if format == format_class.format:
return format_class(*args, **kw)
raise Exception, "unknown format %r" % format
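# Typical gyptest entry point (comment only): a test script imports this
# module and lets the factory pick the generator subclass, e.g.
#   import TestGyp
#   test = TestGyp.TestGyp()   # format= kwarg or TESTGYP_FORMAT selects it
#   ...
#   test.pass_test()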
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pddb
----------------------------------
Tests for all the database engine methods from the `pddb` module.
"""
import os
import re
import sys
import time
import json
from shutil import rmtree
import unittest2
from pddb import PandasDatabase
# TODO: delete performance
# TODO: find non-existent column in dynamic schema
# TODO: find should not create table using auto_load -> Won't fix, will be handled by save
# TODO: rename row generator -> record generator
# TODO: add where_not tests
# TODO: add read_csv test
# TODO: double load
# TODO: save() saves only dataframes that have been changed
# TODO: DeprecationWarning Using a non-integer number will result in an error in the future
# TODO: insert 2 items, delete 1 item by id, delete 1 item by id
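# Rough shape of the API exercised below (comment only; argument values are
# illustrative, the individual tests are authoritative):
#   db = PandasDatabase('dbname', dynamic_schema=True, auto_load=True,
#                       auto_save=False, persistent=False)
#   db.insert('table_name', {'col_name': 'A_1'})
#   db.find_one('table_name', where={'col_name': 'A_1'}, astype='dict')
#   db.drop_all()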
class TestPandasDatabaseMethods(unittest2.TestCase):
# pylint: disable=invalid-name,too-many-public-methods,protected-access
@classmethod
def setUpClass(cls):
cls.pddb = None
cls.cols = ['A', 'B', 'C']
cls.tname = 'table_name'
@classmethod
def tearDownClass(cls):
if cls.pddb is not None and cls.pddb.persistent:
cls.pddb.drop_all()
def test_create_database(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=False, auto_load=False,
auto_save=False, persistent=True, debug=False)
self.assertTrue(os.path.exists(test_name.lower()))
rmtree(test_name.lower())
self.pddb.drop_all()
self.pddb = None
def test_find_one(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=True)
record = {'col_name': 'A_1'}
record_from_insert = self.pddb.insert(
self.tname, record, columns='col_name', astype='dict')
record_from_findone = self.pddb.find_one(
self.tname, where=record, columns='col_name', astype='dict')
json_from_record = json.dumps(record)
json_from_findone = json.dumps(record_from_findone)
json_from_insert = json.dumps(record_from_insert)
self.assertEqual(json_from_record, json_from_insert)
self.assertEqual(json_from_record, json_from_findone)
self.pddb.drop_all()
self.pddb = None
def test_find_one_none(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=True)
record = self.pddb.find_one(self.tname, where={'my_cond': None}, astype='json')
self.assertEqual(record, json.dumps(dict()))
self.pddb.drop_all()
self.pddb = None
def test_create_table_with_fixed_schema(self):
test_name = self.id().lower()
schema = {self.tname: self.cols}
self.pddb = PandasDatabase(test_name, dynamic_schema=False, astype=list, auto_load=False,
auto_save=False, persistent=False, debug=False)
self.pddb.load(table_schemas=schema)
db_cols = set(self.pddb._db[self.tname].columns)
cols_with_id = set(['__id__'] + [c for c in self.cols])
self.assertEqual(db_cols, cols_with_id)
self.pddb.drop_all()
self.pddb = None
def test_single_insert_with_fixed_schema(self):
test_name = self.id().lower()
schema = {self.tname: self.cols}
self.pddb = PandasDatabase(test_name, dynamic_schema=False, astype='dict', auto_load=False,
auto_save=False, persistent=False, debug=False)
self.pddb.load(table_schemas=schema)
record = {c: '%s_%d' % (c, i) for (i, c) in enumerate(self.cols)}
record = self.pddb.insert(self.tname, record)
record_id = record['__id__']
record_db = self.pddb.find(self.tname, where={'__id__': record_id})[0]
self.assertEqual(record, record_db)
self.pddb.drop_all()
self.pddb = None
def test_many_insert_with_fixed_schema(self):
test_name = self.id().lower()
schema = {self.tname: self.cols}
self.pddb = PandasDatabase(test_name, dynamic_schema=False, auto_load=False,
auto_save=False, persistent=False, debug=False)
self.pddb.load(table_schemas=schema)
test_record_store = []
for i in range(10):
record = {c: '%s_%d' % (c, i) for c in self.cols}
record_db = self.pddb.insert(self.tname, record)
record_id = record_db.loc['__id__']
record['__id__'] = record_id
test_record_store.append(record)
rows = self.pddb.find(self.tname, astype='dict')
self.assertEqual(rows, test_record_store)
self.pddb.drop_all()
self.pddb = None
def test_single_upsert_with_fixed_schema(self):
test_name = self.id().lower()
schema = {self.tname: self.cols}
self.pddb = PandasDatabase(test_name, dynamic_schema=False, astype='dict', auto_load=False,
auto_save=False, persistent=False, debug=False)
self.pddb.load(table_schemas=schema)
# Test insert first
record = {c: '%s_%d' % (c, i) for (i, c) in enumerate(self.cols)}
record = self.pddb.upsert(self.tname, record=record)[0]
record_id = record['__id__']
record_db = self.pddb.find_one(self.tname, where={'__id__': record_id})
self.assertEqual(record, record_db)
# Test update second
record_new = {c: '%s_%d' % (c, -i) for (i, c) in enumerate(self.cols)}
record_new = self.pddb.upsert(self.tname, record=record_new, where={'__id__': record_id})[0]
record_db = self.pddb.find(self.tname, where={'__id__': record_id})[0]
self.assertNotEqual(record, record_new)
self.assertEqual(record_db, record_new)
self.pddb.drop_all()
self.pddb = None
def test_many_upsert_with_fixed_schema(self):
test_name = self.id().lower()
schema = {self.tname: self.cols}
self.pddb = PandasDatabase(test_name, dynamic_schema=False, auto_load=False,
auto_save=False, persistent=False, debug=False)
self.pddb.load(table_schemas=schema)
test_record_store = []
for i in range(10):
record = {c: '%s_%d' % (c, i) for c in self.cols}
record_new = self.pddb.upsert(self.tname, record=record, astype='dict')[0]
for c in self.cols:
record_new[c] = '%s_%d' % (c, -i)
self.pddb.upsert(self.tname, record=record_new, where={'__id__': record_new['__id__']})
test_record_store.append(record_new)
rows = self.pddb.find(self.tname, astype='dict')
self.assertEqual(rows, test_record_store)
self.pddb.drop_all()
self.pddb = None
def test_create_table_with_dynamic_schema(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=False)
for i, c in enumerate(self.cols):
record = {c: str(i)}
self.pddb.insert(self.tname, record)
PandasDatabase_cols = set(self.pddb._db[self.tname].columns)
cols_with_id = set(['__id__'] + [c for c in self.cols])
self.assertEqual(PandasDatabase_cols, cols_with_id)
self.pddb.drop_all()
self.pddb = None
def test_astype(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=False)
for i in range(10):
self.pddb.insert(self.tname, {c: '%s_%d' % (c, i) for c in self.cols})
self.assertEqual(str(self.pddb.find(self.tname)),
str(self.pddb.find(self.tname, astype='dataframe')))
self.assertEqual(str(self.pddb.find(self.tname, astype=dict)),
str(self.pddb.find(self.tname, astype='dict')))
self.assertEqual(str(self.pddb.find(self.tname, astype=str)),
str(self.pddb.find(self.tname, astype='json')))
self.assertRaisesRegex(RuntimeError, '.*',
lambda: self.pddb.find_one(self.tname, astype='dataframe'))
self.pddb.drop_all()
self.pddb = None
def test_bytes_conversion(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=True)
record = self.pddb.insert(self.tname, record={b'col_name_1': b'1'}, astype='dict')
self.assertTrue(all([type(x) == str for x in list(record.keys()) + list(record.values())]))
self.pddb.drop_all()
self.pddb = None
def test_unicode_conversion(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=True)
record = self.pddb.insert(self.tname, record={u'col_name_1': u'1'}, astype='dict')
self.assertTrue(all([type(x) == str for x in list(record.keys()) + list(record.values())]))
self.pddb.drop_all()
self.pddb = None
def test_illegal_column_name(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, astype=list, auto_load=True,
auto_save=False, persistent=False, debug=False)
record = {'col*name': 'A_1'}
insert_function = lambda: self.pddb.insert(self.tname, record)
self.assertRaisesRegex(ValueError, 'Column names must match the following regex: ".+"',
insert_function)
self.pddb.drop_all()
self.pddb = None
def test_find_regex(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=False)
for i in range(10):
self.pddb.insert(self.tname, record={'col_name': str(i)})
rows = self.pddb.find(self.tname, where={'col_name': re.compile(r'[1-5]')})
self.assertEqual(len(rows), 5)
self.pddb.drop_all()
self.pddb = None
def test_type_cast(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, auto_cast=True, persistent=False, debug=False)
dict_mix = {1: 'a', 'col_name': 1, 'b': 0.5}
dict_str = {str(k): str(v) for k,v in dict_mix.items()}
record = self.pddb.insert(self.tname, record=dict_mix, astype='dict')
self.assertTrue(all([type(x) == str for x in list(record.keys()) + list(record.values())]))
record.pop(PandasDatabase._id_colname)
self.assertEqual(record, dict_str)
self.pddb.drop_all()
self.pddb = None
def test_upsert_to_insert_with_where(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=True)
record = self.pddb.upsert(self.tname, record={'col_name_1': '1'},
where={'col_name_2': '2'}, columns=['col_name_1', 'col_name_2'],
astype='dict')
self.assertEqual(record[0], {'col_name_1': '1', 'col_name_2': '2'})
self.pddb.drop_all()
self.pddb = None
def test_upsert_to_insert_with_conflict(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=True)
newrower = lambda: {'col_name_2': '2'}
self.pddb.load(table_rowgens={self.tname: newrower})
insert_function = lambda: self.pddb.upsert(self.tname, record={'col_name_1': '1'},
where_not={'col_name_2': '2'})
self.assertRaisesRegex(ValueError, 'Cannot insert new record because default '
'values conflict with conditions provided: {.+}', insert_function)
self.pddb.drop_all()
self.pddb = None
def test_create_table_with_upper_case(self):
test_name = self.id().lower()
tname = self.tname.upper()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=False,
auto_save=False, persistent=False, debug=False)
newrower = lambda: {c: '%s_%d' % (c, 0) for c in self.cols}
self.pddb.load(table_rowgens={tname: newrower})
row = newrower()
record = self.pddb.insert(tname, columns=self.cols, astype='dict')
self.assertEqual(row, record)
self.pddb.drop_all()
self.pddb = None
def test_find_using_columns(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=False,
auto_save=False, persistent=False, debug=False)
self.pddb.load(table_names=self.tname)
self.pddb.insert(self.tname, {c: '%s_%d' % (c, i) for (i, c) in enumerate(self.cols)})
record_A = self.pddb.find_one(self.tname, columns=['A'])
record_B = self.pddb.find_one(self.tname, columns='B')
record_ABC = self.pddb.find(self.tname, columns=['A', 'B', 'C'], astype='dict')[0]
cols_A = sorted(list(record_A.keys()))
cols_B = sorted(list(record_B.keys()))
cols_ABC = sorted(list(record_ABC.keys()))
self.assertEqual(cols_A, ['A'])
self.assertEqual(cols_B, ['B'])
self.assertEqual(cols_ABC, ['A', 'B', 'C'])
self.pddb.drop_all()
self.pddb = None
def test_find_where_in(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=False)
record_store = []
for i in range(10):
record = {c: '%s_%d' % (c, i) for c in self.cols}
self.pddb.insert(self.tname, record)
record_store.append(record)
search_for = {self.cols[0]: ['%s_%d' % (self.cols[0], i) for i in range(5)]}
results = self.pddb.find(self.tname, where=search_for, columns=self.cols, astype='dict')
self.assertEqual(record_store[:5], results)
self.pddb.drop_all()
self.pddb = None
def test_single_delete_record(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=False,
auto_save=False, persistent=False, debug=False)
self.pddb.load(table_names=self.tname)
for i in range(10):
self.pddb.insert(self.tname, {c: '%s_%d' % (c, i) for c in self.cols})
c = self.cols[0]
firstrecord = {c: '%s_%d' % (c, 0)}
delrows = self.pddb.delete(self.tname, where=firstrecord)
allrows = self.pddb.find(self.tname)
allvalA = allrows['A'].values
self.assertEqual(1, len(delrows))
self.assertEqual(9, len(allrows))
self.assertFalse('A_0' in allvalA)
self.pddb.drop_all()
self.pddb = None
def test_single_rowgen(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=False,
auto_save=False, persistent=False, debug=False)
newrower = lambda: {c: '%s_%d' % (c, 0) for c in self.cols}
self.pddb.load(table_rowgens={self.tname: newrower})
row = newrower()
record = self.pddb.insert(self.tname, columns=self.cols, astype='dict')
self.assertEqual(row, record)
self.pddb.drop_all()
def test_fixed_schema_fail_column(self):
test_name = self.id().lower()
schema = {self.tname: self.cols}
self.pddb = PandasDatabase(test_name, dynamic_schema=False, auto_load=False,
auto_save=False, persistent=False, debug=False)
self.pddb.load(table_schemas=schema)
insert_function = lambda: self.pddb.insert(self.tname, record={'D': '0'})
self.assertRaisesRegex(ValueError, 'Column "D" does not exist in schema for table "%s"' %
self.tname, insert_function)
self.pddb.drop_all()
self.pddb = None
def test_drop_table(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=False)
all_tables = []
for i in range(10):
tname = '%s_%d' % (self.tname, i)
all_tables.append(tname)
self.pddb.insert(tname, record={c: '%s_%d' % (c, i) for c in self.cols})
tnames = self.pddb.get_table_names()
self.assertEqual(sorted(tnames), sorted(all_tables))
for i in range(5):
tname = '%s_%d' % (self.tname, i)
all_tables.remove(tname)
self.pddb.drop(tname)
tnames = self.pddb.get_table_names()
self.assertEqual(sorted(tnames), sorted(all_tables))
self.pddb.drop(all_tables)
self.assertEqual(len(self.pddb.get_table_names()), 0)
self.pddb.drop_all()
self.pddb = None
def test_save_then_drop_all(self):
test_name = self.id().lower()
schema = {self.tname: self.cols}
self.pddb = PandasDatabase(test_name, dynamic_schema=False, auto_load=False,
auto_save=False, persistent=True, debug=False)
self.pddb.load(table_schemas=schema)
self.pddb.insert(self.tname, {c: '%s_%d' % (c, i) for (i, c) in enumerate(self.cols)})
self.pddb.save()
header_expected = ','.join(self.cols) + ',%s\n' % PandasDatabase._id_colname
record_expected_regex = header_expected + \
','.join(['%s_%d' % (c, i) for (i, c) in enumerate(self.cols)]) + ',.+?\n'
with open(os.path.join(self.pddb.root_dir, test_name, self.tname + '.csv'), 'r') as f:
record_csv = f.read()
self.assertTrue(os.path.exists(test_name))
self.pddb.drop_all()
self.assertRegex(record_csv, record_expected_regex)
self.assertFalse(os.path.exists(test_name))
self.pddb.drop_all()
self.pddb = None
def test_defer_save_queue_max(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, astype='dict', auto_load=False,
auto_save=True, deferred_save=True, persistent=True,
debug=False)
self.pddb.load(table_names=self.tname)
i = 0
for i in range(self.pddb.save_queue_max):
self.pddb.insert(self.tname, {c: '%s_%d' % (c, i) for c in self.cols})
self.assertFalse(os.path.exists(os.path.join(test_name.lower(), self.tname + '.csv')))
self.pddb.insert(self.tname, {c: '%s_%d' % (c, i) for c in self.cols})
time.sleep(1) # Give an extra second to actually save the file to disk
self.assertTrue(os.path.exists(os.path.join(test_name.lower(), self.tname + '.csv')))
self.pddb.drop_all()
def test_defer_save_wait(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, astype='dict', auto_load=False,
auto_save=True, deferred_save=True, persistent=True,
debug=False)
self.pddb.load(table_names=self.tname)
for i in range(self.pddb.save_queue_max):
self.pddb.insert(self.tname, {c: '%s_%d' % (c, i) for c in self.cols})
self.assertFalse(os.path.exists(os.path.join(test_name.lower(), self.tname + '.csv')))
time.sleep(self.pddb.save_wait + 1) # Give an extra second to actually save the file to disk
self.assertTrue(os.path.exists(os.path.join(test_name.lower(), self.tname + '.csv')))
self.pddb.drop_all()
self.pddb = None
'''
def test_join(self):
test_name = self.id().lower()
self.pddb = PandasDatabase(test_name, dynamic_schema=True, auto_load=True,
auto_save=False, persistent=False, debug=True)
employees = [{'id': 1, 'name': 'john', 'salary': 1, 'dept_id': 1},
{'id': 2, 'name': 'mary', 'salary': 7, 'dept_id': 1},
{'id': 3, 'name': 'alex', 'salary': 3, 'dept_id': 2}]
departments = [{'id': 1, 'name': 'accounting'},
{'id': 2, 'name': 'marketing'},
{'id': 3, 'name': 'human resources'}]
for emp in employees:
self.pddb.insert('employees', record=emp)
for dept in departments:
self.pddb.insert('departments', record=dept)
on_function = lambda emp, dept: emp['dept_id'] == dept['id']
merged = self.pddb.join('employees', 'departments', how='inner', on=on_function)
record_from_insert = self.pddb.insert(self.tname, record, columns='col_name', astype='dict')
record_from_findone = self.pddb.find_one(self.tname, where=record, columns='col_name', astype='dict')
json_from_record = json.dumps(record)
json_from_findone = json.dumps(record_from_findone)
json_from_insert = json.dumps(record_from_insert)
self.assertEqual(json_from_record, json_from_insert)
self.assertEqual(json_from_record, json_from_findone)
self.pddb.drop_all()
self.pddb = None
'''
if __name__ == '__main__':
sys.exit(unittest2.main())
"""passlib.handlers.misc - misc generic handlers
"""
#=============================================================================
# imports
#=============================================================================
# core
import sys
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.utils import to_native_str, str_consteq
from passlib.utils.compat import unicode, u, unicode_or_bytes_types
import passlib.utils.handlers as uh
# local
__all__ = [
"unix_disabled",
"unix_fallback",
"plaintext",
]
#=============================================================================
# handler
#=============================================================================
class unix_fallback(uh.ifc.DisabledHash, uh.StaticHandler):
"""This class provides the fallback behavior for unix shadow files, and follows the :ref:`password-hash-api`.
This class does not implement a hash, but instead provides fallback
behavior as found in /etc/shadow on most unix variants.
If used, should be the last scheme in the context.
* this class will positively identify all hash strings.
* for security, passwords will always hash to ``!``.
* it rejects all passwords if the hash is NOT an empty string (``!`` or ``*`` are frequently used).
* by default it rejects all passwords if the hash is an empty string,
but if ``enable_wildcard=True`` is passed to verify(),
all passwords will be allowed through if the hash is an empty string.
.. deprecated:: 1.6
This has been deprecated due to its "wildcard" feature,
and will be removed in Passlib 1.8. Use :class:`unix_disabled` instead.
"""
name = "unix_fallback"
context_kwds = ("enable_wildcard",)
@classmethod
def identify(cls, hash):
if isinstance(hash, unicode_or_bytes_types):
return True
else:
raise uh.exc.ExpectedStringError(hash, "hash")
def __init__(self, enable_wildcard=False, **kwds):
warn("'unix_fallback' is deprecated, "
"and will be removed in Passlib 1.8; "
"please use 'unix_disabled' instead.",
DeprecationWarning)
super(unix_fallback, self).__init__(**kwds)
self.enable_wildcard = enable_wildcard
def _calc_checksum(self, secret):
if self.checksum:
# NOTE: hash will generally be "!", but we want to preserve
# it in case it's something else, like "*".
return self.checksum
else:
return u("!")
@classmethod
def verify(cls, secret, hash, enable_wildcard=False):
uh.validate_secret(secret)
if not isinstance(hash, unicode_or_bytes_types):
raise uh.exc.ExpectedStringError(hash, "hash")
elif hash:
return False
else:
return enable_wildcard
_MARKER_CHARS = u("*!")
_MARKER_BYTES = b"*!"
class unix_disabled(uh.ifc.DisabledHash, uh.MinimalHandler):
"""This class provides disabled password behavior for unix shadow files,
and follows the :ref:`password-hash-api`.
This class does not implement a hash, but instead matches the "disabled account"
strings found in ``/etc/shadow`` on most Unix variants. "encrypting" a password
will simply return the disabled account marker. It will reject all passwords,
no matter the hash string. The :meth:`~passlib.ifc.PasswordHash.hash`
method supports one optional keyword:
:type marker: str
:param marker:
Optional marker string which overrides the platform default
used to indicate a disabled account.
If not specified, this will default to ``"*"`` on BSD systems,
and use the Linux default ``"!"`` for all other platforms.
(:attr:`!unix_disabled.default_marker` will contain the default value)
.. versionadded:: 1.6
This class was added as a replacement for the now-deprecated
:class:`unix_fallback` class, which had some undesirable features.
"""
name = "unix_disabled"
setting_kwds = ("marker",)
context_kwds = ()
_disable_prefixes = tuple(str(_MARKER_CHARS))
# TODO: rename attr to 'marker'...
if 'bsd' in sys.platform: # pragma: no cover -- runtime detection
default_marker = u("*")
else:
# use the linux default for other systems
# (glibc also supports adding old hash after the marker
# so it can be restored later).
default_marker = u("!")
@classmethod
def using(cls, marker=None, **kwds):
subcls = super(unix_disabled, cls).using(**kwds)
if marker is not None:
if not cls.identify(marker):
raise ValueError("invalid marker: %r" % marker)
subcls.default_marker = marker
return subcls
@classmethod
def identify(cls, hash):
# NOTE: technically, anything in the /etc/shadow password field
# which isn't valid crypt() output counts as "disabled".
# but that's rather ambiguous, and it's hard to predict what
# valid output is for unknown crypt() implementations.
# so to be on the safe side, we only match things *known*
# to be disabled field indicators, and will add others
# as they are found. things beginning w/ "$" should *never* match.
#
# things currently matched:
# * linux uses "!"
# * bsd uses "*"
# * linux may use "!" + hash to disable but preserve original hash
# * linux counts empty string as "any password";
# this code recognizes it, but treats it the same as "!"
if isinstance(hash, unicode):
start = _MARKER_CHARS
elif isinstance(hash, bytes):
start = _MARKER_BYTES
else:
raise uh.exc.ExpectedStringError(hash, "hash")
return not hash or hash[0] in start
@classmethod
def verify(cls, secret, hash):
uh.validate_secret(secret)
if not cls.identify(hash): # handles typecheck
raise uh.exc.InvalidHashError(cls)
return False
@classmethod
def hash(cls, secret, **kwds):
if kwds:
uh.warn_hash_settings_deprecation(cls, kwds)
return cls.using(**kwds).hash(secret)
uh.validate_secret(secret)
marker = cls.default_marker
assert marker and cls.identify(marker)
return to_native_str(marker, param="marker")
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, marker=None):
if not cls.identify(config):
raise uh.exc.InvalidHashError(cls)
elif config:
            # preserve the existing str, since it might contain a disabled password hash ("!" + hash)
uh.validate_secret(secret)
return to_native_str(config, param="config")
else:
if marker is not None:
cls = cls.using(marker=marker)
return cls.hash(secret)
@classmethod
def disable(cls, hash=None):
out = cls.hash("")
if hash is not None:
hash = to_native_str(hash, param="hash")
if cls.identify(hash):
# extract original hash, so that we normalize marker
hash = cls.enable(hash)
if hash:
out += hash
return out
@classmethod
def enable(cls, hash):
hash = to_native_str(hash, param="hash")
for prefix in cls._disable_prefixes:
if hash.startswith(prefix):
orig = hash[len(prefix):]
if orig:
return orig
else:
raise ValueError("cannot restore original hash")
raise uh.exc.InvalidHashError(cls)
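    # Example (comment only): on Linux, disable("$1$somehash") returns
    # "!$1$somehash" (marker prepended, original hash preserved), and
    # enable("!$1$somehash") strips the marker again; enable("!") raises
    # ValueError because there is no original hash left to restore.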
class plaintext(uh.MinimalHandler):
"""This class stores passwords in plaintext, and follows the :ref:`password-hash-api`.
The :meth:`~passlib.ifc.PasswordHash.hash`, :meth:`~passlib.ifc.PasswordHash.genhash`, and :meth:`~passlib.ifc.PasswordHash.verify` methods all require the
following additional contextual keyword:
:type encoding: str
:param encoding:
This controls the character encoding to use (defaults to ``utf-8``).
This encoding will be used to encode :class:`!unicode` passwords
under Python 2, and decode :class:`!bytes` hashes under Python 3.
.. versionchanged:: 1.6
The ``encoding`` keyword was added.
"""
# NOTE: this is subclassed by ldap_plaintext
name = "plaintext"
setting_kwds = ()
context_kwds = ("encoding",)
default_encoding = "utf-8"
@classmethod
def identify(cls, hash):
if isinstance(hash, unicode_or_bytes_types):
return True
else:
raise uh.exc.ExpectedStringError(hash, "hash")
@classmethod
def hash(cls, secret, encoding=None):
uh.validate_secret(secret)
if not encoding:
encoding = cls.default_encoding
return to_native_str(secret, encoding, "secret")
@classmethod
def verify(cls, secret, hash, encoding=None):
if not encoding:
encoding = cls.default_encoding
hash = to_native_str(hash, encoding, "hash")
if not cls.identify(hash):
raise uh.exc.InvalidHashError(cls)
return str_consteq(cls.hash(secret, encoding), hash)
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genconfig(cls):
return cls.hash("")
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, encoding=None):
# NOTE: 'config' is ignored, as this hash has no salting / etc
if not cls.identify(config):
raise uh.exc.InvalidHashError(cls)
return cls.hash(secret, encoding=encoding)
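# Illustrative usage of plaintext -- a minimal sketch, not part of the
# original module, assuming the public ``passlib.hash`` interface:
#
#     from passlib.hash import plaintext
#     stored = plaintext.hash("hunter2")    # the stored "hash" is the secret itself
#     plaintext.verify("hunter2", stored)   # True (constant-time comparison)
#     plaintext.verify("wrong", stored)     # False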
#=============================================================================
# eof
#=============================================================================
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class StackOpTest(test.TestCase):
def _testStackPushPop(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
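# The pop below only references the stack handle, not the push op's output,
# so a control dependency is needed to ensure the push runs before the pop.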
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose([[4.0, 5.0]], c1.eval())
def testStackPushPop(self):
self._testStackPushPop(use_gpu=False)
self._testStackPushPop(use_gpu=True)
def _testStackPushPopSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
a = np.arange(2000)
x = constant_op.constant(a, dtype=dtypes.float32)
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose(a, c1.eval())
def testStackPushPopSwap(self):
self._testStackPushPopSwap(use_gpu=False)
self._testStackPushPopSwap(use_gpu=True)
def _testStackWhileSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
def c(x):
return math_ops.less(x, 10)
def b(x):
with ops.control_dependencies([x]):
a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
v = gen_data_flow_ops.stack_push_v2(h, a, swap_memory=True)
with ops.control_dependencies([v]):
return math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n])
v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)
def c1(x, y):
del y
return math_ops.greater(x, 0)
def b1(x, y):
nx = math_ops.subtract(x, 1)
ny = y + gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
return [nx, ny]
_, ry = control_flow_ops.while_loop(
c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
def testStackWhileSwap(self):
self._testStackWhileSwap(use_gpu=False)
self._testStackWhileSwap(use_gpu=True)
def _testMultiStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
with ops.control_dependencies([c1]):
c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
h2 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="bar")
c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
r = c1 + c2
self.assertAllClose(9.0, r.eval())
def testMultiStack(self):
self._testMultiStack(use_gpu=False)
self._testMultiStack(use_gpu=True)
def _testSameNameStacks(self, use_gpu):
"""Different stacks with the same name do not interfere."""
with self.test_session(use_gpu=use_gpu) as sess:
h1 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
h2 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
with ops.control_dependencies([c1]):
c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
out1, out2 = sess.run([pop1, pop2])
self.assertAllClose(out1, 4.0)
self.assertAllClose(out2, 5.0)
def testSameNameStacks(self):
self._testSameNameStacks(use_gpu=False)
self._testSameNameStacks(use_gpu=True)
def _testCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_close_v2(h)
sess.run(c1)
def testCloseStack(self):
self._testCloseStack(use_gpu=False)
self._testCloseStack(use_gpu=True)
def _testPushCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_close_v2(h)
sess.run(c1)
def testPushCloseStack(self):
self._testPushCloseStack(use_gpu=False)
self._testPushCloseStack(use_gpu=True)
class StackOpRefTest(test.TestCase):
"""Tests for deprecated non-resource variant of stack ops."""
def _testStackPushPop(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop(h, dtypes.float32)
self.assertAllClose([[4.0, 5.0]], c1.eval())
def testStackPushPop(self):
self._testStackPushPop(use_gpu=False)
self._testStackPushPop(use_gpu=True)
def _testStackPushPopSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
a = np.arange(2000)
x = constant_op.constant(a, dtype=dtypes.float32)
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push(h, x, swap_memory=True)
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop(h, dtypes.float32)
self.assertAllClose(a, c1.eval())
def testStackPushPopSwap(self):
self._testStackPushPopSwap(use_gpu=False)
self._testStackPushPopSwap(use_gpu=True)
def _testMultiStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push(h1, 4.0)
with ops.control_dependencies([c1]):
c1 = gen_data_flow_ops.stack_pop(h1, dtypes.float32)
h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar")
c2 = gen_data_flow_ops.stack_push(h2, 5.0)
with ops.control_dependencies([c2]):
c2 = gen_data_flow_ops.stack_pop(h2, dtypes.float32)
r = c1 + c2
self.assertAllClose(9.0, r.eval())
def _testStackWhileSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
def c(x):
return math_ops.less(x, 10)
def b(x):
with ops.control_dependencies([x]):
a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
v = gen_data_flow_ops.stack_push(h, a, swap_memory=True)
with ops.control_dependencies([v]):
return math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n])
v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)
def c1(x, y):
del y
return math_ops.greater(x, 0)
def b1(x, y):
nx = math_ops.subtract(x, 1)
ny = y + gen_data_flow_ops.stack_pop(h, dtypes.float32)
return [nx, ny]
_, ry = control_flow_ops.while_loop(
c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
def testStackWhileSwap(self):
self._testStackWhileSwap(use_gpu=False)
self._testStackWhileSwap(use_gpu=True)
def testMultiStack(self):
self._testMultiStack(use_gpu=False)
self._testMultiStack(use_gpu=True)
def _testSameNameStacks(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push(h1, 4.0)
h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c2 = gen_data_flow_ops.stack_push(h2, 5.0)
_ = c1 + c2
self.assertNotEqual(h1.eval()[1], h2.eval()[1])
def testSameNameStacks(self):
self._testSameNameStacks(use_gpu=False)
self._testSameNameStacks(use_gpu=True)
def _testCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_close(h)
sess.run(c1)
def testCloseStack(self):
self._testCloseStack(use_gpu=False)
self._testCloseStack(use_gpu=True)
def _testPushCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_close(h)
sess.run(c1)
def testPushCloseStack(self):
self._testPushCloseStack(use_gpu=False)
self._testPushCloseStack(use_gpu=True)
if __name__ == "__main__":
test.main()
|
|
#
# Proximate - Peer-to-peer social networking
#
# Copyright (c) 2008-2011 Nokia Corporation
#
# All rights reserved.
#
# This software is licensed under The Clear BSD license.
# See the LICENSE file for more details.
#
from gobject import io_add_watch, timeout_add, source_remove, IO_IN, IO_OUT, \
PRIORITY_LOW
from os import SEEK_END
from socket import AF_INET, SOCK_DGRAM, SOCK_STREAM, SOL_SOCKET, \
SO_BROADCAST, SO_ERROR, SO_REUSEADDR, socket, error, herror, gaierror, \
inet_ntoa, inet_aton
from errno import EAGAIN, EINPROGRESS, EINTR, EADDRNOTAVAIL
import fcntl
import struct
import os
from support import debug, die, warning
from proximateprotocol import TP_MAX_TRANSFER, TP_MAX_RECORD_SIZE
from utils import str_to_int
from plugins import get_plugin_by_type
TCPQ_NO_CONNECTION = 0
TCPQ_OK = 1
TCPQ_EOF = 2
TCPQ_CONNECTION_TIMEOUT = 3
TCPQ_CONNECTION_REFUSED = 4
TCPQ_UNKNOWN_HOST = 5
TCPQ_TIMEOUT = 6
TCPQ_PROTOCOL_VIOLATION = 7
TCPQ_CONNECTING = 8
TCPQ_ERROR = 9
def bind_socket(sock, address, port):
try:
sock.bind((address, port))
except error, (errno, strerror):
return False
except herror, (errno, strerror):
return False
except gaierror, (errno, strerror):
return False
return True
def connect_socket(sock, name, port):
while True:
try:
sock.connect((name, port))
except error, (errno, strerror):
# 1. Connection now in progress (EINPROGRESS)
# 2. Connection refused (ECONNREFUSED)
debug('connect_socket: %s %d: %s\n' %(name, port, strerror))
if errno == EINTR:
continue
return errno == EINPROGRESS
except gaierror, (errno, strerror):
# Unknown host name
debug('connect_socket: %s %d: %s\n' %(name, port, strerror))
return False
break
return True
def create_tcp_socket(address, port, reuse = False):
""" If port != 0, create and bind listening socket on the given
port and address. Otherwise create a socket that can be connected.
Returns the socket when successful, otherwise None."""
try:
sock = socket(AF_INET, SOCK_STREAM)
except error, (errno, strerror):
debug('ioutils error (%s): %s\n' %(errno, strerror))
return None
if reuse:
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
if port != 0:
if not bind_socket(sock, address, port):
debug('ioutils: Can not bind\n')
return None
sock.listen(5)
return sock
def create_tcp_listener(port, accepthandler, reuse = False):
rfd = create_tcp_socket('', port, reuse = reuse)
if rfd == None:
return (None, None)
rfd.setblocking(False)
tag = io_add_watch(rfd, IO_IN, accepthandler)
return (rfd, tag)
def filesize(path):
try:
size = os.path.getsize(path)
except OSError:
return 0
return size
def get_flen(f):
error = None
try:
f.seek(0, SEEK_END)
flen = f.tell()
f.seek(0)
except IOError, (errno, strerror):
# Some files are not seekable
flen = 0
error = 'Can not seek file %s: %s' % (getattr(f, 'name', '?'), strerror)
return (flen, error)
def get_ip_address(ifname):
fail = (None, None)
no_conn = ('', None)
try:
sock = socket(AF_INET, SOCK_STREAM)
except error, (errno, strerror):
debug('ioutils error (%s): %s\n' %(errno, strerror))
return fail
try:
ip = fcntl.ioctl(sock.fileno(), 0x8915, struct.pack('256s', ifname[:15]))[20:24]
bcast = fcntl.ioctl(sock.fileno(), 0x8919, struct.pack('256s', ifname[:15]))[20:24]
except IOError, (errno, strerror):
if errno == EADDRNOTAVAIL:
return no_conn
return fail
sock.close()
return (inet_ntoa(ip), inet_ntoa(bcast))
def create_udp_socket(address, port, bcast, reuse = False):
""" If port != 0, create and bind listening socket on port
and address. If bcast == True, create a broadcast socket.
Returns the socket when successful, otherwise None."""
if port != 0 and bcast:
debug('create_udp_socket: both port != 0 and bcast == True may not be true\n')
return None
try:
sock = socket(AF_INET, SOCK_DGRAM)
except error, (errno, strerror):
debug('ioutils error (%s): %s\n' %(errno, strerror))
return None
if reuse:
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
if port != 0:
if not bind_socket(sock, address, port):
debug('ioutils: Can not bind\n')
return None
elif bcast:
try:
sock.setsockopt(SOL_SOCKET, SO_BROADCAST, True)
except error, (errno, strerror):
debug('ioutils error (%s): %s\n' %(errno, strerror))
return None
return sock
def send_broadcast(address, bcast_port, msg):
""" Send an UDP broadcast message. """
bcast_sock = create_udp_socket('', 0, True)
if bcast_sock == None:
warning('Can not create UDP socket: unable to broadcast\n')
return 0
bcast_sock.setblocking(False)
try:
bytes_sent = bcast_sock.sendto(msg, (address, bcast_port))
except error, (errno, strerror):
warning('Error sending broadcast to (%s, %d): %s\n' %(address, bcast_port, strerror))
return errno
if bytes_sent != len(msg):
warning('Error sending broadcast: only %d bytes sent\n' %(bytes_sent))
return EAGAIN
return 0
def valid_ip(ip):
try:
a = inet_aton(ip)
except error, (errno, strerror):
return False
return True
class TCP_Queue:
""" TCP_Queue is a bidirectional messaging class for TCP sockets.
Messages are read from the socket as sequential records, and messages
can also be written to it. An incoming message is not passed to the
handler until the whole record has been received.
"""
initialized = False
def __init__(self, handler, parameter = None, closehandler = None):
self.handler = handler
self.parameter = parameter
self.closehandler = closehandler
self.status = TCPQ_NO_CONNECTION
self.sock = None
self.rtag = None
self.wtag = None
self.timeouttag = None
self.connecttimeouttag = None
self.throttled = False
self.timeinterval = None
self.maxsize = TP_MAX_RECORD_SIZE
self.wsize = TP_MAX_TRANSFER
self.bytestransferred = 0
self.timeoutbytes = 1
self.inb = ''
self.outb = ''
self.msglen = None
self.recv_handler = None
self.send_handler = None
self.closeaftersend = None
def connect(self, address, timeout = None):
assert(self.status != TCPQ_OK)
self.sock = create_tcp_socket('', 0)
if self.sock == None:
return False
self.sock.setblocking(False)
if not connect_socket(self.sock, address[0], address[1]):
self.close(TCPQ_UNKNOWN_HOST, 'Unknown host')
return False
self.status = TCPQ_CONNECTING
self.wtag = io_add_watch(self.sock, IO_OUT, self.check_connect)
if timeout != None:
self.connecttimeouttag = timeout_add(timeout * 1000, self.no_connection)
return True
def no_connection(self):
self.close(TCPQ_CONNECTION_TIMEOUT, 'Connection timeout')
return False
def check_connect(self, fd, condition):
if self.sock.getsockopt(SOL_SOCKET, SO_ERROR) != 0:
self.close(TCPQ_CONNECTION_REFUSED, 'Connection refused')
else:
# Stop waiting for write, which was meant for connect()
self.writemode(False)
# Remove connection timeout
self.remove_connection_timeout()
self.initialize(self.sock)
return False
def initialize(self, sock):
""" Called when connection is estabilished. """
assert(self.status != TCPQ_OK)
self.sock = sock
self.bytestransferred = 0
self.status = TCPQ_OK
if not self.throttled:
self.readmode()
if len(self.inb) > 0:
self.process()
# Enable write mode if required
if len(self.outb) > 0 or self.send_handler != None:
self.writemode()
def append_input(self, data):
""" This function can be used to insert arbitrary data to the
input queue. It is useful when the socket has already been read before
creating TCP_Queue object. The data that was read can be partially
or fully passed for TCP_Queue to be processed. This can be used
to skip protocol headers before processing the connection as
TCP_Queue messages. """
if self.status == TCPQ_OK:
die('TCP_Queue: you may not insert data with append_input() after it has been initialized!\n')
self.inb += data
def set_close_handler(self, f):
self.closehandler = f
def set_timeout(self, timeinterval, timeoutbytes = 1):
""" Set or remove timeout. Timeout is removed if timeinterval == None.
Otherwise the timeout in seconds is 'timeinterval'. During this
interval at least 'timeoutbytes' must be transferred in total to
either direction. By default 'timeoutbytes' is 1, which means that if
any IO takes place within 'timeinterval' seconds, the connection does
not timeout. """
self.remove_data_timeout()
if timeinterval == None:
return
assert(timeinterval > 0 and timeoutbytes > 0)
self.timeoutbytes = timeoutbytes
self.timeouttag = timeout_add(timeinterval * 1000, self.timeout)
def set_max_message_size(self, maxsize):
assert(maxsize == None or maxsize > 0)
self.maxsize = maxsize
def set_wsize(self, wsize):
assert(wsize > 0)
self.wsize = wsize
def set_send_handler(self, handler = None):
""" Start streaming mode. Given handler is called when output queue
is to be filled. Handler can report error by calling close() and
returning None.
Setting None ends streaming mode. """
self.send_handler = handler
# If we are not currently in send mode, make sure we start writing to
# the socket by enabling the write watch.
# NOTE: The write watch is disabled inside the write handler when
# it is no longer needed.
if handler != None:
self.writemode()
def set_recv_handler(self, handler = None):
""" Set handler for incoming data. Can be used for receiving streaming
data. Handler should return number of bytes it consumed.
Handler can report error by calling close() and returning None.
Setting handler to None will return to normal operation. """
self.recv_handler = handler
def throttle(self, enabled = True):
""" Start or stop throttling received data. """
self.throttled = enabled
# If we are leaving the throttled mode, make sure the read watch
# is installed and we are reading data from the socket.
if not enabled:
self.readmode()
def readmode(self, readenable=True):
""" Enable or disable read event handler. Should not be called from
the outside of TCP_Queue """
if readenable:
if self.rtag == None:
self.rtag = io_add_watch(self.sock, IO_IN, self.socket_read, priority=PRIORITY_LOW)
elif self.rtag != None:
source_remove(self.rtag)
self.rtag = None
def writemode(self, writeenable=True):
""" Enable or disable write event handler. Should not be called from
the outside of TCP_Queue """
if writeenable:
if self.wtag == None:
self.wtag = io_add_watch(self.sock, IO_OUT, self.socket_write, priority=PRIORITY_LOW)
elif self.wtag != None:
source_remove(self.wtag)
self.wtag = None
def remove_connection_timeout(self):
if self.connecttimeouttag != None:
source_remove(self.connecttimeouttag)
self.connecttimeouttag = None
def remove_data_timeout(self):
if self.timeouttag != None:
source_remove(self.timeouttag)
self.timeouttag = None
def remove_io_notifications(self):
self.readmode(False)
self.writemode(False)
self.remove_data_timeout()
self.remove_connection_timeout()
def close(self, status = TCPQ_EOF, msg = ''):
debug('TCP_Queue closed: status %d (%s)\n' %(status, msg))
self.inb = ''
self.outb = ''
self.send_handler = None
self.recv_handler = None
self.throttled = False
self.status = status
self.remove_io_notifications()
if self.sock != None:
self.sock.close()
self.sock = None
if self.closehandler != None:
self.closehandler(self, self.parameter, msg)
def close_after_send(self, msg=''):
self.closeaftersend = msg
self.writemode()
def timeout(self):
if self.bytestransferred < self.timeoutbytes:
self.close(TCPQ_TIMEOUT, 'Idle timeout')
return False
self.bytestransferred = 0
return True
def get_one_msg(self):
""" Messages come in formatted as: LENGTH1 + MSG1 + LENGTH2 + MSG2 + ..
where LENGTHx is a bencoded unsigned integer, e.g. 'i65e' == 65 bytes.
The actual message comes directly after length. The message is
arbitrary binary data.
self.msglen is used to store the length of a message that is currently
being read.
The function has two states:
1. self.msglen == None
* read LENGTH-X
2. self.msglen >= 0
* read MSG-X
"""
nothing = (True, None)
error = (False, None)
if self.msglen == None:
# Read bencoded unsigned integer. We don't actually need to check
# the initial prefix character 'i'.
i = self.inb.find('e')
if i < 0:
if len(self.inb) < 10:
return nothing
# Too long a header without a terminator, kill connection
return error
try:
x = int(self.inb[1:i])
except ValueError:
x = -1
if x < 0 or (self.maxsize != None and x > self.maxsize):
return error
self.msglen = x
self.inb = self.inb[(i + 1):]
if self.msglen == None or len(self.inb) < self.msglen:
return nothing
# We got the full payload
# Remove the payload from the beginning of self.inb and return it
msg = self.inb[0:self.msglen]
self.inb = self.inb[self.msglen:]
# Next time: read a message length again (this branch will not be re-entered)
self.msglen = None
return (True, msg)
def process(self):
""" Process all complete and valid messages in self.inb """
assert(self.status == TCPQ_OK)
success = True
status = TCPQ_PROTOCOL_VIOLATION
# Note: throttling can be enabled inside handler
while success and not self.throttled:
# In streaming mode, give all received data to handler
if self.recv_handler != None:
consumed = self.recv_handler(self.inb)
if consumed == None:
return False
self.inb = self.inb[consumed:]
if len(self.inb) == 0:
break
else:
(success, msg) = self.get_one_msg()
if msg == None:
break
success = self.handler(self, msg, self.parameter)
if not success:
self.close(status)
return success
def socket_read(self, fd, condition):
""" Received data is now available on the socket. """
assert(self.status == TCPQ_OK)
try:
chunk = self.sock.recv(TP_MAX_TRANSFER)
except error, (errno, strerror):
warning('TCP_Queue read error %d: %s\n' %(errno, strerror))
ret = (errno == EAGAIN or errno == EINTR)
if not ret:
self.close(TCPQ_ERROR, msg = strerror)
return ret
if len(chunk) == 0:
self.close(TCPQ_EOF)
return False
self.bytestransferred += len(chunk)
self.inb += chunk
if not self.process():
return False
# Do we continue in read mode?
ret = not self.throttled
if not ret:
self.readmode(False)
return ret
def write_buffer(self):
""" Write from the send queue to the socket. Maximum amount of written
data is the window size. """
chunk = self.outb[0:self.wsize]
try:
bytes = self.sock.send(chunk)
except error, (errno, strerror):
warning('TCP_Queue send error %d: %s\n' %(errno, strerror))
ret = (errno == EAGAIN or errno == EINTR)
if not ret:
self.close(TCPQ_ERROR, msg = strerror)
return ret
# Successfully sent data. Remove it from the beginning of self.outb
self.bytestransferred += bytes
self.outb = self.outb[bytes:]
return True
def socket_write(self, fd, condition):
""" The socket can be now written to. """
assert(self.status == TCPQ_OK)
# If we are sending stream, fill send queue
if self.send_handler != None and len(self.outb) < self.wsize:
chunk = self.send_handler()
if chunk == None:
return False
self.outb += chunk
if not self.write_buffer():
return False
if not self.throttled and len(self.inb) > 0:
# We may have come back from throttled mode; process
# buffered data
if not self.process():
return False
# Do we continue in write mode?
ret = len(self.outb) > 0 or self.send_handler != None
if not ret:
if self.closeaftersend != None:
self.close(msg=self.closeaftersend)
else:
self.writemode(False)
return ret
def write(self, msg, writelength=True):
""" Write a message to the queue. msg is an arbitrary binary blob
to be sent.
Normally, all messages are prefixed with a message length. When one
sends a message "foo", it is prefixed with bencoded number 3 indicating
the length of "foo". However, if writelength == False, the length is
not sent. This can be used for hacks, for example, to prefix a new
connection with a magic value.
Note, msg with zero length is actually sent. The other party will
receive an empty string.
"""
if writelength:
self.outb += 'i%de' % len(msg)
self.outb += msg
# If we are not currently in send mode, make sure we start writing to
# the socket by enabling the write watch.
# NOTE: The write watch is disabled inside the write handler when
# it is no longer needed.
if self.status == TCPQ_OK:
self.writemode()
|
|
from __future__ import unicode_literals
import threading
import time
from multiple_database.routers import TestRouter
from django.db import DatabaseError, connection, router, transaction
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import Person
# We need to set settings.DEBUG to True so we can capture the output SQL
# to examine.
@override_settings(DEBUG=True)
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.person = Person.objects.create(name='Reinhardt')
# We need another database connection in transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
self.new_connection = connection.copy()
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, tested_connection, nowait=False):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = tested_connection.ops.for_update_sql(nowait)
sql = tested_connection.queries[-1]['sql']
return bool(sql.find(for_update_sql) > -1)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
Test that the backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic():
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(connection))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
Test that the backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic():
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(connection, nowait=True))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
If a SELECT...FOR UPDATE NOWAIT is run on a database backend
that supports FOR UPDATE but not NOWAIT, then we should find
that a DatabaseError is raised.
"""
self.assertRaises(
DatabaseError,
list,
Person.objects.all().select_for_update(nowait=True)
)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
"""
Test that a TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
with self.assertRaises(transaction.TransactionManagementError):
list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
"""
Test that no TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.all().select_for_update()
with self.assertRaises(transaction.TransactionManagementError):
list(people)
def run_select_for_update(self, status, nowait=False):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# a per-thread basis
with transaction.atomic():
people = list(
Person.objects.all().select_for_update(nowait=nowait)
)
people[0].name = 'Fred'
people[0].save()
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
Check that a thread running a select_for_update that
accesses rows being touched by a similar operation
on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.isAlive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Check that running a raw query which can't obtain a FOR UPDATE lock
raises the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
query = Person.objects.select_for_update()
self.assertEqual(router.db_for_write(Person), query.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
with transaction.atomic():
person = Person.objects.select_for_update().get(name='Reinhardt')
self.assertEqual(person.name, 'Reinhardt')
|
|
"""The tests for the emulated Hue component."""
import asyncio
import json
from unittest.mock import patch
import pytest
from homeassistant import setup, const, core
import homeassistant.components as core_components
from homeassistant.components import (
emulated_hue, http, light, script, media_player, fan
)
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.components.emulated_hue.hue_api import (
HUE_API_STATE_ON, HUE_API_STATE_BRI, HueUsernameView,
HueAllLightsStateView, HueOneLightStateView, HueOneLightChangeView)
from homeassistant.components.emulated_hue import Config
from tests.common import get_test_instance_port
HTTP_SERVER_PORT = get_test_instance_port()
BRIDGE_SERVER_PORT = get_test_instance_port()
BRIDGE_URL_BASE = 'http://127.0.0.1:{}'.format(BRIDGE_SERVER_PORT) + '{}'
JSON_HEADERS = {const.HTTP_HEADER_CONTENT_TYPE: const.CONTENT_TYPE_JSON}
@pytest.fixture
def hass_hue(loop, hass):
"""Setup a hass instance for these tests."""
# We need to do this to get access to homeassistant/turn_(on,off)
loop.run_until_complete(
core_components.async_setup(hass, {core.DOMAIN: {}}))
loop.run_until_complete(setup.async_setup_component(
hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: HTTP_SERVER_PORT}}))
with patch('homeassistant.components'
'.emulated_hue.UPNPResponderThread'):
loop.run_until_complete(
setup.async_setup_component(hass, emulated_hue.DOMAIN, {
emulated_hue.DOMAIN: {
emulated_hue.CONF_LISTEN_PORT: BRIDGE_SERVER_PORT,
emulated_hue.CONF_EXPOSE_BY_DEFAULT: True
}
}))
loop.run_until_complete(
setup.async_setup_component(hass, light.DOMAIN, {
'light': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, script.DOMAIN, {
'script': {
'set_kitchen_light': {
'sequence': [
{
'service_template':
"light.turn_{{ requested_state }}",
'data_template': {
'entity_id': 'light.kitchen_lights',
'brightness': "{{ requested_level }}"
}
}
]
}
}
}))
loop.run_until_complete(
setup.async_setup_component(hass, media_player.DOMAIN, {
'media_player': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
setup.async_setup_component(hass, fan.DOMAIN, {
'fan': [
{
'platform': 'demo',
}
]
}))
# Kitchen light is explicitly excluded from being exposed
kitchen_light_entity = hass.states.get('light.kitchen_lights')
attrs = dict(kitchen_light_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = False
hass.states.async_set(
kitchen_light_entity.entity_id, kitchen_light_entity.state,
attributes=attrs)
# Expose the script
script_entity = hass.states.get('script.set_kitchen_light')
attrs = dict(script_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = True
hass.states.async_set(
script_entity.entity_id, script_entity.state, attributes=attrs
)
return hass
@pytest.fixture
def hue_client(loop, hass_hue, test_client):
"""Create web client for emulated hue api."""
web_app = hass_hue.http.app
config = Config(None, {'type': 'alexa'})
HueUsernameView().register(web_app.router)
HueAllLightsStateView(config).register(web_app.router)
HueOneLightStateView(config).register(web_app.router)
HueOneLightChangeView(config).register(web_app.router)
return loop.run_until_complete(test_client(web_app))
@asyncio.coroutine
def test_discover_lights(hue_client):
"""Test the discovery of lights."""
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
devices = set(val['uniqueid'] for val in result_json.values())
# Make sure the lights we added to the config are there
assert 'light.ceiling_lights' in devices
assert 'light.bed_light' in devices
assert 'script.set_kitchen_light' in devices
assert 'light.kitchen_lights' not in devices
assert 'media_player.living_room' in devices
assert 'media_player.bedroom' in devices
assert 'media_player.walkman' in devices
assert 'media_player.lounge_room' in devices
assert 'fan.living_room_fan' in devices
@asyncio.coroutine
def test_get_light_state(hass_hue, hue_client):
"""Test the getting of light state."""
# Turn office light on and set to 127 brightness
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{
const.ATTR_ENTITY_ID: 'light.ceiling_lights',
light.ATTR_BRIGHTNESS: 127
},
blocking=True)
office_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert office_json['state'][HUE_API_STATE_ON] is True
assert office_json['state'][HUE_API_STATE_BRI] == 127
# Check all lights view
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
assert 'light.ceiling_lights' in result_json
assert result_json['light.ceiling_lights']['state'][HUE_API_STATE_BRI] == \
127
# Turn bedroom light off
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{
const.ATTR_ENTITY_ID: 'light.bed_light'
},
blocking=True)
bedroom_json = yield from perform_get_light_state(
hue_client, 'light.bed_light', 200)
assert bedroom_json['state'][HUE_API_STATE_ON] is False
assert bedroom_json['state'][HUE_API_STATE_BRI] == 0
# Make sure kitchen light isn't accessible
yield from perform_get_light_state(
hue_client, 'light.kitchen_lights', 404)
@asyncio.coroutine
def test_put_light_state(hass_hue, hue_client):
"""Test the seeting of light states."""
yield from perform_put_test_on_ceiling_lights(hass_hue, hue_client)
# Turn the bedroom light on first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{const.ATTR_ENTITY_ID: 'light.bed_light',
light.ATTR_BRIGHTNESS: 153},
blocking=True)
bed_light = hass_hue.states.get('light.bed_light')
assert bed_light.state == STATE_ON
assert bed_light.attributes[light.ATTR_BRIGHTNESS] == 153
# Go through the API to turn it off
bedroom_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.bed_light', False)
bedroom_result_json = yield from bedroom_result.json()
assert bedroom_result.status == 200
assert 'application/json' in bedroom_result.headers['content-type']
assert len(bedroom_result_json) == 1
# Check to make sure the state changed
bed_light = hass_hue.states.get('light.bed_light')
assert bed_light.state == STATE_OFF
# Make sure we can't change the kitchen light state
kitchen_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.kitchen_light', True)
assert kitchen_result.status == 404
@asyncio.coroutine
def test_put_light_state_script(hass_hue, hue_client):
"""Test the setting of script variables."""
# Turn the kitchen light off first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'light.kitchen_lights'},
blocking=True)
# Emulated hue converts 0-100% to 0-255.
level = 23
brightness = round(level * 255 / 100)
script_result = yield from perform_put_light_state(
hass_hue, hue_client,
'script.set_kitchen_light', True, brightness)
script_result_json = yield from script_result.json()
assert script_result.status == 200
assert len(script_result_json) == 2
kitchen_light = hass_hue.states.get('light.kitchen_lights')
assert kitchen_light.state == 'on'
assert kitchen_light.attributes[light.ATTR_BRIGHTNESS] == level
@asyncio.coroutine
def test_put_light_state_media_player(hass_hue, hue_client):
"""Test turning on media player and setting volume."""
# Turn the music player off first
yield from hass_hue.services.async_call(
media_player.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'media_player.walkman'},
blocking=True)
# Emulated hue converts 0.0-1.0 to 0-255.
level = 0.25
brightness = round(level * 255)
mp_result = yield from perform_put_light_state(
hass_hue, hue_client,
'media_player.walkman', True, brightness)
mp_result_json = yield from mp_result.json()
assert mp_result.status == 200
assert len(mp_result_json) == 2
walkman = hass_hue.states.get('media_player.walkman')
assert walkman.state == 'playing'
assert walkman.attributes[media_player.ATTR_MEDIA_VOLUME_LEVEL] == level
@asyncio.coroutine
def test_put_light_state_fan(hass_hue, hue_client):
"""Test turning on fan and setting speed."""
# Turn the fan off first
yield from hass_hue.services.async_call(
fan.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'fan.living_room_fan'},
blocking=True)
# Emulated hue converts 0-100% to 0-255.
level = 43
brightness = round(level * 255 / 100)
fan_result = yield from perform_put_light_state(
hass_hue, hue_client,
'fan.living_room_fan', True, brightness)
fan_result_json = yield from fan_result.json()
assert fan_result.status == 200
assert len(fan_result_json) == 2
living_room_fan = hass_hue.states.get('fan.living_room_fan')
assert living_room_fan.state == 'on'
assert living_room_fan.attributes[fan.ATTR_SPEED] == fan.SPEED_MEDIUM
# pylint: disable=invalid-name
@asyncio.coroutine
def test_put_with_form_urlencoded_content_type(hass_hue, hue_client):
"""Test the form with urlencoded content."""
# Needed for Alexa
yield from perform_put_test_on_ceiling_lights(
hass_hue, hue_client, 'application/x-www-form-urlencoded')
# Make sure we fail gracefully when we can't parse the data
data = {'key1': 'value1', 'key2': 'value2'}
result = yield from hue_client.put(
'/api/username/lights/light.ceiling_lights/state',
headers={
'content-type': 'application/x-www-form-urlencoded'
},
data=data,
)
assert result.status == 400
@asyncio.coroutine
def test_entity_not_found(hue_client):
"""Test for entity which are not found."""
result = yield from hue_client.get(
'/api/username/lights/not.existant_entity')
assert result.status == 404
result = yield from hue_client.put(
'/api/username/lights/not.existant_entity/state')
assert result.status == 404
@asyncio.coroutine
def test_allowed_methods(hue_client):
"""Test the allowed methods."""
result = yield from hue_client.get(
'/api/username/lights/light.ceiling_lights/state')
assert result.status == 405
result = yield from hue_client.put(
'/api/username/lights/light.ceiling_lights')
assert result.status == 405
result = yield from hue_client.put(
'/api/username/lights')
assert result.status == 405
@asyncio.coroutine
def test_proper_put_state_request(hue_client):
"""Test the request to set the state."""
# Test proper on value parsing
result = yield from hue_client.put(
'/api/username/lights/{}/state'.format(
'light.ceiling_lights'),
data=json.dumps({HUE_API_STATE_ON: 1234}))
assert result.status == 400
# Test proper brightness value parsing
result = yield from hue_client.put(
'/api/username/lights/{}/state'.format(
'light.ceiling_lights'),
data=json.dumps({
HUE_API_STATE_ON: True,
HUE_API_STATE_BRI: 'Hello world!'
}))
assert result.status == 400
# pylint: disable=invalid-name
def perform_put_test_on_ceiling_lights(hass_hue, hue_client,
content_type='application/json'):
"""Test the setting of a light."""
# Turn the office light off first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'light.ceiling_lights'},
blocking=True)
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_OFF
# Go through the API to turn it on
office_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.ceiling_lights', True, 56, content_type)
assert office_result.status == 200
assert 'application/json' in office_result.headers['content-type']
office_result_json = yield from office_result.json()
assert len(office_result_json) == 2
# Check to make sure the state changed
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_ON
assert ceiling_lights.attributes[light.ATTR_BRIGHTNESS] == 56
@asyncio.coroutine
def perform_get_light_state(client, entity_id, expected_status):
"""Test the gettting of a light state."""
result = yield from client.get('/api/username/lights/{}'.format(entity_id))
assert result.status == expected_status
if expected_status == 200:
assert 'application/json' in result.headers['content-type']
return (yield from result.json())
return None
@asyncio.coroutine
def perform_put_light_state(hass_hue, client, entity_id, is_on,
brightness=None, content_type='application/json'):
"""Test the setting of a light state."""
req_headers = {'Content-Type': content_type}
data = {HUE_API_STATE_ON: is_on}
if brightness is not None:
data[HUE_API_STATE_BRI] = brightness
result = yield from client.put(
'/api/username/lights/{}/state'.format(entity_id), headers=req_headers,
data=json.dumps(data).encode())
# Wait until state change is complete before continuing
yield from hass_hue.async_block_till_done()
return result
|
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
from .permissions import Permissions
from .errors import InvalidArgument
from .colour import Colour
from .mixins import Hashable
from .utils import snowflake_time
class Role(Hashable):
"""Represents a Discord role in a :class:`Guild`.
Supported Operations:
+-----------+------------------------------------------------------------------+
| Operation | Description |
+===========+==================================================================+
| x == y | Checks if two roles are equal. |
+-----------+------------------------------------------------------------------+
| x != y | Checks if two roles are not equal. |
+-----------+------------------------------------------------------------------+
| x > y | Checks if a role is higher than another in the hierarchy. |
+-----------+------------------------------------------------------------------+
| x < y | Checks if a role is lower than another in the hierarchy. |
+-----------+------------------------------------------------------------------+
| x >= y | Checks if a role is higher or equal to another in the hierarchy. |
+-----------+------------------------------------------------------------------+
| x <= y | Checks if a role is lower or equal to another in the hierarchy. |
+-----------+------------------------------------------------------------------+
| hash(x) | Return the role's hash. |
+-----------+------------------------------------------------------------------+
| str(x) | Returns the role's name. |
+-----------+------------------------------------------------------------------+
Attributes
----------
id: int
The ID for the role.
name: str
The name of the role.
permissions: :class:`Permissions`
Represents the role's permissions.
guild: :class:`Guild`
The guild the role belongs to.
colour: :class:`Colour`
Represents the role colour. An alias exists under ``color``.
hoist: bool
Indicates if the role will be displayed separately from other members.
position: int
The position of the role. This number is usually positive. The bottom
role has a position of 0.
managed: bool
Indicates if the role is managed by the guild through some form of
integrations such as Twitch.
mentionable: bool
Indicates if the role can be mentioned by users.
"""
__slots__ = ('id', 'name', 'permissions', 'color', 'colour', 'position',
'managed', 'mentionable', 'hoist', 'guild', '_state' )
def __init__(self, *, guild, state, data):
self.guild = guild
self._state = state
self.id = int(data['id'])
self._update(data)
def __str__(self):
return self.name
def __repr__(self):
return '<Role id={0.id} name={0.name!r}>'.format(self)
def __lt__(self, other):
if not isinstance(other, Role) or not isinstance(self, Role):
return NotImplemented
if self.guild != other.guild:
raise RuntimeError('cannot compare roles from two different guilds.')
if self.position < other.position:
return True
if self.position == other.position:
return int(self.id) > int(other.id)
return False
def __le__(self, other):
r = Role.__lt__(other, self)
if r is NotImplemented:
return NotImplemented
return not r
def __gt__(self, other):
return Role.__lt__(other, self)
def __ge__(self, other):
r = Role.__lt__(self, other)
if r is NotImplemented:
return NotImplemented
return not r
def _update(self, data):
self.name = data['name']
self.permissions = Permissions(data.get('permissions', 0))
self.position = data.get('position', 0)
self.colour = Colour(data.get('color', 0))
self.hoist = data.get('hoist', False)
self.managed = data.get('managed', False)
self.mentionable = data.get('mentionable', False)
self.color = self.colour
def is_default(self):
"""Checks if the role is the default role."""
return self.guild.id == self.id
@property
def created_at(self):
"""Returns the role's creation time in UTC."""
return snowflake_time(self.id)
@property
def mention(self):
"""Returns a string that allows you to mention a role."""
return '<@&{}>'.format(self.id)
@property
def members(self):
"""Returns a list of :class:`Member` with this role."""
all_members = self.guild.members
if self.is_default():
return all_members
ret = []
for member in all_members:
if self in member.roles:
ret.append(member)
return ret
@asyncio.coroutine
def _move(self, position):
if position <= 0:
raise InvalidArgument("Cannot move role to position 0 or below")
if self.is_default():
raise InvalidArgument("Cannot move default role")
if self.position == position:
return # Save discord the extra request.
http = self._state.http
change_range = range(min(self.position, position), max(self.position, position) + 1)
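# Only the roles whose positions fall inside the affected range need to be
# reordered; this role is then inserted at the start or the end of that
# slice, depending on whether it is moving down or up the hierarchy.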
sorted_roles = sorted((x for x in self.guild.roles if x.position in change_range and x.id != self.id),
key=lambda x: x.position)
roles = [r.id for r in sorted_roles]
if self.position > position:
roles.insert(0, self.id)
else:
roles.append(self.id)
payload = [{"id": z[0], "position": z[1]} for z in zip(roles, change_range)]
yield from http.move_role_position(self.guild.id, payload)
@asyncio.coroutine
def edit(self, **fields):
"""|coro|
Edits the role.
You must have the :attr:`Permissions.manage_roles` permission to
use this.
All fields are optional.
Parameters
-----------
name: str
The new role name to change to.
permissions: :class:`Permissions`
The new permissions to change to.
colour: :class:`Colour`
The new colour to change to. (aliased to color as well)
hoist: bool
Indicates if the role should be shown separately in the member list.
mentionable: bool
Indicates if the role should be mentionable by others.
position: int
The new role's position. This must be below your top role's
position or it will fail.
Raises
-------
Forbidden
You do not have permissions to change the role.
HTTPException
Editing the role failed.
InvalidArgument
An invalid position was given or the default
role was asked to be moved.
"""
position = fields.get('position')
if position is not None:
yield from self._move(position)
self.position = position
try:
colour = fields['colour']
except KeyError:
colour = fields.get('color', self.colour)
payload = {
'name': fields.get('name', self.name),
'permissions': fields.get('permissions', self.permissions).value,
'color': colour.value,
'hoist': fields.get('hoist', self.hoist),
'mentionable': fields.get('mentionable', self.mentionable)
}
data = yield from self._state.http.edit_role(self.guild.id, self.id, **payload)
self._update(data)
@asyncio.coroutine
def delete(self):
"""|coro|
Deletes the role.
You must have the :attr:`Permissions.manage_roles` permission to
use this.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed.
"""
yield from self._state.http.delete_role(self.guild.id, self.id)
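# Illustrative usage -- a minimal sketch, not part of the original module,
# assuming two Role objects from the same guild, inside a coroutine:
#
#     if role_a > role_b:                       # hierarchy comparison
#         yield from role_b.edit(name='Members', mentionable=True)
#     print(role_a.mention)                     # e.g. '<@&123456789012345678>'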
|
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.activities_summary_v30_rc1 import ActivitiesSummaryV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.history_v30_rc1 import HistoryV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.orcid_identifier_v30_rc1 import OrcidIdentifierV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.person_v30_rc1 import PersonV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.preferences_v30_rc1 import PreferencesV30Rc1 # noqa: F401,E501
class RecordV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'orcid_identifier': 'OrcidIdentifierV30Rc1',
'preferences': 'PreferencesV30Rc1',
'history': 'HistoryV30Rc1',
'person': 'PersonV30Rc1',
'activities_summary': 'ActivitiesSummaryV30Rc1',
'path': 'str'
}
attribute_map = {
'orcid_identifier': 'orcid-identifier',
'preferences': 'preferences',
'history': 'history',
'person': 'person',
'activities_summary': 'activities-summary',
'path': 'path'
}
def __init__(self, orcid_identifier=None, preferences=None, history=None, person=None, activities_summary=None, path=None): # noqa: E501
"""RecordV30Rc1 - a model defined in Swagger""" # noqa: E501
self._orcid_identifier = None
self._preferences = None
self._history = None
self._person = None
self._activities_summary = None
self._path = None
self.discriminator = None
if orcid_identifier is not None:
self.orcid_identifier = orcid_identifier
if preferences is not None:
self.preferences = preferences
if history is not None:
self.history = history
if person is not None:
self.person = person
if activities_summary is not None:
self.activities_summary = activities_summary
if path is not None:
self.path = path
@property
def orcid_identifier(self):
"""Gets the orcid_identifier of this RecordV30Rc1. # noqa: E501
:return: The orcid_identifier of this RecordV30Rc1. # noqa: E501
:rtype: OrcidIdentifierV30Rc1
"""
return self._orcid_identifier
@orcid_identifier.setter
def orcid_identifier(self, orcid_identifier):
"""Sets the orcid_identifier of this RecordV30Rc1.
:param orcid_identifier: The orcid_identifier of this RecordV30Rc1. # noqa: E501
:type: OrcidIdentifierV30Rc1
"""
self._orcid_identifier = orcid_identifier
@property
def preferences(self):
"""Gets the preferences of this RecordV30Rc1. # noqa: E501
:return: The preferences of this RecordV30Rc1. # noqa: E501
:rtype: PreferencesV30Rc1
"""
return self._preferences
@preferences.setter
def preferences(self, preferences):
"""Sets the preferences of this RecordV30Rc1.
:param preferences: The preferences of this RecordV30Rc1. # noqa: E501
:type: PreferencesV30Rc1
"""
self._preferences = preferences
@property
def history(self):
"""Gets the history of this RecordV30Rc1. # noqa: E501
:return: The history of this RecordV30Rc1. # noqa: E501
:rtype: HistoryV30Rc1
"""
return self._history
@history.setter
def history(self, history):
"""Sets the history of this RecordV30Rc1.
:param history: The history of this RecordV30Rc1. # noqa: E501
:type: HistoryV30Rc1
"""
self._history = history
@property
def person(self):
"""Gets the person of this RecordV30Rc1. # noqa: E501
:return: The person of this RecordV30Rc1. # noqa: E501
:rtype: PersonV30Rc1
"""
return self._person
@person.setter
def person(self, person):
"""Sets the person of this RecordV30Rc1.
:param person: The person of this RecordV30Rc1. # noqa: E501
:type: PersonV30Rc1
"""
self._person = person
@property
def activities_summary(self):
"""Gets the activities_summary of this RecordV30Rc1. # noqa: E501
:return: The activities_summary of this RecordV30Rc1. # noqa: E501
:rtype: ActivitiesSummaryV30Rc1
"""
return self._activities_summary
@activities_summary.setter
def activities_summary(self, activities_summary):
"""Sets the activities_summary of this RecordV30Rc1.
:param activities_summary: The activities_summary of this RecordV30Rc1. # noqa: E501
:type: ActivitiesSummaryV30Rc1
"""
self._activities_summary = activities_summary
@property
def path(self):
"""Gets the path of this RecordV30Rc1. # noqa: E501
:return: The path of this RecordV30Rc1. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this RecordV30Rc1.
:param path: The path of this RecordV30Rc1. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RecordV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecordV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
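
# Hedged usage sketch (illustrative, not part of the generated module). The
# path value below is a made-up example; any ORCID record path could be used:
#
#     record = RecordV30Rc1(path='/0000-0001-2345-6789/record')
#     assert record.to_dict()['path'] == '/0000-0001-2345-6789/record'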
|
|
'''
graph_blast2pairs.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python graph_blast2pairs.py --help
Type::
python graph_blast2pairs.py --help
for command line help.
Command line options
--------------------
'''
import sys
import re
import getopt
import CGAT.Experiment as E
import CGAT.BlastAlignments as BlastAlignments
import math
USAGE = """python %s [OPTIONS] < graph.in > graph.out
Version: $Id: graph_blast2pairs.py 2782 2009-09-10 11:40:29Z andreas $
Wrapper for rescoring blast alignments.
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-m, --method= method to use for scoring [bitscore|normalize]
-f, --self-scores=      filename with self-scores
--lambda= lambda for bitscore calculation
--k= K for bitscore calculation
--expected= expected score in matrix
-a, --append append new columns at the end
--evalue-to-log convert evalue to log
--effective-length= calculate evalue from bitscore based on effective sbjct length
--min-evalue= minimum evalue (after which it is truncated)
--version               output version
New scores are saved in the third column (overwriting the E-value).
Methods:
kimura = set third column to kimura two parameter score from pid
bitscore = set third column to bitscore from score.
normalize-product = normalize third column by dividing with self-scores
new = old * old / self1 / self2
normalize-max = normalize third column by max(old/self1), max(old/self2)
normalize-scoredist-avg = normalize third column using the scoredist method of Sonnhammer & Hollich (2005)
scoredist-avg = set third column to the scoredist score of Sonnhammer & Hollich (2005)
scoredist-min = set third column to the modified scoredist score of Sonnhammer & Hollich (2005)
                (minimum self-score used for estimating the upper bound).
gapless-score = score of alignment without the gaps
reset-evalue = reset evalue based on bitscore
""" % sys.argv[0]
param_long_options = ["help", "verbose=", "method=", "lambda=", "k=", "self-scores=",
                      "expected=", "append", "evalue-to-log",
                      "effective-length=", "min-evalue=", "version"]
param_short_options = "hv:m:f:o:a"
param_loglevel = 1
param_method = "bitscore"
param_filename_self_scores = None
param_lambda = 0.267
param_K = 0.0410
param_expected = -0.5209
param_gop = -11.0
param_gep = -1.0
param_append = False
param_evalue_to_log = False
param_effective_length = None
param_min_evalue = 1e-200
def CalculateGapScore(ali, gop, gep):
s = 0.0
data = re.split("[+-]", ali[1:])
for x in data[1:-1:2]:
s += gop + int(x) * gep
return s
# ------------------------------------------------------------------------
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
    global param_loglevel, param_method, param_filename_self_scores, \
        param_lambda, param_K, param_expected, param_append, \
        param_evalue_to_log, param_effective_length, param_min_evalue

    if argv is None:
        argv = sys.argv
try:
        optlist, args = getopt.getopt(
            argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("-m", "--method"):
param_method = a
elif o in ("-f", "--self-scores"):
param_filename_self_scores = a
elif o == "--lambda":
param_lambda = float(a)
elif o == "--k":
param_K = float(a)
elif o == "--expected":
param_expected = float(a)
elif o in ("-a", "--append"):
param_append = True
elif o == "--evalue-to-log":
param_evalue_to_log = True
elif o == "--effective-length":
param_effective_length = int(a)
elif o == "--min-evalue":
param_min_evalue = float(a)
if param_loglevel >= 1:
print E.GetHeader()
print E.GetParams()
if param_filename_self_scores:
self_scores = {}
for line in open(param_filename_self_scores, "r"):
if line[0] == "#":
continue
d = line[:-1].split("\t")[:2]
if d[0] not in self_scores:
self_scores[d[0]] = 0.0
self_scores[d[0]] = max(self_scores[d[0]], float(d[1]))
if param_method == "kimura":
a = 0
f = lambda x: x < 0.85 and 0.0000001 - \
math.log(1.0 - x - 0.2 * x * x) or 5.2030
elif param_method == "bitscore":
a = 1
lK = math.log(param_K)
l2 = math.log(2)
f = lambda x: (param_lambda * x - lK) / l2
elif param_method in ("normalize-product", "normalize-product-distance",
"normalize-max", "normalize-max-distance"):
a = 2
if param_method == "normalize-product":
f = lambda x, y, z: x * x / self_scores[y] / self_scores[z]
elif param_method == "normalize-product-distance":
f = lambda x, y, z: max(
0.0, 1.0 - x * x / self_scores[y] / self_scores[z])
elif param_method == "normalize-max":
f = lambda x, y, z: max(x / self_scores[y], x / self_scores[z])
elif param_method == "normalize-max-distance":
f = lambda x, y, z: max(
0.0, 1.0 - max(x / self_scores[y], x / self_scores[z]))
elif param_method == "normalize-scoredist-avg":
a = 3
f = lambda x, y, z, l: max(
0.0, -100.0 * math.log((x - param_expected * l) / ((self_scores[y] + self_scores[z]) * 0.5 - param_expected * l)))
elif param_method == "scoredist-avg":
a = 4
f = lambda x, y, z, l: max(
0.0, -100.0 * math.log((x - param_expected * l) / ((self_scores[y] + self_scores[z]) * 0.5 - param_expected * l)))
elif param_method == "scoredist-min":
a = 4
f = lambda x, y, z, l: max(
0.0, -100.0 * math.log((x - param_expected * l) / (min(self_scores[y], self_scores[z]) - param_expected * l)))
elif param_method == "gapless-score":
a = 5
f = lambda x, a, b: x - \
CalculateGapScore(a, param_gop, param_gep) - \
CalculateGapScore(b, param_gop, param_gep)
elif param_method == "reset-evalue":
a = 6
if param_evalue_to_log:
# this way is less likely to underflow (2^-s might be zero for
# large s)
me = math.log(param_min_evalue)
l2 = math.log(2)
le = math.log(param_effective_length)
f = lambda s, m, n: max(me, -s * l2 + math.log(m) + le)
else:
f = lambda s, m, n: max(param_min_evalue, math.pow(2, -s) * m * n)
param_evalue_to_log = False
else:
        raise ValueError("unknown method %s" % param_method)
ninput, noutput, nskipped, nfailed = 0, 0, 0, 0
for line in sys.stdin:
if line[0] == "#":
continue
link = BlastAlignments.Link()
link.Read(line)
ninput += 1
try:
if a == 0:
new_val = f((100.0 - link.mPercentIdentity) / 100.0)
elif a == 1:
new_val = f(link.score)
elif a == 2:
# note: used to be evalue
new_val = f(link.mBitScore, link.mQueryToken, link.mSbjctToken)
elif a == 3:
new_val = f(link.mEvalue, link.mQueryToken, link.mSbjctToken, max(
link.mQueryTo - link.mQueryFrom, link.mSbjctTo - link.mSbjctFrom) + 1)
elif a == 4:
new_val = f(link.score, link.mQueryToken, link.mSbjctToken, max(
link.mQueryTo - link.mQueryFrom, link.mSbjctTo - link.mSbjctFrom) + 1)
elif a == 5:
new_val = int(f(link.score, link.mQueryAli, link.mSbjctAli))
elif a == 6:
new_val = f(
link.mBitScore, link.mQueryLength, param_effective_length)
except KeyError:
if param_loglevel >= 2:
print "# Key error in line", line[:-1]
nfailed += 1
continue
if param_evalue_to_log:
link.mEvalue = math.log(link.mEvalue)
if param_append:
print str(link) + "\t" + str(new_val)
else:
if a in (0, 1, 2, 3, 4, 6):
link.mEvalue = new_val
elif a in (5,):
link.score = new_val
print str(link)
noutput += 1
print "# ninput=%i, noutput=%i, nskipped=%i, failed=%i" % (
ninput, noutput, nskipped, nfailed)
if param_loglevel >= 1:
print E.GetFooter()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
from __future__ import division, absolute_import
from fontTools.ttLib import TTFont, newTable
from fontTools.cffLib import TopDictIndex, TopDict, CharStrings, SubrsIndex, GlobalSubrsIndex, PrivateDict, IndexedStrings
from fontTools.ttLib.tables.O_S_2f_2 import Panose
from fontTools.ttLib.tables._h_e_a_d import mac_epoch_diff
from .pens.t2CharStringPen import T2CharStringPen
from .fontInfoData import getFontBounds, getAttrWithFallback, dateStringToTimeValue, dateStringForNow, intListToNum, normalizeStringForPostscript
try:
sorted
except NameError:
def sorted(l):
l = list(l)
l.sort()
return l
def _roundInt(v):
return int(round(v))
class OutlineOTFCompiler(object):
"""
This object will create a bare-bones OTF-CFF containing
outline data and not much else. The only external
method is :meth:`ufo2fdk.tools.outlineOTF.compile`.
When creating this object, you must provide a *font*
object and a *path* indicating where the OTF should
be saved. Optionally, you can provide a *glyphOrder*
list of glyph names indicating the order of the glyphs
in the font.
"""
def __init__(self, font, path, glyphOrder=None):
self.ufo = font
self.path = path
self.log = []
# make any missing glyphs and store them locally
missingRequiredGlyphs = self.makeMissingRequiredGlyphs()
# make a dict of all glyphs
self.allGlyphs = {}
for glyph in font:
self.allGlyphs[glyph.name] = glyph
self.allGlyphs.update(missingRequiredGlyphs)
# store the glyph order
if glyphOrder is None:
glyphOrder = sorted(self.allGlyphs.keys())
self.glyphOrder = self.makeOfficialGlyphOrder(glyphOrder)
# make a reusable bounding box
self.fontBoundingBox = tuple([_roundInt(i) for i in self.makeFontBoundingBox()])
# make a reusable character mapping
self.unicodeToGlyphNameMapping = self.makeUnicodeToGlyphNameMapping()
# -----------
# Main Method
# -----------
def compile(self):
"""
Compile the OTF.
"""
self.otf = TTFont(sfntVersion="OTTO")
# populate basic tables
self.setupTable_head()
self.setupTable_hhea()
self.setupTable_hmtx()
self.setupTable_name()
self.setupTable_maxp()
self.setupTable_cmap()
self.setupTable_OS2()
self.setupTable_post()
self.setupTable_CFF()
self.setupOtherTables()
# write the file
self.otf.save(self.path)
# discard the object
self.otf.close()
del self.otf
# -----
# Tools
# -----
def makeFontBoundingBox(self):
"""
Make a bounding box for the font.
**This should not be called externally.** Subclasses
may override this method to handle the bounds creation
in a different way if desired.
"""
return getFontBounds(self.ufo)
def makeUnicodeToGlyphNameMapping(self):
"""
Make a ``unicode : glyph name`` mapping for the font.
**This should not be called externally.** Subclasses
may override this method to handle the mapping creation
in a different way if desired.
"""
mapping = {}
for glyphName, glyph in self.allGlyphs.items():
unicodes = glyph.unicodes
for uni in unicodes:
mapping[uni] = glyphName
return mapping
def makeMissingRequiredGlyphs(self):
"""
Add space and .notdef to the font if they are not present.
**This should not be called externally.** Subclasses
may override this method to handle the glyph creation
in a different way if desired.
"""
glyphs = {}
font = self.ufo
unitsPerEm = _roundInt(getAttrWithFallback(font.info, "unitsPerEm"))
ascender = _roundInt(getAttrWithFallback(font.info, "ascender"))
descender = _roundInt(getAttrWithFallback(font.info, "descender"))
defaultWidth = _roundInt(unitsPerEm * 0.5)
if ".notdef" not in self.ufo:
glyphs[".notdef"] = StubGlyph(name=".notdef", width=defaultWidth, unitsPerEm=unitsPerEm, ascender=ascender, descender=descender)
if "space" not in self.ufo:
glyphs["space"] = StubGlyph(name="space", width=defaultWidth, unitsPerEm=unitsPerEm, ascender=ascender, descender=descender, unicodes=[32])
return glyphs
def makeOfficialGlyphOrder(self, glyphOrder):
"""
        Make the final glyph order.
**This should not be called externally.** Subclasses
may override this method to handle the order creation
in a different way if desired.
"""
allGlyphs = self.allGlyphs
orderedGlyphs = [".notdef", "space"]
for glyphName in glyphOrder:
if glyphName in [".notdef", "space"]:
continue
orderedGlyphs.append(glyphName)
for glyphName in sorted(allGlyphs.keys()):
if glyphName not in orderedGlyphs:
orderedGlyphs.append(glyphName)
return orderedGlyphs
def getCharStringForGlyph(self, glyph, private, globalSubrs):
"""
        Get a Type2CharString for the *glyph*.
**This should not be called externally.** Subclasses
may override this method to handle the charstring creation
in a different way if desired.
"""
width = glyph.width
# subtract the nominal width
postscriptNominalWidthX = getAttrWithFallback(self.ufo.info, "postscriptNominalWidthX")
if postscriptNominalWidthX:
width = width - postscriptNominalWidthX
# round
width = _roundInt(width)
pen = T2CharStringPen(width, self.allGlyphs)
glyph.draw(pen)
charString = pen.getCharString(private, globalSubrs)
return charString
# --------------
# Table Builders
# --------------
def setupTable_head(self):
"""
Make the head table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self.otf["head"] = head = newTable("head")
font = self.ufo
head.checkSumAdjustment = 0
head.tableVersion = 1.0
versionMajor = getAttrWithFallback(font.info, "versionMajor")
versionMinor = getAttrWithFallback(font.info, "versionMinor") * .001
head.fontRevision = versionMajor + versionMinor
head.magicNumber = 0x5F0F3CF5
# upm
head.unitsPerEm = getAttrWithFallback(font.info, "unitsPerEm")
# times
head.created = dateStringToTimeValue(getAttrWithFallback(font.info, "openTypeHeadCreated")) - mac_epoch_diff
head.modified = dateStringToTimeValue(dateStringForNow()) - mac_epoch_diff
# bounding box
xMin, yMin, xMax, yMax = self.fontBoundingBox
head.xMin = xMin
head.yMin = yMin
head.xMax = xMax
head.yMax = yMax
# style mapping
styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName")
macStyle = []
if styleMapStyleName == "bold":
macStyle = [0]
elif styleMapStyleName == "bold italic":
macStyle = [0, 1]
elif styleMapStyleName == "italic":
macStyle = [1]
head.macStyle = intListToNum(macStyle, 0, 16)
# misc
head.flags = intListToNum(getAttrWithFallback(font.info, "openTypeHeadFlags"), 0, 16)
head.lowestRecPPEM = _roundInt(getAttrWithFallback(font.info, "openTypeHeadLowestRecPPEM"))
head.fontDirectionHint = 2
head.indexToLocFormat = 0
head.glyphDataFormat = 0
def setupTable_name(self):
"""
Make the name table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self.otf["name"] = newTable("name")
def setupTable_maxp(self):
"""
Make the maxp table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self.otf["maxp"] = maxp = newTable("maxp")
maxp.tableVersion = 0x00005000
def setupTable_cmap(self):
"""
Make the cmap table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
from fontTools.ttLib.tables._c_m_a_p import cmap_format_4
nonBMP = dict((k, v) for k, v in self.unicodeToGlyphNameMapping.items() if k > 65535)
if nonBMP:
mapping = dict((k, v) for k, v in self.unicodeToGlyphNameMapping.items() if k <= 65535)
else:
mapping = dict(self.unicodeToGlyphNameMapping)
        # unicode (platform 0)
cmap4_0_3 = cmap_format_4(4)
cmap4_0_3.platformID = 0
cmap4_0_3.platEncID = 3
cmap4_0_3.language = 0
cmap4_0_3.cmap = mapping
# windows
cmap4_3_1 = cmap_format_4(4)
cmap4_3_1.platformID = 3
cmap4_3_1.platEncID = 1
cmap4_3_1.language = 0
cmap4_3_1.cmap = mapping
# store
self.otf["cmap"] = cmap = newTable("cmap")
cmap.tableVersion = 0
cmap.tables = [cmap4_0_3, cmap4_3_1]
        # If we have glyphs outside the Unicode BMP, we must also add
        # format 12 subtables that can hold their codepoints (above U+FFFF).
if nonBMP:
from fontTools.ttLib.tables._c_m_a_p import cmap_format_12
nonBMP.update(mapping)
            # unicode (platform 0)
cmap12_0_4 = cmap_format_12(12)
cmap12_0_4.platformID = 0
cmap12_0_4.platEncID = 4
cmap12_0_4.language = 0
cmap12_0_4.cmap = nonBMP
# windows
cmap12_3_10 = cmap_format_12(12)
cmap12_3_10.platformID = 3
cmap12_3_10.platEncID = 10
cmap12_3_10.language = 0
cmap12_3_10.cmap = nonBMP
# update tables registry
cmap.tables = [cmap4_0_3, cmap4_3_1, cmap12_0_4, cmap12_3_10]
cmap.tables.sort()
def setupTable_OS2(self):
"""
Make the OS/2 table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self.otf["OS/2"] = os2 = newTable("OS/2")
font = self.ufo
os2.version = 0x0004
# average glyph width
widths = [glyph.width for glyph in self.allGlyphs.values() if glyph.width > 0]
os2.xAvgCharWidth = _roundInt(sum(widths) / len(widths))
# weight and width classes
os2.usWeightClass = getAttrWithFallback(font.info, "openTypeOS2WeightClass")
os2.usWidthClass = getAttrWithFallback(font.info, "openTypeOS2WidthClass")
# embedding
os2.fsType = intListToNum(getAttrWithFallback(font.info, "openTypeOS2Type"), 0, 16)
# subscript
v = getAttrWithFallback(font.info, "openTypeOS2SubscriptXSize")
if v is None:
v = 0
os2.ySubscriptXSize = _roundInt(v)
v = getAttrWithFallback(font.info, "openTypeOS2SubscriptYSize")
if v is None:
v = 0
os2.ySubscriptYSize = _roundInt(v)
v = getAttrWithFallback(font.info, "openTypeOS2SubscriptXOffset")
if v is None:
v = 0
os2.ySubscriptXOffset = _roundInt(v)
v = getAttrWithFallback(font.info, "openTypeOS2SubscriptYOffset")
if v is None:
v = 0
os2.ySubscriptYOffset = _roundInt(v)
# superscript
v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptXSize")
if v is None:
v = 0
os2.ySuperscriptXSize = _roundInt(v)
v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptYSize")
if v is None:
v = 0
os2.ySuperscriptYSize = _roundInt(v)
v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptXOffset")
if v is None:
v = 0
os2.ySuperscriptXOffset = _roundInt(v)
v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptYOffset")
if v is None:
v = 0
os2.ySuperscriptYOffset = _roundInt(v)
# strikeout
v = getAttrWithFallback(font.info, "openTypeOS2StrikeoutSize")
if v is None:
v = 0
os2.yStrikeoutSize = _roundInt(v)
v = getAttrWithFallback(font.info, "openTypeOS2StrikeoutPosition")
if v is None:
v = 0
os2.yStrikeoutPosition = _roundInt(v)
# family class
os2.sFamilyClass = 0 # XXX not sure how to create the appropriate value
# panose
data = getAttrWithFallback(font.info, "openTypeOS2Panose")
panose = Panose()
panose.bFamilyType = data[0]
panose.bSerifStyle = data[1]
panose.bWeight = data[2]
panose.bProportion = data[3]
panose.bContrast = data[4]
panose.bStrokeVariation = data[5]
panose.bArmStyle = data[6]
panose.bLetterForm = data[7]
panose.bMidline = data[8]
panose.bXHeight = data[9]
os2.panose = panose
# Unicode ranges
uniRanges = getAttrWithFallback(font.info, "openTypeOS2UnicodeRanges")
os2.ulUnicodeRange1 = intListToNum(uniRanges, 0, 32)
os2.ulUnicodeRange2 = intListToNum(uniRanges, 32, 32)
os2.ulUnicodeRange3 = intListToNum(uniRanges, 64, 32)
os2.ulUnicodeRange4 = intListToNum(uniRanges, 96, 32)
# codepage ranges
codepageRanges = getAttrWithFallback(font.info, "openTypeOS2CodePageRanges")
os2.ulCodePageRange1 = intListToNum(codepageRanges, 0, 32)
os2.ulCodePageRange2 = intListToNum(codepageRanges, 32, 32)
# vendor id
os2.achVendID = str(getAttrWithFallback(font.info, "openTypeOS2VendorID").decode("ascii", "ignore"))
# vertical metrics
os2.sxHeight = _roundInt(getAttrWithFallback(font.info, "xHeight"))
os2.sCapHeight = _roundInt(getAttrWithFallback(font.info, "capHeight"))
os2.sTypoAscender = _roundInt(getAttrWithFallback(font.info, "openTypeOS2TypoAscender"))
os2.sTypoDescender = _roundInt(getAttrWithFallback(font.info, "openTypeOS2TypoDescender"))
os2.sTypoLineGap = _roundInt(getAttrWithFallback(font.info, "openTypeOS2TypoLineGap"))
os2.usWinAscent = _roundInt(getAttrWithFallback(font.info, "openTypeOS2WinAscent"))
os2.usWinDescent = _roundInt(getAttrWithFallback(font.info, "openTypeOS2WinDescent"))
# style mapping
selection = list(getAttrWithFallback(font.info, "openTypeOS2Selection"))
styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName")
if styleMapStyleName == "regular":
selection.append(6)
elif styleMapStyleName == "bold":
selection.append(5)
elif styleMapStyleName == "italic":
selection.append(0)
elif styleMapStyleName == "bold italic":
selection += [0, 5]
os2.fsSelection = intListToNum(selection, 0, 16)
        # character indexes
unicodes = [i for i in self.unicodeToGlyphNameMapping.keys() if i is not None]
if unicodes:
minIndex = min(unicodes)
maxIndex = max(unicodes)
else:
# the font may have *no* unicode values
# (it really happens!) so there needs
# to be a fallback. use space for this.
minIndex = 0x0020
maxIndex = 0x0020
if maxIndex > 0xFFFF:
# the spec says that 0xFFFF should be used
# as the max if the max exceeds 0xFFFF
maxIndex = 0xFFFF
os2.fsFirstCharIndex = minIndex
os2.fsLastCharIndex = maxIndex
os2.usBreakChar = 32
os2.usDefaultChar = 0
# maximum contextual lookup length
os2.usMaxContex = 0
def setupTable_hmtx(self):
"""
Make the hmtx table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self.otf["hmtx"] = hmtx = newTable("hmtx")
hmtx.metrics = {}
for glyphName, glyph in self.allGlyphs.items():
width = glyph.width
left = 0
if len(glyph) or len(glyph.components):
left = glyph.leftMargin
if left is None:
left = 0
hmtx[glyphName] = (_roundInt(width), _roundInt(left))
def setupTable_hhea(self):
"""
Make the hhea table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self.otf["hhea"] = hhea = newTable("hhea")
font = self.ufo
hhea.tableVersion = 0x00010000
# vertical metrics
hhea.ascent = _roundInt(getAttrWithFallback(font.info, "openTypeHheaAscender"))
hhea.descent = _roundInt(getAttrWithFallback(font.info, "openTypeHheaDescender"))
hhea.lineGap = _roundInt(getAttrWithFallback(font.info, "openTypeHheaLineGap"))
# horizontal metrics
widths = []
lefts = []
rights = []
extents = []
for glyph in self.allGlyphs.values():
left = glyph.leftMargin
right = glyph.rightMargin
if left is None:
left = 0
if right is None:
right = 0
widths.append(glyph.width)
lefts.append(left)
rights.append(right)
bounds = glyph.bounds
if bounds is not None:
xMin, yMin, xMax, yMax = bounds
else:
xMin = 0
xMax = 0
extent = left + (xMax - xMin) # equation from spec for calculating xMaxExtent: Max(lsb + (xMax - xMin))
extents.append(extent)
hhea.advanceWidthMax = _roundInt(max(widths))
hhea.minLeftSideBearing = _roundInt(min(lefts))
hhea.minRightSideBearing = _roundInt(min(rights))
hhea.xMaxExtent = _roundInt(max(extents))
# misc
hhea.caretSlopeRise = getAttrWithFallback(font.info, "openTypeHheaCaretSlopeRise")
hhea.caretSlopeRun = getAttrWithFallback(font.info, "openTypeHheaCaretSlopeRun")
hhea.caretOffset = _roundInt(getAttrWithFallback(font.info, "openTypeHheaCaretOffset"))
hhea.reserved0 = 0
hhea.reserved1 = 0
hhea.reserved2 = 0
hhea.reserved3 = 0
hhea.metricDataFormat = 0
# glyph count
hhea.numberOfHMetrics = len(self.allGlyphs)
def setupTable_post(self):
"""
Make the post table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self.otf["post"] = post = newTable("post")
font = self.ufo
post.formatType = 3.0
# italic angle
italicAngle = getAttrWithFallback(font.info, "italicAngle")
post.italicAngle = italicAngle
# underline
underlinePosition = getAttrWithFallback(font.info, "postscriptUnderlinePosition")
if underlinePosition is None:
underlinePosition = 0
post.underlinePosition = _roundInt(underlinePosition)
underlineThickness = getAttrWithFallback(font.info, "postscriptUnderlineThickness")
if underlineThickness is None:
underlineThickness = 0
post.underlineThickness = _roundInt(underlineThickness)
# determine if the font has a fixed width
post.isFixedPitch = getAttrWithFallback(font.info, "postscriptIsFixedPitch")
# misc
post.minMemType42 = 0
post.maxMemType42 = 0
post.minMemType1 = 0
post.maxMemType1 = 0
def setupTable_CFF(self):
"""
Make the CFF table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self.otf["CFF "] = cff = newTable("CFF ")
cff = cff.cff
# set up the basics
cff.major = 1
cff.minor = 0
cff.hdrSize = 4
cff.offSize = 4
cff.fontNames = []
strings = IndexedStrings()
cff.strings = strings
private = PrivateDict(strings=strings)
private.rawDict.update(private.defaults)
globalSubrs = GlobalSubrsIndex(private=private)
topDict = TopDict(GlobalSubrs=globalSubrs, strings=strings)
topDict.Private = private
charStrings = topDict.CharStrings = CharStrings(file=None, charset=None,
globalSubrs=globalSubrs, private=private, fdSelect=None, fdArray=None)
charStrings.charStringsAreIndexed = True
topDict.charset = []
charStringsIndex = charStrings.charStringsIndex = SubrsIndex(private=private, globalSubrs=globalSubrs)
cff.topDictIndex = topDictIndex = TopDictIndex()
topDictIndex.append(topDict)
topDictIndex.strings = strings
cff.GlobalSubrs = globalSubrs
# populate naming data
info = self.ufo.info
psName = getAttrWithFallback(info, "postscriptFontName")
cff.fontNames.append(psName)
topDict = cff.topDictIndex[0]
topDict.version = "%d.%d" % (getAttrWithFallback(info, "versionMajor"), getAttrWithFallback(info, "versionMinor"))
trademark = getAttrWithFallback(info, "trademark")
if trademark:
trademark = normalizeStringForPostscript(trademark.replace(u"\u00A9", "Copyright"))
if trademark != self.ufo.info.trademark:
self.log.append("[Warning] The trademark was normalized for storage in the CFF table and consequently some characters were dropped: '%s'" % trademark)
if trademark is None:
trademark = ""
topDict.Notice = trademark
copyright = getAttrWithFallback(info, "copyright")
if copyright:
copyright = normalizeStringForPostscript(copyright.replace(u"\u00A9", "Copyright"))
if copyright != self.ufo.info.copyright:
self.log.append("[Warning] The copyright was normalized for storage in the CFF table and consequently some characters were dropped: '%s'" % copyright)
if copyright is None:
copyright = ""
topDict.Copyright = copyright
topDict.FullName = getAttrWithFallback(info, "postscriptFullName")
topDict.FamilyName = getAttrWithFallback(info, "openTypeNamePreferredFamilyName")
topDict.Weight = getAttrWithFallback(info, "postscriptWeightName")
topDict.FontName = getAttrWithFallback(info, "postscriptFontName")
# populate various numbers
topDict.isFixedPitch = getAttrWithFallback(info, "postscriptIsFixedPitch")
topDict.ItalicAngle = getAttrWithFallback(info, "italicAngle")
underlinePosition = getAttrWithFallback(info, "postscriptUnderlinePosition")
if underlinePosition is None:
underlinePosition = 0
topDict.UnderlinePosition = _roundInt(underlinePosition)
underlineThickness = getAttrWithFallback(info, "postscriptUnderlineThickness")
if underlineThickness is None:
underlineThickness = 0
topDict.UnderlineThickness = _roundInt(underlineThickness)
# populate font matrix
unitsPerEm = _roundInt(getAttrWithFallback(info, "unitsPerEm"))
topDict.FontMatrix = [1.0 / unitsPerEm, 0, 0, 1.0 / unitsPerEm, 0, 0]
# populate the width values
defaultWidthX = _roundInt(getAttrWithFallback(info, "postscriptDefaultWidthX"))
if defaultWidthX:
private.rawDict["defaultWidthX"] = defaultWidthX
nominalWidthX = _roundInt(getAttrWithFallback(info, "postscriptNominalWidthX"))
if nominalWidthX:
private.rawDict["nominalWidthX"] = nominalWidthX
# populate hint data
blueFuzz = _roundInt(getAttrWithFallback(info, "postscriptBlueFuzz"))
blueShift = _roundInt(getAttrWithFallback(info, "postscriptBlueShift"))
blueScale = getAttrWithFallback(info, "postscriptBlueScale")
forceBold = getAttrWithFallback(info, "postscriptForceBold")
blueValues = getAttrWithFallback(info, "postscriptBlueValues")
if isinstance(blueValues, list):
blueValues = [_roundInt(i) for i in blueValues]
otherBlues = getAttrWithFallback(info, "postscriptOtherBlues")
if isinstance(otherBlues, list):
otherBlues = [_roundInt(i) for i in otherBlues]
familyBlues = getAttrWithFallback(info, "postscriptFamilyBlues")
if isinstance(familyBlues, list):
familyBlues = [_roundInt(i) for i in familyBlues]
familyOtherBlues = getAttrWithFallback(info, "postscriptFamilyOtherBlues")
if isinstance(familyOtherBlues, list):
familyOtherBlues = [_roundInt(i) for i in familyOtherBlues]
stemSnapH = getAttrWithFallback(info, "postscriptStemSnapH")
if isinstance(stemSnapH, list):
stemSnapH = [_roundInt(i) for i in stemSnapH]
stemSnapV = getAttrWithFallback(info, "postscriptStemSnapV")
if isinstance(stemSnapV, list):
stemSnapV = [_roundInt(i) for i in stemSnapV]
# only write the blues data if some blues are defined.
if (blueValues or otherBlues):
private.rawDict["BlueFuzz"] = blueFuzz
private.rawDict["BlueShift"] = blueShift
private.rawDict["BlueScale"] = blueScale
private.rawDict["ForceBold"] = forceBold
private.rawDict["BlueValues"] = blueValues
private.rawDict["OtherBlues"] = otherBlues
private.rawDict["FamilyBlues"] = familyBlues
private.rawDict["FamilyOtherBlues"] = familyOtherBlues
# only write the stems if both are defined.
if (stemSnapH and stemSnapV):
private.rawDict["StemSnapH"] = stemSnapH
private.rawDict["StdHW"] = stemSnapH[0]
private.rawDict["StemSnapV"] = stemSnapV
private.rawDict["StdVW"] = stemSnapV[0]
# populate glyphs
for glyphName in self.glyphOrder:
glyph = self.allGlyphs[glyphName]
charString = self.getCharStringForGlyph(glyph, private, globalSubrs)
# add to the font
exists = glyphName in charStrings
if exists:
# XXX a glyph already has this name. should we choke?
glyphID = charStrings.charStrings[glyphName]
charStringsIndex.items[glyphID] = charString
else:
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
topDict.FontBBox = self.fontBoundingBox
# write the glyph order
self.otf.setGlyphOrder(self.glyphOrder)
def setupOtherTables(self):
"""
Make the other tables. The default implementation does nothing.
**This should not be called externally.** Subclasses
may override this method to add other tables to the
font if desired.
"""
pass
class StubGlyph(object):
"""
This object will be used to create missing glyphs
(specifically the space and the .notdef) in the
provided UFO.
"""
    def __init__(self, name, width, unitsPerEm, ascender, descender, unicodes=None):
        self.name = name
        self.width = width
        self.unitsPerEm = unitsPerEm
        self.ascender = ascender
        self.descender = descender
        self.unicodes = unicodes if unicodes is not None else []
self.components = []
if unicodes:
self.unicode = unicodes[0]
else:
self.unicode = None
if name == ".notdef":
self.draw = self._drawDefaultNotdef
def __len__(self):
if self.name == ".notdef":
return 1
return 0
def _get_leftMargin(self):
if self.bounds is None:
return 0
return self.bounds[0]
leftMargin = property(_get_leftMargin)
def _get_rightMargin(self):
bounds = self.bounds
if bounds is None:
return 0
xMin, yMin, xMax, yMax = bounds
return self.width - bounds[2]
rightMargin = property(_get_rightMargin)
def draw(self, pen):
pass
def _drawDefaultNotdef(self, pen):
width = int(round(self.unitsPerEm * 0.5))
stroke = int(round(self.unitsPerEm * 0.05))
ascender = self.ascender
descender = self.descender
xMin = stroke
xMax = width - stroke
yMax = ascender
yMin = descender
pen.moveTo((xMin, yMin))
pen.lineTo((xMax, yMin))
pen.lineTo((xMax, yMax))
pen.lineTo((xMin, yMax))
pen.lineTo((xMin, yMin))
pen.closePath()
xMin += stroke
xMax -= stroke
yMax -= stroke
yMin += stroke
pen.moveTo((xMin, yMin))
pen.lineTo((xMin, yMax))
pen.lineTo((xMax, yMax))
pen.lineTo((xMax, yMin))
pen.lineTo((xMin, yMin))
pen.closePath()
def _get_bounds(self):
from fontTools.pens.boundsPen import BoundsPen
pen = BoundsPen(None)
self.draw(pen)
return pen.bounds
bounds = property(_get_bounds)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Wrapper for pywsman.Client
"""
import time
from xml.etree import ElementTree
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _LW
from ironic.drivers.modules.drac import common as drac_common
from ironic.openstack.common import log as logging
pywsman = importutils.try_import('pywsman')
LOG = logging.getLogger(__name__)
_SOAP_ENVELOPE_URI = 'http://www.w3.org/2003/05/soap-envelope'
# Filter Dialects, see (Section 2.3.1):
# http://en.community.dell.com/techcenter/extras/m/white_papers/20439105.aspx
_FILTER_DIALECT_MAP = {'cql': 'http://schemas.dmtf.org/wbem/cql/1/dsp0202.pdf',
'wql': 'http://schemas.microsoft.com/wbem/wsman/1/WQL'}
# ReturnValue constants
RET_SUCCESS = '0'
RET_ERROR = '2'
RET_CREATED = '4096'
RETRY_COUNT = 5
RETRY_DELAY = 5
def get_wsman_client(node):
"""Return a DRAC client object.
Given an ironic node object, this method gives back a
Client object which is a wrapper for pywsman.Client.
:param node: an ironic node object.
:returns: a Client object.
:raises: InvalidParameterValue if some mandatory information
is missing on the node or on invalid inputs.
"""
driver_info = drac_common.parse_driver_info(node)
client = Client(**driver_info)
return client
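
# Hedged usage sketch (illustrative, not part of the original module). The
# resource URI is left as a parameter because the DCIM class names exposed by
# a given DRAC are an assumption here; error handling is omitted for brevity.
def _example_enumerate(node, resource_uri):
    """Illustrative only: enumerate a WS-Man resource on the node's DRAC."""
    client = get_wsman_client(node)
    return client.wsman_enumerate(resource_uri)
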
def retry_on_empty_response(client, action, *args, **kwargs):
    """Call *action* on the client, retrying on an empty response."""
func = getattr(client, action)
for i in range(RETRY_COUNT):
response = func(*args, **kwargs)
if response:
return response
else:
LOG.warning(_LW('Empty response on calling %(action)s on client. '
'Last error (cURL error code): %(last_error)s, '
'fault string: "%(fault_string)s" '
'response_code: %(response_code)s. '
'Retry attempt %(count)d') %
{'action': action,
'last_error': client.last_error(),
'fault_string': client.fault_string(),
'response_code': client.response_code(),
'count': i + 1})
time.sleep(RETRY_DELAY)
class Client(object):
def __init__(self, drac_host, drac_port, drac_path, drac_protocol,
drac_username, drac_password):
pywsman_client = pywsman.Client(drac_host, drac_port, drac_path,
drac_protocol, drac_username,
drac_password)
# TODO(ifarkas): Add support for CACerts
pywsman.wsman_transport_set_verify_peer(pywsman_client, False)
pywsman.wsman_transport_set_verify_host(pywsman_client, False)
self.client = pywsman_client
def wsman_enumerate(self, resource_uri, filter_query=None,
filter_dialect='cql'):
"""Enumerates a remote WS-Man class.
:param resource_uri: URI of the resource.
:param filter_query: the query string.
:param filter_dialect: the filter dialect. Valid options are:
'cql' and 'wql'. Defaults to 'cql'.
:raises: DracClientError on an error from pywsman library.
:raises: DracInvalidFilterDialect if an invalid filter dialect
was specified.
:returns: an ElementTree object of the response received.
"""
options = pywsman.ClientOptions()
filter_ = None
if filter_query is not None:
try:
filter_dialect = _FILTER_DIALECT_MAP[filter_dialect]
except KeyError:
valid_opts = ', '.join(_FILTER_DIALECT_MAP)
raise exception.DracInvalidFilterDialect(
invalid_filter=filter_dialect, supported=valid_opts)
filter_ = pywsman.Filter()
filter_.simple(filter_dialect, filter_query)
options.set_flags(pywsman.FLAG_ENUMERATION_OPTIMIZATION)
options.set_max_elements(100)
doc = retry_on_empty_response(self.client, 'enumerate',
options, filter_, resource_uri)
root = self._get_root(doc)
LOG.debug("WSMAN enumerate returned raw XML: %s",
ElementTree.tostring(root))
final_xml = root
find_query = './/{%s}Body' % _SOAP_ENVELOPE_URI
insertion_point = final_xml.find(find_query)
while doc.context() is not None:
doc = retry_on_empty_response(self.client, 'pull', options, None,
resource_uri, str(doc.context()))
root = self._get_root(doc)
for result in root.findall(find_query):
for child in list(result):
insertion_point.append(child)
return final_xml
def wsman_invoke(self, resource_uri, method, selectors=None,
properties=None, expected_return=None):
"""Invokes a remote WS-Man method.
:param resource_uri: URI of the resource.
:param method: name of the method to invoke.
:param selectors: dictionary of selectors.
:param properties: dictionary of properties.
        :param expected_return: expected return value.
:raises: DracClientError on an error from pywsman library.
:raises: DracOperationFailed on error reported back by DRAC.
:raises: DracUnexpectedReturnValue on return value mismatch.
:returns: an ElementTree object of the response received.
"""
if selectors is None:
selectors = {}
if properties is None:
properties = {}
options = pywsman.ClientOptions()
for name, value in selectors.items():
options.add_selector(name, value)
        # NOTE(ifarkas): manually constructing the XML doc should be removed
        #                once pywsman supports passing a list as a property.
        #                For now this is only a fallback: if no list is
        #                provided, the supported pywsman API is used.
list_included = any([isinstance(prop_item, list) for prop_item
in properties.values()])
if list_included:
xml_doc = pywsman.XmlDoc('%s_INPUT' % method, resource_uri)
xml_root = xml_doc.root()
for name, value in properties.items():
if isinstance(value, list):
for item in value:
xml_root.add(resource_uri, str(name), str(item))
else:
xml_root.add(resource_uri, name, value)
LOG.debug(('WSMAN invoking: %(resource_uri)s:%(method)s'
'\nselectors: %(selectors)r\nxml: %(xml)s'),
{
'resource_uri': resource_uri,
'method': method,
'selectors': selectors,
'xml': xml_root.string()
})
else:
xml_doc = None
for name, value in properties.items():
options.add_property(name, value)
LOG.debug(('WSMAN invoking: %(resource_uri)s:%(method)s'
                       '\nselectors: %(selectors)r\nproperties: %(props)r') % {
'resource_uri': resource_uri,
'method': method,
'selectors': selectors,
'props': properties})
doc = retry_on_empty_response(self.client, 'invoke', options,
resource_uri, method, xml_doc)
root = self._get_root(doc)
LOG.debug("WSMAN invoke returned raw XML: %s",
ElementTree.tostring(root))
return_value = drac_common.find_xml(root, 'ReturnValue',
resource_uri).text
if return_value == RET_ERROR:
messages = drac_common.find_xml(root, 'Message',
resource_uri, True)
message_args = drac_common.find_xml(root, 'MessageArguments',
resource_uri, True)
if message_args:
messages = [m.text % p.text for (m, p) in
zip(messages, message_args)]
else:
messages = [m.text for m in messages]
raise exception.DracOperationFailed(message='%r' % messages)
if expected_return and return_value != expected_return:
raise exception.DracUnexpectedReturnValue(
expected_return_value=expected_return,
actual_return_value=return_value)
return root
def _get_root(self, doc):
if doc is None or doc.root() is None:
raise exception.DracClientError(
last_error=self.client.last_error(),
fault_string=self.client.fault_string(),
response_code=self.client.response_code())
root = doc.root()
return ElementTree.fromstring(root.string())
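
# Hedged usage sketch (illustrative, not part of the original module). It shows
# the call shape documented in Client.wsman_invoke above; the URI, method name
# and selector/property values would come from the DCIM class being driven.
def _example_invoke(client, resource_uri, method, selectors=None, properties=None):
    """Illustrative only: invoke a WS-Man method and require RET_SUCCESS."""
    return client.wsman_invoke(resource_uri, method, selectors=selectors,
                               properties=properties,
                               expected_return=RET_SUCCESS)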
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import httmock
import inspect
import io
import json
import mock
import moto
import os
import six
import sys
import time
import zipfile
from .. import base, mock_s3
from girder.constants import AssetstoreType, ROOT_DIR
from girder.utility import assetstore_utilities
from girder.utility.progress import ProgressContext
from girder.utility.s3_assetstore_adapter import makeBotoConnectParams
def setUpModule():
    # We want to exercise the code paths that talk to the real Amazon S3
    # endpoint, so we mock the requests directly rather than running a
    # local S3 server.
base.startServer(mockS3=False)
def tearDownModule():
base.stopServer()
class AssetstoreTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
info = {
'email': 'admin@email.com',
'login': 'admin',
'firstName': 'Admin',
'lastName': 'Admin',
'password': 'adminpassword',
'admin': True
}
self.admin = self.model('user').createUser(**info)
def testCreateAndSetCurrent(self):
# Non admin users should not be able to see assetstore list
resp = self.request(path='/assetstore', method='GET')
self.assertStatus(resp, 401)
resp = self.request(path='/assetstore', method='GET', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json))
oldAssetstore = resp.json[0]
self.assertTrue(oldAssetstore['current'])
self.assertEqual(oldAssetstore['name'], 'Test')
self.assertEqual(oldAssetstore['type'], AssetstoreType.FILESYSTEM)
params = {
'name': 'Test',
'type': -1
}
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'rest',
'message': 'Invalid type parameter'
})
params = {
'name': 'Test',
'type': AssetstoreType.FILESYSTEM
}
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertMissingParameter(resp, 'root')
params['root'] = os.path.join(oldAssetstore['root'], 'other')
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertStatus(resp, 400)
self.assertEqual(resp.json['field'], 'name')
params['name'] = 'New Name'
# Actually creates the new assetstore
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertStatusOk(resp)
assetstore = resp.json
self.assertEqual(assetstore['name'], 'New Name')
self.assertFalse(assetstore['current'])
# Set the new assetstore as current
params = {
'name': assetstore['name'],
'root': assetstore['root'],
'current': True
}
resp = self.request(path='/assetstore/%s' % assetstore['_id'],
method='PUT', user=self.admin, params=params)
self.assertStatusOk(resp)
assetstore = self.model('assetstore').load(resp.json['_id'])
self.assertTrue(assetstore['current'])
# The old assetstore should no longer be current
oldAssetstore = self.model('assetstore').load(oldAssetstore['_id'])
self.assertFalse(oldAssetstore['current'])
# List the assetstores
assetstoresBefore = list(self.model('assetstore').list())
# Now break the root of the new assetstore and make sure we can still
# list it
oldroot = assetstore['root']
assetstore['root'] = '///invalidpath'
self.model('assetstore').save(assetstore, validate=False)
assetstoresAfter = list(self.model('assetstore').list())
self.assertEqual(len(assetstoresBefore), len(assetstoresAfter))
self.assertIsNone([
store for store in assetstoresAfter
if store['_id'] == assetstore['_id']][0]['capacity']['free'])
# restore the original root
assetstore['root'] = oldroot
self.model('assetstore').save(assetstore, validate=False)
def testFilesystemAssetstoreImport(self):
folder = six.next(self.model('folder').childFolders(
self.admin, parentType='user', force=True, filters={
'name': 'Public'
}))
params = {
'importPath': '/nonexistent/dir',
'destinationType': 'folder',
'destinationId': folder['_id']
}
path = '/assetstore/%s/import' % str(self.assetstore['_id'])
resp = self.request(path, method='POST', params=params)
self.assertStatus(resp, 401)
resp = self.request(path, method='POST', params=params, user=self.admin)
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'],
'No such directory: /nonexistent/dir.')
params['importPath'] = os.path.join(
ROOT_DIR, 'tests', 'cases', 'py_client')
resp = self.request(path, method='POST', params=params, user=self.admin)
self.assertStatusOk(resp)
resp = self.request('/resource/lookup', user=self.admin, params={
'path': '/user/admin/Public/testdata/hello.txt/hello.txt'
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['_modelType'], 'file')
file = self.model('file').load(resp.json['_id'], force=True, exc=True)
self.assertTrue(os.path.isfile(file['path']))
# Make sure downloading the file works
resp = self.request('/file/%s/download' % str(file['_id']),
isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'hello\n')
# Deleting the file should not actually remove the file on disk
resp = self.request('/file/' + str(file['_id']), method='DELETE',
user=self.admin)
self.assertStatusOk(resp)
self.assertIsNone(self.model('file').load(file['_id'], force=True))
self.assertTrue(os.path.isfile(file['path']))
def testFilesystemAssetstoreFindInvalidFiles(self):
# Create several files in the assetstore, some of which point to real
# files on disk and some that don't
folder = six.next(self.model('folder').childFolders(
parent=self.admin, parentType='user', force=True, limit=1))
item = self.model('item').createItem('test', self.admin, folder)
path = os.path.join(
ROOT_DIR, 'tests', 'cases', 'py_client', 'testdata', 'hello.txt')
real = self.model('file').createFile(
name='hello.txt', creator=self.admin, item=item,
assetstore=self.assetstore, size=os.path.getsize(path))
real['imported'] = True
real['path'] = path
self.model('file').save(real)
fake = self.model('file').createFile(
name='fake', creator=self.admin, item=item, size=1,
assetstore=self.assetstore)
fake['path'] = 'nonexistent/path/to/file'
fake['sha512'] = '...'
self.model('file').save(fake)
fakeImport = self.model('file').createFile(
name='fakeImport', creator=self.admin, item=item, size=1,
assetstore=self.assetstore)
fakeImport['imported'] = True
fakeImport['path'] = '/nonexistent/path/to/file'
self.model('file').save(fakeImport)
adapter = assetstore_utilities.getAssetstoreAdapter(self.assetstore)
self.assertTrue(inspect.isgeneratorfunction(adapter.findInvalidFiles))
with ProgressContext(True, user=self.admin, title='test') as p:
for i, info in enumerate(
adapter.findInvalidFiles(progress=p, filters={
'imported': True
}), 1):
self.assertEqual(info['reason'], 'missing')
self.assertEqual(info['file']['_id'], fakeImport['_id'])
self.assertEqual(i, 1)
self.assertEqual(p.progress['data']['current'], 2)
self.assertEqual(p.progress['data']['total'], 2)
for i, info in enumerate(
adapter.findInvalidFiles(progress=p), 1):
self.assertEqual(info['reason'], 'missing')
self.assertIn(info['file']['_id'], (
fakeImport['_id'], fake['_id']))
self.assertEqual(i, 2)
self.assertEqual(p.progress['data']['current'], 3)
self.assertEqual(p.progress['data']['total'], 3)
def testDeleteAssetstore(self):
resp = self.request(path='/assetstore', method='GET', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json))
assetstore = self.model('assetstore').load(resp.json[0]['_id'])
# Create a second assetstore so that when we delete the first one, the
# current assetstore will be switched to the second one.
secondStore = self.model('assetstore').createFilesystemAssetstore(
'Another Store', os.path.join(ROOT_DIR, 'tests', 'assetstore',
'server_assetstore_test2'))
# make sure our original asset store is the current one
current = self.model('assetstore').getCurrent()
self.assertEqual(current['_id'], assetstore['_id'])
# Anonymous user should not be able to delete assetstores
resp = self.request(path='/assetstore/%s' % assetstore['_id'],
method='DELETE')
self.assertStatus(resp, 401)
# Simulate the existence of a file within the assetstore
folders = self.model('folder').childFolders(
self.admin, 'user', user=self.admin)
item = self.model('item').createItem(
name='x.txt', creator=self.admin, folder=six.next(folders))
file = self.model('file').createFile(
creator=self.admin, item=item, name='x.txt',
size=1, assetstore=assetstore, mimeType='text/plain')
file['sha512'] = 'x' # add this dummy value to simulate real file
resp = self.request(path='/assetstore/%s' % assetstore['_id'],
method='DELETE', user=self.admin)
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'You may not delete an '
'assetstore that contains files.')
# Delete the offending file, we can now delete the assetstore
self.model('file').remove(file)
resp = self.request(path='/assetstore/%s' % assetstore['_id'],
method='DELETE', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['message'],
'Deleted assetstore %s.' % assetstore['name'])
resp = self.request(path='/assetstore', method='GET', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json))
# Get the current assetstore. It should now be the second store we
# created
current = self.model('assetstore').getCurrent()
self.assertEqual(current['_id'], secondStore['_id'])
def testGridFSAssetstoreAdapter(self):
resp = self.request(path='/assetstore', method='GET', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json))
oldAssetstore = resp.json[0]
self.assertTrue(oldAssetstore['current'])
self.assertEqual(oldAssetstore['name'], 'Test')
# Clear any old DB data
base.dropGridFSDatabase('girder_test_assetstore_create_assetstore')
params = {
'name': 'New Name',
'type': AssetstoreType.GRIDFS
}
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertMissingParameter(resp, 'db')
params['db'] = 'girder_test_assetstore_create_assetstore'
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertStatusOk(resp)
assetstore = resp.json
self.assertEqual(assetstore['name'], 'New Name')
self.assertFalse(assetstore['current'])
# Set the new assetstore as current
params = {
'name': assetstore['name'],
'db': assetstore['db'],
'current': True
}
resp = self.request(path='/assetstore/%s' % assetstore['_id'],
method='PUT', user=self.admin, params=params)
self.assertStatusOk(resp)
assetstore = self.model('assetstore').load(resp.json['_id'])
self.assertTrue(assetstore['current'])
# The old assetstore should no longer be current
oldAssetstore = self.model('assetstore').load(oldAssetstore['_id'])
self.assertFalse(oldAssetstore['current'])
# Test that we can create an assetstore with an alternate mongo host
# and a replica set (but don't bother using an actual replica set).
# Since we are faking the replicaset, we have to bypass validation so
# we don't get exceptions from trying to connect to nonexistent hosts.
# We also hack to make it the current assetstore without using validate.
self.model('assetstore').update({'current': True},
{'$set': {'current': False}})
params = {
'name': 'Replica Set Name',
'type': AssetstoreType.GRIDFS,
'db': 'girder_test_assetstore_create_rs_assetstore',
'mongohost': 'mongodb://127.0.0.1:27080,127.0.0.1:27081,'
'127.0.0.1:27082',
'replicaset': 'replicaset',
'current': True
}
self.model('assetstore').save(params, validate=False)
# Neither of the old assetstores should be current
oldAssetstore = self.model('assetstore').load(oldAssetstore['_id'])
self.assertFalse(oldAssetstore['current'])
assetstore = self.model('assetstore').load(assetstore['_id'])
self.assertFalse(assetstore['current'])
# Getting the assetstores should succeed, even though we can't connect
# to the replica set.
resp = self.request(path='/assetstore', method='GET', user=self.admin)
self.assertStatusOk(resp)
@moto.mock_s3bucket_path
def testS3AssetstoreAdapter(self):
# Delete the default assetstore
self.model('assetstore').remove(self.assetstore)
s3Regex = r'^https://s3.amazonaws.com(:443)?/bucketname/foo/bar'
params = {
'name': 'S3 Assetstore',
'type': AssetstoreType.S3,
'bucket': '',
'accessKeyId': 'someKey',
'secret': 'someSecret',
'prefix': '/foo/bar/'
}
# Validation should fail with empty bucket name
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'validation',
'field': 'bucket',
'message': 'Bucket must not be empty.'
})
params['bucket'] = 'bucketname'
# Validation should fail with a missing bucket
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'validation',
'field': 'bucket',
'message': 'Unable to write into bucket "bucketname".'
})
# Validation should fail with a bogus service name
params['service'] = 'ftp://nowhere'
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertStatus(resp, 400)
del params['service']
# Create a bucket (mocked using moto), so that we can create an
# assetstore in it
botoParams = makeBotoConnectParams(params['accessKeyId'],
params['secret'])
bucket = mock_s3.createBucket(botoParams, 'bucketname')
# Create an assetstore
resp = self.request(path='/assetstore', method='POST', user=self.admin,
params=params)
self.assertStatusOk(resp)
assetstore = self.model('assetstore').load(resp.json['_id'])
# Set the assetstore to current. This is really to test the edit
# assetstore code.
params['current'] = True
resp = self.request(path='/assetstore/%s' % assetstore['_id'],
method='PUT', user=self.admin, params=params)
self.assertStatusOk(resp)
# Test init for a single-chunk upload
folders = self.model('folder').childFolders(self.admin, 'user')
parentFolder = six.next(folders)
params = {
'parentType': 'folder',
'parentId': parentFolder['_id'],
'name': 'My File.txt',
'size': 1024,
'mimeType': 'text/plain'
}
resp = self.request(path='/file', method='POST', user=self.admin,
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['received'], 0)
self.assertEqual(resp.json['size'], 1024)
self.assertEqual(resp.json['behavior'], 's3')
singleChunkUpload = resp.json
s3Info = singleChunkUpload['s3']
self.assertEqual(s3Info['chunked'], False)
self.assertIsInstance(s3Info['chunkLength'], int)
self.assertEqual(s3Info['request']['method'], 'PUT')
six.assertRegex(self, s3Info['request']['url'], s3Regex)
self.assertEqual(s3Info['request']['headers']['x-amz-acl'], 'private')
# Test resume of a single-chunk upload
resp = self.request(path='/file/offset', method='GET', user=self.admin,
params={'uploadId': resp.json['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json['method'], 'PUT')
self.assertTrue('headers' in resp.json)
six.assertRegex(self, resp.json['url'], s3Regex)
# Test finalize for a single-chunk upload
resp = self.request(path='/file/completion', method='POST',
user=self.admin,
params={'uploadId': singleChunkUpload['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json['size'], 1024)
self.assertEqual(resp.json['assetstoreId'], str(assetstore['_id']))
self.assertFalse('s3Key' in resp.json)
self.assertFalse('relpath' in resp.json)
file = self.model('file').load(resp.json['_id'], force=True)
self.assertTrue('s3Key' in file)
self.assertRegexpMatches(file['relpath'], '^/bucketname/foo/bar/')
# Test init for a multi-chunk upload
params['size'] = 1024 * 1024 * 1024 * 5
resp = self.request(path='/file', method='POST', user=self.admin,
params=params)
self.assertStatusOk(resp)
multiChunkUpload = resp.json
s3Info = multiChunkUpload['s3']
self.assertEqual(s3Info['chunked'], True)
self.assertIsInstance(s3Info['chunkLength'], int)
self.assertEqual(s3Info['request']['method'], 'POST')
six.assertRegex(self, s3Info['request']['url'], s3Regex)
# Test uploading a chunk
resp = self.request(path='/file/chunk', method='POST',
user=self.admin, params={
'uploadId': multiChunkUpload['_id'],
'offset': 0,
'chunk': json.dumps({
'partNumber': 1,
's3UploadId': 'abcd'
})
})
self.assertStatusOk(resp)
six.assertRegex(self, resp.json['s3']['request']['url'], s3Regex)
self.assertEqual(resp.json['s3']['request']['method'], 'PUT')
# We should not be able to call file/offset with multi-chunk upload
resp = self.request(path='/file/offset', method='GET', user=self.admin,
params={'uploadId': multiChunkUpload['_id']})
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'validation',
'message': 'You should not call requestOffset on a chunked '
'direct-to-S3 upload.'
})
# Test finalize for a multi-chunk upload
resp = self.request(path='/file/completion', method='POST',
user=self.admin,
params={'uploadId': multiChunkUpload['_id']})
largeFile = resp.json
self.assertStatusOk(resp)
six.assertRegex(self, resp.json['s3FinalizeRequest']['url'], s3Regex)
self.assertEqual(resp.json['s3FinalizeRequest']['method'], 'POST')
# Test init for an empty file (should be no-op)
params['size'] = 0
resp = self.request(path='/file', method='POST', user=self.admin,
params=params)
emptyFile = resp.json
self.assertStatusOk(resp)
self.assertFalse('behavior' in resp.json)
self.assertFalse('s3' in resp.json)
# Test download for an empty file
resp = self.request(path='/file/%s/download' % emptyFile['_id'],
user=self.admin, method='GET', isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), '')
self.assertEqual(resp.headers['Content-Length'], 0)
self.assertEqual(resp.headers['Content-Disposition'],
'attachment; filename="My File.txt"')
# Test download of a non-empty file
resp = self.request(path='/file/%s/download' % largeFile['_id'],
user=self.admin, method='GET', isJson=False)
self.assertStatus(resp, 303)
six.assertRegex(self, resp.headers['Location'], s3Regex)
# Test download of a non-empty file, with Content-Disposition=inline.
# Expect the special S3 header response-content-disposition.
params = {'contentDisposition': 'inline'}
inlineRegex = r'response-content-disposition=' + \
'inline%3B\+filename%3D%22My\+File.txt%22'
resp = self.request(path='/file/%s/download' % largeFile['_id'],
user=self.admin, method='GET', isJson=False,
params=params)
self.assertStatus(resp, 303)
six.assertRegex(self, resp.headers['Location'], s3Regex)
six.assertRegex(self, resp.headers['Location'], inlineRegex)
# Test download as part of a streaming zip
@httmock.all_requests
def s3_pipe_mock(url, request):
if(url.netloc.startswith('s3.amazonaws.com') and
url.scheme == 'https'):
return 'dummy file contents'
else:
raise Exception('Unexpected url %s' % url)
with httmock.HTTMock(s3_pipe_mock):
resp = self.request(
'/folder/%s/download' % parentFolder['_id'],
method='GET', user=self.admin, isJson=False)
self.assertStatusOk(resp)
zip = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)),
'r')
self.assertTrue(zip.testzip() is None)
extracted = zip.read('Public/My File.txt')
self.assertEqual(extracted, b'dummy file contents')
# Attempt to import item directly into user; should fail
resp = self.request(
'/assetstore/%s/import' % assetstore['_id'], method='POST', params={
'importPath': '/foo/bar',
'destinationType': 'user',
'destinationId': self.admin['_id']
}, user=self.admin)
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'],
'Keys cannot be imported directly underneath a user.')
# Import existing data from S3
resp = self.request('/folder', method='POST', params={
'parentType': 'folder',
'parentId': parentFolder['_id'],
            'name': 'import destination'
}, user=self.admin)
self.assertStatusOk(resp)
importFolder = resp.json
resp = self.request(
'/assetstore/%s/import' % assetstore['_id'], method='POST', params={
'importPath': '',
'destinationType': 'folder',
'destinationId': importFolder['_id'],
}, user=self.admin)
self.assertStatusOk(resp)
# Data should now appear in the tree
resp = self.request('/folder', user=self.admin, params={
'parentId': importFolder['_id'],
'parentType': 'folder'
})
self.assertStatusOk(resp)
children = resp.json
self.assertEqual(len(children), 1)
self.assertEqual(children[0]['name'], 'foo')
resp = self.request('/folder', user=self.admin, params={
'parentId': children[0]['_id'],
'parentType': 'folder'
})
self.assertStatusOk(resp)
children = resp.json
self.assertEqual(len(children), 1)
self.assertEqual(children[0]['name'], 'bar')
resp = self.request('/item', user=self.admin, params={
'folderId': children[0]['_id']
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
item = resp.json[0]
self.assertEqual(item['name'], 'test')
self.assertEqual(item['size'], 0)
resp = self.request('/item/%s/files' % str(item['_id']),
user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertFalse('imported' in resp.json[0])
self.assertFalse('relpath' in resp.json[0])
file = self.model('file').load(resp.json[0]['_id'], force=True)
self.assertTrue(file['imported'])
self.assertFalse('relpath' in file)
self.assertEqual(file['size'], 0)
self.assertEqual(file['assetstoreId'], assetstore['_id'])
self.assertTrue(bucket.get_key('/foo/bar/test') is not None)
# Deleting an imported file should not delete it from S3
with mock.patch('girder.events.daemon.trigger') as daemon:
resp = self.request('/item/%s' % str(item['_id']), method='DELETE',
user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(daemon.mock_calls), 0)
# Create the file key in the moto s3 store so that we can test that it
# gets deleted.
file = self.model('file').load(largeFile['_id'], user=self.admin)
bucket.initiate_multipart_upload(file['s3Key'])
key = bucket.new_key(file['s3Key'])
key.set_contents_from_string("test")
# Test delete for a non-empty file
resp = self.request(path='/file/%s' % largeFile['_id'],
user=self.admin, method='DELETE')
self.assertStatusOk(resp)
# The file should be gone now
resp = self.request(path='/file/%s/download' % largeFile['_id'],
user=self.admin, method='GET', isJson=False)
self.assertStatus(resp, 400)
# The actual delete may still be in the event queue, so we want to
# check the S3 bucket directly.
startTime = time.time()
while True:
if bucket.get_key(file['s3Key']) is None:
break
if time.time()-startTime > 15:
break # give up and fail
time.sleep(0.1)
self.assertIsNone(bucket.get_key(file['s3Key']))
resp = self.request(path='/folder/%s' % parentFolder['_id'],
method='DELETE', user=self.admin)
self.assertStatusOk(resp)
# Set the assetstore to read only, attempt to delete it
assetstore['readOnly'] = True
self.model('assetstore').save(assetstore)
def fn(*args, **kwargs):
raise Exception('get_all_multipart_uploads should not be called')
# Must mock globally (too tricky to get a direct mock.patch)
old = sys.modules['boto.s3.bucket'].Bucket.get_all_multipart_uploads
sys.modules['boto.s3.bucket'].Bucket.get_all_multipart_uploads = fn
try:
resp = self.request(path='/assetstore/%s' % assetstore['_id'],
method='DELETE', user=self.admin)
self.assertStatusOk(resp)
finally:
sys.modules['boto.s3.bucket'].Bucket.get_all_multipart_uploads = old
|
|
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
import openstackdocstheme
import yaml
# -- OpenStack-Ansible configuration --------------------------------------
# Variables to override
target_name = 'openstack-ansible'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
previous_series_name = 'victoria'
current_series_name = 'wallaby'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2018, OpenStack-Ansible Contributors'
title = 'OpenStack-Ansible Documentation'
# Substitute variables where we can. The openstackdocstheme has no stable
# interface for this, though. This works with 1.20.
current_series = openstackdocstheme.ext._get_series_name()
if current_series == "latest":
latest_tag = "master"
branch = "master"
upgrade_warning = ("Upgrading to master is not recommended. "
"Master is under heavy development, and is not stable.")
else:
series_names = current_series.capitalize()
latest_tag = subprocess.check_output(["git", "describe", "--abbrev=0",
"--tag"]).strip().decode()
branch = "stable/{}".format(current_series)
upgrade_warning = "The upgrade is always under active development."
CONF_PATH = os.path.dirname(os.path.realpath(__file__))
GNOCCHI_DETAILS = '../../playbooks/defaults/repo_packages/gnocchi.yml'
with open(os.path.join(CONF_PATH, GNOCCHI_DETAILS), 'r') as fdesc:
gnocchi_file_content = yaml.safe_load(fdesc)
gnocchi_branch = gnocchi_file_content['gnocchi_git_track_branch']
# References variable for substitutions
deploy_guide_prefix = ("https://docs.openstack.org/"
"project-deploy-guide/openstack-ansible/"
"{}/%s".format(current_series))
dev_docs_prefix = ("https://docs.openstack.org/openstack-ansible/"
"{}/%s".format(current_series))
# Substitutions loader
rst_epilog = """
.. |current_release_git_branch_name| replace:: {current_release_git_branch_name}
.. |current_release_gnocchi_git_branch_name| replace:: {current_release_gnocchi_git_branch_name}
.. |previous_series_name| replace:: {previous_series_name}
.. |previous_release_formal_name| replace:: {previous_release_formal_name}
.. |current_release_formal_name| replace:: {current_release_formal_name}
.. |latest_tag| replace:: {latest_tag}
.. |upgrade_warning| replace:: {upgrade_warning}
""".format( # noqa: E501
current_release_git_branch_name=branch,
current_release_gnocchi_git_branch_name=gnocchi_branch,
previous_series_name=previous_series_name,
previous_release_formal_name=previous_series_name.capitalize(),
current_release_formal_name=current_series_name.capitalize(),
latest_tag=latest_tag,
upgrade_warning=upgrade_warning,
)
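# A minimal usage sketch (hypothetical document text): the substitutions defined
# above can be used anywhere in the reST sources, for example:
#   Check out the |latest_tag| tag of the |current_release_git_branch_name| branch.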
# Format: Reference name: (string containing %s for substitution, linkname)
extlinks = {'deploy_guide': (deploy_guide_prefix, ''),
'dev_docs': (dev_docs_prefix, '')
}
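# A minimal usage sketch (hypothetical page names): with the mapping above, reST
# sources can link via the extlinks roles, e.g. :deploy_guide:`index.html` or
# :dev_docs:`contributor/index.html`, which expand to the deploy-guide and
# developer-docs URLs for the current series.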
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../inventory/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinxcontrib.rsvgconverter'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/' + target_name
openstackdocs_pdf_link = True
# The bug project is always the same for all our repos
openstackdocs_bug_project = 'openstack-ansible'
openstackdocs_bug_tag = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['admin/maintenance-tasks/galera.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# If true, publish source files
html_copy_source = False
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'doc-' + target_name + '.tex',
title, author, 'manual'),
]
latex_use_xindy = False
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, target_name,
title, [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, target_name,
title, author, openstackdocs_bug_project,
description, category),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
(master_doc, target_name,
title, author)
]
locale_dirs = ['locale/']
|
|
''' convenience functions for ANOVA type analysis with OLS
Note: statistical results of ANOVA are not checked, OLS is
checked but not whether the reported results are the ones used
in ANOVA
includes form2design for creating dummy variables
TODO:
* ...
*
'''
import numpy as np
#from scipy import stats
import scikits.statsmodels as sm
def data2dummy(x, returnall=False):
'''convert array of categories to dummy variables
by default drops dummy variable for last category
uses ravel, 1d only'''
x = x.ravel()
groups = np.unique(x)
if returnall:
return (x[:, None] == groups).astype(int)
else:
return (x[:, None] == groups).astype(int)[:,:-1]
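# A small illustration (hypothetical labels): three categories yield two dummy
# columns by default, or three columns with returnall=True.
#   data2dummy(np.array([0, 1, 2, 1])).shape                    # -> (4, 2)
#   data2dummy(np.array([0, 1, 2, 1]), returnall=True).shape    # -> (4, 3)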
def data2proddummy(x):
'''creates product dummy variables from 2 columns of 2d array
drops last dummy variable, but not from each category
singular with simple dummy variable but not with constant
quickly written, no safeguards
'''
#brute force, assumes x is 2d
#replace with encoding if possible
groups = np.unique(map(tuple, x.tolist()))
#includes singularity with additive factors
return (x==groups[:,None,:]).all(-1).T.astype(int)[:,:-1]
def data2groupcont(x1,x2):
'''create dummy continuous variable
Parameters
----------
x1 : 1d array
label or group array
x2 : 1d array (float)
continuous variable
Notes
-----
useful for group specific slope coefficients in regression
'''
if x2.ndim == 1:
x2 = x2[:,None]
dummy = data2dummy(x1, returnall=True)
return dummy * x2
# Result strings
#the second leaves the constant in, not with NIST regression
#but something fishy with res.ess negative in examples ?
#not checked if these are all the right ones
anova_str0 = '''
ANOVA statistics (model sum of squares excludes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ess)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
anova_str = '''
ANOVA statistics (model sum of squares includes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ssmwithmean)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
def anovadict(res):
'''update regression results dictionary with ANOVA specific statistics
not checked for completeness
'''
ad = {}
ad.update(res.__dict__)
ad['ssmwithmean'] = res.uncentered_tss - res.ssr
return ad
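# A minimal usage sketch (assuming `res` is an OLS results instance, as in the
# examples under __main__ below): combine anovadict with the template strings
# above to print an ANOVA-style table.
#   res = sm.OLS(y, X).results
#   print anova_str % anovadict(res)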
def form2design(ss, data):
'''convert string formula to data dictionary
ss : string
* I : add constant
* varname : for simple varnames data is used as is
* F:varname : create dummy variables for factor varname
* P:varname1*varname2 : create product dummy variables for
varnames
* G:varname1*varname2 : create product between factor and
continuous variable
data : dict or structured array
data set, access of variables by name as in dictionaries
Returns
-------
vars : dictionary
dictionary of variables with converted dummy variables
names : list
list of names, product (P:) and grouped continuous
variables (G:) have name by joining individual names
sorted according to input
Examples
--------
>>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
>>> xx.keys()
['a', 'b', 'const', 'cf', 'cd']
>>> n
['const', 'a', 'b', 'cd', 'cf']
Notes
-----
with sorted dict, separate name list wouldn't be necessary
'''
vars = {}
names = []
for item in ss.split():
if item == 'I':
vars['const'] = np.ones(data.shape[0])
names.append('const')
elif not ':' in item:
vars[item] = data[item]
names.append(item)
elif item[:2] == 'F:':
v = item.split(':')[1]
vars[v] = data2dummy(data[v])
names.append(v)
elif item[:2] == 'P:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2proddummy(np.c_[data[v[0]],data[v[1]]])
names.append(''.join(v))
elif item[:2] == 'G:':
v = item.split(':')[1].split('*')
vars[''.join(v)] = data2groupcont(data[v[0]], data[v[1]])
names.append(''.join(v))
else:
raise ValueError, 'unknown expression in formula'
return vars, names
def dropname(ss, li):
'''drop names from a list of strings,
    names to drop are in a space delimited list
does not change original list
'''
newli = li[:]
for item in ss.split():
newli.remove(item)
return newli
if __name__ == '__main__':
# Test Example with created data
# ------------------------------
nobs = 1000
testdataint = np.random.randint(3, size=(nobs,4)).view([('a',int),('b',int),('c',int),('d',int)])
testdatacont = np.random.normal( size=(nobs,2)).view([('e',float), ('f',float)])
import numpy.lib.recfunctions
dt2 = numpy.lib.recfunctions.zip_descr((testdataint, testdatacont),flatten=True)
# concatenate structured arrays
testdata = np.empty((nobs,1), dt2)
for name in testdataint.dtype.names:
testdata[name] = testdataint[name]
for name in testdatacont.dtype.names:
testdata[name] = testdatacont[name]
#print form2design('a',testdata)
if 0: # print only when nobs is small, e.g. nobs=10
xx, n = form2design('F:a',testdata)
print xx
print form2design('P:a*b',testdata)
print data2proddummy((np.c_[testdata['a'],testdata['b']]))
xx, names = form2design('a F:b P:c*d',testdata)
#xx, names = form2design('I a F:b F:c F:d P:c*d',testdata)
xx, names = form2design('I a F:b P:c*d', testdata)
xx, names = form2design('I a F:b P:c*d G:a*e f', testdata)
X = np.column_stack([xx[nn] for nn in names])
# simple test version: all coefficients equal to one
y = X.sum(1) + 0.01*np.random.normal(size=(nobs))
rest1 = sm.OLS(y,X).results
print rest1.params
print anova_str % anovadict(rest1)
X = np.column_stack([xx[nn] for nn in dropname('ae f', names)])
# simple test version: all coefficients equal to one
y = X.sum(1) + 0.01*np.random.normal(size=(nobs))
rest1 = sm.OLS(y,X).results
print rest1.params
print anova_str % anovadict(rest1)
# Example: from Bruce
# -------------------
#get data and clean it
#^^^^^^^^^^^^^^^^^^^^^
# requires file 'dftest3.data' posted by Bruce
# read data set and drop rows with missing data
dt_b = np.dtype([('breed', int), ('sex', int), ('litter', int),
('pen', int), ('pig', int), ('age', float),
('bage', float), ('y', float)])
dta = np.genfromtxt('dftest3.data', dt_b,missing='.', usemask=True)
print 'missing', [dta.mask[k].sum() for k in dta.dtype.names]
m = dta.mask.view(bool)
droprows = m.reshape(-1,len(dta.dtype.names)).any(1)
# get complete data as plain structured array
# maybe doesn't work with masked arrays
dta_use_b1 = dta[~droprows,:].data
print dta_use_b1.shape
print dta_use_b1.dtype
#Example b1: variables from Bruce's glm
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# prepare data and dummy variables
xx_b1, names_b1 = form2design('I F:sex age', dta_use_b1)
# create design matrix
X_b1 = np.column_stack([xx_b1[nn] for nn in dropname('', names_b1)])
y_b1 = dta_use_b1['y']
# estimate using OLS
rest_b1 = sm.OLS(y_b1, X_b1).results
# print results
print rest_b1.params
print anova_str % anovadict(rest_b1)
#compare with original version only in original version
#print anova_str % anovadict(res_b0)
# Example: use all variables except pig identifier
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
allexog = ' '.join(dta.dtype.names[:-1])
#'breed sex litter pen pig age bage'
xx_b1a, names_b1a = form2design('I F:breed F:sex F:litter F:pen age bage', dta_use_b1)
X_b1a = np.column_stack([xx_b1a[nn] for nn in dropname('', names_b1a)])
y_b1a = dta_use_b1['y']
rest_b1a = sm.OLS(y_b1a, X_b1a).results
print rest_b1a.params
print anova_str % anovadict(rest_b1a)
for dropn in names_b1a:
print '\nResults dropping', dropn
X_b1a_ = np.column_stack([xx_b1a[nn] for nn in dropname(dropn, names_b1a)])
y_b1a_ = dta_use_b1['y']
rest_b1a_ = sm.OLS(y_b1a_, X_b1a_).results
#print rest_b1a_.params
print anova_str % anovadict(rest_b1a_)
|
|
import logging
import os
import collections
import operator
import copy
from functools import cmp_to_key
from future.utils import viewitems
class NullHandler(logging.Handler):
"""
For backward-compatibility with Python 2.6, a local class definition
is used instead of logging.NullHandler
"""
def emit(self, record):
pass
REQUESTLOGGER = logging.getLogger('log_all_requests_of_testcases_to_file')
REQUESTLOGGER.addHandler(NullHandler())
def failure_message(expected, passed, methodname):
    msg = 'The PUT request payload that the method "' + methodname + '" assembled differs from the expected. This does not necessarily mean that it is wrong, it might just be a different way of talking to the Handle Server. Please run an integration test to check this and update the expected PUT request accordingly.\nCreated: ' + str(passed) + '\nExpected: ' + str(expected)
return msg
def replace_timestamps(jsonobject):
''' Replace timestamp values by "xxx" because their values do not matter.'''
# Replace:
if type(jsonobject) == type({}):
if 'timestamp' in jsonobject:
jsonobject['timestamp'] = 'xxx'
# Recursion:
if type(jsonobject) == type({'b':2}):
for item in jsonobject.items():
replace_timestamps(item)
elif type(jsonobject) == type([2, 2]) or type(jsonobject) == type((2, 2)):
for item in jsonobject:
replace_timestamps(item)
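# A minimal usage sketch (hypothetical handle record): nested timestamps are
# replaced in place, so records that differ only in their timestamps compare
# equal afterwards.
#   rec = {'handle': '123/ABC', 'values': [{'index': 1, 'timestamp': '2015-01-01T00:00:00Z'}]}
#   replace_timestamps(rec)
#   assert rec['values'][0]['timestamp'] == 'xxx'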
def log_new_case(name):
REQUESTLOGGER.info('\n' + 60 * '*' + '\n*** ' + name + '\n' + 60 * '*' + '\n')
def log_request_response_to_file(op, handle, url, head, veri, resp, payload=None):
space = '\n '
message = ''
message += op + ' ' + handle
message += space + 'URL: ' + url
message += space + 'HEADERS: ' + str(head)
message += space + 'VERIFY: ' + str(veri)
if payload is not None:
message += space + 'PAYLOAD:' + space + str(payload)
message += space + 'RESPONSECODE: ' + str(resp.status_code)
message += space + 'RESPONSE:' + space + str(resp.content)
REQUESTLOGGER.info(message)
def log_start_test_code():
REQUESTLOGGER.info('--->')
def log_end_test_code():
REQUESTLOGGER.info('---<')
def sort_lists(jsonobject):
'''
Deprecated!
'''
# DEPRECATED!!!
#
# TODO: The whole function does not sort anything, as "sorted" returns
# a new list instead of modifying the existing one. So the sorted version
# vanishes in neverland... nowhereland. And the function returns None.
# Which, when compared, is... None. Yay. All tests pass.
    msg = 'This sort function returns false positives when comparing sorted test results. '
    msg += 'Use flattensort() instead.'
raise ValueError(msg)
# Sort:
if type(jsonobject) == type([]):
sorted(jsonobject, key=lambda x:sorted(x.keys()))
# Python 2.6.6:
# sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list
# Python 3.7.1:
# Return a new list containing all items from the iterable in ascending order.
# A custom key function can be supplied to customize the sort order, and the
# reverse flag can be set to request the result in descending order.
# Recursion:
if type(jsonobject) == type({'b':2}):
for item in jsonobject.items():
sort_lists(item)
elif type(jsonobject) == type([2, 2]) or type(jsonobject) == type((2, 2)):
        # TODO Isn't the first comparison redundant, as lists are caught above?
for item in jsonobject:
sort_lists(item)
def flattensort(jsonobject):
'''
Take a complex object (JSON object: nested dicts and lists of
any depth) and flatten and sort it.
    This is done recursively: the leaves are sorted and flattened
    first. Then we go up and sort and flatten each level.
Purpose: Compare JSON objects in unit tests, where the order of
the objects does not matter, and they can be quite deeply nested.
Test using these:
# These are the same, just differently sorted:
x = {"values": [{"index": 100, "type": "HS_ADMIN", "data": {"value": {"permissions": "011111110011", "index": "200", "handle": "0.NA/my"}, "format": "admin"}}, {"index": 1, "type": "URL", "data": "http://foo.bar"}, {"index": 2, "type": "CHECKSUM", "data": "123456"}, {"index": 4, "type": "BAR", "data": "bar"}, {"index": 3, "type": "FOO", "data": "foo"}, {"index": 5, "type": "10320/LOC", "data": "<locations><location href=\"http://bar.bar\" id=\"0\" /><location href=\"http://foo.foo\" id=\"1\" /></locations>"}]}
y = {"values": [{"type": "HS_ADMIN", "index": 100, "data": {"value": {"index": "200", "handle": "0.NA/my", "permissions": "011111110011"}, "format": "admin"}}, {"index": 1, "type": "URL", "data": "http://foo.bar"}, {"index": 2, "type": "CHECKSUM", "data": "123456"}, {"index": 3, "type": "FOO", "data": "foo"}, {"index": 4, "type": "BAR", "data": "bar"}, {"index": 5, "type": "10320/LOC", "data": "<locations><location href=\"http://bar.bar\" id=\"0\" /><location href=\"http://foo.foo\" id=\"1\" /></locations>"}]}
# These are actually different:
x = {'values': [{'index': 100, 'type': 'HS_ADMIN', 'data': {'value': {'index': '200', 'handle': '0.NA/my', 'permissions': '011111110011'}, 'format': 'admin'}}, {'index': 2, 'type': 'FOO', 'data': 'foo'}, {'index': 3, 'type': 'BAR', 'data': 'bar'}, {'index': 1, 'type': 'URL', 'data': 'http://foo.bar'}, {'index': 4, 'type': 'CHECKSUM', 'data': '123456'}]}
y = {'values': [{'index': 100, 'type': 'HS_ADMIN', 'data': {'value': {'index': '200', 'handle': '0.NA/my', 'permissions': '011111110011'}, 'format': 'admin'}}, {'index': 1, 'type': 'URL', 'data': 'http://foo.bar'}, {'index': 2, 'type': 'CHECKSUM', 'data': '123456'}, {'index': 3, 'type': 'FOO', 'data': 'foo'}, {'index': 4, 'type': 'BAR', 'data': 'bar'}, {'index': 5, 'type': '10320/LOC', 'data': '<locations><location href="http://bar.bar" id="0" /><location href="http://foo.foo" id="1" /></locations>'}]}
print('x==x: %s' % (x==x))
print('y==y: %s' % (y==y))
print('x==y: %s' % (x==y))
print('rx==ry: %s' % (flattensort(x)==flattensort(y)))
'''
# Sorting a leave (end of recursion):
# If it is a shallow, simple list, we can use sort().
if type(jsonobject) == type([]):
res = copy.deepcopy(jsonobject)
# Why? The sort() can alter the list before failing, so
# we must operate on a deep-copy.
try:
res.sort()
# We make it a string so that the deep lists become flat lists and
# can be sorted eventually. Otherwise we'd sort all the leaves, but
# still could not sort the higher levels.
res = ','.join(res)
res = '['+res+']'
return res
except TypeError as e:
# Not a shallow list, or items have uncomparable types.
pass
# Shallow list of various types: Recursion
res = []
shallow = True
for item in jsonobject:
if isinstance(item, list) or isinstance(item, tuple) or isinstance(item, dict):
shallow = False
res = None
break
else:
res.append(str(item))
if shallow:
res.sort()
res = ','.join(res)
res = '['+res+']'
return res
# Deep list: Recursion
res = []
for item in jsonobject:
item = flattensort(item)
res.append(item)
# The deep list's entries were now all flattened, so we
# can and must sort them (then flatten):
res.sort()
res = ','.join(res)
res = '['+res+']'
return res
# Dictionary: Recursion
if type(jsonobject) == type({'b':2}):
res = []
for item in jsonobject.items():
item = flattensort(item)
res.append(item)
# The dictionary's entries were now all flattened, so we
# can and must sort them (then flatten):
res.sort()
res = ','.join(res)
res = '{'+res+'}'
return res
# Tuples: Recursion
elif type(jsonobject) == type((2, 2)):
if not len(jsonobject) == 2:
            raise ValueError('Tuple of length %s, expected 2!' % len(jsonobject))
# Here, tuples are dictionary entries, kv pairs, so we don't
# need to sort them but just flatten them to string:
tup1 = flattensort(jsonobject[0])
tup2 = flattensort(jsonobject[1])
res = tup1+':'+tup2
return res
# Simple types: Just return
try:
if isinstance(jsonobject, basestring):
return '"'+jsonobject+'"'
except NameError:
if isinstance(jsonobject, str):
return '"'+jsonobject+'"'
return str(jsonobject)
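# A compact check of the behaviour described in the flattensort() docstring
# (hypothetical data): objects that differ only in ordering flatten to the same
# string and therefore compare equal.
#   a = {'values': [{'index': 1, 'type': 'URL'}, {'index': 2, 'type': 'FOO'}]}
#   b = {'values': [{'type': 'FOO', 'index': 2}, {'type': 'URL', 'index': 1}]}
#   assert flattensort(a) == flattensort(b)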
|
|
def mark_descendants(nodes):
for node in nodes:
node.descendant = True
mark_descendants(node.childrens)
def make_tree(request, items, levels, url, ancestors, descendants=False, current_level=0, to_levels=100, active_levels=0):
from cms.models import Page
"""
builds the tree of all the navigation extender nodes and marks them with some metadata
"""
levels -= 1
current_level += 1
found = False
for item in items:
item.level = current_level
if descendants and not found:
item.descendant = True
item.ancestors_ascending = ancestors
if item.get_absolute_url() == url:
item.selected = True
item.descendant = False
levels = active_levels
descendants = True
found = True
last = None
for anc in ancestors:
if not isinstance(anc, Page) and last:
last = None
if hasattr(last, 'childrens'):
for child in last.childrens:
if isinstance(child, Page):
child.sibling = True
else:
last = anc
anc.ancestor = True
if last:
if hasattr(last, 'childrens'):
for child in last.childrens:
if isinstance(child, Page):
child.sibling = True
elif found:
item.sibling = True
if levels == 0 and not hasattr(item, "ancestor" ) or item.level == to_levels or not hasattr(item, "childrens"):
item.childrens = []
else:
make_tree(request, item.childrens, levels, url, ancestors+[item], descendants, current_level, to_levels, active_levels)
if found:
for item in items:
if not hasattr(item, "selected"):
item.sibling = True
def get_extended_navigation_nodes(request, levels, ancestors, current_level, to_levels, active_levels, mark_sibling, path):
"""
discovers all navigation nodes from navigation extenders
"""
func_name = path.split(".")[-1]
ext = __import__(".".join(path.split(".")[:-1]),(),(),(func_name,))
func = getattr(ext, func_name)
items = func(request)
descendants = False
for anc in ancestors:
if hasattr(anc, 'selected'):
if anc.selected:
descendants = True
if len(ancestors) and hasattr(ancestors[-1], 'ancestor'):
make_tree(request, items, 100, request.path, ancestors, descendants, current_level, 100, active_levels)
make_tree(request, items, levels, request.path, ancestors, descendants, current_level, to_levels, active_levels)
if mark_sibling:
for item in items:
if not hasattr(item, "selected" ):
item.sibling = True
return items
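# A minimal sketch of a navigation extender (hypothetical module my_app.menu):
# the dotted path passed to get_extended_navigation_nodes is imported and its
# last attribute is called with the request, returning the extender's nodes.
#   def get_nodes(request):
#       # each node should expose get_absolute_url() and may carry `childrens`
#       return list_of_nodes
# It would then be referenced elsewhere as the string "my_app.menu.get_nodes".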
def find_children(target, pages, levels=100, active_levels=0, ancestors=None, selected_pk=0, soft_roots=True, request=None, no_extended=False, to_levels=100):
"""
recursive function for marking all children and handling the active and inactive trees with the level limits
"""
if not hasattr(target, "childrens"):
target.childrens = []
if ancestors == None:
ancestors = []
if target.pk in ancestors:
target.ancestor = True
if target.pk == selected_pk:
levels = active_levels
if (levels <= 0 or (target.soft_root and soft_roots)) and not target.pk in ancestors:
return
mark_sibling = False
for page in pages:
if page.parent_id and page.parent_id == target.pk:
if hasattr(target, "selected") or hasattr(target, "descendant"):
page.descendant = True
if len(target.childrens):
target.childrens[-1].last = False
page.ancestors_ascending = [target] + list(target.ancestors_ascending)
page.home_pk_cache = target.home_pk_cache
page.last = True
target.childrens.append(page)
find_children(page,
pages,
levels-1,
active_levels,
ancestors,
selected_pk,
soft_roots,
request,
no_extended,
to_levels)
if hasattr(page, "selected"):
mark_sibling = True
if target.navigation_extenders and (levels > 0 or target.pk in ancestors) and not no_extended and target.level < to_levels:
target.childrens += get_extended_navigation_nodes(request,
levels,
list(target.ancestors_ascending) + [target],
target.level,
to_levels,
active_levels,
mark_sibling,
target.navigation_extenders)
def cut_levels(nodes, level):
"""
For cutting the nav_extender levels if you have a from_level in the navigation.
"""
result = []
if nodes:
if nodes[0].level == level:
return nodes
for node in nodes:
result += cut_levels(node.childrens, level)
return result
def find_selected(nodes):
"""
Finds a selected nav_extender node
"""
for node in nodes:
if hasattr(node, "selected"):
return node
if hasattr(node, "ancestor"):
result = find_selected(node.childrens)
if result:
return result
def set_language_changer(request, func):
"""
Sets a language chooser function that accepts one parameter: language
The function should return a url in the supplied language
normally you would want to give it the get_absolute_url function with an optional language parameter
example:
def get_absolute_url(self, language=None):
            return reverse('product_view', args=[self.get_slug(language=language)])
Use this function in your nav extender views that have i18n slugs.
"""
request._language_changer = func
def language_changer_decorator(language_changer):
"""
A decorator wrapper for set_language_changer.
from menus.utils import language_changer_decorator
@language_changer_decorator(function_get_language_changer_url)
def my_view_function(request, somearg):
pass
"""
def _decorator(func):
def _wrapped(request, *args, **kwargs):
set_language_changer(request, language_changer)
return func(request, *args, **kwargs)
_wrapped.__name__ = func.__name__
_wrapped.__doc__ = func.__doc__
return _wrapped
return _decorator
def simple_language_changer(func):
def _wrapped(request, *args, **kwargs):
def _language_changer(lang):
return request.path
set_language_changer(request, _language_changer)
return func(request, *args, **kwargs)
_wrapped.__name__ = func.__name__
_wrapped.__doc__ = func.__doc__
return _wrapped
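# A minimal usage sketch (hypothetical view): for views without i18n slugs,
# simple_language_changer keeps the current path for every language.
#   @simple_language_changer
#   def my_view(request):
#       ...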
from django.conf import settings
def handle_navigation_manipulators(navigation_tree, request):
for handler_function_name, name in settings.CMS_NAVIGATION_MODIFIERS:
func_name = handler_function_name.split(".")[-1]
modifier = __import__(".".join(handler_function_name.split(".")[:-1]),(),(),(func_name,))
handler_func = getattr(modifier, func_name)
handler_func(navigation_tree, request)
return navigation_tree
|
|
# -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
import os
import scandir
import subprocess
from six import text_type
from django.conf import settings
log = logging.getLogger(__name__)
class PullFromRepositoryException(Exception):
pass
class PullFromRepository(object):
def __init__(self, source, target, branch):
self.source = source
self.target = target
self.branch = branch
def pull(self, source=None, target=None):
raise NotImplementedError
class PullFromGit(PullFromRepository):
def pull(self, source=None, target=None, branch=None):
log.debug("Git: Update repository.")
source = source or self.source
target = target or self.target
branch = branch or self.branch
command = ["git", "fetch", "--all"]
execute(command, target)
# Undo local changes
remote = "origin"
if branch:
remote += "/" + branch
command = ["git", "reset", "--hard", remote]
code, output, error = execute(command, target)
if code != 0:
log.info("Git: " + text_type(error))
log.debug("Git: Clone instead.")
command = ["git", "clone", source, target]
code, output, error = execute(command)
if code != 0:
raise PullFromRepositoryException(text_type(error))
log.debug("Git: Repository at " + source + " cloned.")
else:
log.debug("Git: Repository at " + source + " updated.")
if branch:
command = ["git", "checkout", branch]
code, output, error = execute(command, target)
if code != 0:
raise PullFromRepositoryException(text_type(error))
log.debug("Git: Branch " + branch + " checked out.")
class PullFromHg(PullFromRepository):
def pull(self, source=None, target=None):
log.debug("Mercurial: Update repository.")
source = source or self.source
target = target or self.target
# Undo local changes: Mercurial doesn't offer anything more elegant
command = ["rm", "-rf", target]
code, output, error = execute(command)
command = ["hg", "clone", source, target]
code, output, error = execute(command)
if code == 0:
log.debug("Mercurial: Repository at " + source + " cloned.")
else:
raise PullFromRepositoryException(text_type(error))
class PullFromSvn(PullFromRepository):
def pull(self, source=None, target=None):
log.debug("Subversion: Checkout or update repository.")
source = source or self.source
target = target or self.target
if os.path.exists(target):
status = "updated"
command = ["svn", "update", "--accept", "theirs-full", target]
else:
status = "checked out"
command = ["svn", "checkout", "--trust-server-cert",
"--non-interactive", source, target]
code, output, error = execute(command, env=get_svn_env())
if code != 0:
raise PullFromRepositoryException(text_type(error))
log.debug("Subversion: Repository at " + source + " %s." % status)
class CommitToRepositoryException(Exception):
pass
class CommitToRepository(object):
def __init__(self, path, message, user, branch, url):
self.path = path
self.message = message
self.user = user
self.url = url
self.branch = branch
def commit(self, path=None, message=None, user=None):
raise NotImplementedError
def nothing_to_commit(self):
return log.warning('Nothing to commit')
class CommitToGit(CommitToRepository):
def commit(self, path=None, message=None, user=None, branch=None):
log.debug("Git: Commit to repository.")
path = path or self.path
message = message or self.message
user = user or self.user
branch = branch or self.branch
author = user.display_name_and_email
# Embed git identity info into commands
git_cmd = ['git', '-c', 'user.name=Mozilla Pontoon', '-c',
'user.email=pontoon@mozilla.com']
# Add new and remove missing paths
execute(git_cmd + ['add', '-A', '--', path], path)
# Commit
commit = git_cmd + ['commit', '-m', message, '--author', author]
code, output, error = execute(commit, path)
if code != 0 and len(error):
raise CommitToRepositoryException(text_type(error))
# Push
push_target = 'HEAD'
if branch:
push_target = branch
push = ["git", "push", self.url, push_target]
code, output, error = execute(push, path)
if code != 0:
raise CommitToRepositoryException(text_type(error))
if 'Everything up-to-date' in error:
return self.nothing_to_commit()
log.info(message)
class CommitToHg(CommitToRepository):
def commit(self, path=None, message=None, user=None):
log.debug("Mercurial: Commit to repository.")
path = path or self.path
message = message or self.message
user = user or self.user
author = user.display_name_and_email
# Add new and remove missing paths
add = ["hg", "addremove"]
execute(add, path)
# Commit
commit = ["hg", "commit", "-m", message, "-u", author]
code, output, error = execute(commit, path)
if code != 0 and len(error):
raise CommitToRepositoryException(text_type(error))
# Push
push = ["hg", "push"]
code, output, error = execute(push, path)
if code == 1 and 'no changes found' in output:
return self.nothing_to_commit()
if code != 0 and len(error):
raise CommitToRepositoryException(text_type(error))
log.info(message)
class CommitToSvn(CommitToRepository):
def commit(self, path=None, message=None, user=None):
log.debug("Subversion: Commit to repository.")
path = path or self.path
message = message or self.message
user = user or self.user
author = user.display_name_and_email
# Commit
command = ["svn", "commit", "-m", message, "--with-revprop",
"author=%s" % author, path]
code, output, error = execute(command, env=get_svn_env())
if code != 0:
raise CommitToRepositoryException(error.decode('utf-8'))
if not output and not error:
return self.nothing_to_commit()
log.info(message)
def execute(command, cwd=None, env=None):
try:
st = subprocess.PIPE
proc = subprocess.Popen(
args=command, stdout=st, stderr=st, stdin=st, cwd=cwd, env=env)
(output, error) = proc.communicate()
code = proc.returncode
return code, output, error
except OSError as error:
return -1, "", error
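# A minimal usage sketch (hypothetical repository path): execute() returns the
# exit code together with the captured stdout and stderr, leaving error
# handling to the caller.
#   code, output, error = execute(["git", "status", "--short"], cwd="/path/to/repo")
#   if code != 0:
#       log.error(error)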
def update_from_vcs(repo_type, url, path, branch):
obj = globals()['PullFrom%s' % repo_type.capitalize()](url, path, branch)
obj.pull()
def commit_to_vcs(repo_type, path, message, user, branch, url):
try:
obj = globals()['CommitTo%s' % repo_type.capitalize()](
path, message, user, branch, url)
return obj.commit()
except CommitToRepositoryException as e:
log.debug('%s Commit Error for %s: %s' % (repo_type.upper(), path, e))
raise e
def get_svn_env():
"""Return an environment dict for running SVN in."""
if settings.SVN_LD_LIBRARY_PATH:
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = (settings.SVN_LD_LIBRARY_PATH + ':' +
env['LD_LIBRARY_PATH'])
return env
else:
return None
class VCSRepository(object):
@classmethod
def for_type(cls, repo_type, path):
SubClass = cls.REPO_TYPES.get(repo_type)
if SubClass is None:
raise ValueError('No subclass found for repo type {0}.'.format(repo_type))
return SubClass(path)
def __init__(self, path):
self.path = path
def execute(self, cmd, cwd=None, env=None, log_errors=True):
cwd = cwd or self.path
code, output, error = execute(cmd, cwd=cwd, env=env)
if log_errors and code != 0:
log.error('Error while executing command `{cmd}` in `{cwd}`: {stderr}'.format(
cmd=text_type(cmd), cwd=cwd, stderr=error
))
return code, output, error
    def get_changed_files(self, path, from_revision, statuses=None):
"""Get a list of changed files in the repository."""
raise NotImplementedError
def get_removed_files(self, from_revision):
"""Get a list of removed files in the repository."""
raise NotImplementedError
class SvnRepository(VCSRepository):
def execute(self, cmd, cwd=None, env=None, log_errors=False):
return execute(cmd, cwd=cwd, env=get_svn_env())
@property
def revision(self):
code, output, error = self.execute(['svnversion', self.path], log_errors=True)
return output.strip() if code == 0 else None
def get_changed_files(self, path, from_revision, statuses=None):
statuses = statuses or ('A', 'M')
def normalize_revision(rev):
"""Remove all non digit characters from the revision number. """
return ''.join(filter(lambda c: c.isdigit(), rev))
from_revision = normalize_revision(from_revision)
code, output, error = self.execute(
['svn', 'diff', '-r', '{}:{}'.format(from_revision, 'HEAD'), '--summarize'],
cwd=path
)
if code == 0:
            # Mark added/modified files as the changed ones
return [line.split()[1] for line in output.split('\n') if line and line[0] in statuses]
return []
def get_removed_files(self, path, from_revision):
return self.get_changed_files(path, from_revision, ('D',))
class GitRepository(VCSRepository):
@property
def revision(self):
code, output, error = self.execute(
['git', 'rev-parse', 'HEAD'],
)
return output.strip() if code == 0 else None
def get_changed_files(self, path, from_revision, statuses=None):
statuses = statuses or ('A', 'M')
code, output, error = self.execute(
['git', 'diff', '--name-status', '{}..HEAD'.format(from_revision), '--', path],
)
if code == 0:
return [line.split()[1] for line in output.split('\n') if line and line[0] in statuses]
return []
def get_removed_files(self, path, from_revision):
return self.get_changed_files(path, from_revision, ('D',))
class HgRepository(VCSRepository):
@property
def revision(self):
code, output, error = self.execute(
['hg', 'identify', '--id', '--rev=default'],
cwd=self.path,
log_errors=True
)
return output.strip() if code == 0 else None
def _strip(self, rev):
"Ignore trailing + in revision number. It marks local changes."
return rev.rstrip('+')
def get_changed_files(self, path, from_revision, statuses=None):
statuses = statuses or ('A', 'M')
code, output, error = self.execute(
[
'hg', 'status', '-a', '-m', '-r', '--rev={}'.format(self._strip(from_revision)),
'--rev=default',
],
cwd=path
)
if code == 0:
# Mark added / modified files as the changed ones
return [line.split()[1] for line in output.split('\n') if line and line[0] in statuses]
return []
def get_removed_files(self, path, from_revision):
return self.get_changed_files(path, self._strip(from_revision), ('R',))
# TODO: Tie these to the same constants that the Repository model uses.
VCSRepository.REPO_TYPES = {
'hg': HgRepository,
'svn': SvnRepository,
'git': GitRepository,
}
def get_revision(repo_type, path):
repo = VCSRepository.for_type(repo_type, path)
return repo.revision
def get_changed_files(repo_type, path, revision):
"""Return a list of changed files for the repository."""
repo = VCSRepository.for_type(repo_type, path)
log.info('Retrieving changed files for: {}:{}'.format(path, revision))
# If there's no latest revision we should return all the files in the latest
# version of repository
if revision is None:
paths = []
for root, _, files in scandir.walk(path):
for f in files:
if root[0] == '.' or '/.' in root:
continue
paths.append(os.path.join(root, f).replace(path + '/', ''))
return paths, []
return repo.get_changed_files(path, revision), repo.get_removed_files(path, revision)
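# A minimal usage sketch (hypothetical checkout path and revision): resolve a
# repository wrapper by type, then query its current revision and the files
# changed or removed since a known revision.
#   repo = VCSRepository.for_type('git', '/path/to/checkout')
#   current = repo.revision
#   changed = repo.get_changed_files('/path/to/checkout', 'abc1234')
#   removed = repo.get_removed_files('/path/to/checkout', 'abc1234')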
|
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import time
import datetime
import threading
import Queue
from pyalgotrade import bar
from pyalgotrade import barfeed
from pyalgotrade import dataseries
from pyalgotrade import resamplebase
import pyalgotrade.logger
from pyalgotrade.utils import dt
import api
logger = pyalgotrade.logger.getLogger("xignite")
def utcnow():
return dt.as_utc(datetime.datetime.utcnow())
class PollingThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.__stopped = False
def __wait(self):
        # Wait until getNextCallDateTime, checking for cancellation every 0.5 seconds.
nextCall = self.getNextCallDateTime()
while not self.__stopped and utcnow() < nextCall:
time.sleep(0.5)
def stop(self):
self.__stopped = True
def stopped(self):
return self.__stopped
def run(self):
logger.debug("Thread started.")
while not self.__stopped:
self.__wait()
if not self.__stopped:
try:
self.doCall()
except Exception, e:
logger.critical("Unhandled exception", exc_info=e)
logger.debug("Thread finished.")
# Must return a non-naive datetime.
def getNextCallDateTime(self):
raise NotImplementedError()
def doCall(self):
raise NotImplementedError()
def build_bar(barDict, identifier, frequency):
# "StartDate": "3/19/2014"
# "StartTime": "9:55:00 AM"
# "EndDate": "3/19/2014"
# "EndTime": "10:00:00 AM"
# "UTCOffset": 0
# "Open": 31.71
# "High": 31.71
# "Low": 31.68
# "Close": 31.69
# "Volume": 2966
# "Trades": 19
# "TWAP": 31.6929
# "VWAP": 31.693
startDate = barDict["StartDate"]
startTime = barDict["StartTime"]
startDateTimeStr = startDate + " " + startTime
startDateTime = datetime.datetime.strptime(startDateTimeStr, "%m/%d/%Y %I:%M:%S %p")
instrument, exchange = api.parse_instrument_exchange(identifier)
startDateTime = api.to_market_datetime(startDateTime, exchange)
return bar.BasicBar(startDateTime, barDict["Open"], barDict["High"], barDict["Low"], barDict["Close"], barDict["Volume"], None, frequency)
class GetBarThread(PollingThread):
# Events
ON_BARS = 1
def __init__(self, queue, apiToken, identifiers, frequency, apiCallDelay):
PollingThread.__init__(self)
# Map frequency to precision and period.
if frequency < bar.Frequency.MINUTE:
raise Exception("Frequency must be greater than or equal to bar.Frequency.MINUTE")
elif frequency < bar.Frequency.HOUR:
self.__precision = "Minutes"
self.__period = frequency / bar.Frequency.MINUTE
elif frequency < bar.Frequency.DAY:
self.__precision = "Hours"
self.__period = frequency / bar.Frequency.HOUR
else:
raise Exception("Frequency must be less than bar.Frequency.DAY")
self.__queue = queue
self.__apiToken = apiToken
self.__identifiers = identifiers
self.__frequency = frequency
self.__nextBarClose = None
# The delay between the bar's close and the API call.
self.__apiCallDelay = apiCallDelay
self.__updateNextBarClose()
def __updateNextBarClose(self):
self.__nextBarClose = resamplebase.build_range(utcnow(), self.__frequency).getEnding()
def getNextCallDateTime(self):
return self.__nextBarClose + self.__apiCallDelay
def doCall(self):
endDateTime = self.__nextBarClose
self.__updateNextBarClose()
barDict = {}
        for identifier in self.__identifiers:
            try:
                logger.debug("Requesting bars with precision %s and period %s for %s" % (self.__precision, self.__period, identifier))
                response = api.XigniteGlobalRealTime_GetBar(self.__apiToken, identifier, "Symbol", endDateTime, self.__precision, self.__period)
                # logger.debug(response)
                barDict[identifier] = build_bar(response["Bar"], identifier, self.__frequency)
except api.XigniteError, e:
logger.error(e)
if len(barDict):
bars = bar.Bars(barDict)
self.__queue.put((GetBarThread.ON_BARS, bars))
class LiveFeed(barfeed.BaseBarFeed):
"""A real-time BarFeed that builds bars using XigniteGlobalRealTime API
(https://www.xignite.com/product/global-real-time-stock-quote-data/).
:param apiToken: The API token to authenticate calls to Xignine APIs.
:type apiToken: string.
:param identifiers: A list with the fully qualified identifier for the securities including the exchange suffix.
:type identifiers: list.
:param frequency: The frequency of the bars.
Must be greater than or equal to **bar.Frequency.MINUTE** and less than **bar.Frequency.DAY**.
:param apiCallDelay: The delay in seconds between the bar's close and the API call.
This is necessary because the bar may not be immediately available.
:type apiCallDelay: int.
:param maxLen: The maximum number of values that the :class:`pyalgotrade.dataseries.bards.BarDataSeries` will hold.
Once a bounded length is full, when new items are added, a corresponding number of items are discarded from the opposite end.
:type maxLen: int.
.. note:: Valid exchange suffixes are:
* **ARCX**: NYSE ARCA
* **CHIX**: CHI-X EUROPE LIMITED
* **XASE**: NYSE MKT EQUITIES
* **XNAS**: NASDAQ
* **XNYS**: NEW YORK STOCK EXCHANGE, INC
"""
QUEUE_TIMEOUT = 0.01
def __init__(self, apiToken, identifiers, frequency, apiCallDelay=30, maxLen=dataseries.DEFAULT_MAX_LEN):
barfeed.BaseBarFeed.__init__(self, frequency, maxLen)
if not isinstance(identifiers, list):
raise Exception("identifiers must be a list")
self.__queue = Queue.Queue()
self.__thread = GetBarThread(self.__queue, apiToken, identifiers, frequency, datetime.timedelta(seconds=apiCallDelay))
for instrument in identifiers:
self.registerInstrument(instrument)
######################################################################
# observer.Subject interface
def start(self):
if self.__thread.is_alive():
raise Exception("Already strated")
# Start the thread that runs the client.
self.__thread.start()
def stop(self):
self.__thread.stop()
def join(self):
if self.__thread.is_alive():
self.__thread.join()
def eof(self):
return self.__thread.stopped()
def peekDateTime(self):
return None
######################################################################
# barfeed.BaseBarFeed interface
def getCurrentDateTime(self):
return utcnow()
def barsHaveAdjClose(self):
return False
def getNextBars(self):
ret = None
try:
eventType, eventData = self.__queue.get(True, LiveFeed.QUEUE_TIMEOUT)
if eventType == GetBarThread.ON_BARS:
ret = eventData
else:
logger.error("Invalid event received: %s - %s" % (eventType, eventData))
except Queue.Empty:
pass
return ret
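# Minimal usage sketch appended for illustration (not in the original module):
# the API token and the identifier below are placeholders; the loop simply
# prints the close price of each bar as it arrives from the feed.
if __name__ == '__main__':
    liveFeed = LiveFeed("YOUR_API_TOKEN", ["IBM.XNYS"], bar.Frequency.MINUTE)
    liveFeed.start()
    try:
        while not liveFeed.eof():
            bars = liveFeed.getNextBars()
            if bars is not None:
                for instrument in bars.getInstruments():
                    print instrument, bars[instrument].getClose()
    finally:
        liveFeed.stop()
        liveFeed.join()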
|
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# INPUTS: BrainProtonDensitySlice.png
# OUTPUTS: FastMarchingImageFilterOutput5.png
# ARGUMENTS: 81 114 1.0 -0.5 3.0 100 100
#
# INPUTS: BrainProtonDensitySlice.png
# OUTPUTS: FastMarchingImageFilterOutput6.png
# ARGUMENTS: 99 114 1.0 -0.5 3.0 100 100
#
# INPUTS: BrainProtonDensitySlice.png
# OUTPUTS: FastMarchingImageFilterOutput7.png
# ARGUMENTS: 56 92 1.0 -0.3 2.0 200 100
#
# INPUTS: BrainProtonDensitySlice.png
# OUTPUTS: FastMarchingImageFilterOutput8.png
# OUTPUTS: {FastMarchingFilterOutput1.png}
# OUTPUTS: {FastMarchingFilterOutput2.png}
# OUTPUTS: {FastMarchingFilterOutput3.png}
# ARGUMENTS: 40 90 0.5 -0.3 2.0 200 100
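# Example invocation (illustrative, combining the INPUTS/ARGUMENTS above with
# the usage string printed below):
#   python FastMarchingImageFilter.py BrainProtonDensitySlice.png \
#       FastMarchingImageFilterOutput5.png 81 114 1.0 -0.5 3.0 100 100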
import itk
from sys import argv, stderr, exit
if( len(argv) < 10 ):
print >> stderr, """Missing Parameters
Usage: FastMarchingImageFilter.py inputImage outputImage seedX seedY Sigma SigmoidAlpha SigmoidBeta TimeThreshold StoppingValue"""
exit(1)
itk.auto_progress(2)
InternalPixelType = itk.F
Dimension = 2
InternalImageType = itk.Image[ InternalPixelType, Dimension ]
OutputPixelType = itk.UC
OutputImageType = itk.Image[ OutputPixelType, Dimension ]
thresholder = itk.BinaryThresholdImageFilter[ InternalImageType, OutputImageType ].New()
timeThreshold = float( argv[8] )
thresholder.SetLowerThreshold( 0.0 )
thresholder.SetUpperThreshold( timeThreshold )
thresholder.SetOutsideValue( 0 )
thresholder.SetInsideValue( 255 )
ReaderType = itk.ImageFileReader[ InternalImageType ]
WriterType = itk.ImageFileWriter[ OutputImageType ]
reader = ReaderType.New()
writer = WriterType.New()
reader.SetFileName( argv[1] )
writer.SetFileName( argv[2] )
CastFilterType = itk.RescaleIntensityImageFilter[
InternalImageType,
OutputImageType ]
SmoothingFilterType = itk.CurvatureAnisotropicDiffusionImageFilter[
InternalImageType,
InternalImageType ]
smoothing = SmoothingFilterType.New()
GradientFilterType = itk.GradientMagnitudeRecursiveGaussianImageFilter[
InternalImageType,
InternalImageType ]
SigmoidFilterType = itk.SigmoidImageFilter[
InternalImageType,
InternalImageType ]
gradientMagnitude = GradientFilterType.New()
sigmoid = SigmoidFilterType.New()
sigmoid.SetOutputMinimum( 0.0 )
sigmoid.SetOutputMaximum( 1.0 )
FastMarchingFilterType = itk.FastMarchingImageFilter[ InternalImageType,
InternalImageType ]
fastMarching = FastMarchingFilterType.New()
smoothing.SetInput( reader.GetOutput() )
gradientMagnitude.SetInput( smoothing.GetOutput() )
sigmoid.SetInput( gradientMagnitude.GetOutput() )
fastMarching.SetInput( sigmoid.GetOutput() )
thresholder.SetInput( fastMarching.GetOutput() )
writer.SetInput( thresholder.GetOutput() )
smoothing.SetTimeStep( 0.125 )
smoothing.SetNumberOfIterations( 5 )
smoothing.SetConductanceParameter( 9.0 )
sigma = float( argv[5] )
gradientMagnitude.SetSigma( sigma )
alpha = float( argv[6] )
beta = float( argv[7] )
sigmoid.SetAlpha( alpha )
sigmoid.SetBeta( beta )
NodeType = itk.LevelSetNode[InternalPixelType, Dimension]
NodeContainer = itk.VectorContainer[itk.UI, NodeType]
seeds = NodeContainer.New()
seedPosition = [int( argv[3] ), int( argv[4] )]
node = NodeType()
seedValue = 0.0
node.SetValue( seedValue )
node.SetIndex( seedPosition )
seeds.Initialize()
seeds.InsertElement( 0, node )
fastMarching.SetTrialPoints( seeds )
caster1 = CastFilterType.New()
caster2 = CastFilterType.New()
caster3 = CastFilterType.New()
caster4 = CastFilterType.New()
writer1 = WriterType.New()
writer2 = WriterType.New()
writer3 = WriterType.New()
writer4 = WriterType.New()
caster1.SetInput( smoothing.GetOutput() )
writer1.SetInput( caster1.GetOutput() )
writer1.SetFileName("FastMarchingFilterOutput1.png")
caster1.SetOutputMinimum( 0 )
caster1.SetOutputMaximum( 255 )
writer1.Update()
caster2.SetInput( gradientMagnitude.GetOutput() )
writer2.SetInput( caster2.GetOutput() )
writer2.SetFileName("FastMarchingFilterOutput2.png")
caster2.SetOutputMinimum( 0 )
caster2.SetOutputMaximum( 255 )
writer2.Update()
caster3.SetInput( sigmoid.GetOutput() )
writer3.SetInput( caster3.GetOutput() )
writer3.SetFileName("FastMarchingFilterOutput3.png")
caster3.SetOutputMinimum( 0 )
caster3.SetOutputMaximum( 255 )
writer3.Update()
caster4.SetInput( fastMarching.GetOutput() )
writer4.SetInput( caster4.GetOutput() )
writer4.SetFileName("FastMarchingFilterOutput4.png")
caster4.SetOutputMinimum( 0 )
caster4.SetOutputMaximum( 255 )
fastMarching.SetOutputSize(
reader.GetOutput().GetBufferedRegion().GetSize() )
stoppingTime = float( argv[9] )
fastMarching.SetStoppingValue( stoppingTime )
writer.Update()
writer4.Update()
InternalWriterType = itk.ImageFileWriter[ InternalImageType ]
mapWriter = InternalWriterType.New()
mapWriter.SetInput( fastMarching.GetOutput() )
mapWriter.SetFileName("FastMarchingFilterOutput4.mha")
mapWriter.Update()
speedWriter = InternalWriterType.New()
speedWriter.SetInput( sigmoid.GetOutput() )
speedWriter.SetFileName("FastMarchingFilterOutput3.mha")
speedWriter.Update()
gradientWriter = InternalWriterType.New()
gradientWriter.SetInput( gradientMagnitude.GetOutput() )
gradientWriter.SetFileName("FastMarchingFilterOutput2.mha")
gradientWriter.Update()
|
|
#Radio Control
#Martin O'Hanlon
#stuffaboutcode.com
#imports
import RPi.GPIO as GPIO
import mpd
import socket
from time import sleep
from Queue import Queue
from threading import Timer
from event import Event
from onoffswitch import OnOffSwitch
from playlistselector import PlaylistSelector
from volumecontrol import VolumeControl
from skiptrackpause import SkipTrackPause
from mpdkeepalive import MPDKeepAlive
#Constants
#GPIO pin constants
AMPMUTEPIN = 22
SHUTDOWNPIN = 12
LEDPIN = 16
#MPD constants
MPDHOST = "localhost"
MPDPORT = "6600"
#Playlist constants
PLAYLISTNAMEM = "MartPlaylist"
PLAYLISTNAMEL = "LeePlaylist"
#previous track timeout
PREVTRACKTIMEOUT = 5.0
#shutdown button press time
SHUTDOWNBUTTONTIME = 2
class Amp():
def __init__(self, mutePin):
self.mutePin = mutePin
#setup pins
GPIO.setup(mutePin, GPIO.OUT)
@property
def muted(self):
return GPIO.input(self.mutePin) == 1
def unmute(self):
GPIO.output(self.mutePin, 1)
def mute(self):
GPIO.output(self.mutePin, 0)
class ShutdownButton():
def __init__(self, eventQ):
self.eventQ = eventQ
GPIO.setup(SHUTDOWNPIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def start(self):
GPIO.add_event_detect(SHUTDOWNPIN, GPIO.RISING, callback=self._shutdownCallback, bouncetime=250)
def stop(self):
GPIO.remove_event_detect(SHUTDOWNPIN)
def _shutdownCallback(self, pin):
#start the shutdown timer
self.shutdownTimer = Timer(SHUTDOWNBUTTONTIME, self._shutdownTimer)
self.shutdownTimer.start()
def _shutdownTimer(self):
#if the shutdown button is still pressed, raise the event
if GPIO.input(SHUTDOWNPIN) == True:
self.eventQ.put(Event(Event.EventType.SHUTDOWN))
class LED():
def __init__(self, ledPin):
self.ledPin = ledPin
GPIO.setup(ledPin, GPIO.OUT)
def on(self):
GPIO.output(self.ledPin, True)
def off(self):
GPIO.output(self.ledPin, False)
class RadioControl():
def __init__(self, mpdHost = "localhost", mpdPort = "6600"):
#create properties
self.mpdHost = mpdHost
self.mpdPort = mpdPort
self.playing = False
self.playlist = None
self.stopped = True
self.prevTrackSkipActive = False
#create events queue
self.eventQ = Queue()
#Create objects to manage controls
#create on led
self.onLED = LED(LEDPIN)
#create amp
self.amp = Amp(AMPMUTEPIN)
#create on/off switch
self.onOffSwitch = OnOffSwitch(self.eventQ)
#create m/l selector
self.playlistSwitch = PlaylistSelector(self.eventQ)
#create volume control
self.volControl = VolumeControl(self.eventQ)
#create skip track & pause control
self.stp = SkipTrackPause(self.eventQ)
#create shutdown button control
self.shutdownButton = ShutdownButton(self.eventQ)
#create the mpd keep alive object
self.mpdKeepAlive = MPDKeepAlive(self.eventQ)
def start(self):
#turn on led
self.onLED.on()
#create connection to MPD client / volumio
self.mpd = mpd.MPDClient()
self.mpd.connect(MPDHOST, MPDPORT)
#start up mpd keep alive
self.mpdKeepAlive.start()
#start up the controls
self.onOffSwitch.start()
self.playlistSwitch.start()
self.volControl.start()
self.stp.start()
self.shutdownButton.start()
try:
self.stopped = False
#process events
while(not self.stopped):
while not self.eventQ.empty():
event = self.eventQ.get()
#print(event.eventType)
self._processEvent(event)
sleep(0.1)
finally:
#stop the controls
self.shutdownButton.stop()
self.stp.stop()
self.onOffSwitch.stop()
self.playlistSwitch.stop()
self.volControl.stop()
#stop mpd keep alive
self.mpdKeepAlive.stop()
#turn off led
self.onLED.off()
def _processEvent(self, event):
#on
if (event.eventType == Event.EventType.ON):
print("ON")
self._on()
#off
elif (event.eventType == Event.EventType.OFF):
print("OFF")
self._off()
#shutdown
elif (event.eventType == Event.EventType.SHUTDOWN):
print("SHUTDOWN")
self._shutdown()
#m selected
elif (event.eventType == Event.EventType.PLAYLISTM):
print("PLAYLISTM")
self._switchPlaylist(PLAYLISTNAMEM)
#l selected
elif (event.eventType == Event.EventType.PLAYLISTL):
print("PLAYLISTL")
self._switchPlaylist(PLAYLISTNAMEL)
#volume changes
elif (event.eventType == Event.EventType.VOL):
print("VOL - {}".format(event.value))
self._setVolume(event.value)
#pause
elif (event.eventType == Event.EventType.PAUSE):
print("PAUSE")
self._pauseResumePlayback()
#skip track
elif (event.eventType == Event.EventType.SKIPTRACK):
print("SKIPTRACK - {}".format(event.value))
self._skipTrack(event.value)
#skip track
elif (event.eventType == Event.EventType.PINGMPD):
#print("PING MPD")
self._pingMPD()
def _safeMPDExec(self, func, *arg):
success = False
try:
func(*arg)
success = True
except mpd.CommandError as e:
print "Error({0})".format(e.args)
except mpd.ConnectionError as e:
print "MPD connection error"
#try and reconnect
if self._reconnectMPD():
print "MPD reconnected"
#rerun function
self._safeMPDExec(func, *arg)
except socket.error as e:
print "socket error"
#try and reconnect
if self._reconnectMPD():
print "MPD reconnected"
#rerun function
self._safeMPDExec(func, *arg)
return success
def _safeMPDStatus(self):
status = None
try:
status = self.mpd.status()
except mpd.ConnectionError as e:
print "MPD connection error"
#try and reconnect
if self._reconnectMPD():
print "MPD reconnected"
#rerun function
status = self._safeMPDStatus()
except socket.error as e:
print "socket error"
#try and reconnect
if self._reconnectMPD():
print "MPD reconnected"
#rerun function
status = self._safeMPDStatus()
return status
def _reconnectMPD(self):
#connect to MPD server
success = False
try:
self.mpd.connect(MPDHOST, MPDPORT)
success = True
except:
print "Failed to connect to mpd"
return success
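#Taken together, _safeMPDExec and _safeMPDStatus wrap every MPD command so that
#a dropped connection is re-established and the command retried; callers below
#only need to check the returned success flag / status value.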
def _on(self):
#unmute amp
self.amp.unmute()
#if there is a playlist selected, resume playback
if self.playlist != None:
self._startPlayback()
def _off(self):
#if there is playlist selected - pause it
if self.playlist != None:
#stop playback
self._stopPlayback()
#mute amp
self.amp.mute()
def _startPlayback(self):
#state = self.mpd.status()["state"]
state = self._safeMPDStatus()["state"]
if state == "pause":
self._safeMPDExec(self.mpd.pause,0)
elif state == "stop":
self._safeMPDExec(self.mpd.play)
def _stopPlayback(self):
self._safeMPDExec(self.mpd.stop)
def _pausePlayback(self):
self._safeMPDExec(self.mpd.pause,1)
def _pauseResumePlayback(self):
state = self._safeMPDStatus()["state"]
if state == "play":
self._pausePlayback()
elif state == "pause":
self._startPlayback()
def _loadPlaylist(self, playlistname):
self._safeMPDExec(self.mpd.clear)
if not self._safeMPDExec(self.mpd.load, playlistname):
print "Failed to load playlist ({})".format(playlistname)
self.playlist = playlistname
#set repeat on
self._safeMPDExec(self.mpd.repeat, 1)
def _savePlaylist(self, playlistname):
#remove the playlist
self._safeMPDExec(self.mpd.rm, playlistname)
self._safeMPDExec(self.mpd.save, playlistname)
def _switchPlaylist(self, playlistname):
#if a different playlist is playing save it
if self.playlist != playlistname:
if self.playlist != None:
#save the previous playlist
self._savePlaylist(self.playlist)
#load the playlist and start playback
self._loadPlaylist(playlistname)
self._startPlayback()
def _setVolume(self, vol):
self._safeMPDExec(self.mpd.setvol, vol)
def _skipTrack(self, direction):
if direction == 1:
self._nextTrack()
elif direction == -1:
self._prevTrack()
def _nextTrack(self):
state = self._safeMPDStatus()["state"]
if state == "play":
self._safeMPDExec(self.mpd.next)
def _prevTrack(self):
state = self._safeMPDStatus()["state"]
if state == "play":
#if we are skipping back through tracks, goto the previous, otherwise restart the current track
if self.prevTrackSkipActive:
#goto the previous track
self._safeMPDExec(self.mpd.previous)
else:
#restart the current track
self._safeMPDExec(self.mpd.seekcur, 0)
#start / restart the timer
self._startPrevTrackSkip()
def _startPrevTrackSkip(self):
#print "track skip started"
#if its active cancel it
if self.prevTrackSkipActive:
self.prevTrackSkipTimer.cancel()
self.prevTrackSkipActive = True
self.prevTrackSkipTimer = Timer(PREVTRACKTIMEOUT, self._cancelPrevTrackSkip)
self.prevTrackSkipTimer.start()
def _cancelPrevTrackSkip(self):
#print "track skip cancelled"
self.prevTrackSkipActive = False
def _pingMPD(self):
self._safeMPDExec(self.mpd.ping)
def _shutdown(self):
#if there is playlist selected - pause it
if self.playlist != None:
#stop playback
self._stopPlayback()
#save the playlist
self._savePlaylist(self.playlist)
#mute amp
self.amp.mute()
#stop the program
self.stopped = True
#main program
if __name__ == "__main__":
#set GPIO mode
GPIO.setmode(GPIO.BCM)
#create radio control
radioControl = RadioControl(MPDHOST, MPDPORT)
try:
#start the radio control
radioControl.start()
finally:
#tidy up GPIO
GPIO.cleanup()
"""
Requirements:
ON/OFF
If the power switch is ON the amp should be unmuted.
If the power switch is OFF the amp should be muted.
When the power switch is turned ON:
- Unmute the amp
- If a playlist is paused
- unpause the playback
When the power switch is turned OFF:
- If a playlist was playing
- pause the playback
- Mute the amp
PLAYBACK
There should be 2 default playlists, selected via the M/L switch:
- MartPlaylist
- LeePlaylist
* if the playlist doesn't exist create one from a backup
When M/L is selected:
- If the M/L is paused:
- unpause the playback
- If the M/L has changed:
- if it's playing, stop the playback
- save the queue to the playlist
- persist in memory the track which was playing
- load the playlist
- set the playlist to repeat
- if there is a track in memory, set it to play that one
When the top is closed the music should pause.
"""
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import, print_function
import logging
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget import plugin
from flexget.event import event
from flexget.manager import Session
from . import api_trakt as plugin_api_trakt
from . import db
# TODO: not very nice ..
lookup_series = plugin_api_trakt.ApiTrakt.lookup_series
lookup_movie = plugin_api_trakt.ApiTrakt.lookup_movie
log = logging.getLogger('trakt_lookup')
def is_show(entry):
return entry.get('series_name') or entry.get('tvdb_id', eval_lazy=False)
def is_episode(entry):
return entry.get('series_season') and entry.get('series_episode')
def is_season(entry):
return entry.get('series_season') and not is_episode(entry)
def is_movie(entry):
return bool(entry.get('movie_name'))
class TraktLazyLookup(object):
def __init__(self, field_map, lookup_function):
self.field_map = field_map
self.lookup_function = lookup_function
def __call__(self, entry):
with Session() as session:
try:
result = self.lookup_function(entry, session)
except LookupError as e:
log.debug(e)
else:
entry.update_using_map(self.field_map, result)
return entry
class TraktUserDataLookup(object):
def __init__(self, field_name, data_type, media_type, lookup_function):
self.field_name = field_name
self.lookup_function = lookup_function
self.data_type = data_type
self.media_type = media_type
def __call__(self, entry):
try:
result = self.lookup_function(
data_type=self.data_type, media_type=self.media_type, entry=entry
)
except LookupError as e:
log.debug(e)
else:
entry[self.field_name] = result
return entry
class PluginTraktLookup(object):
"""Retrieves trakt information for entries. Uses series_name,
series_season, series_episode from series plugin.
Example:
trakt_lookup: yes
Primarily used for passing trakt information to other plugins.
Among these is the IMDB url for the series.
This information is provided (via entry):
series info:
trakt_series_name
trakt_series_runtime
trakt_series_first_aired_epoch
trakt_series_first_aired_iso
trakt_series_air_time
trakt_series_content_rating
trakt_series_genres
trakt_series_imdb_url
trakt_series_trakt_url
imdb_id
tvdb_id
trakt_series_actors
trakt_series_country
trakt_series_year
trakt_series_tvrage_id
trakt_series_status
trakt_series_overview
trakt_ep_name
trakt_ep_season
trakt_ep_number
trakt_ep_overview
trakt_ep_first_aired_epoch
trakt_ep_first_aired_iso
trakt_ep_id
trakt_ep_tvdb_id
"""
# Series info
series_map = {
'trakt_series_name': 'title',
'trakt_series_year': 'year',
'imdb_id': 'imdb_id',
'tvdb_id': 'tvdb_id',
'tmdb_id': 'tmdb_id',
'trakt_show_id': 'id',
'trakt_show_slug': 'slug',
'tvrage_id': 'tvrage_id',
'trakt_trailer': 'trailer',
'trakt_homepage': 'homepage',
'trakt_series_runtime': 'runtime',
'trakt_series_first_aired': 'first_aired',
'trakt_series_air_time': 'air_time',
'trakt_series_air_day': 'air_day',
'trakt_series_content_rating': 'certification',
'trakt_genres': lambda i: [db_genre.name for db_genre in i.genres],
'trakt_series_network': 'network',
'imdb_url': lambda series: series.imdb_id
and 'http://www.imdb.com/title/%s' % series.imdb_id,
'trakt_series_url': lambda series: series.slug
and 'https://trakt.tv/shows/%s' % series.slug,
'trakt_series_country': 'country',
'trakt_series_status': 'status',
'trakt_series_overview': 'overview',
'trakt_series_rating': 'rating',
'trakt_series_votes': 'votes',
'trakt_series_language': 'language',
'trakt_series_aired_episodes': 'aired_episodes',
'trakt_series_episodes': lambda show: [episodes.title for episodes in show.episodes],
'trakt_languages': 'translation_languages',
}
series_actor_map = {'trakt_actors': lambda show: db.list_actors(show.actors)}
show_translate_map = {
'trakt_translations': lambda show: db.get_translations_dict(show.translations, 'show')
}
# Episode info
episode_map = {
'trakt_ep_name': 'title',
'trakt_ep_imdb_id': 'imdb_id',
'trakt_ep_tvdb_id': 'tvdb_id',
'trakt_ep_tmdb_id': 'tmdb_id',
'trakt_ep_tvrage': 'tvrage_id',
'trakt_episode_id': 'id',
'trakt_ep_first_aired': 'first_aired',
'trakt_ep_overview': 'overview',
'trakt_ep_abs_number': 'number_abs',
'trakt_season': 'season',
'trakt_episode': 'number',
'trakt_ep_id': lambda ep: 'S%02dE%02d' % (ep.season, ep.number),
}
# Season info
season_map = {
'trakt_season_name': 'title',
'trakt_season_tvdb_id': 'tvdb_id',
'trakt_season_tmdb_id': 'tmdb_id',
'trakt_season_tvrage': 'tvrage_id',
'trakt_season_id': 'id',
'trakt_season_first_aired': 'first_aired',
'trakt_season_overview': 'overview',
'trakt_season_episode_count': 'episode_count',
'trakt_season': 'number',
'trakt_season_aired_episodes': 'aired_episodes',
}
# Movie info
movie_map = {
'movie_name': 'title',
'movie_year': 'year',
'trakt_movie_name': 'title',
'trakt_movie_year': 'year',
'trakt_movie_id': 'id',
'trakt_movie_slug': 'slug',
'imdb_id': 'imdb_id',
'tmdb_id': 'tmdb_id',
'trakt_tagline': 'tagline',
'trakt_overview': 'overview',
'trakt_released': 'released',
'trakt_runtime': 'runtime',
'trakt_rating': 'rating',
'trakt_votes': 'votes',
'trakt_homepage': 'homepage',
'trakt_trailer': 'trailer',
'trakt_language': 'language',
'trakt_genres': lambda i: [db_genre.name for db_genre in i.genres],
'trakt_languages': 'translation_languages',
}
movie_translate_map = {
'trakt_translations': lambda movie: db.get_translations_dict(movie.translations, 'movie')
}
movie_actor_map = {'trakt_actors': lambda movie: db.list_actors(movie.actors)}
user_data_map = {
'collected': 'trakt_collected',
'watched': 'trakt_watched',
'ratings': {
'show': 'trakt_series_user_rating',
'season': 'trakt_season_user_rating',
'episode': 'trakt_ep_user_rating',
'movie': 'trakt_movie_user_rating',
},
}
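# For example, _get_user_data_field_name('ratings', 'episode') below resolves to
# 'trakt_ep_user_rating', while _get_user_data_field_name('collected', media_type)
# resolves to 'trakt_collected' for any media type.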
schema = {
'oneOf': [
{
'type': 'object',
'properties': {'account': {'type': 'string'}, 'username': {'type': 'string'}},
'anyOf': [{'required': ['username']}, {'required': ['account']}],
'error_anyOf': 'At least one of the `username` or `account` options is needed.',
'additionalProperties': False,
},
{'type': 'boolean'},
]
}
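# An illustrative task configuration (the option names come from the schema
# above; the values are placeholders):
#
#   trakt_lookup:
#     account: my_trakt_account
#     username: my_trakt_username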
def __init__(self):
self.getter_map = {
'show': self._get_series,
'season': self._get_season,
'episode': self._get_episode,
'movie': self._get_movie,
}
def on_task_start(self, task, config):
if not isinstance(config, dict):
config = {}
self.trakt = plugin_api_trakt.ApiTrakt(
username=config.get('username'), account=config.get('account')
)
def _get_user_data_field_name(self, data_type, media_type):
if data_type not in self.user_data_map:
raise plugin.PluginError('Unknown user data type "%s"' % data_type)
if isinstance(self.user_data_map[data_type], dict):
return self.user_data_map[data_type][media_type]
return self.user_data_map[data_type]
def _get_lookup_args(self, entry):
args = {
'title': entry.get('series_name', eval_lazy=False)
or entry.get('title', eval_lazy=False),
'year': entry.get('year', eval_lazy=False),
'trakt_slug': (
entry.get('trakt_show_slug', eval_lazy=False)
or entry.get('trakt_movie_slug', eval_lazy=False)
),
'tmdb_id': entry.get('tmdb_id', eval_lazy=False),
'tvdb_id': entry.get('tvdb_id', eval_lazy=False),
'imdb_id': entry.get('imdb_id', eval_lazy=False),
'tvrage_id': entry.get('tvrage_id', eval_lazy=False),
}
if entry.get('trakt_movie_id', eval_lazy=False):
args['trakt_id'] = entry['trakt_movie_id']
elif entry.get('trakt_show_id', eval_lazy=False):
args['trakt_id'] = entry['trakt_show_id']
elif is_movie(entry) and entry.get('trakt_movie_id', eval_lazy=True):
args['trakt_id'] = entry['trakt_movie_id']
elif entry.get('trakt_show_id', eval_lazy=True):
args['trakt_id'] = entry['trakt_show_id']
return args
def _get_series(self, entry, session):
series_lookup_args = self._get_lookup_args(entry)
return lookup_series(session=session, **series_lookup_args)
def _get_season(self, entry, session):
series_lookup_args = self._get_lookup_args(entry)
show = lookup_series(session=session, **series_lookup_args)
return show.get_season(entry['series_season'], session)
def _get_episode(self, entry, session):
series_lookup_args = self._get_lookup_args(entry)
show = lookup_series(session=session, **series_lookup_args)
return show.get_episode(entry['series_season'], entry['series_episode'], session)
def _get_movie(self, entry, session):
movie_lookup_args = self._get_lookup_args(entry)
return lookup_movie(session=session, **movie_lookup_args)
def lazy_lookup(self, entry, media_type, mapping):
"""Does the lookup for this entry and populates the entry fields."""
with Session() as session:
try:
trakt_media = self.getter_map[media_type](entry, session)
except LookupError as e:
log.debug(e)
else:
entry.update_using_map(mapping, trakt_media)
return entry
def _lazy_user_data_lookup(self, data_type, media_type, entry):
try:
lookup = self.getter_map[media_type]
user_data_lookup = self.trakt.lookup_map[data_type][media_type]
except KeyError:
raise plugin.PluginError(
'Unknown data type="%s" or media type="%s"' % (data_type, media_type)
)
with Session() as session:
try:
return user_data_lookup(lookup(entry, session), entry['title'])
except LookupError as e:
log.debug(e)
# Run after series and metainfo series
@plugin.priority(110)
def on_task_metainfo(self, task, config):
if not config:
return
if isinstance(config, bool):
config = dict()
for entry in task.entries:
if is_show(entry):
entry.register_lazy_func(
TraktLazyLookup(self.series_map, self._get_series), self.series_map
)
# TODO cleaner way to do this?
entry.register_lazy_func(
TraktLazyLookup(self.series_actor_map, self._get_series), self.series_actor_map
)
entry.register_lazy_func(
TraktLazyLookup(self.show_translate_map, self._get_series),
self.show_translate_map,
)
if is_episode(entry):
entry.register_lazy_func(
TraktLazyLookup(self.episode_map, self._get_episode), self.episode_map
)
elif is_season(entry):
entry.register_lazy_func(
TraktLazyLookup(self.season_map, self._get_season), self.season_map
)
else:
entry.register_lazy_func(
TraktLazyLookup(self.movie_map, self._get_movie), self.movie_map
)
# TODO cleaner way to do this?
entry.register_lazy_func(
TraktLazyLookup(self.movie_actor_map, self._get_movie), self.movie_actor_map
)
entry.register_lazy_func(
TraktLazyLookup(self.movie_translate_map, self._get_movie),
self.movie_translate_map,
)
if config.get('username') or config.get('account'):
self._register_lazy_user_data_lookup(entry, 'collected')
self._register_lazy_user_data_lookup(entry, 'watched')
self._register_lazy_user_ratings_lookup(entry)
def _get_media_type_from_entry(self, entry):
media_type = None
if is_episode(entry):
media_type = 'episode'
elif is_season(entry):
media_type = 'season'
elif is_show(entry):
media_type = 'show'
elif is_movie(entry):
media_type = 'movie'
return media_type
def _register_lazy_user_data_lookup(self, entry, data_type, media_type=None):
media_type = media_type or self._get_media_type_from_entry(entry)
if not media_type:
return
field_name = self._get_user_data_field_name(data_type=data_type, media_type=media_type)
entry.register_lazy_func(
TraktUserDataLookup(field_name, data_type, media_type, self._lazy_user_data_lookup),
[field_name],
)
def _register_lazy_user_ratings_lookup(self, entry):
data_type = 'ratings'
if is_show(entry):
self._register_lazy_user_data_lookup(
entry=entry, data_type=data_type, media_type='show'
)
self._register_lazy_user_data_lookup(
entry=entry, data_type=data_type, media_type='season'
)
self._register_lazy_user_data_lookup(
entry=entry, data_type=data_type, media_type='episode'
)
else:
self._register_lazy_user_data_lookup(
entry=entry, data_type=data_type, media_type='movie'
)
@property
def series_identifier(self):
"""Returns the plugin main identifier type"""
return 'trakt_show_id'
@property
def movie_identifier(self):
"""Returns the plugin main identifier type"""
return 'trakt_movie_id'
@event('plugin.register')
def register_plugin():
plugin.register(
PluginTraktLookup,
'trakt_lookup',
api_ver=2,
interfaces=['task', 'series_metainfo', 'movie_metainfo'],
)
|
|
import pprint
import test.test_support
import unittest
try:
uni = unicode
except NameError:
def uni(x):
return x
# list, tuple and dict subclasses that do or don't overwrite __repr__
class list2(list):
pass
class list3(list):
def __repr__(self):
return list.__repr__(self)
class tuple2(tuple):
pass
class tuple3(tuple):
def __repr__(self):
return tuple.__repr__(self)
class dict2(dict):
pass
class dict3(dict):
def __repr__(self):
return dict.__repr__(self)
class QueryTestCase(unittest.TestCase):
def setUp(self):
self.a = range(100)
self.b = range(200)
self.a[-12] = self.b
def test_basic(self):
# Verify .isrecursive() and .isreadable() w/o recursion
verify = self.assert_
pp = pprint.PrettyPrinter()
for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, uni("yaddayadda"),
self.a, self.b):
# module-level convenience functions
verify(not pprint.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
verify(pprint.isreadable(safe),
"expected isreadable for %r" % (safe,))
# PrettyPrinter methods
verify(not pp.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
verify(pp.isreadable(safe),
"expected isreadable for %r" % (safe,))
def test_knotted(self):
# Verify .isrecursive() and .isreadable() w/ recursion
# Tie a knot.
self.b[67] = self.a
# Messy dict.
self.d = {}
self.d[0] = self.d[1] = self.d[2] = self.d
verify = self.assert_
pp = pprint.PrettyPrinter()
for icky in self.a, self.b, self.d, (self.d, self.d):
verify(pprint.isrecursive(icky), "expected isrecursive")
verify(not pprint.isreadable(icky), "expected not isreadable")
verify(pp.isrecursive(icky), "expected isrecursive")
verify(not pp.isreadable(icky), "expected not isreadable")
# Break the cycles.
self.d.clear()
del self.a[:]
del self.b[:]
for safe in self.a, self.b, self.d, (self.d, self.d):
# module-level convenience functions
verify(not pprint.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
verify(pprint.isreadable(safe),
"expected isreadable for %r" % (safe,))
# PrettyPrinter methods
verify(not pp.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
verify(pp.isreadable(safe),
"expected isreadable for %r" % (safe,))
def test_unreadable(self):
# Not recursive but not readable anyway
verify = self.assert_
pp = pprint.PrettyPrinter()
for unreadable in type(3), pprint, pprint.isrecursive:
# module-level convenience functions
verify(not pprint.isrecursive(unreadable),
"expected not isrecursive for %r" % (unreadable,))
verify(not pprint.isreadable(unreadable),
"expected not isreadable for %r" % (unreadable,))
# PrettyPrinter methods
verify(not pp.isrecursive(unreadable),
"expected not isrecursive for %r" % (unreadable,))
verify(not pp.isreadable(unreadable),
"expected not isreadable for %r" % (unreadable,))
def test_same_as_repr(self):
# Simple objects, small containers and classes that overwrite __repr__
# For those the result should be the same as repr().
# Ahem. The docs don't say anything about that -- this appears to
# be testing an implementation quirk. Starting in Python 2.5, it's
# not true for dicts: pprint always sorts dicts by key now; before,
# it sorted a dict display if and only if the display required
# multiple lines. For that reason, dicts with more than one element
# aren't tested here.
verify = self.assert_
for simple in (0, 0L, 0+0j, 0.0, "", uni(""),
(), tuple2(), tuple3(),
[], list2(), list3(),
{}, dict2(), dict3(),
verify, pprint,
-6, -6L, -6-6j, -1.5, "x", uni("x"), (3,), [3], {3: 6},
(1,2), [3,4], {5: 6, 7: 8},
tuple2((1,2)), tuple3((1,2)), tuple3(range(100)),
[3,4], list2([3,4]), list3([3,4]), list3(range(100)),
{5: 6, 7: 8}, dict2({5: 6}), dict3({5: 6}),
range(10, -11, -1)
):
native = repr(simple)
for function in "pformat", "saferepr":
f = getattr(pprint, function)
got = f(simple)
verify(native == got, "expected %s got %s from pprint.%s" %
(native, got, function))
def test_basic_line_wrap(self):
# verify basic line-wrapping operation
o = {'RPM_cal': 0,
'RPM_cal2': 48059,
'Speed_cal': 0,
'controldesk_runtime_us': 0,
'main_code_runtime_us': 0,
'read_io_runtime_us': 0,
'write_io_runtime_us': 43690}
exp = """\
{'RPM_cal': 0,
'RPM_cal2': 48059,
'Speed_cal': 0,
'controldesk_runtime_us': 0,
'main_code_runtime_us': 0,
'read_io_runtime_us': 0,
'write_io_runtime_us': 43690}"""
for type in [dict, dict2]:
self.assertEqual(pprint.pformat(type(o)), exp)
o = range(100)
exp = '[%s]' % ',\n '.join(map(str, o))
for type in [list, list2]:
self.assertEqual(pprint.pformat(type(o)), exp)
o = tuple(range(100))
exp = '(%s)' % ',\n '.join(map(str, o))
for type in [tuple, tuple2]:
self.assertEqual(pprint.pformat(type(o)), exp)
# indent parameter
o = range(100)
exp = '[ %s]' % ',\n '.join(map(str, o))
for type in [list, list2]:
self.assertEqual(pprint.pformat(type(o), indent=4), exp)
def test_sorted_dict(self):
# Starting in Python 2.5, pprint sorts dict displays by key regardless
# of how small the dictionary may be.
# Before the change, on 32-bit Windows pformat() gave order
# 'a', 'c', 'b' here, so this test failed.
d = {'a': 1, 'b': 1, 'c': 1}
self.assertEqual(pprint.pformat(d), "{'a': 1, 'b': 1, 'c': 1}")
self.assertEqual(pprint.pformat([d, d]),
"[{'a': 1, 'b': 1, 'c': 1}, {'a': 1, 'b': 1, 'c': 1}]")
# The next one is kind of goofy. The sorted order depends on the
# alphabetic order of type names: "int" < "str" < "tuple". Before
# Python 2.5, this was in the test_same_as_repr() test. It's worth
# keeping around for now because it's one of few tests of pprint
# against a crazy mix of types.
self.assertEqual(pprint.pformat({"xy\tab\n": (3,), 5: [[]], (): {}}),
r"{5: [[]], 'xy\tab\n': (3,), (): {}}")
def test_subclassing(self):
o = {'names with spaces': 'should be presented using repr()',
'others.should.not.be': 'like.this'}
exp = """\
{'names with spaces': 'should be presented using repr()',
others.should.not.be: like.this}"""
self.assertEqual(DottedPrettyPrinter().pformat(o), exp)
class DottedPrettyPrinter(pprint.PrettyPrinter):
def format(self, object, context, maxlevels, level):
if isinstance(object, str):
if ' ' in object:
return repr(object), 1, 0
else:
return object, 0, 0
else:
return pprint.PrettyPrinter.format(
self, object, context, maxlevels, level)
def test_main():
test.test_support.run_unittest(QueryTestCase)
if __name__ == "__main__":
test_main()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.svm
import sklearn.metrics
from sklearn import preprocessing
try:
from ibeis_cnn.models import abstract_models
AbstractCategoricalModel = abstract_models.AbstractCategoricalModel
except ImportError:
AbstractCategoricalModel = object
print('no ibeis_cnn')
from os.path import join
(print, rrr, profile) = ut.inject2(__name__)
def shark_net(dry=False):
"""
CommandLine:
python -m ibeis.scripts.classify_shark shark_net
python -m ibeis.scripts.classify_shark shark_net --dry
python -m ibeis.scripts.classify_shark shark_net --vd --monitor
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.classify_shark import * # NOQA
>>> shark_net()
"""
from ibeis.scripts import classify_shark
import ibeis
ibs = ibeis.opendb('WS_ALL') # NOQA
config = {
'dim_size': (224, 224),
'resize_dim': 'wh'
}
# ------------
# Define dataset
# ------------
target_type = 'binary'
#target_type = 'multiclass3'
# ut.delete(ibs.get_neuralnet_dir()) # to reset
dataset = classify_shark.get_shark_dataset(target_type, 'chip')
# ------------
# Define model
# ------------
if ut.get_computer_name() == 'Leviathan':
batch_size = 128
suffix = 'resnet'
# suffix = 'lenet'
#suffix = 'incep'
else:
suffix = 'lenet'
batch_size = 64
#suffix = 'resnet'
#batch_size = 32
model_name = 'injur-shark-' + suffix
if False:
model = classify_shark.WhaleSharkInjuryModel(
name=model_name,
output_dims=len(dataset.getprop('target_names')),
data_shape=config['dim_size'] + (3,),
batch_size=batch_size,
arch_dpath='.')
model.init_arch()
model.load_model_state()
else:
model = classify_shark.WhaleSharkInjuryModel(
name=model_name,
dataset_dpath=dataset.dataset_dpath,
training_dpath=ibs.get_neuralnet_dir(),
#
output_dims=len(dataset.getprop('target_names')),
data_shape=config['dim_size'] + (3,),
batch_size=batch_size,
)
model.init_arch()
model.print_layer_info()
if False:
model.arch_dpath = '/home/joncrall/Desktop/manually_saved/arch_injur-shark-resnet_o2_d27_c2942_jzuddodd/'
state_fpath = model.get_model_state_fpath(dpath=model.trained_arch_dpath)
state_fpath = model.get_model_state_fpath()
model.load_model_state(fpath=state_fpath)
#X_test, y_test = dataset.subset('test')
#X_test, y_test = dataset.subset('valid')
#X_test, y_test = dataset.subset('learn')
X_test, y_test = dataset.subset('test')
#y_pred = model.predict(X_test)
test_outputs = model._predict(X_test)
y_pred = test_outputs['predictions']
print(model.name)
report = sklearn.metrics.classification_report(
y_true=y_test, y_pred=y_pred,
)
print(report)
state_fpath = '/home/joncrall/Desktop/manually_saved/arch_injur-shark-resnet_o2_d27_c2942_jzuddodd/model_state_arch_jzuddodd.pkl'
dpath = '/home/joncrall/Desktop/manually_saved/arch_injur-shark-lenet_o2_d11_c688_acioqbst'
model.dump_cases(X_test, y_test, 'test', dpath=dpath)
hyperparams = dict(
era_size=30,
max_epochs=1000,
rate_schedule=.1,
augment_on=True,
class_weight='balanced',
stopping_patience=200,
)
model.learn_state.weight_decay = .000002
model.learn_state.learning_rate = .005
ut.update_existing(model.hyperparams, hyperparams, assert_exists=True)
model.monitor_config['monitor'] = True
model.monitor_config['weight_dump_freq'] = 100
model.monitor_config['case_dump_freq'] = 100
#model.build_backprop_func()
#model.build_forward_func()
# ---------------
# Setup and learn
# ---------------
X_learn, y_learn = dataset.subset('learn')
X_valid, y_valid = dataset.subset('valid')
X_test, y_test = dataset.subset('test')
#model.ensure_data_params(X_learn, y_learn)
#X_train = X_learn # NOQA
#y_train = y_learn # NOQA
valid_idx = None # NOQA
if dry or ut.get_argflag('--dry'):
return model, dataset
model.fit(X_learn, y_learn, X_valid=X_valid, y_valid=y_valid)
# @ut.reloadable_class
class WhaleSharkInjuryModel(AbstractCategoricalModel):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.classify_shark import * # NOQA
>>> from ibeis.scripts import classify_shark
>>> ds = classify_shark.get_sharks_dataset('binary', 'chip')
>>> problem = classify_shark.ClfProblem(ds)
>>> problem.print_support_info()
>>> ibs = ds.ibs
"""
def def_lenet(model):
import ibeis_cnn.__LASAGNE__ as lasange
from ibeis_cnn import custom_layers
print('[model] init_arch')
lrelu = lasange.nonlinearities.LeakyRectify(leakiness=(1. / 3.))
W = lasange.init.Orthogonal('relu')
bundles = custom_layers.make_bundles(
nonlinearity=lrelu, batch_norm=True,
filter_size=(3, 3), stride=(1, 1),
pool_size=(2, 2), pool_stride=(2, 2),
W=W,
)
b = ut.DynStruct(copy_dict=bundles)
network_layers_def = [
b.InputBundle(shape=model.input_shape, noise=False),
# Convolutional layers
b.ConvBundle(num_filters=16, pool=True),
b.ConvBundle(num_filters=16),
b.ConvBundle(num_filters=16, pool=True),
b.ConvBundle(num_filters=16),
b.ConvBundle(num_filters=32, pool=True),
b.ConvBundle(num_filters=32),
b.ConvBundle(num_filters=32, pool=True),
b.ConvBundle(num_filters=32),
# Fully connected layers
b.DenseBundle(num_units=64, dropout=.5),
b.DenseBundle(num_units=64, dropout=.5),
b.SoftmaxBundle(num_units=model.output_dims)
]
return network_layers_def
def def_resnet(model):
import ibeis_cnn.__LASAGNE__ as lasange
from ibeis_cnn import custom_layers
print('[model] init_arch')
nonlinearity = lasange.nonlinearities.LeakyRectify(leakiness=(1. / 3.))
W = lasange.init.HeNormal(gain='relu')
#W = lasange.init.GlorotUniform()
bundles = custom_layers.make_bundles(
nonlinearity=nonlinearity,
filter_size=(3, 3), stride=(1, 1),
W=W, pool_size=(2, 2), pool_stride=(2, 2)
)
b = ut.DynStruct(copy_dict=bundles)
network_layers_def = [
b.InputBundle(shape=model.input_shape, noise=False),
# Convolutional layers
b.ConvBundle(num_filters=16, pool=False),
b.ResidualBundle(num_filters=16, stride=(2, 2), preactivate=False),
b.ResidualBundle(num_filters=16),
b.ResidualBundle(num_filters=16, stride=(2, 2)),
b.ResidualBundle(num_filters=16),
b.ResidualBundle(num_filters=16, stride=(2, 2)),
b.ResidualBundle(num_filters=16),
b.ResidualBundle(num_filters=16, stride=(2, 2)),
b.ResidualBundle(num_filters=16, dropout=None),
b.ResidualBundle(num_filters=16, stride=(2, 2), dropout=.5),
b.ResidualBundle(num_filters=16, postactivate=True, dropout=.5),
# Fully connected layers
b.GlobalPool(),
b.SoftmaxBundle(num_units=model.output_dims)
]
return network_layers_def
def def_inception(model):
import ibeis_cnn.__LASAGNE__ as lasange
from ibeis_cnn import custom_layers
print('[model] init_arch')
N = 16
# Define default inception branch types
incep_branches = [
dict(t='c', s=(1, 1), r=0, n=N),
dict(t='c', s=(3, 3), r=N // 2, n=N // 2),
dict(t='c', s=(3, 3), r=N // 4, n=N // 4, d=2),
dict(t='p', s=(3, 3), n=N // 2)
]
lrelu = lasange.nonlinearities.LeakyRectify(leakiness=(1. / 3.))
W = lasange.init.Orthogonal('relu')
bundles = custom_layers.make_bundles(
nonlinearity=lrelu, batch_norm=True,
filter_size=(3, 3), stride=(1, 1),
pool_size=(3, 3), pool_stride=(2, 2),
branches=incep_branches,
W=W,
)
b = ut.DynStruct(copy_dict=bundles)
network_layers_def = [
# Convolutional layers
b.InputBundle(shape=model.input_shape, noise=False),
b.ConvBundle(num_filters=16, filter_size=(3, 3), pool=False),
b.ConvBundle(num_filters=12, filter_size=(3, 3), pool=True),
b.InceptionBundle(dropout=.3, pool=True),
b.InceptionBundle(dropout=.3, pool=True),
b.InceptionBundle(dropout=.4, pool=True),
b.InceptionBundle(dropout=.5,
branches=[
dict(t='c', s=(1, 1), r=0, n=model.output_dims),
dict(t='c', s=(3, 3), r=N // 2, n=model.output_dims),
dict(t='c', s=(3, 3), r=N // 4, n=model.output_dims, d=2),
dict(t='p', s=(3, 3), n=model.output_dims)
]),
b.GlobalPool(),
b.SoftmaxBundle(num_units=model.output_dims)
# Fully connected layers
#b.DenseBundle(num_units=64, dropout=.5),
#b.DenseBundle(num_units=64, dropout=.5),
]
return network_layers_def
def init_arch(model, verbose=ut.VERBOSE, **kwargs):
r"""
CommandLine:
python -m ibeis.scripts.classify_shark WhaleSharkInjuryModel.init_arch
python -m ibeis.scripts.classify_shark WhaleSharkInjuryModel.init_arch --show
python -m ibeis.scripts.classify_shark shark_net --dry --show
python -m ibeis.scripts.classify_shark shark_net --vd
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.classify_shark import * # NOQA
>>> verbose = True
>>> data_shape = tuple(ut.get_argval('--datashape', type_=list,
>>> default=(224, 224, 3)))
>>> model = WhaleSharkInjuryModel(batch_size=64, output_dims=2,
>>> data_shape=data_shape)
>>> model.init_arch()
>>> model.print_model_info_str()
>>> ut.quit_if_noshow()
>>> model.show_arch(fullinfo=False)
>>> ut.show_if_requested()
"""
from ibeis_cnn import custom_layers
#if ut.get_computer_name() == 'Leviathan':
if model.name.endswith('incep'):
network_layers_def = model.def_inception()
elif model.name.endswith('lenet'):
network_layers_def = model.def_lenet()
elif model.name.endswith('resnet'):
network_layers_def = model.def_resnet()
else:
raise ValueError('Unknown architecture suffix in model name %r' % (model.name,))
network_layers = custom_layers.evaluate_layer_list(
network_layers_def)
#model.network_layers = network_layers
output_layer = network_layers[-1]
model.output_layer = output_layer
return output_layer
def special_output():
pass
#def special_loss_function(output_activations):
# output_injur1 = output_activations[:, 0]
# output_injur2 = output_activations[:, 1]
# output_healthy = (1 - ((1 - output_injur1) * (1 - output_injur2))
# import ibeis_cnn.__LASAGNE__ as lasange
# lasange.objectives.binary_crossentropy(output_injur1)
# lasange.objectives.binary_crossentropy(output_injur2)
def augment(self, Xb, yb=None):
"""
X_valid, y_valid = dataset.subset('valid')
num = 10
Xb = X_valid[:num]
Xb = Xb / 255.0 if ut.is_int(Xb) else Xb
Xb = Xb.astype(np.float32, copy=True)
yb = None if yb is None else yb.astype(np.int32, copy=True)
# Rescale the batch data to the range 0 to 1
Xb_, yb_ = model.augment(Xb)
yb_ = None
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> pt.qt4ensure()
>>> from ibeis_cnn import augment
>>> augment.show_augmented_patches(Xb, Xb_, yb, yb_, data_per_label=1)
>>> ut.show_if_requested()
"""
from ibeis_cnn import augment
rng = np.random
affperterb_ranges = dict(
zoom_range=(1.3, 1.2),
max_tx=2,
max_ty=2,
max_shear=ut.TAU / 32,
max_theta=ut.TAU,
enable_stretch=True,
enable_flip=True,
)
Xb_, yb_ = augment.augment_affine(
Xb, yb, rng=rng, inplace=True, data_per_label=1,
affperterb_ranges=affperterb_ranges,
aug_prop=.5,
)
return Xb_, yb_
#def fit_interactive(X_train, y_train, X_valid, y_valid):
# pass
def get_shark_dataset(target_type='binary', data_type='chip'):
"""
>>> from ibeis.scripts.classify_shark import * # NOQA
>>> target_type = 'binary'
>>> data_type = 'hog'
>>> dataset = get_shark_dataset(target_type)
"""
from ibeis_cnn.dataset import DataSet
from ibeis.scripts import classify_shark
tup = classify_shark.get_shark_labels_and_metadata(target_type)
ibs, annots, target, target_names, config, metadata, enc = tup
data_shape = config['dim_size'] + (3,)
length = len(annots)
# Build dataset configuration string
trail_cfgstr = ibs.depc_annot.get_config_trail_str('chips', config)
trail_hashstr = ut.hashstr27(trail_cfgstr)
visual_uuids = annots.visual_uuids
metadata['visual_uuid'] = np.array(visual_uuids)
#metadata['nids'] = np.array(annots.nids)
chips_hashstr = ut.hashstr_arr27(annots.visual_uuids, 'chips')
cfgstr = chips_hashstr + '_' + trail_hashstr
name = 'injur-shark'
if data_type == 'hog':
cfgstr = 'hog_' + cfgstr
name += '-hog'
training_dpath = ibs.get_neuralnet_dir()
dataset = DataSet(cfgstr,
data_shape=data_shape,
num_data=length,
training_dpath=training_dpath,
name=name)
print(dataset.dataset_id)
dataset.setprop('ibs', ibs)
dataset.setprop('annots', annots)
dataset.setprop('target_names', target_names)
dataset.setprop('config', config)
dataset.setprop('enc', enc)
try:
dataset.load()
except IOError:
import vtool_ibeis as vt
dataset.ensure_dirs()
if data_type == 'hog':
data = np.array([h.ravel() for h in annots.hog_hog])
labels = target
# Save data where dataset expects it to be
dataset.save(data, labels, metadata, data_per_label=1)
else:
chip_gen = ibs.depc_annot.get('chips', annots.aids, 'img',
eager=False, config=config)
iter_ = iter(ut.ProgIter(chip_gen, length=length, lbl='load chip'))
shape = (length,) + data_shape
data = vt.fromiter_nd(iter_, shape=shape, dtype=np.uint8) # NOQA
labels = target
# Save data where dataset expects it to be
dataset.save(data, labels, metadata, data_per_label=1)
from ibeis_cnn.dataset import stratified_label_shuffle_split
if not dataset.has_split('learn'):
nids = np.array(dataset.metadata['nids'])
# Partition into a testing and training dataset
y = dataset.labels
train_idx, test_idx = stratified_label_shuffle_split(
y, nids, [.8, .2], rng=22019)
nids_train = nids.take(train_idx, axis=0)
y_train = y.take(train_idx, axis=0)
# Partition training into learning and validation
learn_idx, valid_idx = stratified_label_shuffle_split(
y_train, nids_train,
[.8, .2], y_idx=train_idx, rng=90120)
assert len(np.intersect1d(learn_idx, test_idx)) == 0
assert len(np.intersect1d(valid_idx, test_idx)) == 0
assert len(np.intersect1d(learn_idx, valid_idx)) == 0
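# The asserts above guarantee the learn/valid/test splits are disjoint; judging
# by the grouping on nids (and the analogous stratified_kfold_label_split used
# in ClfProblem below), the split also keeps all annotations of one individual
# in the same subset.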
if data_type == 'hog':
dataset.add_split('train', train_idx)
dataset.add_split('test', test_idx)
dataset.add_split('learn', learn_idx)
dataset.add_split('valid', valid_idx)
dataset.clear_cache('full')
if data_type == 'hog':
# hack
y = dataset.labels
nids = np.array(dataset.metadata['nids'])
train_idx, test_idx = stratified_label_shuffle_split(
y, nids, [.8, .2], rng=22019)
nids_train = nids.take(train_idx, axis=0)
y_train = y.take(train_idx, axis=0)
# Partition training into learning and validation
learn_idx, valid_idx = stratified_label_shuffle_split(
y_train, nids_train,
[.8, .2], y_idx=train_idx, rng=90120)
dataset._split_idxs = {}
dataset._split_idxs['learn'] = learn_idx
dataset._split_idxs['valid'] = valid_idx
dataset._split_idxs['train'] = train_idx
dataset._split_idxs['test'] = test_idx
dataset.ensure_symlinked()
return dataset
def get_shark_labels_and_metadata(target_type=None, ibs=None, config=None):
"""
>>> from ibeis.scripts.classify_shark import * # NOQA
>>> target_type = 'multiclass3'
>>> data_type = 'hog'
"""
import ibeis
if ibs is None:
ibs = ibeis.opendb('WS_ALL')
if config is None:
config = {
#'dim_size': (256, 256),
'dim_size': (224, 224),
'resize_dim': 'wh'
}
all_annots = ibs.annots(config=config)
isempty = ut.not_list(ut.lmap(len, ibs.images().aids))
#if False:
# x = ibs.images().compress(isempty)
num_empty_images = sum(isempty)
print('Images without annotations: %r' % (num_empty_images,))
print('Building labels for %r annotations from %r images' % (
len(all_annots), len(ut.unique(all_annots.gids))))
TARGET_TYPE = 'binary'
#TARGET_TYPE = 'multiclass3'
if target_type is None:
target_type = TARGET_TYPE
from ibeis.scripts import getshark
category_tags = getshark.get_injur_categories(all_annots)
print('Base Category Tags')
print(ut.repr3(ut.dict_hist(ut.flatten(category_tags))))
print('Base Co-Occurrence Freq')
co_occur1 = ut.tag_coocurrence(category_tags)
print(ut.repr3(co_occur1))
ntags_list = np.array(ut.lmap(len, category_tags))
is_no_tag = ntags_list == 0
is_single_tag = ntags_list == 1
is_multi_tag = ntags_list > 1
if target_type == 'binary':
regex_map = [
('injur-.*', 'injured'),
('healthy', 'healthy'),
]
elif target_type == 'multiclass3':
regex_map = [
('injur-trunc', 'injur-trunc'),
('injur-nicks', 'injur-trunc'),
('injur-scar', 'injur-scar'),
('injur-bite', 'injur-scar'),
('injur-gill', 'injur-scar'),
('injur-other', None),
('injur-dead', None),
('healthy', 'healthy'),
]
elif target_type == 'multiclassX':
regex_map = [
('injur-trunc', 'injur-trunc'),
('healthy', 'healthy'),
('injur-.*', None),
]
else:
raise ValueError('Unknown target_type=%r' % (target_type,))
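# Under 'multiclass3', for example, the alias map built below sends
# 'injur-nicks' to 'injur-trunc', 'injur-bite' and 'injur-gill' to 'injur-scar',
# and drops 'injur-other' / 'injur-dead' (mapped to None).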
tag_vocab = ut.flat_unique(*category_tags)
alias_map = ut.build_alias_map(regex_map, tag_vocab)
unmapped = list(set(tag_vocab) - set(alias_map.keys()))
print('unmapped = %r' % (unmapped,))
category_tags2 = ut.alias_tags(category_tags, alias_map)
ntags_list = np.array(ut.lmap(len, category_tags2))
is_no_tag = ntags_list == 0
is_single_tag = ntags_list == 1
is_multi_tag = ntags_list > 1
print('Cleaned tags')
hist = ut.tag_hist(category_tags2)
print(ut.repr3(hist))
# Get tag co-occurrence
print('Co-Occurrence Freq')
co_occur = ut.tag_coocurrence(category_tags2)
print(ut.repr3(co_occur))
print('Co-Occurrence Percent')
co_occur_percent = ut.odict([(keys, [100 * val / hist[k] for k in keys]) for
keys, val in co_occur.items()])
print(ut.repr3(co_occur_percent, precision=2, nl=1))
multi_annots = all_annots.compress(is_multi_tag) # NOQA
#ibs.set_image_imagesettext(multi_annots.gids, ['MultiTaged'] * is_multi_tag.sum())
print('can\'t use %r annots due to no labels' % (is_no_tag.sum(),))
print('can\'t use %r annots due to inconsistent labels' % (is_multi_tag.sum(),))
print('will use %r annots with consistent labels' % (is_single_tag.sum(),))
annot_tags = ut.compress(category_tags2, is_single_tag)
annots = all_annots.compress(is_single_tag)
annot_tag_hist = ut.dict_hist(ut.flatten(annot_tags))
print('Final Annot Tags')
print(ut.repr3(annot_tag_hist))
# target_names = ['healthy', 'injured']
enc = preprocessing.LabelEncoder()
enc.fit(ut.unique(ut.flatten(annot_tags)))
target = enc.transform(ut.flatten(annot_tags))
target_names = enc.classes_
metadata = {
'aids': np.array(annots.aids),
'nids': np.array(annots.nids),
}
tup = ibs, annots, target, target_names, config, metadata, enc
return tup
# @ut.reloadable_class
class ClfProblem(object):
""" Harness for researching a classification problem """
def __init__(problem, ds):
problem.ds = ds
def print_support_info(problem):
enc = problem.ds.enc
target_labels = enc.inverse_transform(problem.ds.target)
label_hist = ut.dict_hist(target_labels)
print('support hist' + ut.repr3(label_hist))
def fit_new_classifier(problem, train_idx):
"""
References:
http://leon.bottou.org/research/stochastic
http://blog.explainmydata.com/2012/06/ntrain-24853-ntest-25147-ncorrupt.html
http://scikit-learn.org/stable/modules/svm.html#svm-classification
http://scikit-learn.org/stable/modules/grid_search.html
"""
print('[problem] train classifier on %d data points' % (len(train_idx)))
data = problem.ds.data
target = problem.ds.target
x_train = data.take(train_idx, axis=0)
y_train = target.take(train_idx, axis=0)
clf = sklearn.svm.SVC(kernel=str('linear'), C=.17, class_weight='balanced',
decision_function_shape='ovr')
# C, penalty, loss
#param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
# 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
#param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
# 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
#clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
#clf = clf.fit(X_train_pca, y_train)
clf.fit(x_train, y_train)
return clf
def fit_new_linear_svm(problem, train_idx):
print('[problem] train classifier on %d data points' % (len(train_idx)))
data = problem.ds.data
target = problem.ds.target
x_train = data.take(train_idx, axis=0)
y_train = target.take(train_idx, axis=0)
clf = sklearn.svm.SVC(kernel=str('linear'), C=.17, class_weight='balanced',
decision_function_shape='ovr')
clf.fit(x_train, y_train)
def gridsearch_linear_svm_params(problem, train_idx):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.classify_shark import * # NOQA
>>> from ibeis.scripts import classify_shark
>>> ds = classify_shark.get_sharks_dataset('binary')
>>> problem = classify_shark.ClfProblem(ds)
>>> problem.print_support_info()
"""
try:
import sklearn.model_selection
except ImportError:
pass
import sklearn.grid_search
with ut.Timer('cv'):
data = problem.ds.data
target = problem.ds.target
def stratified_sample_idxs_unbalanced(target, size=1000):
rng = np.random.RandomState(43)
sample = []
for label in np.unique(target):
target_idxs = np.where(target == label)[0]
subset_size = size
rand_idx = ut.random_indexes(len(target_idxs), subset_size, rng=rng)
sample_idx = ut.take(target_idxs, rand_idx)
sample.append(sample_idx)
sample_idx = np.array(sorted(ut.flatten(sample)))
return sample_idx
train_idx = stratified_sample_idxs_unbalanced(target, 4000)
x_train = data.take(train_idx, axis=0)
y_train = target.take(train_idx, axis=0)
param_grid = {
#'C': [1, .5, .1, 5, 10, 100],
#'C': [1, 1e-1, 1e-2, 1e-3]
#'C': [1, 1e-1, 1e-2, 1e-3]
#'C': np.linspace(1, 1e-5, 15)
#'C': np.linspace(.2, 1e-5, 15)
#'C': np.logspace(np.log10(1e-3), np.log10(.1), 30, base=10)
#'C': np.linspace(.1, .3, 20),
#'C': np.linspace(1.0, .22, 20),
'C': np.linspace(.25, .01, 40),
#'loss': ['l2', 'l1'],
#'penalty': ['l2', 'l1'],
}
_clf = sklearn.svm.SVC(kernel=str('linear'), C=.17, class_weight='balanced',
decision_function_shape='ovr')
clf = sklearn.grid_search.GridSearchCV(_clf, param_grid, n_jobs=6,
iid=False, cv=5, verbose=3)
clf.fit(x_train, y_train)
#(NOTE grid.predict only uses the best estimator)
print('clf.best_params_ = %r' % (clf.best_params_,))
print("Best parameters set found on development set:")
print(clf.best_params_)
print("Grid scores on development set:")
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
xdata = np.array([t[0]['C'] for t in clf.grid_scores_])
ydata = np.array([t[1] for t in clf.grid_scores_])
y_data_std = np.array([t[2].std() for t in clf.grid_scores_])
ydata_mean = ydata
y_data_max = ydata_mean + y_data_std
y_data_min = ydata_mean - y_data_std
#pt.plot(xdata, ydata, '-rx')
import plottool_ibeis as pt
pt.figure(fnum=pt.ensure_fnum(None))
ax = pt.gca()
ax.fill_between(xdata, y_data_min, y_data_max, alpha=.2, color=pt.LIGHT_BLUE)
pt.draw_hist_subbin_maxima(ydata, xdata)
#y_data_std = np.array([t[2].std() for t in grid.grid_scores_])
#ydata_mean = c_ydata
#y_data_max = ydata_mean + y_data_std
#y_data_min = ydata_mean - y_data_std
##import plottool_ibeis as pt
##pt.figure(fnum=pt.ensure_fnum(None))
#ax = pt.gca()
#ax.fill_between(c_xdata, c_ydata, y_data_max, alpha=.2, color=pt.LIGHT_BLUE)
#ax.fill_between(c_xdata, c_ydata, y_data_min, alpha=.2, color=pt.LIGHT_BLUE)
##pt.figure(fnum=pt.ensure_fnum(None))
#hist = c_ydata
#centers = c_xdata
#pt.draw_hist_subbin_maxima(c_ydata, c_xdata, maxima_thresh=None, remove_endpoints=False)
#clf.best_params_ = {u'C': 0.07143785714285722}
#Best parameters set found on development set:
#{u'C': 0.07143785714285722}
#Grid scores on development set:
#0.729 (+/-0.016) for {u'C': 1.0}
#0.729 (+/-0.019) for {u'C': 0.92857214285714285}
#0.733 (+/-0.017) for {u'C': 0.85714428571428569}
#0.734 (+/-0.015) for {u'C': 0.78571642857142865}
#0.736 (+/-0.016) for {u'C': 0.71428857142857138}
#0.739 (+/-0.020) for {u'C': 0.64286071428571434}
#0.742 (+/-0.020) for {u'C': 0.57143285714285719}
#0.743 (+/-0.021) for {u'C': 0.50000500000000003}
#0.746 (+/-0.023) for {u'C': 0.42857714285714288}
#0.749 (+/-0.023) for {u'C': 0.35714928571428572}
#0.755 (+/-0.025) for {u'C': 0.28572142857142857}
#0.760 (+/-0.027) for {u'C': 0.21429357142857142}
#0.762 (+/-0.025) for {u'C': 0.14286571428571437}
#0.770 (+/-0.036) for {u'C': 0.07143785714285722}
#0.664 (+/-0.031) for {u'C': 1.0000000000000001e-05}
#0.774 (+/-0.039) for {u'C': 0.017433288221999882}
#0.775 (+/-0.039) for {u'C': 0.020433597178569417}
#0.774 (+/-0.039) for {u'C': 0.023950266199874861}
#0.777 (+/-0.038) for {u'C': 0.02807216203941177}
#0.775 (+/-0.036) for {u'C': 0.032903445623126679}
#0.773 (+/-0.033) for {u'C': 0.038566204211634723}
#0.722 (+/-0.060) for {u'C': 0.001}
#0.770 (+/-0.047) for {u'C': 0.01}
#0.775 (+/-0.047) for {u'C': 0.1}
#0.774 (+/-0.047) for {u'C': 0.12}
#0.773 (+/-0.045) for {u'C': 0.15}
#0.773 (+/-0.046) for {u'C': 0.17}
#0.772 (+/-0.047) for {u'C': 0.2}
#0.760 (+/-0.043) for {u'C': 0.5}
#0.748 (+/-0.043) for {u'C': 1.0}
#0.707 (+/-0.043) for {u'C': 100}
#0.702 (+/-0.047) for {u'C': 1000}
def classifier_test(problem, clf, test_idx):
print('[problem] test classifier on %d data points' % (len(test_idx),))
data = problem.ds.data
target = problem.ds.target
X_test = data.take(test_idx, axis=0)
y_true = target.take(test_idx, axis=0)
y_conf = predict_svc_ovr(clf, X_test)
y_pred = y_conf.argmax(axis=1)
result = ClfSingleResult(problem.ds, test_idx, y_true, y_pred, y_conf)
return result
def stratified_2sample_idxs(problem, frac=.2, split_frac=.75):
target = problem.ds.target
target_labels = problem.ds.target_labels
rng = np.random.RandomState(43)
train_sample = []
test_sample = []
for label in target_labels:
target_idxs = np.where(target == label)[0]
subset_size = int(len(target_idxs) * frac)
rand_idx = ut.random_indexes(len(target_idxs), subset_size, rng=rng)
sample_idx = ut.take(target_idxs, rand_idx)
split = int(len(sample_idx) * split_frac)
train_sample.append(sample_idx[split:])
test_sample.append(sample_idx[:split])
train_idx = np.array(sorted(ut.flatten(train_sample)))
test_idx = np.array(sorted(ut.flatten(test_sample)))
return train_idx, test_idx
def gen_crossval_idxs(problem, n_folds=2):
y = problem.ds.target
rng = 43432
if hasattr(problem.ds, 'nids'):
# Ensure that an individual does not appear in both the train
# and the test dataset
from ibeis_cnn.dataset import stratified_kfold_label_split
labels = problem.ds.nids
_iter = stratified_kfold_label_split(y, labels, n_folds=n_folds, rng=rng)
else:
xvalkw = dict(n_folds=n_folds, shuffle=True, random_state=rng)
import sklearn.cross_validation
skf = sklearn.cross_validation.StratifiedKFold(y, **xvalkw)
_iter = skf
#import sklearn.model_selection
#skf = sklearn.model_selection.StratifiedKFold(**xvalkw)
#_iter = skf.split(X=np.empty(len(y)), y=y)
msg = 'cross-val test on %s' % (problem.ds.name)
progiter = ut.ProgIter(_iter, length=n_folds, lbl=msg)
for train_idx, test_idx in progiter:
yield train_idx, test_idx
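# Illustrative sketch (not called anywhere): how the helpers above are meant to
# compose into a cross-validation loop. It assumes `problem` is a ClfProblem
# built from one of the dataset helpers in this module; nothing here is wired
# into the command-line entry points.
def _demo_crossval_sketch(problem, n_folds=2):
    result_list = []
    for train_idx, test_idx in gen_crossval_idxs(problem, n_folds=n_folds):
        # fit a fresh linear SVM on this fold and evaluate on the held-out part
        clf = fit_new_linear_svm(problem, train_idx)
        result = classifier_test(problem, clf, test_idx)
        result.compile_results()
        result_list.append(result)
    for result in result_list:
        result.print_report()
    return result_list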
# @ut.reloadable_class
class ClfSingleResult(object):
r"""
Reports the results of a classification problem
Example:
>>> # DISABLE_DOCTEST
>>> result = ClfSingleResult()
"""
def __init__(result, ds=None, test_idx=None, y_true=None, y_pred=None, y_conf=None):
result.ds = ds
result.test_idx = test_idx
result.y_true = y_true
result.y_pred = y_pred
result.y_conf = y_conf
def compile_results(result):
import pandas as pd
y_true = result.y_true
y_pred = result.y_pred
y_conf = result.y_conf
test_idx = result.test_idx
index = pd.Series(test_idx, name='test_idx')
if len(result.ds.target_names) == 1:
y_conf  # NOTE: no-op; the single-class case is not handled specially here
decision = pd.DataFrame(y_conf, index=index, columns=result.ds.target_names)
result.decision = decision / 3
easiness = np.array(ut.ziptake(result.decision.values, y_true))
columns = ['pred', 'easiness']
column_data = [y_pred, easiness]
data = dict(zip(columns, column_data))
result.df = pd.DataFrame(data, index, columns)
def print_report(result):
report = sklearn.metrics.classification_report(
result.y_true, result.y_pred,
target_names=result.ds.target_names)
print(report)
def get_model_state(clf):
"""Collect the fitted (trailing-underscore) attributes of a classifier into a dict."""
model_attr_names = [
a for a in dir(clf)
if a.endswith('_') and not a.startswith('__')
]
model_state = {a: getattr(clf, a)
for a in model_attr_names}
return model_state
def set_model_state(clf, model_state):
"""Restore fitted attributes onto clf, setting dual_coef_ first and coef_ last."""
attr_names = sorted(model_state.keys())
attr_names1 = [
'dual_coef_',
]
attr_names2 = [
'coef_',
]
attr_names3 = attr_names1 + attr_names2
attr_namesA = ut.isect(attr_names1, attr_names)
attr_namesB = ut.setdiff(attr_names, attr_names3)
attr_namesC = ut.isect(attr_names2, attr_names)
attr_names_ = attr_namesA + attr_namesB + attr_namesC
for a in attr_names_:
val = model_state[a]
print('a = %r' % (a,))
try:
setattr(clf, a, val)
except AttributeError:
val2 = getattr(clf, a)
assert np.all(val == val2)
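# Hedged sketch (not called anywhere): round-trips the fitted attributes of a
# small SVC through get_model_state / set_model_state. It assumes the older
# scikit-learn API this module targets (the sklearn.grid_search era); the toy
# data below is made up for illustration.
def _demo_model_state_roundtrip():
    import numpy as np
    import sklearn.svm
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    y = (X[:, 0] > .5).astype(int)
    clf = sklearn.svm.SVC(kernel='linear', C=.17, decision_function_shape='ovr')
    clf.fit(X, y)
    state = get_model_state(clf)
    assert 'dual_coef_' in state and 'support_vectors_' in state
    # restoring the captured state onto the already-fitted estimator is a no-op
    set_model_state(clf, state)
    return sorted(state.keys())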
def predict_svc_ovr(clf, data):
if len(clf.classes_) == 2:
X = clf._validate_for_predict(data)
X = clf._compute_kernel(X)
_dec2 = clf._dense_decision_function(X)
dec2 = -_dec2
n_samples = dec2.shape[0]
n_classes = len(clf.classes_)
final = np.zeros((n_samples, n_classes))
confidence_max = max(np.abs(dec2.max()), np.abs(dec2.min()))
norm_conf = ((dec2.T[0] / confidence_max) + 1) / 2
final.T[0] = 1 - norm_conf
final.T[1] = norm_conf
# output comparable to multiclass version
y_conf = final
else:
# Get notion of confidence / probability of decision
y_conf = clf.decision_function(data)
return y_conf
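# A minimal numpy-only sketch of the normalization used in the binary branch of
# predict_svc_ovr above: signed decision values are rescaled by the largest
# absolute value and mapped into two complementary columns in [0, 1]. The toy
# decision values below are made up for illustration.
def _demo_binary_decision_to_conf():
    import numpy as np
    dec = np.array([-1.5, -0.2, 0.0, 0.9, 3.0])  # hypothetical decision values
    dec2 = -dec                                  # flip the sign, as predict_svc_ovr does
    confidence_max = max(np.abs(dec2.max()), np.abs(dec2.min()))
    norm_conf = ((dec2 / confidence_max) + 1) / 2
    final = np.column_stack([1 - norm_conf, norm_conf])
    assert np.allclose(final.sum(axis=1), 1.0)   # the two columns are complementary
    return final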
def predict_ws_injury_interim_svm(ibs, aids):
"""
Returns relative confidence
"""
config = {
#'dim_size': (256, 256),
'dim_size': (224, 224),
'resize_dim': 'wh'
}
# Load the SVM
model_fname = 'interim_svc_injur-shark-hog_12559_224x224x3_ldhhxnxo.cPkl'
model_url = 'https://lev.cs.rpi.edu/public/models/{}'.format(model_fname)
model_fpath = ut.grab_file_url(model_url, check_hash=False)
clf = ut.load_cPkl(model_fpath)
annots = ibs.annots(aids, config=config)
data = np.array([h.ravel() for h in annots.hog_hog])
target_names = ['healthy', 'injured']
# confidence = clf.decision_function(data)
# y_conf = predict_svc_ovr(clf, data)
y_pred = clf.predict(data)
pred_nice = ut.take(target_names, y_pred)
return pred_nice
def shark_svm():
r"""
References:
http://scikit-learn.org/stable/model_selection.html
TODO:
* Change unreviewed healthy tags to healthy-likely
CommandLine:
python -m ibeis.scripts.classify_shark shark_svm --show
python -m ibeis.scripts.classify_shark shark_svm
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.classify_shark import * # NOQA
>>> shark_svm()
>>> ut.show_if_requested()
"""
from ibeis.scripts import classify_shark
target_type = 'binary'
#target_type = 'multiclass3'
#dataset = classify_shark.get_shark_dataset(target_type)
ds = classify_shark.get_shark_dataset(target_type, 'hog')
# Make resemble old dataset
# FIXME; make ibeis_cnn dataset work here too
#annots = ds.getprop('annots')
ds.enc = ds.getprop('enc')
ds.aids = ds.getprop('annots').aids
ds.nids = ds.getprop('annots').nids
ds.target = ds.labels
ds.target_names = ds.getprop('target_names')
ds.target_labels = ds.enc.transform(ds.target_names)
ds.ibs = ds.getprop('ibs')
ds.config = ds.getprop('config')
problem = classify_shark.ClfProblem(ds)
problem.print_support_info()
BUILD_RELEASE_MODEL = False
if BUILD_RELEASE_MODEL:
clf = sklearn.svm.SVC(kernel=str('linear'), C=.17,
class_weight='balanced',
decision_function_shape='ovr',
verbose=10)
clf.fit(ds.data, ds.target)
model_fname = 'interim_svc_{}.cPkl'.format(ds.dataset_id)
model_dpath = ut.ensuredir((ds.dataset_dpath, 'svms'))
model_fpath = join(model_dpath, model_fname)
ut.save_cPkl(model_fpath, clf)
"""
TO PUBLISH
scp clf to lev:/media/hdd/PUBLIC/models
run script lev:/media/hdd/PUBLIC/hash.py to refresh hashes
"""
user = ut.get_user_name()
host = 'lev.cs.rpi.edu'
remote_path = '/media/hdd/PUBLIC/models/' + model_fname
remote_uri = user + '@' + host + ':' + remote_path
ut.rsync(model_fpath, remote_uri)
command = 'python /media/hdd/PUBLIC/hash.py'
ut.cmd('ssh {user}@{host} "{command}"'.format(user=user, host=host,
command=command))
model_dpath = ut.ensuredir((ds.dataset_dpath, 'svms'))
#n_folds = 10
n_folds = 10
#ensemble_dpath = ut.ensuredir((model_dpath, 'svms_%d_fold' % (n_folds,)))
train_idx = ds._split_idxs['train']
test_idx = ds._split_idxs['test']
y_train = ds.target.take(train_idx)
nids_train = ut.take(ds.nids, train_idx)
# Ensure that an individual does not appear in both train and test
#_iter = stratified_kfold_label_split(y_train, nids_train, y_idx=train_idx,
# n_folds=n_folds, rng=rng)
class MyLabelCV(object):
def __init__(self, y_train, nids_train, n_folds):
self.nids_train = nids_train
self.y_train = y_train
self.n_folds = n_folds
def __len__(self):
return self.n_folds
def __iter__(self):
from ibeis_cnn.dataset import stratified_kfold_label_split
rng = 1809629827
for _ in stratified_kfold_label_split(self.y_train, self.nids_train,
n_folds=self.n_folds, rng=rng):
yield _
clf_fpath = join(model_dpath, '%s_svc_folds_%s.cPkl' % (target_type, n_folds))
if not ut.checkpath(clf_fpath):
"""
Curate strategy:
Use gridsearch to select a reasonable C=.17
Then train 10 classifiers with 10 split cross validation.
This lets us make an "unbias" prediction for each training example.
Look at predictions for all training examples (predict using only
classifiers not trained with that point).
Look at worst worst performing examples.
Fix any errors that occur.
Now that the database is better, we learn the actual model.
Learning strategy:
* Set aside a set test.
* The remaining data is the training set.
* Run Gridsearch with N-fold cross validation on training set to
look at performance given different hyperparameters of the SVM.
* Use quadratic interpolation to select a "best" parameter.
(NOTE grid.predict only uses the best estimator (however it is a refit estimator))
Train a single SVM using these parameters on all training data.
Evaluate this SVM on the test set.
"""
C = None
if C is None:
import sklearn
import sklearn.grid_search
import sklearn.svm
# C controls the softness of the margin of the separating hyperplane.
# Smaller C = larger (softer) margin.
# So, the larger C is, the less willing the SVM will be to get
# training examples wrong.
param_grid = {
#'C': np.linspace(.1, .2, 10),
'C': [.0001, .001, .005, .01, .08, .1, .12, .15, .17, .2, .22, .5, 1.0, 100, 1000, 10000]
#'C': np.linspace(.1, .2, 3),
}
clf = sklearn.svm.SVC(kernel=str('linear'), C=.17, class_weight='balanced',
decision_function_shape='ovr')
cv = MyLabelCV(y_train, nids_train, n_folds=n_folds)
grid = sklearn.grid_search.GridSearchCV(clf, param_grid=param_grid, cv=cv,
refit=False, n_jobs=min(n_folds, 6),
verbose=10)
x_train = ds.data.take(train_idx, axis=0)
y_train = ds.target.take(train_idx, axis=0)
grid.fit(x_train, y_train)
for params, mean_score, scores in grid.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
c_xdata = np.array([t[0]['C'] for t in grid.grid_scores_])
c_ydata = np.array([t[1] for t in grid.grid_scores_])
import vtool_ibeis as vt
#maxima_x, maxima_y, argmaxima = vt.hist_argmaxima(c_ydata, c_xdata, maxima_thresh=None)
submaxima_x, submaxima_y = vt.argsubmaxima(c_ydata, c_xdata)
#pt.draw_hist_subbin_maxima(c_ydata, c_xdata, maxima_thresh=None, remove_endpoints=False)
C = submaxima_x[0]
print('C = %r' % (C,))
else:
print('C = %r' % (C,))
clf_all = sklearn.svm.SVC(kernel=str('linear'), C=C, class_weight='balanced',
decision_function_shape='ovr', verbose=10)
X_train = ds.data.take(train_idx, axis=0)
clf_all.fit(X_train, y_train)
ut.save_data(clf_fpath, clf_all.__dict__)
clf = clf_all
else:
clf = sklearn.svm.SVC()
clf.__dict__.update(**ut.load_data(clf_fpath))
def classifier_test(clf, X_test, y_test):
print('[problem] test classifier on %d data points' % (len(X_test),))
y_conf = predict_svc_ovr(clf, X_test)
y_pred = y_conf.argmax(axis=1)
result = ClfSingleResult(problem.ds, test_idx, y_test, y_pred, y_conf)
return result
test_idx = ds._split_idxs['test']
X_test = ds.data.take(test_idx, axis=0)
y_test = ds.target.take(test_idx, axis=0)
result = classifier_test(clf, X_test, y_test)
result.compile_results()
result.print_report()
result_list = [result]
import pandas as pd
#import plottool_ibeis as pt
# Combine information from results
df = pd.concat([r.df for r in result_list])
df['hardness'] = 1 / df['easiness']
df['aid'] = ut.take(ds.aids, df.index)
df['target'] = ut.take(ds.target, df.index)
df['failed'] = df['pred'] != df['target']
report = sklearn.metrics.classification_report(
y_true=df['target'], y_pred=df['pred'],
target_names=result.ds.target_names)
print(report)
confusion = sklearn.metrics.confusion_matrix(df['target'], df['pred'])
print('Confusion Matrix:')
print(pd.DataFrame(confusion, columns=[m for m in result.ds.target_names],
index=['gt ' + m for m in result.ds.target_names]))
#inspect_results(ds, result_list)
if False:
if False:
#train_idx, test_idx = problem.stratified_2sample_idxs()
train_idx = ds._split_idxs['train']
test_idx = ds._split_idxs['test']
#import sklearn.metrics
#model_dpath = join(ds.dataset_dpath, 'svms')
#model_fpath = join(model_dpath, target_type + '_svc.cPkl')
#if ut.checkpath(model_fpath):
# clf = sklearn.svm.SVC(kernel=str('linear'), C=.17, class_weight='balanced',
# decision_function_shape='ovr')
# clf.__dict__.update(**ut.load_data(model_fpath))
#else:
# clf = problem.fit_new_classifier(train_idx)
# ut.ensuredir(model_dpath)
# ut.save_data(model_fpath, clf.__dict__)
result_list = []
result = problem.test_classifier(clf, test_idx)
result_list.append(result)
for result in result_list:
result.compile_results()
for result in result_list:
result.print_report()
inspect_results(ds, result_list)
if False:
result_list = []
result = problem.test_classifier(clf, train_idx)
result_list.append(result)
for result in result_list:
result.compile_results()
for result in result_list:
result.print_report()
inspect_results(ds, result_list)
if False:
result_list = []
# View support vectors
support_idxs = clf.support_
result = problem.test_classifier(clf, support_idxs)
result_list.append(result)
for result in result_list:
result.compile_results()
for result in result_list:
result.print_report()
inspect_results(ds, result_list)
def inspect_results(ds, result_list):
import pandas as pd
import plottool_ibeis as pt
pd.set_option("display.max_rows", 20)
pt.qt4ensure()
isect_sets = [set(s1).intersection(set(s2)) for s1, s2 in ut.combinations([
result.df.index for result in result_list], 2)]
assert all([len(s) == 0 for s in isect_sets]), ('cv sets should not intersect')
# Combine information from results
df = pd.concat([result.df for result in result_list])
df['hardness'] = 1 / df['easiness']
df['aid'] = ut.take(ds.aids, df.index)
df['target'] = ut.take(ds.target, df.index)
df['failed'] = df['pred'] != df['target']
report = sklearn.metrics.classification_report(
y_true=df['target'], y_pred=df['pred'],
target_names=result.ds.target_names)
print(report)
confusion = sklearn.metrics.confusion_matrix(df['target'], df['pred'])
print('Confusion Matrix:')
print(pd.DataFrame(confusion, columns=[m for m in result.ds.target_names],
index=['gt ' + m for m in result.ds.target_names]))
def target_partition(target):
df_chunk = df if target is None else df[df['target'] == target]
df_chunk = df_chunk.take(df_chunk['hardness'].argsort())
return df_chunk
def grab_subchunk(frac, n, target):
df_chunk = target_partition(target)
sl = ut.snapped_slice(len(df_chunk), frac, n)
print('sl = %r' % (sl,))
idx = df_chunk.index[sl]
df_chunk = df_chunk.loc[idx]
# the label reports the raw slice bounds (fractional bounds were unused)
min_frac = sl.start
max_frac = sl.stop
place_name = 'hardness=%.2f (%d-%d)' % (frac, min_frac, max_frac)
if target is not None:
df_chunk.nice = place_name + ' ' + ds.target_names[target]
else:
df_chunk.nice = place_name
return df_chunk
def grab_subchunk2(df_chunk, frac, n, target=None):
sl = ut.snapped_slice(len(df_chunk), frac, n)
print('sl = %r' % (sl,))
idx = df_chunk.index[sl]
df_chunk = df_chunk.loc[idx]
# the label reports the raw slice bounds (fractional bounds were unused)
min_frac = sl.start
max_frac = sl.stop
place_name = 'hardness=%.2f (%d-%d)' % (frac, min_frac, max_frac)
if target is not None:
df_chunk.nice = place_name + ' ' + ds.target_names[target]
else:
df_chunk.nice = place_name
return df_chunk
# Look at hardest train cases
# Look at hardest test cases
if True:
#n = 4
fracs = [0.0, .7, .8, .9, 1.0]
view_targets = ds.target_labels
n = 8 // len(view_targets)
else:
view_targets = [ut.listfind(ds.target_names.tolist(), 'healthy')]
#fracs = [0.0, .7, .8, .9, 1.0]
fracs = [0.45, .5, .55, .6, .62]
fracs = [0.72, .82, .84, .88]
fracs = [0.73, .83, .835, .89]
fracs = [0.735, .833, .837, .934]
fracs = [0.2, .65, .75, .85, .95]
fracs = [0.3, .4, .67, .77, .87, .92]
n = 8 // len(view_targets)
if False:
view_targets = [ut.listfind(ds.target_names.tolist(), 'healthy')]
target_dfs = [target_partition(target) for target in view_targets]
critical_points = [np.where(_df['failed'])[0][0] for _df in target_dfs]
critical_fracs = [_pt / len(_df) for _pt, _df in zip(critical_points, target_dfs)]
n = 8 * 5
frac = critical_fracs[0]
frac += .1
_df = target_dfs[0]
df_part = grab_subchunk2(_df, frac, n)
df_chunks = [df_part.iloc[x] for x in ut.ichunks(range(len(df_part)), 8)]
else:
df_chunks = [grab_subchunk(frac, n, target)
for frac in fracs for target in view_targets]
ibs = ds.ibs
config = ds.config
from ibeis_cnn import draw_results
inter = draw_results.make_InteractClasses(ibs, config, df_chunks,
nCols=len(view_targets))
inter.start()
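# Hedged sketch of the "quadratic interpolation to select a best parameter" step
# described in the strategy notes inside shark_svm above: fit a parabola to the
# CV score curve around its best grid point and take the vertex as the refined C.
# The real code uses vtool_ibeis.argsubmaxima; the grid and scores below are
# made-up numbers for illustration.
def _demo_quadratic_c_refinement():
    import numpy as np
    c_grid = np.array([0.05, 0.10, 0.17, 0.25, 0.50])
    scores = np.array([0.74, 0.77, 0.78, 0.76, 0.72])  # hypothetical CV means
    i = int(scores.argmax())
    lo, hi = max(i - 1, 0), min(i + 2, len(c_grid))
    a, b, c = np.polyfit(c_grid[lo:hi], scores[lo:hi], 2)
    refined_c = -b / (2 * a)                            # vertex of the parabola
    return refined_c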
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.scripts.classify_shark
python -m ibeis.scripts.classify_shark --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
|
import datetime
import re
from typing import Any, Dict, List, Mapping
from unittest import mock
import orjson
from django.conf import settings
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
do_change_plan_type,
do_change_realm_subdomain,
do_create_realm,
do_deactivate_realm,
do_deactivate_stream,
do_scrub_realm,
do_send_realm_reactivation_email,
do_set_realm_property,
)
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.send_email import send_future_email
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import reset_emails_in_zulip_realm, tornado_redirected_to_list
from zerver.models import (
Attachment,
CustomProfileField,
Message,
Realm,
ScheduledEmail,
UserMessage,
UserProfile,
get_realm,
get_stream,
get_user_profile_by_email,
get_user_profile_by_id,
)
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(self, user_profile: UserProfile,
new_realm_name: str) -> None:
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_realm_creation_ensures_internal_realms(self) -> None:
with mock.patch("zerver.lib.actions.server_initialized", return_value=False):
with mock.patch("zerver.lib.actions.create_internal_realm") as mock_create_internal, \
self.assertLogs(level='INFO') as info_logs:
do_create_realm("testrealm", "Test Realm")
mock_create_internal.assert_called_once()
self.assertEqual(info_logs.output, [
'INFO:root:Server not yet initialized. Creating the internal realm first.'
])
def test_do_set_realm_name_caching(self) -> None:
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
self.example_user('hamlet')
realm = get_realm('zulip')
new_name = 'Zed You Elle Eye Pea'
do_set_realm_property(realm, 'name', new_name)
self.assertEqual(get_realm(realm.string_id).name, new_name)
self.assert_user_profile_cache_gets_new_name(self.example_user('hamlet'), new_name)
def test_update_realm_name_events(self) -> None:
realm = get_realm('zulip')
new_name = 'Puliz'
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'name', new_name)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='name',
value=new_name,
))
def test_update_realm_description_events(self) -> None:
realm = get_realm('zulip')
new_description = 'zulip dev group'
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'description', new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_update_realm_description(self) -> None:
self.login('iago')
new_description = 'zulip dev group'
data = dict(description=orjson.dumps(new_description).decode())
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch('/json/realm', data)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.description, new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_realm_description_length(self) -> None:
new_description = 'A' * 1001
data = dict(description=orjson.dumps(new_description).decode())
# create an admin user
self.login('iago')
result = self.client_patch('/json/realm', data)
self.assert_json_error(result, 'Organization description is too long.')
realm = get_realm('zulip')
self.assertNotEqual(realm.description, new_description)
def test_realm_name_length(self) -> None:
new_name = 'A' * (Realm.MAX_REALM_NAME_LENGTH + 1)
data = dict(name=orjson.dumps(new_name).decode())
# create an admin user
self.login('iago')
result = self.client_patch('/json/realm', data)
self.assert_json_error(result, 'Organization name is too long.')
realm = get_realm('zulip')
self.assertNotEqual(realm.name, new_name)
def test_admin_restrictions_for_changing_realm_name(self) -> None:
new_name = 'Mice will play while the cat is away'
self.login('othello')
req = dict(name=orjson.dumps(new_name).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Must be an organization administrator')
def test_unauthorized_name_change(self) -> None:
data = {'full_name': 'Sir Hamlet'}
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
url = '/json/settings'
result = self.client_patch(url, data)
self.assertEqual(result.status_code, 200)
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
# Realm admins can change their name even when the setting is disabled.
data = {'full_name': 'New Iago'}
self.login('iago')
url = '/json/settings'
result = self.client_patch(url, data)
self.assert_in_success_response(['"full_name":"New Iago"'], result)
def test_do_deactivate_realm_clears_user_realm_cache(self) -> None:
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
self.example_user('hamlet')
realm = get_realm('zulip')
do_deactivate_realm(realm)
user = self.example_user('hamlet')
self.assertTrue(user.realm.deactivated)
def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None:
"""The main complicated thing about changing realm subdomains is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm's subdomain has been updated. You can make this test fail
by disabling cache.flush_realm()."""
user = get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip')
do_change_realm_subdomain(realm, "newzulip")
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(user.realm.string_id, "newzulip")
# This doesn't use a cache right now, but may later.
with self.assertRaises(Realm.DoesNotExist):
get_realm("zulip")
def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None:
user = self.example_user('hamlet')
send_future_email('zerver/emails/followup_day1', user.realm,
to_user_ids=[user.id], delay=datetime.timedelta(hours=1))
self.assertEqual(ScheduledEmail.objects.count(), 1)
do_deactivate_realm(user.realm)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_do_change_realm_description_clears_cached_descriptions(self) -> None:
realm = get_realm('zulip')
rendered_description = get_realm_rendered_description(realm)
text_description = get_realm_text_description(realm)
realm.description = 'New Description'
realm.save(update_fields=['description'])
new_rendered_description = get_realm_rendered_description(realm)
self.assertNotEqual(rendered_description, new_rendered_description)
self.assertIn(realm.description, new_rendered_description)
new_text_description = get_realm_text_description(realm)
self.assertNotEqual(text_description, new_text_description)
self.assertEqual(realm.description, new_text_description)
def test_do_deactivate_realm_on_deactivated_realm(self) -> None:
"""Ensure early exit is working in realm deactivation"""
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
def test_realm_reactivation_link(self) -> None:
realm = get_realm('zulip')
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
response = self.client_get(confirmation_url)
self.assert_in_success_response(['Your organization has been successfully reactivated'], response)
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
def test_realm_reactivation_confirmation_object(self) -> None:
realm = get_realm('zulip')
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
confirmation = Confirmation.objects.last()
self.assertEqual(confirmation.content_object, realm)
self.assertEqual(confirmation.realm, realm)
def test_do_send_realm_reactivation_email(self) -> None:
realm = get_realm('zulip')
do_send_realm_reactivation_email(realm)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
self.assertRegex(
outbox[0].from_email,
fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
)
self.assertIn('Reactivate your Zulip organization', outbox[0].subject)
self.assertIn('Dear former administrators', outbox[0].body)
admins = realm.get_human_admin_users()
confirmation_url = self.get_confirmation_url_from_outbox(admins[0].delivery_email)
response = self.client_get(confirmation_url)
self.assert_in_success_response(['Your organization has been successfully reactivated'], response)
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
def test_realm_reactivation_with_random_link(self) -> None:
random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b"
response = self.client_get(random_link)
self.assert_in_success_response(['The organization reactivation link has expired or is not valid.'], response)
def test_change_notifications_stream(self) -> None:
# We need an admin user.
self.login('iago')
disabled_notif_stream_id = -1
req = dict(notifications_stream_id = orjson.dumps(disabled_notif_stream_id).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.notifications_stream, None)
new_notif_stream_id = 4
req = dict(notifications_stream_id = orjson.dumps(new_notif_stream_id).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
assert realm.notifications_stream is not None
self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
invalid_notif_stream_id = 1234
req = dict(notifications_stream_id = orjson.dumps(invalid_notif_stream_id).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid stream id')
realm = get_realm('zulip')
assert realm.notifications_stream is not None
self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)
def test_get_default_notifications_stream(self) -> None:
realm = get_realm("zulip")
verona = get_stream("verona", realm)
realm.notifications_stream_id = verona.id
realm.save(update_fields=["notifications_stream"])
notifications_stream = realm.get_notifications_stream()
assert notifications_stream is not None
self.assertEqual(notifications_stream.id, verona.id)
do_deactivate_stream(notifications_stream)
self.assertIsNone(realm.get_notifications_stream())
def test_change_signup_notifications_stream(self) -> None:
# We need an admin user.
self.login('iago')
disabled_signup_notifications_stream_id = -1
req = dict(signup_notifications_stream_id = orjson.dumps(disabled_signup_notifications_stream_id).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.signup_notifications_stream, None)
new_signup_notifications_stream_id = 4
req = dict(signup_notifications_stream_id = orjson.dumps(new_signup_notifications_stream_id).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
assert realm.signup_notifications_stream is not None
self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id)
invalid_signup_notifications_stream_id = 1234
req = dict(signup_notifications_stream_id = orjson.dumps(invalid_signup_notifications_stream_id).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid stream id')
realm = get_realm('zulip')
assert realm.signup_notifications_stream is not None
self.assertNotEqual(realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id)
def test_get_default_signup_notifications_stream(self) -> None:
realm = get_realm("zulip")
verona = get_stream("verona", realm)
realm.signup_notifications_stream = verona
realm.save(update_fields=["signup_notifications_stream"])
signup_notifications_stream = realm.get_signup_notifications_stream()
assert signup_notifications_stream is not None
self.assertEqual(signup_notifications_stream, verona)
do_deactivate_stream(signup_notifications_stream)
self.assertIsNone(realm.get_signup_notifications_stream())
def test_change_realm_default_language(self) -> None:
new_lang = "de"
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, new_lang)
# we need an admin user.
self.login('iago')
req = dict(default_language=orjson.dumps(new_lang).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.default_language, new_lang)
# Test to make sure that when an invalid language is passed
# as the default realm language, the correct validation error is
# raised and the invalid language is not saved in the db
invalid_lang = "invalid_lang"
req = dict(default_language=orjson.dumps(invalid_lang).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, f"Invalid language '{invalid_lang}'")
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, invalid_lang)
def test_deactivate_realm_by_owner(self) -> None:
self.login('desdemona')
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
result = self.client_post('/json/realm/deactivate')
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertTrue(realm.deactivated)
def test_deactivate_realm_by_non_owner(self) -> None:
self.login('iago')
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
result = self.client_post('/json/realm/deactivate')
self.assert_json_error(result, "Must be an organization owner")
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
def test_change_bot_creation_policy(self) -> None:
# We need an admin user.
self.login('iago')
req = dict(bot_creation_policy = orjson.dumps(Realm.BOT_CREATION_LIMIT_GENERIC_BOTS).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
invalid_add_bot_permission = 4
req = dict(bot_creation_policy = orjson.dumps(invalid_add_bot_permission).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid bot_creation_policy')
def test_change_email_address_visibility(self) -> None:
# We need an admin user.
user_profile = self.example_user("iago")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
self.login_user(user_profile)
invalid_value = 12
req = dict(email_address_visibility = orjson.dumps(invalid_value).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid email_address_visibility')
reset_emails_in_zulip_realm()
realm = get_realm("zulip")
req = dict(email_address_visibility = orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
edited_user_profile = get_user_profile_by_id(user_profile.id)
self.assertEqual(edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver")
# Check normal user cannot access email
result = self.api_get(cordelia, f"/api/v1/users/{hamlet.id}")
self.assert_json_success(result)
self.assertEqual(result.json()['user']['email'],
f'user{hamlet.id}@zulip.testserver')
self.assertEqual(result.json()['user'].get('delivery_email'), None)
# Check administrator gets delivery_email with EMAIL_ADDRESS_VISIBILITY_ADMINS
result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}")
self.assert_json_success(result)
self.assertEqual(result.json()['user']['email'],
f'user{hamlet.id}@zulip.testserver')
self.assertEqual(result.json()['user'].get('delivery_email'),
hamlet.delivery_email)
req = dict(email_address_visibility = orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY)
edited_user_profile = get_user_profile_by_id(user_profile.id)
self.assertEqual(edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver")
# Check even administrator doesn't get delivery_email with
# EMAIL_ADDRESS_VISIBILITY_NOBODY
result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}")
self.assert_json_success(result)
self.assertEqual(result.json()['user']['email'],
f'user{hamlet.id}@zulip.testserver')
self.assertEqual(result.json()['user'].get('delivery_email'), None)
def test_change_stream_creation_policy(self) -> None:
# We need an admin user.
self.login('iago')
req = dict(create_stream_policy = orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
invalid_value = 10
req = dict(create_stream_policy = orjson.dumps(invalid_value).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid create_stream_policy')
def test_change_invite_to_stream_policy(self) -> None:
# We need an admin user.
self.login('iago')
req = dict(invite_to_stream_policy = orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
invalid_value = 10
req = dict(invite_to_stream_policy = orjson.dumps(invalid_value).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid invite_to_stream_policy')
def test_user_group_edit_policy(self) -> None:
# We need an admin user.
self.login('iago')
req = dict(user_group_edit_policy = orjson.dumps(Realm.USER_GROUP_EDIT_POLICY_ADMINS).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
invalid_value = 10
req = dict(user_group_edit_policy = orjson.dumps(invalid_value).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid user_group_edit_policy')
def test_private_message_policy(self) -> None:
# We need an admin user.
self.login('iago')
req = dict(private_message_policy = orjson.dumps(Realm.PRIVATE_MESSAGE_POLICY_DISABLED).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
invalid_value = 10
req = dict(private_message_policy = orjson.dumps(invalid_value).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid private_message_policy')
def test_change_wildcard_mention_policy(self) -> None:
# We need an admin user.
self.login('iago')
req = dict(wildcard_mention_policy = orjson.dumps(Realm.WILDCARD_MENTION_POLICY_EVERYONE).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
invalid_value = 10
req = dict(wildcard_mention_policy = orjson.dumps(invalid_value).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid wildcard_mention_policy')
def test_invalid_integer_attribute_values(self) -> None:
integer_values = [key for key, value in Realm.property_types.items() if value is int]
invalid_values = dict(
bot_creation_policy=10,
create_stream_policy=10,
invite_to_stream_policy=10,
email_address_visibility=10,
message_retention_days=10,
video_chat_provider=10,
waiting_period_threshold=-10,
digest_weekday=10,
user_group_edit_policy=10,
private_message_policy=10,
message_content_delete_limit_seconds=-10,
wildcard_mention_policy=10,
)
# We need an admin user.
self.login('iago')
for name in integer_values:
invalid_value = invalid_values.get(name)
if invalid_value is None:
raise AssertionError(f'No test created for {name}')
self.do_test_invalid_integer_attribute_value(name, invalid_value)
def do_test_invalid_integer_attribute_value(self, val_name: str, invalid_val: int) -> None:
possible_messages = {
f"Invalid {val_name}",
f"Bad value for '{val_name}'",
f"Bad value for '{val_name}': {invalid_val}",
f"Invalid {val_name} {invalid_val}",
}
req = {val_name: invalid_val}
result = self.client_patch('/json/realm', req)
msg = self.get_json_error(result)
self.assertTrue(msg in possible_messages)
def test_change_video_chat_provider(self) -> None:
self.assertEqual(get_realm('zulip').video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
self.login('iago')
invalid_video_chat_provider_value = 10
req = {"video_chat_provider": orjson.dumps(invalid_video_chat_provider_value).decode()}
result = self.client_patch('/json/realm', req)
self.assert_json_error(result,
("Invalid video_chat_provider {}").format(invalid_video_chat_provider_value))
req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['disabled']['id']).decode()}
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
self.assertEqual(get_realm('zulip').video_chat_provider,
Realm.VIDEO_CHAT_PROVIDERS['disabled']['id'])
req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id']).decode()}
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
self.assertEqual(get_realm('zulip').video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['big_blue_button']['id']).decode()}
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
self.assertEqual(get_realm('zulip').video_chat_provider,
Realm.VIDEO_CHAT_PROVIDERS['big_blue_button']['id'])
req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['zoom']['id']).decode()}
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
def test_initial_plan_type(self) -> None:
with self.settings(BILLING_ENABLED=True):
self.assertEqual(do_create_realm('hosted', 'hosted').plan_type, Realm.LIMITED)
self.assertEqual(get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
self.assertEqual(get_realm("hosted").upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
with self.settings(BILLING_ENABLED=False):
self.assertEqual(do_create_realm('onpremise', 'onpremise').plan_type, Realm.SELF_HOSTED)
self.assertEqual(get_realm('onpremise').max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(get_realm('onpremise').message_visibility_limit, None)
self.assertEqual(get_realm("onpremise").upload_quota_gb, None)
def test_change_plan_type(self) -> None:
realm = get_realm('zulip')
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, None)
do_change_plan_type(realm, Realm.STANDARD)
realm = get_realm('zulip')
self.assertEqual(realm.plan_type, Realm.STANDARD)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_plan_type(realm, Realm.LIMITED)
realm = get_realm('zulip')
self.assertEqual(realm.plan_type, Realm.LIMITED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
do_change_plan_type(realm, Realm.STANDARD_FREE)
realm = get_realm('zulip')
self.assertEqual(realm.plan_type, Realm.STANDARD_FREE)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_plan_type(realm, Realm.LIMITED)
do_change_plan_type(realm, Realm.SELF_HOSTED)
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, None)
def test_message_retention_days(self) -> None:
self.login('iago')
realm = get_realm('zulip')
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Must be an organization owner")
self.login('desdemona')
req = dict(message_retention_days=orjson.dumps(0).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Bad value for 'message_retention_days': 0")
req = dict(message_retention_days=orjson.dumps(-10).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(
result, "Bad value for 'message_retention_days': -10")
req = dict(message_retention_days=orjson.dumps('invalid').decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Bad value for 'message_retention_days': invalid")
req = dict(message_retention_days=orjson.dumps(-1).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Bad value for 'message_retention_days': -1")
req = dict(message_retention_days=orjson.dumps('forever').decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
do_change_plan_type(realm, Realm.LIMITED)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_error(
result, "Available on Zulip Standard. Upgrade to access.")
do_change_plan_type(realm, Realm.STANDARD)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
class RealmAPITest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.login('desdemona')
def set_up_db(self, attr: str, value: Any) -> None:
realm = get_realm('zulip')
setattr(realm, attr, value)
realm.save(update_fields=[attr])
def update_with_api(self, name: str, value: int) -> Realm:
result = self.client_patch('/json/realm', {name: orjson.dumps(value).decode()})
self.assert_json_success(result)
return get_realm('zulip') # refresh data
def update_with_api_multiple_value(self, data_dict: Dict[str, Any]) -> Realm:
result = self.client_patch('/json/realm', data_dict)
self.assert_json_success(result)
return get_realm('zulip')
def do_test_realm_update_api(self, name: str) -> None:
"""Test updating realm properties.
If new realm properties have been added to the Realm model but the
test_values dict below has not been updated, this will raise an
assertion error.
"""
bool_tests: List[bool] = [False, True]
test_values: Dict[str, Any] = dict(
default_language=['de', 'en'],
default_code_block_language=['javascript', ''],
description=['Realm description', 'New description'],
digest_weekday=[0, 1, 2],
message_retention_days=[10, 20],
name=['Zulip', 'New Name'],
waiting_period_threshold=[10, 20],
create_stream_policy=[Realm.POLICY_ADMINS_ONLY,
Realm.POLICY_MEMBERS_ONLY,
Realm.POLICY_FULL_MEMBERS_ONLY],
user_group_edit_policy=[Realm.USER_GROUP_EDIT_POLICY_ADMINS,
Realm.USER_GROUP_EDIT_POLICY_MEMBERS],
private_message_policy=[Realm.PRIVATE_MESSAGE_POLICY_UNLIMITED,
Realm.PRIVATE_MESSAGE_POLICY_DISABLED],
invite_to_stream_policy=[Realm.POLICY_ADMINS_ONLY,
Realm.POLICY_MEMBERS_ONLY,
Realm.POLICY_FULL_MEMBERS_ONLY],
wildcard_mention_policy=[Realm.WILDCARD_MENTION_POLICY_EVERYONE,
Realm.WILDCARD_MENTION_POLICY_MEMBERS,
Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS,
Realm.WILDCARD_MENTION_POLICY_STREAM_ADMINS,
Realm.WILDCARD_MENTION_POLICY_ADMINS,
Realm.WILDCARD_MENTION_POLICY_NOBODY],
bot_creation_policy=[1, 2],
email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY],
video_chat_provider=[
dict(
video_chat_provider=orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id']).decode(),
),
],
message_content_delete_limit_seconds=[1000, 1100, 1200]
)
vals = test_values.get(name)
if Realm.property_types[name] is bool:
vals = bool_tests
if vals is None:
raise AssertionError(f'No test created for {name}')
if name == 'video_chat_provider':
self.set_up_db(name, vals[0][name])
realm = self.update_with_api_multiple_value(vals[0])
self.assertEqual(getattr(realm, name), orjson.loads(vals[0][name]))
else:
self.set_up_db(name, vals[0])
realm = self.update_with_api(name, vals[1])
self.assertEqual(getattr(realm, name), vals[1])
realm = self.update_with_api(name, vals[0])
self.assertEqual(getattr(realm, name), vals[0])
def test_update_realm_properties(self) -> None:
for prop in Realm.property_types:
with self.subTest(property=prop):
self.do_test_realm_update_api(prop)
def test_update_realm_allow_message_editing(self) -> None:
"""Tests updating the realm property 'allow_message_editing'."""
self.set_up_db('allow_message_editing', False)
self.set_up_db('message_content_edit_limit_seconds', 0)
self.set_up_db('allow_community_topic_editing', False)
realm = self.update_with_api('allow_message_editing', True)
realm = self.update_with_api('message_content_edit_limit_seconds', 100)
realm = self.update_with_api('allow_community_topic_editing', True)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
self.assertEqual(realm.allow_community_topic_editing, True)
realm = self.update_with_api('allow_message_editing', False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
self.assertEqual(realm.allow_community_topic_editing, True)
realm = self.update_with_api('message_content_edit_limit_seconds', 200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.allow_community_topic_editing, True)
realm = self.update_with_api('allow_community_topic_editing', False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.allow_community_topic_editing, False)
def test_update_realm_allow_message_deleting(self) -> None:
"""Tests updating the realm property 'allow_message_deleting'."""
self.set_up_db('allow_message_deleting', True)
self.set_up_db('message_content_delete_limit_seconds', 0)
realm = self.update_with_api('allow_message_deleting', False)
self.assertEqual(realm.allow_message_deleting, False)
self.assertEqual(realm.message_content_delete_limit_seconds, 0)
realm = self.update_with_api('allow_message_deleting', True)
realm = self.update_with_api('message_content_delete_limit_seconds', 100)
self.assertEqual(realm.allow_message_deleting, True)
self.assertEqual(realm.message_content_delete_limit_seconds, 100)
realm = self.update_with_api('message_content_delete_limit_seconds', 600)
self.assertEqual(realm.allow_message_deleting, True)
self.assertEqual(realm.message_content_delete_limit_seconds, 600)
class ScrubRealmTest(ZulipTestCase):
def test_scrub_realm(self) -> None:
zulip = get_realm("zulip")
lear = get_realm("lear")
iago = self.example_user("iago")
othello = self.example_user("othello")
cordelia = self.lear_user("cordelia")
king = self.lear_user("king")
create_stream_if_needed(lear, "Shakespeare")
self.subscribe(cordelia, "Shakespeare")
self.subscribe(king, "Shakespeare")
Message.objects.all().delete()
UserMessage.objects.all().delete()
for i in range(5):
self.send_stream_message(iago, "Scotland")
self.send_stream_message(othello, "Scotland")
self.send_stream_message(cordelia, "Shakespeare")
self.send_stream_message(king, "Shakespeare")
Attachment.objects.filter(realm=zulip).delete()
Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt", size=512)
Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt", size=512)
Attachment.objects.filter(realm=lear).delete()
Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt", size=512)
Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt", size=512)
CustomProfileField.objects.create(realm=lear)
self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
with self.assertLogs(level="WARNING"):
do_scrub_realm(zulip)
self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)
self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)
zulip_users = UserProfile.objects.filter(realm=zulip)
for user in zulip_users:
self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
lear_users = UserProfile.objects.filter(realm=lear)
for user in lear_users:
self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
|
|
# Copyright 2009 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to control GenePop.
"""
import os
import re
import shutil
import subprocess
import sys
import tempfile
from Bio.Application import AbstractCommandline, _Argument, _Option
def _gp_float(tok):
"""Gets a float from a token, if it fails, returns the string.
"""
try:
return float(tok)
except ValueError:
return str(tok)
def _gp_int(tok):
"""Gets a int from a token, if it fails, returns the string.
"""
try:
return int(tok)
except ValueError:
return str(tok)
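# Minimal illustration of the tolerant token parsers above: numeric tokens are
# converted, anything else is passed through as a string. This helper exists
# only for illustration and is not used by the parsing code below.
def _demo_gp_token_parsing():
    assert _gp_float("0.25") == 0.25
    assert _gp_float("No information") == "No information"
    assert _gp_int("12") == 12
    assert _gp_int("-") == "-"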
def _read_allele_freq_table(f):
l = f.readline()
while l.find(" --")==-1:
if l == "":
raise StopIteration
if l.find("No data")>-1:
return None, None
l = f.readline()
alleles = filter(lambda x: x != '', f.readline().rstrip().split(" "))
alleles = map(lambda x: _gp_int(x), alleles)
l = f.readline().rstrip()
table = []
while l != "":
line = filter(lambda x: x != '', l.split(" "))
try:
table.append(
(line[0],
map(lambda x: _gp_float(x), line[1:-1]),
_gp_int(line[-1])))
except ValueError:
table.append(
(line[0],
[None] * len(alleles),
0))
l = f.readline().rstrip()
return alleles, table
def _read_table(f, funs):
table = []
l = f.readline().rstrip()
while l.find("---")==-1:
l = f.readline().rstrip()
l = f.readline().rstrip()
while l.find("===")==-1 and l.find("---")==-1 and l != "":
toks = filter(lambda x: x != "", l.split(" "))
line = []
for i in range(len(toks)):
try:
line.append(funs[i](toks[i]))
except ValueError:
line.append(toks[i]) # Could not cast
table.append(tuple(line))
l = f.readline().rstrip()
return table
def _read_triangle_matrix(f):
matrix = []
l = f.readline().rstrip()
while l != "":
matrix.append(
map(lambda x: _gp_float(x),
filter(lambda y: y != "", l.split(" "))))
l = f.readline().rstrip()
return matrix
def _read_headed_triangle_matrix(f):
matrix = {}
header = f.readline().rstrip()
if header.find("---")>-1 or header.find("===")>-1:
header = f.readline().rstrip()
nlines = len(filter(lambda x:x != '', header.split(' '))) - 1
for line_pop in range(nlines):
l = f.readline().rstrip()
vals = filter(lambda x:x != '', l.split(' ')[1:])
clean_vals = []
for val in vals:
try:
clean_vals.append(_gp_float(val))
except ValueError:
clean_vals.append(None)
for col_pop in range(len(clean_vals)):
matrix[(line_pop+1, col_pop)] = clean_vals[col_pop]
return matrix
def _hw_func(stream, is_locus, has_fisher = False):
l = stream.readline()
if is_locus:
hook = "Locus "
else:
hook = " Pop : "
while l != "":
if l.startswith(hook):
stream.readline()
stream.readline()
stream.readline()
table = _read_table(stream,[str,_gp_float,_gp_float,_gp_float,_gp_float,_gp_int,str])
#loci might mean pop if hook="Locus "
loci = {}
for entry in table:
if len(entry) < 3:
loci[entry[0]] = None
else:
locus, p, se, fis_wc, fis_rh, steps = entry[:-1]
if se == "-": se = None
loci[locus] = p, se, fis_wc, fis_rh, steps
return loci
l = stream.readline()
#self.done = True
raise StopIteration
class _FileIterator:
"""Iterator which crawls over a stream of lines with a function.
The generator function is expected to yield a tuple, while
consuming input
"""
def __init__(self, func, stream, fname):
self.func = func
self.stream = stream
self.fname = fname
self.done = False
def __iter__(self):
if self.done:
self.done = True
raise StopIteration
return self
def next(self):
return self.func(self)
def __del__(self):
self.stream.close()
try:
os.remove(self.fname)
except OSError:
#Jython seems to call the iterator twice
pass
class _GenePopCommandline(AbstractCommandline):
""" Command Line Wrapper for GenePop.
"""
def __init__(self, genepop_dir=None, cmd='Genepop', **kwargs):
self.parameters = [
_Argument(["command"],
"GenePop option to be called",
is_required=True),
_Argument(["mode"],
"Should allways be batch",
is_required=True),
_Argument(["input"],
"Input file",
is_required=True),
_Argument(["Dememorization"],
"Dememorization step"),
_Argument(["BatchNumber"],
"Number of MCMC batches"),
_Argument(["BatchLength"],
"Length of MCMC chains"),
_Argument(["HWtests"],
"Enumeration or MCMC"),
_Argument(["IsolBDstatistic"],
"IBD statistic (a or e)"),
_Argument(["MinimalDistance"],
"Minimal IBD distance"),
_Argument(["GeographicScale"],
"Log or Linear"),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
self.set_parameter("mode", "Mode=Batch")
def set_menu(self, option_list):
"""Sets the menu option.
        For example, set_menu([6, 1]) selects all F-statistics (menu option 6.1).
"""
self.set_parameter("command", "MenuOptions="+
".".join(map(lambda x:str(x),option_list)))
def set_input(self, fname):
"""Sets the input file name.
"""
self.set_parameter("input", "InputFile="+fname)
class GenePopController(object):
def __init__(self, genepop_dir = None):
"""Initializes the controller.
genepop_dir is the directory where GenePop is.
The binary should be called Genepop (capital G)
"""
self.controller = _GenePopCommandline(genepop_dir)
def _remove_garbage(self, fname_out):
try:
if fname_out != None: os.remove(fname_out)
except OSError:
pass # safe
try:
os.remove("genepop.txt")
except OSError:
pass # safe
try:
os.remove("fichier.in")
except OSError:
pass # safe
try:
os.remove("cmdline.txt")
except OSError:
pass # safe
def _get_opts(self, dememorization, batches, iterations, enum_test=None):
opts = {}
opts["Dememorization"]=dememorization
opts["BatchNumber"]=batches
opts["BatchLength"]=iterations
if enum_test != None:
if enum_test == True:
opts["HWtests"]="Enumeration"
else:
opts["HWtests"]="MCMC"
return opts
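    # With the defaults used throughout this class, _get_opts(10000, 20, 5000, True)
    # would return {"Dememorization": 10000, "BatchNumber": 20,
    # "BatchLength": 5000, "HWtests": "Enumeration"}.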
def _run_genepop(self, extensions, option, fname, opts={}):
for extension in extensions:
self._remove_garbage(fname + extension)
self.controller.set_menu(option)
self.controller.set_input(fname)
for opt in opts:
self.controller.set_parameter(opt, opt+"="+str(opts[opt]))
self.controller() #checks error level is zero
self._remove_garbage(None)
return
def _test_pop_hz_both(self, fname, type, ext, enum_test = True,
dememorization = 10000, batches = 20, iterations = 5000):
"""Hardy-Weinberg test for heterozygote deficiency/excess.
Returns a population iterator containg
A dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps)
Some loci have a None if the info is not available
SE might be none (for enumerations)
"""
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, type], fname, opts)
f = open(fname + ext)
def hw_func(self):
return _hw_func(self.stream, False)
return _FileIterator(hw_func, f, fname + ext)
def _test_global_hz_both(self, fname, type, ext, enum_test = True,
dememorization = 10000, batches = 20, iterations = 5000):
"""Global Hardy-Weinberg test for heterozygote deficiency/excess.
Returns a triple with:
A list per population containg
(pop_name, P-val, SE, switches)
Some pops have a None if the info is not available
SE might be none (for enumerations)
A list per loci containg
(locus_name, P-val, SE, switches)
Some loci have a None if the info is not available
SE might be none (for enumerations)
Overall results (P-val, SE, switches)
"""
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, type], fname, opts)
def hw_pop_func(self):
return _read_table(self.stream, [str, _gp_float, _gp_float, _gp_float])
f1 = open(fname + ext)
l = f1.readline()
while l.find("by population") == -1:
l = f1.readline()
pop_p = _read_table(f1, [str, _gp_float, _gp_float, _gp_float])
f2 = open(fname + ext)
l = f2.readline()
while l.find("by locus") == -1:
l = f2.readline()
loc_p = _read_table(f2, [str, _gp_float, _gp_float, _gp_float])
f = open(fname + ext)
l = f.readline()
while l.find("all locus") == -1:
l = f.readline()
f.readline()
f.readline()
f.readline()
f.readline()
l = f.readline().rstrip()
p, se, switches = tuple(map(lambda x: _gp_float(x),
filter(lambda y: y != "",l.split(" "))))
f.close()
return pop_p, loc_p, (p, se, switches)
#1.1
def test_pop_hz_deficiency(self, fname, enum_test = True,
dememorization = 10000, batches = 20, iterations = 5000):
"""Hardy-Weinberg test for heterozygote deficiency.
Returns a population iterator containg
A dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps)
Some loci have a None if the info is not available
SE might be none (for enumerations)
"""
return self._test_pop_hz_both(fname, 1, ".D", enum_test,
dememorization, batches, iterations)
#1.2
def test_pop_hz_excess(self, fname, enum_test = True,
dememorization = 10000, batches = 20, iterations = 5000):
"""Hardy-Weinberg test for heterozygote deficiency.
Returns a population iterator containg
A dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps)
Some loci have a None if the info is not available
SE might be none (for enumerations)
"""
return self._test_pop_hz_both(fname, 2, ".E", enum_test,
dememorization, batches, iterations)
#1.3 P file
def test_pop_hz_prob(self, fname, ext, enum_test = False,
dememorization = 10000, batches = 20, iterations = 5000):
"""Hardy-Weinberg test based on probability.
Returns 2 iterators and a final tuple:
1. Returns a loci iterator containing
b. A dictionary[pop_pos]=(P-val, SE, Fis-WC, Fis-RH, steps)
Some pops have a None if the info is not available
SE might be none (for enumerations)
c. Result of Fisher's test (Chi2, deg freedom, prob)
2. Returns a population iterator containg
a. A dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps)
Some loci have a None if the info is not available
SE might be none (for enumerations)
b. Result of Fisher's test (Chi2, deg freedom, prob)
3. (Chi2, deg freedom, prob)
"""
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, 3], fname, opts)
def hw_prob_loci_func(self):
return _hw_func(self.stream, True, True)
def hw_prob_pop_func(self):
return _hw_func(self.stream, False, True)
shutil.copyfile(fname+".P", fname+".P2")
f1 = open(fname + ".P")
f2 = open(fname + ".P2")
return _FileIterator(hw_prob_loci_func, f1, fname + ".P"), _FileIterator(hw_prob_pop_func, f2, fname + ".P2")
#1.4
def test_global_hz_deficiency(self, fname, enum_test = True,
dememorization = 10000, batches = 20, iterations = 5000):
"""Global Hardy-Weinberg test for heterozygote deficiency.
Returns a triple with:
An list per population containg
(pop_name, P-val, SE, switches)
Some pops have a None if the info is not available
SE might be none (for enumerations)
An list per loci containg
(locus_name, P-val, SE, switches)
Some loci have a None if the info is not available
SE might be none (for enumerations)
Overall results (P-val, SE, switches)
"""
return self._test_global_hz_both(fname, 4, ".DG", enum_test,
dememorization, batches, iterations)
#1.5
def test_global_hz_excess(self, fname, enum_test = True,
dememorization = 10000, batches = 20, iterations = 5000):
"""Global Hardy-Weinberg test for heterozygote excess.
Returns a triple with:
An list per population containg
(pop_name, P-val, SE, switches)
Some pops have a None if the info is not available
SE might be none (for enumerations)
An list per loci containg
(locus_name, P-val, SE, switches)
Some loci have a None if the info is not available
SE might be none (for enumerations)
Overall results (P-val, SE, switches)
"""
return self._test_global_hz_both(fname, 5, ".EG", enum_test,
dememorization, batches, iterations)
#2.1
def test_ld(self, fname,
dememorization = 10000, batches = 20, iterations = 5000):
opts = self._get_opts(dememorization, batches, iterations)
self._run_genepop([".DIS"], [2, 1], fname, opts)
def ld_pop_func(self):
current_pop = None
l = self.stream.readline().rstrip()
if l == "":
self.done = True
raise StopIteration
toks = filter(lambda x: x != "", l.split(" "))
pop, locus1, locus2 = toks[0], toks[1], toks[2]
if not hasattr(self, "start_locus1"):
start_locus1, start_locus2 = locus1, locus2
current_pop = -1
if locus1 == start_locus1 and locus2 == start_locus2:
current_pop += 1
if toks[3] == "No":
return current_pop, pop, (locus1, locus2), None
p, se, switches = _gp_float(toks[3]), _gp_float(toks[4]), _gp_int(toks[5])
return current_pop, pop, (locus1, locus2), (p, se, switches)
def ld_func(self):
l = self.stream.readline().rstrip()
if l == "":
self.done = True
raise StopIteration
toks = filter(lambda x: x != "", l.split(" "))
locus1, locus2 = toks[0], toks[2]
try:
chi2, df, p = _gp_float(toks[3]), _gp_int(toks[4]), _gp_float(toks[5])
except ValueError:
return (locus1, locus2), None
return (locus1, locus2), (chi2, df, p)
f1 = open(fname + ".DIS")
l = f1.readline()
while l.find("----")==-1:
l = f1.readline()
shutil.copyfile(fname + ".DIS", fname + ".DI2")
f2 = open(fname + ".DI2")
l = f2.readline()
while l.find("Locus pair")==-1:
l = f2.readline()
while l.find("----")==-1:
l = f2.readline()
return _FileIterator(ld_pop_func, f1, fname+".DIS"), _FileIterator(ld_func, f2, fname + ".DI2")
#2.2
def create_contingency_tables(self, fname):
raise NotImplementedError
#3.1 PR/GE files
def test_genic_diff_all(self, fname,
dememorization = 10000, batches = 20, iterations = 5000):
raise NotImplementedError
#3.2 PR2/GE2 files
def test_genic_diff_pair(self, fname,
dememorization = 10000, batches = 20, iterations = 5000):
raise NotImplementedError
#3.3 G files
def test_genotypic_diff_all(self, fname,
dememorization = 10000, batches = 20, iterations = 5000):
raise NotImplementedError
#3.4 2G2 files
def test_genotypic_diff_pair(self, fname,
dememorization = 10000, batches = 20, iterations = 5000):
raise NotImplementedError
#4
def estimate_nm(self, fname):
self._run_genepop(["PRI"], [4], fname)
f = open(fname + ".PRI")
lines = f.readlines() # Small file, it is ok
f.close()
for line in lines:
m = re.search("Mean sample size: ([.0-9]+)", line)
if m != None:
mean_sample_size = _gp_float(m.group(1))
m = re.search("Mean frequency of private alleles p\(1\)= ([.0-9]+)", line)
if m != None:
mean_priv_alleles = _gp_float(m.group(1))
m = re.search("N=10: ([.0-9]+)", line)
if m != None:
mig10 = _gp_float(m.group(1))
m = re.search("N=25: ([.0-9]+)", line)
if m != None:
mig25 = _gp_float(m.group(1))
m = re.search("N=50: ([.0-9]+)", line)
if m != None:
mig50 = _gp_float(m.group(1))
m = re.search("for size= ([.0-9]+)", line)
if m != None:
mig_corrected = _gp_float(m.group(1))
os.remove(fname + ".PRI")
return mean_sample_size, mean_priv_alleles, mig10, mig25, mig50, mig_corrected
#5.1
def calc_allele_genotype_freqs(self, fname):
"""Calculates allele and genotype frequencies per locus and per sample.
Parameters:
fname - file name
Returns tuple with 2 elements:
Population iterator with
population name
Locus dictionary with key = locus name and content tuple as
Genotype List with
(Allele1, Allele2, observed, expected)
(expected homozygotes, observed hm,
expected heterozygotes, observed ht)
Allele frequency/Fis dictionary with allele as key and
(count, frequency, Fis Weir & Cockerham)
Totals as a pair
count
Fis Weir & Cockerham,
Fis Robertson & Hill
Locus iterator with
Locus name
allele list
Population list with a triple
population name
list of allele frequencies in the same order as allele list above
number of genes
Will create a file called fname.INF
"""
self._run_genepop(["INF"], [5,1], fname)
#First pass, general information
#num_loci = None
#num_pops = None
#f = open(fname + ".INF")
#l = f.readline()
#while (num_loci == None or num_pops == None) and l != '':
# m = re.search("Number of populations detected : ([0-9+])", l)
# if m != None:
# num_pops = _gp_int(m.group(1))
# m = re.search("Number of loci detected : ([0-9+])", l)
# if m != None:
# num_loci = _gp_int(m.group(1))
# l = f.readline()
#f.close()
def pop_parser(self):
if hasattr(self, "old_line"):
l = self.old_line
del self.old_line
else:
l = self.stream.readline()
loci_content = {}
while l != '':
l = l.rstrip()
if l.find("Tables of allelic frequencies for each locus")>-1:
return self.curr_pop, loci_content
match = re.match(".*Pop: (.+) Locus: (.+)", l)
if match != None:
pop = match.group(1)
locus = match.group(2)
if not hasattr(self, "first_locus"):
self.first_locus = locus
if hasattr(self, "curr_pop"):
if self.first_locus == locus:
old_pop = self.curr_pop
#self.curr_pop = pop
self.old_line = l
del self.first_locus
del self.curr_pop
return old_pop, loci_content
self.curr_pop = pop
else:
l = self.stream.readline()
continue
geno_list = []
l = self.stream.readline()
if l.find("No data")>-1: continue
while l.find("Genotypes Obs.")==-1:
l = self.stream.readline()
while l != "\n":
m2 = re.match(" +([0-9]+) , ([0-9]+) *([0-9]+) *(.+)",l)
if m2 != None:
geno_list.append((_gp_int(m2.group(1)), _gp_int(m2.group(2)),
_gp_int(m2.group(3)), _gp_float(m2.group(4))))
else:
l = self.stream.readline()
continue
l = self.stream.readline()
while l.find("Expected number of ho")==-1:
l = self.stream.readline()
expHo = _gp_float(l[38:])
l = self.stream.readline()
obsHo = _gp_int(l[38:])
l = self.stream.readline()
expHe = _gp_float(l[38:])
l = self.stream.readline()
obsHe = _gp_int(l[38:])
l = self.stream.readline()
while l.find("Sample count")==-1:
l = self.stream.readline()
l = self.stream.readline()
freq_fis={}
overall_fis = None
while l.find("----")==-1:
vals = filter(lambda x: x!='',
l.rstrip().split(' '))
if vals[0]=="Tot":
overall_fis = _gp_int(vals[1]), \
_gp_float(vals[2]), _gp_float(vals[3])
else:
freq_fis[_gp_int(vals[0])] = _gp_int(vals[1]), \
_gp_float(vals[2]), _gp_float(vals[3])
l = self.stream.readline()
loci_content[locus] = geno_list, \
(expHo, obsHo, expHe, obsHe), \
freq_fis, overall_fis
self.done = True
raise StopIteration
def locus_parser(self):
l = self.stream.readline()
while l != "":
l = l.rstrip()
match = re.match(" Locus: (.+)", l)
if match != None:
locus = match.group(1)
alleles, table = _read_allele_freq_table(self.stream)
return locus, alleles, table
l = self.stream.readline()
self.done = True
raise StopIteration
popf = open(fname + ".INF")
shutil.copyfile(fname + ".INF", fname + ".IN2")
locf = open(fname + ".IN2")
pop_iter = _FileIterator(pop_parser, popf, fname + ".INF")
locus_iter = _FileIterator(locus_parser, locf, fname + ".IN2")
return (pop_iter, locus_iter)
def _calc_diversities_fis(self, fname, ext):
self._run_genepop([ext], [5,2], fname)
f = open(fname + ext)
l = f.readline()
while l != "":
l = l.rstrip()
if l.startswith("Statistics per sample over all loci with at least two individuals typed"):
avg_fis = _read_table(f, [str, _gp_float, _gp_float, _gp_float])
avg_Qintra = _read_table(f, [str, _gp_float])
l = f.readline()
f.close()
def fis_func(self):
l = self.stream.readline()
while l != "":
l = l.rstrip()
m = re.search("Locus: (.+)", l)
if m != None:
locus = m.group(1)
self.stream.readline()
if self.stream.readline().find("No complete")>-1: return locus, None
self.stream.readline()
fis_table = _read_table(self.stream, [str, _gp_float, _gp_float, _gp_float])
self.stream.readline()
avg_qinter, avg_fis = tuple(map (lambda x: _gp_float(x),
filter(lambda y:y != "", self.stream.readline().split(" "))))
return locus, fis_table, avg_qinter, avg_fis
l = self.stream.readline()
self.done = True
raise StopIteration
dvf = open(fname + ext)
return _FileIterator(fis_func, dvf, fname + ext), avg_fis, avg_Qintra
#5.2
def calc_diversities_fis_with_identity(self, fname):
return self._calc_diversities_fis(fname, ".DIV")
#5.3
def calc_diversities_fis_with_size(self, fname):
raise NotImplementedError
#6.1 Less genotype frequencies
def calc_fst_all(self, fname):
"""Executes GenePop and gets Fst/Fis/Fit (all populations)
Parameters:
fname - file name
Returns:
(multiLocusFis, multiLocusFst, multiLocus Fit),
Iterator of tuples
(Locus name, Fis, Fst, Fit, Qintra, Qinter)
Will create a file called fname.FST .
This does not return the genotype frequencies.
"""
self._run_genepop([".FST"], [6,1], fname)
f = open(fname + ".FST")
l = f.readline()
while l != '':
if l.startswith(' All:'):
toks=filter(lambda x:x!="", l.rstrip().split(' '))
try:
allFis = _gp_float(toks[1])
except ValueError:
allFis = None
try:
allFst = _gp_float(toks[2])
except ValueError:
allFst = None
try:
allFit = _gp_float(toks[3])
except ValueError:
allFit = None
l = f.readline()
f.close()
f = open(fname + ".FST")
def proc(self):
if hasattr(self, "last_line"):
l = self.last_line
del self.last_line
else:
l = self.stream.readline()
locus = None
fis = None
fst = None
fit = None
qintra = None
qinter = None
while l != '':
l = l.rstrip()
if l.startswith(' Locus:'):
if locus != None:
self.last_line = l
return locus, fis, fst, fit, qintra, qinter
else:
locus = l.split(':')[1].lstrip()
elif l.startswith('Fis^='):
fis = _gp_float(l.split(' ')[1])
elif l.startswith('Fst^='):
fst = _gp_float(l.split(' ')[1])
elif l.startswith('Fit^='):
fit = _gp_float(l.split(' ')[1])
elif l.startswith('1-Qintra^='):
qintra = _gp_float(l.split(' ')[1])
elif l.startswith('1-Qinter^='):
qinter = _gp_float(l.split(' ')[1])
return locus, fis, fst, fit, qintra, qinter
l = self.stream.readline()
if locus != None:
return locus, fis, fst, fit, qintra, qinter
self.stream.close()
self.done = True
raise StopIteration
return (allFis, allFst, allFit), _FileIterator(proc , f, fname + ".FST")
#6.2
def calc_fst_pair(self, fname):
self._run_genepop([".ST2", ".MIG"], [6,2], fname)
f = open(fname + ".ST2")
l = f.readline()
while l != "":
l = l.rstrip()
if l.startswith("Estimates for all loci"):
avg_fst = _read_headed_triangle_matrix(f)
l = f.readline()
f.close()
def loci_func(self):
l = self.stream.readline()
while l != "":
l = l.rstrip()
m = re.search(" Locus: (.+)", l)
if m != None:
locus = m.group(1)
matrix = _read_headed_triangle_matrix(self.stream)
return locus, matrix
l = self.stream.readline()
self.done = True
raise StopIteration
stf = open(fname + ".ST2")
os.remove(fname + ".MIG")
return _FileIterator(loci_func, stf, fname + ".ST2"), avg_fst
#6.3
def calc_rho_all(self, fname):
raise NotImplementedError
#6.4
def calc_rho_pair(self, fname):
raise NotImplementedError
def _calc_ibd(self, fname, sub, stat="a", scale="Log", min_dist=0.00001):
"""Calculates isolation by distance statistics
"""
self._run_genepop([".GRA", ".MIG", ".ISO"], [6,sub],
fname, opts = {
"MinimalDistance" : min_dist,
"GeographicScale" : scale,
"IsolBDstatistic" : stat,
})
f = open(fname + ".ISO")
f.readline()
f.readline()
f.readline()
f.readline()
estimate = _read_triangle_matrix(f)
f.readline()
f.readline()
distance = _read_triangle_matrix(f)
f.readline()
match = re.match("a = (.+), b = (.+)", f.readline().rstrip())
a = _gp_float(match.group(1))
b = _gp_float(match.group(2))
f.readline()
f.readline()
match = re.match(" b=(.+)", f.readline().rstrip())
bb = _gp_float(match.group(1))
match = re.match(".*\[(.+) ; (.+)\]", f.readline().rstrip())
bblow = _gp_float(match.group(1))
bbhigh = _gp_float(match.group(2))
f.close()
os.remove(fname + ".MIG")
os.remove(fname + ".GRA")
os.remove(fname + ".ISO")
return estimate, distance, (a, b), (bb, bblow, bbhigh)
#6.5
def calc_ibd_diplo(self, fname, stat="a", scale="Log", min_dist=0.00001):
"""Calculates isolation by distance statistics for diploid data.
See _calc_ibd for parameter details.
Note that each pop can only have a single individual and
the individual name has to be the sample coordinates.
"""
return self._calc_ibd(fname, 5, stat, scale, min_dist)
#6.6
def calc_ibd_haplo(self, fname, stat="a", scale="Log", min_dist=0.00001):
"""Calculates isolation by distance statistics for haploid data.
See _calc_ibd for parameter details.
Note that each pop can only have a single individual and
the individual name has to be the sample coordinates.
"""
return self._calc_ibd(fname, 6, stat, scale, min_dist)
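# Minimal usage sketch (not part of the original module). It assumes the
# Genepop binary is installed and on the PATH and that "sample.txt" is an
# existing GenePop-format input file; both names are hypothetical.
if __name__ == "__main__":
    ctrl = GenePopController()
    # Multi-locus F-statistics, menu option 6.1 (see calc_fst_all above).
    (all_fis, all_fst, all_fit), per_locus_iter = ctrl.calc_fst_all("sample.txt")
    # Each record is (locus, Fis, Fst, Fit, Qintra, Qinter), per the docstring above.
    per_locus = [record for record in per_locus_iter]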
|
|
import re
import string
from PyQt5 import QtGui, QtCore
class CTextDecorator:
redPen = QtGui.QPen(QtGui.QColor(255, 0, 0))
greenPen = QtGui.QPen(QtGui.QColor(255, 255, 0))
whitePen = QtGui.QPen(QtGui.QColor(255, 255, 255))
normalPen = QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine)
MZbrush = QtGui.QBrush(QtGui.QColor(128, 0, 0))
grayBrush = QtGui.QBrush(QtGui.QColor(128, 128, 128))
def __init__(self):
pass
class TextDecorator(CTextDecorator):
def __init__(self, viewmode):
self.operations = []
self.dataModel = viewmode.getDataModel()
self.viewmode = viewmode
self.penMap = {}
self.brushMap = {}
self.PenInterval = []
self.normalPen = QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine)
# if we want to generate T/F table
self.Special = string.ascii_letters + string.digits + ' .;\':;=\"?-!()/\\_'
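        # The string above is only kept for reference; it is immediately
        # replaced by the precomputed per-byte True/False table below
        # (indexed by ord(c) in isText; see the generator snippet in its docstring).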
self.Special = [False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, True, True,
True, False, False, False, False, True, True,
True, False, False, False, True, True, True, True, True, True, True, True, True, True, True,
True, True, True, True, False, True, False, True,
False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True,
True, True, True, False, True, False, False, True, False, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True, True, True, False, False, False, False,
False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False]
def reset(self):
self.penMap = {}
self.brushMap = {}
self.PenInterval = []
def getDataModel(self):
return self.dataModel
def isText(self, c):
"""
D = []
for i in range(256):
b = False
if self.isText(chr(i)):
b = True
D.append(b)
print D
sys.exit()
"""
return self.Special[ord(c)]
def getChar(self, idx):
# self.page = self.getDataModel().getDisplayablePage()
if idx < len(self.page):
return self.page[idx]
return 0
def decorate(self, pageOffset=None):
if pageOffset:
self.page = self.viewmode.getDisplayablePage(pageOffset=pageOffset)
else:
self.page = self.viewmode.getDisplayablePage()
return self.page
def addPenInterval(self, a, b, pen, ignoreHighlights=True):
self.PenInterval.append((a, b, pen, ignoreHighlights))
def choosePen(self, idx):
key = self.dataModel.getOffset() + idx
        # if we do have a pen with that index, return it if it's different from the default pen
        # otherwise, return the pen that was set in that interval
        # the priority here is the pen from other transformations, then the interval pen
        for a, b, pen, ignoreHighlights in self.PenInterval:
# in interval
if a <= key <= b:
if ignoreHighlights:
return pen
if key in self.penMap:
if self.penMap[key] == self.normalPen:
return pen
else:
return self.penMap[key]
else:
return pen
if key in self.penMap:
return self.penMap[key]
return self.normalPen
def chooseBrush(self, idx):
off = self.dataModel.getOffset() + idx
if off in self.brushMap:
return self.brushMap[off]
return None
class PageDecorator(TextDecorator):
def __init__(self, decorated):
pass
def reset(self):
self.decorated.reset()
self.penMap = {}
self.brushMap = {}
self.PenInterval = []
def getBrushMap(self):
return self.brushMap
def getPenMap(self):
return self.penMap
def doit(self):
pass
def getDataModel(self):
return self.dataModel
class HighlightASCII(PageDecorator):
def __init__(self, decorated):
self.dataModel = decorated.getDataModel()
self.penMap = decorated.penMap
self.decorated = decorated
super().__init__(decorated)
self.dataModel = super().getDataModel()
def decorate(self, pageOffset=None):
page = self.decorated.decorate(pageOffset)
self.PenInterval = self.decorated.PenInterval
self.brushMap = self.decorated.brushMap
self.penMap = self.decorated.penMap
off = self.dataModel.getOffset()
Match = [(m.start(), m.end()) for m in re.finditer(b'([a-zA-Z0-9\\-\\\\.%*:/? _<>]){4,}', page)]
for s, e in Match:
for i in range(e - s):
idx = off + s + i
if idx not in self.penMap:
self.penMap[off + s + i] = self.redPen
self.page = page
return self.page
class HighlightPrefix(PageDecorator):
def __init__(self, decorated, text, additionalLength=0, brush=None, pen=None):
super().__init__(decorated)
self.dataModel = decorated.getDataModel()
self.decorated = decorated
self.additionalLength = additionalLength
self.brush = brush
self.text = text
self.pen = pen
def decorate(self, pageOffset=None):
page = self.decorated.decorate(pageOffset)
self.PenInterval = self.decorated.PenInterval
self.brushMap = self.decorated.brushMap
self.penMap = self.decorated.penMap
self.page = self.highliteWithPrefix(page, self.text, self.additionalLength, self.brush, self.pen)
return self.page
def highliteWithPrefix(self, page, text, additionalLength=0, brush=None, pen=None):
        # todo: couldn't find a more elegant way to select all occurrences of text
        # regexps don't work here ("bad re expression")
lenText = len(text)
M = []
idx = 0
if lenText > 0:
while idx < len(page):
idx = page.find(text, idx, len(page))
if idx == -1:
break
M.append((idx, lenText + additionalLength))
idx += lenText + additionalLength
off = self.dataModel.getOffset()
for start, length in M:
for i in range(length):
self.penMap[off + start + i] = pen
self.brushMap[off + start + i] = brush
return page
class HighlightWideChar(PageDecorator):
def __init__(self, decorated):
super().__init__(decorated)
self.dataModel = decorated.getDataModel()
self.decorated = decorated
def decorate(self, pageOffset=None):
self.page = self.decorated.decorate(pageOffset)
self.PenInterval = self.decorated.PenInterval
self.brushMap = self.decorated.brushMap
self.penMap = self.decorated.penMap
self.page = self.highliteWidechar2(self.page)
return self.page
def highliteWidechar2(self, page):
pageStart = self.dataModel.getOffset()
pageEnd = pageStart + len(page)
touched = False
# for s, e in self.Intervals:
# touched = True
if not touched:
# expand
Match = [(m.start(), m.end()) for m in re.finditer(r'([a-zA-Z0-9\-\\.%*:/? ]\x00){4,}', page)]
for s, e in Match:
for i in range(e - s):
self.penMap[pageStart + s + i] = QtGui.QPen(QtGui.QColor(255, 255, 0))
# get rid of '\x00'
string = page[s:e:2]
l = len(string)
# copy string that has no zeros
page[s:s + l] = string
# fill with zeros the remaining space
page[s + l: s + 2 * l] = '\x00' * l
return page
    ### todo: another way to highlight wide chars; should test and see which one is faster
"""
def _changeText(self, page, page_start, I):
page_end = page_start + len(page)
for obj in I:
if obj['s'] >= page_start and obj['e'] <= page_end:
page[obj['s']-page_start:obj['e']-page_start] = obj['text']
def _expand(self, page, off, start, end):
I = []
start = start - off
end = end - off
i = start
while i < end:
if i+1 < end:
if page[i+1] == 0 and self.isText(chr(page[i])):
k = 0
for j in xrange(i, end, 2):
if j + 1 < end:
if self.isText(chr(page[j])) and page[j+1] == 0:
k += 1
else:
break
if k > 4:
if i+k*2 <= end:
obj = {}
obj['s'] = off + i + 1
obj['e'] = off + i + k * 2
for idx, j in enumerate(range(i+1, i + k*2)):
if j > i + k:
page[j] = 0
#self.penMap[j] = self.greenPen
elif j+idx+1 < end:
page[j] = page[j + idx + 1]
self.penMap[off + j] = self.greenPen
obj['text'] = page[i+1:i+k*2]
I.append(obj)
self.penMap[off + i] = self.greenPen
i += k*2
i = i + 1
return I
pass
def highliteWidechar(self, page):
off = self.dataModel.getOffset()
page_end = off + len(page)
touched = False
#print '-------'
for idx, iv in enumerate(self.Intervals):
#print 'acum aici'
# in interval
s, e, I = iv
#print s ,e
#print page_end
page_start = off
if off >= s:
touched = True
if page_end <= e:
self._changeText(page, off, I)
else:
if off <= e:
I2 = self._expand(page, off, e, page_end)
for obj in I2:
I.append(obj)
e = page_end
self.Intervals[idx] = (s, e, I)
else:
                        # we are several pages further down
touched = False
else:
if page_end <= e and page_end >= s:
# scrolled up
I2 = self._expand(page, off, page_start, s)
for obj in I2:
I.append(obj)
s = page_start
self.Intervals[idx] = (s, e, I)
touched = True
else:
# out of this interval
touched = False
if not touched or touched:
#print 'aici'
self.Intervals.append((off, page_end, self._expand(page, off, off, page_end)))
"""
class RangePen(PageDecorator):
def __init__(self, decorated, a, b, pen, ignoreHighlights=True):
super().__init__(decorated)
self.dataModel = decorated.getDataModel()
self.decorated = decorated
self.a = a
self.b = b
self.pen = pen
self.already = False
self.ignoreHighlights = ignoreHighlights
def decorate(self, pageOffset=None):
self.page = self.decorated.decorate(pageOffset)
self.PenInterval = self.decorated.PenInterval
self.brushMap = self.decorated.brushMap
self.penMap = self.decorated.penMap
if not self.already:
            self.addPenInterval(self.a, self.b, self.pen, ignoreHighlights=self.ignoreHighlights)
self.already = True
return self.page
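# Minimal stacking sketch (not part of the original module). The stub view-mode
# and data-model classes below are hypothetical stand-ins for the real qiew
# objects; they only provide the two methods the decorators actually call.
if __name__ == "__main__":
    class _StubDataModel:
        def getOffset(self):
            return 0

    class _StubViewMode:
        def __init__(self, page):
            self._page = page
            self._model = _StubDataModel()

        def getDataModel(self):
            return self._model

        def getDisplayablePage(self, pageOffset=None):
            return self._page

    viewmode = _StubViewMode(bytearray(b"GET /index.html HTTP/1.1\x00\x00\x01\x02junk"))
    # Decorators wrap each other: every decorate() call delegates to the wrapped
    # object first, then adds its own entries to the shared pen/brush maps.
    chain = HighlightASCII(TextDecorator(viewmode))
    chain = HighlightPrefix(chain, b"GET", brush=chain.grayBrush, pen=chain.whitePen)
    page = chain.decorate()
    # chain.penMap / chain.brushMap now hold per-offset styling for this page.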
|
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import urllib
import xml.sax
import threading
import boto
from boto import handler
from boto.connection import AWSQueryConnection
from boto.sdb.domain import Domain, DomainMetaData
from boto.sdb.item import Item
from boto.sdb.regioninfo import SDBRegionInfo
from boto.exception import SDBResponseError
from boto.resultset import ResultSet
import warnings
class ItemThread(threading.Thread):
def __init__(self, name, domain_name, item_names):
threading.Thread.__init__(self, name=name)
print 'starting %s with %d items' % (name, len(item_names))
self.domain_name = domain_name
self.conn = SDBConnection()
self.item_names = item_names
self.items = []
def run(self):
for item_name in self.item_names:
item = self.conn.get_attributes(self.domain_name, item_name)
self.items.append(item)
class SDBConnection(AWSQueryConnection):
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sdb.amazonaws.com'
APIVersion = '2007-11-07'
SignatureVersion = '2'
ResponseError = SDBResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/', converter=None):
if not region:
region = SDBRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
self.region.endpoint, debug, https_connection_factory, path)
self.box_usage = 0.0
self.converter = converter
self.item_cls = Item
def set_item_cls(self, cls):
self.item_cls = cls
def build_name_value_list(self, params, attributes, replace=False):
keys = attributes.keys()
keys.sort()
i = 1
for key in keys:
value = attributes[key]
if isinstance(value, list):
for v in value:
params['Attribute.%d.Name'%i] = key
if self.converter:
v = self.converter.encode(v)
params['Attribute.%d.Value'%i] = v
if replace:
params['Attribute.%d.Replace'%i] = 'true'
i += 1
else:
params['Attribute.%d.Name'%i] = key
if self.converter:
value = self.converter.encode(value)
params['Attribute.%d.Value'%i] = value
if replace:
params['Attribute.%d.Replace'%i] = 'true'
i += 1
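        # For example, attributes={'color': ['red', 'blue'], 'size': 'L'} (hypothetical
        # input) produces Attribute.1.Name=color / Attribute.1.Value=red,
        # Attribute.2.Name=color / Attribute.2.Value=blue and
        # Attribute.3.Name=size / Attribute.3.Value=L, since keys are sorted first
        # (plus Attribute.N.Replace='true' for each when replace is True).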
def build_batch_list(self, params, items, replace=False):
item_names = items.keys()
i = 0
for item_name in item_names:
j = 0
item = items[item_name]
attr_names = item.keys()
params['Item.%d.ItemName' % i] = item_name
for attr_name in attr_names:
value = item[attr_name]
if isinstance(value, list):
for v in value:
if self.converter:
v = self.converter.encode(v)
params['Item.%d.Attribute.%d.Name' % (i,j)] = attr_name
params['Item.%d.Attribute.%d.Value' % (i,j)] = v
if replace:
params['Item.%d.Attribute.%d.Replace' % (i,j)] = 'true'
j += 1
else:
params['Item.%d.Attribute.%d.Name' % (i,j)] = attr_name
if self.converter:
value = self.converter.encode(value)
params['Item.%d.Attribute.%d.Value' % (i,j)] = value
if replace:
params['Item.%d.Attribute.%d.Replace' % (i,j)] = 'true'
j += 1
i += 1
def build_name_list(self, params, attribute_names):
i = 1
attribute_names.sort()
for name in attribute_names:
params['Attribute.%d.Name'%i] = name
i += 1
def get_usage(self):
"""
Returns the BoxUsage accumulated on this SDBConnection object.
:rtype: float
:return: The accumulated BoxUsage of all requests made on the connection.
"""
return self.box_usage
def print_usage(self):
"""
Print the BoxUsage and approximate costs of all requests made on this connection.
"""
print 'Total Usage: %f compute seconds' % self.box_usage
cost = self.box_usage * 0.14
print 'Approximate Cost: $%f' % cost
def get_domain(self, domain_name, validate=True):
domain = Domain(self, domain_name)
if validate:
self.select(domain, """select * from `%s` limit 1""" % domain_name)
return domain
def lookup(self, domain_name, validate=True):
"""
Lookup an existing SimpleDB domain
:type domain_name: string
:param domain_name: The name of the new domain
:rtype: :class:`boto.sdb.domain.Domain` object or None
:return: The Domain object or None if the domain does not exist.
"""
try:
domain = self.get_domain(domain_name, validate)
except:
domain = None
return domain
def get_all_domains(self, max_domains=None, next_token=None):
params = {}
if max_domains:
params['MaxNumberOfDomains'] = max_domains
if next_token:
params['NextToken'] = next_token
return self.get_list('ListDomains', params, [('DomainName', Domain)])
def create_domain(self, domain_name):
"""
Create a SimpleDB domain.
:type domain_name: string
:param domain_name: The name of the new domain
:rtype: :class:`boto.sdb.domain.Domain` object
:return: The newly created domain
"""
params = {'DomainName':domain_name}
d = self.get_object('CreateDomain', params, Domain)
d.name = domain_name
return d
def get_domain_and_name(self, domain_or_name):
if (isinstance(domain_or_name, Domain)):
return (domain_or_name, domain_or_name.name)
else:
return (self.get_domain(domain_or_name), domain_or_name)
def delete_domain(self, domain_or_name):
"""
Delete a SimpleDB domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:rtype: bool
:return: True if successful
B{Note:} This will delete the domain and all items within the domain.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name}
return self.get_status('DeleteDomain', params)
def domain_metadata(self, domain_or_name):
"""
Get the Metadata for a SimpleDB domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:rtype: :class:`boto.sdb.domain.DomainMetaData` object
:return: The newly created domain metadata object
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name}
d = self.get_object('DomainMetadata', params, DomainMetaData)
d.domain = domain
return d
def put_attributes(self, domain_or_name, item_name, attributes, replace=True):
"""
Store attributes for a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are being stored.
        :type attributes: dict or dict-like object
        :param attributes: The name/value pairs to store as attributes
:type replace: bool
:param replace: Whether the attribute values passed in will replace
                        existing values or will be added as additional values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name,
'ItemName' : item_name}
self.build_name_value_list(params, attributes, replace)
return self.get_status('PutAttributes', params)
def batch_put_attributes(self, domain_or_name, items, replace=True):
"""
Store attributes for multiple items in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are themselves dictionaries
of attribute names/values, exactly the same as the
attribute_names parameter of the scalar put_attributes
call.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
                        existing values or will be added as additional values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name}
self.build_batch_list(params, items, replace)
return self.get_status('BatchPutAttributes', params, verb='POST')
def get_attributes(self, domain_or_name, item_name, attribute_names=None, item=None):
"""
Retrieve attributes for a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are being retrieved.
:type attribute_names: string or list of strings
:param attribute_names: An attribute name or list of attribute names. This
parameter is optional. If not supplied, all attributes
will be retrieved for the item.
:rtype: :class:`boto.sdb.item.Item`
:return: An Item mapping type containing the requested attribute name/values
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name,
'ItemName' : item_name}
if attribute_names:
if not isinstance(attribute_names, list):
attribute_names = [attribute_names]
self.build_list_params(params, attribute_names, 'AttributeName')
response = self.make_request('GetAttributes', params)
body = response.read()
if response.status == 200:
if item == None:
item = self.item_cls(domain, item_name)
h = handler.XmlHandler(item, self)
xml.sax.parseString(body, h)
return item
else:
raise SDBResponseError(response.status, response.reason, body)
def delete_attributes(self, domain_or_name, item_name, attr_names=None):
"""
Delete attributes from a given item in a domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
:param item_name: The name of the item whose attributes are being deleted.
        :type attr_names: dict, list or :class:`boto.sdb.item.Item`
        :param attr_names: Either a list containing attribute names, which will cause
                           all values associated with those attribute names to be deleted, or
                           a dict or Item with the attribute names as keys and the list
                           of values to delete as the value. If no value is supplied,
                           all attribute name/values for the item will be deleted.
:rtype: bool
:return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name,
'ItemName' : item_name}
if attr_names:
if isinstance(attr_names, list):
self.build_name_list(params, attr_names)
elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls):
self.build_name_value_list(params, attr_names)
return self.get_status('DeleteAttributes', params)
def query(self, domain_or_name, query='', max_items=None, next_token=None):
"""
Returns a list of item names within domain_name that match the query.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type query: string
:param query: The SimpleDB query to be performed.
:type max_items: int
:param max_items: The maximum number of items to return. If not
supplied, the default is None which returns all
items matching the query.
:rtype: ResultSet
:return: An iterator containing the results.
"""
warnings.warn('Query interface is deprecated', DeprecationWarning)
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name,
'QueryExpression' : query}
if max_items:
params['MaxNumberOfItems'] = max_items
if next_token:
params['NextToken'] = next_token
return self.get_object('Query', params, ResultSet)
def query_with_attributes(self, domain_or_name, query='', attr_names=None,
max_items=None, next_token=None):
"""
Returns a set of Attributes for item names within domain_name that match the query.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type query: string
:param query: The SimpleDB query to be performed.
:type attr_names: list
:param attr_names: The name of the attributes to be returned.
If no attributes are specified, all attributes
will be returned.
:type max_items: int
:param max_items: The maximum number of items to return. If not
supplied, the default is None which returns all
items matching the query.
:rtype: ResultSet
:return: An iterator containing the results.
"""
warnings.warn('Query interface is deprecated', DeprecationWarning)
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name,
'QueryExpression' : query}
if max_items:
params['MaxNumberOfItems'] = max_items
if next_token:
params['NextToken'] = next_token
if attr_names:
self.build_list_params(params, attr_names, 'AttributeName')
return self.get_list('QueryWithAttributes', params, [('Item', self.item_cls)], parent=domain)
def select(self, domain_or_name, query='', next_token=None):
"""
Returns a set of Attributes for item names within domain_name that match the query.
The query must be expressed in using the SELECT style syntax rather than the
original SimpleDB query language.
Even though the select request does not require a domain object, a domain
object must be passed into this method so the Item objects returned can
point to the appropriate domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type query: string
:param query: The SimpleDB query to be performed.
:rtype: ResultSet
:return: An iterator containing the results.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'SelectExpression' : query}
if next_token:
params['NextToken'] = next_token
return self.get_list('Select', params, [('Item', self.item_cls)], parent=domain)
def threaded_query(self, domain_or_name, query='', max_items=None, next_token=None, num_threads=6):
"""
Returns a list of fully populated items that match the query provided.
The name/value pairs for all of the matching item names are retrieved in a number of separate
threads (specified by num_threads) to achieve maximum throughput.
The ResultSet that is returned has an attribute called next_token that can be used
to retrieve additional results for the same query.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
if max_items and num_threads > max_items:
num_threads = max_items
rs = self.query(domain_or_name, query, max_items, next_token)
threads = []
n = len(rs) / num_threads
for i in range(0, num_threads):
if i+1 == num_threads:
thread = ItemThread('Thread-%d' % i, domain_name, rs[n*i:])
else:
thread = ItemThread('Thread-%d' % i, domain_name, rs[n*i:n*(i+1)])
threads.append(thread)
thread.start()
del rs[0:]
for thread in threads:
thread.join()
for item in thread.items:
rs.append(item)
return rs
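# Minimal usage sketch (not part of the original module). It assumes valid AWS
# credentials are available to boto; the domain name, item name and attribute
# values are hypothetical.
if __name__ == "__main__":
    conn = SDBConnection()
    domain = conn.create_domain('mydomain')
    conn.put_attributes(domain, 'item1', {'color': ['red', 'blue'], 'size': 'L'})
    item = conn.get_attributes(domain, 'item1')
    rs = conn.select(domain, "select * from `mydomain` where size = 'L'")
    conn.delete_domain(domain)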
|
|
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
m = "'init' must be 'pca', 'random' or a NumPy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
    # skip_num_points should make it such that the Barnes-Hut gradient
    # is not calculated for indices below skip_num_points.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
    _barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
                              grad_bh, 0.5, 2, 1,
                              skip_num_points=skip_num_points)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
    X[(random_state.randint(0, 100, 50), random_state.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
    # The test will hang if the quadtree insertion does not terminate.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
    # Make sure translation between 1D and N-D indices is preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],
[3, 299, 299, 3], [5, 183, 183, 1]]
  filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],
                  [3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3, 8],
                  [5, 5, 1, 2]]
out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],
[3, 150, 150, 24], [5, 92, 92, 2]]
strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME, SAME]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def CheckGradConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
compute_gradient_error() is very expensive. So the configs should be
relatively small.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],
[2, 15, 16, 1]]
filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],
[3, 3, 1, 2]]
out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],
[2, 5, 5, 2]]
strides = [1, 2, 1, 1, 3]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, VALID, SAME, SAME, VALID]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
class DepthwiseConv2DTest(test.TestCase):
  # This is testing that depthwise_conv2d and depthwise_conv2d_native
  # produce the same results. It also tests that the NCHW and NHWC data
  # formats agree, by checking that depthwise_conv2d_native run with the
  # 'NCHW' format (with transposition) matches the 'NHWC' result from the
  # higher level interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_type,
use_gpu,
grouped_conv=False,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
use_gpu: Whether to use GPU.
grouped_conv: Whether to use cuDNN 7's grouped convolution.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
input_size = 1
filter_size = 1
for s in tensor_in_sizes:
input_size *= s
for s in filter_in_sizes:
filter_size *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = [f * 1.0 / input_size for f in range(1, input_size + 1)]
x2 = [f * 1.0 / filter_size for f in range(1, filter_size + 1)]
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = {
dtypes.float16: 4e-2,
dtypes.float32: 1e-5,
dtypes.float64: 1e-12,
}[data_type]
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with sess.graph._kernel_label_map({
"DepthwiseConv2dNative": "cudnn_grouped_convolution"
} if grouped_conv else {}):
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
try:
native_result = sess.run(conv_native)
except errors.InvalidArgumentError as e:
# Grouped convolution kernel is only registered for cuDNN 7. Silently
# return when we are running on an earlier version or without GPU.
if e.message.startswith(
"No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
tf_logging.warn("Skipping grouped convolution test")
return
raise e
conv_interface = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
interface_result = sess.run(conv_interface)
tf_logging.info(
"data_type: %r, use_gpu: %r, grouped_conv: %r, max diff = %f",
data_type, use_gpu, grouped_conv,
np.amax(np.absolute(native_result - interface_result)))
self.assertArrayNear(
np.ravel(native_result), np.ravel(interface_result), tolerance)
self.assertShapeEqual(native_result, conv_native)
self.assertShapeEqual(native_result, conv_interface)
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
"%s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
tf_logging.info("Testing without grouped_conv")
self._VerifyValues(
input_size, filter_size, stride, padding, data_type, use_gpu=True)
tf_logging.info("Testing with grouped_conv")
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
grouped_conv=True)
def testDepthwiseConv2DWithUnknownShape(self):
# GitHub issue 22110.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32)
f = np.ones([1, 1, 1, 1], np.float32)
v = nn_impl.depthwise_conv2d(
x, f, [1, 1, 1, 1], "VALID", rate=[2, 1], data_format="NCHW")
self.assertAllEqual(
np.ones([1, 1, 1, 1], np.float32),
v.eval(feed_dict={x: np.ones([1, 1, 1, 1], np.float32)}))
def testDepthwiseConv2DFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFormat, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected, use_gpu):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether to use GPU.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
    # Initializes the input tensor with an array containing incrementing
    # numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
tf_logging.info("value = %r", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=False)
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=True)
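  # The hand-calculated outputs above can be cross-checked against a small
  # NumPy reference. This is only an illustrative sketch (not part of the
  # original test suite), assuming stride 1 and VALID padding.
  def _ReferenceDepthwiseConv2DValid(self, x, f):
    """Naive depthwise conv reference: x is NHWC, f is [rows, cols, in_depth, mult]."""
    batch, rows, cols, in_depth = x.shape
    frows, fcols, _, mult = f.shape
    out = np.zeros((batch, rows - frows + 1, cols - fcols + 1, in_depth * mult))
    for b in range(batch):
      for i in range(out.shape[1]):
        for j in range(out.shape[2]):
          patch = x[b, i:i + frows, j:j + fcols, :]
          # Each input channel is convolved with its own `mult` filters; the
          # output channel order is (in_depth, depth_multiplier), matching the
          # expected_output layout in testConv2D2x2Filter above.
          prod = patch[:, :, :, np.newaxis] * f
          out[b, i, j, :] = prod.sum(axis=(0, 1)).reshape(-1)
    return out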
  # Gradient checkers. This tests depthwise gradient computations for both
  # BackpropFilter and BackpropInput by comparing gradients computed by the
  # depthwise gradient ops with the gradients computed numerically (details
  # can be found in compute_gradient_error()).
  # Note this check is very expensive so the input should not be too big.
def _ConstructAndTestGradient(self,
input_shape,
filter_shape,
output_shape,
stride,
padding,
data_type,
test_input,
use_gpu,
grouped_conv=False,
data_format="NHWC"):
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = {
dtypes.float16: 4e-0,
dtypes.float32: 8e-4,
dtypes.float64: 1e-12,
}[data_type]
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
native_input = input_tensor
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_input = array_ops.transpose(input_tensor, [0, 3, 1, 2])
input_shape = [
input_shape[0], input_shape[3], input_shape[1], input_shape[2]
]
output_shape = [
output_shape[0], output_shape[3], output_shape[1], output_shape[2]
]
strides = [1, 1, stride, stride]
with sess.graph._kernel_label_map({
"DepthwiseConv2dNative": "cudnn_grouped_convolution",
"DepthwiseConv2dNativeBackpropInput": "cudnn_grouped_convolution",
"DepthwiseConv2dNativeBackpropFilter": "cudnn_grouped_convolution",
} if grouped_conv else {}):
depthwise_conv2d = nn_ops.depthwise_conv2d_native(
native_input,
filter_tensor,
strides,
padding,
data_format=data_format,
name="depthwise_conv2d")
self.assertEqual(output_shape, depthwise_conv2d.get_shape())
try:
if test_input:
err = gradient_checker.compute_gradient_error(
native_input, input_shape, depthwise_conv2d, output_shape)
else:
err = gradient_checker.compute_gradient_error(
filter_tensor, filter_shape, depthwise_conv2d, output_shape)
except errors.InvalidArgumentError as e:
# Grouped convolution kernel is only registered for cuDNN 7. Silently
# return when we are running on an earlier version or without GPU.
if grouped_conv and e.message.startswith(
"No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
tf_logging.warn("Skipping grouped convolution test")
return
raise e
tf_logging.info(
"data_type: %r, use_gpu: %r, grouped_conv: %r, error = %f", data_type,
use_gpu, grouped_conv, err)
self.assertLess(err, tolerance)
def testDepthwiseConv2DInputGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGrad, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True)
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
grouped_conv=True)
def testDepthwiseConv2DInputGradFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGradFormat, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
data_format="NCHW")
def testDepthwiseConv2DFilterGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGrad, %dth config: %r * %r, stride: "
"%d, padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True)
def testDepthwiseConv2DFilterGradFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGradFormat, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True,
data_format="NCHW")
def _CompareBackpropInputFloat(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def _CompareBackpropInputDouble(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float64)
x2 = np.random.rand(*output_sizes).astype(np.float64)
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropInputFloat(input_size, filter_size, output_size,
stride, padding)
self._CompareBackpropInputDouble(input_size, filter_size, output_size,
stride, padding)
def _CompareBackpropFilterFloat(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def _CompareBackpropFilterDouble(self, input_sizes, filter_sizes,
output_sizes, stride, padding):
x0 = np.random.rand(*input_sizes).astype(np.float64)
x2 = np.random.rand(*output_sizes).astype(np.float64)
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropFilterFloat(input_size, filter_size, output_size,
stride, padding)
self._CompareBackpropFilterDouble(input_size, filter_size, output_size,
stride, padding)
if __name__ == "__main__":
test.main()
|
|
import os
from pprint import pformat
from django import http
from django.core import signals
from django.core.handlers.base import BaseHandler
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, smart_str
# NOTE: do *not* import settings (or any module which eventually imports
# settings) until after ModPythonHandler has been called; otherwise os.environ
# won't be set up correctly (with respect to settings).
class ModPythonRequest(http.HttpRequest):
def __init__(self, req):
self._req = req
# FIXME: This isn't ideal. The request URI may be encoded (it's
# non-normalized) slightly differently to the "real" SCRIPT_NAME
# and PATH_INFO values. This causes problems when we compute path_info,
# below. For now, don't use script names that will be subject to
# encoding/decoding.
self.path = force_unicode(req.uri)
root = req.get_options().get('django.root', '')
self.django_root = root
# req.path_info isn't necessarily computed correctly in all
# circumstances (it's out of mod_python's control a bit), so we use
# req.uri and some string manipulations to get the right value.
if root and req.uri.startswith(root):
self.path_info = force_unicode(req.uri[len(root):])
else:
self.path_info = self.path
if not self.path_info:
# Django prefers empty paths to be '/', rather than '', to give us
# a common start character for URL patterns. So this is a little
# naughty, but also pretty harmless.
self.path_info = u'/'
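        # Illustrative example: with PythonOption django.root set to
        # "/mysite" and a request for "/mysite/polls/", self.path is
        # u"/mysite/polls/" while self.path_info is u"/polls/"; without
        # django.root both are u"/mysite/polls/".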
def __repr__(self):
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = pformat(self.GET)
except:
get = '<could not parse>'
try:
post = pformat(self.POST)
except:
post = '<could not parse>'
try:
cookies = pformat(self.COOKIES)
except:
cookies = '<could not parse>'
try:
meta = pformat(self.META)
except:
meta = '<could not parse>'
return smart_str(u'<ModPythonRequest\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
(self.path, unicode(get), unicode(post),
unicode(cookies), unicode(meta)))
def get_full_path(self):
return '%s%s' % (self.path, self._req.args and ('?' + self._req.args) or '')
def is_secure(self):
try:
return self._req.is_https()
except AttributeError:
# mod_python < 3.2.10 doesn't have req.is_https().
return self._req.subprocess_env.get('HTTPS', '').lower() in ('on', '1')
def _load_post_and_files(self):
"Populates self._post and self._files"
if 'content-type' in self._req.headers_in and self._req.headers_in['content-type'].startswith('multipart'):
self._raw_post_data = ''
self._post, self._files = self.parse_file_upload(self.META, self._req)
else:
self._post, self._files = http.QueryDict(self.raw_post_data, encoding=self._encoding), datastructures.MultiValueDict()
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
self._get = http.QueryDict(self._req.args, encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self._req.headers_in.get('cookie', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
def _get_meta(self):
"Lazy loader that returns self.META dictionary"
if not hasattr(self, '_meta'):
self._meta = {
'AUTH_TYPE': self._req.ap_auth_type,
'CONTENT_LENGTH': self._req.headers_in.get('content-length'),
'CONTENT_TYPE': self._req.headers_in.get('content-type'),
'GATEWAY_INTERFACE': 'CGI/1.1',
'PATH_INFO': self.path_info,
'PATH_TRANSLATED': None, # Not supported
'QUERY_STRING': self._req.args,
'REMOTE_ADDR': self._req.connection.remote_ip,
'REMOTE_HOST': None, # DNS lookups not supported
'REMOTE_IDENT': self._req.connection.remote_logname,
'REMOTE_USER': self._req.user,
'REQUEST_METHOD': self._req.method,
'SCRIPT_NAME': self.django_root,
'SERVER_NAME': self._req.server.server_hostname,
'SERVER_PORT': self._req.server.port,
'SERVER_PROTOCOL': self._req.protocol,
'SERVER_SOFTWARE': 'mod_python'
}
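            # Request headers are exposed CGI-style; e.g. a (hypothetical)
            # 'User-Agent' header would appear as self._meta['HTTP_USER_AGENT'].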
for key, value in self._req.headers_in.items():
key = 'HTTP_' + key.upper().replace('-', '_')
self._meta[key] = value
return self._meta
def _get_raw_post_data(self):
try:
return self._raw_post_data
except AttributeError:
self._raw_post_data = self._req.read()
return self._raw_post_data
def _get_method(self):
return self.META['REQUEST_METHOD'].upper()
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
META = property(_get_meta)
REQUEST = property(_get_request)
raw_post_data = property(_get_raw_post_data)
method = property(_get_method)
class ModPythonHandler(BaseHandler):
request_class = ModPythonRequest
def __call__(self, req):
# mod_python fakes the environ, and thus doesn't process SetEnv. This fixes that
os.environ.update(req.subprocess_env)
# now that the environ works we can see the correct settings, so imports
# that use settings now can work
from django.conf import settings
# if we need to set up middleware, now that settings works we can do it now.
if self._request_middleware is None:
self.load_middleware()
set_script_prefix(req.get_options().get('django.root', ''))
signals.request_started.send(sender=self.__class__)
try:
try:
request = self.request_class(req)
except UnicodeDecodeError:
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
# Apply response middleware
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
finally:
signals.request_finished.send(sender=self.__class__)
# Convert our custom HttpResponse object back into the mod_python req.
req.content_type = response['Content-Type']
for key, value in response.items():
if key != 'content-type':
req.headers_out[str(key)] = str(value)
for c in response.cookies.values():
req.headers_out.add('Set-Cookie', c.output(header=''))
req.status = response.status_code
try:
for chunk in response:
req.write(chunk)
finally:
response.close()
return 0 # mod_python.apache.OK
def handler(req):
# mod_python hooks into this function.
return ModPythonHandler()(req)
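# Illustrative Apache configuration (an assumption for documentation, not
# part of this module) that routes requests through handler() above:
#
#   <Location "/mysite/">
#       SetHandler python-program
#       PythonHandler django.core.handlers.modpython
#       SetEnv DJANGO_SETTINGS_MODULE mysite.settings
#       PythonOption django.root /mysite
#   </Location>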
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ProjectKey.user_added'
db.add_column('sentry_projectkey', 'user_added',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='keys_added_set', null=True, to=orm['auth.User']))
# Adding field 'ProjectKey.date_added'
db.add_column('sentry_projectkey', 'date_added',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True))
def backwards(self, orm):
# Deleting field 'ProjectKey.user_added'
db.delete_column('sentry_projectkey', 'user_added_id')
# Deleting field 'ProjectKey.date_added'
db.delete_column('sentry_projectkey', 'date_added')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['auth.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from collections import OrderedDict
from six import integer_types, text_type, binary_type
from varcode import Variant
import pandas as pd
VALID_ELEMENT_TYPES = integer_types + (text_type, binary_type, float, bool)
# values of these types are automatically converted to their size or length
# unless some other conversion function is provided
COLLECTION_TYPES = (tuple, list, set, frozenset)
class DataFrameBuilder(object):
"""
Helper class for constructing a DataFrame which always has fields
of a variant (chr/pos/ref/alt) as well as some subset of the fields
from a namedtuple or ValueObject.
"""
def __init__(
self,
element_class,
field_names=None,
exclude=set([]),
converters={},
rename_dict={},
extra_column_fns={},
variant_columns=True,
convert_collections_to_size=True):
"""
Parameters
----------
element_class : type
Class of elements in this collection.
field_names : list, optional
If not given then we expect element_class to have a class member
named '_fields' which is a list of field names.
        exclude : set
            Field names from element_class which should be excluded from the
            columns of the DataFrame we're building.
converters : dict
Dictionary of names mapping to functions. These functions will be
applied to each element of a column before it's added to the
DataFrame.
rename_dict : dict
Dictionary mapping element_class field names to desired column names
in the produced DataFrame.
extra_column_fns : dict
Dictionary mapping column names to functions which take a variant
and element (such as an AlleleRead instance) and return a single
value for each row.
variant_columns : bool
If True, then add four columns for fields of a Variant: chr/pos/ref/alt
along with a "gene" column indicating which gene name(s) the variant
overlaps.
convert_collections_to_size : bool
If a value is a built-in collection (list, tuple, or set) then
transform it to the size of that collection. If this option is False
then collection values cause a runtime error.
"""
self.element_class = element_class
self.rename_dict = rename_dict
self.converters = converters
self.variant_columns = variant_columns
self.convert_collections_to_size = convert_collections_to_size
if field_names is None:
assert hasattr(element_class, "_fields"), (
"Expected %s to have member called `_fields`" % element_class)
field_names = element_class._fields
# remove specified field names without changing the order of the others
self.original_field_names = [
x
for x in field_names
if x not in exclude
]
for name in converters:
if name not in self.original_field_names:
raise ValueError("No field named '%s', valid names: %s" % (
name,
self.original_field_names))
self.renamed_field_names = [
self.rename_dict.get(x, x)
for x in self.original_field_names
]
if self.variant_columns:
columns_list = [
# fields related to variant
("chr", []),
("pos", []),
("ref", []),
("alt", []),
]
else:
columns_list = []
for name in self.renamed_field_names:
columns_list.append((name, []))
self.extra_column_fns = extra_column_fns
for column_name in self.extra_column_fns:
columns_list.append((column_name, []))
self.columns_dict = OrderedDict(columns_list)
def add(self, variant, element):
"""
Add one row to the DataFrame
Parameters
----------
variant : varcode.Variant
element : must have type self.element_class
"""
if self.variant_columns:
assert isinstance(variant, Variant), \
"Expected %s : %s to be a Variant" % (
variant,
type(variant))
self.columns_dict["chr"].append(variant.contig)
self.columns_dict["pos"].append(variant.original_start)
self.columns_dict["ref"].append(variant.original_ref)
self.columns_dict["alt"].append(variant.original_alt)
else:
assert variant is None
assert isinstance(element, self.element_class), \
"Expected %s : %s to have type %s" % (
element, type(element), self.element_class)
for name in self.original_field_names:
value = getattr(element, name)
if name in self.converters:
fn = self.converters[name]
value = fn(value)
if isinstance(value, COLLECTION_TYPES) and self.convert_collections_to_size:
value = len(value)
elif value is None:
value = None
elif not isinstance(value, VALID_ELEMENT_TYPES):
raise ValueError(
"Please provider converter for field '%s' : %s to make a scalar or string" % (
name,
type(value)))
if name in self.rename_dict:
name = self.rename_dict[name]
self.columns_dict[name].append(value)
for column_name, fn in self.extra_column_fns.items():
self.columns_dict[column_name].append(fn(variant, element))
def add_many(self, variant, elements):
for element in elements:
self.add(variant, element)
def _check_column_lengths(self):
"""
Make sure columns are of the same length or else DataFrame construction
will fail.
"""
column_lengths_dict = {
name: len(xs)
for (name, xs)
in self.columns_dict.items()
}
unique_column_lengths = set(column_lengths_dict.values())
if len(unique_column_lengths) != 1:
raise ValueError(
"Mismatch between lengths of columns: %s" % (column_lengths_dict,))
def to_dataframe(self):
"""
Creates dataframe from accumulated rows
Returns
-------
pandas.DataFrame
"""
self._check_column_lengths()
return pd.DataFrame(self.columns_dict)
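# Minimal usage sketch (hypothetical element type and values, not part of
# this module):
#
#   from collections import namedtuple
#   AlleleCount = namedtuple("AlleleCount", ["n_ref", "n_alt"])
#   builder = DataFrameBuilder(AlleleCount)
#   builder.add(Variant("1", 100, "A", "T"), AlleleCount(n_ref=10, n_alt=3))
#   df = builder.to_dataframe()  # columns: chr, pos, ref, alt, n_ref, n_alt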
|
|
import arrow
import json
import os
import sys
from paramiko import SSHClient
from sh import ssh, rsync, Command, \
ErrorReturnCode_1, ErrorReturnCode_255, ErrorReturnCode
from slugify import slugify
from subprocess32 import call, Popen
from .config import expose_username, expose_url, data_dir, verbosity
from .expose import expose
from .log import get_logger
from .utils import quote
from .vagrant import ansible_env
from .virtualbox import list_vms, vm_network, vm_ip, \
vm_info, vm_start, vm_suspend
logger = get_logger('box')
class BoxList(list):
def not_created(self):
return [box for box in self if box.status() == 'not created']
def running(self):
return [box for box in self if box.is_running()]
def suspended(self):
return [box for box in self if box.status() == 'saved']
def status(self, status):
return [box for box in self if box.status() == status]
class Box(object):
"""
    Represents an AerisCloud VM, which might or might not be running. Takes
    a Project instance and an entry from the .aeriscloud.yml infra
    configuration.
:param project: project.Project
:param data: dict[str,str]
"""
NO_PROJECT_DIR = 254
def __init__(self, project, data):
self.project = project
self.data = data
self._vm_name = ''.join([self.project.name(), '-', self.data['name']])
self._logger = get_logger(self._vm_name)
self.basebox = self.data.get('basebox', 'chef/centos-7.0')
def name(self):
"""
Return the name of the box in the project's infra
:return: str
"""
return self.data['name']
def image(self):
return self.basebox
def vm_name(self):
"""
Return the VM name
:return: str
"""
return self._vm_name
def info(self):
"""
        Return all the available information about a box (raw, somewhat messy key/value output)
:return: dict[str,str]
"""
return vm_info(self._vm_name)
def status(self):
"""
Return the current status of the VM
:return: str
"""
if self._vm_name not in list_vms():
return 'not created'
info = self.info()
if 'VMState' in info:
return info['VMState'].strip('"')
return 'shutdown'
def last_status_change(self):
info = self.info()
if 'VMStateChangeTime' in info:
change_time = info['VMStateChangeTime'].strip('"')
return arrow.get(change_time[:-10]).to('local')
return None
def forwards(self):
headers = ['protocol', 'host_ip', 'host_port',
'guest_ip', 'guest_port']
return dict([
(
info.strip('"').split(',')[0],
dict(zip(headers, info.strip('"').split(',')[1:]))
)
for key, info in self.info().iteritems()
if key.startswith('Forwarding')
])
def is_running(self):
"""
Return whether or not the VM is currently running
:return: bool
"""
return self._vm_name in list_vms(True)
def network(self):
"""
        Return the information for every network interface on the VM
        (this can be slow)
:return: list[map[str,str]]
"""
return vm_network(self._vm_name)
def ip(self, id=1):
"""
        Return the IP of a box for the given interface. By default,
        AerisCloud VMs have 2 interfaces:
* id 0: NAT interface, on the 10.0.0.0 network
* id 1: Host Only interface on the 172.16.0.0 network
:param id: int
:return: str
"""
return vm_ip(self._vm_name, id)
def ssh_key(self):
# Vagrant 1.7+ support
local_key = os.path.join(self.project.vagrant_dir(), 'machines',
self.name(), 'virtualbox', 'private_key')
insecure_key = os.path.join(os.environ['HOME'], '.vagrant.d',
'insecure_private_key')
if os.path.isfile(local_key):
self._logger.debug('using key "%s" for ssh connection', local_key)
return local_key
self._logger.debug('using key "%s" for ssh connection', insecure_key)
return insecure_key
def ssh(self, **kwargs):
"""
        Return a pre-baked ssh command to be used for calling
        commands on the remote server. Each command runs over
        a separate connection.
:return: sh.Command
"""
return ssh.bake(self.ip(), '-A', '-t', i=self.ssh_key(), l='vagrant',
o='StrictHostKeyChecking no', **kwargs)
def ssh_client(self):
"""
When needing a more precise SSH client, returns a paramiko SSH client
:return: paramiko.SSHClient
"""
client = SSHClient()
client.load_system_host_keys()
        # ssh_key() returns a path, so pass it as key_filename rather than pkey
        client.connect(self.ip(), username='vagrant',
                       key_filename=self.ssh_key())
return client
def ssh_shell(self, cmd=None, cd=True, popen=False, **kwargs):
"""
Create an interactive ssh shell on the remote VM
:return: subprocess32.Popen
"""
call_args = [
'ssh', self.ip(), '-t', '-A',
'-l', 'vagrant',
'-i', self.ssh_key()]
if cmd:
if isinstance(cmd, tuple) or isinstance(cmd, list):
cmd = ' '.join(map(quote, cmd))
if cd:
cmd = '[ ! -d "{0}" ] && exit {1}; cd "{0}"; {2}'.format(
self.project.name(),
self.NO_PROJECT_DIR,
cmd
)
call_args.append(cmd)
self._logger.debug('calling %s', ' '.join(call_args))
if popen:
return Popen(call_args, start_new_session=True, **kwargs)
return call(call_args, **kwargs)
def up(self, *args, **kwargs):
res = self.vagrant('up', *args, **kwargs)
if res == 0:
expose.add(self)
return res
def halt(self, *args, **kwargs):
res = self.vagrant('halt', *args, **kwargs)
if res == 0:
expose.remove(self)
return res
def resume(self):
"""
Resume a suspended box
:return: bool
"""
if self.status() != 'saved':
return False
vm_start(self._vm_name)
expose.add(self)
return True
def suspend(self):
"""
Save the state of a running box
:return: bool
"""
if not self.is_running():
return False
vm_suspend(self._vm_name)
expose.remove(self)
return True
def destroy(self):
res = self.vagrant('destroy')
if res == 0:
expose.remove(self)
return res
def expose(self):
expose.add(self)
def vagrant(self, *args, **kwargs):
"""
Runs a vagrant command
"""
args = tuple(list(args) + [self.name()])
return self.project.vagrant(*args, **kwargs)
def browse(self, endpoint='', ip=False):
"""
Given an endpoint, returns the URI to that endpoint
:param endpoint: str
:param ip: bool
:return: str
"""
if not ip and expose.enabled():
host = '%s.%s.%s.%s' % (self.data['name'],
self.project.name(),
expose_username(),
expose_url())
else:
host = self.ip()
project_config = self.project.config()
if 'browse' in project_config \
and endpoint in project_config['browse']:
path = project_config['browse'][endpoint]
else:
services = dict()
for service in self.services():
if 'path' not in service:
continue
services[slugify(service['name'])] = service
if endpoint in services:
service = services[endpoint]
return 'http://' + self.ip() + ':' \
+ service['port'] + service['path']
path = '/'
return 'http://' + host + path
def services(self):
"""
List the services running on the box
        :return: list[dict[str,str]]
"""
try:
return [
dict(zip(
['name', 'port', 'path', 'protocol'],
service.strip().split(',')
))
for service in self.ssh().cat('/etc/aeriscloud.d/*')
]
except ErrorReturnCode_1 as e:
self._logger.warn(e.stderr)
return []
except ErrorReturnCode_255 as e:
self._logger.error(e.stderr)
return []
def history(self):
try:
return [json.loads(line.strip()) for line in
self.ssh().cat('/home/vagrant/.provision')]
except ErrorReturnCode_1:
return []
except ErrorReturnCode_255 as e:
self._logger.error(e.stderr)
return []
def ansible(self, cmd='ansible-playbook'):
tmp_inventory_dir = os.path.join(data_dir(), 'vagrant-inventory')
if not os.path.isdir(tmp_inventory_dir):
os.makedirs(tmp_inventory_dir)
# create a temporary inventory file for ansible
tmp_inventory_file = os.path.join(tmp_inventory_dir, self.vm_name())
with open(tmp_inventory_file, 'w') as f:
f.write('%s ansible_ssh_host=%s ansible_ssh_port=22 '
'ansible_ssh_private_key_file=%s' % (
self.vm_name(),
self.ip(),
self.ssh_key()
))
ansible = Command(cmd)
new_env = ansible_env(os.environ.copy())
return ansible.bake('-i', tmp_inventory_file,
'--extra-vars', '@%s' %
self.project.config_file(),
_env=new_env,
_out_bufsize=0,
_err_bufsize=0)
def rsync(self, src, dest):
# enable arcfour and no compression for faster speed
ssh_options = 'ssh -T -c arcfour -o Compression=no -x ' \
'-i "%s" -l vagrant' % self.ssh_key()
# basic args for rsync
args = ['--delete', '--archive', '--hard-links',
'--one-file-system', '--compress-level=0',
'--omit-dir-times', '-e', ssh_options]
if verbosity():
args.append('-v')
if verbosity() > 1:
args.append('--progress')
if verbosity() > 2:
args.append('--stats')
# check for ignore
conf = self.project.config()
if 'rsync_ignores' in conf:
# TODO: check format
args += map(lambda x: '--exclude="%s"' % x,
conf['rsync_ignores'])
        # then add src and dest
args += [src, dest]
self._logger.debug('running: rsync %s' % ' '.join(args))
try:
rsync(*args,
_out=sys.stdout, _err=sys.stderr,
_out_bufsize=0, _err_bufsize=0)
except ErrorReturnCode:
return False
return True
def rsync_up(self):
if not self.project.rsync_enabled():
return
return self.rsync(
'%s/' % self.project.folder(),
'%s:/data/%s/' % (self.ip(), self.project.name())
)
def rsync_down(self):
if not self.project.rsync_enabled():
return
return self.rsync(
'%s:/data/%s/' % (self.ip(), self.project.name()),
'%s/' % self.project.folder()
)
def __repr__(self):
return '<Box %s from project %s>' % (self.name(),
self.project.name())
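# Illustrative usage sketch (not part of the original module): combining the
# BoxList and Box classes above. The `project` argument is hypothetical here;
# in the real tool it is a project.Project built from .aeriscloud.yml.
def _example_box_usage(project):
    boxes = BoxList(Box(project, data) for data in [{'name': 'web'},
                                                    {'name': 'db'}])
    for box in boxes.not_created():
        box.up()  # create and start the VM through vagrant
    for box in boxes.running():
        print('%s is reachable at %s' % (box.vm_name(), box.ip()))
    return boxes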
|
|
#!/usr/bin/env vpython3
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: %prog [options] [<commitref>]*
If no <commitref>'s are supplied, it defaults to HEAD.
Calculates the generation number for one or more commits in a git repo.
Generation number of a commit C with parents P is defined as:
generation_number(C, []) = 0
generation_number(C, P) = max(map(generation_number, P)) + 1
This number can be used to order commits relative to each other, as long as for
any pair of the commits, one is an ancestor of the other.
Since calculating the generation number of a commit requires walking that
commit's entire history, this script caches all calculated data inside the git
repo that it operates on in the ref 'refs/number/commits'.
"""
from __future__ import print_function
from __future__ import division
import binascii
import collections
import logging
import optparse
import os
import struct
import sys
import tempfile
import git_common as git
import subprocess2
CHUNK_FMT = '!20sL'
CHUNK_SIZE = struct.calcsize(CHUNK_FMT)
DIRTY_TREES = collections.defaultdict(int)
REF = 'refs/number/commits'
AUTHOR_NAME = 'git-number'
AUTHOR_EMAIL = 'chrome-infrastructure-team@google.com'
# Number of bytes to use for the prefix on our internal number structure.
# 0 is slow to deserialize. 2 creates way too much bookkeeping overhead (would
# need to reimplement cache data structures to be a bit more sophisticated than
# dicts). 1 seems to be just right.
PREFIX_LEN = 1
# Set this to 'threads' to gather coverage data while testing.
POOL_KIND = 'procs'
def pathlify(hash_prefix):
"""Converts a binary object hash prefix into a posix path, one folder per
byte.
>>> pathlify('\xDE\xAD')
'de/ad'
"""
if sys.version_info.major == 3:
return '/'.join('%02x' % b for b in hash_prefix)
return '/'.join('%02x' % ord(b) for b in hash_prefix)
@git.memoize_one(threadsafe=False)
def get_number_tree(prefix_bytes):
"""Returns a dictionary of the git-number registry specified by
|prefix_bytes|.
This is in the form of {<full binary ref>: <gen num> ...}
>>> get_number_tree('\x83\xb4')
{'\x83\xb4\xe3\xe4W\xf9J*\x8f/c\x16\xecD\xd1\x04\x8b\xa9qz': 169, ...}
"""
ref = '%s:%s' % (REF, pathlify(prefix_bytes))
try:
raw = git.run('cat-file', 'blob', ref, autostrip=False, decode=False)
return dict(struct.unpack_from(CHUNK_FMT, raw, i * CHUNK_SIZE)
for i in range(len(raw) // CHUNK_SIZE))
except subprocess2.CalledProcessError:
return {}
@git.memoize_one(threadsafe=False)
def get_num(commit_hash):
"""Returns the generation number for a commit.
Returns None if the generation number for this commit hasn't been calculated
yet (see load_generation_numbers()).
"""
return get_number_tree(commit_hash[:PREFIX_LEN]).get(commit_hash)
def clear_caches(on_disk=False):
"""Clears in-process caches for e.g. unit testing."""
get_number_tree.clear()
get_num.clear()
if on_disk:
git.run('update-ref', '-d', REF)
def intern_number_tree(tree):
"""Transforms a number tree (in the form returned by |get_number_tree|) into
a git blob.
Returns the git blob id as hex-encoded string.
>>> d = {'\x83\xb4\xe3\xe4W\xf9J*\x8f/c\x16\xecD\xd1\x04\x8b\xa9qz': 169}
>>> intern_number_tree(d)
'c552317aa95ca8c3f6aae3357a4be299fbcb25ce'
"""
with tempfile.TemporaryFile() as f:
for k, v in sorted(tree.items()):
f.write(struct.pack(CHUNK_FMT, k, v))
f.seek(0)
return git.intern_f(f)
def leaf_map_fn(pre_tree):
"""Converts a prefix and number tree into a git index line."""
pre, tree = pre_tree
return '100644 blob %s\t%s\0' % (intern_number_tree(tree), pathlify(pre))
def finalize(targets):
"""Saves all cache data to the git repository.
After calculating the generation number for |targets|, call finalize() to
save all the work to the git repository.
This in particular saves the trees referred to by DIRTY_TREES.
"""
if not DIRTY_TREES:
return
msg = 'git-number Added %s numbers' % sum(DIRTY_TREES.values())
idx = os.path.join(git.run('rev-parse', '--git-dir'), 'number.idx')
env = os.environ.copy()
env['GIT_INDEX_FILE'] = str(idx)
progress_message = 'Finalizing: (%%(count)d/%d)' % len(DIRTY_TREES)
with git.ProgressPrinter(progress_message) as inc:
git.run('read-tree', REF, env=env)
prefixes_trees = ((p, get_number_tree(p)) for p in sorted(DIRTY_TREES))
updater = subprocess2.Popen(['git', 'update-index', '-z', '--index-info'],
stdin=subprocess2.PIPE, env=env)
with git.ScopedPool(kind=POOL_KIND) as leaf_pool:
for item in leaf_pool.imap(leaf_map_fn, prefixes_trees):
updater.stdin.write(item.encode())
inc()
updater.stdin.close()
updater.wait()
assert updater.returncode == 0
tree_id = git.run('write-tree', env=env)
commit_cmd = [
# Git user.name and/or user.email may not be configured, so specifying
# them explicitly. They are not used, but required by Git.
'-c', 'user.name=%s' % AUTHOR_NAME,
'-c', 'user.email=%s' % AUTHOR_EMAIL,
'commit-tree',
'-m', msg,
'-p'] + git.hash_multi(REF)
for t in targets:
commit_cmd.extend(['-p', binascii.hexlify(t).decode()])
commit_cmd.append(tree_id)
commit_hash = git.run(*commit_cmd)
git.run('update-ref', REF, commit_hash)
DIRTY_TREES.clear()
def preload_tree(prefix):
"""Returns the prefix and parsed tree object for the specified prefix."""
return prefix, get_number_tree(prefix)
def all_prefixes(depth=PREFIX_LEN):
if sys.version_info.major == 3:
prefixes = [bytes([i]) for i in range(255)]
else:
prefixes = [chr(i) for i in range(255)]
for x in prefixes:
# This isn't covered because PREFIX_LEN currently == 1
if depth > 1: # pragma: no cover
for r in all_prefixes(depth - 1):
yield x + r
else:
yield x
def load_generation_numbers(targets):
"""Populates the caches of get_num and get_number_tree so they contain
the results for |targets|.
Loads cached numbers from disk, and calculates missing numbers if one or
more of |targets| is newer than the cached calculations.
Args:
targets - An iterable of binary-encoded full git commit hashes.
"""
# In case they pass us a generator, listify targets.
targets = list(targets)
if all(get_num(t) is not None for t in targets):
return
if git.tree(REF) is None:
empty = git.mktree({})
commit_hash = git.run(
# Git user.name and/or user.email may not be configured, so specifying
# them explicitly. They are not used, but required by Git.
'-c', 'user.name=%s' % AUTHOR_NAME,
'-c', 'user.email=%s' % AUTHOR_EMAIL,
'commit-tree',
'-m', 'Initial commit from git-number',
empty)
git.run('update-ref', REF, commit_hash)
with git.ScopedPool(kind=POOL_KIND) as pool:
preload_iter = pool.imap_unordered(preload_tree, all_prefixes())
rev_list = []
with git.ProgressPrinter('Loading commits: %(count)d') as inc:
# Curiously, buffering the list into memory seems to be the fastest
# approach in python (as opposed to iterating over the lines in the
# stdout as they're produced). GIL strikes again :/
cmd = [
'rev-list', '--topo-order', '--parents', '--reverse', '^' + REF,
] + [binascii.hexlify(target).decode() for target in targets]
for line in git.run(*cmd).splitlines():
tokens = [binascii.unhexlify(token) for token in line.split()]
rev_list.append((tokens[0], tokens[1:]))
inc()
get_number_tree.update(preload_iter)
with git.ProgressPrinter('Counting: %%(count)d/%d' % len(rev_list)) as inc:
for commit_hash, pars in rev_list:
num = max(map(get_num, pars)) + 1 if pars else 0
prefix = commit_hash[:PREFIX_LEN]
get_number_tree(prefix)[commit_hash] = num
DIRTY_TREES[prefix] += 1
get_num.set(commit_hash, num)
inc()
def main(): # pragma: no cover
parser = optparse.OptionParser(usage=sys.modules[__name__].__doc__)
parser.add_option('--no-cache', action='store_true',
help='Do not actually cache anything we calculate.')
parser.add_option('--reset', action='store_true',
help='Reset the generation number cache and quit.')
parser.add_option('-v', '--verbose', action='count', default=0,
help='Be verbose. Use more times for more verbosity.')
opts, args = parser.parse_args()
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(opts.verbose, len(levels) - 1)])
# 'git number' should only be used on bots.
if os.getenv('CHROME_HEADLESS') != '1':
logging.error("'git-number' is an infrastructure tool that is only "
"intended to be used internally by bots. Developers should "
"use the 'Cr-Commit-Position' value in the commit's message.")
return 1
if opts.reset:
clear_caches(on_disk=True)
return
try:
targets = git.parse_commitrefs(*(args or ['HEAD']))
except git.BadCommitRefException as e:
parser.error(e)
load_generation_numbers(targets)
if not opts.no_cache:
finalize(targets)
print('\n'.join(map(str, map(get_num, targets))))
return 0
if __name__ == '__main__': # pragma: no cover
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from airflow.contrib.hooks.gcp_dataproc_hook import DataProcHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.version import version
from googleapiclient.errors import HttpError
class DataprocClusterCreateOperator(BaseOperator):
"""
Create a new cluster on Google Cloud Dataproc. The operator will wait until the
creation is successful or an error occurs in the creation process.
    The parameters allow you to configure the cluster. Please refer to
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters
for a detailed explanation on the different parameters. Most of the configuration
parameters detailed in the link are available as a parameter to this operator.
"""
template_fields = ['cluster_name',]
@apply_defaults
def __init__(self,
cluster_name,
project_id,
num_workers,
zone,
network_uri=None,
subnetwork_uri=None,
tags=None,
storage_bucket=None,
init_actions_uris=None,
metadata=None,
image_version=None,
properties=None,
master_machine_type='n1-standard-4',
master_disk_size=500,
worker_machine_type='n1-standard-4',
worker_disk_size=500,
num_preemptible_workers=0,
labels=None,
region='global',
gcp_conn_id='google_cloud_default',
delegate_to=None,
service_account=None,
service_account_scopes=None,
*args,
**kwargs):
"""
Create a new DataprocClusterCreateOperator.
For more info on the creation of a cluster through the API, have a look at:
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters
:param cluster_name: The name of the DataProc cluster to create.
:type cluster_name: string
:param project_id: The ID of the google cloud project in which
to create the cluster
:type project_id: string
:param num_workers: The # of workers to spin up
:type num_workers: int
:param storage_bucket: The storage bucket to use, setting to None lets dataproc
generate a custom one for you
:type storage_bucket: string
:param init_actions_uris: List of GCS uri's containing
dataproc initialization scripts
:type init_actions_uris: list[string]
:param metadata: dict of key-value google compute engine metadata entries
to add to all instances
:type metadata: dict
:param image_version: the version of software inside the Dataproc cluster
:type image_version: string
:param properties: dict of properties to set on
config files (e.g. spark-defaults.conf), see
https://cloud.google.com/dataproc/docs/reference/rest/v1/ \
projects.regions.clusters#SoftwareConfig
:type properties: dict
:param master_machine_type: Compute engine machine type to use for the master node
:type master_machine_type: string
        :param master_disk_size: Disk size for the master node
        :type master_disk_size: int
        :param worker_machine_type: Compute engine machine type to use for the worker nodes
:type worker_machine_type: string
:param worker_disk_size: Disk size for the worker nodes
:type worker_disk_size: int
:param num_preemptible_workers: The # of preemptible worker nodes to spin up
:type num_preemptible_workers: int
:param labels: dict of labels to add to the cluster
:type labels: dict
:param zone: The zone where the cluster will be located
:type zone: string
:param network_uri: The network uri to be used for machine communication, cannot be
specified with subnetwork_uri
:type network_uri: string
:param subnetwork_uri: The subnetwork uri to be used for machine communication, cannot be
specified with network_uri
:type subnetwork_uri: string
:param tags: The GCE tags to add to all instances
:type tags: list[string]
:param region: leave as 'global', might become relevant in the future
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param service_account: The service account of the dataproc instances.
:type service_account: string
:param service_account_scopes: The URIs of service account scopes to be included.
:type service_account_scopes: list[string]
"""
super(DataprocClusterCreateOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.cluster_name = cluster_name
self.project_id = project_id
self.num_workers = num_workers
self.num_preemptible_workers = num_preemptible_workers
self.storage_bucket = storage_bucket
self.init_actions_uris = init_actions_uris
self.metadata = metadata
self.image_version = image_version
self.properties = properties
self.master_machine_type = master_machine_type
self.master_disk_size = master_disk_size
self.worker_machine_type = worker_machine_type
self.worker_disk_size = worker_disk_size
self.labels = labels
self.zone = zone
self.network_uri = network_uri
self.subnetwork_uri = subnetwork_uri
self.tags = tags
self.region = region
self.service_account = service_account
self.service_account_scopes = service_account_scopes
def _get_cluster_list_for_project(self, service):
result = service.projects().regions().clusters().list(
projectId=self.project_id,
region=self.region
).execute()
return result.get('clusters', [])
def _get_cluster(self, service):
cluster_list = self._get_cluster_list_for_project(service)
cluster = [c for c in cluster_list if c['clusterName'] == self.cluster_name]
if cluster:
return cluster[0]
return None
def _get_cluster_state(self, service):
cluster = self._get_cluster(service)
        if cluster and 'status' in cluster:
return cluster['status']['state']
else:
return None
def _cluster_ready(self, state, service):
if state == 'RUNNING':
return True
if state == 'ERROR':
cluster = self._get_cluster(service)
try:
error_details = cluster['status']['details']
except KeyError:
error_details = 'Unknown error in cluster creation, ' \
'check Google Cloud console for details.'
raise Exception(error_details)
return False
def _wait_for_done(self, service):
while True:
state = self._get_cluster_state(service)
if state is None:
self.log.info("No state for cluster '%s'", self.cluster_name)
time.sleep(15)
else:
self.log.info("State for cluster '%s' is %s", self.cluster_name, state)
if self._cluster_ready(state, service):
self.log.info(
"Cluster '%s' successfully created", self.cluster_name
)
return
time.sleep(15)
def _build_cluster_data(self):
zone_uri = \
'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(
self.project_id, self.zone
)
master_type_uri = \
"https://www.googleapis.com/compute/v1/projects/{}/zones/{}/machineTypes/{}".format(
self.project_id, self.zone, self.master_machine_type
)
worker_type_uri = \
"https://www.googleapis.com/compute/v1/projects/{}/zones/{}/machineTypes/{}".format(
self.project_id, self.zone, self.worker_machine_type
)
cluster_data = {
'projectId': self.project_id,
'clusterName': self.cluster_name,
'config': {
'gceClusterConfig': {
'zoneUri': zone_uri
},
'masterConfig': {
'numInstances': 1,
'machineTypeUri': master_type_uri,
'diskConfig': {
'bootDiskSizeGb': self.master_disk_size
}
},
'workerConfig': {
'numInstances': self.num_workers,
'machineTypeUri': worker_type_uri,
'diskConfig': {
'bootDiskSizeGb': self.worker_disk_size
}
},
'secondaryWorkerConfig': {},
'softwareConfig': {}
}
}
if self.num_preemptible_workers > 0:
cluster_data['config']['secondaryWorkerConfig'] = {
'numInstances': self.num_preemptible_workers,
'machineTypeUri': worker_type_uri,
'diskConfig': {
'bootDiskSizeGb': self.worker_disk_size
},
'isPreemptible': True
}
cluster_data['labels'] = self.labels if self.labels else {}
# Dataproc labels must conform to the following regex:
# [a-z]([-a-z0-9]*[a-z0-9])? (current airflow version string follows
# semantic versioning spec: x.y.z).
cluster_data['labels'].update({'airflow-version':
'v' + version.replace('.', '-').replace('+','-')})
if self.storage_bucket:
cluster_data['config']['configBucket'] = self.storage_bucket
if self.metadata:
cluster_data['config']['gceClusterConfig']['metadata'] = self.metadata
if self.network_uri:
cluster_data['config']['gceClusterConfig']['networkUri'] = self.network_uri
if self.subnetwork_uri:
cluster_data['config']['gceClusterConfig']['subnetworkUri'] = self.subnetwork_uri
if self.tags:
cluster_data['config']['gceClusterConfig']['tags'] = self.tags
if self.image_version:
cluster_data['config']['softwareConfig']['imageVersion'] = self.image_version
if self.properties:
cluster_data['config']['softwareConfig']['properties'] = self.properties
if self.init_actions_uris:
init_actions_dict = [
{'executableFile': uri} for uri in self.init_actions_uris
]
cluster_data['config']['initializationActions'] = init_actions_dict
if self.service_account:
cluster_data['config']['gceClusterConfig']['serviceAccount'] =\
self.service_account
if self.service_account_scopes:
cluster_data['config']['gceClusterConfig']['serviceAccountScopes'] =\
self.service_account_scopes
return cluster_data
def execute(self, context):
self.log.info('Creating cluster: %s', self.cluster_name)
hook = DataProcHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to
)
service = hook.get_conn()
if self._get_cluster(service):
self.log.info(
'Cluster %s already exists... Checking status...',
self.cluster_name
)
self._wait_for_done(service)
return True
cluster_data = self._build_cluster_data()
try:
service.projects().regions().clusters().create(
projectId=self.project_id,
region=self.region,
body=cluster_data
).execute()
except HttpError as e:
# probably two cluster start commands at the same time
time.sleep(10)
if self._get_cluster(service):
self.log.info(
                    'Cluster %s already exists... Checking status...',
self.cluster_name
)
self._wait_for_done(service)
return True
else:
raise e
self._wait_for_done(service)
class DataprocClusterDeleteOperator(BaseOperator):
"""
Delete a cluster on Google Cloud Dataproc. The operator will wait until the
cluster is destroyed.
"""
template_fields = ['cluster_name']
@apply_defaults
def __init__(self,
cluster_name,
project_id,
region='global',
gcp_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
"""
Delete a cluster on Google Cloud Dataproc.
:param cluster_name: The name of the cluster to create.
:type cluster_name: string
:param project_id: The ID of the google cloud project in which
the cluster runs
:type project_id: string
:param region: leave as 'global', might become relevant in the future
:type region: string
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
"""
super(DataprocClusterDeleteOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.cluster_name = cluster_name
self.project_id = project_id
self.region = region
def _wait_for_done(self, service, operation_name):
time.sleep(15)
while True:
response = service.projects().regions().operations().get(
name=operation_name
).execute()
if 'done' in response and response['done']:
if 'error' in response:
raise Exception(str(response['error']))
else:
return
time.sleep(15)
def execute(self, context):
self.log.info('Deleting cluster: %s', self.cluster_name)
hook = DataProcHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to
)
service = hook.get_conn()
response = service.projects().regions().clusters().delete(
projectId=self.project_id,
region=self.region,
clusterName=self.cluster_name
).execute()
operation_name = response['name']
self.log.info("Cluster delete operation name: %s", operation_name)
self._wait_for_done(service, operation_name)
class DataProcPigOperator(BaseOperator):
"""
Start a Pig query Job on a Cloud DataProc cluster. The parameters of the operation
will be passed to the cluster.
It's a good practice to define dataproc_* parameters in the default_args of the dag
like the cluster name and UDFs.
```
default_args = {
'cluster_name': 'cluster-1',
'dataproc_pig_jars': [
'gs://example/udf/jar/datafu/1.2.0/datafu.jar',
'gs://example/udf/jar/gpig/1.2/gpig.jar'
]
}
```
    You can pass a pig script as a string or file reference. Use variables to pass
    values for the pig script to be resolved on the cluster, or use the parameters to
    be resolved in the script as template parameters.
```
t1 = DataProcPigOperator(
task_id='dataproc_pig',
query='a_pig_script.pig',
variables={'out': 'gs://example/output/{{ds}}'},
dag=dag)
```
"""
template_fields = ['query', 'variables', 'job_name', 'cluster_name']
template_ext = ('.pg', '.pig',)
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
query=None,
query_uri=None,
variables=None,
job_name='{{task.task_id}}_{{ds_nodash}}',
cluster_name='cluster-1',
dataproc_pig_properties=None,
dataproc_pig_jars=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
region='global',
*args,
**kwargs):
"""
Create a new DataProcPigOperator.
        For more detail about job submission, have a look at the reference:
https://cloud.google.com/dataproc/reference/rest/v1/projects.regions.jobs
:param query: The query or reference to the query file (pg or pig extension).
:type query: string
:param query_uri: The uri of a pig script on Cloud Storage.
:type query_uri: string
:param variables: Map of named parameters for the query.
:type variables: dict
:param job_name: The job name used in the DataProc cluster. This name by default
            is the task_id appended with the execution date, but can be templated. The
name will always be appended with a random number to avoid name clashes.
:type job_name: string
:param cluster_name: The name of the DataProc cluster.
:type cluster_name: string
:param dataproc_pig_properties: Map for the Pig properties. Ideal to put in
default arguments
:type dataproc_pig_properties: dict
:param dataproc_pig_jars: URIs to jars provisioned in Cloud Storage (example: for
UDFs and libs) and are ideal to put in default arguments.
:type dataproc_pig_jars: list
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param region: The specified region where the dataproc cluster is created.
:type region: string
"""
super(DataProcPigOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.query = query
self.query_uri = query_uri
self.variables = variables
self.job_name = job_name
self.cluster_name = cluster_name
self.dataproc_properties = dataproc_pig_properties
self.dataproc_jars = dataproc_pig_jars
self.region = region
def execute(self, context):
hook = DataProcHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
job = hook.create_job_template(self.task_id, self.cluster_name, "pigJob",
self.dataproc_properties)
if self.query is None:
job.add_query_uri(self.query_uri)
else:
job.add_query(self.query)
job.add_variables(self.variables)
job.add_jar_file_uris(self.dataproc_jars)
job.set_job_name(self.job_name)
hook.submit(hook.project_id, job.build(), self.region)
class DataProcHiveOperator(BaseOperator):
"""
Start a Hive query Job on a Cloud DataProc cluster.
"""
template_fields = ['query', 'variables', 'job_name', 'cluster_name']
template_ext = ('.q',)
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
query=None,
query_uri=None,
variables=None,
job_name='{{task.task_id}}_{{ds_nodash}}',
cluster_name='cluster-1',
dataproc_hive_properties=None,
dataproc_hive_jars=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
region='global',
*args,
**kwargs):
"""
Create a new DataProcHiveOperator.
:param query: The query or reference to the query file (q extension).
:type query: string
:param query_uri: The uri of a hive script on Cloud Storage.
:type query_uri: string
:param variables: Map of named parameters for the query.
:type variables: dict
:param job_name: The job name used in the DataProc cluster. This name by default
            is the task_id appended with the execution date, but can be templated. The
name will always be appended with a random number to avoid name clashes.
:type job_name: string
:param cluster_name: The name of the DataProc cluster.
:type cluster_name: string
        :param dataproc_hive_properties: Map for the Hive properties. Ideal to put in
default arguments
:type dataproc_hive_properties: dict
:param dataproc_hive_jars: URIs to jars provisioned in Cloud Storage (example: for
UDFs and libs) and are ideal to put in default arguments.
:type dataproc_hive_jars: list
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param region: The specified region where the dataproc cluster is created.
:type region: string
"""
super(DataProcHiveOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.query = query
self.query_uri = query_uri
self.variables = variables
self.job_name = job_name
self.cluster_name = cluster_name
self.dataproc_properties = dataproc_hive_properties
self.dataproc_jars = dataproc_hive_jars
self.region = region
def execute(self, context):
hook = DataProcHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
job = hook.create_job_template(self.task_id, self.cluster_name, "hiveJob",
self.dataproc_properties)
if self.query is None:
job.add_query_uri(self.query_uri)
else:
job.add_query(self.query)
job.add_variables(self.variables)
job.add_jar_file_uris(self.dataproc_jars)
job.set_job_name(self.job_name)
hook.submit(hook.project_id, job.build(), self.region)
class DataProcSparkSqlOperator(BaseOperator):
"""
Start a Spark SQL query Job on a Cloud DataProc cluster.
"""
template_fields = ['query', 'variables', 'job_name', 'cluster_name']
template_ext = ('.q',)
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
query=None,
query_uri=None,
variables=None,
job_name='{{task.task_id}}_{{ds_nodash}}',
cluster_name='cluster-1',
dataproc_spark_properties=None,
dataproc_spark_jars=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
region='global',
*args,
**kwargs):
"""
Create a new DataProcSparkSqlOperator.
:param query: The query or reference to the query file (q extension).
:type query: string
:param query_uri: The uri of a spark sql script on Cloud Storage.
:type query_uri: string
:param variables: Map of named parameters for the query.
:type variables: dict
:param job_name: The job name used in the DataProc cluster. This name by default
            is the task_id appended with the execution date, but can be templated. The
name will always be appended with a random number to avoid name clashes.
:type job_name: string
:param cluster_name: The name of the DataProc cluster.
:type cluster_name: string
        :param dataproc_spark_properties: Map for the Spark SQL properties. Ideal to put in
default arguments
:type dataproc_spark_properties: dict
:param dataproc_spark_jars: URIs to jars provisioned in Cloud Storage (example:
for UDFs and libs) and are ideal to put in default arguments.
:type dataproc_spark_jars: list
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param region: The specified region where the dataproc cluster is created.
:type region: string
"""
super(DataProcSparkSqlOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.query = query
self.query_uri = query_uri
self.variables = variables
self.job_name = job_name
self.cluster_name = cluster_name
self.dataproc_properties = dataproc_spark_properties
self.dataproc_jars = dataproc_spark_jars
self.region = region
def execute(self, context):
hook = DataProcHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
job = hook.create_job_template(self.task_id, self.cluster_name, "sparkSqlJob",
self.dataproc_properties)
if self.query is None:
job.add_query_uri(self.query_uri)
else:
job.add_query(self.query)
job.add_variables(self.variables)
job.add_jar_file_uris(self.dataproc_jars)
job.set_job_name(self.job_name)
hook.submit(hook.project_id, job.build(), self.region)
class DataProcSparkOperator(BaseOperator):
"""
Start a Spark Job on a Cloud DataProc cluster.
"""
template_fields = ['arguments', 'job_name', 'cluster_name']
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
main_jar=None,
main_class=None,
arguments=None,
archives=None,
files=None,
job_name='{{task.task_id}}_{{ds_nodash}}',
cluster_name='cluster-1',
dataproc_spark_properties=None,
dataproc_spark_jars=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
region='global',
*args,
**kwargs):
"""
Create a new DataProcSparkOperator.
:param main_jar: URI of the job jar provisioned on Cloud Storage. (use this or
the main_class, not both together).
:type main_jar: string
:param main_class: Name of the job class. (use this or the main_jar, not both
together).
:type main_class: string
:param arguments: Arguments for the job.
:type arguments: list
:param archives: List of archived files that will be unpacked in the work
directory. Should be stored in Cloud Storage.
:type archives: list
:param files: List of files to be copied to the working directory
:type files: list
:param job_name: The job name used in the DataProc cluster. This name by default
            is the task_id appended with the execution date, but can be templated. The
name will always be appended with a random number to avoid name clashes.
:type job_name: string
:param cluster_name: The name of the DataProc cluster.
:type cluster_name: string
        :param dataproc_spark_properties: Map for the Spark properties. Ideal to put in
default arguments
:type dataproc_spark_properties: dict
:param dataproc_spark_jars: URIs to jars provisioned in Cloud Storage (example:
for UDFs and libs) and are ideal to put in default arguments.
:type dataproc_spark_jars: list
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param region: The specified region where the dataproc cluster is created.
:type region: string
"""
super(DataProcSparkOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.main_jar = main_jar
self.main_class = main_class
self.arguments = arguments
self.archives = archives
self.files = files
self.job_name = job_name
self.cluster_name = cluster_name
self.dataproc_properties = dataproc_spark_properties
self.dataproc_jars = dataproc_spark_jars
self.region = region
def execute(self, context):
hook = DataProcHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
job = hook.create_job_template(self.task_id, self.cluster_name, "sparkJob",
self.dataproc_properties)
job.set_main(self.main_jar, self.main_class)
job.add_args(self.arguments)
job.add_jar_file_uris(self.dataproc_jars)
job.add_archive_uris(self.archives)
job.add_file_uris(self.files)
job.set_job_name(self.job_name)
hook.submit(hook.project_id, job.build(), self.region)
class DataProcHadoopOperator(BaseOperator):
"""
Start a Hadoop Job on a Cloud DataProc cluster.
"""
template_fields = ['arguments', 'job_name', 'cluster_name']
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
main_jar=None,
main_class=None,
arguments=None,
archives=None,
files=None,
job_name='{{task.task_id}}_{{ds_nodash}}',
cluster_name='cluster-1',
dataproc_hadoop_properties=None,
dataproc_hadoop_jars=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
region='global',
*args,
**kwargs):
"""
Create a new DataProcHadoopOperator.
:param main_jar: URI of the job jar provisioned on Cloud Storage. (use this or
the main_class, not both together).
:type main_jar: string
:param main_class: Name of the job class. (use this or the main_jar, not both
together).
:type main_class: string
:param arguments: Arguments for the job.
:type arguments: list
:param archives: List of archived files that will be unpacked in the work
directory. Should be stored in Cloud Storage.
:type archives: list
:param files: List of files to be copied to the working directory
:type files: list
:param job_name: The job name used in the DataProc cluster. This name by default
            is the task_id appended with the execution date, but can be templated. The
name will always be appended with a random number to avoid name clashes.
:type job_name: string
:param cluster_name: The name of the DataProc cluster.
:type cluster_name: string
        :param dataproc_hadoop_properties: Map for the Hadoop properties. Ideal to put in
default arguments
:type dataproc_hadoop_properties: dict
:param dataproc_hadoop_jars: URIs to jars provisioned in Cloud Storage (example:
for UDFs and libs) and are ideal to put in default arguments.
:type dataproc_hadoop_jars: list
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param region: The specified region where the dataproc cluster is created.
:type region: string
"""
super(DataProcHadoopOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.main_jar = main_jar
self.main_class = main_class
self.arguments = arguments
self.archives = archives
self.files = files
self.job_name = job_name
self.cluster_name = cluster_name
self.dataproc_properties = dataproc_hadoop_properties
self.dataproc_jars = dataproc_hadoop_jars
self.region = region
def execute(self, context):
hook = DataProcHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
job = hook.create_job_template(self.task_id, self.cluster_name, "hadoopJob",
self.dataproc_properties)
job.set_main(self.main_jar, self.main_class)
job.add_args(self.arguments)
job.add_jar_file_uris(self.dataproc_jars)
job.add_archive_uris(self.archives)
job.add_file_uris(self.files)
job.set_job_name(self.job_name)
hook.submit(hook.project_id, job.build(), self.region)
class DataProcPySparkOperator(BaseOperator):
"""
Start a PySpark Job on a Cloud DataProc cluster.
"""
template_fields = ['arguments', 'job_name', 'cluster_name']
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
main,
arguments=None,
archives=None,
pyfiles=None,
files=None,
job_name='{{task.task_id}}_{{ds_nodash}}',
cluster_name='cluster-1',
dataproc_pyspark_properties=None,
dataproc_pyspark_jars=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
region='global',
*args,
**kwargs):
"""
Create a new DataProcPySparkOperator.
:param main: [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main
Python file to use as the driver. Must be a .py file.
:type main: string
:param arguments: Arguments for the job.
:type arguments: list
:param archives: List of archived files that will be unpacked in the work
directory. Should be stored in Cloud Storage.
:type archives: list
:param files: List of files to be copied to the working directory
:type files: list
:param pyfiles: List of Python files to pass to the PySpark framework.
Supported file types: .py, .egg, and .zip
:type pyfiles: list
:param job_name: The job name used in the DataProc cluster. This name by default
            is the task_id appended with the execution date, but can be templated. The
name will always be appended with a random number to avoid name clashes.
:type job_name: string
:param cluster_name: The name of the DataProc cluster.
:type cluster_name: string
        :param dataproc_pyspark_properties: Map for the PySpark properties. Ideal to put in
default arguments
:type dataproc_pyspark_properties: dict
:param dataproc_pyspark_jars: URIs to jars provisioned in Cloud Storage (example:
for UDFs and libs) and are ideal to put in default arguments.
:type dataproc_pyspark_jars: list
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: string
:param region: The specified region where the dataproc cluster is created.
:type region: string
"""
super(DataProcPySparkOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.main = main
self.arguments = arguments
self.archives = archives
self.files = files
self.pyfiles = pyfiles
self.job_name = job_name
self.cluster_name = cluster_name
self.dataproc_properties = dataproc_pyspark_properties
self.dataproc_jars = dataproc_pyspark_jars
self.region = region
def execute(self, context):
hook = DataProcHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
job = hook.create_job_template(self.task_id, self.cluster_name, "pysparkJob",
self.dataproc_properties)
job.set_python_main(self.main)
job.add_args(self.arguments)
job.add_jar_file_uris(self.dataproc_jars)
job.add_archive_uris(self.archives)
job.add_file_uris(self.files)
job.add_python_file_uris(self.pyfiles)
job.set_job_name(self.job_name)
hook.submit(hook.project_id, job.build(), self.region)
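# Illustrative DAG sketch (not part of the original module): wiring the
# operators above into a create -> run -> delete pipeline. The project id,
# bucket path and dates below are hypothetical placeholders.
def _example_dataproc_dag():
    from datetime import datetime
    from airflow.models import DAG
    dag = DAG('example_dataproc', start_date=datetime(2017, 1, 1),
              schedule_interval=None)
    create = DataprocClusterCreateOperator(
        task_id='create_cluster', cluster_name='cluster-1',
        project_id='my-project', num_workers=2, zone='us-central1-a', dag=dag)
    pyspark = DataProcPySparkOperator(
        task_id='run_job', main='gs://my-bucket/jobs/job.py',
        cluster_name='cluster-1', dag=dag)
    delete = DataprocClusterDeleteOperator(
        task_id='delete_cluster', cluster_name='cluster-1',
        project_id='my-project', dag=dag)
    create.set_downstream(pyspark)
    pyspark.set_downstream(delete)
    return dag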
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import random
import sys
from ray.rllib.optimizers.segment_tree import SumSegmentTree, MinSegmentTree
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.compression import unpack_if_needed
from ray.rllib.utils.window_stat import WindowStat
@DeveloperAPI
class ReplayBuffer(object):
@DeveloperAPI
def __init__(self, size):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
self._hit_count = np.zeros(size)
self._eviction_started = False
self._num_added = 0
self._num_sampled = 0
self._evicted_hit_stats = WindowStat("evicted_hit", 1000)
self._est_size_bytes = 0
def __len__(self):
return len(self._storage)
@DeveloperAPI
def add(self, obs_t, action, reward, obs_tp1, done, weight):
data = (obs_t, action, reward, obs_tp1, done)
self._num_added += 1
if self._next_idx >= len(self._storage):
self._storage.append(data)
self._est_size_bytes += sum(sys.getsizeof(d) for d in data)
else:
self._storage[self._next_idx] = data
if self._next_idx + 1 >= self._maxsize:
self._eviction_started = True
self._next_idx = (self._next_idx + 1) % self._maxsize
if self._eviction_started:
self._evicted_hit_stats.push(self._hit_count[self._next_idx])
self._hit_count[self._next_idx] = 0
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(unpack_if_needed(obs_t), copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(unpack_if_needed(obs_tp1), copy=False))
dones.append(done)
self._hit_count[i] += 1
return (np.array(obses_t), np.array(actions), np.array(rewards),
np.array(obses_tp1), np.array(dones))
@DeveloperAPI
def sample_idxes(self, batch_size):
return [
random.randint(0,
len(self._storage) - 1) for _ in range(batch_size)
]
@DeveloperAPI
def sample_with_idxes(self, idxes):
self._num_sampled += len(idxes)
return self._encode_sample(idxes)
@DeveloperAPI
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [
random.randint(0,
len(self._storage) - 1) for _ in range(batch_size)
]
self._num_sampled += batch_size
return self._encode_sample(idxes)
@DeveloperAPI
def stats(self, debug=False):
data = {
"added_count": self._num_added,
"sampled_count": self._num_sampled,
"est_size_bytes": self._est_size_bytes,
"num_entries": len(self._storage),
}
if debug:
data.update(self._evicted_hit_stats.stats())
return data
@DeveloperAPI
class PrioritizedReplayBuffer(ReplayBuffer):
@DeveloperAPI
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha > 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
self._prio_change_stats = WindowStat("reprio", 1000)
@DeveloperAPI
def add(self, obs_t, action, reward, obs_tp1, done, weight):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super(PrioritizedReplayBuffer, self).add(obs_t, action, reward,
obs_tp1, done, weight)
if weight is None:
weight = self._max_priority
self._it_sum[idx] = weight**self._alpha
self._it_min[idx] = weight**self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
# TODO(szymon): should we ensure no repeats?
mass = random.random() * self._it_sum.sum(0, len(self._storage))
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
@DeveloperAPI
def sample_idxes(self, batch_size):
return self._sample_proportional(batch_size)
@DeveloperAPI
def sample_with_idxes(self, idxes, beta):
assert beta > 0
self._num_sampled += len(idxes)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage))**(-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage))**(-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
@DeveloperAPI
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
        Compared to ReplayBuffer.sample, it also returns importance
        weights and idxes of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
            indexes in buffer of sampled experiences
"""
assert beta > 0
self._num_sampled += batch_size
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage))**(-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage))**(-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
@DeveloperAPI
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
delta = priority**self._alpha - self._it_sum[idx]
self._prio_change_stats.push(delta)
self._it_sum[idx] = priority**self._alpha
self._it_min[idx] = priority**self._alpha
self._max_priority = max(self._max_priority, priority)
@DeveloperAPI
def stats(self, debug=False):
parent = ReplayBuffer.stats(self, debug)
if debug:
parent.update(self._prio_change_stats.stats())
return parent
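# Illustrative usage sketch (not part of the original module): filling a small
# prioritized buffer and re-prioritizing sampled transitions. The transition
# values and the priority formula below are hypothetical placeholders for the
# TD errors a trainer would normally supply.
def _example_prioritized_replay():
    buf = PrioritizedReplayBuffer(size=8, alpha=0.6)
    for t in range(8):
        # obs_t, action, reward, obs_tp1, done; weight=None uses the max priority
        buf.add(np.array([t]), 0, float(t), np.array([t + 1]), False, None)
    obses, actions, rewards, next_obses, dones, weights, idxes = buf.sample(
        4, beta=0.4)
    # feed back new priorities (e.g. absolute TD errors) for the sampled rows
    buf.update_priorities(idxes, [float(abs(r)) + 1e-6 for r in rewards])
    return buf.stats(debug=True)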
|
|
#!/usr/bin/env python3
'''
Class to handle ISC bind nameserver
If hostname is specified, manage a bind instance over ssh,
otherwise manage a local bind
It is assumed SSH is configured for passwordless login.
Note:
To update SOA serial, the serial number should be on its own line, and
have a comment "; Serial" after it
for rndc commands, make sure user has correct permissions, or allowed to sudo.
Example in /etc/sudoers
anders ALL=(root) NOPASSWD: /usr/sbin/rndc *
anders ALL=(root) NOPASSWD: /usr/sbin/service bind9 restart
'''
import subprocess
import datetime
import ipaddress
from orderedattrdict import AttrDict
import dnsmgr_util as util
def ipv4_addr_to_reverse(addr):
"""
    Returns the address octets in reverse order, dot as delimiter
1.2.3.4 returns 4.3.2.1
"""
ip = addr.split(".")
return ".".join(reversed(ip))
def ipv6_addr_to_reverse(addr):
"""
    Returns the IPv6 address expanded and reversed, with a dot between each hex digit
2001:db8::1 returns 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2
"""
addr = ipaddress.IPv6Address(addr)
ip = addr.exploded.replace(":", "")
return ".".join(reversed(ip))
class NS_Exception(Exception):
pass
class ZoneInfo(AttrDict):
def __init__(self):
super().__init__()
self.name = None # name of zone
self.file = None # full path to file with resource records
self.typ = None # master, slave etc
class ParserException(Exception):
pass
class Parser:
"""
Parser, for bind configuration files
"""
def __init__(self, f):
self.f = f
self.stack = ""
self.tokenchars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def getNextChar(self):
"""Returns None at end of file"""
if self.stack:
c = self.stack[-1]
self.stack = self.stack[:-1]
return c
c = self.f.read(1)
if c:
return c
return None
def ungetChar(self, c):
self.stack += c
def getToken(self):
"""
Return next token
Skip spaces
Skip comments
If quoted, continue to next quote and return string
"""
token = ""
while True:
c = self.getNextChar()
if c is None:
return c
while c and c in " \n\t":
c = self.getNextChar()
if c == '"':
# string, parse to next quote
while True:
c = self.getNextChar()
if c == '"' or c is None:
return token
token += c
if c == ';' or c == '#':
# comment, ignore rest of line
while c and c != '\n':
c = self.getNextChar()
continue
if c == '/':
c2 = self.getNextChar()
if c2 == '/':
# comment. ignore rest of line
while c2 and c2 != '\n':
c2 = self.getNextChar()
continue
self.ungetChar(c2)
if c is not None:
token += c
c = self.getNextChar()
while c and c in self.tokenchars:
token += c
c = self.getNextChar()
if c:
self.ungetChar(c)
return token
# end of file, return what we have
if token:
return token
return None
def requireToken(self, req):
"""
Get next token, make sure it matches the required token
"""
tmp = self.getToken()
if tmp != req:
            raise ParserException("Expected token %r, got %r" % (req, tmp))
return tmp
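# Minimal usage sketch for Parser (comments only); io.StringIO stands in for
# the FileMgr handle normally passed in:
#
#   import io
#   p = Parser(io.StringIO('zone "example.com" { type master; };'))
#   p.requireToken("zone")   # -> "zone"
#   p.getToken()             # -> "example.com" (quoted strings are unwrapped)
#   p.requireToken("{")      # -> "{"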
class FileMgr:
"""
Handle reading and writing files, locally or over SSH
"""
def __init__(self, remote=None, filename=None, mode="r", openFile=True):
self.remote = remote
self.proc = None # subprocess being run
self.f = None # file handle to read/write, for subprocess
self.filename = filename
if filename and openFile:
self.open(filename, mode)
def open(self, filename, mode="r"):
self.filename = filename
self.mode = mode
if mode not in ["r", "w"]:
            raise ValueError("Unknown file mode %s" % mode)
if self.remote:
cmd = ["ssh"]
if self.remote.port:
cmd.append("-p")
cmd.append(self.remote.port)
cmd.append(self.remote.host)
if mode == "r":
cmd += ["cat", filename]
self.proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, bufsize=1)
self.f = self.proc.stdout
else:
cmd += ["cat >%s" % filename]
self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
self.f = self.proc.stdin
else:
self.f = open(filename, mode + "b")
def read(self, length=None):
if self.mode == "r":
if length:
return self.f.read(length).decode()
return self.f.read().decode()
raise FileNotFoundError("Cannot read from file opened for write")
def readline(self):
if self.mode == "r":
            return self.f.readline().decode()
raise FileNotFoundError("Cannot readline() from file opened for write")
def write(self, msg):
if self.mode == "w":
if isinstance(msg, str):
                self.f.write(msg.encode())
else:
self.f.write(msg)
return
raise FileNotFoundError("Cannot write to file opened for read")
def close(self):
self.f.close()
if self.remote:
# todo, wait for subprocess to quit?
pass
def exist(self):
"""
        Returns True if the file exists
"""
cmd = ["test", "-f", self.filename]
return util.runCmd(self.remote, cmd, call=True) == 0
def mkdir(self):
"""
Create the directory
"""
cmd = ["mkdir", "-p", self.filename]
return util.runCmd(self.remote, cmd, call=True) == 0
def size(self):
"""
Returns size of the file
"""
cmd = ["stat", "-c", "%s", self.filename]
out = util.runCmd(self.remote, cmd)
return int(out)
def copy(self, dest):
"""
Copy the file
"""
if not isinstance(dest, FileMgr):
raise ValueError("dest file must be instance of FileMgr")
if self.remote and dest.remote:
cmd = ["cp", "--force", self.filename, dest.filename]
raise ValueError("Can't copy source->dest if both are remote files, not implemented")
elif self.remote:
cmd = ["scp"]
if self.remote.port:
cmd += ["-P", self.remote.port]
cmd += ["%s:%s" % (self.remote.host, self.filename), dest.filename]
return util.runCmd(cmd=cmd)
elif dest.remote:
cmd = ["scp"]
if dest.remote.port:
cmd += ["-P", dest.remote.port]
cmd += [self.filename, "%s:%s" % (dest.remote.host, dest.filename)]
return util.runCmd(cmd=cmd)
cmd = ["cp", "--force", self.filename, dest.filename]
return util.runCmd(cmd=cmd)
def move(self, dest):
"""
Move the file
todo, make the object invalid, or point to the new path?
"""
if not isinstance(dest, FileMgr):
raise ValueError("dest file must be instance of FileUtil")
if self.remote and not dest.remote:
raise ValueError("Not implemented, cannot move from local to remote server")
if not self.remote and dest.remote:
raise ValueError("Not implemented, cannot move from remote to local server")
cmd = ["mv", "--force", self.filename, dest.filename]
ret = util.runCmd(self.remote, cmd, call=True)
return ret
def sha256sum(self):
"""Calculate sha256 checksum on file"""
cmd = ["sha256sum", self.filename]
out = util.runCmd(self.remote, cmd)
return out.split()[0].decode()
def compare(self, dest):
"""
Compare file with another file
This is done by calculating and comparing sha256 checksum
Returns True if files are identical
"""
sum1 = self.sha256sum()
sum2 = dest.sha256sum()
return sum1 == sum2
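# Illustrative FileMgr sketch (comments only; host name and paths are made up,
# and a working passwordless SSH setup is assumed):
#
#   remote = AttrDict(host="ns1.example.com", port="22")
#   src = FileMgr(filename="/tmp/db.example", openFile=False)           # local
#   dst = FileMgr(remote=remote, filename="/tmp/db.example", openFile=False)
#   src.copy(dst)              # scp local -> remote
#   assert src.compare(dst)    # sha256 checksums should match after the copy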
class NS_Manager:
"""
Helper to manage a bind instance
"""
def __init__(self,
host=None,
port="22",
includedir="/etc/bind/primary/include",
includefile="{zone}",
tmpdir="/tmp/dnsmgr",
directory="/var/cache/bind",
configfile=None,
ignorezones={},
cmd=None):
self.host = host
self.port = port
self.includedir = includedir
self.includefile = includefile
self.tmpdir = tmpdir
self.directory = directory
self.configfile = configfile
self.ignorezones = ignorezones
self.cmd = cmd
if host:
self.remote = AttrDict(host=host, port=port)
else:
self.remote = None
self.zones = {}
def _verifyTmpDir(self, remote=None):
"""
Create tmpdir if needed
"""
fdir = FileMgr(remote=remote, filename=self.tmpdir, mode="w", openFile=False)
fdir.mkdir()
def restart(self):
"""
Restart the bind process
"""
cmd = self.cmd.restart
cmd = cmd.split(" ")
return util.runCmd(self.remote, cmd)
def reloadZone(self, zone=None):
"""
reload zone content, one or all zones
"""
log.info("Reloading zone %s", zone)
cmd = self.cmd.reload_zone.format(zone=zone)
cmd = cmd.split(" ")
return util.runCmd(self.remote, cmd)
def increaseSoaSerial(self, zoneinfo):
"""
Increase serial number in a zonefile
First verifies that the SOA has the format YYYYMMDDxx, with a valid date
Extra check: new file with updated soa must have same size as old file
"""
if zoneinfo.typ != "master":
raise NS_Exception("increaseSoaSerial only makes sense for zone type master")
self._verifyTmpDir()
tmpfile = "%s/%s" % (self.tmpdir, zoneinfo.name)
# Copy file to temp
fsrc = FileMgr(self.remote, zoneinfo.file)
fdst = FileMgr(filename=tmpfile, mode="w")
fsrc.copy(fdst)
# compare checksums on original and copied file
if not fsrc.compare(fdst):
raise NS_Exception("Error, copied file differs in checksum")
        # We now have a verified copy of the file locally; search for the serial number
f = open(tmpfile)
fpos = f.tell()
line = f.readline()
serial = None
while line:
line = line.rstrip()
if line.lower().endswith("; serial"):
serial = line
serialfpos = fpos
fpos = f.tell()
line = f.readline()
f.close()
if serial is None:
raise NS_Exception("Can't find serial number in file %s" % zoneinfo.file)
# search backwards for first digit
p = len(serial) - len("; Serial")
while not serial[p].isdigit():
p -= 1
if p < 0:
raise NS_Exception("Can't find last digit in serial number in file %s" % zoneinfo.file)
# check all 10 positions, must be digits
p -= 9 # should be first position in serial number
if p < 0:
raise NS_Exception("Can't find all digist in serial number in file %s" % zoneinfo.file)
if not serial[p:p+10].isdigit():
raise NS_Exception("Can't find serial number in file %s" % zoneinfo.file)
# check if serial starts with a valid date
try:
dt = datetime.datetime.strptime(serial[p:p+8], "%Y%m%d").date()
except ValueError as err:
raise NS_Exception("Serial number does not start with a valid date, in file %s" % zoneinfo.file)
seq = int(serial[p+8:p+10])
now = datetime.datetime.now().date()
if now > dt:
# Serial has old date, replace with todays date and restart sequence
dt = now
seq = 0
else:
if seq > 98:
                # sequence exhausted, move to the next day and restart the sequence
dt = dt + datetime.timedelta(days=1)
seq = 0
else:
seq += 1
serial = dt.strftime("%Y%m%d") + str(seq).zfill(2)
# Ok, write the new serial to the temp file
f = open(tmpfile, "r+b")
f.seek(serialfpos + p)
f.write(serial.encode())
f.close()
# Copy the file with updated serial number to server
if self.remote:
self._verifyTmpDir(self.remote)
fsrc = FileMgr(filename=tmpfile)
fdst = FileMgr(self.remote, tmpfile, mode="w")
fsrc.copy(fdst)
# Compare checksums on local and remote file so copy was ok
if not fsrc.compare(fdst):
raise NS_Exception("Error: Copy of new file failed, incorrect checksum")
# Verify size between original file and file with updated serial
# They should be identical, since serial number never changes size
fsrc = FileMgr(remote=self.remote, filename=tmpfile)
fdst = FileMgr(remote=self.remote, filename=zoneinfo.file)
if fsrc.size() != fdst.size():
raise NS_Exception("Error: Old file and new file has different sizes")
# Copy file to correct location
cmd = ["cp", "--force", fsrc.filename, fdst.filename]
util.runCmd(remote=self.remote, cmd=cmd)
# Tell bind we have an updated serial no
self.reloadZone(zoneinfo.name)
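    # Worked examples (illustrative) of the serial update rule implemented above:
    #   2024060101 updated on 2024-06-02 -> 2024060200  (newer date, sequence reset)
    #   2024060101 updated on 2024-06-01 -> 2024060102  (same date, sequence +1)
    #   2024060199 updated on 2024-06-01 -> 2024060200  (sequence exhausted, roll to next day)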
def getZones(self, filename=None):
"""
Parse out all zones from bind/named configuration files
filename is the main configuration file, it then follows
all the includes to get all of the configuration
"""
if filename is None:
filename = self.configfile
self.zones = {} # Key is zonename, value is zoneinfo
def parseZone(parser):
zone = ZoneInfo()
zone.name = parser.getToken()
t = parser.getToken()
if t == "IN":
# just ignore
t = parser.getToken()
if t != "{":
raise ParserException("Missing token %s" % t)
while t != "}":
t = parser.getToken()
if t == 'type':
zone.typ = parser.getToken()
elif t == 'file':
zone.file = parser.getToken()
if zone.file[0] != "/":
zone.file = "%s/%s" % (self.directory, zone.file)
return zone
def parseBindConfigFile(filename):
"""
            Recursive function, to handle the 'include' statement
"""
f = FileMgr(self.remote)
f.open(filename, "r")
parser = Parser(f)
token = "dummy"
while token is not None:
token = parser.getToken()
if token == 'include':
filename = parser.getToken()
parseBindConfigFile(filename)
elif token == 'zone':
zone = parseZone(parser)
if zone.name not in self.ignorezones:
self.zones[zone.name] = zone
parseBindConfigFile(filename)
return self.zones
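    # Illustrative named.conf fragment (made-up paths) of the statements
    # getZones() understands; 'include' files are followed recursively:
    #
    #   include "/etc/bind/named.conf.default-zones";
    #   zone "example.com" IN {
    #       type master;
    #       file "primary/example.com";
    #   };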
def saveZone(self, zone):
"""
Save zone resource records
        We always write to a temp file, then compare the new file with the
original. If they differ we replace the original file, increase the
SOA serial number and reload the zone
"""
zoneinfo = self.zones[zone.zonefile]
# Create name of zonefile
zonefile = self.includefile.format(zone=zone.zonefile)
self._verifyTmpDir()
filename = "%s/%s" % (self.tmpdir, zonefile)
f = open(filename, "w")
f.write(";\n")
f.write("; File generated by DnsNgr\n")
f.write("; Do not edit, changes will be overwritten\n")
f.write(";\n")
f.write("; Zonefile : %s/%s\n" % (self.includedir, zonefile))
f.write("; Records : %d\n" % len(zone))
f.write(";\n\n")
f.write("$ORIGIN %s.\n\n" % zone.zone)
if zone.typ == "forward":
for rrlist in zone:
                for rr in rrlist:
f.write("%-30s %5s %-8s %s\n" % (rr.name, rr.ttl, rr.typ, rr.value))
elif zone.typ == "reverse4":
st = -len(zone.zone) - 1
            for rrlist in zone:
for rr in rrlist:
name = ipv4_addr_to_reverse(str(rr.name)) + ".in-addr.arpa"
name = name[:st]
f.write("%-30s %5s %s %s.%s.\n" % (name, rr.ttl, rr.typ, rr.value, rr.domain))
elif zone.typ == "reverse6":
st = -len(zone.zone) - 1
            for rrlist in zone:
for rr in rrlist:
name = ipv6_addr_to_reverse(rr.name) + ".ip6.arpa"
name = name[:st]
f.write("%-50s %5s %s %s.%s.\n" % (name, rr.ttl, rr.typ, rr.value, rr.domain))
else:
print("Error: zone %s, unknown zone type %s" % (zone.name, zone.typ))
f.close()
if self.remote:
fsrc = FileMgr(filename=filename)
fdst = FileMgr(remote=self.remote, filename=filename)
fsrc.copy(fdst)
if not fsrc.compare(fdst):
raise NS_Exception("Error: Copied file has incorrect checksum, copy failed")
fsrc = FileMgr(remote=self.remote, filename=filename)
fdst = FileMgr(remote=self.remote, filename="%s/%s" %\
(self.includedir, zonefile), openFile=False)
if fdst.exist():
replace = not fsrc.compare(fdst)
else:
replace = True
if replace:
fsrc.move(fdst)
self.increaseSoaSerial(zoneinfo)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('cmd',
default=None,
choices=[
"status",
"restart",
"getzones",
"incsoaserial"
],
help='Action to run',
)
parser.add_argument('--host',
default=None,
)
parser.add_argument('--port',
default=None,
)
parser.add_argument('--zone',
default=None,
)
parser.add_argument('--tmpdir',
default=None,
)
parser.add_argument('--configfile',
default=None,
)
args = parser.parse_args()
bindMgrArgs = AttrDict(
host = args.host,
port = args.port,
configfile = args.configfile,
)
if args.tmpdir is not None: bindMgrArgs.tmpdir = args.tmpdir
bindMgr = NS_Manager(**bindMgrArgs)
if args.cmd == "status":
print("status not implemented")
elif args.cmd == "restart":
print("Restart DNS server")
bindMgr.restart()
elif args.cmd == "getzones":
print("Get zones")
zonesinfo = bindMgr.getZones()
for zoneinfo in zonesinfo.values():
print("zone")
print(" name", zoneinfo.name)
print(" type", zoneinfo.typ)
print(" file", zoneinfo.file)
elif args.cmd == "incsoaserial":
print("Increase SOA serial for zone %s" % args.zone)
zones = bindMgr.getZones("/etc/bind/named.conf")
if args.zone not in zones:
print("Nameserver does not handle zone %s" % args.zone)
zoneinfo = zones[args.zone]
bindMgr.increaseSoaSerial(zoneinfo)
else:
print("Error: unknown command %s" % args.cmd)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import ntpath
import posixpath
import sys
import collections
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(BUILD_TOOLS_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, 'third_party', 'pymock')
# For the mock library
sys.path.append(MOCK_DIR)
from mock import call, patch, Mock
sys.path.append(BUILD_TOOLS_DIR)
import build_artifacts
class BasePosixTestCase(unittest.TestCase):
def setUp(self):
self.addCleanup(patch.stopall)
patch('build_artifacts.PLATFORM', 'posix').start()
patch('build_artifacts.BUILD_ARCHIVE_DIR', '/archive_dir/').start()
patch('os.path.join', posixpath.join).start()
class PosixTestCase(BasePosixTestCase):
def setUp(self):
BasePosixTestCase.setUp(self)
def testGetToolchainNaClLib(self):
tests = [
(('newlib', 'x86_32'), 'foo/x86_64-nacl/lib32'),
(('newlib', 'x86_64'), 'foo/x86_64-nacl/lib'),
(('newlib', 'arm'), 'foo/arm-nacl/lib'),
(('glibc', 'x86_32'), 'foo/x86_64-nacl/lib32'),
(('glibc', 'x86_64'), 'foo/x86_64-nacl/lib'),
(('bionic', 'arm'), 'foo/arm-nacl/lib'),
(('pnacl', None), 'foo/le32-nacl/lib'),
]
for test in tests:
self.assertEqual(
build_artifacts.GetToolchainNaClLib(test[0][0], 'foo', test[0][1]),
test[1])
def testGetGypBuiltLib(self):
tests = [
(('newlib', 'x86_32'), 'foo/Release/gen/tc_newlib/lib32'),
(('newlib', 'x86_64'), 'foo/Release/gen/tc_newlib/lib64'),
(('newlib', 'arm'), 'foo/Release/gen/tc_newlib/libarm'),
(('glibc', 'x86_32'), 'foo/Release/gen/tc_glibc/lib32'),
(('glibc', 'x86_64'), 'foo/Release/gen/tc_glibc/lib64'),
(('pnacl', None), 'foo/Release/gen/tc_pnacl_newlib/lib')
]
for test in tests:
self.assertEqual(
build_artifacts.GetGypBuiltLib('foo', test[0][0], test[0][1]),
test[1])
def testGetGypToolchainLib(self):
tests = [
(('newlib', 'x86_32'),
'foo/Release/gen/sdk/posix_x86/nacl_x86_newlib/x86_64-nacl/lib32'),
(('newlib', 'x86_64'),
'foo/Release/gen/sdk/posix_x86/nacl_x86_newlib/x86_64-nacl/lib'),
(('newlib', 'arm'),
'foo/Release/gen/sdk/posix_x86/nacl_arm_newlib/arm-nacl/lib'),
(('glibc', 'x86_32'),
'foo/Release/gen/sdk/posix_x86/nacl_x86_glibc/x86_64-nacl/lib32'),
(('glibc', 'x86_64'),
'foo/Release/gen/sdk/posix_x86/nacl_x86_glibc/x86_64-nacl/lib'),
# Bionic uses the newlib toolchain lib directory
(('bionic', 'arm'),
'foo/Release/gen/sdk/posix_x86/nacl_arm_newlib/arm-nacl/lib'),
(('pnacl', None),
'foo/Release/gen/sdk/posix_x86/pnacl_newlib/le32-nacl/lib'),
]
for test in tests:
self.assertEqual(
build_artifacts.GetGypToolchainLib('foo', test[0][0], test[0][1]),
test[1])
@patch('build_artifacts.all_archives', ['foo.tar.bz2', 'bar.tar.bz2'])
@patch('build_version.ChromeMajorVersion', Mock(return_value='40'))
@patch('build_version.ChromeRevision', Mock(return_value='302630'))
@patch('build_version.ChromeCommitPosition', Mock(return_value=
'1492c3d296476fe12cafecabba6ebabe-refs/heads/master@{#302630}'))
@patch('buildbot_common.Archive')
def testUploadArchives(self, archive_mock):
build_artifacts.UploadArchives()
cwd = '/archive_dir/'
bucket_path = 'native-client-sdk/archives/40-302630-1492c3d29'
archive_mock.assert_has_calls([
call('foo.tar.bz2', bucket_path, cwd=cwd, step_link=False),
call('foo.tar.bz2.sha1', bucket_path, cwd=cwd, step_link=False),
call('bar.tar.bz2', bucket_path, cwd=cwd, step_link=False),
call('bar.tar.bz2.sha1', bucket_path, cwd=cwd, step_link=False)
])
class GypNinjaPosixTestCase(BasePosixTestCase):
def setUp(self):
BasePosixTestCase.setUp(self)
patch('sys.executable', 'python').start()
patch('build_artifacts.SRC_DIR', 'src_dir').start()
patch('os.environ', {}).start()
self.run_mock = patch('buildbot_common.Run').start()
self.options_mock = patch('build_artifacts.options').start()
self.options_mock.mac_sdk = False
self.options_mock.no_arm_trusted = False
self.gyp_defines_base = ['nacl_allow_thin_archives=0']
def testSimple(self):
build_artifacts.GypNinjaBuild(
None, 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={'GYP_GENERATORS': 'ninja',
'GYP_DEFINES': ' '.join(self.gyp_defines_base)}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
def testTargetArch(self):
build_artifacts.GypNinjaBuild(
'x64', 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={
'GYP_GENERATORS': 'ninja',
'GYP_DEFINES': ' '.join(self.gyp_defines_base +
['target_arch=x64']),
}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
def testMultipleTargets(self):
build_artifacts.GypNinjaBuild(
None, 'gyp.py', 'foo.gyp', ['target1', 'target2'], 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={'GYP_GENERATORS': 'ninja',
'GYP_DEFINES': ' '.join(self.gyp_defines_base)}),
call(['ninja', '-C', 'out_dir/Release', 'target1', 'target2'],
cwd='src_dir')
])
def testMacSdk(self):
build_artifacts.PLATFORM = 'mac'
self.options_mock.mac_sdk = '10.6'
build_artifacts.GypNinjaBuild(
None, 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={
'GYP_GENERATORS': 'ninja',
'GYP_DEFINES': ' '.join(self.gyp_defines_base +
['mac_sdk=10.6', 'clang=1']),
}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
def testArmLinux(self):
build_artifacts.PLATFORM = 'linux'
build_artifacts.GypNinjaBuild(
'arm', 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={
'GYP_CROSSCOMPILE': '1',
'GYP_GENERATORS': 'ninja',
'GYP_DEFINES': ' '.join(self.gyp_defines_base +
['target_arch=arm',
'arm_float_abi=hard']),
}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
def testNoArmTrusted(self):
build_artifacts.PLATFORM = 'linux'
self.options_mock.no_arm_trusted = True
build_artifacts.GypNinjaBuild(
'arm', 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={
'GYP_CROSSCOMPILE': '1',
'GYP_GENERATORS': 'ninja',
'GYP_DEFINES': ' '.join(self.gyp_defines_base +
['target_arch=arm',
'arm_float_abi=hard',
'disable_cross_trusted=1']),
}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
class ArchivePosixTestCase(BasePosixTestCase):
def setUp(self):
BasePosixTestCase.setUp(self)
self.makedir_mock = patch('buildbot_common.MakeDir').start()
self.copyfile_mock = patch('buildbot_common.CopyFile').start()
self.copydir_mock = patch('buildbot_common.CopyDir').start()
self.isdir_mock = patch('os.path.isdir').start()
patch('os.path.exists', Mock(return_value=False)).start()
def dummy_isdir(path):
if path == '/archive_dir/posix_foo':
return True
return False
self.isdir_mock.side_effect = dummy_isdir
self.archive = build_artifacts.Archive('foo')
def testInit(self):
self.assertEqual(self.archive.name, 'posix_foo')
self.assertEqual(self.archive.archive_name, 'posix_foo.tar.bz2')
self.assertEqual(self.archive.archive_path,
'/archive_dir/posix_foo.tar.bz2')
self.assertEqual(self.archive.dirname, '/archive_dir/posix_foo')
self.makedir_mock.assert_called_once_with('/archive_dir/posix_foo')
@patch('glob.glob', Mock(side_effect=lambda x: [x]))
def testCopySimple(self):
self.archive.Copy('/copy_from', ['file1', 'file2'])
self.assertEqual(self.copydir_mock.call_count, 0)
self.copyfile_mock.assert_has_calls([
call('/copy_from/file1', '/archive_dir/posix_foo/file1'),
call('/copy_from/file2', '/archive_dir/posix_foo/file2')])
@patch('glob.glob')
def testCopyGlob(self, glob_mock):
glob_mock.return_value = ['/copy_from/foo', '/copy_from/bar']
self.archive.Copy('/copy_from', [('*', '')])
glob_mock.assert_called_once_with('/copy_from/*')
self.assertEqual(self.copydir_mock.call_count, 0)
self.copyfile_mock.assert_has_calls([
call('/copy_from/foo', '/archive_dir/posix_foo/'),
call('/copy_from/bar', '/archive_dir/posix_foo/')])
@patch('glob.glob', Mock(side_effect=lambda x: [x]))
def testCopyRename(self):
self.archive.Copy('/copy_from', [('file1', 'file1_renamed')])
self.assertEqual(self.copydir_mock.call_count, 0)
self.copyfile_mock.assert_called_once_with(
'/copy_from/file1', '/archive_dir/posix_foo/file1_renamed')
@patch('glob.glob', Mock(side_effect=lambda x: [x]))
def testCopyNewDir(self):
self.archive.Copy('/copy_from', [('file1', 'todir/')])
self.assertEqual(self.copydir_mock.call_count, 0)
self.copyfile_mock.assert_called_once_with(
'/copy_from/file1', '/archive_dir/posix_foo/todir/file1')
@patch('glob.glob', Mock(side_effect=lambda x: [x]))
def testCopyDir(self):
self.isdir_mock.side_effect = lambda _: True
self.archive.Copy('/copy_from', ['dirname'])
self.assertEqual(self.copyfile_mock.call_count, 0)
self.copydir_mock.assert_called_once_with(
'/copy_from/dirname', '/archive_dir/posix_foo/dirname')
class WinTestCase(unittest.TestCase):
def setUp(self):
patch('build_artifacts.PLATFORM', 'win').start()
patch('build_artifacts.BUILD_ARCHIVE_DIR', 'c:\\archive_dir\\').start()
patch('os.path.join', ntpath.join).start()
def tearDown(self):
patch.stopall()
@patch('os.path.exists', Mock(return_value=False))
@patch('buildbot_common.MakeDir')
def testArchiveInit(self, makedir_mock):
archive = build_artifacts.Archive('foo')
self.assertEqual(archive.name, 'win_foo')
self.assertEqual(archive.archive_name, 'win_foo.tar.bz2')
self.assertEqual(archive.archive_path, r'c:\archive_dir\win_foo.tar.bz2')
self.assertEqual(archive.dirname, r'c:\archive_dir\win_foo')
makedir_mock.assert_called_once_with(r'c:\archive_dir\win_foo')
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for processing text content."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from pyparsing import alphas
from pyparsing import Combine
from pyparsing import Each
from pyparsing import Group
from pyparsing import Literal
from pyparsing import nums
from pyparsing import Optional
from pyparsing import QuotedString
from pyparsing import Regex
from pyparsing import Suppress
from pyparsing import Word
from pyparsing import ZeroOrMore
from tools import verify
def sep(text):
"""Makes a separator."""
return Suppress(Literal(text))
def key(name):
"""Makes grammar expression for a key."""
return (
Literal(name) ^
(sep('\'') + Literal(name) + sep('\'')) ^
(sep('"') + Literal(name) + sep('"')))
def list_of(term):
"""Makes a delimited list of terms."""
return (
Optional(
term +
ZeroOrMore(Suppress(Literal(',')) + term) +
Optional(Suppress(Literal(',')))
)
)
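# For example, list_of(Literal('a')) accepts '', 'a', 'a, a' and 'a, a,'
# (a trailing comma is tolerated), mirroring the JavaScript-ish source syntax.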
def chunks(l, n):
"""Partitions the list l into disjoint sub-lists of length n."""
if len(l) % n != 0:
        raise Exception('List length is not a multiple of %s' % n)
return [l[i:i+n] for i in range(0, len(l), n)]
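# For example, chunks(['a', 1, 'b', 2], 2) returns [['a', 1], ['b', 2]], which
# make_dict below turns into {'a': 1, 'b': 2}.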
def make_dict(unused_s, unused_l, toks):
"""Makes a dict from the list using even items as keys, odd as values."""
result = {}
key_value_pairs = chunks(toks, 2)
for key_value_pair in key_value_pairs:
result[key_value_pair[0]] = key_value_pair[1]
return result
def make_list(unused_s, unused_l, toks):
"""Makes a list out of a token tuple holding a list."""
result = []
for item in toks:
result.append(item.asList())
return result
def make_bool(value):
"""Makes a boolean value lambda."""
def make_value():
return verify.Term(verify.BOOLEAN, value)
return make_value
def make_int(value):
"""Makes an int value lambda."""
return int(value[0])
def make_float(value):
"""Makes an float value lambda."""
return float(value[0])
class AssessmentParser13(object):
"""Grammar and parser for the assessment."""
string = (
QuotedString('\'', escChar='\\', multiline=True) ^
QuotedString('"', escChar='\\', multiline=True))
boolean = (
Literal('true').setParseAction(make_bool(True)) ^
Literal('false').setParseAction(make_bool(False)))
float = Combine(
Word(nums) + Optional(Literal('.') + Word(nums))
).setParseAction(make_float)
choice_decl = (
string ^
Combine(
sep('correct(') + string + sep(')')
).setParseAction(lambda x: verify.Term(verify.CORRECT, x[0]))
)
regex = (
Regex('/(.*)/i') ^
Combine(
sep('regex(') +
QuotedString('"', escChar='\\') +
sep(')')
).setParseAction(lambda x: verify.Term(verify.REGEX, x[0]))
)
question_decl = (
sep('{') +
Each(
Optional(
key('questionHTML') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('lesson') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('correctAnswerString') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('correctAnswerRegex') + sep(':') +
regex + Optional(sep(','))) +
Optional(
key('correctAnswerNumeric') + sep(':') +
float + Optional(sep(','))) +
Optional(
key('multiLine') + sep(':') +
boolean + Optional(sep(','))) +
Optional(
key('choices') + sep(':') +
sep('[') +
Group(list_of(choice_decl)).setParseAction(make_list) +
sep(']') +
Optional(sep(',')))
) +
sep('}')).setParseAction(make_dict)
assessment_grammar = (
sep('assessment') +
sep('=') +
sep('{') +
Each(
Optional(
key('assessmentName') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('preamble') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('checkAnswers') + sep(':') +
boolean + Optional(sep(','))) +
Optional(
key('questionsList') + sep(':') +
sep('[') +
Group(list_of(question_decl)).setParseAction(make_list) +
sep(']') +
Optional(sep(',')))
) +
sep('}') +
Optional(sep(';'))).setParseAction(make_dict)
@classmethod
def parse_string(cls, content):
return cls.assessment_grammar.parseString(content)
@classmethod
def parse_string_in_scope(cls, content, scope, root_name):
"""Parses assessment text following grammar."""
if 'assessment' != root_name:
            raise Exception('Unsupported schema: %s' % root_name)
# we need to extract the results as a dictionary; so we remove the
# outer array holding it
ast = cls.parse_string(content).asList()
if len(ast) == 1:
ast = ast[0]
return dict(
scope.items() +
{'__builtins__': {}}.items() +
{root_name: ast}.items())
class ActivityParser13(object):
"""Grammar and parser for the activity."""
variable = Word(alphas)
integer = Word(nums).setParseAction(make_int)
string = (
QuotedString('\'', escChar='\\', multiline=True) ^
QuotedString('"', escChar='\\', multiline=True))
boolean = (
Literal('true').setParseAction(make_bool(True)) ^
Literal('false').setParseAction(make_bool(False)))
regex = (
Regex('/(.*)/i') ^
Combine(
sep('regex(') +
QuotedString('"', escChar='\\') +
sep(')')
).setParseAction(lambda x: verify.Term(verify.REGEX, x[0]))
)
choice_decl = Group(
sep('[') +
string + sep(',') +
boolean + sep(',') +
string +
sep(']')
)
choices_decl = Group(
sep('[') +
Optional(list_of(choice_decl)) +
sep(']')
).setParseAction(make_list)
multiple_choice_decl = (
key('questionType') + sep(':') + key('multiple choice') +
Optional(sep(','))
)
multiple_choice = (
sep('{') +
multiple_choice_decl +
Each(
Optional(
key('questionHTML') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('choices') + sep(':') +
choices_decl + Optional(sep(',')))
) +
sep('}')
).setParseAction(make_dict)
free_text_decl = (
key('questionType') + sep(':') + key('freetext') +
Optional(sep(','))
)
free_text = (
sep('{') +
free_text_decl +
Each(
Optional(
key('questionHTML') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('correctAnswerRegex') + sep(':') +
regex + Optional(sep(','))) +
Optional(
key('correctAnswerOutput') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('incorrectAnswerOutput') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('showAnswerPrompt') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('showAnswerOutput') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('outputHeight') + sep(':') +
string + Optional(sep(',')))
) +
sep('}')
).setParseAction(make_dict)
question_list_decl = (
sep('{') +
Each(
Optional(
key('questionHTML') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('choices') + sep(':') +
sep('[') +
Group(list_of(string)).setParseAction(make_list) +
sep(']') +
Optional(sep(','))) +
Optional(
key('correctIndex') + sep(':') +
(integer ^ (
sep('[') +
Group(list_of(integer)).setParseAction(make_list) +
sep(']'))) +
Optional(sep(','))) +
Optional(
key('multiSelect') + sep(':') +
boolean + Optional(sep(','))),
) +
sep('}')).setParseAction(make_dict)
questions_list_decl = Group(
sep('[') +
Optional(list_of(question_list_decl)) +
sep(']')
).setParseAction(make_list)
multiple_choice_group_decl = (
key('questionType') + sep(':') + key('multiple choice group') +
Optional(sep(','))
)
multiple_choice_group = (
sep('{') +
multiple_choice_group_decl +
Each(
Optional(
key('questionGroupHTML') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('allCorrectMinCount') + sep(':') +
integer + Optional(sep(','))) +
Optional(
key('allCorrectOutput') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('someIncorrectOutput') + sep(':') +
string + Optional(sep(','))) +
Optional(
key('questionsList') + sep(':') +
questions_list_decl + Optional(sep(',')))
) +
sep('}')
).setParseAction(make_dict)
activity_grammar = (
sep('activity') +
sep('=') +
sep('[') +
Optional(list_of(
string ^ multiple_choice ^ free_text ^ multiple_choice_group)) +
sep(']') +
Optional(sep(';')))
@classmethod
def parse_string(cls, content):
return cls.activity_grammar.parseString(content)
@classmethod
def parse_string_in_scope(cls, content, scope, root_name):
"""Parses activity text following grammar."""
if 'activity' != root_name:
            raise Exception('Unsupported schema: %s' % root_name)
return dict(
scope.items() +
{'__builtins__': {}}.items() +
{root_name: cls.parse_string(content).asList()}.items())
# here we register all the parsers
SUPPORTED_PARSERS = {
'activity': ActivityParser13, 'assessment': AssessmentParser13}
def verify_activity(activity_text):
"""Parses and semantically verifies activity."""
activity = ActivityParser13.parse_string_in_scope(
activity_text, verify.Activity().scope, 'activity')
assert activity
verifier = verify.Verifier()
verifier.verify_activity_instance(activity, 'test')
def verify_assessment(assessment_text):
"""Parses and semantically verifies assessment."""
assessment = AssessmentParser13.parse_string_in_scope(
assessment_text, verify.Assessment().scope, 'assessment')
assert assessment
verifier = verify.Verifier()
verifier.verify_assessment_instance(assessment, 'test')
def parse_string_in_scope(content, scope, root_name):
parser = SUPPORTED_PARSERS.get(root_name)
if not parser:
        raise Exception('Unsupported schema: %s' % root_name)
return parser.parse_string_in_scope(content, scope, root_name)
def test_activity_multiple_choice_group():
"""Test activity parsing."""
activity_text = (
"""activity = [
'<p>This is text.</p>',
{
questionType: 'multiple choice group',
questionGroupHTML: '<p>This is text.</p>',
allCorrectMinCount: 55,
allCorrectOutput: '<p>This is text.</p>',
someIncorrectOutput: '<p>This is text.</p>',
questionsList: [
{questionHTML: '<p>This is text.</p>'},
{correctIndex: [1, 2, 3]},
{questionHTML: '<p>This is text.</p>',
correctIndex: 0, multiSelect: false,
choices: ['foo', 'bar'],},
]
},
{
"questionType": 'multiple choice group',
questionGroupHTML:
'<p>This section will test you on colors and numbers.</p>',
questionsList: [
{questionHTML: 'Pick all <i>odd</i> numbers:',
choices: ['1', '2', '3', '4', '5'], correctIndex: [0, 2, 4]},
{questionHTML: 'Pick one <i>even</i> number:',
choices: ['1', '2', '3', '4', '5'], correctIndex: [1, 3],
multiSelect: false},
{questionHTML: 'What color is the sky?',
choices: ['#00FF00', '#00FF00', '#0000FF'], correctIndex: 2}
],
allCorrectMinCount: 2,
allCorrectOutput: 'Great job! You know the material well.',
someIncorrectOutput: 'You must answer at least two questions correctly.'
}
];
""")
verify_activity(activity_text)
def test_activity_multiple_choice():
"""Test activity parsing."""
activity_text = (
"""activity = [
'<p>This is text.</p>',
{
questionType: 'multiple choice',
questionHTML: '<p>This is text.</p>',
choices: [
['<p>This is text.</p>', false, '<p>This is text.</p>'],
['<p>This is text.</p>', true, '<p>This is text.</p>'],
]
}
];
""")
verify_activity(activity_text)
def test_activity_free_text():
"""Test activity parsing."""
activity_text = (
"""activity = [
'<p>This is text.</p>',
{
'questionType': 'freetext',
questionHTML: '<p>This is text.</p>',
showAnswerPrompt: '<p>This is text.</p>',
showAnswerOutput: '<p>This is text.</p>',
correctAnswerRegex: regex("/4|four/i"),
correctAnswerOutput: '<p>This is text.</p>',
incorrectAnswerOutput: '<p>This is text.</p>',
},
{
questionType: 'freetext',
questionHTML: '<p>What color is the snow?</p>',
correctAnswerRegex: regex("/white/i"),
correctAnswerOutput: 'Correct!',
incorrectAnswerOutput: 'Try again.',
showAnswerOutput: 'Our search expert says: white!' },
];
""")
verify_activity(activity_text)
def test_assessment():
"""Test assessment parsing."""
# pylint: disable-msg=anomalous-backslash-in-string
assessment_text = (
"""assessment = {
assessmentName: '12345',
preamble: '<p>This is text.</p>',
checkAnswers: false,
questionsList: [
{questionHTML: '<p>This is text.</p>',
choices:
["A and B", "D and B", correct("A and C"), "C and D", "I don't know"]
},
{questionHTML: '<p>This is text.</p>',
choices: [correct("True"), "False", "I don't know"]
},
{questionHTML: '<p>This is text.</p>',
correctAnswerString: 'sunrise',
correctAnswerNumeric: 7.9
},
{questionHTML: '<p>This is text.</p>',
correctAnswerNumeric: 7,
correctAnswerRegex: regex("/354\s*[+]\s*651/")
}
],
};
""")
# pylint: enable-msg=anomalous-backslash-in-string
verify_assessment(assessment_text)
def test_activity_ast():
"""Test a mix of various activities using legacy and new parser."""
activity_text = (
"""activity = [
'<p>This is just some <i>HTML</i> text!</p>',
{ questionType: 'multiple choice',
questionHTML: '<p>What letter am I thinking about now?</p>',
choices: [
['A', false, '"A" is wrong, try again.'],
['B', true, '"B" is correct!'],
['C', false, '"C" is wrong, try again.'],
['D', false, '"D" is wrong, try again.']
]
},
{ questionType: 'freetext',
questionHTML: '<p>What color is the snow?</p>',
correctAnswerRegex: regex("/white/i"),
correctAnswerOutput: 'Correct!',
incorrectAnswerOutput: 'Try again.',
showAnswerOutput: 'Our search expert says: white!' },
{ questionType: 'multiple choice group',
questionGroupHTML:
'<p>This section will test you on colors and numbers.</p>',
allCorrectMinCount: 2,
questionsList: [
{questionHTML: 'Pick all <i>odd</i> numbers:',
choices: ['1', '2', '3', '4', '5'], correctIndex: [0, 2, 4]},
{questionHTML: 'Pick one <i>even</i> number:',
choices: ['1', '2', '3', '4', '5'], correctIndex: [1, 3],
multiSelect: false},
{questionHTML: 'What color is the sky?',
choices: ['#00FF00', '#00FF00', '#0000FF'], correctIndex: 2}
],
allCorrectOutput: 'Great job! You know the material well.',
someIncorrectOutput: 'You must answer at least two questions correctly.'
}
];
""")
verify_activity(activity_text)
scope = verify.Activity().scope
current_ast = ActivityParser13.parse_string_in_scope(
activity_text, scope, 'activity')
expected_ast = verify.legacy_eval_python_expression_for_test(
activity_text, scope, 'activity')
same = (
len(current_ast.get('activity')) == 4 and
current_ast.get('activity') == expected_ast.get('activity') and
current_ast == expected_ast)
if not same:
        import pprint  # pylint: disable-msg=g-import-not-at-top
pprint.pprint(current_ast.get('activity'))
pprint.pprint(expected_ast.get('activity'))
assert same
def test_assessment_ast():
"""Test a mix of various activities using legacy and new parser."""
# pylint: disable-msg=anomalous-backslash-in-string
assessment_text = (
"""assessment = {
preamble: '<p>This is text.</p>',
questionsList: [
{'questionHTML': '<p>This is text.</p>',
choices:
["A and B", "D and B", correct("A and C"), "C and D", "I don't know"]
},
{"questionHTML": '<p>This is text.</p>',
choices: [correct("True"), "False", "I don't know"]
},
{questionHTML: '<p>This is text.</p>',
correctAnswerString: 'sunrise'
},
{questionHTML: '<p>This is text.</p>',
correctAnswerRegex: regex("/354\s*[+]\s*651/")
}
],
assessmentName: 'Pre',
checkAnswers: false
}
""")
# pylint: enable-msg=anomalous-backslash-in-string
verify_assessment(assessment_text)
scope = verify.Assessment().scope
current_ast = AssessmentParser13.parse_string_in_scope(
assessment_text, scope, 'assessment')
expected_ast = verify.legacy_eval_python_expression_for_test(
assessment_text, scope, 'assessment')
same = (
len(current_ast.get('assessment')) == 4 and
len(current_ast.get('assessment').get('questionsList')) == 4 and
current_ast.get('assessment') == expected_ast.get('assessment') and
current_ast == expected_ast)
if not same:
        import pprint  # pylint: disable-msg=g-import-not-at-top
pprint.pprint(current_ast.get('assessment'))
pprint.pprint(expected_ast.get('assessment'))
assert same
def test_list_of():
"""Test delimited list."""
grammar = Optional(
Literal('[') +
Optional(list_of(Literal('a') ^ Literal('b'))) +
Literal(']'))
assert str(['[', ']']) == str(grammar.parseString('[]'))
assert str(['[', 'a', ']']) == str(grammar.parseString('[a]'))
assert str(['[', 'b', ']']) == str(grammar.parseString('[b]'))
assert str(['[', 'a', ']']) == str(grammar.parseString('[a,]'))
assert str(['[', 'b', ']']) == str(grammar.parseString('[b,]'))
assert str(['[', 'a', 'a', 'a', 'a', ']']) == str(
grammar.parseString('[a, a, a, a]'))
assert str(['[', 'a', 'a', 'a', 'a', ']']) == str(
grammar.parseString('[a,a,a,a]'))
assert str(['[', 'a', 'a', 'a', 'a', ']']) == str(
grammar.parseString('[a,a,a,a,]'))
assert str(['[', 'a', 'b', 'a', 'b', ']']) == str(
grammar.parseString('[a,b,a,b]'))
assert str(['[', 'b', 'a', 'b', 'a', ']']) == str(
grammar.parseString('[b,a,b,a]'))
assert str(['[', 'b', 'b', 'b', 'b', ']']) == str(
grammar.parseString('[b,b,b,b]'))
assert not grammar.parseString('')
assert not grammar.parseString('[c]')
assert not grammar.parseString('[a,c,b]')
def run_all_unit_tests():
"""Run all unit tests."""
original = verify.parse_content
try:
verify.parse_content = parse_string_in_scope
test_list_of()
test_activity_multiple_choice()
test_activity_free_text()
test_activity_multiple_choice_group()
test_activity_ast()
test_assessment()
test_assessment_ast()
# test existing verifier using parsing instead of exec/compile
verify.test_sample_assets()
finally:
verify.parse_content = original
if __name__ == '__main__':
run_all_unit_tests()
|
|
#
# Copyright 2015 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""Generates validator-generated.js.
This script reads validator.protoascii and reflects over its contents
to generate Javascript. This Javascript consists of Closure-style
classes and enums, as well as a createRules function which
instantiates the data structures specified in validator.protoascii -
the validator rules.
From a Javascript perspective, this approach looks elaborate - you may
wonder why we're not just writing Javascript directly, or why we're
not encoding our rules in JSON or YAML or even, gasp, XML? Besides the
additional type safety that we gain from our approach, it allows us to
share the rule specifications, error codes, etc. between multiple
validator implementations, including an implementation in C++. This
makes it much easier to keep otherwise likely divergent behavior in
sync.
"""
import os
def UnderscoreToCamelCase(under_score):
"""Helper function which converts under_score names to camelCase.
In proto buffers, fields have under_scores. In Javascript, fields
have camelCase.
Args:
under_score: A name, segmented by under_scores.
Returns:
A name, segmented as camelCase.
"""
segments = under_score.split('_')
return '%s%s' % (segments[0], ''.join([s.title() for s in segments[1:]]))
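# For example, UnderscoreToCamelCase('tag_spec_name') returns 'tagSpecName'.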
def FindDescriptors(validator_pb2, msg_desc_by_name, enum_desc_by_name):
"""Finds the message and enum descriptors in the file.
This method finds the message and enum descriptors from a file descriptor;
it will visit the top-level messages, and within those the enums.
Args:
validator_pb2: The proto2 Python module generated from validator.proto.
msg_desc_by_name: A map of message descriptors, keyed by full_name.
enum_desc_by_name: A map of enum descriptors, keyed by full name.
"""
for msg_type in validator_pb2.DESCRIPTOR.message_types_by_name.values():
msg_desc_by_name[msg_type.full_name] = msg_type
for enum_type in msg_type.enum_types:
enum_desc_by_name[enum_type.full_name] = enum_type
def FieldTypeFor(descriptor, field_desc):
"""Returns the Javascript type for a given field descriptor.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: A field descriptor for a particular field in a message.
Returns:
The Javascript type for the given field descriptor.
"""
element_type = {
descriptor.FieldDescriptor.TYPE_DOUBLE: lambda: 'number',
descriptor.FieldDescriptor.TYPE_INT32: lambda: 'number',
descriptor.FieldDescriptor.TYPE_BOOL: lambda: 'boolean',
descriptor.FieldDescriptor.TYPE_STRING: lambda: 'string',
descriptor.FieldDescriptor.TYPE_ENUM: (
lambda: field_desc.enum_type.full_name),
descriptor.FieldDescriptor.TYPE_MESSAGE: (
lambda: field_desc.message_type.full_name),
}[field_desc.type]()
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
return '!Array<!%s>' % element_type
else:
return element_type
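# For example, a repeated string field maps to '!Array<!string>', while a
# non-repeated enum field maps to the enum's full proto name (e.g. something
# like 'amp.validator.ValidationError.Code'; the exact name depends on the
# proto definition).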
def NonRepeatedValueToString(descriptor, field_desc, value):
"""For a non-repeated field, renders the value as a Javascript literal.
Helper function for ValueToString.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: The type descriptor for the field value to be rendered.
value: The value of the non-repeated field to be rendered.
Returns:
A Javascript literal for the provided non-repeated value.
"""
if field_desc.type == descriptor.FieldDescriptor.TYPE_STRING:
escaped = ('' + value).encode('unicode-escape')
return "'%s'" % escaped.replace("'", "\\'")
if field_desc.type == descriptor.FieldDescriptor.TYPE_BOOL:
if value:
return 'true'
return 'false'
if field_desc.type == descriptor.FieldDescriptor.TYPE_ENUM:
enum_value_name = field_desc.enum_type.values_by_number[value].name
return '%s.%s' % (field_desc.enum_type.full_name, enum_value_name)
if value is None:
return 'null'
return str(value)
def ValueToString(descriptor, field_desc, value):
"""Renders a field value as a Javascript literal.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: The type descriptor for the field value to be rendered.
value: The value of the field to be rendered.
Returns:
A Javascript literal for the provided value.
"""
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if value:
return '[%s]' % ', '.join([NonRepeatedValueToString(descriptor,
field_desc, s)
for s in value])
return '[]'
return NonRepeatedValueToString(descriptor, field_desc, value)
def PrintClassFor(descriptor, msg_desc, out):
"""Prints a Javascript class for the given proto message.
This method emits a Javascript class (Closure-style) for the given
  proto message by appending lines to the provided output list.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
msg_desc: The descriptor for a particular message type.
out: a list of lines to output (without the newline characters), to
which this function will append.
"""
# TODO(johannes): Should we provide access to the default values?
# Those are given in field.default_value for each field.
out.append('/**')
out.append(' * @constructor')
if (msg_desc.name == 'ValidationResult' or
msg_desc.name == 'ValidationError'):
out.append(' * @export')
out.append(' */')
out.append('%s = function() {' % msg_desc.full_name)
for field in msg_desc.fields:
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
out.append(' /** @export {%s} */' % FieldTypeFor(descriptor, field))
out.append(' this.%s = [];' % UnderscoreToCamelCase(field.name))
else:
out.append(' /** @export {?%s} */' % FieldTypeFor(descriptor, field))
out.append(' this.%s = null;' % UnderscoreToCamelCase(field.name))
out.append('};')
out.append('')
def PrintEnumFor(enum_desc, out):
"""Prints a Javascript enum for the given enum descriptor.
Args:
enum_desc: The descriptor for a particular enum type.
out: a list of lines to output (without the newline characters), to
which this function will append.
"""
out.append('/**')
out.append(' * @enum {string}')
out.append(' * @export')
out.append(' */')
out.append('%s = {' % enum_desc.full_name)
out.append(',\n'.join([" %s: '%s'" % (v.name, v.name)
for v in enum_desc.values]))
out.append('};')
out.append('')
def PrintObject(descriptor, msg, this_id, out):
"""Prints an object, by recursively constructing it.
This routine emits Javascript which will construct an object modeling
the provided message (in practice the ValidatorRules message).
  It references the classes and enums emitted by PrintClassFor and PrintEnumFor.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
msg: A protocol message instance.
this_id: The id for the object being printed (all variables have the form
      o_${num} with ${num} being increasing integers).
out: a list of lines to output (without the newline characters), to
which this function will append.
Returns:
The next object id, that is, next variable available for creating objects.
"""
out.append(' var o_%d = new %s();' % (this_id, msg.DESCRIPTOR.full_name))
next_id = this_id + 1
for (field_desc, field_val) in msg.ListFields():
if field_desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for val in field_val:
field_id = next_id
next_id = PrintObject(descriptor, val, field_id, out)
out.append(' o_%d.%s.push(o_%d);' % (
this_id, UnderscoreToCamelCase(field_desc.name), field_id))
else:
field_id = next_id
next_id = PrintObject(descriptor, field_val, field_id, out)
out.append(' o_%d.%s = o_%d;' % (
this_id, UnderscoreToCamelCase(field_desc.name), field_id))
else:
out.append(' o_%d.%s = %s;' % (
this_id, UnderscoreToCamelCase(field_desc.name),
ValueToString(descriptor, field_desc, field_val)))
return next_id
def GenerateValidatorGeneratedJs(specfile, validator_pb2, text_format,
descriptor, out):
"""Main method for the code generator.
  This method reads the specfile and appends the generated Javascript to out.
Args:
specfile: Path to validator.protoascii, the specfile to generate
Javascript from.
validator_pb2: The proto2 Python module generated from validator.proto.
text_format: The text_format module from the protobuf package, e.g.
google.protobuf.text_format.
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
out: a list of lines to output (without the newline characters), to
which this function will append.
"""
# First, find the descriptors and enums and generate Javascript
# classes and enums.
msg_desc_by_name = {}
enum_desc_by_name = {}
FindDescriptors(validator_pb2, msg_desc_by_name, enum_desc_by_name)
rules_obj = '%s.RULES' % validator_pb2.DESCRIPTOR.package
all_names = [rules_obj] + msg_desc_by_name.keys() + enum_desc_by_name.keys()
all_names.sort()
out.append('//')
out.append('// Generated by %s - do not edit.' % os.path.basename(__file__))
out.append('//')
out.append('')
for name in all_names:
out.append("goog.provide('%s');" % name)
out.append('')
for name in all_names:
if name in msg_desc_by_name:
PrintClassFor(descriptor, msg_desc_by_name[name], out)
elif name in enum_desc_by_name:
PrintEnumFor(enum_desc_by_name[name], out)
# Read the rules file, validator.protoascii by parsing it as a text
# message of type ValidatorRules.
rules = validator_pb2.ValidatorRules()
text_format.Merge(open(specfile).read(), rules)
out.append('/**')
out.append(' * @return {!%s}' % rules.DESCRIPTOR.full_name)
out.append(' */')
out.append('function createRules() {')
PrintObject(descriptor, rules, 0, out)
out.append(' return o_0;')
out.append('}')
out.append('')
out.append('/**')
out.append(' * @type {!%s}' % rules.DESCRIPTOR.full_name)
out.append(' */')
out.append('%s = createRules();' % rules_obj)
|
|
# Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import importlib
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import numpy as np
from nnabla.ext_utils import get_extension_context
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
from nnabla.utils.nnp_graph import NnpLoader, NnpNetworkPass
from nnabla.utils.image_utils import imread
from nnabla.utils.data_iterator import data_iterator_simple
import argparse
def get_data_iterator_and_num_class(args):
"""
    Get data iterators for the training and test data sets.
Also, obtain class / category information from data.
"""
if args.train_csv:
from nnabla.utils.data_iterator import data_iterator_csv_dataset
data_iterator = data_iterator_csv_dataset
if args.test_csv:
assert os.path.isfile(
args.test_csv), "csv file for test not found."
# check the number of the classes / categories
with open(args.train_csv, "r") as f:
csv_data_train = f.readlines()[1:] # line 1:"x:image,y:label"
classes_train = {line.split(",")[-1].strip()
for line in csv_data_train}
with open(args.test_csv, "r") as f:
# first line:"x:image,y:label"
csv_data_test = f.readlines()[1:]
classes_test = {line.split(",")[-1].strip()
for line in csv_data_test}
classes_train.update(classes_test)
num_class = len(classes_train)
data_iterator_train = data_iterator_csv_dataset(
args.train_csv, args.batch_size, args.shuffle,
normalize=False)
data_iterator_valid = data_iterator_csv_dataset(
args.test_csv, args.batch_size, args.shuffle,
normalize=False)
else:
print("No csv file for test given. So split the training data")
assert isintance(args.ratio, float), "ratio must be in (0.0, 1.0)"
# check the number of the classes / categories
with open(args.train_csv, "r") as f:
# first line is "x:image,y:label"
csv_data_train = f.readlines()[1:]
all_classes = {line.split(",")[-1].strip()
for line in csv_data_train}
num_class = len(all_classes)
all_data = data_iterator_csv_dataset(
args.train_csv, args.batch_size, args.shuffle,
normalize=False)
num_samples = all_data.size
num_train_samples = int(args.ratio * num_samples)
data_iterator_train = all_data.slice(
rng=None, slice_start=0, slice_end=num_train_samples)
data_iterator_valid = all_data.slice(
rng=None, slice_start=num_train_samples, slice_end=num_samples)
else:
# use caltech101 data like tutorial
from caltech101_data import data_iterator_caltech101
        assert isinstance(args.ratio, float), "ratio must be in (0.0, 1.0)"
data_iterator = data_iterator_caltech101
num_class = 101 # pre-defined (excluding background class)
all_data = data_iterator(
args.batch_size, width=args.width, height=args.height)
num_samples = all_data.size
num_train_samples = int(args.ratio * num_samples)
data_iterator_train = all_data.slice(
rng=None, slice_start=0, slice_end=num_train_samples)
data_iterator_valid = all_data.slice(
rng=None, slice_start=num_train_samples, slice_end=num_samples)
print("training images: {}".format(data_iterator_train.size))
print("validation images: {}".format(data_iterator_valid.size))
print("{} categories included.".format(num_class))
return data_iterator_train, data_iterator_valid, num_class
def learning_rate_scheduler(curr_iter, T_max, eta_max, eta_min=0):
"""
cosine annealing scheduler.
"""
lr = eta_min + 0.5 * (eta_max - eta_min) * \
(1 + np.cos(np.pi*(curr_iter / T_max)))
return lr
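# For example, with eta_max=0.01 and eta_min=0 the schedule gives:
#   curr_iter = 0        -> lr = 0.01   (cos(0)    =  1)
#   curr_iter = T_max/2  -> lr = 0.005  (cos(pi/2) =  0)
#   curr_iter = T_max    -> lr = 0.0    (cos(pi)   = -1)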
def loss_function(pred, label):
"""
Compute loss.
"""
loss = F.mean(F.softmax_cross_entropy(pred, label))
return loss
def construct_networks(args, images, model, num_class, test):
try:
pooled = model(images, force_global_pooling=1,
use_up_to="pool", training=not test)
except:
pooled = model(images, use_up_to="pool", training=not test)
with nn.parameter_scope("finetuning"):
if args.model == "VGG":
pooled = F.relu(pooled)
with nn.parameter_scope("additional_fc_1"):
pooled = PF.affine(pooled, 4096)
pooled = F.relu(pooled)
if not test:
pooled = F.dropout(pooled, 0.5)
with nn.parameter_scope("additional_fc_2"):
pooled = PF.affine(pooled, 4096)
pooled = F.relu(pooled)
if not test:
pooled = F.dropout(pooled, 0.5)
with nn.parameter_scope("last_fc"):
pred = PF.affine(pooled, num_class)
return pred
def CNN_run(args, model):
data_iterator_train, data_iterator_valid, num_class = \
get_data_iterator_and_num_class(args)
channels, image_height, image_width = 3, args.height, args.width
batch_size = args.batch_size
initial_model_lr = args.model_lr
one_epoch = data_iterator_train.size // batch_size
max_iter = args.epoch * one_epoch
val_iter = data_iterator_valid.size // batch_size
# Create monitor.
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=100)
monitor_err = MonitorSeries("Training error", monitor, interval=100)
monitor_vloss = MonitorSeries("Test loss", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=100)
# prepare variables and graph used for test
image_valid = nn.Variable(
(batch_size, channels, image_height, image_width))
label_valid = nn.Variable((batch_size, 1))
input_image_valid = {"image": image_valid, "label": label_valid}
pred_valid = construct_networks(
args, image_valid, model, num_class, test=True)
pred_valid.persistent = True
loss_valid = loss_function(pred_valid, label_valid)
top_1e_valid = F.mean(F.top_n_error(pred_valid, label_valid))
# prepare variables and graph used for training
image_train = nn.Variable(
(batch_size, channels, image_height, image_width))
label_train = nn.Variable((batch_size, 1))
input_image_train = {"image": image_train, "label": label_train}
pred_train = construct_networks(
args, image_train, model, num_class, test=False)
loss_train = loss_function(pred_train, label_train)
top_1e_train = F.mean(F.top_n_error(pred_train, label_train))
# prepare solvers
solver = S.Momentum(initial_model_lr)
solver.set_parameters(nn.get_parameters())
# Training-loop
for i in range(max_iter):
image, label = data_iterator_train.next()
input_image_train["image"].d = image
input_image_train["label"].d = label
nn.forward_all([loss_train, top_1e_train], clear_no_need_grad=True)
monitor_loss.add(i, loss_train.d.copy())
monitor_err.add(i, top_1e_train.d.copy())
if args.lr_control_model:
new_lr = learning_rate_scheduler(i, max_iter, initial_model_lr, 0)
solver.set_learning_rate(new_lr)
solver.zero_grad()
loss_train.backward(clear_buffer=True)
if args.with_grad_clip_model:
for k, v in nn.get_parameters().items():
v.grad.copy_from(F.clip_by_norm(
v.grad, args.grad_clip_value_model))
# update parameters
solver.weight_decay(args.weight_decay_model)
solver.update()
if i % args.model_save_interval == 0:
# Validation during training.
ve = 0.
vloss = 0.
for j in range(val_iter):
v_image, v_label = data_iterator_valid.next()
input_image_valid["image"].d = v_image
input_image_valid["label"].d = v_label
nn.forward_all([loss_valid, top_1e_valid], clear_buffer=True)
vloss += loss_valid.d.copy()
ve += top_1e_valid.d.copy()
ve /= val_iter
vloss /= val_iter
monitor_vloss.add(i, vloss)
monitor_verr.add(i, ve)
nn.save_parameters(os.path.join(
args.model_save_path, 'params_{}.h5'.format(i)))
ve = 0.
vloss = 0.
for j in range(val_iter):
v_image, v_label = data_iterator_valid.next()
input_image_valid["image"].d = v_image
input_image_valid["label"].d = v_label
nn.forward_all([loss_valid, top_1e_valid], clear_buffer=True)
vloss += loss_valid.d.copy()
ve += top_1e_valid.d.copy()
ve /= val_iter
vloss /= val_iter
monitor_vloss.add(i, vloss)
monitor_verr.add(i, ve)
nn.save_parameters(os.path.join(
args.model_save_path, 'params_{}.h5'.format(i)))
return
def main(args):
if not args.train_csv:
print("No user-made data given. Use caltech101 dataset for finetuning.")
else:
# prepare dataset.
assert os.path.isfile(
args.train_csv), "csv file for training not found, create dataset first."
ctx = get_extension_context(
args.context, device_id=args.device_id, type_config=args.type_config)
nn.set_default_context(ctx)
ext = nn.ext_utils.import_extension_module(args.context)
print("Use {} for fine-tuning".format(args.model))
model_name = args.model
if model_name == "ResNet":
num_layers = args.res_layer
elif model_name == "VGG":
num_layers = args.vgg_layer
elif model_name == "SqueezeNet":
num_layers = args.squeeze_ver
else:
num_layers = ""
model_module = importlib.import_module("nnabla.models.imagenet")
MODEL = getattr(model_module, model_name)
if model_name in ["ResNet", "VGG", "SqueezeNet"]:
model = MODEL(num_layers) # got model
else:
model = MODEL()
CNN_run(args, model)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model settings
parser.add_argument('--model', type=str,
choices=["ResNet18",
"ResNet34",
"ResNet50",
"ResNet101",
"ResNet152",
"MobileNet",
"MobileNetV2",
"SENet",
"SqueezeNetV10",
"SqueezeNetV11",
"VGG11",
"VGG13",
"VGG16",
"NIN",
"DenseNet",
"InceptionV3",
"Xception",
"GoogLeNet",
"ResNet",
"SqueezeNet",
"VGG",
],
default="ResNet", help='name of the model')
parser.add_argument('--height', type=int, default=128, help='image height')
parser.add_argument('--width', type=int, default=128, help='image width')
# Dataset settings
    parser.add_argument('--train-csv', type=str, default="",
                        help='.csv file which contains all the image paths used for training')
    parser.add_argument('--test-csv', type=str, default="",
                        help='.csv file which contains all the image paths used for testing')
    parser.add_argument('--ratio', type=float, default=0.8,
                        help='fraction of the samples used for training; the remainder is used for validation')
    parser.add_argument('--shuffle', type=bool, default=True,
                        help='whether or not to shuffle the dataset. Better to keep it True.')
# General settings
parser.add_argument('--context', '-c', type=str, default='cudnn',
help="Extension path. ex) cpu, cudnn.")
parser.add_argument("--device-id", "-d", type=str, default='0',
help='Device ID the training run on. \
This is only valid if you specify `-c cudnn`.')
parser.add_argument("--type-config", "-t", type=str, default='float',
help='Type of computation. e.g. "float", "half".')
parser.add_argument("--epoch", "-e", type=int, default=10)
# batch_size
parser.add_argument("--batch-size", "-b", type=int, default=32)
# learning rate and its control
parser.add_argument("--model-lr", type=float, default=0.025)
parser.add_argument("--lr-control-model", type=bool, default=True,)
# gradient clip model
parser.add_argument("--with-grad-clip-model", type=bool, default=False)
parser.add_argument("--grad-clip-value-model", type=float, default=5.0)
# weight_decay
parser.add_argument("--weight-decay-model", type=float, default=3e-4,
help='Weight decay rate. Weight decay is executed by default. \
Set it 0 to virtually disable it.')
# misc
parser.add_argument("--monitor-path", "-m",
type=str, default='tmp.monitor')
parser.add_argument("--model-save-interval", "-s", type=int, default=1000)
parser.add_argument("--model-save-path", "-o",
type=str, default='tmp.monitor')
# DEPRECATED. model-specific arguments
parser.add_argument('--res-layer', type=int, choices=[
18, 34, 50, 101, 152], default=18, help='DEPRECATED. which variation to use for ResNet')
parser.add_argument('--vgg-layer', type=int,
choices=[11, 13, 16], default=16, help='DEPRECATED. which variation to use for VGG')
parser.add_argument('--squeeze-ver', type=str, choices=[
'v1.0', 'v1.1'], default='v1.1', help='DEPRECATED. which version of SqueezeNet')
args = parser.parse_args()
main(args)
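# Example invocation (illustrative only; the script name and file paths below are
# assumptions, not part of the original source):
#   python finetuning.py --model VGG --vgg-layer 16 --train-csv data/train.csv \
#       --batch-size 32 --epoch 10 -c cudnn -d 0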
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
class Gamma(distribution.Distribution):
"""The `Gamma` distribution with parameter alpha and beta.
The parameters are the shape and inverse scale parameters alpha, beta.
The PDF of this distribution is:
```pdf(x) = (beta^alpha)(x^(alpha-1))e^(-x*beta)/Gamma(alpha), x > 0```
and the CDF of this distribution is:
```cdf(x) = GammaInc(alpha, beta * x) / Gamma(alpha), x > 0```
where GammaInc is the incomplete lower Gamma function.
WARNING: This distribution may draw 0-valued samples for small alpha values.
See the note on `tf.random_gamma`.
Examples:
```python
dist = Gamma(alpha=3.0, beta=2.0)
dist2 = Gamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])
```
"""
def __init__(self,
alpha,
beta,
validate_args=False,
allow_nan_stats=True,
name="Gamma"):
"""Construct Gamma distributions with parameters `alpha` and `beta`.
The parameters `alpha` and `beta` must be shaped in a way that supports
broadcasting (e.g. `alpha + beta` is a valid operation).
Args:
alpha: Floating point tensor, the shape params of the
distribution(s).
alpha must contain only positive values.
beta: Floating point tensor, the inverse scale params of the
distribution(s).
beta must contain only positive values.
validate_args: `Boolean`, default `False`. Whether to assert that
`a > 0`, `b > 0`, and that `x > 0` in the methods `prob(x)` and
`log_prob(x)`. If `validate_args` is `False` and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to prepend to all ops created by this distribution.
Raises:
TypeError: if `alpha` and `beta` are different dtypes.
"""
with ops.name_scope(name, values=[alpha, beta]) as ns:
with ops.control_dependencies([
check_ops.assert_positive(alpha),
check_ops.assert_positive(beta),
] if validate_args else []):
self._alpha = array_ops.identity(alpha, name="alpha")
self._beta = array_ops.identity(beta, name="beta")
contrib_tensor_util.assert_same_float_dtype((self._alpha, self._beta))
super(Gamma, self).__init__(
dtype=self._alpha.dtype,
parameters={"alpha": self._alpha, "beta": self._beta},
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=True,
is_reparameterized=False,
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("alpha", "beta"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def alpha(self):
"""Shape parameter."""
return self._alpha
@property
def beta(self):
"""Inverse scale parameter."""
return self._beta
def _batch_shape(self):
return array_ops.shape(self.alpha + self.beta)
def _get_batch_shape(self):
return common_shapes.broadcast_shape(self.alpha.get_shape(),
self.beta.get_shape())
def _event_shape(self):
return constant_op.constant([], dtype=dtypes.int32)
def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
"""See the documentation for tf.random_gamma for more details."""
return random_ops.random_gamma([n],
self.alpha,
beta=self.beta,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x],
dtype=self.dtype)
return (self.alpha * math_ops.log(self.beta) +
(self.alpha - 1.) * math_ops.log(x) -
self.beta * x -
math_ops.lgamma(self.alpha))
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
# Note that igamma returns the regularized incomplete gamma function,
# which is what we want for the CDF.
return math_ops.log(math_ops.igamma(self.alpha, self.beta * x))
def _cdf(self, x):
return math_ops.igamma(self.alpha, self.beta * x)
@distribution_util.AppendDocstring(
"""This is defined to be
```
entropy = alpha - log(beta) + log(Gamma(alpha))
+ (1-alpha)digamma(alpha)
```
where digamma(alpha) is the digamma function.
""")
def _entropy(self):
return (self.alpha -
math_ops.log(self.beta) +
math_ops.lgamma(self.alpha) +
(1. - self.alpha) * math_ops.digamma(self.alpha))
def _mean(self):
return self.alpha / self.beta
def _variance(self):
return self.alpha / math_ops.square(self.beta)
def _std(self):
return math_ops.sqrt(self.alpha) / self.beta
@distribution_util.AppendDocstring(
"""The mode of a gamma distribution is `(alpha - 1) / beta` when
`alpha > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`,
an exception will be raised rather than returning `NaN`.""")
def _mode(self):
mode = (self.alpha - 1.) / self.beta
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return math_ops.select(
self.alpha >= 1.,
mode,
array_ops.fill(self.batch_shape(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones((), self.dtype),
self.alpha,
message="mode not defined for components of alpha <= 1"),
], mode)
class GammaWithSoftplusAlphaBeta(Gamma):
"""Gamma with softplus transform on `alpha` and `beta`."""
def __init__(self,
alpha,
beta,
validate_args=False,
allow_nan_stats=True,
name="GammaWithSoftplusAlphaBeta"):
with ops.name_scope(name, values=[alpha, beta]) as ns:
super(GammaWithSoftplusAlphaBeta, self).__init__(
alpha=nn.softplus(alpha),
beta=nn.softplus(beta),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
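# Illustrative note (not part of the original module): the softplus variant accepts
# unconstrained parameter values, e.g. alpha=-1.0 maps to softplus(-1.0) ~= 0.31 and
# beta=0.5 maps to softplus(0.5) ~= 0.97, so both parameters end up strictly positive
# as the Gamma distribution requires.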
|
|
"""
Testing module for Virtual Watershed Data adaptor.
"""
from ..watershed import make_watershed_metadata, make_fgdc_metadata, \
VWClient, default_vw_client, _get_config, metadata_from_file
import datetime
import json
import pandas as pd
import os
import requests
import time
import unittest
from uuid import uuid4
from difflib import Differ
from requests.exceptions import HTTPError
from nose.tools import raises
# Path hack.
import sys
sys.path.insert(0, os.path.abspath('..'))
from ..watershed import VARNAME_DICT
def show_string_diff(s1, s2):
""" Writes differences between strings s1 and s2 """
d = Differ()
diff = d.compare(s1.splitlines(), s2.splitlines())
diffList = [el for el in diff
if el[0] != ' ' and el[0] != '?']
for l in diffList:
if l[0] == '+':
print '+' + bcolors.GREEN + l[1:] + bcolors.ENDC
elif l[0] == '-':
print '-' + bcolors.RED + l[1:] + bcolors.ENDC
else:
assert False, 'Error, diffList entry must start with + or -'
class bcolors:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
VW_CLIENT = default_vw_client('vwpy/test/test.conf')
class TestJSONMetadata(unittest.TestCase):
""" Test that individual and sets of JSON metadata are being properly
generated. """
def setUp(self):
"""
initialize the class with some appropriate entry
metadata from file
"""
self.config = _get_config('vwpy/test/test.conf')
self.modelRunUUID = '09079630-5ef8-11e4-9803-0800200c9a66'
self.parentModelRunUUID = '373ae181-a0b2-4998-ba32-e27da190f6dd'
def testCorrectMetadatum(self):
""" Test that a single metadata JSON string is properly built (JSON)"""
# minimal watershed JSON with geotiff
generated = make_watershed_metadata(
'vwpy/test/data/in.0010.I_lw.tif',
self.config, 'MODELRUNXX**A*', 'MODELRUNXX**A*', 'inputs',
'Dry Creek', 'Idaho', file_ext='tif', taxonomy='geoimage',
model_name='isnobal', proc_date='2015-05-08')
# load expected json metadata file
expected = \
open('vwpy/test/data/expected_minimal_tif_watershed.json',
'r').read()
# check equality
assert generated.strip() == expected.strip(), \
show_string_diff(generated, expected)
# minimal watershed JSON with iSNOBAL binary
generated = make_watershed_metadata(
'vwpy/test/data/in.0010',
self.config, 'MODELRUNXX**A*','MODELRUNXX**A*', 'inputs',
'Dry Creek', 'Idaho', file_ext='bin', model_vars='I_lw,T_a,e_a,u,T_g,S_n',
model_name='isnobal', proc_date='2015-05-08')
expected = open('vwpy/test/data/expected_minimal_isno_watershed.json',
'r').read()
# check equality
assert generated.strip() == expected.strip(), \
show_string_diff(generated, expected)
# full watershed JSON with geotiff
xml = make_fgdc_metadata('vwpy/test/data/in.0010.I_lw.tif',
self.config, 'MODELRUNXX**AA*',
"2010-10-01", "2011-09-31",
proc_date="2015-05-07",
theme_key="watershed", row_count=170,
column_count=124, lat_res=2.5,
lon_res=2.5, map_units='m')
generated = make_watershed_metadata(
'vwpy/test/data/in.0010.I_lw.tif',
self.config, 'MODELRUNXX**A*','MODELRUNXX**A*', 'inputs',
'Dry Creek', 'Idaho', fgdc_metadata=xml,
orig_epsg=26911, epsg=4326, model_set_type='tif', model_vars='I_lw',
model_set_taxonomy='grid', start_datetime='2010-10-01 10:00:00',
end_datetime='2010-10-01 11:00:00', model_name='isnobal')
# load expected json metadata file
expected = open('vwpy/test/data/expected_full_tif_watershed.json',
'r').read()
# check equality
assert generated.strip() == expected.strip(), \
show_string_diff(generated, expected)
# full watershed JSON with iSNOBAL binary
xml = make_fgdc_metadata('vwpy/test/data/in.0010',
self.config, 'MODELRUNXX**AA*',
"2010-10-01", "2011-09-31",
proc_date="2015-05-07",
theme_key="watershed", row_count=170,
column_count=124, lat_res=2.5,
lon_res=2.5, map_units='m', file_ext='bin')
generated = make_watershed_metadata(
'vwpy/test/data/in.0010',
self.config, 'MODELRUNXX**A*','MODELRUNXX**A*', 'inputs',
'Dry Creek', 'Idaho', fgdc_metadata=xml,
start_datetime='2010-01-01 10:00:00', end_datetime='2010-01-01 11:00:00',
orig_epsg=26911, epsg=4326, model_set_type='binary',
file_ext='bin', model_vars='I_lw,T_a,e_a,u,T_g,S_n',
model_name='isnobal')
expected = open('vwpy/test/data/expected_full_isno_watershed.json',
'r').read()
# check equality
assert generated.strip() == expected.strip(), \
show_string_diff(generated, expected)
class TestFGDCMetadata(unittest.TestCase):
""" Test individual and sets of XML FGDC-standard metadata are being
properly generated and uploaded to the Virtual Watershed
"""
def setUp(self):
""" initialize the class with some appropriate entry
metadata from file
"""
self.config = _get_config('vwpy/test/test.conf')
self.modelRunUUID = '09079630-5ef8-11e4-9803-0800200c9a66'
self.dataFile = 'vwpy/test/data/in.0000'
def testCorrectMetadatum(self):
""" Test that a single metadata JSON string is properly built (FGDC)"""
cfg = self.config
generated = make_fgdc_metadata('vwpy/test/data/in.0000',
cfg, 'MODELRUNXX**AA*', "2010-10-01",
"2011-09-31", proc_date="2015-05-07",
file_ext='ipw')
expected = open('vwpy/test/data/expected_minimal_fgdc.xml',
'r').read()
assert generated.strip() == expected.strip(), \
show_string_diff(generated, expected)
generated = \
make_fgdc_metadata('vwpy/test/data/in.0010.I_lw.tif',
cfg, 'MODELRUNXX**AA*',
"2010-10-01", "2011-09-31",
proc_date="2015-05-07",
theme_key="watershed", row_count=170,
column_count=124, lat_res=2.5,
lon_res=2.5, map_units='m')
expected = open('vwpy/test/data/expected_full_fgdc.xml',
'r').read()
assert generated.strip() == expected.strip(), \
show_string_diff(generated, expected)
class TestVWClient(unittest.TestCase):
""" Test the functionality of the Virtual Watershed client """
def setUp(self):
# clean up pre-existing unittest model runs
modelruns = VW_CLIENT.modelrun_search()
unittest_uuids = [r['Model Run UUID'] for r in modelruns.records
if 'unittest' in r['Model Run Name']]
for u in unittest_uuids:
s = VW_CLIENT.delete_modelrun(u)
print "pre-test cleanup success on %s: %s" % (u, str(s))
self.config = _get_config('vwpy/test/test.conf')
self.kwargs = {'keywords': 'Snow,iSNOBAL,wind',
'researcher_name': self.config['Researcher']['researcher_name'],
'description': 'unittest',
'model_run_name': 'unittest' + str(uuid4())}
self.UUID = VW_CLIENT.initialize_modelrun(**self.kwargs)
self.parent_uuid = self.UUID
VW_CLIENT.upload(self.UUID, 'vwpy/test/data/in.0000')
fgdc_md = make_fgdc_metadata('vwpy/test/data/in.0000',
self.config, self.UUID, "2010-10-01 00:00:00", "2010-10-01 01:00:00")
wmd_from_file = metadata_from_file('vwpy/test/data/in.0000',
self.UUID, self.UUID, 'unittest for download', 'Dry Creek', 'Idaho',
start_datetime="2010-10-01 00:00:00",
end_datetime="2010-10-01 01:00:00",
fgdc_metadata=fgdc_md, model_set_type='grid', file_ext='bin',
taxonomy='geoimage', model_set_taxonomy='grid',
model_name='isnobal', epsg=4326, orig_epsg=26911)
res = VW_CLIENT.insert_metadata(wmd_from_file)
time.sleep(1)
def test_initialize_modelrun(self):
"""
        Test that a new model_run_uuid corresponding to a new model run is properly initialized
"""
kwargs = {'keywords': 'Snow,iSNOBAL,wind',
'researcher_name': 'Matthew Turner',
'description': 'model run db testing',
'model_run_name': 'initialize unittest ' + str(uuid4())}
new_uuid = \
VW_CLIENT.initialize_modelrun(**kwargs)
result = VW_CLIENT.dataset_search(model_run_uuid=new_uuid)
        assert result.total == 0, \
            'A newly initialized model run should have no datasets; result.total = %d' % result.total
@raises(HTTPError)
def test_duplicate_error(self):
"""
If the user tries to init a new model run with a previously used name, catch HTTPError
"""
keywords = 'Snow,iSNOBAL,wind'
description = 'model run db testing'
model_run_name = 'dup_test ' + str(uuid4())
uuid = VW_CLIENT.initialize_modelrun(keywords=keywords,
description=description,
model_run_name=model_run_name,
researcher_name=self.config['Researcher']['researcher_name'])
print "first inserted successfully"
# TODO get watershed guys to make researcher, model run name be PK
# at that point, this test will fail, but re-inserting Bill's
# fake submission will throw
VW_CLIENT.initialize_modelrun(keywords=keywords,
researcher_name=self.config['Researcher']['researcher_name'],
description=description,
model_run_name=model_run_name)
VW_CLIENT.delete_modelrun(uuid)
@raises(HTTPError)
def test_authFail(self):
""" Test that failed authorization is correctly caught """
vw_host = self.config['Connection']['watershed_url']
VWClient(vw_host, 'fake_user', 'fake_passwd')
def test_insert(self):
""" VW Client properly inserts data """
kwargs = {'keywords': 'Snow,iSNOBAL,wind',
'researcher_name': self.config['Researcher']['researcher_name'],
'description': 'unittest',
'model_run_name': 'unittest' + str(uuid4())}
UUID = \
VW_CLIENT.initialize_modelrun(**kwargs)
VW_CLIENT.upload(UUID, 'vwpy/test/data/in.0000')
dataFile = 'vwpy/test/data/in.0000'
fgdcXML = \
make_fgdc_metadata(dataFile, self.config, UUID,
"2010-10-01 00:00:00", "2010-10-01 01:00:00")
watershedJSON = \
make_watershed_metadata(dataFile, self.config, UUID,
UUID, 'inputs', 'Dry Creek', 'Idaho',
description='Description of the data',
start_datetime='2010-01-01 10:00:00',
end_datetime='2010-01-01 11:00:00', orig_epsg=26911, epsg=4326,
model_set_type='binary', file_ext='bin',
model_set_taxonomy='grid', taxonomy='geoimage',
model_vars='I_lw,T_a,e_a,u,T_g,S_n', model_name='isnobal')
insert_result = VW_CLIENT.insert_metadata(watershedJSON)
time.sleep(1)
vwTestUUIDEntries = VW_CLIENT.dataset_search(model_run_uuid=UUID)
assert vwTestUUIDEntries.records,\
'No VW Entries corresponding to the test UUID'
def test_insertFail(self):
"VW Client passes along correct status code on failed insert"
response = VW_CLIENT.insert_metadata('{"metadata": {"xml": "mo garbage"}}')
assert response.status_code == 500
def test_upload(self):
""" VW Client properly uploads data """
# fetch the file from the url we know from the VW file storage pattern
results = \
VW_CLIENT.dataset_search(model_run_uuid=self.UUID, limit=1)
url = results.records[0]['downloads'][0]['bin']
outfile = "vwpy/test/data/back_in.0000"
if os.path.isfile(outfile):
os.remove(outfile)
VW_CLIENT.download(url, outfile)
# check that the file now exists in the file system as expected
assert os.path.isfile(outfile)
os.remove(outfile)
# now do the same for netcdf
nc_file = 'vwpy/test/data/flat_sample.nc'
VW_CLIENT.upload(self.UUID, nc_file)
wmd_from_file = metadata_from_file('flat_sample.nc', self.UUID, self.UUID,
'testing upload/download of netcdf', 'Dry Creek', 'Idaho',
model_name='isnobal', model_set_type='grid', model_set='inputs',
model_set_taxonomy='grid', taxonomy='geoimage',
file_ext='nc', orig_epsg=26911, epsg=4326)
VW_CLIENT.insert_metadata(wmd_from_file)
time.sleep(1)
nc_url = [r['downloads'][0]['nc']
for r in VW_CLIENT.dataset_search(model_run_uuid=self.UUID).records
if r['name'].split('.')[-1] == 'nc'][0]
outfile = "vwpy/test/data/back_in.nc"
if os.path.isfile(outfile):
os.remove(outfile)
VW_CLIENT.download(nc_url, outfile)
# check that the file now exists in the file system as expected
assert os.path.isfile(outfile)
os.remove(outfile)
def test_swift_upload(self):
""" VW Client properly uploads data using the swift client"""
# now do the same for netcdf
nc_file = 'vwpy/test/data/flat_sample_for_swift.nc'
res = VW_CLIENT.swift_upload(self.UUID, nc_file)
wmd_from_file = metadata_from_file(nc_file, self.UUID, self.UUID,
'testing upload/download of netcdf', 'Dry Creek', 'Idaho',
model_name='isnobal', model_set_type='grid', model_set='inputs',
model_set_taxonomy='grid', taxonomy='geoimage',
file_ext='nc', orig_epsg=26911, epsg=4326)
VW_CLIENT.insert_metadata(wmd_from_file)
time.sleep(1)
nc_url = [r['downloads'][0]['nc']
for r in VW_CLIENT.dataset_search(model_run_uuid=self.UUID).records
if r['name'].split('.')[-1] == 'nc'][0]
outfile = "vwpy/test/data/back_in.nc"
if os.path.isfile(outfile):
os.remove(outfile)
VW_CLIENT.download(nc_url, outfile)
# check that the file now exists in the file system as expected
assert os.path.isfile(outfile)
os.remove(outfile)
def test_download(self):
"""
VW Client properly downloads data
"""
result = \
VW_CLIENT.dataset_search(model_run_uuid=self.UUID, limit=1)
r0 = result.records[0]
url = r0['downloads'][0]['bin']
outfile = "vwpy/test/data/test_dl.file"
if os.path.isfile(outfile):
os.remove(outfile)
VW_CLIENT.download(url, outfile)
assert os.path.isfile(outfile)
os.remove(outfile)
@raises(AssertionError)
def test_downloadFail(self):
""" VW Client throws error on failed download """
url = "http://httpbin.org/status/404"
VW_CLIENT.download(url, "this won't ever exist")
def test_watershed_connection(self):
"""
Test watershed functions operating on an IPW instance or as a static method
"""
# load expected json metadata file
expected = open('vwpy/test/data/expected_ipw_metadata.json',
'r').read()
description = 'Testing metadata!'
# TODO this gets tests passing; standardize uuids in setUp on nxt rfctr
parent_uuid = '373ae181-a0b2-4998-ba32-e27da190f6dd'
uuid = '09079630-5ef8-11e4-9803-0800200c9a66'
generated = metadata_from_file('vwpy/test/data/in.0000',
parent_uuid,
uuid,
description, 'Dry Creek', 'Idaho',
model_name='isnobal',
file_ext='bin',
config_file='vwpy/test/test.conf',
proc_date='2015-07-14')
# check equality
assert generated
assert expected
assert generated.strip() == expected.strip(), show_string_diff(generated, expected)
def test_metadata_from_file(self):
"""
        Test that metadata is properly generated from a .tif file
"""
# some values we're using for testing
parent_uuid = '373ae181-a0b2-4998-ba32-e27da190f6dd'
uuid = '09079630-5ef8-11e4-9803-0800200c9a66'
# .tif
generated = metadata_from_file(
os.path.dirname(__file__) + '/data/in.0008.I_lw.tif',
parent_uuid, uuid, 'Testing metadata!', 'Dry Creek', 'Idaho',
config_file='vwpy/test/test.conf', model_vars='I_lw',
proc_date="2015-05-12")
expected = open('vwpy/test/data/expected_tif.json', 'r').read()
assert generated.strip() == expected.strip(), \
show_string_diff(generated, expected)
# now assume we have resampled to 3-day intervals
dt = pd.Timedelta('3 days')
generated = metadata_from_file(
os.path.dirname(__file__) + '/data/in.0008.I_lw.tif',
parent_uuid, uuid, 'Testing metadata!', 'Dry Creek', 'Idaho',
config_file='vwpy/test/test.conf', dt=dt,
model_vars='melt', proc_date="2015-05-12")
expected = open('vwpy/test/data/expected_tif_nonhourdt.json',
'r').read()
assert generated.strip() == expected.strip(), \
show_string_diff(generated, expected)
def tearDown(self):
# clean up pre-existing unittest model runs
modelruns = VW_CLIENT.modelrun_search()
unittest_uuids = [r['Model Run UUID'] for r in modelruns.records
if 'unittest' in r['Model Run Name']]
for u in unittest_uuids:
s = VW_CLIENT.delete_modelrun(u)
print "post-test cleanup success on %s: %s" % (u, str(s))
dup_test_uuids = [r['Model Run UUID'] for r in modelruns.records
if 'dup_test' in r['Model Run Name']]
for u in dup_test_uuids:
s = VW_CLIENT.delete_modelrun(u)
print "post-test cleanup success on %s: %s" % (u, str(s))
def _gen_kw_args():
return {'keywords': 'Snow,iSNOBAL,wind',
'description': 'unittest',
'model_run_name': 'unittest' + str(uuid4())}
|
|
from __future__ import unicode_literals
import os
import base64
import datetime
import hashlib
import copy
import itertools
import codecs
import random
import string
import six
from bisect import insort
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey, \
InvalidNotificationDestination, MalformedXML, InvalidStorageClass
from .utils import clean_key_name, _VersionedKeyStore
UPLOAD_ID_BYTES = 43
UPLOAD_PART_MIN_SIZE = 5242880
STORAGE_CLASS = ["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA"]
class FakeDeleteMarker(BaseModel):
def __init__(self, key):
self.key = key
self._version_id = key.version_id + 1
@property
def version_id(self):
return self._version_id
class FakeKey(BaseModel):
def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0):
self.name = name
self.value = value
self.last_modified = datetime.datetime.utcnow()
self.acl = get_canned_acl('private')
self.website_redirect_location = None
self._storage_class = storage if storage else "STANDARD"
self._metadata = {}
self._expiry = None
self._etag = etag
self._version_id = version_id
self._is_versioned = is_versioned
self._tagging = FakeTagging()
@property
def version_id(self):
return self._version_id
def copy(self, new_name=None):
r = copy.deepcopy(self)
if new_name is not None:
r.name = new_name
return r
def set_metadata(self, metadata, replace=False):
if replace:
self._metadata = {}
self._metadata.update(metadata)
def set_tagging(self, tagging):
self._tagging = tagging
def set_storage_class(self, storage):
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
self._storage_class = storage
def set_acl(self, acl):
self.acl = acl
def append_to_value(self, value):
self.value += value
self.last_modified = datetime.datetime.utcnow()
self._etag = None # must recalculate etag
if self._is_versioned:
self._version_id += 1
else:
self._is_versioned = 0
def restore(self, days):
self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)
def increment_version(self):
self._version_id += 1
@property
def etag(self):
if self._etag is None:
value_md5 = hashlib.md5()
if isinstance(self.value, six.text_type):
value = self.value.encode("utf-8")
else:
value = self.value
value_md5.update(value)
self._etag = value_md5.hexdigest()
return '"{0}"'.format(self._etag)
@property
def last_modified_ISO8601(self):
return iso_8601_datetime_with_milliseconds(self.last_modified)
@property
def last_modified_RFC1123(self):
# Different datetime formats depending on how the key is obtained
# https://github.com/boto/boto/issues/466
return rfc_1123_datetime(self.last_modified)
@property
def metadata(self):
return self._metadata
@property
def tagging(self):
return self._tagging
@property
def response_dict(self):
res = {
'ETag': self.etag,
'last-modified': self.last_modified_RFC1123,
'content-length': str(len(self.value)),
}
if self._storage_class != 'STANDARD':
res['x-amz-storage-class'] = self._storage_class
if self._expiry is not None:
rhdr = 'ongoing-request="false", expiry-date="{0}"'
res['x-amz-restore'] = rhdr.format(self.expiry_date)
if self._is_versioned:
res['x-amz-version-id'] = str(self.version_id)
if self.website_redirect_location:
res['x-amz-website-redirect-location'] = self.website_redirect_location
return res
@property
def size(self):
return len(self.value)
@property
def storage_class(self):
return self._storage_class
@property
def expiry_date(self):
if self._expiry is not None:
return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")
class FakeMultipart(BaseModel):
def __init__(self, key_name, metadata):
self.key_name = key_name
self.metadata = metadata
self.parts = {}
self.partlist = [] # ordered list of part ID's
rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES))
self.id = rand_b64.decode('utf-8').replace('=', '').replace('+', '')
def complete(self, body):
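        # The assembled ETag follows the S3 multipart convention: the MD5 of the
        # concatenated binary MD5 digests of each part, suffixed with "-<part count>".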
decode_hex = codecs.getdecoder("hex_codec")
total = bytearray()
md5s = bytearray()
last = None
count = 0
for pn, etag in body:
part = self.parts.get(pn)
part_etag = None
if part is not None:
part_etag = part.etag.replace('"', '')
etag = etag.replace('"', '')
if part is None or part_etag != etag:
raise InvalidPart()
if last is not None and len(last.value) < UPLOAD_PART_MIN_SIZE:
raise EntityTooSmall()
md5s.extend(decode_hex(part_etag)[0])
total.extend(part.value)
last = part
count += 1
etag = hashlib.md5()
etag.update(bytes(md5s))
return total, "{0}-{1}".format(etag.hexdigest(), count)
def set_part(self, part_id, value):
if part_id < 1:
return
key = FakeKey(part_id, value)
self.parts[part_id] = key
if part_id not in self.partlist:
insort(self.partlist, part_id)
return key
def list_parts(self):
for part_id in self.partlist:
yield self.parts[part_id]
class FakeGrantee(BaseModel):
def __init__(self, id='', uri='', display_name=''):
self.id = id
self.uri = uri
self.display_name = display_name
def __eq__(self, other):
if not isinstance(other, FakeGrantee):
return False
return self.id == other.id and self.uri == other.uri and self.display_name == other.display_name
@property
def type(self):
return 'Group' if self.uri else 'CanonicalUser'
def __repr__(self):
return "FakeGrantee(display_name: '{}', id: '{}', uri: '{}')".format(self.display_name, self.id, self.uri)
ALL_USERS_GRANTEE = FakeGrantee(
uri='http://acs.amazonaws.com/groups/global/AllUsers')
AUTHENTICATED_USERS_GRANTEE = FakeGrantee(
uri='http://acs.amazonaws.com/groups/global/AuthenticatedUsers')
LOG_DELIVERY_GRANTEE = FakeGrantee(
uri='http://acs.amazonaws.com/groups/s3/LogDelivery')
PERMISSION_FULL_CONTROL = 'FULL_CONTROL'
PERMISSION_WRITE = 'WRITE'
PERMISSION_READ = 'READ'
PERMISSION_WRITE_ACP = 'WRITE_ACP'
PERMISSION_READ_ACP = 'READ_ACP'
class FakeGrant(BaseModel):
def __init__(self, grantees, permissions):
self.grantees = grantees
self.permissions = permissions
def __repr__(self):
return "FakeGrant(grantees: {}, permissions: {})".format(self.grantees, self.permissions)
class FakeAcl(BaseModel):
    def __init__(self, grants=None):
        # avoid a shared mutable default argument
        self.grants = grants if grants is not None else []
@property
def public_read(self):
for grant in self.grants:
if ALL_USERS_GRANTEE in grant.grantees:
if PERMISSION_READ in grant.permissions:
return True
if PERMISSION_FULL_CONTROL in grant.permissions:
return True
return False
def __repr__(self):
return "FakeAcl(grants: {})".format(self.grants)
def get_canned_acl(acl):
owner_grantee = FakeGrantee(
id='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a')
grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])]
if acl == 'private':
pass # no other permissions
elif acl == 'public-read':
grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ]))
elif acl == 'public-read-write':
grants.append(FakeGrant([ALL_USERS_GRANTEE], [
PERMISSION_READ, PERMISSION_WRITE]))
elif acl == 'authenticated-read':
grants.append(
FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ]))
elif acl == 'bucket-owner-read':
pass # TODO: bucket owner ACL
elif acl == 'bucket-owner-full-control':
pass # TODO: bucket owner ACL
elif acl == 'aws-exec-read':
pass # TODO: bucket owner, EC2 Read
elif acl == 'log-delivery-write':
grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [
PERMISSION_READ_ACP, PERMISSION_WRITE]))
else:
assert False, 'Unknown canned acl: %s' % (acl,)
return FakeAcl(grants=grants)
class FakeTagging(BaseModel):
def __init__(self, tag_set=None):
self.tag_set = tag_set or FakeTagSet()
class FakeTagSet(BaseModel):
def __init__(self, tags=None):
self.tags = tags or []
class FakeTag(BaseModel):
def __init__(self, key, value=None):
self.key = key
self.value = value
class LifecycleFilter(BaseModel):
def __init__(self, prefix=None, tag=None, and_filter=None):
self.prefix = prefix or ''
self.tag = tag
self.and_filter = and_filter
class LifecycleAndFilter(BaseModel):
def __init__(self, prefix=None, tags=None):
self.prefix = prefix or ''
self.tags = tags
class LifecycleRule(BaseModel):
def __init__(self, id=None, prefix=None, lc_filter=None, status=None, expiration_days=None,
expiration_date=None, transition_days=None, expired_object_delete_marker=None,
transition_date=None, storage_class=None):
self.id = id
self.prefix = prefix
self.filter = lc_filter
self.status = status
self.expiration_days = expiration_days
self.expiration_date = expiration_date
self.transition_days = transition_days
self.transition_date = transition_date
self.expired_object_delete_marker = expired_object_delete_marker
self.storage_class = storage_class
class CorsRule(BaseModel):
def __init__(self, allowed_methods, allowed_origins, allowed_headers=None, expose_headers=None,
max_age_seconds=None):
self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, six.string_types) else allowed_methods
self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, six.string_types) else allowed_origins
self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, six.string_types) else allowed_headers
self.exposed_headers = [expose_headers] if isinstance(expose_headers, six.string_types) else expose_headers
self.max_age_seconds = max_age_seconds
class Notification(BaseModel):
def __init__(self, arn, events, filters=None, id=None):
self.id = id if id else ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(50))
self.arn = arn
self.events = events
self.filters = filters if filters else {}
class NotificationConfiguration(BaseModel):
def __init__(self, topic=None, queue=None, cloud_function=None):
self.topic = [Notification(t["Topic"], t["Event"], filters=t.get("Filter"), id=t.get("Id")) for t in topic] \
if topic else []
self.queue = [Notification(q["Queue"], q["Event"], filters=q.get("Filter"), id=q.get("Id")) for q in queue] \
if queue else []
self.cloud_function = [Notification(c["CloudFunction"], c["Event"], filters=c.get("Filter"), id=c.get("Id"))
for c in cloud_function] if cloud_function else []
class FakeBucket(BaseModel):
def __init__(self, name, region_name):
self.name = name
self.region_name = region_name
self.keys = _VersionedKeyStore()
self.multiparts = {}
self.versioning_status = None
self.rules = []
self.policy = None
self.website_configuration = None
self.acl = get_canned_acl('private')
self.tags = FakeTagging()
self.cors = []
self.logging = {}
self.notification_configuration = None
@property
def location(self):
return self.region_name
@property
def is_versioned(self):
return self.versioning_status == 'Enabled'
def set_lifecycle(self, rules):
self.rules = []
for rule in rules:
expiration = rule.get('Expiration')
transition = rule.get('Transition')
eodm = None
if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None:
# This cannot be set if Date or Days is set:
if expiration.get("Days") or expiration.get("Date"):
raise MalformedXML()
eodm = expiration["ExpiredObjectDeleteMarker"]
# Pull out the filter:
lc_filter = None
if rule.get("Filter"):
# Can't have both `Filter` and `Prefix` (need to check for the presence of the key):
try:
if rule["Prefix"] or not rule["Prefix"]:
raise MalformedXML()
except KeyError:
pass
and_filter = None
if rule["Filter"].get("And"):
and_tags = []
if rule["Filter"]["And"].get("Tag"):
if not isinstance(rule["Filter"]["And"]["Tag"], list):
rule["Filter"]["And"]["Tag"] = [rule["Filter"]["And"]["Tag"]]
for t in rule["Filter"]["And"]["Tag"]:
and_tags.append(FakeTag(t["Key"], t.get("Value", '')))
and_filter = LifecycleAndFilter(prefix=rule["Filter"]["And"]["Prefix"], tags=and_tags)
filter_tag = None
if rule["Filter"].get("Tag"):
filter_tag = FakeTag(rule["Filter"]["Tag"]["Key"], rule["Filter"]["Tag"].get("Value", ''))
lc_filter = LifecycleFilter(prefix=rule["Filter"]["Prefix"], tag=filter_tag, and_filter=and_filter)
self.rules.append(LifecycleRule(
id=rule.get('ID'),
prefix=rule.get('Prefix'),
lc_filter=lc_filter,
status=rule['Status'],
expiration_days=expiration.get('Days') if expiration else None,
expiration_date=expiration.get('Date') if expiration else None,
expired_object_delete_marker=eodm,
transition_days=transition.get('Days') if transition else None,
transition_date=transition.get('Date') if transition else None,
storage_class=transition[
'StorageClass'] if transition else None,
))
def delete_lifecycle(self):
self.rules = []
def set_cors(self, rules):
from moto.s3.exceptions import InvalidRequest, MalformedXML
self.cors = []
if len(rules) > 100:
raise MalformedXML()
for rule in rules:
assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], six.string_types)
assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], six.string_types)
assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(rule.get("AllowedHeader", ""),
six.string_types)
assert isinstance(rule.get("ExposedHeader", []), list) or isinstance(rule.get("ExposedHeader", ""),
six.string_types)
assert isinstance(rule.get("MaxAgeSeconds", "0"), six.string_types)
if isinstance(rule["AllowedMethod"], six.string_types):
methods = [rule["AllowedMethod"]]
else:
methods = rule["AllowedMethod"]
for method in methods:
if method not in ["GET", "PUT", "HEAD", "POST", "DELETE"]:
raise InvalidRequest(method)
self.cors.append(CorsRule(
rule["AllowedMethod"],
rule["AllowedOrigin"],
rule.get("AllowedHeader"),
rule.get("ExposedHeader"),
rule.get("MaxAgeSecond")
))
def delete_cors(self):
self.cors = []
def set_tags(self, tagging):
self.tags = tagging
def delete_tags(self):
self.tags = FakeTagging()
@property
def tagging(self):
return self.tags
def set_logging(self, logging_config, bucket_backend):
if not logging_config:
self.logging = {}
return
from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted
# Target bucket must exist in the same account (assuming all moto buckets are in the same account):
if not bucket_backend.buckets.get(logging_config["TargetBucket"]):
raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.")
# Does the target bucket have the log-delivery WRITE and READ_ACP permissions?
write = read_acp = False
for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants:
# Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery
for grantee in grant.grantees:
if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery":
if "WRITE" in grant.permissions or "FULL_CONTROL" in grant.permissions:
write = True
if "READ_ACP" in grant.permissions or "FULL_CONTROL" in grant.permissions:
read_acp = True
break
if not write or not read_acp:
raise InvalidTargetBucketForLogging("You must give the log-delivery group WRITE and READ_ACP"
" permissions to the target bucket")
# Buckets must also exist within the same region:
if bucket_backend.buckets[logging_config["TargetBucket"]].region_name != self.region_name:
raise CrossLocationLoggingProhibitted()
# Checks pass -- set the logging config:
self.logging = logging_config
def set_notification_configuration(self, notification_config):
if not notification_config:
self.notification_configuration = None
return
self.notification_configuration = NotificationConfiguration(
topic=notification_config.get("TopicConfiguration"),
queue=notification_config.get("QueueConfiguration"),
cloud_function=notification_config.get("CloudFunctionConfiguration")
)
# Validate that the region is correct:
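        # (ARNs look like "arn:aws:sns:us-east-1:123456789012:my-topic", so the
        # region sits at index 3 when split on ":".)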
for thing in ["topic", "queue", "cloud_function"]:
for t in getattr(self.notification_configuration, thing):
region = t.arn.split(":")[3]
if region != self.region_name:
raise InvalidNotificationDestination()
def set_website_configuration(self, website_configuration):
self.website_configuration = website_configuration
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'DomainName':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "DomainName" ]"')
elif attribute_name == 'WebsiteURL':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"')
raise UnformattedGetAttTemplateException()
def set_acl(self, acl):
self.acl = acl
@property
def physical_resource_id(self):
return self.name
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name):
bucket = s3_backend.create_bucket(resource_name, region_name)
return bucket
class S3Backend(BaseBackend):
def __init__(self):
self.buckets = {}
def create_bucket(self, bucket_name, region_name):
if bucket_name in self.buckets:
raise BucketAlreadyExists(bucket=bucket_name)
new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
self.buckets[bucket_name] = new_bucket
return new_bucket
def get_all_buckets(self):
return self.buckets.values()
def get_bucket(self, bucket_name):
try:
return self.buckets[bucket_name]
except KeyError:
raise MissingBucket(bucket=bucket_name)
def delete_bucket(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if bucket.keys:
# Can't delete a bucket with keys
return False
else:
return self.buckets.pop(bucket_name)
def set_bucket_versioning(self, bucket_name, status):
self.get_bucket(bucket_name).versioning_status = status
def get_bucket_versioning(self, bucket_name):
return self.get_bucket(bucket_name).versioning_status
def get_bucket_latest_versions(self, bucket_name):
versions = self.get_bucket_versions(bucket_name)
maximum_version_per_key = {}
latest_versions = {}
for version in versions:
if isinstance(version, FakeDeleteMarker):
name = version.key.name
else:
name = version.name
version_id = version.version_id
maximum_version_per_key[name] = max(
version_id,
maximum_version_per_key.get(name, -1)
)
if version_id == maximum_version_per_key[name]:
latest_versions[name] = version_id
return latest_versions
def get_bucket_versions(self, bucket_name, delimiter=None,
encoding_type=None,
key_marker=None,
max_keys=None,
version_id_marker=None,
prefix=''):
bucket = self.get_bucket(bucket_name)
if any((delimiter, encoding_type, key_marker, version_id_marker)):
raise NotImplementedError(
"Called get_bucket_versions with some of delimiter, encoding_type, key_marker, version_id_marker")
return itertools.chain(*(l for key, l in bucket.keys.iterlists() if key.startswith(prefix)))
def get_bucket_policy(self, bucket_name):
return self.get_bucket(bucket_name).policy
def set_bucket_policy(self, bucket_name, policy):
self.get_bucket(bucket_name).policy = policy
def delete_bucket_policy(self, bucket_name, body):
bucket = self.get_bucket(bucket_name)
bucket.policy = None
def set_bucket_lifecycle(self, bucket_name, rules):
bucket = self.get_bucket(bucket_name)
bucket.set_lifecycle(rules)
def set_bucket_website_configuration(self, bucket_name, website_configuration):
bucket = self.get_bucket(bucket_name)
bucket.set_website_configuration(website_configuration)
def get_bucket_website_configuration(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.website_configuration
def set_key(self, bucket_name, key_name, value, storage=None, etag=None):
key_name = clean_key_name(key_name)
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
bucket = self.get_bucket(bucket_name)
old_key = bucket.keys.get(key_name, None)
if old_key is not None and bucket.is_versioned:
new_version_id = old_key._version_id + 1
else:
new_version_id = 0
new_key = FakeKey(
name=key_name,
value=value,
storage=storage,
etag=etag,
is_versioned=bucket.is_versioned,
version_id=new_version_id)
bucket.keys[key_name] = new_key
return new_key
def append_to_key(self, bucket_name, key_name, value):
key_name = clean_key_name(key_name)
key = self.get_key(bucket_name, key_name)
key.append_to_value(value)
return key
def get_key(self, bucket_name, key_name, version_id=None):
key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
key = None
if bucket:
if version_id is None:
if key_name in bucket.keys:
key = bucket.keys[key_name]
else:
for key_version in bucket.keys.getlist(key_name, default=[]):
if str(key_version.version_id) == str(version_id):
key = key_version
break
if isinstance(key, FakeKey):
return key
else:
return None
def set_key_tagging(self, bucket_name, key_name, tagging):
key = self.get_key(bucket_name, key_name)
if key is None:
raise MissingKey(key_name)
key.set_tagging(tagging)
return key
def put_bucket_tagging(self, bucket_name, tagging):
bucket = self.get_bucket(bucket_name)
bucket.set_tags(tagging)
def delete_bucket_tagging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_tags()
def put_bucket_cors(self, bucket_name, cors_rules):
bucket = self.get_bucket(bucket_name)
bucket.set_cors(cors_rules)
def put_bucket_logging(self, bucket_name, logging_config):
bucket = self.get_bucket(bucket_name)
bucket.set_logging(logging_config, self)
def delete_bucket_cors(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_cors()
def put_bucket_notification_configuration(self, bucket_name, notification_config):
bucket = self.get_bucket(bucket_name)
bucket.set_notification_configuration(notification_config)
def initiate_multipart(self, bucket_name, key_name, metadata):
bucket = self.get_bucket(bucket_name)
new_multipart = FakeMultipart(key_name, metadata)
bucket.multiparts[new_multipart.id] = new_multipart
return new_multipart
def complete_multipart(self, bucket_name, multipart_id, body):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete(body)
if value is None:
return
del bucket.multiparts[multipart_id]
key = self.set_key(bucket_name, multipart.key_name, value, etag=etag)
key.set_metadata(multipart.metadata)
return key
def cancel_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
del bucket.multiparts[multipart_id]
def list_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
return list(bucket.multiparts[multipart_id].list_parts())
def get_all_multiparts(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.multiparts
def set_part(self, bucket_name, multipart_id, part_id, value):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
return multipart.set_part(part_id, value)
def copy_part(self, dest_bucket_name, multipart_id, part_id,
src_bucket_name, src_key_name, start_byte, end_byte):
src_key_name = clean_key_name(src_key_name)
src_bucket = self.get_bucket(src_bucket_name)
dest_bucket = self.get_bucket(dest_bucket_name)
multipart = dest_bucket.multiparts[multipart_id]
src_value = src_bucket.keys[src_key_name].value
if start_byte is not None:
src_value = src_value[start_byte:end_byte + 1]
return multipart.set_part(part_id, src_value)
def prefix_query(self, bucket, prefix, delimiter):
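        # Example of the grouping below: with prefix "photos/" and delimiter "/",
        # a key "photos/2021/cat.jpg" is reported as the folder "photos/2021/",
        # while "photos/readme.txt" is returned as a regular key result.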
key_results = set()
folder_results = set()
if prefix:
for key_name, key in bucket.keys.items():
if key_name.startswith(prefix):
key_without_prefix = key_name.replace(prefix, "", 1)
if delimiter and delimiter in key_without_prefix:
# If delimiter, we need to split out folder_results
key_without_delimiter = key_without_prefix.split(delimiter)[
0]
folder_results.add("{0}{1}{2}".format(
prefix, key_without_delimiter, delimiter))
else:
key_results.add(key)
else:
for key_name, key in bucket.keys.items():
if delimiter and delimiter in key_name:
# If delimiter, we need to split out folder_results
folder_results.add(key_name.split(
delimiter)[0] + delimiter)
else:
key_results.add(key)
key_results = filter(lambda key: not isinstance(key, FakeDeleteMarker), key_results)
key_results = sorted(key_results, key=lambda key: key.name)
folder_results = [folder_name for folder_name in sorted(
folder_results, key=lambda key: key)]
return key_results, folder_results
def _set_delete_marker(self, bucket_name, key_name):
bucket = self.get_bucket(bucket_name)
bucket.keys[key_name] = FakeDeleteMarker(
key=bucket.keys[key_name]
)
def delete_key(self, bucket_name, key_name, version_id=None):
key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
try:
if not bucket.is_versioned:
bucket.keys.pop(key_name)
else:
if version_id is None:
self._set_delete_marker(bucket_name, key_name)
else:
if key_name not in bucket.keys:
raise KeyError
bucket.keys.setlist(
key_name,
[
key
for key in bucket.keys.getlist(key_name)
if str(key.version_id) != str(version_id)
]
)
if not bucket.keys.getlist(key_name):
bucket.keys.pop(key_name)
return True
except KeyError:
return False
def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name,
dest_key_name, storage=None, acl=None, src_version_id=None):
src_key_name = clean_key_name(src_key_name)
dest_key_name = clean_key_name(dest_key_name)
dest_bucket = self.get_bucket(dest_bucket_name)
key = self.get_key(src_bucket_name, src_key_name,
version_id=src_version_id)
if dest_key_name != src_key_name:
key = key.copy(dest_key_name)
dest_bucket.keys[dest_key_name] = key
# By this point, the destination key must exist, or KeyError
if dest_bucket.is_versioned:
dest_bucket.keys[dest_key_name].increment_version()
if storage is not None:
key.set_storage_class(storage)
if acl is not None:
key.set_acl(acl)
def set_bucket_acl(self, bucket_name, acl):
bucket = self.get_bucket(bucket_name)
bucket.set_acl(acl)
def get_bucket_acl(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.acl
s3_backend = S3Backend()
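# Minimal usage sketch (illustrative only; moto is normally exercised through the
# @mock_s3 decorator and boto3 rather than by touching the backend directly):
#   s3_backend.create_bucket('example-bucket', 'us-east-1')
#   s3_backend.set_key('example-bucket', 'hello.txt', b'hi there')
#   s3_backend.get_key('example-bucket', 'hello.txt').value  # -> b'hi there'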
|
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from cinderclient.v3 import client as cinder_client_v3
import mock
from requests_mock.contrib import fixture
import nova.conf
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
CONF = nova.conf.CONF
_image_metadata = {
'kernel_id': 'fake',
'ramdisk_id': 'fake'
}
_volume_id = "6edbc2f4-1507-44f8-ac0d-eed1d2608d38"
_instance_uuid = "f4fda93b-06e0-4743-8117-bc8bcecd651b"
_instance_uuid_2 = "f4fda93b-06e0-4743-8117-bc8bcecd651c"
_attachment_id = "3b4db356-253d-4fab-bfa0-e3626c0b8405"
_attachment_id_2 = "3b4db356-253d-4fab-bfa0-e3626c0b8406"
_device = "/dev/vdb"
_device_2 = "/dev/vdc"
_volume_attachment = \
[{"server_id": _instance_uuid,
"attachment_id": _attachment_id,
"host_name": "",
"volume_id": _volume_id,
"device": _device,
"id": _volume_id
}]
_volume_attachment_2 = _volume_attachment
_volume_attachment_2.append({"server_id": _instance_uuid_2,
"attachment_id": _attachment_id_2,
"host_name": "",
"volume_id": _volume_id,
"device": _device_2,
"id": _volume_id})
exp_volume_attachment = collections.OrderedDict()
exp_volume_attachment[_instance_uuid] = {'attachment_id': _attachment_id,
'mountpoint': _device}
exp_volume_attachment_2 = exp_volume_attachment
exp_volume_attachment_2[_instance_uuid_2] = {'attachment_id': _attachment_id_2,
'mountpoint': _device_2}
class BaseCinderTestCase(object):
def setUp(self):
super(BaseCinderTestCase, self).setUp()
cinder.reset_globals()
self.requests = self.useFixture(fixture.Fixture())
self.api = cinder.API()
self.context = context.RequestContext('username',
'project_id',
auth_token='token',
service_catalog=self.CATALOG)
def flags(self, *args, **kwargs):
super(BaseCinderTestCase, self).flags(*args, **kwargs)
cinder.reset_globals()
def create_client(self):
return cinder.cinderclient(self.context)
def test_context_with_catalog(self):
self.assertEqual(self.URL, self.create_client().client.get_endpoint())
def test_cinder_http_retries(self):
retries = 42
self.flags(http_retries=retries, group='cinder')
self.assertEqual(retries, self.create_client().client.connect_retries)
def test_cinder_api_insecure(self):
# The True/False negation is awkward, but better for the client
# to pass us insecure=True and we check verify_cert == False
self.flags(insecure=True, group='cinder')
self.assertFalse(self.create_client().client.session.verify)
def test_cinder_http_timeout(self):
timeout = 123
self.flags(timeout=timeout, group='cinder')
self.assertEqual(timeout, self.create_client().client.session.timeout)
def test_cinder_api_cacert_file(self):
cacert = "/etc/ssl/certs/ca-certificates.crt"
self.flags(cafile=cacert, group='cinder')
self.assertEqual(cacert, self.create_client().client.session.verify)
# NOTE(mriedem): This does not extend BaseCinderTestCase because Cinder v1 is
# no longer supported, this is just to test that trying to use v1 fails.
class CinderV1TestCase(test.NoDBTestCase):
@mock.patch('nova.volume.cinder.cinder_client.get_volume_api_from_url',
return_value='1')
def test_cinderclient_unsupported_v1(self, get_api_version):
"""Tests that we fail if trying to use Cinder v1."""
self.flags(catalog_info='volume:cinder:publicURL', group='cinder')
# setup mocks
get_endpoint = mock.Mock(
return_value='http://localhost:8776/v1/%(project_id)s')
fake_session = mock.Mock(get_endpoint=get_endpoint)
ctxt = context.get_context()
with mock.patch.object(cinder, '_SESSION', fake_session):
self.assertRaises(exception.UnsupportedCinderAPIVersion,
cinder.cinderclient, ctxt)
get_api_version.assert_called_once_with(get_endpoint.return_value)
# NOTE(mriedem): This does not extend BaseCinderTestCase because Cinder v2 is
# no longer supported, this is just to test that trying to use v2 fails.
class CinderV2TestCase(test.NoDBTestCase):
@mock.patch('nova.volume.cinder.cinder_client.get_volume_api_from_url',
return_value='2')
def test_cinderclient_unsupported_v2(self, get_api_version):
"""Tests that we fail if trying to use Cinder v2."""
self.flags(catalog_info='volumev2:cinderv2:publicURL', group='cinder')
# setup mocks
get_endpoint = mock.Mock(
return_value='http://localhost:8776/v2/%(project_id)s')
fake_session = mock.Mock(get_endpoint=get_endpoint)
ctxt = context.get_context()
with mock.patch.object(cinder, '_SESSION', fake_session):
self.assertRaises(exception.UnsupportedCinderAPIVersion,
cinder.cinderclient, ctxt)
get_api_version.assert_called_once_with(get_endpoint.return_value)
class CinderV3TestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v3 api."""
URL = "http://localhost:8776/v3/project_id"
CATALOG = [{
"type": "volumev3",
"name": "cinderv3",
"endpoints": [{"publicURL": URL}]
}]
def setUp(self):
super(CinderV3TestCase, self).setUp()
self.addCleanup(CONF.reset)
def create_client(self):
c = super(CinderV3TestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v3.Client)
self.assertEqual('3.0', c.api_version.get_string())
return c
def stub_volume(self, **kwargs):
volume = {
'name': None,
'description': None,
"attachments": [],
"availability_zone": "cinderv2",
"created_at": "2013-08-10T00:00:00.000000",
"id": _volume_id,
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true",
"multiattach": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v3/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v3/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
def test_volume_without_attachment(self):
v = self.stub_volume(id='1234')
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIsNone(volume.get('attachments'))
def test_volume_with_one_attachment(self):
v = self.stub_volume(id='1234', attachments=_volume_attachment)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('attachments', volume)
self.assertEqual(exp_volume_attachment, volume['attachments'])
def test_volume_with_two_attachments(self):
v = self.stub_volume(id='1234', attachments=_volume_attachment_2)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('attachments', volume)
self.assertEqual(exp_volume_attachment_2, volume['attachments'])
def test_create_client_with_no_service_name(self):
"""Tests that service_name is not required and not passed through
when constructing the cinder client Client object if it's not
configured.
"""
self.flags(catalog_info='volumev3::public', group='cinder')
with mock.patch('cinderclient.client.Client') as mock_client:
# We don't use self.create_client() because that has additional
# assertions that we don't care about in this test. We just care
# about how the client is created, not what is returned.
cinder.cinderclient(self.context)
self.assertEqual(1, len(mock_client.call_args_list))
call_kwargs = mock_client.call_args_list[0][1]
# Make sure service_type and interface are passed through.
self.assertEqual('volumev3', call_kwargs['service_type'])
self.assertEqual('public', call_kwargs['interface'])
# And service_name is not passed through.
self.assertNotIn('service_name', call_kwargs)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import re
import time
from math import cos, radians
import requests
from bs4 import BeautifulSoup
from io import BytesIO, StringIO
import astropy.units as u
import astropy.coordinates as coord
import astropy.io.votable as votable
from ..query import QueryWithLogin
from ..exceptions import InvalidQueryError, TimeoutError, NoResultsWarning
from ..utils import commons
from ..exceptions import TableParseError
__all__ = ['BaseWFAUClass', 'clean_catalog']
class BaseWFAUClass(QueryWithLogin):
"""
    The BaseWFAUClass. This is intended to be inherited by other classes
that implement specific interfaces to Wide-Field Astronomy Unit
(http://www.roe.ac.uk/ifa/wfau/) archives
"""
BASE_URL = ""
LOGIN_URL = BASE_URL + "DBLogin"
IMAGE_URL = BASE_URL + "GetImage"
ARCHIVE_URL = BASE_URL + "ImageList"
REGION_URL = BASE_URL + "WSASQL"
CROSSID_URL = BASE_URL + "CrossID"
TIMEOUT = ""
def __init__(self, username=None, password=None, community=None,
database='', programme_id='all'):
"""
The BaseWFAUClass __init__ is meant to be overwritten
"""
super(BaseWFAUClass, self).__init__()
self.database = database
self.programme_id = programme_id
self.session = None
if username is None or password is None or community is None:
pass
else:
self.login(username, password, community)
def _login(self, username, password, community):
"""
Login to non-public data as a known user.
Parameters
----------
username : str
password : str
community : str
"""
# Construct cookie holder, URL opener, and retrieve login page
self.session = requests.session()
credentials = {'user': username, 'passwd': password,
'community': ' ', 'community2': community}
response = self.session.post(self.LOGIN_URL, data=credentials)
if not response.ok:
self.session = None
response.raise_for_status()
if 'FAILED to log in' in response.text:
self.session = None
raise Exception("Unable to log in with your given credentials.\n"
"Please try again.\n Note that you can continue "
"to access public data without logging in.\n")
def logged_in(self):
"""
Determine whether currently logged in.
"""
if self.session is None:
return False
for cookie in self.session.cookies:
if cookie.is_expired():
return False
return True
def _args_to_payload(self, *args, **kwargs):
request_payload = {}
request_payload['database'] = kwargs.get('database', self.database)
programme_id = kwargs.get('programme_id', self.programme_id)
request_payload['programmeID'] = self._verify_programme_id(
programme_id, query_type=kwargs['query_type'])
sys = self._parse_system(kwargs.get('system'))
request_payload['sys'] = sys
if sys == 'J':
C = commons.parse_coordinates(args[0]).transform_to(coord.ICRS)
request_payload['ra'] = C.ra.degree
request_payload['dec'] = C.dec.degree
elif sys == 'G':
C = commons.parse_coordinates(args[0]).transform_to(coord.Galactic)
request_payload['ra'] = C.l.degree
request_payload['dec'] = C.b.degree
return request_payload
def _verify_programme_id(self, pid, query_type='catalog'):
"""
Verify the programme ID is valid for the query being executed.
Parameters
----------
pid : int or str
The programme ID, either an integer (i.e., the # that will get passed
to the URL) or a string using the three-letter acronym for the
programme or its long name
Returns
-------
pid : int
Returns the integer version of the programme ID
Raises
------
ValueError
If the pid is 'all' and the query type is a catalog. You can query
all surveys for images, but not all catalogs.
"""
if pid == 'all' and query_type == 'image':
return 'all'
elif pid == 'all' and query_type == 'catalog':
raise ValueError(
"Cannot query all catalogs at once. Valid catalogs are: {0}.\n"
"Change programmeID to one of these."
.format(",".join(self.programmes_short.keys())))
elif pid in self.programmes_long:
return self.programmes_long[pid]
elif pid in self.programmes_short:
return self.programmes_short[pid]
elif query_type != 'image':
raise ValueError("programme_id {0} not recognized".format(pid))
def _parse_system(self, system):
if system is None:
return 'J'
elif system.lower() in ('g', 'gal', 'galactic'):
return 'G'
elif system.lower() in ('j', 'j2000', 'celestical', 'radec'):
return 'J'
def get_images(self, coordinates, waveband='all', frame_type='stack',
image_width=1 * u.arcmin, image_height=None, radius=None,
database=None, programme_id=None,
verbose=True, get_query_payload=False,
show_progress=True):
"""
Get an image around a target/ coordinates from a WFAU catalog.
Parameters
----------
coordinates : str or `astropy.coordinates` object
The target around which to search. It may be specified as a
string in which case it is resolved using online services or as
the appropriate `astropy.coordinates` object. ICRS coordinates
may also be entered as strings as specified in the
`astropy.coordinates` module.
waveband : str
The color filter to download. Must be one of ``'all'``, ``'J'``,
            ``'H'``, ``'K'``, ``'H2'``, ``'Z'``, ``'Y'``, ``'Br'``.
frame_type : str
The type of image. Must be one of ``'stack'``, ``'normal'``,
``'interleave'``, ``'deep_stack'``, ``'confidence'``,
            ``'difference'``, ``'leavstack'``, ``'all'``.
image_width : str or `~astropy.units.Quantity` object, optional
The image size (along X). Cannot exceed 15 arcmin. If missing,
defaults to 1 arcmin.
image_height : str or `~astropy.units.Quantity` object, optional
The image size (along Y). Cannot exceed 90 arcmin. If missing,
same as image_width.
radius : str or `~astropy.units.Quantity` object, optional
The string must be parsable by `~astropy.coordinates.Angle`. The
appropriate `~astropy.units.Quantity` object from `astropy.units`
may also be used. When missing only image around the given position
rather than multi-frames are retrieved.
programme_id : str
The survey or programme in which to search for.
database : str
The WFAU database to use.
verbose : bool
Defaults to `True`. When `True` prints additional messages.
get_query_payload : bool, optional
If `True` then returns the dictionary sent as the HTTP request.
Defaults to `False`.
Returns
-------
list : A list of `~astropy.io.fits.HDUList` objects.
"""
readable_objs = self.get_images_async(
coordinates, waveband=waveband, frame_type=frame_type,
image_width=image_width, image_height=image_height,
database=database, programme_id=programme_id, radius=radius,
verbose=verbose, get_query_payload=get_query_payload,
show_progress=show_progress)
if get_query_payload:
return readable_objs
return [obj.get_fits() for obj in readable_objs]
def get_images_async(self, coordinates, waveband='all', frame_type='stack',
image_width=1 * u.arcmin, image_height=None,
radius=None, database=None,
programme_id=None, verbose=True,
get_query_payload=False,
show_progress=True):
"""
Serves the same purpose as
`~astroquery.wfau.BaseWFAUClass.get_images` but returns a list of
file handlers to remote files.
Parameters
----------
coordinates : str or `astropy.coordinates` object
The target around which to search. It may be specified as a
string in which case it is resolved using online services or as
the appropriate `astropy.coordinates` object. ICRS coordinates
may also be entered as strings as specified in the
`astropy.coordinates` module.
waveband : str
The color filter to download. Must be one of ``'all'``, ``'J'``,
            ``'H'``, ``'K'``, ``'H2'``, ``'Z'``, ``'Y'``, ``'Br'``.
frame_type : str
The type of image. Must be one of ``'stack'``, ``'normal'``,
``'interleave'``, ``'deep_stack'``, ``'confidence'``,
            ``'difference'``, ``'leavstack'``, ``'all'``.
image_width : str or `~astropy.units.Quantity` object, optional
The image size (along X). Cannot exceed 15 arcmin. If missing,
defaults to 1 arcmin.
image_height : str or `~astropy.units.Quantity` object, optional
The image size (along Y). Cannot exceed 90 arcmin. If missing,
same as image_width.
radius : str or `~astropy.units.Quantity` object, optional
The string must be parsable by `~astropy.coordinates.Angle`. The
appropriate `~astropy.units.Quantity` object from `astropy.units`
may also be used. When missing only image around the given position
rather than multi-frames are retrieved.
programme_id : str
The survey or programme in which to search for. See
`list_catalogs`.
database : str
The WFAU database to use.
verbose : bool
Defaults to `True`. When `True` prints additional messages.
get_query_payload : bool, optional
If `True` then returns the dictionary sent as the HTTP request.
Defaults to `False`.
Returns
-------
list : list
A list of context-managers that yield readable file-like objects.
"""
if database is None:
database = self.database
if programme_id is None:
programme_id = self.programme_id
image_urls = self.get_image_list(coordinates, waveband=waveband,
frame_type=frame_type,
image_width=image_width,
image_height=image_height,
database=database, radius=radius,
programme_id=programme_id,
get_query_payload=get_query_payload)
if get_query_payload:
return image_urls
if verbose:
print("Found {num} targets".format(num=len(image_urls)))
return [commons.FileContainer(url, encoding='binary',
remote_timeout=self.TIMEOUT,
show_progress=show_progress)
for url in image_urls]
def get_image_list(self, coordinates, waveband='all', frame_type='stack',
image_width=1 * u.arcmin, image_height=None,
radius=None, database=None,
programme_id=None, get_query_payload=False):
"""
Function that returns a list of urls from which to download the FITS
images.
Parameters
----------
coordinates : str or `astropy.coordinates` object
The target around which to search. It may be specified as a
string in which case it is resolved using online services or as
the appropriate `astropy.coordinates` object. ICRS coordinates
may also be entered as strings as specified in the
`astropy.coordinates` module.
waveband : str
The color filter to download. Must be one of ``'all'``, ``'J'``,
            ``'H'``, ``'K'``, ``'H2'``, ``'Z'``, ``'Y'``, ``'Br'``.
frame_type : str
The type of image. Must be one of ``'stack'``, ``'normal'``,
``'interleave'``, ``'deep_stack'``, ``'confidence'``,
            ``'difference'``, ``'leavstack'``, ``'all'``.
image_width : str or `~astropy.units.Quantity` object, optional
The image size (along X). Cannot exceed 15 arcmin. If missing,
defaults to 1 arcmin.
image_height : str or `~astropy.units.Quantity` object, optional
The image size (along Y). Cannot exceed 90 arcmin. If missing,
same as image_width.
radius : str or `~astropy.units.Quantity` object, optional
The string must be parsable by `~astropy.coordinates.Angle`. The
appropriate `~astropy.units.Quantity` object from
`astropy.units` may also be used. When missing only image around
the given position rather than multi-frames are retrieved.
programme_id : str
The survey or programme in which to search for. See
`list_catalogs`.
database : str
The WFAU database to use.
verbose : bool
Defaults to `True`. When `True` prints additional messages.
get_query_payload : bool, optional
If `True` then returns the dictionary sent as the HTTP request.
Defaults to `False`.
Returns
-------
url_list : list of image urls
"""
if frame_type not in self.frame_types:
raise ValueError("Invalid frame type. Valid frame types are: {!s}"
.format(self.frame_types))
if waveband not in self.filters:
raise ValueError("Invalid waveband. Valid wavebands are: {!s}"
.format(self.filters.keys()))
if database is None:
database = self.database
if programme_id is None:
programme_id = self.programme_id
request_payload = self._args_to_payload(coordinates, database=database,
programme_id=programme_id,
query_type='image')
request_payload['filterID'] = self.filters[waveband]
request_payload['obsType'] = 'object'
request_payload['frameType'] = self.frame_types[frame_type]
request_payload['mfid'] = ''
if radius is None:
request_payload['xsize'] = _parse_dimension(image_width)
if image_height is None:
request_payload['ysize'] = _parse_dimension(image_width)
else:
request_payload['ysize'] = _parse_dimension(image_height)
query_url = self.IMAGE_URL
else:
query_url = self.ARCHIVE_URL
ra = request_payload.pop('ra')
dec = request_payload.pop('dec')
radius = coord.Angle(radius).degree
del request_payload['sys']
request_payload['userSelect'] = 'default'
request_payload['minRA'] = str(
round(ra - radius / cos(radians(dec)), 2))
request_payload['maxRA'] = str(
round(ra + radius / cos(radians(dec)), 2))
request_payload['formatRA'] = 'degrees'
request_payload['minDec'] = str(dec - radius)
request_payload['maxDec'] = str(dec + radius)
request_payload['formatDec'] = 'degrees'
request_payload['startDay'] = 0
request_payload['startMonth'] = 0
request_payload['startYear'] = 0
request_payload['endDay'] = 0
request_payload['endMonth'] = 0
request_payload['endYear'] = 0
request_payload['dep'] = 0
request_payload['lmfid'] = ''
request_payload['fsid'] = ''
request_payload['rows'] = 1000
if get_query_payload:
return request_payload
response = self._wfau_send_request(query_url, request_payload)
response = self._check_page(response.url, "row")
image_urls = self.extract_urls(response.text)
# different links for radius queries and simple ones
if radius is not None:
image_urls = [link for link in image_urls if
('fits_download' in link and '_cat.fits'
not in link and '_two.fit' not in link)]
else:
image_urls = [link.replace("getImage", "getFImage")
for link in image_urls]
return image_urls
def extract_urls(self, html_in):
"""
Helper function that uses regexps to extract the image urls from the
given HTML.
Parameters
----------
html_in : str
source from which the urls are to be extracted.
Returns
-------
links : list
The list of URLS extracted from the input.
"""
# Parse html input for links
ahref = re.compile(r'href="([a-zA-Z0-9_\.&\?=%/:-]+)"')
links = ahref.findall(html_in)
return links
def query_region(self, coordinates, radius=1 * u.arcmin,
programme_id=None, database=None,
verbose=False, get_query_payload=False, system='J2000',
attributes=['default'], constraints=''):
"""
Used to query a region around a known identifier or given
coordinates from the catalog.
Parameters
----------
coordinates : str or `astropy.coordinates` object
The target around which to search. It may be specified as a string
in which case it is resolved using online services or as the
appropriate `astropy.coordinates` object. ICRS coordinates may also
be entered as strings as specified in the `astropy.coordinates`
module.
radius : str or `~astropy.units.Quantity` object, optional
The string must be parsable by `~astropy.coordinates.Angle`. The
appropriate `~astropy.units.Quantity` object from
`astropy.units` may also be used. When missing defaults to 1
arcmin. Cannot exceed 90 arcmin.
programme_id : str
The survey or programme in which to search for. See
`list_catalogs`.
database : str
The WFAU database to use.
verbose : bool, optional.
When set to `True` displays warnings if the returned VOTable does
not conform to the standard. Defaults to `False`.
get_query_payload : bool, optional
If `True` then returns the dictionary sent as the HTTP request.
Defaults to `False`.
system : 'J2000' or 'Galactic'
The system in which to perform the query. Can affect the output
data columns.
attributes : list, optional.
Attributes to select from the table. See, e.g.,
http://horus.roe.ac.uk/vsa/crossID_notes.html
constraints : str, optional
SQL constraints to the search. Default is empty (no constrains
applied).
Returns
-------
result : `~astropy.table.Table`
Query result table.
"""
if database is None:
database = self.database
if programme_id is None:
if self.programme_id != 'all':
programme_id = self.programme_id
else:
raise ValueError("Must specify a programme_id for region queries")
response = self.query_region_async(coordinates, radius=radius,
programme_id=programme_id,
database=database,
get_query_payload=get_query_payload,
system=system, attributes=attributes,
constraints=constraints)
if get_query_payload:
return response
result = self._parse_result(response, verbose=verbose)
return result
def query_region_async(self, coordinates, radius=1 * u.arcmin,
programme_id=None,
database=None, get_query_payload=False,
system='J2000', attributes=['default'],
constraints=''):
"""
Serves the same purpose as `query_region`. But
returns the raw HTTP response rather than the parsed result.
Parameters
----------
coordinates : str or `astropy.coordinates` object
The target around which to search. It may be specified as a
string in which case it is resolved using online services or as
the appropriate `astropy.coordinates` object. ICRS coordinates
may also be entered as strings as specified in the
`astropy.coordinates` module.
radius : str or `~astropy.units.Quantity` object, optional
The string must be parsable by `~astropy.coordinates.Angle`. The
appropriate `~astropy.units.Quantity` object from
`astropy.units` may also be used. When missing defaults to 1
arcmin. Cannot exceed 90 arcmin.
programme_id : str
The survey or programme in which to search for. See
`list_catalogs`.
database : str
The WFAU database to use.
get_query_payload : bool, optional
If `True` then returns the dictionary sent as the HTTP request.
Defaults to `False`.
attributes : list, optional.
Attributes to select from the table. See, e.g.,
http://horus.roe.ac.uk/vsa/crossID_notes.html
constraints : str, optional
SQL constraints to the search. Default is empty (no constrains
applied).
Returns
-------
response : `requests.Response`
The HTTP response returned from the service.
"""
if database is None:
database = self.database
if programme_id is None:
if self.programme_id != 'all':
programme_id = self.programme_id
else:
raise ValueError("Must specify a programme_id for region queries")
request_payload = self._args_to_payload(coordinates,
programme_id=programme_id,
database=database,
system=system,
query_type='catalog')
request_payload['radius'] = _parse_dimension(radius)
request_payload['from'] = 'source'
request_payload['formaction'] = 'region'
request_payload['xSize'] = ''
request_payload['ySize'] = ''
request_payload['boxAlignment'] = 'RADec'
request_payload['emailAddress'] = ''
request_payload['format'] = 'VOT'
request_payload['compress'] = 'NONE'
request_payload['rows'] = 1
request_payload['select'] = ','.join(attributes)
request_payload['where'] = constraints
# for some reason, this is required on the VISTA website
if self.archive is not None:
request_payload['archive'] = self.archive
if get_query_payload:
return request_payload
response = self._wfau_send_request(self.REGION_URL, request_payload)
response = self._check_page(response.url, "query finished")
return response
def _parse_result(self, response, verbose=False):
"""
Parses the raw HTTP response and returns it as a
`~astropy.table.Table`.
Parameters
----------
response : `requests.Response`
The HTTP response object
verbose : bool, optional
Defaults to `False`. If `True` it displays warnings whenever the
VOtable returned from the service doesn't conform to the standard.
Returns
-------
table : `~astropy.table.Table`
"""
table_links = self.extract_urls(response.text)
# keep only one link that is not a webstart
if len(table_links) == 0:
raise Exception("No VOTable found on returned webpage!")
table_link = [link for link in table_links if "8080" not in link][0]
with commons.get_readable_fileobj(table_link) as flo:
content = flo.read()
if not verbose:
commons.suppress_vo_warnings()
try:
io_obj = BytesIO(content.encode('utf-8'))
parsed_table = votable.parse(io_obj, verify='warn')
first_table = parsed_table.get_first_table()
table = first_table.to_table()
if len(table) == 0:
warnings.warn("Query returned no results, so the table will "
"be empty", NoResultsWarning)
return table
except Exception as ex:
self.response = content
self.table_parse_error = ex
            raise TableParseError("Failed to parse WFAU votable! The raw "
                                  "response can be found in self.response, "
                                  "and the error in self.table_parse_error. "
                                  "Exception: " + str(self.table_parse_error))
def list_catalogs(self, style='short'):
"""
Returns a list of available catalogs in WFAU.
These can be used as ``programme_id`` in queries.
Parameters
----------
style : str, optional
Must be one of ``'short'``, ``'long'``. Defaults to ``'short'``.
Determines whether to print long names or abbreviations for
catalogs.
Returns
-------
list : list containing catalog name strings in long or short style.
"""
if style == 'short':
return list(self.programmes_short.keys())
elif style == 'long':
return list(self.programmes_long.keys())
else:
warnings.warn("Style must be one of 'long', 'short'.\n"
"Returning catalog list in short format.\n")
return list(self.programmes_short.keys())
def _get_databases(self):
if self.logged_in():
response = self.session.get(url="/".join([self.BASE_URL,
self.IMAGE_FORM]))
else:
response = requests.get(url="/".join([self.BASE_URL,
self.IMAGE_FORM]))
root = BeautifulSoup(response.content, features='html5lib')
databases = [xrf.attrs['value'] for xrf in
root.find('select').findAll('option')]
return databases
def list_databases(self):
"""
List the databases available from the WFAU archive.
"""
self.databases = set(self.all_databases + tuple(self._get_databases()))
return self.databases
def _wfau_send_request(self, url, request_payload):
"""
Helper function that sends the query request via a session or simple
HTTP GET request.
Parameters
----------
url : str
The url to send the request to.
request_payload : dict
The dict of parameters for the GET request
Returns
-------
response : `requests.Response` object
The response for the HTTP GET request
"""
if hasattr(self, 'session') and self.logged_in():
response = self.session.get(url, params=request_payload,
timeout=self.TIMEOUT)
else:
response = self._request("GET", url=url, params=request_payload,
timeout=self.TIMEOUT)
return response
def _check_page(self, url, keyword, wait_time=1, max_attempts=30):
page_loaded = False
while not page_loaded and max_attempts > 0:
if self.logged_in():
response = self.session.get(url)
else:
response = requests.get(url=url)
self.response = response
content = response.text
if re.search("error", content, re.IGNORECASE):
raise InvalidQueryError(
"Service returned with an error! "
"Check self.response for more information.")
elif re.search(keyword, content, re.IGNORECASE):
page_loaded = True
max_attempts -= 1
# wait for wait_time seconds before checking again
time.sleep(wait_time)
if page_loaded is False:
raise TimeoutError("Page did not load.")
return response
def query_cross_id_async(self, coordinates, radius=1*u.arcsec,
programme_id=None, database=None, table="source",
constraints="", attributes='default',
pairing='all', system='J2000',
get_query_payload=False,
):
"""
Query the crossID server
Parameters
----------
coordinates : astropy.SkyCoord
An array of one or more astropy SkyCoord objects specifying the
objects to crossmatch against.
radius : str or `~astropy.units.Quantity` object, optional
The string must be parsable by `~astropy.coordinates.Angle`. The
appropriate `~astropy.units.Quantity` object from
`astropy.units` may also be used. When missing defaults to 1
arcsec.
programme_id : str
The survey or programme in which to search for. See
`list_catalogs`.
database : str
The WFAU database to use.
table : str
The table ID, one of: "source", "detection", "synopticSource"
constraints : str
SQL constraints. If 'source' is selected, this will be expanded
automatically
attributes : str
Additional attributes to select from the table. See, e.g.,
http://horus.roe.ac.uk/vsa/crossID_notes.html
system : 'J2000' or 'Galactic'
The system in which to perform the query. Can affect the output
data columns.
get_query_payload : bool, optional
If `True` then returns the dictionary sent as the HTTP request.
Defaults to `False`.
"""
if table == "source":
constraints += "(priOrSec<=0 OR priOrSec=frameSetID)"
if database is None:
database = self.database
if programme_id is None:
if self.programme_id != 'all':
programme_id = self.programme_id
else:
raise ValueError("Must specify a programme_id")
request_payload = self._args_to_payload(coordinates,
programme_id=programme_id,
database=database,
system=system,
query_type='catalog')
request_payload['radius'] = _parse_dimension(radius)
request_payload['from'] = 'source'
request_payload['formaction'] = 'region'
request_payload['xSize'] = ''
request_payload['ySize'] = ''
request_payload['boxAlignment'] = 'RADec'
request_payload['emailAddress'] = ''
request_payload['format'] = 'VOT'
request_payload['compress'] = 'NONE'
request_payload['rows'] = 1
request_payload['select'] = 'default'
request_payload['where'] = ''
request_payload['disp'] = ''
request_payload['baseTable'] = table
request_payload['whereClause'] = constraints
request_payload['qType'] = 'form'
request_payload['selectList'] = attributes
request_payload['uploadFile'] = 'file.txt'
if pairing not in ('nearest', 'all'):
raise ValueError("pairing must be one of 'nearest' or 'all'")
request_payload['nearest'] = 0 if pairing == 'nearest' else 1
# for some reason, this is required on the VISTA website
if self.archive is not None:
request_payload['archive'] = self.archive
if get_query_payload:
return request_payload
fh = StringIO()
assert len(coordinates) > 0
for crd in coordinates:
fh.write("{0} {1}\n".format(crd.ra.deg, crd.dec.deg))
fh.seek(0)
if hasattr(self, 'session') and self.logged_in():
response = self.session.post(self.CROSSID_URL,
params=request_payload,
files={'file.txt': fh},
timeout=self.TIMEOUT)
else:
response = self._request("POST", url=self.CROSSID_URL,
params=request_payload,
files={'file.txt': fh},
timeout=self.TIMEOUT)
raise NotImplementedError("It appears we haven't implemented the file "
"upload correctly. Help is needed.")
# response = self._check_page(response.url, "query finished")
return response
def query_cross_id(self, *args, **kwargs):
"""
See `query_cross_id_async`
"""
get_query_payload = kwargs.get('get_query_payload', False)
verbose = kwargs.get('verbose', False)
response = self.query_cross_id_async(*args, **kwargs)
if get_query_payload:
return response
result = self._parse_result(response, verbose=verbose)
return result
def clean_catalog(wfau_catalog, clean_band='K_1', badclass=-9999,
maxerrbits=41, minerrbits=0, maxpperrbits=60):
"""
Attempt to remove 'bad' entries in a catalog.
Parameters
----------
wfau_catalog : `~astropy.io.fits.BinTableHDU`
A FITS binary table instance from the WFAU survey.
clean_band : ``'K_1'``, ``'K_2'``, ``'J'``, ``'H'``
The band to use for bad photometry flagging.
badclass : int
Class to exclude.
minerrbits : int
maxerrbits : int
Inside this range is the accepted number of error bits.
maxpperrbits : int
Exclude this type of error bit.
Examples
--------
"""
band = clean_band
mask = ((wfau_catalog[band + 'ERRBITS'] <= maxerrbits) *
(wfau_catalog[band + 'ERRBITS'] >= minerrbits) *
((wfau_catalog['PRIORSEC'] == wfau_catalog['FRAMESETID']) +
(wfau_catalog['PRIORSEC'] == 0)) *
(wfau_catalog[band + 'PPERRBITS'] < maxpperrbits)
)
if band + 'CLASS' in wfau_catalog.colnames:
mask *= (wfau_catalog[band + 'CLASS'] != badclass)
elif 'mergedClass' in wfau_catalog.colnames:
mask *= (wfau_catalog['mergedClass'] != badclass)
return wfau_catalog.data[mask]
def _parse_dimension(dim):
"""
Parses the radius and returns it in the format expected by WFAU.
Parameters
----------
dim : str, `~astropy.units.Quantity`
Returns
-------
dim_in_min : float
The value of the radius in arcminutes.
"""
if (isinstance(dim, u.Quantity) and
dim.unit in u.deg.find_equivalent_units()):
dim_in_min = dim.to(u.arcmin).value
# otherwise must be an Angle or be specified in hours...
else:
try:
new_dim = coord.Angle(dim).degree
dim_in_min = u.Quantity(
value=new_dim, unit=u.deg).to(u.arcmin).value
except (u.UnitsError, coord.errors.UnitsError, AttributeError):
raise u.UnitsError("Dimension not in proper units")
return dim_in_min
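# A small, hedged illustration of _parse_dimension (the values below are
# illustrative only, not taken from any archive):
#
#     >>> import astropy.units as u
#     >>> _parse_dimension(2 * u.arcmin)
#     2.0
#     >>> _parse_dimension(0.5 * u.deg)
#     30.0
#
# Strings understood by `astropy.coordinates.Angle` (e.g. '0.1 deg') take the
# fallback branch and are likewise converted to arcminutes.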
|
|
from __future__ import absolute_import
import logging
import datetime
from sentry_sdk.hub import Hub
from sentry_sdk.utils import (
to_string,
event_from_exception,
current_stacktrace,
capture_internal_exceptions,
)
from sentry_sdk.integrations import Integration
from sentry_sdk._compat import iteritems
from sentry_sdk._types import MYPY
if MYPY:
from logging import LogRecord
from typing import Any
from typing import Dict
from typing import Optional
DEFAULT_LEVEL = logging.INFO
DEFAULT_EVENT_LEVEL = logging.ERROR
_IGNORED_LOGGERS = set(["sentry_sdk.errors"])
def ignore_logger(
name # type: str
):
# type: (...) -> None
"""This disables recording (both in breadcrumbs and as events) calls to
a logger of a specific name. Among other uses, many of our integrations
use this to prevent their actions being recorded as breadcrumbs. Exposed
to users as a way to quiet spammy loggers.
:param name: The name of the logger to ignore (same string you would pass to ``logging.getLogger``).
"""
_IGNORED_LOGGERS.add(name)
class LoggingIntegration(Integration):
identifier = "logging"
def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):
# type: (Optional[int], Optional[int]) -> None
self._handler = None
self._breadcrumb_handler = None
if level is not None:
self._breadcrumb_handler = BreadcrumbHandler(level=level)
if event_level is not None:
self._handler = EventHandler(level=event_level)
def _handle_record(self, record):
# type: (LogRecord) -> None
if self._handler is not None and record.levelno >= self._handler.level:
self._handler.handle(record)
if (
self._breadcrumb_handler is not None
and record.levelno >= self._breadcrumb_handler.level
):
self._breadcrumb_handler.handle(record)
@staticmethod
def setup_once():
# type: () -> None
old_callhandlers = logging.Logger.callHandlers # type: ignore
def sentry_patched_callhandlers(self, record):
# type: (Any, LogRecord) -> Any
try:
return old_callhandlers(self, record)
finally:
# This check is done twice, once also here before we even get
# the integration. Otherwise we have a high chance of getting
# into a recursion error when the integration is resolved
# (this also is slower).
if record.name not in _IGNORED_LOGGERS:
integration = Hub.current.get_integration(LoggingIntegration)
if integration is not None:
integration._handle_record(record)
logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore
def _can_record(record):
# type: (LogRecord) -> bool
return record.name not in _IGNORED_LOGGERS
def _breadcrumb_from_record(record):
# type: (LogRecord) -> Dict[str, Any]
return {
"ty": "log",
"level": _logging_to_event_level(record.levelname),
"category": record.name,
"message": record.message,
"timestamp": datetime.datetime.utcfromtimestamp(record.created),
"data": _extra_from_record(record),
}
def _logging_to_event_level(levelname):
# type: (str) -> str
return {"critical": "fatal"}.get(levelname.lower(), levelname.lower())
COMMON_RECORD_ATTRS = frozenset(
(
"args",
"created",
"exc_info",
"exc_text",
"filename",
"funcName",
"levelname",
"levelno",
"linenno",
"lineno",
"message",
"module",
"msecs",
"msg",
"name",
"pathname",
"process",
"processName",
"relativeCreated",
"stack",
"tags",
"thread",
"threadName",
"stack_info",
)
)
def _extra_from_record(record):
# type: (LogRecord) -> Dict[str, None]
return {
k: v
for k, v in iteritems(vars(record))
if k not in COMMON_RECORD_ATTRS
and (not isinstance(k, str) or not k.startswith("_"))
}
class EventHandler(logging.Handler, object):
"""
A logging handler that emits Sentry events for each log record
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
hub = Hub.current
if hub.client is None:
return
client_options = hub.client.options
# exc_info might be None or (None, None, None)
if record.exc_info is not None and record.exc_info[0] is not None:
event, hint = event_from_exception(
record.exc_info,
client_options=client_options,
mechanism={"type": "logging", "handled": True},
)
elif record.exc_info and record.exc_info[0] is None:
event = {}
hint = {}
with capture_internal_exceptions():
event["threads"] = {
"values": [
{
"stacktrace": current_stacktrace(
client_options["with_locals"]
),
"crashed": False,
"current": True,
}
]
}
else:
event = {}
hint = {}
hint["log_record"] = record
event["level"] = _logging_to_event_level(record.levelname)
event["logger"] = record.name
event["logentry"] = {"message": to_string(record.msg), "params": record.args}
event["extra"] = _extra_from_record(record)
hub.capture_event(event, hint=hint)
# Legacy name
SentryHandler = EventHandler
class BreadcrumbHandler(logging.Handler, object):
"""
A logging handler that records breadcrumbs for each log record.
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
Hub.current.add_breadcrumb(
_breadcrumb_from_record(record), hint={"log_record": record}
)
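if __name__ == "__main__":
    # A minimal, hedged demo of wiring this integration up by hand. The DSN and
    # logger name are placeholders; by default the SDK enables
    # LoggingIntegration automatically, so passing it explicitly is only needed
    # to customize the breadcrumb/event levels.
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        integrations=[
            LoggingIntegration(level=logging.INFO, event_level=logging.ERROR)
        ],
    )
    ignore_logger("some.noisy.logger")  # neither breadcrumbs nor events for it
    logging.basicConfig(level=logging.INFO)
    logging.getLogger(__name__).info("recorded as a breadcrumb")
    logging.getLogger(__name__).error("captured as a Sentry event")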
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The Official CONLL 2016 Shared Task Scorer
"""
import argparse
import json
from confusion_matrix import ConfusionMatrix, Alphabet
from conn_head_mapper import ConnHeadMapper
import validator
CONN_HEAD_MAPPER = ConnHeadMapper()
def evaluate(gold_list, predicted_list):
connective_cm = evaluate_connectives(gold_list, predicted_list)
arg1_cm, arg2_cm, rel_arg_cm = evaluate_argument_extractor(gold_list, predicted_list)
sense_cm = evaluate_sense(gold_list, predicted_list)
    print('Explicit connectives : Precision %1.4f Recall %1.4f F1 %1.4f' % connective_cm.get_prf('yes'))
    print('Arg 1 extractor : Precision %1.4f Recall %1.4f F1 %1.4f' % arg1_cm.get_prf('yes'))
    print('Arg 2 extractor : Precision %1.4f Recall %1.4f F1 %1.4f' % arg2_cm.get_prf('yes'))
    print('Arg1 Arg2 extractor combined : Precision %1.4f Recall %1.4f F1 %1.4f' % rel_arg_cm.get_prf('yes'))
    print('Sense classification--------------')
    sense_cm.print_summary()
    print('Overall parser performance --------------')
    precision, recall, f1 = sense_cm.compute_micro_average_f1()
    print('Precision %1.4f Recall %1.4f F1 %1.4f' % (precision, recall, f1))
return connective_cm, arg1_cm, arg2_cm, rel_arg_cm, sense_cm, precision, recall, f1
def evaluate_argument_extractor(gold_list, predicted_list):
"""Evaluate argument extractor at Arg1, Arg2, and relation level
"""
gold_arg1 = [(x['DocID'], x['Arg1']['TokenList']) for x in gold_list]
predicted_arg1 = [(x['DocID'], x['Arg1']['TokenList']) for x in predicted_list]
arg1_cm = compute_binary_eval_metric(gold_arg1, predicted_arg1, span_exact_matching)
gold_arg2 = [(x['DocID'], x['Arg2']['TokenList']) for x in gold_list]
predicted_arg2 = [(x['DocID'], x['Arg2']['TokenList']) for x in predicted_list]
arg2_cm = compute_binary_eval_metric(gold_arg2, predicted_arg2, span_exact_matching)
gold_arg12 = [(x['DocID'], (x['Arg1']['TokenList'], x['Arg2']['TokenList'])) \
for x in gold_list]
predicted_arg12 = [(x['DocID'], (x['Arg1']['TokenList'], x['Arg2']['TokenList'])) \
for x in predicted_list]
rel_arg_cm = compute_binary_eval_metric(gold_arg12, predicted_arg12, spans_exact_matching)
return arg1_cm, arg2_cm, rel_arg_cm
def evaluate_connectives(gold_list, predicted_list):
"""Evaluate connective recognition accuracy for explicit discourse relations
"""
explicit_gold_list = [(x['DocID'], x['Connective']['TokenList'], x['Connective']['RawText']) \
for x in gold_list if x['Type'] == 'Explicit']
explicit_predicted_list = [(x['DocID'], x['Connective']['TokenList']) \
for x in predicted_list if x['Type'] == 'Explicit']
connective_cm = compute_binary_eval_metric(
explicit_gold_list, explicit_predicted_list, connective_head_matching)
return connective_cm
def spans_exact_matching(gold_doc_id_spans, predicted_doc_id_spans):
"""Matching two lists of spans
Input:
gold_doc_id_spans : (DocID , a list of lists of tuples of token addresses)
predicted_doc_id_spans : (DocID , a list of lists of token indices)
Returns:
True if the spans match exactly
"""
exact_match = True
gold_docID = gold_doc_id_spans[0]
gold_spans = gold_doc_id_spans[1]
predicted_docID = predicted_doc_id_spans[0]
predicted_spans = predicted_doc_id_spans[1]
for gold_span, predicted_span in zip(gold_spans, predicted_spans):
exact_match = span_exact_matching((gold_docID,gold_span), (predicted_docID, predicted_span)) \
and exact_match
return exact_match
def span_exact_matching(gold_span, predicted_span):
"""Matching two spans
Input:
gold_span : a list of tuples :(DocID, list of tuples of token addresses)
predicted_span : a list of tuples :(DocID, list of token indices)
Returns:
True if the spans match exactly
"""
gold_docID = gold_span[0]
predicted_docID = predicted_span[0]
gold_token_indices = [x[2] for x in gold_span[1]]
predicted_token_indices = predicted_span[1]
return gold_docID == predicted_docID and gold_token_indices == predicted_token_indices
def connective_head_matching(gold_raw_connective, predicted_raw_connective):
"""Matching connectives
Input:
gold_raw_connective : (DocID, a list of tuples of token addresses, raw connective token)
        predicted_raw_connective : (DocID, a list of token indices)
    A predicted raw connective is considered correct iff
        1) the predicted raw connective includes the connective "head"
        2) the predicted raw connective tokens are a subset of the gold raw connective tokens
For example:
connective_head_matching('two weeks after', 'weeks after') --> True
connective_head_matching('two weeks after', 'two weeks') --> False not covering head
connective_head_matching('just because', 'because') --> True
connective_head_matching('just because', 'simply because') --> False not subset
connective_head_matching('just because', 'since') --> False
"""
gold_docID, gold_token_address_list, gold_tokens = gold_raw_connective
predicted_docID, predicted_token_list = predicted_raw_connective
if gold_docID != predicted_docID:
return False
gold_token_indices = [x[2] for x in gold_token_address_list]
if gold_token_address_list == predicted_token_list:
return True
elif not set(predicted_token_list).issubset(set(gold_token_indices)):
return False
else:
conn_head, indices = CONN_HEAD_MAPPER.map_raw_connective(gold_tokens)
gold_head_connective_indices = [gold_token_indices[x] for x in indices]
return set(gold_head_connective_indices).issubset(set(predicted_token_list))
def evaluate_sense(gold_list, predicted_list):
"""Evaluate sense classifier
The label ConfusionMatrix.NEGATIVE_CLASS is for the relations
that are missed by the system
because the arguments don't match any of the gold relations.
"""
sense_alphabet = Alphabet()
valid_senses = validator.identify_valid_senses(gold_list)
for relation in gold_list:
sense = relation['Sense'][0]
if sense in valid_senses:
sense_alphabet.add(sense)
sense_alphabet.add(ConfusionMatrix.NEGATIVE_CLASS)
sense_cm = ConfusionMatrix(sense_alphabet)
gold_to_predicted_map, predicted_to_gold_map = \
_link_gold_predicted(gold_list, predicted_list, spans_exact_matching)
for i, gold_relation in enumerate(gold_list):
gold_sense = gold_relation['Sense'][0]
if gold_sense in valid_senses:
if i in gold_to_predicted_map:
predicted_sense = gold_to_predicted_map[i]['Sense'][0]
if predicted_sense in gold_relation['Sense']:
sense_cm.add(predicted_sense, predicted_sense)
else:
if not sense_cm.alphabet.has_label(predicted_sense):
predicted_sense = ConfusionMatrix.NEGATIVE_CLASS
sense_cm.add(predicted_sense, gold_sense)
else:
sense_cm.add(ConfusionMatrix.NEGATIVE_CLASS, gold_sense)
for i, predicted_relation in enumerate(predicted_list):
if i not in predicted_to_gold_map:
predicted_sense = predicted_relation['Sense'][0]
if not sense_cm.alphabet.has_label(predicted_sense):
predicted_sense = ConfusionMatrix.NEGATIVE_CLASS
sense_cm.add(predicted_sense, ConfusionMatrix.NEGATIVE_CLASS)
return sense_cm
def combine_spans(span1, span2):
"""Merge two text span dictionaries
"""
new_span = {}
new_span['CharacterSpanList'] = span1['CharacterSpanList'] + span2['CharacterSpanList']
new_span['SpanList'] = span1['SpanList'] + span2['SpanList']
new_span['RawText'] = span1['RawText'] + span2['RawText']
new_span['TokenList'] = span1['TokenList'] + span2['TokenList']
return new_span
def compute_binary_eval_metric(gold_list, predicted_list, matching_fn):
"""Compute binary evaluation metric
"""
binary_alphabet = Alphabet()
binary_alphabet.add('yes')
binary_alphabet.add('no')
cm = ConfusionMatrix(binary_alphabet)
matched_predicted = [False for x in predicted_list]
for gold_span in gold_list:
found_match = False
for i, predicted_span in enumerate(predicted_list):
if matching_fn(gold_span, predicted_span) and not matched_predicted[i]:
cm.add('yes', 'yes')
matched_predicted[i] = True
found_match = True
break
if not found_match:
cm.add('yes', 'no')
# Predicted span that does not match with any
for matched in matched_predicted:
if not matched:
cm.add('no', 'yes')
return cm
def _link_gold_predicted(gold_list, predicted_list, matching_fn):
"""Link gold standard relations to the predicted relations
A pair of relations are linked when the arg1 and the arg2 match exactly.
We do this because we want to evaluate sense classification later.
Returns:
A tuple of two dictionaries:
1) mapping from gold relation index to predicted relation index
2) mapping from predicted relation index to gold relation index
"""
gold_to_predicted_map = {}
predicted_to_gold_map = {}
gold_arg12_list = [(x['DocID'], (x['Arg1']['TokenList'], x['Arg2']['TokenList']))
for x in gold_list]
predicted_arg12_list = [(x['DocID'], (x['Arg1']['TokenList'], x['Arg2']['TokenList']))
for x in predicted_list]
for gi, gold_span in enumerate(gold_arg12_list):
for pi, predicted_span in enumerate(predicted_arg12_list):
if matching_fn(gold_span, predicted_span):
gold_to_predicted_map[gi] = predicted_list[pi]
predicted_to_gold_map[pi] = gold_list[gi]
return gold_to_predicted_map, predicted_to_gold_map
def main():
parser = argparse.ArgumentParser(
description="Evaluate system's output against the gold standard")
parser.add_argument('gold', help='Gold standard file')
parser.add_argument('predicted', help='System output file')
args = parser.parse_args()
gold_list = [json.loads(x) for x in open(args.gold)]
predicted_list = [json.loads(x) for x in open(args.predicted)]
    print('\n================================================')
    print('Evaluation for all discourse relations')
    evaluate(gold_list, predicted_list)
    print('\n================================================')
    print('Evaluation for explicit discourse relations only')
    explicit_gold_list = [x for x in gold_list if x['Type'] == 'Explicit']
    explicit_predicted_list = [x for x in predicted_list if x['Type'] == 'Explicit']
    evaluate(explicit_gold_list, explicit_predicted_list)
    print('\n================================================')
    print('Evaluation for non-explicit discourse relations only (Implicit, EntRel, AltLex)')
    non_explicit_gold_list = [x for x in gold_list if x['Type'] != 'Explicit']
    non_explicit_predicted_list = [x for x in predicted_list if x['Type'] != 'Explicit']
    evaluate(non_explicit_gold_list, non_explicit_predicted_list)
if __name__ == '__main__':
main()
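# Example invocation (hedged; the script and file names below are placeholders).
# Each input file is expected to contain one JSON-encoded discourse relation per
# line, in the CONLL 2016 shared task format:
#
#     python scorer.py relations-gold.json relations-predicted.json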
|
|
"""Simple watchdog system.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import io
import logging
import os
import re
import stat # pylint: disable=wrong-import-order
import time
from treadmill import fs
_LOGGER = logging.getLogger(__name__)
_DEFAULT_WATCHDOG_TIMEOUT = '30s'
class Watchdog:
"""Simple file based watchdog system.
"""
WATCHDOG_NAME_RE = re.compile(r'^(?P<name>(?:\w+[#:\.\-@]*)*\w+)$')
WATCHDOG_DURATION_RE = re.compile(r'^(?P<duration>[0-9]{1,3}[smhd])$')
def __init__(self, basepath, timeout=_DEFAULT_WATCHDOG_TIMEOUT):
self.watchdog_path = basepath
self.timeout = timeout
def initialize(self):
"""Properly setup an empty watchdog directory.
"""
for _watchdog, filename, _stat in self._list_gen(self.watchdog_path):
os.unlink(filename)
os.chmod(self.watchdog_path, 0o1777)
def check(self):
"""Check the status of all watchdogs.
:returns `list`:
List of `(name, duration, data)` for each failed watchdog.
"""
curtime = time.time()
failed_watchdogs = []
for watchdog, filename, st_info in self._list_gen(self.watchdog_path):
if curtime < st_info.st_mtime:
# If the watchdog is set in the future, then service is still
# alive
pass
else:
# Otherwise, this is a watchdog failure
_LOGGER.warning('Watchdog failed: %r.', watchdog)
failed_watchdogs.append((filename, watchdog, st_info.st_mtime))
        # Retrieve the payload of failed watchdogs
if failed_watchdogs:
failures = []
for filename, name, failed_at in failed_watchdogs:
try:
with io.open(filename, 'r') as f:
data = f.read()
except OSError:
_LOGGER.exception('Reading watchdog data')
data = ''
failures.append((name, failed_at, data))
return failures
else:
return []
def create(self, name, timeout=None, content=''):
"""Create a watchdog.
:param name:
Name associated with the watchdog
:type name:
``str``
:param timeout:
Timeout for the watchdog in the format `[0-9]{1,3}[smhd]`
:type timeout:
``str``
:param content:
Content to be recorded with the watchdog
:type content:
``bytes``
"""
if not self.WATCHDOG_NAME_RE.match(name):
raise ValueError('Invalid name format: %r' % name)
if timeout is None:
timeout = self.timeout
else:
if not self.WATCHDOG_DURATION_RE.match(timeout):
raise ValueError('Invalid timeout duration: %r' % timeout)
timeout_in_sec = self._duration_to_secs(timeout)
return self.Lease(self.watchdog_path, name, timeout_in_sec, content)
class Lease:
"""Watchdog Lease object.
Represent a currently held watchdog lease.
"""
__slots__ = (
'content',
'filename',
'name',
'timeout',
)
def __init__(self, basedir, name, timeout, content):
self.name = name
self.timeout = timeout
self.content = content
self.filename = os.path.join(basedir, name)
_LOGGER.debug('Setting up watchdog: %r', self)
self._write(timeout_at=(time.time() + self.timeout))
def __hash__(self):
return hash(self.filename)
def __eq__(self, other):
return self.filename == other.filename
def _write(self, timeout_at):
"""Setup the watchdog's lease file.
"""
dirname = os.path.dirname(self.filename)
filename = os.path.basename(self.filename)
fs.mkdir_safe(dirname)
fs.write_safe(
self.filename,
lambda f: f.write(self.content),
prefix='.' + filename,
mode='w',
permission=0o600,
utimes=(timeout_at, timeout_at),
fsync=True
)
def heartbeat(self):
"""Renew a watchdog for one timeout.
"""
timeout_at = time.time() + self.timeout
try:
os.utime(self.filename, (timeout_at, timeout_at))
except OSError as err:
if err.errno == errno.ENOENT:
_LOGGER.warning('Lost lease file: %r', self.filename)
self._write(timeout_at)
else:
raise
def remove(self):
"""Remove a watchdog.
"""
_LOGGER.debug('Clear watchdog: %r:%s', self.name, self.timeout)
try:
os.unlink(self.filename)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def __repr__(self):
return '<{cls}: {name}:{timeout}>'.format(
cls=self.__class__.__name__,
name=self.name,
timeout=self.timeout
)
@staticmethod
def _list_gen(watchdog_path):
"""Generate the list of currently defined watchdogs.
:returns `list`:
List of (`name`, `filename`) of defined watchdog.
"""
# Remove all dot files and all non-file
for watchdog in os.listdir(watchdog_path):
if watchdog[0] == '.':
continue
filename = os.path.join(watchdog_path, watchdog)
try:
filestat = os.lstat(filename)
except os.error:
continue
if not stat.S_ISREG(filestat.st_mode):
continue
yield (watchdog, filename, filestat)
@staticmethod
def _duration_to_secs(duration):
"""Convert all duration specifications into seconds.
"""
secs = int(duration[:-1])
if duration[-1] == 's':
pass
elif duration[-1] == 'm':
secs *= 60
elif duration[-1] == 'h':
secs *= 60 * 60
elif duration[-1] == 'd':
secs *= 60 * 60 * 24
else:
raise ValueError('Invalid duration: %r' % duration)
return secs
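if __name__ == '__main__':
    # A minimal, hedged demo of the watchdog lifecycle. The directory and
    # watchdog name are placeholders, not paths used by Treadmill itself.
    wd = Watchdog('/tmp/treadmill-watchdogs', timeout='30s')
    lease = wd.create('demo-service', timeout='10s', content='demo payload')
    lease.heartbeat()   # push the deadline another 10 seconds into the future
    print(wd.check())   # expected to be [] while the lease has not expired
    lease.remove()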
|
|
import discord
import asyncio
from twitch.api import v3 as twitch
from twitch.exceptions import ResourceUnavailableException
from random import randint
import argparse
import os
import requests
import json
import random
from bs4 import BeautifulSoup
import urllib.request
import urllib.parse
import youtube_dl
from discord.ext import commands
import time
from geopy.geocoders import Nominatim
import logging
from pybooru import Danbooru
import configparser
from datetime import date
import re
from string import Template
class Bean:
def __init__(self, bot):
self.bot = bot
configParser = configparser.RawConfigParser()
configfilepath = 'config.txt'
configParser.read(configfilepath)
self.danbooruUsername = configParser.get('danbooru', 'username')
self.danbooruPassword = configParser.get('danbooru', 'password')
self.imgurToken = configParser.get('imgur', 'token')
self.fuccboiname = 'bonkery'
self.fuccboidate = date(2018, 12, 6)
self.dbclient = Danbooru('danbooru', username=self.danbooruUsername, api_key=self.danbooruPassword)
self.uwuLetters = {
"l": "w",
"L": "W",
"r": "w",
"R": "W",
}
@commands.command(pass_context=True)
async def mymmr(self, ctx):
"""Check your true MMR"""
mmr = randint(0,9999)
reply_message = 'Hey ' + str(ctx.message.author).split('#', 1)[0] if ctx.message.author.nick == None else 'Hey ' + str(ctx.message.author.nick)
reply_message += ', your MMR is ' + str(mmr)
reply_message += ' PogChamp' if mmr > 5999 else ' LUL'
await self.bot.say(reply_message)
@commands.command(pass_context=True)
async def gei(self, ctx, *member: discord.User):
"""big gei"""
gei_percent = randint(0,100)
if(member):
reply_message = member[0].mention + ' is '
else:
reply_message = 'Hey ' + ctx.message.author.mention + ', you are '
reply_message += str(gei_percent) + '% gei'
categorylist = [(100, 'Tidp#9927'), (99, 'nitro members'), (97, '4Head'), (96, 'twin turbine cock'), (86, 'maybe touches your dick'), (76, 'nice butt'), (61, 'arguably gay'), (46, 'wears boating shoes'), (30, 'average european'), (26, '**DO NOT DISTURB THE STATUES**'), (11, '*How did we match clothes again*'), (1, "basic understanding of fashion"), (0, 'HYPERION CHAD PogChamp')]
for i in range(len(categorylist)):
if gei_percent >= categorylist[i][0]:
category = categorylist[i][1]
break
await self.bot.say(reply_message + ' - ' + category)
@commands.command(pass_context=True)
async def weeb(self, ctx, *member: discord.User):
"""VoHiYo"""
weeb_percent = 100 if(ctx.message.author.id == "106822120276832256") else randint(0,100)
if(member):
if(member[0].id == "106822120276832256"):
weeb_percent = 100
reply_message = member[0].mention + ' is '
else:
reply_message = 'Hey ' + ctx.message.author.mention + ', you are '
reply_message += str(weeb_percent) + '% weeb'
categorylist = [(100, 'Lesbian Anime Lover'), (99, 'Owns crusty figurines'), (98, 'Owns crusty body pillow'), (95, 'Owns body pillow'), (84, 'Listens to anime soundtracks almost exclusively'), (71, 'Owns figurines but hides them when guests are over'), (59, 'Still owns old yugohi cards'), (49, 'Works out often, but only because they were inspired by DBZ bodies'), (36, 'Listens to JoJo soundtrack but has never watched JoJo'), (25, 'Watches Shonen anime behind closed doors'), (11, '"Woah are there any animes like my hero?"'), (1, 'Watched Naruto at the age of 12'), (0, 'Functioning member of society')]
for i in range(len(categorylist)):
if weeb_percent >= categorylist[i][0]:
category = categorylist[i][1]
break
await self.bot.say(reply_message + ' - ' + category)
@commands.command(pass_context=True)
async def jew(self, ctx, *member: discord.User):
"""VoHiYo"""
jew_percent = 100 if(ctx.message.author.id == "106822120276832256") else randint(0,100)
if(member):
if(member[0].id == "106822120276832256"):
jew_percent = 100
reply_message = member[0].mention + ' is '
else:
reply_message = 'Hey ' + ctx.message.author.mention + ', you are '
reply_message += str(jew_percent) + '% jew'
categorylist = [(100, 'ONE OF (((THEM)))'), (99, 'Media Control Wizard'), (98, 'actual Tay Sachs disease'), (97, 'Free law school'), (90, '"-stein"'), (80, 'Real chance of Tay Sachs disease'), (70, 'Has attended at least one Bar mitzvah'), (60, 'Grandparents escaped Holocaust'), (40, 'Little bit inbred'), (30, 'Amateur fortunebuilder'), (10, 'Grandmama sends $100 gift cards every birthday'), (0, 'Probably also says "I\'m 1/6 Irish" at parties')]
for i in range(len(categorylist)):
if jew_percent >= categorylist[i][0]:
category = categorylist[i][1]
break
await self.bot.say(reply_message + ' - ' + category)
@commands.command(pass_context=True)
async def mydong(self, ctx):
"""ur mum knows about it anyway haHAA"""
dicc = randint(0,25)
reply_message = 'Hey ' + str(ctx.message.author).split('#', 1)[0] if ctx.message.author.nick == None else 'Hey ' + str(ctx.message.author.nick)
reply_message = reply_message + ', your dong hangs ' + str(dicc)
reply_message = reply_message + ' cms low KappaPride' if dicc > 17 else reply_message + ' cms low Jebaited'
await self.bot.say(reply_message)
@commands.command(pass_context=True)
async def mylove(self, ctx):
"""gachiGASM"""
if ctx.message.author.nick == None:
reply_message = 'Hey ' + str(ctx.message.author).split('#', 1)[0]
else:
reply_message = 'Hey ' + str(ctx.message.author.nick)
reply_message = reply_message + ' your true love is '
server = ctx.message.author.server
member_list = [x for x in server.members]
online_member_list = [i for i in member_list if str(i.status) == 'online']
loveboy = random.choice(online_member_list)
await self.bot.say(reply_message + loveboy.display_name)
@commands.command(pass_context=True)
async def love(self, ctx, *member: discord.User):
"""KappaPride"""
        if not member:
reply_message = "You need to mention a name."
else:
mmr = randint(0,100)
reply_message = 'Hey ' + str(ctx.message.author).split('#', 1)[0] if ctx.message.author.nick == None else 'Hey ' + str(ctx.message.author.nick)
            reply_message += ''.join([', your love for ', member[0].display_name, ' is around ', str(mmr), '%'])
reply_message = reply_message + ' KappaPride' if(mmr > 50) else reply_message + ' FeelsBadMan'
await self.bot.say(reply_message)
@commands.group(pass_context=True, invoke_without_command=True)
async def kumiko(self, ctx):
"""ehhhhhh"""
with open("cogs/res/kumiko.txt", "r+") as f:
kumiko_img_list = f.read().splitlines()
reply_message = random.choice(kumiko_img_list)
await self.bot.say(reply_message)
@kumiko.command(pass_context=True, name='add')
async def add_kumiko(self, ctx, *args):
kumiko_link = list(args)
link = ' '.join(kumiko_link)
with open("cogs/res/kumiko.txt", "a+") as f:
if(ctx.message.author.id in ['106822120276832256', '77462706893881344']):
f.write(link)
f.write("\n")
reply_message = 'Image added'
else:
reply_message = "You are not authorized to add rarekumikos"
await self.bot.say(reply_message)
@commands.command()
async def koi(self):
"""bad taste"""
reply_message = 'https://www.youtube.com/watch?v=DBYDvnAkiao'
await self.bot.say(reply_message)
@commands.command()
async def msking(self):
await self.bot.say("https://media.discordapp.net/attachments/292869746293211146/563413592888705024/unknown.png")
@commands.command()
async def gorgcblame(self):
await self.bot.say("Rolling the Gorgc Wheel of Blame")
time.sleep(2)
with open("cogs/res/gorgcblame.txt", "r") as f:
blame = f.readlines()
await self.bot.say(random.choice(blame))
@commands.command()
async def vlecc(self):
"""Retard"""
await self.bot.say("https://cdn.discordapp.com/attachments/292869746293211146/565075918939357184/unknown.png")
@commands.group(pass_context=True, invoke_without_command=True)
async def grill(self, ctx):
"""not gay"""
with open("cogs/res/grill_list.txt", "r+") as f:
trap_list = f.read().splitlines()
reply_message = random.choice(trap_list)
await self.bot.say(reply_message)
@grill.command(pass_context=True, name='add')
async def add_grill(self, ctx, *, query:str):
with open("cogs/res/grill_list.txt", "a+") as f:
if(ctx.message.author.id in ['120473568382418945', '77462706893881344']):
print(query)
f.write(query)
f.write("\n")
reply_message = 'Image added'
else:
reply_message = "You are not authorized to add traps"
await self.bot.say(reply_message)
@commands.command()
async def explosion(self):
"""EKSUPUROOOOOSHUN"""
expfile = os.path.join("cogs", "res", "explosion.txt")
with open(expfile, "r") as f:
explist = f.readlines()
await self.bot.say(random.choice(explist))
@commands.group(pass_context=True, invoke_without_command=True)
async def smug(self, ctx):
"""no explanation needed"""
with open("cogs/res/smug.txt", "r+") as f:
smug_list = f.readlines()
reply_message = random.choice(smug_list)
await self.bot.say(reply_message)
@smug.command(pass_context=True, name='add')
async def add_smug(self, ctx, *args):
smug_link = list(args)
link = ' '.join(smug_link)
with open("cogs/res/smug.txt", "a+") as f:
if(ctx.message.author.id in ['106822120276832256', '77462706893881344']):
f.write(link)
f.write("\n")
reply_message = 'Image added'
else:
reply_message = "You are not authorized to add rarekumikos"
await self.bot.say(reply_message)
@commands.command()
async def mycolor(self):
"""cmonBruh"""
colors = ['TriHard', 'C-Word', 'KKona', 'jew', 'pajeet', 'ANELE', 'Ruski']
reply_message = "You're a " + random.choice(colors)
await self.bot.say(reply_message)
@commands.command(pass_context=True)
async def gender(self, ctx, *args):
"""HotPokket"""
dir = os.path.join("cogs", "res", "gender.txt")
with open(dir) as f:
lines = f.read().splitlines()
auth = str(ctx.message.author).split('#', 1)[0] if ctx.message.author.nick == None else ctx.message.author.nick
if(len(args) == 0):
reply_message = auth + " identifies themselves as " + random.choice(lines)
else:
genderlist = list(args)
gender = ' '.join(genderlist)
print(gender)
reply_message = gender + " identifies themselves as " + random.choice(lines) if gender.startswith('<@') or gender.startswith('@') else "Enter a valid username or role"
await self.bot.say(reply_message)
@commands.command(pass_context=True)
async def avatar(self, ctx, *member: discord.User):
"""the fucc do you need a description for"""
reply_message = member[0].avatar_url if member else ctx.message.author.avatar_url
await self.bot.say(reply_message)
@commands.group(pass_context=True, invoke_without_command=True, aliases=['f'])
async def frenzlin(self, ctx):
"""love u m8"""
dir = os.path.join("cogs", "res", "frenzlin.txt")
with open(dir) as f:
lines = f.readlines()
await self.bot.say(random.choice(lines))
@frenzlin.command(pass_context=True, name='add')
async def add_ritsu(self, ctx, *args):
ritsu_link = list(args)
link = ' '.join(ritsu_link)
with open("cogs/res/frenzlin.txt", "a+") as f:
if(ctx.message.author.id in ['110840185155010560', '77462706893881344']):
f.write(link)
f.write("\n")
reply_message = 'Image added'
else:
reply_message = "You are not authorized to add ritsus"
await self.bot.say(reply_message)
@commands.group(pass_context=True, invoke_without_command=True)
async def bidet(self, ctx):
"""love u m8"""
dir = os.path.join("cogs", "res", "bidets.txt")
with open(dir) as f:
lines = f.readlines()
await self.bot.say(random.choice(lines))
@bidet.command(pass_context=True, name='add')
async def add_bidet(self, ctx, *args):
        bidet_link = list(args)
        link = ' '.join(bidet_link)
        with open("cogs/res/bidets.txt", "a+") as f:
            if(ctx.message.author.id in ['110840185155010560', '77462706893881344']):
                f.write(link)
                f.write("\n")
                reply_message = 'Image added'
            else:
                reply_message = "You are not authorized to add bidets"
await self.bot.say(reply_message)
@commands.command()
async def haidomo(self):
"""bacchuaru youtuba"""
dir = os.path.join("cogs", "res", "kizuna.txt")
with open(dir) as f:
lines = f.read().splitlines()
await self.bot.say(random.choice(lines))
@commands.command()
async def havocc(self):
"""gei"""
dir = os.path.join("cogs", "res", "havok.txt")
with open(dir) as f:
lines = f.readlines()
await self.bot.say(random.choice(lines))
@commands.command()
async def bean(self):
"""BEANED"""
await self.bot.say("http://i0.kym-cdn.com/photos/images/facebook/001/166/993/284.png")
@commands.group(pass_context=True, invoke_without_command=True)
async def danbooru(self, ctx, *query : str):
search_string = ' '.join(query)
print(search_string)
response = self.dbclient.post_list(tags=search_string, limit=1, random=True)
print(response)
try:
response_url = response[0]["file_url"]
if response_url.startswith("https") == False:
response_url = response_url.replace("/data/", "/data/__")
response_url = "https://danbooru.donmai.us" + response_url
await self.bot.say(response_url)
except:
await self.bot.say("There's something wrong with your query, because I can't find anything with that tag.")
@commands.command(pass_context=True)
async def nyx(self, ctx):
img = os.path.join("cogs", "res", "nyx.png")
await self.bot.send_file(ctx.message.channel, fp=img)
@commands.command(pass_context=True)
async def nyxkoi(self, ctx):
await self.bot.say("https://i.imgur.com/xabuEyd.jpg")
@commands.command()
async def padoru(self):
dir = os.path.join("cogs", "res", "padoruids.txt")
with open(dir) as f:
lines = f.readlines()
await self.bot.say("https://imgur.com/" + random.choice(lines))
@commands.command()
async def givemetheipofeveryoneinthischat(self):
reply_message = '.'.join([str(randint(0, 255)) for x in range(4)])
await self.bot.say(reply_message)
@commands.command(pass_context=True)
async def tidp(self,ctx, *emoji_arg: discord.Emoji):
if emoji_arg:
emoji = str(emoji_arg[0])
else:
emoji = '<:tidp:477955740481355787>'
blank_emoji = '<:blank:554472402684477453>'
t = Template("""$emoji$emoji$emoji $blank_emoji $emoji $blank_emoji $emoji$emoji $blank_emoji $blank_emoji $emoji$emoji
$blank_emoji$emoji$blank_emoji $blank_emoji $emoji $blank_emoji $emoji$blank_emoji$emoji $blank_emoji $emoji$blank_emoji$emoji
$blank_emoji$emoji$blank_emoji $blank_emoji $emoji $blank_emoji $emoji$blank_emoji$emoji $blank_emoji $emoji$emoji
$blank_emoji$emoji$blank_emoji $blank_emoji $emoji $blank_emoji $emoji$emoji$blank_emoji $blank_emoji $emoji""")
message = t.substitute(emoji=emoji, blank_emoji=blank_emoji)
await self.bot.say(message)
@commands.command(pass_context=True)
async def fuccboi(self, ctx):
server = ctx.message.author.server
member_list = [x for x in server.members]
self.fuccboiname = random.choice(member_list).display_name if self.fuccboidate != date.today() else self.fuccboiname
self.fuccboidate = date.today() if self.fuccboidate != date.today() else self.fuccboidate
reply_message = "Today's fuccboi is " + self.fuccboiname
await self.bot.say(reply_message)
@commands.command(pass_context=True)
async def donkp(self, ctx):
url = 'https://api.imgur.com/3/gallery/search/viral/{{window}}/{{page}}?q=sadcat'
payload = {}
headers = {
'Authorization': 'Client-ID ' + self.imgurToken
}
response = requests.request('GET', url, headers = headers, data = payload, allow_redirects=False)
responsejson = json.loads(response.text)
imgurls = []
for data in responsejson["data"]:
try:
for image in data["images"]:
imgurls.append(image["link"])
except Exception as e:
pass
await self.bot.say(random.choice(imgurls))
@commands.command(pass_context=True)
async def imgur(self, ctx, *query : str):
search_string = ' '.join(query)
url = 'https://api.imgur.com/3/gallery/search/viral/{{window}}/{{page}}?q=' + search_string
payload = {}
headers = {
'Authorization': 'Client-ID ' + self.imgurToken
}
response = requests.request('GET', url, headers = headers, data = payload, allow_redirects=False)
responsejson = json.loads(response.text)
imgurls = []
for data in responsejson["data"]:
try:
for image in data["images"]:
imgurls.append(image["link"])
except Exception as e:
pass
await self.bot.say(random.choice(imgurls))
@commands.command(pass_context=True)
async def block(self, ctx, *member: discord.User):
        message = await self.bot.get_message(self.bot.get_channel("292869746293211146"), "423951424217677824")
await self.bot.say("*"+ message.content + "* - Vlecxius, March 15, 2018.")
@commands.command(pass_context=True)
async def uwu(self, ctx, *input: str):
full_input = list(' '.join(input))
reply_message = ''
for letter in full_input:
reply_message += self.uwuLetters.get(letter, letter)
await self.bot.say(reply_message + " uwu")
@commands.command()
async def qoip(self):
reply_message = '''What the fuck is qoip doing? The whole chetp is being spammed and absolutely nothing is being done about it. This is the worst fucking chat experience I have ever seen. I wonder why any fucking retard would join this shit hole. This message will probably be drowned out by pajeet or other shit. Fuck you, you fucking spammers. You ruined my fucking day!'''
await self.bot.say(reply_message)
def setup(bot):
bot.add_cog(Bean(bot))
|
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The joint transfer model that bridges latent spaces of dataspace models.
The whole experiment handles transfer between latent space
of generative models that model the data. This file defines the joint model
that models the transfer between latent spaces (z1, z2) of models on dataspace.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.models.latent_transfer import nn
from six import iteritems
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
ds = tfp.distributions
def affine(x, output_size, z=None, residual=False, softplus=False):
"""Make an affine layer with optional residual link and softplus activation.
Args:
    x: A TF tensor which is the input.
    output_size: The size of the output, e.g. the dimension of this affine layer.
    z: A TF tensor which is added when the residual link is enabled.
residual: A boolean indicating whether to enable residual link.
softplus: Whether to apply softplus activation at the end.
Returns:
The output tensor.
"""
if residual:
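    # Gated residual update (highway-style): x is projected to twice the
    # output size; one half is the candidate update dz, the other half the
    # gate, and the output interpolates between the projected z and dz.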
x = snt.Linear(2 * output_size)(x)
z = snt.Linear(output_size)(z)
dz = x[:, :output_size]
gates = tf.nn.sigmoid(x[:, output_size:])
output = (1 - gates) * z + gates * dz
else:
output = snt.Linear(output_size)(x)
if softplus:
output = tf.nn.softplus(output)
return output
class EncoderLatentFull(snt.AbstractModule):
"""An MLP (Full layers) encoder for modeling latent space."""
def __init__(self,
input_size,
output_size,
layers=(2048,) * 4,
name='EncoderLatentFull',
residual=True):
super(EncoderLatentFull, self).__init__(name=name)
self.layers = layers
self.input_size = input_size
self.output_size = output_size
self.residual = residual
def _build(self, z):
x = z
for l in self.layers:
x = tf.nn.relu(snt.Linear(l)(x))
mu = affine(x, self.output_size, z, residual=self.residual, softplus=False)
sigma = affine(
x, self.output_size, z, residual=self.residual, softplus=True)
return mu, sigma
class DecoderLatentFull(snt.AbstractModule):
"""An MLP (Full layers) decoder for modeling latent space."""
def __init__(self,
input_size,
output_size,
layers=(2048,) * 4,
name='DecoderLatentFull',
residual=True):
super(DecoderLatentFull, self).__init__(name=name)
self.layers = layers
self.input_size = input_size
self.output_size = output_size
self.residual = residual
def _build(self, z):
x = z
for l in self.layers:
x = tf.nn.relu(snt.Linear(l)(x))
mu = affine(x, self.output_size, z, residual=self.residual, softplus=False)
return mu
class VAE(snt.AbstractModule):
"""VAE for modling latant space."""
def __init__(self, config, name=''):
super(VAE, self).__init__(name=name)
self.config = config
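  # An illustrative `config` for this VAE (the keys are exactly those read in
  # `_build`; the values are placeholders, not recommended settings):
  #
  #   config = {
  #       'batch_size': 64,
  #       'n_latent': 100,         # dimensionality of the dataspace latent
  #       'n_latent_shared': 8,    # dimensionality of the shared latent space
  #       'Encoder': ...,          # e.g. a functools.partial of EncoderLatentFull
  #       'Decoder': ...,          # e.g. a functools.partial of DecoderLatentFull
  #       'prior_loss_beta': 0.01,
  #       'prior_loss': 'KL',
  #   }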
def _build(self, unused_input=None):
# pylint:disable=unused-variable,possibly-unused-variable
# Reason:
# All endpoints are stored as attribute at the end of `_build`.
# Pylint cannot infer this case so it emits false alarm of
# unused-variable if we do not disable this warning.
config = self.config
# Constants
batch_size = config['batch_size']
n_latent = config['n_latent']
n_latent_shared = config['n_latent_shared']
# ---------------------------------------------------------------------
# ## Placeholders
# ---------------------------------------------------------------------
x = tf.placeholder(tf.float32, shape=(None, n_latent))
# ---------------------------------------------------------------------
# ## Modules with parameters
# ---------------------------------------------------------------------
    # Class-valued variables have names that pylint considers invalid, so we
    # disable the warning.
# pylint:disable=invalid-name
Encoder = config['Encoder']
Decoder = config['Decoder']
encoder = Encoder(name='encoder')
decoder = Decoder(name='decoder')
# pylint:enable=invalid-name
# ---------------------------------------------------------------------
    # ## Computation Flow
# ---------------------------------------------------------------------
mu, sigma = encoder(x)
mean_abs_mu, mean_abs_sigma = tf.reduce_mean(tf.abs(mu)), tf.reduce_mean(
tf.abs(sigma)) # for summary only
q_z = ds.Normal(loc=mu, scale=sigma)
q_z_sample = q_z.sample()
# Decode
x_prime = decoder(q_z_sample)
# Reconstruction Loss
# Don't use log_prob from tf.ds (larger = better)
# Instead, we use L2 norm (smaller = better)
# # recons = tf.reduce_sum(p_x.log_prob(x), axis=[-1])
recons = tf.reduce_mean(tf.square(x_prime - x))
mean_recons = tf.reduce_mean(recons)
# Prior
p_z = ds.Normal(loc=0., scale=1.)
p_z_sample = p_z.sample(sample_shape=[batch_size, n_latent_shared])
x_from_prior = decoder(p_z_sample)
# Space filling
# We use `KL` in variable name for naming consistency with math.
# pylint:disable=invalid-name
beta = config['prior_loss_beta']
if beta == 0:
prior_loss = tf.constant(0.0)
else:
if config['prior_loss'].lower() == 'KL'.lower():
KL_qp = ds.kl_divergence(ds.Normal(loc=mu, scale=sigma), p_z)
KL = tf.reduce_sum(KL_qp, axis=-1)
mean_KL = tf.reduce_mean(KL)
prior_loss = mean_KL
else:
raise NotImplementedError()
# pylint:enable=invalid-name
# VAE Loss
beta = tf.constant(config['prior_loss_beta'])
scaled_prior_loss = prior_loss * beta
vae_loss = mean_recons + scaled_prior_loss
# ---------------------------------------------------------------------
# ## Training
# ---------------------------------------------------------------------
# Learning rates
vae_lr = tf.constant(3e-4)
# Training Ops
vae_vars = list(encoder.get_variables())
vae_vars.extend(decoder.get_variables())
if vae_vars:
      # Here, if we use the identity transform, there is no var to optimize,
# so in this case we shall avoid building optimizer and saver,
# otherwise there would be
# "No variables to optimize." / "No variables to save" error.
# Optimizer
train_vae = tf.train.AdamOptimizer(learning_rate=vae_lr).minimize(
vae_loss, var_list=vae_vars)
# Savers
vae_saver = tf.train.Saver(vae_vars, max_to_keep=100)
# Add all endpoints as object attributes
for k, v in iteritems(locals()):
self.__dict__[k] = v
# pylint:enable=unused-variable,possibly-unused-variable
class Model(snt.AbstractModule):
"""A joint model with two VAEs for latent spaces and ops for transfer.
  This model contains two VAEs to model two latent spaces individually,
  as well as extra Bayesian inference in training to enable transfer.
"""
def __init__(self, config, name=''):
super(Model, self).__init__(name=name)
self.config = config
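  # An illustrative `config` for the joint model (the keys are those read in
  # `_build`; the values are placeholders).  `vae_A` and `vae_B` are per-side
  # VAE configs as described for the VAE class above:
  #
  #   config = {
  #       'vae_A': {...},
  #       'vae_B': {...},
  #       'n_latent_shared': 8,
  #       'lr': 3e-4,
  #       'prior_loss_align_beta': 0.0,
  #       'mean_recons_A_align_beta': 0.0,
  #       'mean_recons_B_align_beta': 0.0,
  #       'mean_recons_A_to_B_align_beta': 0.0,
  #       'mean_recons_B_to_A_align_beta': 0.0,
  #   }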
def _build(self, unused_input=None):
# pylint:disable=unused-variable,possibly-unused-variable
# Reason:
# All endpoints are stored as attribute at the end of `_build`.
# Pylint cannot infer this case so it emits false alarm of
# unused-variable if we do not disable this warning.
# pylint:disable=invalid-name
# Reason:
    # The following variables have names that pylint considers invalid, so
    # we disable the warning:
    # - variables that hold classes
    # - variables whose names contain A or B, indicating which side of the
    #   data they belong to.
# ---------------------------------------------------------------------
# ## Extract parameters from config
# ---------------------------------------------------------------------
config = self.config
lr = config.get('lr', 3e-4)
n_latent_shared = config['n_latent_shared']
if 'n_latent' in config:
n_latent_A = n_latent_B = config['n_latent']
else:
n_latent_A = config['vae_A']['n_latent']
n_latent_B = config['vae_B']['n_latent']
# ---------------------------------------------------------------------
# ## VAE containing Modules with parameters
# ---------------------------------------------------------------------
vae_A = VAE(config['vae_A'], name='vae_A')
vae_A()
vae_B = VAE(config['vae_B'], name='vae_B')
vae_B()
vae_lr = tf.constant(lr)
vae_vars = vae_A.vae_vars + vae_B.vae_vars
vae_loss = vae_A.vae_loss + vae_B.vae_loss
train_vae = tf.train.AdamOptimizer(learning_rate=vae_lr).minimize(
vae_loss, var_list=vae_vars)
vae_saver = tf.train.Saver(vae_vars, max_to_keep=100)
# ---------------------------------------------------------------------
# ## Computation Flow
# ---------------------------------------------------------------------
# Tensor Endpoints
x_A = vae_A.x
x_B = vae_B.x
q_z_sample_A = vae_A.q_z_sample
q_z_sample_B = vae_B.q_z_sample
mu_A, sigma_A = vae_A.mu, vae_A.sigma
mu_B, sigma_B = vae_B.mu, vae_B.sigma
x_prime_A = vae_A.x_prime
x_prime_B = vae_B.x_prime
x_from_prior_A = vae_A.x_from_prior
x_from_prior_B = vae_B.x_from_prior
x_A_to_B = vae_B.decoder(q_z_sample_A)
x_B_to_A = vae_A.decoder(q_z_sample_B)
x_A_to_B_direct = vae_B.decoder(mu_A)
x_B_to_A_direct = vae_A.decoder(mu_B)
z_hat = tf.placeholder(tf.float32, shape=(None, n_latent_shared))
x_joint_A = vae_A.decoder(z_hat)
x_joint_B = vae_B.decoder(z_hat)
vae_loss_A = vae_A.vae_loss
vae_loss_B = vae_B.vae_loss
x_align_A = tf.placeholder(tf.float32, shape=(None, n_latent_A))
x_align_B = tf.placeholder(tf.float32, shape=(None, n_latent_B))
mu_align_A, sigma_align_A = vae_A.encoder(x_align_A)
mu_align_B, sigma_align_B = vae_B.encoder(x_align_B)
q_z_align_A = ds.Normal(loc=mu_align_A, scale=sigma_align_A)
q_z_align_B = ds.Normal(loc=mu_align_B, scale=sigma_align_B)
# VI in joint space
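    # The joint posterior over the shared latent is approximated by the
    # (renormalized) product of the two per-side Gaussian posteriors, which is
    # again a Gaussian; its mean/sigma come from nn.product_two_guassian_pdfs.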
mu_align, sigma_align = nn.product_two_guassian_pdfs(
mu_align_A, sigma_align_A, mu_align_B, sigma_align_B)
q_z_align = ds.Normal(loc=mu_align, scale=sigma_align)
p_z_align = ds.Normal(loc=0., scale=1.)
# - KL
KL_qp_align = ds.kl_divergence(q_z_align, p_z_align)
KL_align = tf.reduce_sum(KL_qp_align, axis=-1)
mean_KL_align = tf.reduce_mean(KL_align)
prior_loss_align = mean_KL_align
prior_loss_align_beta = config.get('prior_loss_align_beta', 0.0)
scaled_prior_loss_align = prior_loss_align * prior_loss_align_beta
    # - Reconstruction (from joint Gaussian)
q_z_sample_align = q_z_align.sample()
x_prime_A_align = vae_A.decoder(q_z_sample_align)
x_prime_B_align = vae_B.decoder(q_z_sample_align)
mean_recons_A_align = tf.reduce_mean(tf.square(x_prime_A_align - x_align_A))
mean_recons_B_align = tf.reduce_mean(tf.square(x_prime_B_align - x_align_B))
mean_recons_A_align_beta = config.get('mean_recons_A_align_beta', 0.0)
scaled_mean_recons_A_align = mean_recons_A_align * mean_recons_A_align_beta
mean_recons_B_align_beta = config.get('mean_recons_B_align_beta', 0.0)
scaled_mean_recons_B_align = mean_recons_B_align * mean_recons_B_align_beta
scaled_mean_recons_align = (
scaled_mean_recons_A_align + scaled_mean_recons_B_align)
# - Reconstruction (from transfer)
q_z_align_A_sample = q_z_align_A.sample()
q_z_align_B_sample = q_z_align_B.sample()
x_A_to_B_align = vae_B.decoder(q_z_align_A_sample)
x_B_to_A_align = vae_A.decoder(q_z_align_B_sample)
mean_recons_A_to_B_align = tf.reduce_mean(
tf.square(x_A_to_B_align - x_align_B))
mean_recons_B_to_A_align = tf.reduce_mean(
tf.square(x_B_to_A_align - x_align_A))
mean_recons_A_to_B_align_beta = config.get('mean_recons_A_to_B_align_beta',
0.0)
scaled_mean_recons_A_to_B_align = (
mean_recons_A_to_B_align * mean_recons_A_to_B_align_beta)
mean_recons_B_to_A_align_beta = config.get('mean_recons_B_to_A_align_beta',
0.0)
scaled_mean_recons_B_to_A_align = (
mean_recons_B_to_A_align * mean_recons_B_to_A_align_beta)
scaled_mean_recons_cross_A_B_align = (
scaled_mean_recons_A_to_B_align + scaled_mean_recons_B_to_A_align)
# Full loss
full_loss = (vae_loss_A + vae_loss_B + scaled_mean_recons_align +
scaled_mean_recons_cross_A_B_align)
# train op
full_lr = tf.constant(lr)
train_full = tf.train.AdamOptimizer(learning_rate=full_lr).minimize(
full_loss, var_list=vae_vars)
# Add all endpoints as object attributes
for k, v in iteritems(locals()):
self.__dict__[k] = v
# pylint:enable=unused-variable,possibly-unused-variable
# pylint:enable=invalid-name
def get_summary_kv_dict(self):
m = self
return {
'm.vae_A.mean_recons':
m.vae_A.mean_recons,
'm.vae_A.prior_loss':
m.vae_A.prior_loss,
'm.vae_A.scaled_prior_loss':
m.vae_A.scaled_prior_loss,
'm.vae_A.vae_loss':
m.vae_A.vae_loss,
'm.vae_B.mean_recons':
m.vae_B.mean_recons,
'm.vae_A.mean_abs_mu':
m.vae_A.mean_abs_mu,
'm.vae_A.mean_abs_sigma':
m.vae_A.mean_abs_sigma,
'm.vae_B.prior_loss':
m.vae_B.prior_loss,
'm.vae_B.scaled_prior_loss':
m.vae_B.scaled_prior_loss,
'm.vae_B.vae_loss':
m.vae_B.vae_loss,
'm.vae_B.mean_abs_mu':
m.vae_B.mean_abs_mu,
'm.vae_B.mean_abs_sigma':
m.vae_B.mean_abs_sigma,
'm.vae_loss_A':
m.vae_loss_A,
'm.vae_loss_B':
m.vae_loss_B,
'm.prior_loss_align':
m.prior_loss_align,
'm.scaled_prior_loss_align':
m.scaled_prior_loss_align,
'm.mean_recons_A_align':
m.mean_recons_A_align,
'm.mean_recons_B_align':
m.mean_recons_B_align,
'm.scaled_mean_recons_A_align':
m.scaled_mean_recons_A_align,
'm.scaled_mean_recons_B_align':
m.scaled_mean_recons_B_align,
'm.scaled_mean_recons_align':
m.scaled_mean_recons_align,
'm.mean_recons_A_to_B_align':
m.mean_recons_A_to_B_align,
'm.mean_recons_B_to_A_align':
m.mean_recons_B_to_A_align,
'm.scaled_mean_recons_A_to_B_align':
m.scaled_mean_recons_A_to_B_align,
'm.scaled_mean_recons_B_to_A_align':
m.scaled_mean_recons_B_to_A_align,
'm.scaled_mean_recons_cross_A_B_align':
m.scaled_mean_recons_cross_A_B_align,
'm.full_loss':
m.full_loss
}
|
|
# -*- coding: utf-8 -*-
#
# REST Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3rest.py
#
import unittest
from gluon import *
from gluon.storage import Storage
from s3.s3rest import S3Request
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# =============================================================================
class POSTFilterTests(unittest.TestCase):
""" Tests for POST filter queries """
# -------------------------------------------------------------------------
def setUp(self):
request = current.request
self.request_body = request._body
self.content_type = request.env.content_type
# -------------------------------------------------------------------------
def tearDown(self):
request = current.request
request._body = self.request_body
request.env.content_type = self.content_type
# -------------------------------------------------------------------------
def testPOSTFilter(self):
""" Test POST filter interpretation with multipart request body """
assertEqual = self.assertEqual
assertNotIn = self.assertNotIn
assertIn = self.assertIn
request = current.request
request.env.content_type = "multipart/form-data"
# Test with valid filter expression JSON
r = S3Request(prefix = "org",
name = "organisation",
http = "POST",
get_vars = {"$search": "form", "test": "retained"},
post_vars = {"service_organisation.service_id__belongs": "1",
"other": "testing",
},
)
# Method changed to GET:
assertEqual(r.http, "GET")
get_vars = r.get_vars
post_vars = r.post_vars
# $search removed from GET vars:
assertNotIn("$search", get_vars)
# Filter queries from POST vars added to GET vars:
assertEqual(get_vars.get("service_organisation.service_id__belongs"), "1")
# Filter queries removed from POST vars:
assertNotIn("service_organisation.service_id__belongs", post_vars)
# Must retain other GET vars:
assertEqual(get_vars.get("test"), "retained")
# Test without $search
r = S3Request(prefix = "org",
name = "organisation",
http = "POST",
get_vars = {"test": "retained"},
post_vars = {"service_organisation.service_id__belongs": "1",
"other": "testing",
},
)
# Method should still be POST:
assertEqual(r.http, "POST")
get_vars = r.get_vars
post_vars = r.post_vars
# $search never was in GET vars - confirm this to exclude test regression
assertNotIn("$search", get_vars)
# Filter queries from POST vars not added to GET vars:
assertNotIn("service_organisation.service_id__belongs", get_vars)
# Filter queries still in POST vars:
assertIn("service_organisation.service_id__belongs", post_vars)
# Must retain other GET vars:
assertEqual(get_vars.get("test"), "retained")
# Test with empty post vars
request._body = StringIO('')
r = S3Request(prefix = "org",
name = "organisation",
http = "POST",
get_vars = {"$search": "ajax", "test": "retained"},
post_vars = {"service_organisation.service_id__belongs": "1",
"other": "testing",
},
)
# Method changed to GET:
assertEqual(r.http, "GET")
get_vars = r.get_vars
post_vars = r.post_vars
# $search removed from GET vars:
assertNotIn("$search", get_vars)
# Filter queries from POST vars not added to GET vars:
assertNotIn("service_organisation.service_id__belongs", get_vars)
# Filter queries still in POST vars:
assertIn("service_organisation.service_id__belongs", post_vars)
# Must retain other GET vars:
assertEqual(get_vars.get("test"), "retained")
# -------------------------------------------------------------------------
def testPOSTFilterAjax(self):
""" Test POST filter interpretation with JSON request body """
assertEqual = self.assertEqual
assertNotIn = self.assertNotIn
request = current.request
# Test with valid filter expression JSON
request._body = StringIO('{"service_organisation.service_id__belongs":"1"}')
r = S3Request(prefix = "org",
name = "organisation",
http = "POST",
get_vars = {"$search": "ajax", "test": "retained"},
)
# Method changed to GET:
assertEqual(r.http, "GET")
get_vars = r.get_vars
# $search removed from GET vars:
assertNotIn("$search", get_vars)
# Filter queries from JSON body added to GET vars:
assertEqual(get_vars.get("service_organisation.service_id__belongs"), "1")
# Must retain other GET vars:
assertEqual(get_vars.get("test"), "retained")
# Test without $search
request._body = StringIO('{"service_organisation.service_id__belongs":"1"}')
r = S3Request(prefix = "org",
name = "organisation",
http = "POST",
get_vars = {"test": "retained"},
)
# Method should still be POST:
assertEqual(r.http, "POST")
get_vars = r.get_vars
# $search never was in GET vars - confirm this to exclude test regression
assertNotIn("$search", get_vars)
# Filter queries from JSON body not added to GET vars:
assertNotIn("service_organisation.service_id__belongs", get_vars)
# Must retain other GET vars:
assertEqual(get_vars.get("test"), "retained")
# Test with valid JSON but invalid filter expression
request._body = StringIO('[1,2,3]')
r = S3Request(prefix = "org",
name = "organisation",
http = "POST",
get_vars = {"$search": "ajax", "test": "retained"},
)
# Method changed to GET:
assertEqual(r.http, "GET")
get_vars = r.get_vars
# $search removed from GET vars:
assertNotIn("$search", get_vars)
# Filter queries from JSON body not added to GET vars:
assertNotIn("service_organisation.service_id__belongs", get_vars)
# Must retain other GET vars:
assertEqual(get_vars.get("test"), "retained")
# Test with empty body
request._body = StringIO('')
r = S3Request(prefix = "org",
name = "organisation",
http = "POST",
get_vars = {"$search": "ajax", "test": "retained"},
)
# Method changed to GET:
assertEqual(r.http, "GET")
get_vars = r.get_vars
# $search removed from GET vars:
assertNotIn("$search", get_vars)
# Filter queries from JSON body not added to GET vars:
assertNotIn("service_organisation.service_id__belongs", get_vars)
# Must retain other GET vars:
assertEqual(get_vars.get("test"), "retained")
# =============================================================================
class URLBuilderTests(unittest.TestCase):
# -------------------------------------------------------------------------
def setUp(self):
current.auth.override = True
s3db = current.s3db
ptable = s3db.pr_person
ctable = s3db.pr_contact
if not hasattr(self, "r"):
record = current.db(ptable.pe_id == ctable.pe_id).select(
ctable.id,
ptable.id,
limitby=(0, 1)).first()
self.assertNotEqual(record, None)
self.a = current.request.application
self.p = str(record[ptable.id])
self.c = str(record[ctable.id])
self.r = S3Request(prefix="pr",
name="person",
c="pr",
f="person",
args=[self.p, "contact", self.c, "method"],
vars=Storage(format="xml", test="test"))
# -------------------------------------------------------------------------
def tearDown(self):
current.auth.override = False
# -------------------------------------------------------------------------
def testURLConstruction(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
self.assertEqual(r.url(),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# -------------------------------------------------------------------------
def testURLMethodOverride(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
# No change
self.assertEqual(r.url(method=None),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# Set to None (resets target record ID)
self.assertEqual(r.url(method=""),
"/%s/pr/person/%s/contact.xml?test=test" % (a, p))
# Change method (retains target record ID)
self.assertEqual(r.url(method="read"),
"/%s/pr/person/%s/contact/%s/read.xml?test=test" % (a, p, c))
# Test without component
r = S3Request(prefix="pr",
name="person",
c="pr",
f="person",
args=[self.p, "method"],
vars=Storage(format="xml", test="test"))
# No change
self.assertEqual(r.url(method=None),
"/%s/pr/person/%s/method.xml?test=test" % (a, p))
# Set to None (resets target record ID and method)
self.assertEqual(r.url(method=""),
"/%s/pr/person.xml?test=test" % a)
# Change method (retains target record ID)
self.assertEqual(r.url(method="read"),
"/%s/pr/person/%s/read.xml?test=test" % (a, p))
# -------------------------------------------------------------------------
def testURLRepresentationOverride(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
# No change
self.assertEqual(r.url(representation=None),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# Set to None (uses default)
self.assertEqual(r.url(representation=""),
"/%s/pr/person/%s/contact/%s/method?test=test" % (a, p, c))
# Change representation
self.assertEqual(r.url(representation="pdf"),
"/%s/pr/person/%s/contact/%s/method.pdf?test=test" % (a, p, c))
# -------------------------------------------------------------------------
def testURLMasterIDOverride(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
# No change
self.assertEqual(r.url(id=None),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# Set to None (retains component ID and method)
self.assertEqual(r.url(id=""),
"/%s/pr/person/contact/%s/method.xml?test=test" % (a, c))
self.assertEqual(r.url(id=0),
"/%s/pr/person/contact/%s/method.xml?test=test" % (a, c))
# Same ID (retains component ID and method)
self.assertEqual(r.url(id=p),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# Change ID (resets component ID and method)
self.assertEqual(r.url(id=5),
"/%s/pr/person/5/contact.xml?test=test" % a)
# Set to wildcard (resets component ID and method)
self.assertEqual(r.url(id="[id]"),
"/%s/pr/person/%%5Bid%%5D/contact.xml?test=test" % a)
self.assertEqual(r.url(id="*"),
"/%s/pr/person/%%5Bid%%5D/contact.xml?test=test" % a)
self.assertEqual(r.url(id=[]),
"/%s/pr/person/%%5Bid%%5D/contact.xml?test=test" % a)
# -------------------------------------------------------------------------
def testURLComponentOverride(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
# No change
self.assertEqual(r.url(component=None),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
self.assertEqual(r.url(component="contact"),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# Set to None (resets component ID and method)
self.assertEqual(r.url(component=""),
"/%s/pr/person/%s.xml?test=test" % (a, p))
# Change component (resets component ID and method)
self.assertEqual(r.url(component="other"),
"/%s/pr/person/%s/other.xml?test=test" % (a, p))
# -------------------------------------------------------------------------
def testURLComponentIDOverride(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
# No change
self.assertEqual(r.url(component_id=None),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# Set to None (resets the method)
self.assertEqual(r.url(component_id=0),
"/%s/pr/person/%s/contact.xml?test=test" % (a, p))
# Change component ID (retains method)
self.assertEqual(r.url(component_id=5),
"/%s/pr/person/%s/contact/5/method.xml?test=test" % (a, p))
# -------------------------------------------------------------------------
def testURLTargetOverrideMaster(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
r = S3Request(prefix="pr",
name="person",
c="pr",
f="person",
args=[self.p, "method"],
vars=Storage(format="xml", test="test"))
# No change
self.assertEqual(r.url(target=None),
"/%s/pr/person/%s/method.xml?test=test" % (a, p))
self.assertEqual(r.url(target=p),
"/%s/pr/person/%s/method.xml?test=test" % (a, p))
# Set to None (resets method)
self.assertEqual(r.url(target=0),
"/%s/pr/person.xml?test=test" % a)
# Change target ID (retains method)
self.assertEqual(r.url(target=5),
"/%s/pr/person/5/method.xml?test=test" % a)
# -------------------------------------------------------------------------
def testURLTargetOverrideComponent(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
# No change
self.assertEqual(r.url(target=None),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
self.assertEqual(r.url(target=c),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# Set to None (resets method)
self.assertEqual(r.url(target=0),
"/%s/pr/person/%s/contact.xml?test=test" % (a, p))
# Change target ID (retains method)
self.assertEqual(r.url(target=5),
"/%s/pr/person/%s/contact/5/method.xml?test=test" % (a, p))
# -------------------------------------------------------------------------
def testURLVarsOverride(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
# No Change
self.assertEqual(r.url(vars=None),
"/%s/pr/person/%s/contact/%s/method.xml?test=test" % (a, p, c))
# Set to None
self.assertEqual(r.url(vars={}),
"/%s/pr/person/%s/contact/%s/method.xml" % (a, p, c))
self.assertEqual(r.url(vars=""),
"/%s/pr/person/%s/contact/%s/method.xml" % (a, p, c))
# Change vars
self.assertEqual(r.url(vars={"other":"test"}),
"/%s/pr/person/%s/contact/%s/method.xml?other=test" % (a, p, c))
# -------------------------------------------------------------------------
def testURLCombinations(self):
(a, p, c, r) = (self.a, self.p, self.c, self.r)
# Test request with component + component ID
self.assertEqual(r.url(method="", id=5),
"/%s/pr/person/5/contact.xml?test=test" % a)
self.assertEqual(r.url(method="", vars=None),
"/%s/pr/person/%s/contact.xml?test=test" % (a, p))
self.assertEqual(r.url(id="[id]", method="review"),
"/%s/pr/person/%%5Bid%%5D/contact/review.xml?test=test" % a)
self.assertEqual(r.url(method="deduplicate", target=0, vars={}),
"/%s/pr/person/%s/contact/deduplicate.xml" % (a, p))
# Test request with component (without component ID)
r = S3Request(prefix="pr",
name="person",
c="pr",
f="person",
args=[self.p, "contact", "method"],
vars=Storage(format="xml", test="test"))
self.assertEqual(r.url(method="", id=5),
"/%s/pr/person/5/contact.xml?test=test" % a)
self.assertEqual(r.url(method="", vars=None),
"/%s/pr/person/%s/contact.xml?test=test" % (a, p))
self.assertEqual(r.url(id="[id]", method="review"),
"/%s/pr/person/%%5Bid%%5D/contact/review.xml?test=test" % a)
self.assertEqual(r.url(method="deduplicate", target=0, vars={}),
"/%s/pr/person/%s/contact/deduplicate.xml" % (a, p))
# Test request without component
r = S3Request(prefix="pr",
name="person",
c="pr",
f="person",
args=[self.p, "method"],
vars=Storage(format="xml", test="test"))
self.assertEqual(r.url(method="", id=5),
"/%s/pr/person/5.xml?test=test" % a)
self.assertEqual(r.url(method="", vars=None),
"/%s/pr/person.xml?test=test" % a)
self.assertEqual(r.url(id="[id]", method="review"),
"/%s/pr/person/%%5Bid%%5D/review.xml?test=test" % a)
self.assertEqual(r.url(method="deduplicate", target=0, vars={}),
"/%s/pr/person/deduplicate.xml" % a)
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
run_suite(
POSTFilterTests,
URLBuilderTests,
)
# END ========================================================================
|
|
"""
OMEGA iTHX Series Temperature and Humidity Chart Recorder.
This class is compatible with the following model numbers:
* iTHX-W3
* iTHX-D3
* iTHX-SD
* iTHX-M
* iTHX-W
* iTHX-2
"""
import os
import re
import time
import socket
import sqlite3
from datetime import datetime
try:
ConnectionResetError
except NameError:
ConnectionResetError = socket.error # for Python 2.7
from msl.equipment.exceptions import OmegaError
from msl.equipment.connection_socket import ConnectionSocket
from msl.equipment.resources import register
@register(manufacturer=r'OMEGA', model=r'iTHX-[2DMSW][3D]?', flags=re.IGNORECASE)
class iTHX(ConnectionSocket):
def __init__(self, record):
"""OMEGA iTHX Series Temperature and Humidity Chart Recorder.
The :attr:`~msl.equipment.record_types.ConnectionRecord.properties`
for an iTHX connection supports the following key-value pairs in the
:ref:`connections-database`::
'nprobes': int, the number of probes the device has
'nbytes': int, the number of bytes to read from each probe
as well as those key-value pairs supported by the parent
:class:`~msl.equipment.connection_socket.ConnectionSocket` class.
Do not instantiate this class directly. Use the :meth:`~.EquipmentRecord.connect`
method to connect to the equipment.
Parameters
----------
record : :class:`~.EquipmentRecord`
A record from an :ref:`equipment-database`.
"""
super(iTHX, self).__init__(record)
self.set_exception_class(OmegaError)
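    # Illustrative usage (the record lookup is a placeholder; as noted in the
    # class docstring, instances are created via EquipmentRecord.connect()):
    #
    #   >>> ithx = record.connect()                  # record: an EquipmentRecord
    #   >>> t = ithx.temperature(probe=1)
    #   >>> t, h, d = ithx.temperature_humidity_dewpoint(probe=1)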
def temperature(self, probe=1, celsius=True, nbytes=None):
"""Read the temperature.
Parameters
----------
probe : :class:`int`, optional
The probe number to read the temperature of
(for iTHX's that contain multiple probes).
        celsius : :class:`bool`, optional
:data:`True` to return the temperature in celsius,
:data:`False` for fahrenheit.
        nbytes : :class:`int`, optional
The number of bytes to read. If :data:`None` then
read until the termination character sequence.
Returns
-------
:class:`float` or :class:`tuple` of :class:`float`
The temperature.
"""
msg = 'TC' if celsius else 'TF'
return self._get(msg, probe, size=nbytes)
def humidity(self, probe=1, nbytes=None):
"""Read the percent humidity.
Parameters
----------
probe : :class:`int`, optional
The probe number to read the humidity of
(for iTHX's that contain multiple probes).
        nbytes : :class:`int`, optional
The number of bytes to read. If :data:`None` then
read until the termination character sequence.
Returns
-------
:class:`float` or :class:`tuple` of :class:`float`
The percent humidity.
"""
return self._get('H', probe, size=nbytes)
def dewpoint(self, probe=1, celsius=True, nbytes=None):
"""Read the dew point.
Parameters
----------
probe : :class:`int`, optional
The probe number to read the dew point of
(for iTHX's that contain multiple probes).
celsius : :class:`bool`, optional
:data:`True` to return the dew point in celsius,
:data:`False` for fahrenheit.
        nbytes : :class:`int`, optional
The number of bytes to read. If :data:`None` then
read until the termination character sequence.
Returns
-------
:class:`float` or :class:`tuple` of :class:`float`
The dew point.
"""
msg = 'DC' if celsius else 'DF'
return self._get(msg, probe, size=nbytes)
def temperature_humidity(self, probe=1, celsius=True, nbytes=None):
"""Read the temperature and the humidity.
Parameters
----------
probe : :class:`int`, optional
The probe number to read the temperature and humidity of
(for iTHX's that contain multiple probes).
celsius : :class:`bool`, optional
:data:`True` to return the temperature in celsius,
:data:`False` for fahrenheit.
        nbytes : :class:`int`, optional
The number of bytes to read. If :data:`None` then read
until the termination character sequence. If specified,
`nbytes` is the combined value to read both values.
Returns
-------
:class:`float`
The temperature.
:class:`float`
The humidity.
"""
# iTHX-D3 and iTHX-W3 support the *SRB and *SRBF commands,
# however, the returned bytes are of the form b'019.4\r,057.0\r'
# and if nbytes is None then the socket would stop reading bytes
# at the first instance of '\r' and leave ',057.0\r' in the buffer.
#
# Also, iTHX-W and iTHX-2 do not support the *SRB and *SRBF commands.
#
# With these complications we do not use the *SRB and *SRBF
# commands and read the temperature and humidity sequentially.
if nbytes is not None:
nbytes = nbytes//2
t = self.temperature(probe=probe, celsius=celsius, nbytes=nbytes)
h = self.humidity(probe=probe, nbytes=nbytes)
return t, h
def temperature_humidity_dewpoint(self, probe=1, celsius=True, nbytes=None):
"""Read the temperature, the humidity and the dew point.
Parameters
----------
probe : :class:`int`, optional
The probe number to read the temperature, humidity and dew point
(for iTHX's that contain multiple probes).
celsius : :class:`bool`, optional
If :data:`True` then return the temperature and dew point
in celsius, :data:`False` for fahrenheit.
nbytes : :class:`int`, optional
The number of bytes to read. If :data:`None` then read until
the termination character sequence. If specified, `nbytes`
is the combined value to read all three values.
Returns
-------
:class:`float`
The temperature.
:class:`float`
The humidity.
:class:`float`
The dew point.
"""
nth = None if nbytes is None else (nbytes*2)//3
nd = None if nbytes is None else nbytes//3
t, h = self.temperature_humidity(probe=probe, celsius=celsius, nbytes=nth)
return t, h, self.dewpoint(probe=probe, celsius=celsius, nbytes=nd)
def reset(self, wait=True, password=None, port=2002, timeout=10):
"""Power reset the iServer.
Some iServers accept the reset command to be sent via the
TCP/UDP protocol and some require the reset command to be sent
via the Telnet protocol.
Parameters
----------
wait : :class:`bool`, optional
Whether to wait for the connection to the iServer to be
re-established before returning to the calling program. Rebooting
an iServer takes about 10 to 15 seconds.
password : :class:`str`, optional
The administrator's password of the iServer. If not specified then
uses the default manufacturer's password. Only used if the iServer
needs to be reset via the Telnet protocol.
port : :class:`int`, optional
The port to use for the Telnet connection. Only used if the iServer
needs to be reset via the Telnet protocol.
timeout : :class:`float`, optional
The timeout value to use during the Telnet session. Only used if
the iServer needs to be reset via the Telnet protocol.
"""
def use_telnet():
from telnetlib import Telnet
pw = password or '00000000'
with Telnet(self.host, port, timeout=timeout) as tn:
tn.read_until(b'Password:', timeout=timeout)
tn.write(pw.encode() + b'\n')
tn.read_until(b'Login Successful', timeout=timeout)
tn.write(b'reset\n')
tn.read_until(b'The unit will reset in 5 seconds.', timeout=timeout)
if wait:
# 5 seconds from the Telnet message
# 10 seconds for the time it takes to reboot
time.sleep(15)
self.reconnect(max_attempts=-1)
# according to the manual, these models require Telnet
if self.equipment_record.model in ['iTHX-W', 'iTHX-2']:
return use_telnet()
# The manual indicates that iTHX-W3, iTHX-D3, iTHX-SD and iTHX-M
# all accept the *SRYRST command
reply = self.query('*SRYRST').strip()
if reply == 'Reset':
# this was the reply that was received with an iTHX-W3
# which accepts the reset command via TCP/UDP
if wait:
time.sleep(10)
self.reconnect(max_attempts=-1)
elif reply == 'Serial Time Out':
# this was the reply that was received with an iTHX-W
# which does not recognize the *SRYRST command
use_telnet()
else:
self.raise_exception(
'Received an unexpected reply, {!r}, for the *SRYRST command'.format(reply)
)
def start_logging(self, path, wait=60, nprobes=None, nbytes=None,
celsius=True, msg_format=None, db_timeout=10, validator=None):
"""Start logging the temperature, humidity and dew point to the specified path.
The information is logged to an SQLite_ database. To stop logging press ``CTRL+C``.
.. _SQLite: https://www.sqlite.org/index.html
Parameters
----------
path : :class:`str`
The path to the SQLite_ database. If you only specify a directory
then a database with the default filename, ``model_serial.sqlite3``,
is created/opened in this directory.
wait : :class:`int`, optional
The number of seconds to wait between each log event.
nprobes : :class:`int`, optional
The number of probes that the iServer has (1 or 2).
If not specified then gets the value defined in
:attr:`~msl.equipment.record_types.ConnectionRecord.properties`.
Default is 1.
nbytes : :class:`int`, optional
The number of bytes to read from each probe (the probes are read
sequentially). The value is passed to :meth:`.temperature_humidity_dewpoint`.
If not specified then gets the value defined in
:attr:`~msl.equipment.record_types.ConnectionRecord.properties`.
Default is :data:`None`.
celsius : :class:`bool`, optional
:data:`True` to return the temperature and dew point in celsius,
:data:`False` for fahrenheit.
msg_format : :class:`str`, optional
The format to use for the INFO :mod:`logging` messages each time
data is read from an iServer. The format must use the
:meth:`str.format` syntax, ``{}``. The positional arguments to
:meth:`str.format` are the values from the iServer, where the values
are `(temperature, humidity, dewpoint)` for a 1-probe sensor and
`(temperature1, humidity1, dewpoint1, temperature2, humidity2, dewpoint2)`
for a 2-probe sensor. The keyword arguments to :meth:`str.format`
are the attributes of an :class:`~.EquipmentRecord`.
Examples:
* T={0} H={1} D={2}
* {connection[address]} T={0:.1f} H={1:.1f} D={2:.1f}
* T1={0} T2={3} H1={1} H2={4} D1={2} D2={5}
* {alias} {serial} -> T={0}C H={1}% D={2}C
db_timeout : :class:`float`, optional
The number of seconds the connection to the database should wait
for the lock to go away until raising an exception.
validator
A callback that is used to validate the data. The callback must
accept two arguments `(data, ithx)`, where `data` is a
:class:`tuple` of the temperature, humidity and dewpoint values
for each probe and `ithx` is the :class:`.iTHX` instance
(i.e., `self`). The callback must return a value whose truthness
decides whether to insert the data into the database. If the
returned value evaluates to :data:`True` then the data is inserted
into the database.
"""
if os.path.isdir(path):
filename = self.equipment_record.model + '_' + self.equipment_record.serial + '.sqlite3'
path = os.path.join(path, filename)
record_as_dict = self.equipment_record.to_dict()
db = sqlite3.connect(path, timeout=db_timeout)
self.log_info('start logging to {}'.format(path))
props = self.equipment_record.connection.properties
if nprobes is None:
nprobes = props.get('nprobes', 1)
if nbytes is None:
nbytes = props.get('nbytes', None)
if nprobes == 1:
db.execute(
'CREATE TABLE IF NOT EXISTS data ('
'pid INTEGER PRIMARY KEY AUTOINCREMENT, '
'datetime DATETIME, '
'temperature FLOAT, '
'humidity FLOAT, '
'dewpoint FLOAT);'
)
if not msg_format:
msg_format = 'Sn={serial} T={0} H={1} D={2}'
elif nprobes == 2:
db.execute(
'CREATE TABLE IF NOT EXISTS data ('
'pid INTEGER PRIMARY KEY AUTOINCREMENT, '
'datetime DATETIME, '
'temperature1 FLOAT, '
'humidity1 FLOAT, '
'dewpoint1 FLOAT, '
'temperature2 FLOAT, '
'humidity2 FLOAT, '
'dewpoint2 FLOAT);'
)
if not msg_format:
msg_format = 'Sn={serial} T1={0} H1={1} D1={2} T2={3} H2={4} D2={5}'
else:
raise ValueError('The number-of-probes value must be either 1 or 2. Got {}'.format(nprobes))
db.commit()
db.close()
try:
while True:
t0 = time.time()
# get the values
try:
data = self.temperature_humidity_dewpoint(probe=1, celsius=celsius, nbytes=nbytes)
if nprobes == 2:
data += self.temperature_humidity_dewpoint(probe=2, celsius=celsius, nbytes=nbytes)
self.log_info(msg_format.format(*data, **record_as_dict))
except Exception as e:
self.log_error('{}: {}'.format(e.__class__.__name__, e))
self.reconnect(max_attempts=-1)
continue
now = datetime.now().replace(microsecond=0).isoformat(sep='T')
if validator is not None and not validator(data, self):
time.sleep(max(0.0, wait - (time.time() - t0)))
continue
# save the values to the database and then wait
values = [now] + list(data)
try:
db = sqlite3.connect(path, timeout=db_timeout)
if nprobes == 1:
db.execute('INSERT INTO data VALUES (NULL, ?, ?, ?, ?);', values)
else:
db.execute('INSERT INTO data VALUES (NULL, ?, ?, ?, ?, ?, ?, ?);', values)
db.commit()
db.close()
except sqlite3.DatabaseError as e:
db.close()
self.log_error('{}: {}'.format(e.__class__.__name__, e))
else:
time.sleep(max(0.0, wait - (time.time() - t0)))
except (KeyboardInterrupt, SystemExit):
pass
db.close()
self.log_info('stopped logging to {}'.format(path))
@staticmethod
def data(path, start=None, end=None, as_datetime=True, select='*'):
"""Fetch all the log records between two dates.
Parameters
----------
path : :class:`str`
The path to the SQLite_ database.
start : :class:`~datetime.datetime` or :class:`str`, optional
Include all records that have a timestamp :math:`\\ge` `start`.
If a :class:`str` then in the ISO 8601 ``yyyy-mm-dd`` or
``yyyy-mm-ddTHH:MM:SS`` format.
end : :class:`~datetime.datetime` or :class:`str`, optional
Include all records that have a timestamp :math:`\\le` `end`.
If a :class:`str` then in the ISO 8601 ``yyyy-mm-dd`` or
``yyyy-mm-ddTHH:MM:SS`` format.
as_datetime : :class:`bool`, optional
Whether to fetch the timestamps in the database as
:class:`~datetime.datetime` objects. If :data:`False` then the
timestamps will be of type :class:`str` and this function
will return much faster if requesting data over a large date range.
select : :class:`str` or :class:`list` of :class:`str`, optional
The field name(s) in the database table to use for the ``SELECT``
SQL command (e.g., ``'datetime,temperature'`` or
``['datetime', 'humidity']``).
Returns
-------
:class:`list` of :class:`tuple`
A list of ``(pid, datetime, temperature, humidity, dewpoint, ...)``
log records, depending on the value of `select`.
"""
if not os.path.isfile(path):
raise OSError('Cannot find {}'.format(path))
detect_types = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES if as_datetime else 0
db = sqlite3.connect(path, timeout=10.0, detect_types=detect_types)
cursor = db.cursor()
if select != '*':
if isinstance(select, (list, tuple, set)):
select = ','.join(select)
base = 'SELECT {} FROM data'.format(select)
if isinstance(start, datetime):
start = start.isoformat(sep='T')
if isinstance(end, datetime):
end = end.isoformat(sep='T')
if start is None and end is None:
cursor.execute(base + ';')
elif start is not None and end is None:
cursor.execute(base + ' WHERE datetime >= ?;', (start,))
elif start is None and end is not None:
cursor.execute(base + ' WHERE datetime <= ?;', (end,))
else:
cursor.execute(base + ' WHERE datetime BETWEEN ? AND ?;', (start, end))
data = cursor.fetchall()
cursor.close()
db.close()
return data
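    # Illustrative query against a logged database (the path and dates are
    # placeholders):
    #
    #   >>> rows = iTHX.data('iTHX-W3_12345.sqlite3',
    #   ...                  start='2023-01-01', end='2023-02-01',
    #   ...                  select=['datetime', 'temperature', 'humidity'])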
def _get(self, message, probe, size=None):
if not (probe == 1 or probe == 2):
# iTHX-SD supports probe=3 but we don't have one of those devices to test
self.raise_exception('Invalid probe number, {}. Must be either 1 or 2'.format(probe))
command = '*SR' + message
if probe > 1:
command += str(probe)
try:
ret = self.query(command, size=size)
except ConnectionResetError:
# for some reason the socket closes if a certain amount of time passes and no
# messages have been sent. For example, querying the temperature, humidity and
# dew point every >60 seconds raised:
# [Errno errno.ECONNRESET] An existing connection was forcibly closed by the remote host
self.reconnect(max_attempts=1)
return self._get(message, probe, size=size) # retry
else:
values = tuple(float(v) for v in re.split(r'[,;]', ret))
if len(values) == 1:
return values[0]
else:
return values
def convert_datetime(value):
"""Convert a date and time to a :class:`~datetime.datetime` object.
Parameters
----------
value : :class:`bytes`
The datetime value from an SQLite database.
Returns
-------
:class:`datetime.datetime`
The `value` as a datetime object.
"""
try:
# datetime.fromisoformat is available in Python 3.7+
return datetime.fromisoformat(value.decode())
except AttributeError:
# mimics the sqlite3.dbapi2.convert_timestamp function
datepart, timepart = value[:10], value[11:]
year, month, day = map(int, datepart.split(b'-'))
timepart_full = timepart.split(b'.')
hours, minutes, seconds = map(int, timepart_full[0].split(b':'))
if len(timepart_full) == 2:
microseconds = int('{:0<6.6}'.format(timepart_full[1].decode()))
else:
microseconds = 0
return datetime(year, month, day, hours, minutes, seconds, microseconds)
# Do not use the builtin TIMESTAMP converter since it does not support
# the T separator between the date and time. Also, according to
# https://www.sqlite.org/lang_datefunc.html the name DATETIME seems
# to be more logical than TIMESTAMP as a field name.
sqlite3.register_converter('DATETIME', convert_datetime)
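# Minimal sketch of the registered converter in action (assumes a table whose column
# is declared as DATETIME; all names below are illustrative only):
#
#     con = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
#     con.execute('CREATE TABLE t (dt DATETIME);')
#     con.execute('INSERT INTO t VALUES (?);', ('2021-01-02T03:04:05',))
#     value = con.execute('SELECT dt FROM t;').fetchone()[0]
#     # value is datetime.datetime(2021, 1, 2, 3, 4, 5), produced by convert_datetime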
|
|
from os import urandom
from Queue import Queue
from os.path import abspath
from random import randrange
from cPickle import dumps, loads
from sys import _getframe, modules
from exceptions import BaseException
from stackless import channel, schedule
from _weakref import proxy, ref as weakref
from copy import deepcopy, error as CopyError
from thread import allocate_lock, start_new_thread
try:
from gdbm import open as dbm
except ImportError:
from shelve import open as dbm
def open(filename, pool=None):
pool = get_global_threadpool() if pool is None else pool
e = channel()
pool.queue.put((e, BTreeBaseConnectionWrapper, (filename, pool), {}))
errno, e = e.receive()
if errno == 0:
return e
raise e
class Pool:
def __init__(self, n=16):
self.queue = Queue()
for i in xrange(n):
start_new_thread(self.pipe, ())
def __call__(self, func):
def wrapper(*args, **kw):
e = channel()
self.queue.put((e, func, args, kw))
errno, e = e.receive()
if errno == 0:
return e
raise e
return wrapper
def pipe(self):
while True:
rst, func, args, kw = self.queue.get()
try:
result = func(*args, **kw)
except BaseException, e:
rst.send((-1, e))
else:
rst.send((0, result))
class Base(object):
@property
def _p_ldata(self):
try:
return self.__dict__['_p_data']
except KeyError:
self.__dict__['_p_data'] = self._p_conn[self._p_key]
return self._p_data
def __getstate__(self):
attrs = self.__dict__
try:
return attrs['_p_key']
except KeyError:
conn = _getframe(1).f_locals['self']
attrs['_p_key' ] = conn << self
attrs['_p_conn'] = proxy(conn)
return attrs['_p_key']
def __setstate__(self, key):
attrs = self.__dict__
attrs['_p_key'] = key
attrs['_p_conn'] = proxy(_getframe(1).f_locals['self'])
del attrs['_p_data']
def __deepcopy__(self, memo):
if '__del__' not in memo:
raise CopyError('uncopyable object')
attrs = self.__dict__
try:
key = attrs['_p_key']
except KeyError:
return 0
try:
if self._p_conn.closed:
return 0
deepcopy(self._p_ldata, {'__del__': 0})
del self._p_conn[key]
except ReferenceError:
pass
return 0
def _p_note_change(self):
try:
key = self.__dict__['_p_key']
except KeyError:
pass
else:
self._p_conn[key] = self
class Persistent(Base):
def __new__(klass, *args, **kw):
o = __new__(klass)
o.__dict__['_p_data'] = {}
return o
def __getattr__(self, name):
try:
return self._p_ldata[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
if name[:3] == '_p_':
self.__dict__[name] = value
return
self._p_ldata[name] = value
self._p_note_change()
def __delattr__(self, name):
if name[:3] == '_p_':
try:
del self.__dict__[name]
except KeyError:
raise AttributeError(name)
else:
return
try:
o = self._p_ldata[name]
except KeyError:
raise AttributeError(name)
deepcopy(o, {'__del__': 0})
del self._p_ldata[name]
self._p_note_change()
def __getnewargs__(self):
return ()
class BNode(Base):
@property
def min_item(self):
if not self._p_ldata[1]:
return self._p_data[0][0]
else:
return self._p_data[1][0].min_item
@property
def max_item(self):
if not self._p_ldata[1]:
return self._p_data[0][-1]
else:
return self._p_data[1][-1].max_item
def __init__(self, key=None, conn=None):
if key:
self._p_key, self._p_conn = key, conn
else:
self._p_data = [[], None]
def __setstate__(self, key):
self._p_key = key
self._p_conn = proxy(_getframe(1).f_locals['self'])
def __len__(self):
result = len(self._p_ldata[0])
for node in self._p_data[1] or []:
result += len(node)
return result
def __delitem__(self, key):
p = self.get_position(key)
matches = p < len(self._p_ldata[0]) and self._p_data[0][p][0] == key
if not self._p_data[1]:
if matches:
del self._p_data[0][p]
self._p_note_change()
else:
raise KeyError(key)
else:
node = self._p_data[1][p]
lower_sibling = p > 0 and self._p_data[1][p - 1]
upper_sibling = p < len(self._p_data[1]) - 1 and self._p_data[1][p + 1]
if matches:
if node and len(node._p_ldata[0]) >= minimum_degree:
extreme = node.max_item
del node[extreme[0]]
self._p_data[0][p] = extreme
elif upper_sibling and len(upper_sibling._p_ldata[0]
) >= minimum_degree:
extreme = upper_sibling.min_item
del upper_sibling[extreme[0]]
self._p_data[0][p] = extreme
else:
extreme = upper_sibling.min_item
del upper_sibling[extreme[0]]
node._p_data[0] = node._p_ldata[0] + [extreme
] + upper_sibling._p_ldata[0]
if node._p_data[1]:
node._p_data[1] = node._p_data[1] + upper_sibling._p_data[1]
del self._p_data[0][p]
del self._p_data[1][p + 1]
self._p_note_change()
else:
if not (node and len(node._p_ldata[0]) >= minimum_degree):
if lower_sibling and len(lower_sibling._p_ldata[0]
) >= minimum_degree:
node._p_data[0].insert(0, self._p_data[0][p - 1])
self._p_data[0][p - 1] = lower_sibling._p_data[0][-1]
del lower_sibling._p_data[0][-1]
if node._p_data[1]:
node._p_data[1].insert(0, lower_sibling._p_data[1][-1])
del lower_sibling._p_data[1][-1]
lower_sibling._p_note_change()
elif upper_sibling and len(upper_sibling._p_ldata[0]
) >= minimum_degree:
node._p_data[0].append(self._p_data[0][p])
self._p_data[0][p] = upper_sibling._p_data[0][0]
del upper_sibling._p_data[0][0]
if node._p_data[1]:
node._p_data[1].append(upper_sibling._p_data[1][0])
del upper_sibling._p_data[1][0]
upper_sibling._p_note_change()
elif lower_sibling:
p1 = p - 1
node._p_data[0] = (lower_sibling._p_ldata[0] + [self._p_data[0][p1]] +
node._p_data[0])
if node._p_data[1]:
node._p_data[1] = lower_sibling._p_data[1] + node._p_data[1]
del self._p_data[0][p1]
del self._p_data[1][p1]
else:
node._p_data[0] = (node._p_data[0] + [self._p_data[0][p]] +
upper_sibling._p_ldata[0])
if node._p_data[1]:
node._p_data[1] = node._p_data[1] + upper_sibling._p_data[1]
del self._p_data[0][p]
del self._p_data[1][p + 1]
self._p_note_change()
node._p_note_change()
assert (node and len(node._p_data[0]) >= minimum_degree)
del node[key]
if not self._p_data[0]:
o = self._p_data[1][0]
self._p_data[0] = o._p_ldata[0]
self._p_data[1] = o._p_data[1]
def __iter__(self):
if not self._p_ldata[1]:
for item in self._p_data[0]:
yield item
else:
for position, item in enumerate(self._p_data[0]):
for it in self._p_data[1][position]:
yield it
yield item
for it in self._p_data[1][-1]:
yield it
def __reversed__(self):
if not self._p_ldata[1]:
for item in reversed(self._p_data[0]):
yield item
else:
for item in reversed(self._p_data[1][-1]):
yield item
for position in range(len(self._p_data[0]) - 1, -1, -1):
yield self._p_data[0][position]
for item in reversed(self._p_data[1][position]):
yield item
def iter_from(self, key):
position = self.get_position(key)
if not self._p_data[1]:
for item in self._p_data[0][position:]:
yield item
else:
for item in self._p_data[1][position].iter_from(key):
yield item
for p in range(position, len(self._p_data[0])):
yield self._p_data[0][p]
for item in self._p_data[1][p + 1]:
yield item
def iter_backward_from(self, key):
position = self.get_position(key)
if not self._p_data[1]:
for item in reversed(self._p_data[0][:position]):
yield item
else:
for item in self._p_data[1][position].iter_backward_from(key):
yield item
for p in range(position - 1, -1, -1):
yield self._p_data[0][p]
for item in reversed(self._p_data[1][p]):
yield item
def get_position(self, key):
for position, item in enumerate(self._p_ldata[0]):
if item[0] >= key:
return position
return len(self._p_data[0])
def search(self, key):
position = self.get_position(key)
if position < len(self._p_data[0]) and self._p_data[0][position][0] == key:
return self._p_data[0][position]
elif not self._p_data[1]:
return None
else:
return self._p_data[1][position].search(key)
def insert_item(self, item):
assert not len(self._p_ldata[0]) == 2 * minimum_degree - 1
key = item[0]
position = self.get_position(key)
if position < len(self._p_data[0]) and self._p_data[0][position][0] == key:
self._p_data[0][position] = item
self._p_note_change()
elif not self._p_data[1]:
self._p_data[0].insert(position, item)
self._p_note_change()
else:
child = self._p_data[1][position]
if len(child._p_ldata[0]) == 2 * minimum_degree - 1:
self.split_child(position, child)
if key == self._p_data[0][position][0]:
self._p_data[0][position] = item
self._p_note_change()
else:
if key > self._p_data[0][position][0]:
position += 1
self._p_data[1][position].insert_item(item)
else:
self._p_data[1][position].insert_item(item)
def split_child(self, position, child):
assert len(self._p_ldata[0]) != 2 * minimum_degree - 1
assert self._p_data[1]
assert self._p_data[1][position] is child
assert len(child._p_ldata[0]) == 2 * minimum_degree - 1
bigger = BNode()
middle = minimum_degree - 1
splitting_key = child._p_data[0][middle]
bigger._p_data[0] = child._p_data[0][middle + 1:]
child._p_data[0] = child._p_data[0][:middle]
assert len(bigger._p_data[0]) == len(child._p_data[0])
if child._p_data[1]:
bigger._p_data[1] = child._p_data[1][middle + 1:]
child._p_data[1] = child._p_data[1][:middle + 1]
assert len(bigger._p_data[1]) == len(child._p_data[1])
self._p_data[0].insert(position, splitting_key)
self._p_data[1].insert(position + 1, bigger)
child._p_note_change()
self._p_note_change()
class BTree(Base):
@property
def min_item(self):
assert self._p_root._p_ldata[0], 'empty BTree has no min item'
key, value = self._p_root.min_item
return key, value
@property
def max_item(self):
assert self._p_root._p_ldata[0], 'empty BTree has no max item'
key, value = self._p_root.max_item
return key, value
@property
def _p_key(self):
return self._p_root._p_key
@property
def _p_conn(self):
return self._p_root._p_conn
@property
def _p_data(self):
return self._p_root._p_data
def __new__(klass, *args, **kw):
o = __new__(klass)
o._p_root = BNode()
return o
def __deepcopy__(self, memo):
if '__del__' not in memo:
raise CopyError('uncopyable object')
node = self._p_root
try:
key = node._p_key
except AttributeError:
return 0
try:
if node._p_conn.closed:
return 0
deepcopy(node._p_ldata, {'__del__': 0})
del node._p_conn[key]
except ReferenceError:
pass
return 0
def __getstate__(self):
node = self._p_root
try:
return node._p_key
except AttributeError:
conn = _getframe(1).f_locals['self']
node._p_key = conn << node
node._p_conn = proxy(conn)
return node._p_key
def __setstate__(self, key):
self._p_pnt = proxy(_getframe(2).f_locals['self'])
self._p_root = BNode(key, proxy(_getframe(1).f_locals['self']))
def __repr__(self):
return '{%s}' % ', '.join('%r: %r' % (key, value
) for key, value in self.items())
def __len__(self):
return len(self._p_root)
def __nonzero__(self):
return bool(self._p_root._p_ldata[0])
def __iter__(self):
for item in self._p_root:
yield item[0]
def __reversed__(self):
for item in reversed(self._p_root):
yield item[0]
def __contains__(self, key):
return self._p_root.search(key) is not None
def __getitem__(self, key):
item = self._p_root.search(key)
if item is None:
raise KeyError(key)
return item[1]
def __setitem__(self, key, value=True):
if len(self._p_root._p_ldata[0]) == 2 * minimum_degree - 1:
node = BNode()
node._p_data[1] = [self._p_root]
node.split_child(0, node._p_data[1][0])
self._p_root = node
if hasattr(self, '_p_pnt'):
self._p_pnt._p_note_change()
self._p_root.insert_item((key, value))
def __delitem__(self, key):
item = self._p_root.search(key)
if item is None:
raise KeyError(key)
del self._p_root[key]
deepcopy(key , {'__del__': 0})
deepcopy(item, {'__del__': 0})
def has_key(self, key):
return self._p_root.search(key) is not None
def get(self, key, default=None):
item = self._p_root.search(key)
if item is None:
return default
return item[1]
def setdefault(self, key, value):
item = self._p_root.search(key)
if item is None:
self[key] = value
return value
return item[1]
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError( (
'update expected at most 1 argument, '
'got %s') % len(args) )
items = args[0]
if hasattr(items, 'iteritems'):
item_sequence = items.iteritems()
else:
item_sequence = items
for key, value in item_sequence:
self[key] = value
for key, value in kwargs.iteritems():
self[key] = value
def clear(self):
self._p_root = BNode()
if hasattr(self, '_p_pnt'):
self._p_pnt._p_note_change()
def iterkeys(self):
for item in self._p_root:
yield item[0]
def keys(self):
return list(self.iterkeys())
def itervalues(self):
for item in self._p_root:
yield item[1]
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
def iteritems(self):
for key, value in self._p_root:
yield (key, value)
def items_backward(self):
for key, value in reversed(self._p_root):
yield (key, value)
def items_from(self, key, closed=True):
for key2, value in self._p_root.iter_from(key):
if closed or key2 != key:
yield (key2, value)
def items_backward_from(self, key, closed=False):
if closed:
item = self._p_root.search(key)
if item is not None:
yield (item[0], item[1])
for key, value in self._p_root.iter_backward_from(key):
yield (key, value)
def items_range(self, start, end, closed_start=True, closed_end=False):
if start <= end:
for item in self.items_from(start, closed=closed_start):
if item[0] > end:
break
if closed_end or item[0] < end:
yield item
else:
for item in self.items_backward_from(start, closed=closed_start):
if item[0] < end:
break
if closed_end or item[0] > end:
yield item
class BTreeBaseConnectionWrapper(BTree):
def __init__(self, filename, pool=None):
self._p_conn_ref = Connection(filename, pool=pool)
try:
self._p_root_ref = self._p_conn_ref.load(id0)
except KeyError:
self._p_root_ref = BTree()
self._p_conn_ref.dump(id0, self._p_root_ref)
self._p_root = self._p_root_ref._p_root
def __del__(self):
pass
def __getstate__(self):
raise TypeError('can\'t pickle connection objects')
def __setitem__(self, key, value=True):
if len(self._p_root._p_ldata[0]) == 2 * minimum_degree - 1:
node = BNode()
node._p_data[1] = [self._p_root]
node.split_child(0, node._p_data[1][0])
self._p_root_ref._p_root = self._p_root = node
self._p_conn_ref.note_change(id0)
self._p_root.insert_item((key, value))
def clear(self):
self._p_root_ref._p_root = self._p_root = BNode() # reset the root node, mirroring __setitem__
self._p_conn_ref.note_change(id0)
def sync(self):
return self._p_conn_ref.sync()
def close(self):
return self._p_conn_ref.close()
class Connection:
def __init__(self, filename, pool=None):
filename = abspath(filename)
pool = get_global_threadpool() if pool is None else pool
self.db, self.closed = dbm(filename, 'c'), False
self.filename, self.queue, self.pool = filename, pool.queue, pool
self.invalid, self.cache_invalid_lock = set(), allocate_lock()
self.cache, self.changed, self.deleted, self.created = {}, {}, {}, []
self.register_connection()
def __del__(self):
if hasattr(self, 'db'):
e = channel()
self.queue.put((e, self._close, (), {}))
errno, e = e.receive()
if errno != 0:
raise e
def __lshift__(self, o):
oid = uuid4()
while self.db.has_key(oid) or oid in self.created:
oid = uuid4()
self.db[oid] = dumps(o._p_data, 2)
if oid in self.deleted:
del self.deleted[oid]
self.cache[oid] = o._p_data
self.created.append(oid)
return oid
def __getitem__(self, key):
if key in self.deleted:
raise KeyError(key)
try:
return self.cache[key]
except KeyError:
pass
while not self.cache_invalid_lock.acquire(0):
schedule()
try:
if key in self.invalid:
raise ReadConflictError(key)
e = channel()
self.queue.put((e, dbmget, (self.db, key), {}))
errno, e = e.receive()
if errno != 0:
raise e
o = loads(e)
self.cache[key] = o
finally:
self.cache_invalid_lock.release()
return o
def __setitem__(self, key, o):
if key in self.deleted:
del self.deleted[key]
self.changed[key] = None
def __delitem__(self, key):
try:
del self.changed[key]
except KeyError:
pass
self.deleted[key] = None
def get(self, key):
e = channel()
self.queue.put((e, dbmget, (self.db, key), {}))
errno, e = e.receive()
if errno != 0:
raise e
return loads(e)
def sync(self):
if self.changed or self.deleted:
e = channel()
self.queue.put((e, self._sync, (), {}))
errno, e = e.receive()
if errno == 0:
return
raise e
def close(self):
if hasattr(self, 'db'):
if self.changed or self.deleted:
e = channel()
self.queue.put((e, self._close_and_sync, (), {}))
errno, e = e.receive()
if errno != 0:
raise e
else:
e = channel()
self.queue.put((e, self._close, (), {}))
errno, e = e.receive()
if errno != 0:
raise e
def _close(self):
self.db.close()
del self.db
self.unregister_connection()
self.closed = True
def _close_and_sync(self):
self._sync()
self.db.close()
del self.db
self.unregister_connection()
self.closed = True
def _sync(self):
commit_lock, dct = environ[1][self.filename]
commit_lock.acquire()
try:
self.cache_invalid_lock.acquire()
try:
if self.invalid:
raise WriteConflictError(self.invalid)
connections = []
for dummy, ref in dct.items():
conn = ref()
if conn is not self:
try:
conn.cache_invalid_lock.acquire()
except:
pass
else:
connections.append(conn)
try:
for key in self.changed:
self.db[key] = dumps(self.cache[key], 2)
for key in self.deleted:
del self.db[key]
self.db.sync()
changed = set(self.changed)
changed.update(self.deleted)
changed.update(self.created)
for conn in connections:
if has_intersection(conn.cache, changed):
conn.invalid.update(changed)
self.changed, self.deleted, self.created, \
self.invalid = {}, {}, [], set()
finally:
for conn in connections:
try:
conn.cache_invalid_lock.release()
except:
pass
finally:
self.cache_invalid_lock.release()
finally:
commit_lock.release()
def load(self, oid):
try:
o = self.cache[oid]
except KeyError:
self.cache_invalid_lock.acquire()
try:
if oid in self.invalid:
raise ReadConflictError(oid)
o = loads(self.db[oid])
self.cache[oid] = o
finally:
self.cache_invalid_lock.release()
return o
def dump(self, oid, o):
self.cache_invalid_lock.acquire()
try:
self.db[oid] = dumps(o, 2)
self.cache[oid] = o
self.cache[o._p_key] = o._p_data
self.changed[oid] = None
finally:
self.cache_invalid_lock.release()
def note_change(self, oid):
self.changed[oid] = None
def register_connection(self):
envlock, envfiles = environ
envlock.acquire()
try:
try:
lock, dct = envfiles[self.filename]
except KeyError:
lock, dct = allocate_lock(), {}
envfiles[self.filename] = (lock, dct)
dct[id(self)] = weakref(self)
finally:
envlock.release()
def unregister_connection(self):
envlock, envfiles = environ
try:
lock, dct = envfiles[self.filename]
lock.acquire()
try:
del dct[id(self)]
finally:
lock.release()
except KeyError:
pass
else:
envlock.acquire()
try:
if not dct:
del envfiles[self.filename]
finally:
envlock.release()
def has_intersection(dct1, dct2):
if len(dct1) > len(dct2):
for i in dct2:
if i in dct1:
return True
else:
for i in dct1:
if i in dct2:
return True
return False
def get_global_threadpool():
if global_threadpool is None:
try:
import global_threadpool as pool
except ImportError:
pool = Pool()
modules['global_threadpool'] = pool
globals()['global_threadpool'] = pool
else:
globals()['global_threadpool'] = pool
return pool
return global_threadpool
try:
urandom(16)
except NotImplementedError:
def uuid4():
n = long(fmt % tuple(randrange(256) for i in range16), 16)
n &= n1; n |= n2; n &= n3; n |= n4
return ''.join(chr((n >> shift) & 0xff) for shift in range01288)
else:
def uuid4():
n = long(fmt % tuple(map(ord, urandom(16))), 16)
n &= n1; n |= n2; n &= n3; n |= n4
return ''.join(chr((n >> shift) & 0xff) for shift in range01288)
__new__ = object.__new__
dbmget = lambda db, key: db[key]
global_threadpool, environ = None, (allocate_lock(), {})
ConflictError = type('ConflictError', (Exception, ), {})
ReadConflictError = type('ReadConflictError' , (ConflictError, ), {})
WriteConflictError = type('WriteConflictError', (ConflictError, ), {})
range01288 = list(reversed(range(0, 128, 8)))
n1, n2, n3, n4 = ~(0xc000 << 48L), 0x8000 << 48L, ~(0xf000 << 64L), 4 << 76L
range16, fmt, id0, minimum_degree = '\x00' * 16, '%02x' * 16, '\x00' * 16, 16
|
|
import os
import time
import pickle
import rsa
import socket
import random
import win32api
import ctypes
import win32com
import urllib2
from config import *
from core import *
from models import *
from rsa.bigfile import *
class Dreamr():
clientID = ""
crypt = None
store = None
keystore = None
unseen = None
exiting = False
isCorp = False
isDomainController = False
isAdmin = False
started = False
updating = False
server = False
nat = False
internal = ""
external = ""
hostlist = []
# Create the initial class to be used as globals
def __init__(self):
global KeyPath
debug("context", "Started CTX with key path: %s" % KeyPath)
self.store = PSSTMessageStore()
self.keystore = Keystore(KeyPath)
self.unseen = Unseen(KeyPath)
self.internal = socket.gethostbyname(socket.gethostname())
# Calculate ID
self.clientID = win32api.GetComputerName().lower() + "." + win32api.GetDomainName().lower()
self.isAdmin = ctypes.windll.shell32.IsUserAnAdmin()
# Get external IP address
try:
# Get External IP Address
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
target = "http://www.icanhazip.com/"
response = opener.open(target).read()
self.external = response.strip()
except Exception as e:
print(str(e))
# Calculate corp and DC
#TODO
def startCrypto(self, public=None, private=None):
if not self.started:
self.crypt = Crypto(KeyPath, public, private)
self.started = True
# Hold on to messages that need to be sent at a later date.
# getLastValid() and appendMessage() handle messages that need to be
# forwarded on to other hosts as they connect.
# popMessageQueue() and appendMessageQueue() manage the queue of messages
# held back for later delivery.
class PSSTMessageStore():
messages = [ PeerMessage ]
messageQueue = [ PeerMessage ]
# Check to see the last times we saw hosts
def __init__(self):
debug("PSST", "Message store init")
return
# looks in the message store for messages that haven't
# expired, and returns only the first. After it expires
# it will get removed, and the next will get sent
def getLastValid(self):
result = []
if (len(self.messages) > 0):
sample = self.messages
sample.reverse()
for message in sample:
if ((int(time.time()) - int(message.msgTime)) < MSG_EXPIRE):
debug("psst", "pop valid command from store")
return message
else:
try:
self.messages.remove(message)
except Exception as e:
debug("psst", "remove valid command from store")
return result
# add a message to the message store if the message should
# be forwarded to other hosts
def appendMessage(self, message):
if (message):
self.messages.append(message)
return
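# Rough usage sketch for the store (hypothetical values; PeerMessage comes from
# models and a no-argument constructor is assumed here for illustration):
#
#     store = PSSTMessageStore()
#     msg = PeerMessage()
#     msg.msgTime = int(time.time())
#     store.appendMessage(msg)
#     store.getLastValid()   # returns msg until MSG_EXPIRE seconds have elapsed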
# Keep hostlist, verify public keys, and invalidate hosts
# after a certain amount of time
class Keystore():
keystore = []
tempkeys = []
pendingHosts = []
path = ""
# Check to see the last times we saw hosts
def __init__(self, path):
self.path = path
self.loadKeystore()
return
# Check to see that hosts are still valid
def validateHost(self, unknownHost):
# This is the way to bypass the check
if (unknownHost.hostAddr == "network-control"):
print("possible message from control -> all threads on deck")
return True
# Test each IP address for validity without returning True
try:
socket.inet_aton(unknownHost.hostAddr)
except socket.error:
print("message had invalid sender IP")
return False
# See if the host is already in the keystore
for host in self.keystore:
if (host != None):
if (int(time.time()) - int(host.lastSeen) >= (HOST_EXPIRE * 2)):
debug("keystore", "That host hasn't been seen in so long. Re-registering")
host.lastSeen = int(time.time())
self.saveKeystore()
return True
if (int(time.time()) - int(host.lastSeen) >= HOST_EXPIRE):
debug("keystore", "That host hasn't been seen in a while.")
return False
if (host.publicKey == unknownHost.publicKey):
host.lastSeen = int(time.time())
self.saveKeystore()
print("host timestamp updated")
return True
else:
debug("keystore", "an empty host popped out")
# Don't add if already full
if (len(self.keystore) > 32):
print("keystore full")
return False
# Host wasn't in keystore at all (register)
self.keystore.append(unknownHost)
if (self.saveKeystore()):
print("registered")
return True
else:
print("the keystore is broken")
return False
def byAddr(self, address):
try:
for host in self.keystore:
if (host != None):
if (host.hostAddr == address):
return host.publicKey
except Exception as e:
debug("keystore", "Error looking up a public key")
return None
# Add a temp message key for a future message that expires after one minute
def addMessageKey(self, ckey):
try:
if (ckey):
self.tempkeys.append(ckey)
debug("keystore", "Added a message key for a future message.")
except Exception as e:
debug("keystore", "Couldn't add that temp key to the keystore. -> %s" % str(e))
return
# Get content keys back from the keystore
def getMessageKey(self, contentID):
if (contentID):
for ckey in self.tempkeys:
if (ckey.contentID == contentID):
if (int(time.time()) - int(ckey.contentTime) < 60):
return ckey
else:
self.tempkeys.remove(ckey)
else:
debug("keystore", "There wasn't a key in the keystore for that message.")
return
# Get content keys back from the keystore
def dumpMessageKeys(self):
results = []
print("Message Keys")
print('-' * 80)
for ckey in self.tempkeys:
print("id: %s, name: %s, time: %s, key: %s" % (ckey.contentID, ckey.contentName, ckey.contentTime, ckey.contentKey))
print('-' * 80)
# Adds a host to the keystore pending list. Once verified, they get moved into
# the keystore
def addPendingHost(self, hostAddr):
self.pendingHosts.append(hostAddr)
return
# Grab the next host in the list to be processed for
# connectivity
def popPendingHost(self):
if (len(self.pendingHosts) > 0):
return self.pendingHosts.pop()
else:
return ""
# Preferred hostlist location if dreamr binary is not available for encoding
def getServerList(self):
result = []
try:
for host in self.keystore:
if (host.isServer):
result.append(host.hostAddr)
else:
debug("keystore", "Host was invalid or not a server.")
except Exception as e:
debug("keystore", "An empty host was in the keystore. -> %s" % str(e))
return result
# return one random host from the keystore
def getRandomServerAddress(self):
result = ""
try:
for host in self.keystore:
if (host.isServer and host.hostAddr):
if (int(time.time()) % 8 == 0):
debug("keystore", "Selected a potential random host: %s. Will we return it, or select another?" % host.hostAddr)
result = host.hostAddr
if not result:
for host in self.keystore:
if (host.isServer and host.hostAddr):
result = host.hostAddr
except Exception as e:
debug("keystore", "error pulling random host from the keystore -> %s" % str(e))
debug("keystore", "sharing info about %s" % result)
return result
def saveKeystore(self):
os.chdir(self.path)
try:
debug("keystore", "saving keystore")
with open("known_hosts", "wb") as handle:
pickle.dump(self.keystore, handle)
if (self.keystore):
print("keystore saved")
return True
except Exception as e:
debug("keystore", "error write keystore file -> %s" % str(e))
return False
# Load keystore from pickle encoded storage
def loadKeystore(self):
os.chdir(self.path)
try:
debug("keystore", "trying to load keystore")
if (os.path.isfile("known_hosts")):
with open("known_hosts", "rb") as handle:
self.keystore = pickle.load(handle)
if (self.keystore):
return True
else:
self.saveKeystore()
except Exception as e:
debug("keystore", "Could not load keystore because of this -> %s" % str(e))
return False
# Clear the keys out of the keystore
def clearKeystore(self):
try:
self.keystore = []
self.saveKeystore()
except Exception as e:
debug("keystore", " -> %s" % str(e))
return
class Unseen():
seenMessages = []
path = ""
def __init__(self, path):
debug("unseen", "starting..")
self.path = path
if (os.path.isfile("seen")):
self.loadSeen()
else:
self.seen(0)
debug("unseen", "initialized")
return
# Have we seen this message before?
# True = seen before
def seen(self, msgID):
try:
for seenID in self.seenMessages:
if (seenID == msgID):
return True
# None of the entries matched, so it's unique.
self.seenMessages.append(msgID)
os.chdir(self.path) # probably KeyPath
with open("seen", "wb") as handle:
pickle.dump(self.seenMessages, handle)
os.chdir(WebPath) # try to stay in WebPath
return False
except Exception as e:
debug("unseen", "seen unknown error -> %s" % str(e))
return False
# Load seen list from pickle encoded storage
def loadSeen(self, dontdothatagain=None):
try:
os.chdir(self.path)
if (os.path.isfile("seen")):
with open("seen", "rb") as handle:
self.seenMessages = pickle.load(handle)
return True
except Exception as e:
debug("unseen", "Load the message IDs that we've already seen. -> %s" % str(e))
return False
# Handle encryption, decryption, and public key verification
class Crypto():
DRMPublicKey = ""
DRMPrivateKey = ""
DRMTrustedKey = ""
path = ""
def __init__(self, path, public=None, private=None):
debug("crypto", "starting crpyto init")
self.path = path
if (public and private):
self.DRMPublicKey = rsa.PublicKey.load_pkcs1(public)
self.DRMPrivateKey = rsa.PrivateKey.load_pkcs1(private)
else:
try:
if (not os.path.isfile("%sdrm.pub" % self.path) or not os.path.isfile("%sdrm.pem" % self.path)):
(self.DRMPublicKey, self.DRMPrivateKey) = rsa.newkeys(RSA_KEY_LEN)
self.saveKeys()
else:
self.loadKeys()
except Exception as e:
debug("crypto", "failed gen keys %s" % str(e))
melt()
os._exit(420)
# Load internal trusted key
self.DRMTrustedKey = rsa.PublicKey.load_pkcs1(TRUSTED_KEY)
return
# Encrypts a large file using the rsa.bigfile helpers
def encryptFile(self, sourceLocation, destLocation):
try:
with open(sourceLocation, 'rb') as fin, open(destLocation, 'wb') as fout:
encrypt_bigfile(fin, fout, self.DRMPublicKey)
except Exception as e:
debug("crypt", "Error encrypting large file. -> %s" % str(e))
return True
def decryptFile(self, sourceLocation, destLocation):
try:
with open(sourceLocation, 'rb') as fin, open(destLocation, 'wb') as fout:
decrypt_bigfile(fin, fout, self.DRMPrivateKey)
except Exception as e:
debug("crypt", "Error decrypting the file. -> %s" % str(e))
return True
# Returns the signature, or an empty string if signing failed
def signMessage(self, data):
signature = ""
try:
signature = rsa.sign(data, self.DRMPrivateKey, 'SHA-1')
except Exception as e:
debug("crypt", "Failed to sign the message to the remote host. -> %s" % str(e))
return signature
def verifyTrustedSignature(self, data, signature):
result = False
try:
rsa.verify(data, signature, self.DRMTrustedKey)
result = True
except Exception as e:
debug("crypt", "Verifying the signature of the remote host's message. -> %s" % str(e))
return result
# Encrypt to recipient's public key, and sign with our private key
def encryptAndSign(self, plaintext, recipientPublic):
try:
content = rsa.encrypt(plaintext, recipientPublic)
signature = rsa.sign(content, self.DRMPrivateKey, "SHA-1")
return pickle.dumps([signature, content])
except Exception as e:
debug("crypt", "Failed encrypting the message. -> %s" % str(e))
return
# Verify signature with sender's public key, then proceed to use
# our own private key to decrypt the message.
# Pass the expected timestamp to thwart replay attacks; it gets encrypted.
def decryptAndVerify(self, ciphertextPickle, senderPublic):
try:
if (ciphertextPickle):
decoded = pickle.loads(ciphertextPickle)
if (decoded):
signature = decoded[0]
content = decoded[1]
if (rsa.verify(content, signature, senderPublic)):
debug("crypt", "Message signature was verified. Decrypting..")
return rsa.decrypt(content, self.DRMPrivateKey)
else:
debug("crypt", "Message was empty. \"No Updates\"")
except Exception as e:
debug("crypt", "Error decrypting message. -> %s" % str(e))
# Load the keys
def loadKeys(self):
debug("crypto", "Loading Keys")
os.chdir(self.path)
with open("drm.pub", "rb") as handle:
data = handle.read()
self.DRMPublicKey = rsa.PublicKey.load_pkcs1(data)
with open("drm.pem", "rb") as handle:
data = handle.read()
self.DRMPrivateKey = rsa.PrivateKey.load_pkcs1(data)
debug("core", "keys loaded")
# Save the keys
def saveKeys(self):
debug("crypto", "saving keys")
os.chdir(self.path)
try:
if (self.DRMPublicKey and self.DRMPrivateKey):
with open("drm.pub", "wb") as handle:
handle.write(self.DRMPublicKey.save_pkcs1(format='PEM'))
with open("drm.pem", "wb") as handle:
handle.write(self.DRMPrivateKey.save_pkcs1(format='PEM'))
return True
except Exception as e:
debug("crypt", "Error saving keys -> %s" % str(e))
return False
return False
def printProgress(self, progress):
print "crypt_progress %s" % progress
return
class Creds():
path = ""
creds = None
def __init__(self, path):
debug("creds", "initialized")
self.path = path
os.chdir(self.path)
try:
if (os.path.isfile("tmp")):
with open("tmp", "rb") as handle:
self.creds = pickle.load(handle)
else:
self.save()
except:
self.save()
pass # nothing else we can do here
return
def save(self):
os.chdir(self.path)
try:
with open("tmp", "wb") as handle:
pickle.dump(self.creds, handle)
except:
pass
Dreamr = Dreamr()
|
|
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Django specific
from celery.decorators import task
from celery.signals import worker_process_init, worker_process_shutdown
from .models import Query, Result, ResultType, Metadata
import numpy as np
import math
import xarray as xr
import collections
import gdal
import shutil
import sys
import osr
import os
import datetime
from collections import OrderedDict
from dateutil.tz import tzutc
from utils.data_access_api import DataAccessApi
from utils.dc_mosaic import create_mosaic_iterative, create_median_mosaic, create_max_ndvi_mosaic, create_min_ndvi_mosaic
from utils.dc_utilities import get_spatial_ref, save_to_geotiff, create_rgb_png_from_tiff, create_cfmask_clean_mask, split_task
from .utils import update_model_bounds_with_dataset
"""
Celery tasks for loading data and computing mosaic results asynchronously.
"""
# Author: AHDS
# Creation date: 2016-06-23
# Modified by:
# Last modified date:
# constants up top for easy access/modification
base_result_path = '/ui_results/custom_mosaic/'
base_temp_path = '/ui_results_temp/'
# Datacube instance to be initialized.
# A separate DC instance is created for each worker.
dc = None
#default measurements. leaves out all qa bands.
measurements = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'cf_mask']
"""
Functions used to combine time-sliced data after it has been combined geographically.
fill_nodata uses the first timeslice as a base, then uses subsequent slices to
fill in indices that still hold nodata values.
This should be used for most/least recent and anything done in a single time chunk
(e.g. median pixel). Things like max/min NDVI can compound max/min ops between
different timeslices, so they use their own combination functions.
"""
def fill_nodata(dataset, dataset_intermediate):
if dataset_intermediate is None:
return dataset.copy(deep=True)
dataset_out = dataset_intermediate.copy(deep=True)
for key in list(dataset_out.data_vars):
# Get raw data for current variable and mask the data
dataset_out[key].values[dataset_out[key].values==-9999] = dataset[key].values[dataset_out[key].values==-9999]
return dataset_out
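# Tiny worked example of fill_nodata (a sketch; variable names are illustrative only):
#
#     base = xr.Dataset({'red': (('y', 'x'), np.array([[-9999, 5]]))})
#     older = xr.Dataset({'red': (('y', 'x'), np.array([[7, 9]]))})
#     fill_nodata(older, base)['red'].values   # -> [[7, 5]]; only the nodata cell is replaced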
def max_value(dataset, dataset_intermediate):
if dataset_intermediate is None:
return dataset.copy(deep=True)
dataset_out = dataset_intermediate.copy(deep=True)
for key in list(dataset_out.data_vars):
# Get raw data for current variable and mask the data
dataset_out[key].values[dataset.ndvi.values > dataset_out.ndvi.values] = dataset[key].values[dataset.ndvi.values > dataset_out.ndvi.values]
return dataset_out
def min_value(dataset, dataset_intermediate):
if dataset_intermediate is None:
return dataset.copy(deep=True)
dataset_out = dataset_intermediate.copy(deep=True)
for key in list(dataset_out.data_vars):
# Get raw data for current variable and mask the data
dataset_out[key].values[dataset.ndvi.values < dataset_out.ndvi.values] = dataset[key].values[dataset.ndvi.values < dataset_out.ndvi.values]
return dataset_out
# Holds the different compositing algorithms: most/least recent, max/min NDVI, median, etc.
# All options are required; setting an option to None will have the algo/task splitting
# process disregard it.
# geo/time/slices_per_iter values were optimized experimentally.
processing_algorithms = {
'most_recent': {
'geo_chunk_size': 0.5,
'time_chunks': 5,
'time_slices_per_iteration': 5,
'reverse_time': True,
'chunk_combination_method': fill_nodata,
'processing_method': create_mosaic_iterative
},
'least_recent': {
'geo_chunk_size': 0.5,
'time_chunks': 5,
'time_slices_per_iteration': 1,
'reverse_time': False,
'chunk_combination_method': fill_nodata,
'processing_method': create_mosaic_iterative
},
'median_pixel': {
'geo_chunk_size': 0.01,
'time_chunks': None,
'time_slices_per_iteration': None,
'reverse_time': False,
'chunk_combination_method': fill_nodata,
'processing_method': create_median_mosaic
},
'max_ndvi': {
'geo_chunk_size': 0.5,
'time_chunks': 5,
'time_slices_per_iteration': 5,
'reverse_time': False,
'chunk_combination_method': max_value,
'processing_method': create_max_ndvi_mosaic
},
'min_ndvi': {
'geo_chunk_size': 0.5,
'time_chunks': 5,
'time_slices_per_iteration': 5,
'reverse_time': False,
'chunk_combination_method': min_value,
'processing_method': create_min_ndvi_mosaic
}
}
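# Illustrative lookup (sketch): a query's compositor string selects the chunking and
# combination strategy used by the task code below, e.g.
#
#     opts = processing_algorithms['max_ndvi']
#     opts['chunk_combination_method'] is max_value          # True
#     opts['processing_method'] is create_max_ndvi_mosaic    # True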
@task(name="get_data_task")
def create_cloudfree_mosaic(query_id, user_id):
"""
Creates metadata and result objects from a query id. Gets the query, computes metadata for the
parameters and saves the model. Uses the metadata to query the datacube for relevant data and
creates the result. Results are computed in single time slices for memory efficiency and pushed into a
single numpy array containing the total result. This is then used to create the png/tifs that populate
a result model. The result model is continually updated with progress and checked for task
cancellation.
Args:
query_id (int): The ID of the query that will be created.
user_id (string): The ID of the user that requested the query be made.
Returns:
Doesn't return as the method is ran asynchronously.
"""
print("Starting for query:" + query_id)
# it's fair to assume that the query_id exists at this point; if it didn't,
# the task wouldn't have been started.
queries = Query.objects.filter(query_id=query_id, user_id=user_id)
# if there is a matching query other than the one we're using now then do nothing.
# the ui section has already grabbed the result from the db.
if queries.count() > 1:
print("Repeat query, client will receive cached result.")
if Result.objects.filter(query_id=query_id).count() > 0:
queries.update(complete=True)
return
query = queries[0]
print("Got the query, creating metadata.")
result_type = ResultType.objects.get(satellite_id=query.platform, result_id=query.query_type)
# creates the empty result.
result = query.generate_result()
product_details = dc.dc.list_products()[dc.dc.list_products().name == query.product]
# do metadata before actually submitting the task.
metadata = dc.get_scene_metadata(query.platform, query.product, time=(query.time_start, query.time_end), longitude=(
query.longitude_min, query.longitude_max), latitude=(query.latitude_min, query.latitude_max))
if not metadata:
error_with_message(result, "There was an exception when handling this query.")
return
meta = query.generate_metadata(scene_count=metadata['scene_count'], pixel_count=metadata['pixel_count'])
# wrapping this in a try/catch, as it will throw a few different errors
# having to do with memory etc.
try:
# lists all acquisition dates for use in single timeslice queries.
acquisitions = dc.list_acquisition_dates(query.platform, query.product, time=(query.time_start, query.time_end), longitude=(
query.longitude_min, query.longitude_max), latitude=(query.latitude_min, query.latitude_max))
if len(acquisitions) < 1:
error_with_message(result, "There were no acquisitions for this parameter set.")
return
processing_options = processing_algorithms[query.compositor]
# Reversed time = True will make it so most recent = first, oldest = last.
# The default order is oldest -> newest.
lat_ranges, lon_ranges, time_ranges = split_task(resolution=product_details.resolution.values[0][1], latitude=(query.latitude_min, query.latitude_max), longitude=(
query.longitude_min, query.longitude_max), acquisitions=acquisitions, geo_chunk_size=processing_options['geo_chunk_size'], time_chunks=processing_options['time_chunks'], reverse_time=processing_options['reverse_time'])
result.total_scenes = len(time_ranges) * len(lat_ranges)
# Iterates through the acquisition dates with the step in acquisitions_per_iteration.
# Uses a time range computed with the index and index+acquisitions_per_iteration.
# ensures that the start and end are both valid.
print("Getting data and creating mosaic")
# create a temp folder that isn't on the nfs server so we can quickly
# access/delete.
if not os.path.exists(base_temp_path + query.query_id):
os.mkdir(base_temp_path + query.query_id)
os.chmod(base_temp_path + query.query_id, 0o777)
time_chunk_tasks = []
# iterate over the time chunks.
print("Time chunks: " + str(len(time_ranges)))
print("Geo chunks: " + str(len(lat_ranges)))
for time_range_index in range(len(time_ranges)):
# iterate over the geographic chunks.
geo_chunk_tasks = []
for geographic_chunk_index in range(len(lat_ranges)):
geo_chunk_tasks.append(generate_mosaic_chunk.delay(time_range_index, geographic_chunk_index, processing_options=processing_options, query=query, acquisition_list=time_ranges[
time_range_index], lat_range=lat_ranges[geographic_chunk_index], lon_range=lon_ranges[geographic_chunk_index], measurements=measurements))
time_chunk_tasks.append(geo_chunk_tasks)
# holds some acquisition based metadata. dict of objs keyed by date
dataset_out = None
acquisition_metadata = {}
for geographic_group in time_chunk_tasks:
full_dataset = None
tiles = []
for t in geographic_group:
tile = t.get()
# tile is [path, metadata]. Append tiles to list of tiles for concat, compile metadata.
if tile == "CANCEL":
print("Cancelled task.")
shutil.rmtree(base_temp_path + query.query_id)
query.delete()
meta.delete()
result.delete()
return
if tile[0] is not None:
tiles.append(tile)
result.scenes_processed += 1
result.save()
print("Got results for a time slice, computing intermediate product..")
xr_tiles = []
for tile in tiles:
tile_metadata = tile[1]
for acquisition_date in tile_metadata:
if acquisition_date in acquisition_metadata:
acquisition_metadata[acquisition_date]['clean_pixels'] += tile_metadata[acquisition_date]['clean_pixels']
else:
acquisition_metadata[acquisition_date] = {'clean_pixels': tile_metadata[acquisition_date]['clean_pixels']}
xr_tiles.append(xr.open_dataset(tile[0]))
full_dataset = xr.concat(reversed(xr_tiles), dim='latitude')
dataset = full_dataset.load()
dataset_out = processing_options['chunk_combination_method'](dataset, dataset_out)
latitude = dataset_out.latitude
longitude = dataset_out.longitude
# grabs the resolution.
geotransform = [longitude.values[0], product_details.resolution.values[0][1],
0.0, latitude.values[0], 0.0, product_details.resolution.values[0][0]]
#hardcoded crs for now. This is not ideal. Should maybe store this in the db with product type?
crs = str("EPSG:4326")
# remove intermediates
shutil.rmtree(base_temp_path + query.query_id)
# populate metadata values.
dates = list(acquisition_metadata.keys())
dates.sort()
for date in reversed(dates):
meta.acquisition_list += date.strftime("%m/%d/%Y") + ","
meta.clean_pixels_per_acquisition += str(
acquisition_metadata[date]['clean_pixels']) + ","
meta.clean_pixel_percentages_per_acquisition += str(
acquisition_metadata[date]['clean_pixels'] * 100 / meta.pixel_count) + ","
# Count clean pixels and correct for the number of measurements.
clean_pixels = np.sum(dataset_out[measurements[0]].values != -9999)
meta.clean_pixel_count = clean_pixels
meta.percentage_clean_pixels = (meta.clean_pixel_count / meta.pixel_count) * 100
meta.save()
# generate all the results
file_path = base_result_path + query_id
tif_path = file_path + '.tif'
netcdf_path = file_path + '.nc'
png_path = file_path + '.png'
png_filled_path = file_path + "_filled.png"
print("Creating query results.")
save_to_geotiff(tif_path, gdal.GDT_Int16, dataset_out, geotransform, get_spatial_ref(crs),
x_pixels=dataset_out.dims['longitude'], y_pixels=dataset_out.dims['latitude'],
band_order=['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
dataset_out.to_netcdf(netcdf_path)
# we've got the tif, now do the png.
bands = [measurements.index(result_type.red)+1, measurements.index(result_type.green)+1, measurements.index(result_type.blue)+1]
create_rgb_png_from_tiff(tif_path, png_path, png_filled_path=png_filled_path, fill_color=result_type.fill, bands=bands)
# update the results and finish up.
update_model_bounds_with_dataset([result, meta, query], dataset_out)
result.result_path = png_path
result.data_path = tif_path
result.data_netcdf_path = netcdf_path
result.result_filled_path = png_filled_path
result.status = "OK"
result.total_scenes = len(acquisitions)
result.save()
print("Finished processing results")
# all data has been processed, create results and finish up.
query.complete = True
query.query_end = datetime.datetime.now()
query.save()
except:
error_with_message(
result, "There was an exception when handling this query.")
raise
# end error wrapping.
return
@task(name="generate_TOOL_chunk")
def generate_TOOL_chunk(time_num, chunk_num, processing_options=None, query=None, acquisition_list=None, lat_range=None, lon_range=None, measurements=None):
"""
Responsible for generating a piece of a custom mosaic product. Grabs the x/y area specified in the lat/lon ranges, gets all data
from acquisition_list (a list of acquisition dates), and creates the custom mosaic using the function named in processing_options.
Saves the result to disk using the time/chunk numbers, and returns the path and the acquisition-date-keyed metadata.
"""
time_index = 0
iteration_data = None
acquisition_metadata = {}
print("Starting chunk: " + str(time_num) + " " + str(chunk_num))
# holds some acquisition based metadata.
while time_index < len(acquisition_list):
# check if the task has been cancelled. if the result obj doesn't exist anymore then return.
try:
result = Result.objects.get(query_id=query.query_id)
except:
print("Cancelled task as result does not exist")
return
if result.status == "CANCEL":
print("Cancelling...")
return "CANCEL"
# time ranges set based on if the acquisition_list has been reversed or not. If it has, then the 'start' index is the later date, and must be handled appropriately.
start = acquisition_list[time_index] + datetime.timedelta(seconds=1) if processing_options['reverse_time'] else acquisition_list[time_index]
if processing_options['time_slices_per_iteration'] is not None and (time_index + processing_options['time_slices_per_iteration'] - 1) < len(acquisition_list):
end = acquisition_list[time_index + processing_options['time_slices_per_iteration'] - 1]
else:
end = acquisition_list[-1] if processing_options['reverse_time'] else acquisition_list[-1] + datetime.timedelta(seconds=1)
time_range = (end, start) if processing_options['reverse_time'] else (start, end)
raw_data = dc.get_dataset_by_extent(query.product, product_type=None, platform=query.platform, time=time_range, longitude=lon_range, latitude=lat_range, measurements=measurements)
if "cf_mask" not in raw_data:
time_index = time_index + (processing_options['time_slices_per_iteration'] if processing_options['time_slices_per_iteration'] is not None else 10000)
continue
clear_mask = create_cfmask_clean_mask(raw_data.cf_mask)
# update metadata. # here the clear mask has all the clean
# pixels for each acquisition.
for timeslice in range(clear_mask.shape[0]):
time = acquisition_list[time_index + timeslice]
clean_pixels = np.sum(
clear_mask[timeslice, :, :] == True)
if time not in acquisition_metadata:
acquisition_metadata[time] = {}
acquisition_metadata[time]['clean_pixels'] = 0
acquisition_metadata[time][
'clean_pixels'] += clean_pixels
# Removes the cf mask variable from the dataset after the clear mask has been created.
# prevents the cf mask from being put through the mosaicing function as it doesn't fit
# the correct format w/ nodata values for mosaicing.
raw_data = raw_data.drop('cf_mask')
iteration_data = processing_options['processing_method'](
raw_data, clean_mask=clear_mask, intermediate_product=iteration_data)
time_index = time_index + (processing_options['time_slices_per_iteration'] if processing_options['time_slices_per_iteration'] is not None else 10000)
# Save this geographic chunk to disk.
geo_path = base_temp_path + query.query_id + "/geo_chunk_" + \
str(time_num) + "_" + str(chunk_num) + ".nc"
# if this is an empty chunk, just return an empty dataset.
if iteration_data is None:
return [None, None]
iteration_data.to_netcdf(geo_path)
print("Done with chunk: " + str(time_num) + " " + str(chunk_num))
return [geo_path, acquisition_metadata]
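# Shape of the return value (hedged sketch): "CANCEL" when the query was cancelled,
# [None, None] for an empty chunk, otherwise something like
#
#     ['/ui_results_temp/<query_id>/geo_chunk_0_3.nc',
#      {datetime.datetime(2016, 1, 17, 10, 5, 21): {'clean_pixels': 123456}}]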
def error_with_message(result, message):
"""
Errors out under specific circumstances, used to pass error msgs to user. Uses the result path as
a message container: TODO? Change this.
Args:
result (Result): The current result of the query being ran.
message (string): The message to be stored in the result object.
Returns:
Nothing is returned as the method is ran asynchronously.
"""
if os.path.exists(base_temp_path + result.query_id):
shutil.rmtree(base_temp_path + result.query_id)
result.status = "ERROR"
result.result_path = message
result.save()
print(message)
return
# Init/shutdown functions for handling dc instances.
# this is done to prevent synchronization/conflicts between workers when
# accessing DC resources.
@worker_process_init.connect
def init_worker(**kwargs):
"""
Creates an instance of the DataAccessApi worker.
"""
print("Creating DC instance for worker.")
global dc
dc = DataAccessApi()
@worker_process_shutdown.connect
def shutdown_worker(**kwargs):
"""
Deletes the instance of the DataAccessApi worker.
"""
print('Closing DC instance for worker.')
global dc
dc = None
|
|
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields as obj_fields
from nova.virt import hardware
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMACell(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add pagesize field
# Version 1.2: Add cpu_pinning_raw and topology fields
# Version 1.3: Add cpu_policy and cpu_thread_policy fields
VERSION = '1.3'
def obj_make_compatible(self, primitive, target_version):
super(InstanceNUMACell, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 3):
primitive.pop('cpu_policy', None)
primitive.pop('cpu_thread_policy', None)
fields = {
'id': obj_fields.IntegerField(),
'cpuset': obj_fields.SetOfIntegersField(),
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True),
'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
nullable=True),
'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True),
'cpu_policy': obj_fields.CPUAllocationPolicyField(nullable=True),
'cpu_thread_policy': obj_fields.CPUThreadAllocationPolicyField(
nullable=True),
}
cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')
def __init__(self, **kwargs):
super(InstanceNUMACell, self).__init__(**kwargs)
if 'pagesize' not in kwargs:
self.pagesize = None
self.obj_reset_changes(['pagesize'])
if 'cpu_topology' not in kwargs:
self.cpu_topology = None
self.obj_reset_changes(['cpu_topology'])
if 'cpu_pinning' not in kwargs:
self.cpu_pinning = None
self.obj_reset_changes(['cpu_pinning_raw'])
if 'cpu_policy' not in kwargs:
self.cpu_policy = None
self.obj_reset_changes(['cpu_policy'])
if 'cpu_thread_policy' not in kwargs:
self.cpu_thread_policy = None
self.obj_reset_changes(['cpu_thread_policy'])
def __len__(self):
return len(self.cpuset)
def _to_dict(self):
# NOTE(sahid): Used as legacy, could be renamed to
# _legacy_to_dict_ in the future to avoid confusion.
return {'cpus': hardware.format_cpu_spec(self.cpuset,
allow_ranges=False),
'mem': {'total': self.memory},
'id': self.id,
'pagesize': self.pagesize}
@classmethod
def _from_dict(cls, data_dict):
# NOTE(sahid): Used as legacy, could be renamed to
# _legacy_from_dict_ in the future to avoid confusion.
cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
pagesize = data_dict.get('pagesize')
return cls(id=cell_id, cpuset=cpuset,
memory=memory, pagesize=pagesize)
@property
def siblings(self):
cpu_list = sorted(list(self.cpuset))
threads = 0
if self.cpu_topology:
threads = self.cpu_topology.threads
if threads == 1:
threads = 0
return list(map(set, zip(*[iter(cpu_list)] * threads)))
@property
def cpu_pinning_requested(self):
return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED
def pin(self, vcpu, pcpu):
if vcpu not in self.cpuset:
return
pinning_dict = self.cpu_pinning or {}
pinning_dict[vcpu] = pcpu
self.cpu_pinning = pinning_dict
def pin_vcpus(self, *cpu_pairs):
for vcpu, pcpu in cpu_pairs:
self.pin(vcpu, pcpu)
def clear_host_pinning(self):
"""Clear any data related to how this cell is pinned to the host.
Needed for aborting claims as we do not want to keep stale data around.
"""
self.id = -1
self.cpu_pinning = {}
return self
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMATopology(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Takes into account pagesize
# Version 1.2: InstanceNUMACell 1.2
VERSION = '1.2'
fields = {
# NOTE(danms): The 'id' field is no longer used and should be
# removed in the future when convenient
'id': obj_fields.IntegerField(),
'instance_uuid': obj_fields.UUIDField(),
'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'),
}
@classmethod
def obj_from_primitive(cls, primitive, context=None):
if 'nova_object.name' in primitive:
obj_topology = super(InstanceNUMATopology, cls).obj_from_primitive(
primitive, context=None)
else:
# NOTE(sahid): This compatibility code needs to stay until we can
# guarantee that there are no cases of the old format stored in
# the database (or forever, if we can never guarantee that).
obj_topology = InstanceNUMATopology._from_dict(primitive)
obj_topology.id = 0
return obj_topology
@classmethod
def obj_from_db_obj(cls, instance_uuid, db_obj):
primitive = jsonutils.loads(db_obj)
obj_topology = cls.obj_from_primitive(primitive)
if 'nova_object.name' not in db_obj:
obj_topology.instance_uuid = instance_uuid
# No benefit to store a list of changed fields
obj_topology.obj_reset_changes()
return obj_topology
# TODO(ndipanov) Remove this method on the major version bump to 2.0
@base.remotable
def create(self):
values = {'numa_topology': self._to_json()}
db.instance_extra_update_by_uuid(self._context, self.instance_uuid,
values)
self.obj_reset_changes()
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['numa_topology'])
if not db_extra:
raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid)
if db_extra['numa_topology'] is None:
return None
return cls.obj_from_db_obj(instance_uuid, db_extra['numa_topology'])
def _to_json(self):
return jsonutils.dumps(self.obj_to_primitive())
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def _to_dict(self):
        # NOTE(sahid): Used as legacy; could be renamed to _legacy_to_dict_
        # in the future to avoid confusion.
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy; could be renamed to _legacy_from_dict_
        # in the future to avoid confusion.
return cls(cells=[
InstanceNUMACell._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
@property
def cpu_pinning_requested(self):
return all(cell.cpu_pinning_requested for cell in self.cells)
def clear_host_pinning(self):
"""Clear any data related to how instance is pinned to the host.
Needed for aborting claims as we do not want to keep stale data around.
"""
for cell in self.cells:
cell.clear_host_pinning()
return self
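# Illustrative sketch (not part of the original Nova code): obj_from_db_obj()
# above accepts two serialized formats. The hypothetical payload below only
# shows the shape of the legacy plain-dict format handled by _from_dict();
# new-format rows are full NovaObject primitives and are recognized by the
# presence of the 'nova_object.name' key checked in obj_from_primitive().
_EXAMPLE_LEGACY_NUMA_ROW = {
    'cells': [{'id': 0,
               'cpus': '0,1',
               'mem': {'total': 1024},
               'pagesize': None}],
}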
|
|
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Test cases for L{ofspy.federate}.
"""
import unittest
import logging
#logging.disable(logging.WARNING)
from ..contract import Contract
from ..context import Context
from ..game import Game
from ..simulator import Simulator
from ..federate import Federate
class FederateTestCase(unittest.TestCase):
def setUp(self):
self.game = Game(numPlayers=1, initialCash=2000)
self.context = self.game.generateContext()
self.sim = Simulator(entities=[self.context],
initTime=0, timeStep=1, maxTime=3)
self.default = Federate(name='Default')
self.fed = self.context.federations[0].federates[0]
        self.station = self.game.generateElement('GroundSta', pId=0, eId=0, mTypes=['pSGL'])
        self.sat1 = self.game.generateElement('SmallSat', pId=0, eId=1, mTypes=['pSGL', 'SAR'])
        self.sat2 = self.game.generateElement('SmallSat', pId=0, eId=2, mTypes=['pSGL', 'VIS'])
        self.sat3 = self.game.generateElement('SmallSat', pId=0, eId=3, mTypes=['pISL', 'VIS'])
        self.sat4 = self.game.generateElement('SmallSat', pId=0, eId=4, mTypes=['pSGL', 'pISL'])
def tearDown(self):
self.game = None
self.context = None
self.sim = None
self.default = None
self.fed = None
self.station = None
self.sat1 = None
self.sat2 = None
self.sat3 = None
self.sat4 = None
class FederateDesignTestCase(FederateTestCase):
def test_design(self):
self.assertFalse(self.default.design(self.station))
self.assertFalse(self.default.design(self.sat1))
self.assertTrue(self.fed.design(self.station))
self.assertEqual(self.fed.cash, self.fed.initialCash - self.station.getDesignCost())
self.assertTrue(self.station in self.fed.elements)
self.assertTrue(self.fed.design(self.sat1))
self.assertEqual(self.fed.cash, self.fed.initialCash - self.station.getDesignCost()
- self.sat1.getDesignCost())
self.assertTrue(self.sat1 in self.fed.elements)
class FederateCommissionTestCase(FederateTestCase):
def test_commission(self):
self.fed.design(self.sat1)
self.assertFalse(self.fed.commission(
self.sat1, self.context.locations[0], self.context))
self.assertTrue(self.fed.commission(
self.sat1, self.context.locations[3], self.context))
self.assertEqual(self.sat1.location, self.context.locations[3])
self.assertEqual(self.fed.cash, self.fed.initialCash
- self.sat1.getDesignCost()
- self.sat1.getCommissionCost(
self.context.locations[3], self.context))
self.fed.design(self.station)
self.assertFalse(self.fed.commission(
self.station, self.context.locations[1], self.context))
self.assertTrue(self.fed.commission(
self.station, self.context.locations[0], self.context))
self.assertEqual(self.station.location, self.context.locations[0])
class FederateDecommissionTestCase(FederateTestCase):
def test_decommission(self):
self.assertFalse(self.fed.decommission(self.station))
self.fed.design(self.station)
self.fed.commission(self.station, self.context.locations[0], self.context)
self.assertTrue(self.fed.decommission(self.station))
self.assertTrue(self.station not in self.fed.elements)
self.fed.design(self.sat1)
self.fed.commission(self.sat1, self.context.locations[1], self.context)
self.assertTrue(self.fed.decommission(self.sat1))
self.assertTrue(self.sat1 not in self.fed.elements)
class FederateLiquidateTestCase(FederateTestCase):
def test_liquidate(self):
self.fed.design(self.station)
self.fed.commission(self.station, self.context.locations[0], self.context)
self.fed.design(self.sat1)
self.fed.commission(self.sat1, self.context.locations[1], self.context)
self.fed.liquidate(self.context)
self.assertTrue(self.station not in self.fed.elements)
self.assertTrue(self.sat1 not in self.fed.elements)
class FederateCanContractTestCase(FederateTestCase):
def test_canContract(self):
self.sim.init()
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
self.assertTrue(self.fed.canContract(event, self.context))
self.assertFalse(self.fed.canContract(
next(e for e in self.context.futureEvents), self.context))
class FederateContractTestCase(FederateTestCase):
def test_contract(self):
self.sim.init()
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
contract1 = self.fed.contract(event, self.context)
self.assertIsNot(contract1, None)
self.assertIn(contract1, self.fed.contracts)
contract2 = self.fed.contract(
next(e for e in self.context.futureEvents), self.context)
self.assertIs(contract2, None)
self.assertNotIn(contract2, self.fed.contracts)
class FederateCanSenseTestCase(FederateTestCase):
def test_canSense(self):
self.sim.init()
self.fed.design(self.station)
self.fed.commission(self.station,
self.context.locations[0],
self.context)
self.fed.design(self.sat1)
self.fed.commission(self.sat1,
self.context.locations[1],
self.context)
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
self.assertFalse(self.fed.canSense(event, self.sat1, self.context))
self.assertTrue(self.fed.canSense(event, self.sat2, self.context))
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS3.7")
self.assertTrue(self.fed.canSense(event, self.sat2, self.context))
class FederateSenseAndStoreTestCase(FederateTestCase):
def test_senseAndStore(self):
self.sim.init()
self.fed.design(self.station)
self.fed.commission(self.station,
self.context.locations[0],
self.context)
self.fed.design(self.sat1)
self.fed.commission(self.sat1,
self.context.locations[1],
self.context)
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
self.assertTrue(self.fed.senseAndStore(
self.fed.contract(event, self.context),
self.sat2, self.context))
self.assertEqual(len(self.sat2.modules[1].data), 1)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS3.7")
self.assertFalse(self.fed.senseAndStore(
Contract(event), self.sat2, self.context))
class FederateCanTransportTestCase(FederateTestCase):
def test_canTransport(self):
self.sim.init()
self.fed.design(self.station)
self.fed.commission(self.station,
self.context.locations[0],
self.context)
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.fed.design(self.sat3)
self.fed.commission(self.sat3,
self.context.locations[1],
self.context)
self.fed.design(self.sat4)
self.fed.commission(self.sat4,
self.context.locations[5],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
contract1 = self.fed.contract(event, self.context)
self.fed.senseAndStore(contract1, self.sat2, self.context)
data1 = next(d for d in self.sat2.modules[1].data if d.contract is contract1)
self.assertFalse(self.fed.canTransport('pSGL', data1, self.sat2, self.station))
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS3.7")
contract2 = self.fed.contract(event, self.context)
self.fed.senseAndStore(contract2, self.sat3, self.context)
data2 = next(d for d in self.sat3.modules[1].data if d.contract is contract2)
self.assertTrue(self.fed.canTransport('pISL', data2, self.sat3, self.sat4))
self.assertFalse(self.fed.canTransport('pSGL', data2, self.sat4, self.station))
self.sim.advance()
self.assertTrue(self.fed.canTransport('pSGL', data1, self.sat2, self.station))
class FederateTransportTestCase(FederateTestCase):
def test_transport(self):
self.sim.init()
self.fed.design(self.station)
self.fed.commission(self.station,
self.context.locations[0],
self.context)
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.fed.design(self.sat3)
self.fed.commission(self.sat3,
self.context.locations[1],
self.context)
self.fed.design(self.sat4)
self.fed.commission(self.sat4,
self.context.locations[5],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
contract1 = self.fed.contract(event, self.context)
self.fed.senseAndStore(contract1, self.sat2, self.context)
data1 = next(d for d in self.sat2.modules[1].data if d.contract is contract1)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS3.7")
contract2 = self.fed.contract(event, self.context)
self.fed.senseAndStore(contract2, self.sat3, self.context)
data2 = next(d for d in self.sat3.modules[1].data if d.contract is contract2)
self.assertTrue(self.fed.transport('pISL', data2, self.sat3, self.sat4))
self.assertNotIn(data2, self.sat3.modules[0].data)
self.assertNotIn(data2, self.sat3.modules[1].data)
self.assertIn(data2, self.sat4.modules[1].data)
self.fed.resolve(contract2, self.context)
self.sim.advance()
self.assertTrue(self.fed.transport('pSGL', data1, self.sat2, self.station))
self.assertNotIn(data1, self.sat2.modules[0].data)
self.assertNotIn(data1, self.sat2.modules[1].data)
self.assertIn(data1, self.station.modules[0].data)
self.assertFalse(self.fed.transport('pSGL', data1, self.sat4, self.station))
class FederateDeleteDataTestCase(FederateTestCase):
def test_deleteData(self):
self.sim.init()
self.fed.design(self.station)
self.fed.commission(self.station,
self.context.locations[0],
self.context)
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
contract1 = self.fed.contract(event, self.context)
self.fed.senseAndStore(contract1, self.sat2, self.context)
data1 = next(d for d in self.sat2.modules[1].data
if d.contract is contract1)
self.fed.deleteData(contract1)
self.assertNotIn(data1, self.sat2.modules[1].data)
self.assertIn(contract1, self.fed.contracts)
class FederateGetContractTestCase(FederateTestCase):
def test_getContract(self):
self.sim.init()
self.fed.design(self.station)
self.fed.commission(self.station,
self.context.locations[0],
self.context)
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
contract1 = self.fed.contract(event, self.context)
self.assertIn(contract1, self.fed.contracts)
class FederateResolveTestCase(FederateTestCase):
def test_resolve(self):
self.sim.init()
self.fed.design(self.station)
self.fed.commission(self.station,
self.context.locations[0],
self.context)
self.fed.design(self.sat2)
self.fed.commission(self.sat2,
self.context.locations[1],
self.context)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS2.13")
contract1 = self.fed.contract(event, self.context)
self.fed.senseAndStore(contract1, self.sat2, self.context)
data1 = next(d for d in self.sat2.modules[1].data if d.contract is contract1)
self.sim.advance()
event = next(e for e in self.context.currentEvents
if e.name == "VIS3.7")
self.sim.advance()
self.fed.transport('pSGL', data1, self.sat2, self.station)
cash = self.fed.cash
self.assertIn(data1, self.station.modules[0].data)
self.assertTrue(self.fed.resolve(contract1, self.context))
self.assertNotIn(data1, self.station.modules[0].data)
self.assertNotIn(contract1, self.fed.contracts)
self.assertEqual(self.fed.cash, cash + contract1.getValue())
class FederateInitTestCase(FederateTestCase):
def test_init(self):
pass
class FederateTickTestCase(FederateTestCase):
def test_tick(self):
pass
class FederateTockTestCase(FederateTestCase):
def test_tock(self):
pass
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import operator
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import six
from nova import block_device
from nova import exception
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import objects
from nova.objects import base as obj_base
from nova.volume import encryptors
LOG = logging.getLogger(__name__)
class _NotTransformable(Exception):
pass
class _InvalidType(_NotTransformable):
pass
class _NoLegacy(Exception):
pass
def update_db(method):
@functools.wraps(method)
def wrapped(obj, context, *args, **kwargs):
try:
ret_val = method(obj, context, *args, **kwargs)
finally:
obj.save()
return ret_val
return wrapped
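# Illustrative sketch (not part of the original module): @update_db wraps a
# method so that obj.save() runs even when the wrapped call raises, which
# keeps the underlying BlockDeviceMapping row in sync after a failed attach().
# The dummy class below exists only to demonstrate that guarantee.
class _ExampleTracked(object):
    """Standalone sketch of the save-on-exit behaviour of @update_db."""
    def __init__(self):
        self.saved = False
    def save(self):
        self.saved = True
    @update_db
    def fail(self, context):
        # Raises, but the decorator's finally clause still calls self.save().
        raise RuntimeError('boom')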
class DriverBlockDevice(dict):
"""A dict subclass that represents block devices used by the virt layer.
Uses block device objects internally to do the database access.
_fields and _legacy_fields class attributes present a set of fields that
are expected on a certain DriverBlockDevice type. We may have more legacy
versions in the future.
If an attribute access is attempted for a name that is found in the
_proxy_as_attr set, it will be proxied to the underlying object. This
allows us to access stuff that is not part of the data model that all
drivers understand.
The save() method allows us to update the database using the underlying
object. _update_on_save class attribute dictionary keeps the following
mapping:
{'object field name': 'driver dict field name (or None if same)'}
These fields will be updated on the internal object, from the values in the
dict, before the actual database update is done.
"""
_fields = set()
_legacy_fields = set()
_proxy_as_attr = set()
_update_on_save = {'disk_bus': None,
'device_name': None,
'device_type': None}
def __init__(self, bdm):
# TODO(ndipanov): Remove this check when we have all the rpc methods
# use objects for block devices.
if isinstance(bdm, obj_base.NovaObject):
self.__dict__['_bdm_obj'] = bdm
else:
self.__dict__['_bdm_obj'] = objects.BlockDeviceMapping()
self._bdm_obj.update(block_device.BlockDeviceDict(bdm))
self._bdm_obj.obj_reset_changes()
if self._bdm_obj.no_device:
raise _NotTransformable()
self.update({field: None for field in self._fields})
self._transform()
def __getattr__(self, name):
if name in self._proxy_as_attr:
return getattr(self._bdm_obj, name)
else:
super(DriverBlockDevice, self).__getattr__(name)
def __setattr__(self, name, value):
if name in self._proxy_as_attr:
return setattr(self._bdm_obj, name, value)
else:
super(DriverBlockDevice, self).__setattr__(name, value)
def _transform(self):
"""Transform bdm to the format that is passed to drivers."""
raise NotImplementedError()
def legacy(self):
"""Basic legacy transformation.
Basic method will just drop the fields that are not in
_legacy_fields set. Override this in subclass if needed.
"""
return {key: self.get(key) for key in self._legacy_fields}
def attach(self, **kwargs):
"""Make the device available to be used by VMs.
To be overridden in subclasses with the connecting logic for
the type of device the subclass represents.
"""
raise NotImplementedError()
def save(self):
for attr_name, key_name in six.iteritems(self._update_on_save):
lookup_name = key_name or attr_name
if self[lookup_name] != getattr(self._bdm_obj, attr_name):
setattr(self._bdm_obj, attr_name, self[lookup_name])
self._bdm_obj.save()
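# Illustrative sketch (not part of the original module): a subclass of
# DriverBlockDevice only declares its field sets and implements _transform()
# and attach(); attribute proxying, legacy() filtering and save() all come
# from the base class above. The 'cdrom' source type used here is
# hypothetical and not a real Nova BDM source type.
class _ExampleCdromBlockDevice(DriverBlockDevice):
    """Standalone sketch of the DriverBlockDevice subclass contract."""
    _fields = set(['device_name', 'disk_bus'])
    _legacy_fields = _fields
    _proxy_as_attr = set(['volume_size'])  # read through to the BDM object
    def _transform(self):
        if self._bdm_obj.source_type != 'cdrom':
            raise _InvalidType
        self.update({'device_name': self._bdm_obj.device_name,
                     'disk_bus': self._bdm_obj.disk_bus})
    def attach(self, **kwargs):
        pass  # a real subclass would connect the device to the hypervisor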
class DriverSwapBlockDevice(DriverBlockDevice):
_fields = set(['device_name', 'swap_size', 'disk_bus'])
_legacy_fields = _fields - set(['disk_bus'])
_update_on_save = {'disk_bus': None,
'device_name': None}
def _transform(self):
if not block_device.new_format_is_swap(self._bdm_obj):
raise _InvalidType
self.update({
'device_name': self._bdm_obj.device_name,
'swap_size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus
})
class DriverEphemeralBlockDevice(DriverBlockDevice):
_new_only_fields = set(['disk_bus', 'device_type', 'guest_format'])
_fields = set(['device_name', 'size']) | _new_only_fields
_legacy_fields = (_fields - _new_only_fields |
set(['num', 'virtual_name']))
def _transform(self):
if not block_device.new_format_is_ephemeral(self._bdm_obj):
raise _InvalidType
self.update({
'device_name': self._bdm_obj.device_name,
'size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus,
'device_type': self._bdm_obj.device_type,
'guest_format': self._bdm_obj.guest_format
})
def legacy(self, num=0):
legacy_bdm = super(DriverEphemeralBlockDevice, self).legacy()
legacy_bdm['num'] = num
legacy_bdm['virtual_name'] = 'ephemeral' + str(num)
return legacy_bdm
class DriverVolumeBlockDevice(DriverBlockDevice):
_legacy_fields = set(['connection_info', 'mount_device',
'delete_on_termination'])
_new_fields = set(['guest_format', 'device_type',
'disk_bus', 'boot_index'])
_fields = _legacy_fields | _new_fields
_valid_source = 'volume'
_valid_destination = 'volume'
_proxy_as_attr = set(['volume_size', 'volume_id'])
_update_on_save = {'disk_bus': None,
'device_name': 'mount_device',
'device_type': None}
def _transform(self):
if (not self._bdm_obj.source_type == self._valid_source
or not self._bdm_obj.destination_type ==
self._valid_destination):
raise _InvalidType
self.update(
{k: v for k, v in six.iteritems(self._bdm_obj)
if k in self._new_fields | set(['delete_on_termination'])}
)
self['mount_device'] = self._bdm_obj.device_name
try:
self['connection_info'] = jsonutils.loads(
self._bdm_obj.connection_info)
except TypeError:
self['connection_info'] = None
def _preserve_multipath_id(self, connection_info):
if self['connection_info'] and 'data' in self['connection_info']:
if 'multipath_id' in self['connection_info']['data']:
connection_info['data']['multipath_id'] =\
self['connection_info']['data']['multipath_id']
LOG.info(_LI('preserve multipath_id %s'),
connection_info['data']['multipath_id'])
@update_db
def attach(self, context, instance, volume_api, virt_driver,
do_check_attach=True, do_driver_attach=False, **kwargs):
volume = volume_api.get(context, self.volume_id)
if do_check_attach:
volume_api.check_attach(context, volume, instance=instance)
volume_id = volume['id']
context = context.elevated()
connector = virt_driver.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(context,
volume_id,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
self._preserve_multipath_id(connection_info)
# If do_driver_attach is False, we will attach a volume to an instance
# at boot time. So actual attach is done by instance creation code.
if do_driver_attach:
encryption = encryptors.get_encryption_metadata(
context, volume_api, volume_id, connection_info)
try:
virt_driver.attach_volume(
context, connection_info, instance,
self['mount_device'], disk_bus=self['disk_bus'],
device_type=self['device_type'], encryption=encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Driver failed to attach volume "
"%(volume_id)s at %(mountpoint)s"),
{'volume_id': volume_id,
'mountpoint': self['mount_device']},
context=context, instance=instance)
volume_api.terminate_connection(context, volume_id,
connector)
self['connection_info'] = connection_info
if self.volume_size is None:
self.volume_size = volume.get('size')
mode = 'rw'
if 'data' in connection_info:
mode = connection_info['data'].get('access_mode', 'rw')
if volume['attach_status'] == "detached":
# NOTE(mriedem): save our current state so connection_info is in
# the database before the volume status goes to 'in-use' because
# after that we can detach and connection_info is required for
# detach.
self.save()
volume_api.attach(context, volume_id, instance.uuid,
self['mount_device'], mode=mode)
@update_db
def refresh_connection_info(self, context, instance,
volume_api, virt_driver):
# NOTE (ndipanov): A no-op if there is no connection info already
if not self['connection_info']:
return
connector = virt_driver.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(context,
self.volume_id,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
self._preserve_multipath_id(connection_info)
self['connection_info'] = connection_info
def save(self):
# NOTE(ndipanov): we might want to generalize this by adding it to the
# _update_on_save and adding a transformation function.
try:
connection_info_string = jsonutils.dumps(
self.get('connection_info'))
if connection_info_string != self._bdm_obj.connection_info:
self._bdm_obj.connection_info = connection_info_string
except TypeError:
pass
super(DriverVolumeBlockDevice, self).save()
def _call_wait_func(self, context, wait_func, volume_api, volume_id):
try:
wait_func(context, volume_id)
except exception.VolumeNotCreated:
with excutils.save_and_reraise_exception():
if self['delete_on_termination']:
try:
volume_api.delete(context, volume_id)
except Exception as exc:
LOG.warn(_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
{'volume_id': volume_id, 'exc': exc})
class DriverSnapshotBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'snapshot'
_proxy_as_attr = set(['volume_size', 'volume_id', 'snapshot_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
av_zone = instance.availability_zone
snapshot = volume_api.get_snapshot(context,
self.snapshot_id)
vol = volume_api.create(context, self.volume_size, '', '',
snapshot, availability_zone=av_zone)
if wait_func:
self._call_wait_func(context, wait_func, volume_api, vol['id'])
self.volume_id = vol['id']
# Call the volume attach now
super(DriverSnapshotBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
class DriverImageBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'image'
_proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
av_zone = instance.availability_zone
vol = volume_api.create(context, self.volume_size,
'', '', image_id=self.image_id,
availability_zone=av_zone)
if wait_func:
self._call_wait_func(context, wait_func, volume_api, vol['id'])
self.volume_id = vol['id']
super(DriverImageBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
class DriverBlankBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'blank'
_proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
vol_name = instance.uuid + '-blank-vol'
av_zone = instance.availability_zone
vol = volume_api.create(context, self.volume_size, vol_name, '',
availability_zone=av_zone)
if wait_func:
self._call_wait_func(context, wait_func, volume_api, vol['id'])
self.volume_id = vol['id']
super(DriverBlankBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
def _convert_block_devices(device_type, block_device_mapping):
devices = []
for bdm in block_device_mapping:
try:
devices.append(device_type(bdm))
except _NotTransformable:
pass
return devices
convert_swap = functools.partial(_convert_block_devices,
DriverSwapBlockDevice)
convert_ephemerals = functools.partial(_convert_block_devices,
DriverEphemeralBlockDevice)
convert_volumes = functools.partial(_convert_block_devices,
DriverVolumeBlockDevice)
convert_snapshots = functools.partial(_convert_block_devices,
DriverSnapshotBlockDevice)
convert_images = functools.partial(_convert_block_devices,
DriverImageBlockDevice)
convert_blanks = functools.partial(_convert_block_devices,
DriverBlankBlockDevice)
def convert_all_volumes(*volume_bdms):
source_volume = convert_volumes(volume_bdms)
source_snapshot = convert_snapshots(volume_bdms)
source_image = convert_images(volume_bdms)
source_blank = convert_blanks(volume_bdms)
return [vol for vol in
itertools.chain(source_volume, source_snapshot,
source_image, source_blank)]
def convert_volume(volume_bdm):
try:
return convert_all_volumes(volume_bdm)[0]
except IndexError:
pass
def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
def _log_and_attach(bdm):
context = attach_args[0]
instance = attach_args[1]
LOG.info(_LI('Booting with volume %(volume_id)s at %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
bdm.attach(*attach_args, **attach_kwargs)
map(_log_and_attach, block_device_mapping)
return block_device_mapping
def refresh_conn_infos(block_device_mapping, *refresh_args, **refresh_kwargs):
map(operator.methodcaller('refresh_connection_info',
*refresh_args, **refresh_kwargs),
block_device_mapping)
return block_device_mapping
def legacy_block_devices(block_device_mapping):
def _has_legacy(bdm):
try:
bdm.legacy()
except _NoLegacy:
return False
return True
bdms = [bdm.legacy()
for bdm in block_device_mapping
if _has_legacy(bdm)]
# Re-enumerate ephemeral devices
if all(isinstance(bdm, DriverEphemeralBlockDevice)
for bdm in block_device_mapping):
for i, dev in enumerate(bdms):
dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
dev['num'] = i
return bdms
def get_swap(transformed_list):
"""Get the swap device out of the list context.
The block_device_info needs swap to be a single device,
not a list - otherwise this is a no-op.
"""
if not all(isinstance(device, DriverSwapBlockDevice) or
'swap_size' in device
for device in transformed_list):
return None
try:
return transformed_list.pop()
except IndexError:
return None
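# Illustrative sketch (not part of the original module): get_swap() is duck
# typed, so plain dicts carrying a 'swap_size' key satisfy its all() check;
# the last entry is popped out of the list, and an empty list yields None.
def _example_get_swap_usage():
    """Standalone sketch of get_swap() semantics."""
    devices = [{'device_name': '/dev/vdb', 'swap_size': 1024}]
    swap = get_swap(devices)   # returns the single dict, leaving devices empty
    no_swap = get_swap([])     # returns None (the IndexError is swallowed)
    return swap, no_swap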
_IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice,
DriverVolumeBlockDevice, DriverSnapshotBlockDevice,
DriverImageBlockDevice, DriverBlankBlockDevice)
def is_implemented(bdm):
for cls in _IMPLEMENTED_CLASSES:
try:
cls(bdm)
return True
except _NotTransformable:
pass
return False
def is_block_device_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank')
and bdm.destination_type == 'volume'
and is_implemented(bdm))
|
|
# -*- coding: utf-8 -*-
import os
import base64
import hashlib
import hmac
import logging
from multiprocessing.pool import ThreadPool
from multiprocessing import TimeoutError
import json
import requests
import time
from file_crypto import FileCrypto
import model
from exception import BridgeError, FarmerError, SuppliedTokenNotAcceptedError
from http import Client
import threading
import thread
class Uploader:
"""
Attributes:
        email (str): user email address.
        password (str): user password.
        timeout (float): HTTP request timeout, in seconds.
        shards_already_uploaded (int): number of shards already uploaded (0 at start).
        max_retries_contract_negotiation (int): maximum number of contract negotiation retries (default=10).
        max_retries_upload_same_farmer (int): maximum number of upload retries to the same farmer (default=3).
"""
__logger = logging.getLogger('%s.Uploader' % __name__)
def __init__(
self, email, password, timeout=None,
max_retries_contract_negotiation=10,
max_retries_upload_same_farmer=3):
self.client = Client(email, password, timeout=timeout)
self.shards_already_uploaded = 0
self.max_retries_contract_negotiation = max_retries_contract_negotiation
self.max_retries_upload_same_farmer = max_retries_upload_same_farmer
def _calculate_hmac(self, base_string, key):
"""HMAC hash calculation and returning
the results in dictionary collection.
Args:
base_string (): .
key (): .
"""
hmacs = dict()
        # Compute a base64-encoded HMAC digest for each supported hash
        # algorithm and store it under the algorithm's display name.
        for name, algorithm in (('MD5', hashlib.md5),
                                ('SHA-1', hashlib.sha1),
                                ('SHA-224', hashlib.sha224),
                                ('SHA-256', hashlib.sha256),
                                ('SHA-384', hashlib.sha384),
                                ('SHA-512', hashlib.sha512)):
            hashed = hmac.new(key, base_string, algorithm)
            hmacs[name] = hashed.digest().encode('base64').rstrip('\n')
return hmacs
def _prepare_bucket_entry_hmac(self, shard_array):
"""
Args:
shard_array (): .
"""
storj_keyring = model.Keyring()
encryption_key = storj_keyring.get_encryption_key('test')
current_hmac = ''
for shard in shard_array:
base64_decoded = '%s%s' % (base64.decodestring(shard.hash),
current_hmac)
current_hmac = self._calculate_hmac(base64_decoded, encryption_key)
self.__logger.debug('current_hmac=%s' % current_hmac)
return current_hmac
def require_upload(self, shard_path, url, index):
with open(shard_path, 'rb') as f:
response = requests.post(
url,
data=self._read_in_chunks(
f, shard_index=index),
timeout=self.client.timeout)
return response
def _calculate_timeout(self, shard_size, mbps=0.5):
"""
Args:
            shard_size: shard size, in bytes.
            mbps: assumed upload throughput in Mbit/s (default 0.5, roughly 500 kbps).
"""
if not self.client.timeout:
self.client.timeout = int(shard_size * 8.0 / (1024 ** 2 * mbps))
self.__logger.debug('Set timeout to %s seconds' % self.client.timeout)
def upload_shard(self, shard, chapters, frame,
file_name_ready_to_shard_upload, tmp_path):
"""
Args:
shard:
chapters:
frame:
file_name_ready_to_shard_upload:
tmp_path:
"""
contract_negotiation_tries = 0
exchange_report = model.ExchangeReport()
while self.max_retries_contract_negotiation > \
contract_negotiation_tries:
contract_negotiation_tries += 1
self.__logger.debug('Negotiating contract')
self.__logger.debug('Trying to negotiate storage contract for \
shard at index %s. Attempt %s' % (chapters, contract_negotiation_tries))
try:
frame_content = self.client.frame_add_shard(shard, frame.id)
farmerNodeID = frame_content['farmer']['nodeID']
url = 'http://%s:%d/shards/%s?token=%s' % (
frame_content['farmer']['address'],
frame_content['farmer']['port'],
frame_content['hash'],
frame_content['token'])
self.__logger.debug('Done contract for shard %s with url=%s',
chapters,
url)
# begin recording exchange report
# exchange_report = model.ExchangeReport()
current_timestamp = int(time.time())
exchange_report.exchangeStart = str(current_timestamp)
exchange_report.farmerId = str(farmerNodeID)
exchange_report.dataHash = str(shard.hash)
farmer_tries = 0
response = None
while self.max_retries_upload_same_farmer > farmer_tries:
farmer_tries += 1
try:
self.__logger.debug(
'Upload shard at index %s to %s attempt #%d',
shard.index,
frame_content['farmer']['address'],
farmer_tries)
mypath = os.path.join(
tmp_path, '%s-%s' % (
file_name_ready_to_shard_upload,
chapters + 1))
"""
with open(mypath, 'rb') as f:
response = requests.post(
url,
data=self._read_in_chunks(
f, shard_index=chapters),
timeout=1)
"""
tp_content = ThreadPool(processes=1)
async_result = tp_content.apply_async(
self.require_upload,
(mypath, url, chapters))
response = async_result.get(self.client.timeout)
# response = self.require_upload(mypath, url, chapters)
self.__logger.debug('>>> Shard %s Uploaded' % chapters)
j = json.loads(str(response.content))
self.__logger.info('>>>> %s' % str(j))
if j.get('result') == \
'The supplied token is not accepted':
raise SuppliedTokenNotAcceptedError()
# Exceptions raised when uploading shards
except FarmerError as e:
self.__logger.error('Farmer error')
self.__logger.error(e)
continue
except TimeoutError:
self.__logger.error(
'Upload shard %s to %s too slow.' % (
chapters, url))
self.__logger.error(
'Upload timed out. Redo upload of shard %s' %
chapters)
raise BridgeError('Farmer too slow. Try another one.')
else:
self.shards_already_uploaded += 1
self.__logger.info(
'Shard uploaded successfully to %s:%d',
frame_content['farmer']['address'],
frame_content['farmer']['port'])
self.__logger.debug(
'%s shards, %s sent',
self.all_shards_count,
self.shards_already_uploaded)
if self.all_shards_count <= \
self.shards_already_uploaded:
self.__logger.debug('finish upload')
break
# Exceptions raised negotiating contracts
except BridgeError as e:
self.__logger.error('Bridge error')
self.__logger.error(e)
# upload failed due to Storj Bridge failure
self.__logger.debug('Exception raised while trying to \
negotiate contract: ')
continue
except Exception as e:
# now send Exchange Report
# upload failed probably while sending data to farmer
self.__logger.error(e)
                self.__logger.error('Error occurred while trying to upload \
shard or negotiate contract. Retrying... ')
                self.__logger.error('Unhandled exception occurred while trying \
to upload shard or negotiate contract for shard at index %s . Retrying...',
chapters)
current_timestamp = int(time.time())
exchange_report.exchangeEnd = str(current_timestamp)
exchange_report.exchangeResultCode = (exchange_report.FAILURE)
exchange_report.exchangeResultMessage = \
(exchange_report.STORJ_REPORT_UPLOAD_ERROR)
# Send exchange report
# self.client.send_exchange_report(exchange_report)
continue
else:
# uploaded with success
current_timestamp = int(time.time())
                # prepare second half of exchange report
exchange_report.exchangeEnd = str(current_timestamp)
exchange_report.exchangeResultCode = exchange_report.SUCCESS
exchange_report.exchangeResultMessage = \
exchange_report.STORJ_REPORT_SHARD_UPLOADED
self.__logger.info('Shard %s successfully added and exchange \
report sent. ', chapters + 1)
# Send exchange report
# self.client.send_exchange_report(exchange_report)
# break
return True
return False
def _read_in_chunks(self, file_object, blocksize=4096, chunks=-1,
shard_index=None):
"""Lazy function (generator) to read a file piece by piece.
Default chunk size: 1k.
Args:
file_object (): .
blocksize (): .
chunks (): .
"""
i = 0
while chunks:
data = file_object.read(blocksize)
if not data:
break
yield data
i += 1
chunks -= 1
def file_upload(self, bucket_id, file_path, tmp_file_path):
"""
Upload a new file
Args:
bucket_id: ID of the bucket
file_path: path of the file to upload
tmp_file_path: folder where to store the temporary shards
"""
self.__logger.debug('Upload %s in bucket %s', file_path, bucket_id)
self.__logger.debug('Temp folder %s', tmp_file_path)
bname = os.path.split(file_path)[1] # File name
file_mime_type = 'text/plain'
# Encrypt file
self.__logger.debug('Encrypting file...')
file_crypto_tools = FileCrypto()
# File name of encrypted file
file_name_ready_to_shard_upload = '%s.encrypted' % bname
# Path where to save the encrypted file in temp dir
file_path_ready = os.path.join(tmp_file_path,
file_name_ready_to_shard_upload)
self.__logger.debug('file_path_ready: %s', file_path_ready)
# Begin file encryption
file_crypto_tools.encrypt_file(
'AES',
file_path,
file_path_ready,
self.client.password)
self.fileisdecrypted_str = ''
file_size = os.stat(file_path).st_size
self.__logger.info('File encrypted')
# Get the PUSH token from Storj Bridge
self.__logger.debug('Get PUSH Token')
push_token = None
try:
push_token = self.client.token_create(bucket_id, 'PUSH')
except BridgeError as e:
self.__logger.error(e)
self.__logger.debug('PUSH token create exception')
self.__logger.error('File not uploaded')
return
self.__logger.info('PUSH Token ID %s', push_token.id)
# Get a frame
self.__logger.debug('Frame')
frame = None
try:
frame = self.client.frame_create()
except BridgeError as e:
self.__logger.error(e)
self.__logger.debug('Unhandled exception while creating file \
staging frame')
self.__logger.error('File not uploaded')
return
self.__logger.info('frame.id = %s', frame.id)
# Now generate shards
self.__logger.debug('Sharding started...')
shards_manager = model.ShardManager(filepath=file_path_ready,
tmp_path=tmp_file_path)
self.all_shards_count = len(shards_manager.shards)
self.__logger.debug('Sharding ended...')
self.__logger.info('There are %s shards', self.all_shards_count)
# Calculate timeout
self._calculate_timeout(shard_size=shards_manager.shards[0].size,
mbps=1)
# Upload shards
mp = ThreadPool()
res = mp.map(lambda n_s: self.upload_shard(
n_s[1], n_s[0], frame, file_name_ready_to_shard_upload, tmp_file_path),
enumerate(shards_manager.shards))
self.__logger.debug('===== RESULTS =====')
self.__logger.debug(res)
if False in res or None in res:
self.__logger.error('File not uploaded: shard %s not uploaded' %
res.index(False))
self.__logger.error('Exiting with errors')
exit(1)
# finish_upload
self.__logger.debug('Generating HMAC...')
# create file hash
hash_sha512_hmac_b64 = self._prepare_bucket_entry_hmac(
shards_manager.shards)
hash_sha512_hmac = hashlib.sha224(str(
hash_sha512_hmac_b64['SHA-512'])).hexdigest()
self.__logger.debug('Now upload file')
data = {
'x-token': push_token.id,
'x-filesize': str(file_size),
'frame': frame.id,
'mimetype': file_mime_type,
'filename': str(bname) + str(self.fileisdecrypted_str),
'hmac': {
'type': 'sha512',
'value': hash_sha512_hmac
},
}
self.__logger.debug('Finishing upload')
self.__logger.debug('Adding file %s to bucket...', bname)
success = False
try:
# Post an upload_file request
response = self.client._request(
method='POST',
path='/buckets/%s/files' % bucket_id,
headers={
'x-token': push_token.id,
'x-filesize': str(file_size),
},
json=data,
)
success = True
except BridgeError as e:
self.__logger.error(e)
self.__logger.debug('Unhandled bridge exception')
if success:
self.__logger.info('File uploaded successfully!')
# Remove temp files
try:
# Remove shards
file_shards = map(lambda i: '%s-%s' % (file_path_ready, i),
range(1, self.all_shards_count + 1))
self.__logger.debug('Remove shards %s' % file_shards)
map(os.remove, file_shards)
# Remove encrypted file
self.__logger.debug('Remove encrypted file %s' % file_path_ready)
os.remove(file_path_ready)
except OSError as e:
self.__logger.error(e)
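# Illustrative sketch (not part of the original module): upload_shard() above
# bounds each farmer POST by running require_upload() in a one-worker
# ThreadPool and waiting at most `timeout` seconds, where the timeout uses the
# same size/throughput formula as _calculate_timeout(). The uploader, path and
# URL arguments below are hypothetical placeholders.
def _example_bounded_upload(uploader, shard_path, url, shard_index,
                            shard_size, mbps=0.5):
    """Standalone sketch of the timeout-bounded shard upload pattern."""
    # e.g. a 1 MiB shard at 0.5 Mbit/s:
    # int(1048576 * 8.0 / (1024 ** 2 * 0.5)) == 16 seconds
    timeout = int(shard_size * 8.0 / (1024 ** 2 * mbps))
    pool = ThreadPool(processes=1)
    async_result = pool.apply_async(uploader.require_upload,
                                    (shard_path, url, shard_index))
    try:
        return async_result.get(timeout)
    except TimeoutError:
        # Mirrors upload_shard(): a too-slow farmer is treated as a failure so
        # the caller can retry the contract with another farmer.
        raise BridgeError('Farmer too slow. Try another one.')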
|
|
#!/usr/bin/env python
# coding=utf-8
import datetime
import os.path
import sys
import traceback as tb
from sacred import metrics_logger
from sacred.metrics_logger import linearize_metrics
from sacred.randomness import set_global_seed
from sacred.utils import SacredInterrupt, join_paths, IntervalTimer
from sacred.stdout_capturing import get_stdcapturer
class Run:
"""Represent and manage a single run of an experiment."""
def __init__(
self,
config,
config_modifications,
main_function,
observers,
root_logger,
run_logger,
experiment_info,
host_info,
pre_run_hooks,
post_run_hooks,
captured_out_filter=None,
):
self._id = None
"""The ID of this run as assigned by the first observer"""
self.captured_out = ""
"""Captured stdout and stderr"""
self.config = config
"""The final configuration used for this run"""
self.config_modifications = config_modifications
"""A ConfigSummary object with information about config changes"""
self.experiment_info = experiment_info
"""A dictionary with information about the experiment"""
self.host_info = host_info
"""A dictionary with information about the host"""
self.info = {}
"""Custom info dict that will be sent to the observers"""
self.root_logger = root_logger
"""The root logger that was used to create all the others"""
self.run_logger = run_logger
"""The logger that is used for this run"""
self.main_function = main_function
"""The main function that is executed with this run"""
self.observers = observers
"""A list of all observers that observe this run"""
self.pre_run_hooks = pre_run_hooks
"""List of pre-run hooks (captured functions called before this run)"""
self.post_run_hooks = post_run_hooks
"""List of post-run hooks (captured functions called after this run)"""
self.result = None
"""The return value of the main function"""
self.status = None
"""The current status of the run, from QUEUED to COMPLETED"""
self.start_time = None
"""The datetime when this run was started"""
self.stop_time = None
"""The datetime when this run stopped"""
self.debug = False
"""Determines whether this run is executed in debug mode"""
self.pdb = False
"""If true the pdb debugger is automatically started after a failure"""
self.meta_info = {}
"""A custom comment for this run"""
self.beat_interval = 10.0 # sec
"""The time between two heartbeat events measured in seconds"""
self.unobserved = False
"""Indicates whether this run should be unobserved"""
self.force = False
"""Disable warnings about suspicious changes"""
self.queue_only = False
"""If true then this run will only fire the queued_event and quit"""
self.captured_out_filter = captured_out_filter
"""Filter function to be applied to captured output"""
self.fail_trace = None
"""A stacktrace, in case the run failed"""
self.capture_mode = None
"""Determines the way the stdout/stderr are captured"""
self._heartbeat = None
self._failed_observers = []
self._output_file = None
self._metrics = metrics_logger.MetricsLogger()
def open_resource(self, filename, mode="r"):
"""Open a file and also save it as a resource.
Opens a file, reports it to the observers as a resource, and returns
the opened file.
In Sacred terminology a resource is a file that the experiment needed
to access during a run. In case of a MongoObserver that means making
        sure the file is stored in the database (but avoiding duplicates) along
        with its path and md5 sum.
See also :py:meth:`sacred.Experiment.open_resource`.
Parameters
----------
filename : str
name of the file that should be opened
mode : str
mode that file will be open
Returns
-------
file
the opened file-object
"""
filename = os.path.abspath(filename)
self._emit_resource_added(filename) # TODO: maybe non-blocking?
return open(filename, mode)
def add_resource(self, filename):
"""Add a file as a resource.
In Sacred terminology a resource is a file that the experiment needed
to access during a run. In case of a MongoObserver that means making
        sure the file is stored in the database (but avoiding duplicates) along
        with its path and md5 sum.
See also :py:meth:`sacred.Experiment.add_resource`.
Parameters
----------
filename : str
name of the file to be stored as a resource
"""
filename = os.path.abspath(filename)
self._emit_resource_added(filename)
def add_artifact(self, filename, name=None, metadata=None, content_type=None):
"""Add a file as an artifact.
In Sacred terminology an artifact is a file produced by the experiment
run. In case of a MongoObserver that means storing the file in the
database.
See also :py:meth:`sacred.Experiment.add_artifact`.
Parameters
----------
filename : str
name of the file to be stored as artifact
name : str, optional
optionally set the name of the artifact.
Defaults to the filename.
metadata: dict
optionally attach metadata to the artifact.
This only has an effect when using the MongoObserver.
content_type: str, optional
optionally attach a content-type to the artifact.
This only has an effect when using the MongoObserver.
"""
filename = os.path.abspath(filename)
name = os.path.basename(filename) if name is None else name
self._emit_artifact_added(name, filename, metadata, content_type)
def __call__(self, *args):
r"""Start this run.
Parameters
----------
\*args
parameters passed to the main function
Returns
-------
the return value of the main function
"""
if self.start_time is not None:
raise RuntimeError(
"A run can only be started once. "
"(Last start was {})".format(self.start_time)
)
if self.unobserved:
self.observers = []
else:
self.observers = sorted(self.observers, key=lambda x: -x.priority)
self.warn_if_unobserved()
set_global_seed(self.config["seed"])
if self.capture_mode is None and not self.observers:
capture_mode = "no"
else:
capture_mode = self.capture_mode
capture_mode, capture_stdout = get_stdcapturer(capture_mode)
self.run_logger.debug('Using capture mode "%s"', capture_mode)
if self.queue_only:
self._emit_queued()
return
try:
with capture_stdout() as self._output_file:
self._emit_started()
self._start_heartbeat()
self._execute_pre_run_hooks()
self.result = self.main_function(*args)
self._execute_post_run_hooks()
if self.result is not None:
self.run_logger.info("Result: {}".format(self.result))
elapsed_time = self._stop_time()
self.run_logger.info("Completed after %s", elapsed_time)
self._get_captured_output()
self._stop_heartbeat()
self._emit_completed(self.result)
except (SacredInterrupt, KeyboardInterrupt) as e:
self._stop_heartbeat()
status = getattr(e, "STATUS", "INTERRUPTED")
self._emit_interrupted(status)
raise
except BaseException:
exc_type, exc_value, trace = sys.exc_info()
self._stop_heartbeat()
self._emit_failed(exc_type, exc_value, trace.tb_next)
raise
finally:
self._warn_about_failed_observers()
self._wait_for_observers()
return self.result
def _get_captured_output(self):
if self._output_file.closed:
return
text = self._output_file.get()
if isinstance(text, bytes):
text = text.decode("utf-8", "replace")
if self.captured_out:
text = self.captured_out + text
if self.captured_out_filter is not None:
text = self.captured_out_filter(text)
self.captured_out = text
def _start_heartbeat(self):
self.run_logger.debug("Starting Heartbeat")
if self.beat_interval > 0:
self._stop_heartbeat_event, self._heartbeat = IntervalTimer.create(
self._emit_heartbeat, self.beat_interval
)
self._heartbeat.start()
def _stop_heartbeat(self):
self.run_logger.debug("Stopping Heartbeat")
# only stop if heartbeat was started
if self._heartbeat is not None:
self._stop_heartbeat_event.set()
self._heartbeat.join(timeout=2)
def _emit_queued(self):
self.status = "QUEUED"
queue_time = datetime.datetime.utcnow()
self.meta_info["queue_time"] = queue_time
command = join_paths(
self.main_function.prefix, self.main_function.signature.name
)
self.run_logger.info("Queuing-up command '%s'", command)
for observer in self.observers:
_id = observer.queued_event(
ex_info=self.experiment_info,
command=command,
host_info=self.host_info,
queue_time=queue_time,
config=self.config,
meta_info=self.meta_info,
_id=self._id,
)
if self._id is None:
self._id = _id
# do not catch any exceptions on startup:
# the experiment SHOULD fail if any of the observers fails
if self._id is None:
self.run_logger.info("Queued")
else:
self.run_logger.info('Queued-up run with ID "{}"'.format(self._id))
def _emit_started(self):
self.status = "RUNNING"
self.start_time = datetime.datetime.utcnow()
command = join_paths(
self.main_function.prefix, self.main_function.signature.name
)
self.run_logger.info("Running command '%s'", command)
for observer in self.observers:
_id = observer.started_event(
ex_info=self.experiment_info,
command=command,
host_info=self.host_info,
start_time=self.start_time,
config=self.config,
meta_info=self.meta_info,
_id=self._id,
)
if self._id is None:
self._id = _id
# do not catch any exceptions on startup:
# the experiment SHOULD fail if any of the observers fails
if self._id is None:
self.run_logger.info("Started")
else:
self.run_logger.info('Started run with ID "{}"'.format(self._id))
def _emit_heartbeat(self):
beat_time = datetime.datetime.utcnow()
self._get_captured_output()
# Read all measured metrics since last heartbeat
logged_metrics = self._metrics.get_last_metrics()
metrics_by_name = linearize_metrics(logged_metrics)
for observer in self.observers:
self._safe_call(
observer, "log_metrics", metrics_by_name=metrics_by_name, info=self.info
)
self._safe_call(
observer,
"heartbeat_event",
info=self.info,
captured_out=self.captured_out,
beat_time=beat_time,
result=self.result,
)
def _stop_time(self):
self.stop_time = datetime.datetime.utcnow()
elapsed_time = datetime.timedelta(
seconds=round((self.stop_time - self.start_time).total_seconds())
)
return elapsed_time
def _emit_completed(self, result):
self.status = "COMPLETED"
for observer in self.observers:
self._final_call(
observer, "completed_event", stop_time=self.stop_time, result=result
)
def _emit_interrupted(self, status):
self.status = status
elapsed_time = self._stop_time()
self.run_logger.warning("Aborted after %s!", elapsed_time)
for observer in self.observers:
self._final_call(
observer,
"interrupted_event",
interrupt_time=self.stop_time,
status=status,
)
def _emit_failed(self, exc_type, exc_value, trace):
self.status = "FAILED"
elapsed_time = self._stop_time()
self.run_logger.error("Failed after %s!", elapsed_time)
self.fail_trace = tb.format_exception(exc_type, exc_value, trace)
for observer in self.observers:
self._final_call(
observer,
"failed_event",
fail_time=self.stop_time,
fail_trace=self.fail_trace,
)
def _emit_resource_added(self, filename):
for observer in self.observers:
self._safe_call(observer, "resource_event", filename=filename)
def _emit_artifact_added(self, name, filename, metadata, content_type):
for observer in self.observers:
self._safe_call(
observer,
"artifact_event",
name=name,
filename=filename,
metadata=metadata,
content_type=content_type,
)
def _safe_call(self, obs, method, **kwargs):
if obs not in self._failed_observers:
try:
getattr(obs, method)(**kwargs)
except Exception as e:
self._failed_observers.append(obs)
self.run_logger.warning(
"An error ocurred in the '{}' " "observer: {}".format(obs, e)
)
def _final_call(self, observer, method, **kwargs):
try:
getattr(observer, method)(**kwargs)
except Exception:
# Feels dirty to catch all exceptions, but it is just for
# finishing up, so we don't want one observer to kill the
# others
self.run_logger.error(tb.format_exc())
def _wait_for_observers(self):
"""Block until all observers finished processing."""
for observer in self.observers:
self._safe_call(observer, "join")
def _warn_about_failed_observers(self):
for observer in self._failed_observers:
self.run_logger.warning(
"The observer '{}' failed at some point "
"during the run.".format(observer)
)
def _execute_pre_run_hooks(self):
for pr in self.pre_run_hooks:
pr()
def _execute_post_run_hooks(self):
for pr in self.post_run_hooks:
pr()
def warn_if_unobserved(self):
if not self.observers and not self.debug and not self.unobserved:
self.run_logger.warning("No observers have been added to this run")
def log_scalar(self, metric_name, value, step=None):
"""
Add a new measurement.
The measurement will be processed by the MongoDB observer
during a heartbeat event.
Other observers are not yet supported.
:param metric_name: The name of the metric, e.g. training.loss
:param value: The measured value
:param step: The step number (integer), e.g. the iteration number
If not specified, an internal counter for each metric
is used, incremented by one.
"""
# Method added in change https://github.com/chovanecm/sacred/issues/4
# The same as Experiment.log_scalar (if something changes,
# update the docstring too!)
self._metrics.log_scalar_metric(metric_name, value, step)
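# Illustrative sketch (not part of the original sacred module): inside a
# captured main function the injected `_run` argument is an instance of the
# Run class above, so a training loop records metrics roughly like this. The
# metric name and loss values are hypothetical.
def _example_log_training_loss(run, losses=(0.9, 0.5, 0.3)):
    """Standalone sketch of Run.log_scalar() usage from experiment code."""
    for step, loss in enumerate(losses):
        # Buffered by the MetricsLogger and flushed to observers implementing
        # log_metrics() at the next heartbeat (every beat_interval seconds).
        run.log_scalar("training.loss", loss, step)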
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(i.e. to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, msg_block, msg_headers, msg_inv
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitsendTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes, sync_blocks
class AcceptBlockTest(BitsendTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from a non-whitelisted peer, while Node1 (started with
        # -minimumchainwork=0x10) is used to test the interaction with
        # nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (which has -minimumchainwork set)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two blocks that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as it is not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Entry points into GRPC."""
import threading
from grpc._adapter import fore as _fore
from grpc._adapter import rear as _rear
from grpc.framework.alpha import _face_utilities
from grpc.framework.alpha import _reexport
from grpc.framework.alpha import interfaces
from grpc.framework.base import implementations as _base_implementations
from grpc.framework.base import util as _base_utilities
from grpc.framework.face import implementations as _face_implementations
from grpc.framework.foundation import logging_pool
_THREAD_POOL_SIZE = 80
_ONE_DAY_IN_SECONDS = 24 * 60 * 60
class _Server(interfaces.Server):
def __init__(self, breakdown, port, private_key, certificate_chain):
self._lock = threading.Lock()
self._breakdown = breakdown
self._port = port
if private_key is None or certificate_chain is None:
self._key_chain_pairs = ()
else:
self._key_chain_pairs = ((private_key, certificate_chain),)
self._pool = None
self._back = None
self._fore_link = None
def _start(self):
with self._lock:
if self._pool is None:
self._pool = logging_pool.pool(_THREAD_POOL_SIZE)
servicer = _face_implementations.servicer(
self._pool, self._breakdown.implementations, None)
self._back = _base_implementations.back_link(
servicer, self._pool, self._pool, self._pool, _ONE_DAY_IN_SECONDS,
_ONE_DAY_IN_SECONDS)
self._fore_link = _fore.ForeLink(
self._pool, self._breakdown.request_deserializers,
self._breakdown.response_serializers, None, self._key_chain_pairs,
port=self._port)
self._back.join_fore_link(self._fore_link)
self._fore_link.join_rear_link(self._back)
self._fore_link.start()
else:
raise ValueError('Server currently running!')
def _stop(self):
with self._lock:
if self._pool is None:
raise ValueError('Server not running!')
else:
self._fore_link.stop()
_base_utilities.wait_for_idle(self._back)
self._pool.shutdown(wait=True)
self._fore_link = None
self._back = None
self._pool = None
def __enter__(self):
self._start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._stop()
return False
def start(self):
self._start()
def stop(self):
self._stop()
def port(self):
with self._lock:
return self._fore_link.port()
class _Stub(interfaces.Stub):
def __init__(
self, breakdown, host, port, secure, root_certificates, private_key,
certificate_chain, server_host_override=None):
self._lock = threading.Lock()
self._breakdown = breakdown
self._host = host
self._port = port
self._secure = secure
self._root_certificates = root_certificates
self._private_key = private_key
self._certificate_chain = certificate_chain
self._server_host_override = server_host_override
self._pool = None
self._front = None
self._rear_link = None
self._understub = None
def __enter__(self):
with self._lock:
if self._pool is None:
self._pool = logging_pool.pool(_THREAD_POOL_SIZE)
self._front = _base_implementations.front_link(
self._pool, self._pool, self._pool)
self._rear_link = _rear.RearLink(
self._host, self._port, self._pool,
self._breakdown.request_serializers,
self._breakdown.response_deserializers, self._secure,
self._root_certificates, self._private_key, self._certificate_chain,
server_host_override=self._server_host_override)
self._front.join_rear_link(self._rear_link)
self._rear_link.join_fore_link(self._front)
self._rear_link.start()
self._understub = _face_implementations.dynamic_stub(
self._breakdown.face_cardinalities, self._front, self._pool, '')
else:
raise ValueError('Tried to __enter__ already-__enter__ed Stub!')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self._lock:
if self._pool is None:
raise ValueError('Tried to __exit__ non-__enter__ed Stub!')
else:
self._rear_link.stop()
_base_utilities.wait_for_idle(self._front)
self._pool.shutdown(wait=True)
self._rear_link = None
self._front = None
self._pool = None
self._understub = None
return False
def __getattr__(self, attr):
with self._lock:
if self._pool is None:
raise ValueError('Tried to __getattr__ non-__enter__ed Stub!')
else:
method_cardinality = self._breakdown.cardinalities.get(attr)
underlying_attr = getattr(
self._understub, self._breakdown.qualified_names.get(attr), None)
if method_cardinality is interfaces.Cardinality.UNARY_UNARY:
return _reexport.unary_unary_sync_async(underlying_attr)
elif method_cardinality is interfaces.Cardinality.UNARY_STREAM:
return lambda request, timeout: _reexport.cancellable_iterator(
underlying_attr(request, timeout))
elif method_cardinality is interfaces.Cardinality.STREAM_UNARY:
return _reexport.stream_unary_sync_async(underlying_attr)
elif method_cardinality is interfaces.Cardinality.STREAM_STREAM:
return lambda request_iterator, timeout: (
_reexport.cancellable_iterator(underlying_attr(
request_iterator, timeout)))
else:
raise AttributeError(attr)
def stub(
service_name, methods, host, port, secure=False, root_certificates=None,
private_key=None, certificate_chain=None, server_host_override=None):
"""Constructs an interfaces.Stub.
Args:
service_name: The package-qualified full name of the service.
methods: A dictionary from RPC method name to
interfaces.RpcMethodInvocationDescription describing the RPCs to be
supported by the created stub. The RPC method names in the dictionary are
not qualified by the service name or decorated in any other way.
host: The host to which to connect for RPC service.
port: The port to which to connect for RPC service.
secure: Whether or not to construct the stub with a secure connection.
root_certificates: The PEM-encoded root certificates or None to ask for
them to be retrieved from a default location.
private_key: The PEM-encoded private key to use or None if no private key
should be used.
certificate_chain: The PEM-encoded certificate chain to use or None if no
certificate chain should be used.
server_host_override: (For testing only) the target name used for SSL
host name checking.
Returns:
An interfaces.Stub affording RPC invocation.
"""
breakdown = _face_utilities.break_down_invocation(service_name, methods)
return _Stub(
breakdown, host, port, secure, root_certificates, private_key,
certificate_chain, server_host_override=server_host_override)
def server(
service_name, methods, port, private_key=None, certificate_chain=None):
"""Constructs an interfaces.Server.
Args:
service_name: The package-qualified full name of the service.
methods: A dictionary from RPC method name to
interfaces.RpcMethodServiceDescription describing the RPCs to
be serviced by the created server. The RPC method names in the dictionary
are not qualified by the service name or decorated in any other way.
port: The port on which to serve or zero to ask for a port to be
automatically selected.
private_key: A PEM-encoded private key, or None for an insecure server.
certificate_chain: A PEM-encoded certificate chain, or None for an insecure
server.
Returns:
An interfaces.Server affording service of the described RPCs (served
securely when a private key and certificate chain are supplied).
"""
breakdown = _face_utilities.break_down_service(service_name, methods)
return _Server(breakdown, port, private_key, certificate_chain)
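# Illustrative usage sketch: both factory functions return context managers, so
# the underlying links are started on __enter__ and torn down on __exit__. The
# service name "math.Math" and the `service_methods`/`invocation_methods`
# dictionaries (interfaces.RpcMethod*Description objects built elsewhere) are
# hypothetical placeholders, not provided by this module:
#
#   with server('math.Math', service_methods, 0) as math_server:
#       with stub('math.Math', invocation_methods,
#                 'localhost', math_server.port()) as math_stub:
#           div = math_stub.Div  # wrapper type depends on the method cardinality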
|
|
# jsb/socklib/irc/irc.py
#
#
"""
an Irc object handles the connection to the irc server .. receiving,
sending, connect and reconnect code.
"""
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.generic import toenc, fromenc
from jsb.utils.generic import getrandomnick, strippedtxt
from jsb.utils.generic import fix_format, splittxt, uniqlist
from jsb.utils.locking import lockdec, lock_object, release_object
from jsb.lib.botbase import BotBase
from jsb.lib.threads import start_new_thread, threaded
from jsb.utils.pdod import Pdod
from jsb.lib.channelbase import ChannelBase
from jsb.lib.morphs import inputmorphs, outputmorphs
from jsb.lib.exit import globalshutdown
from jsb.lib.config import Config, getmainconfig
from jsb.lib.errors import UserIgnored, NoSuchUser
## jsb.irc imports
from ircevent import IrcEvent
## basic imports
import time
import thread
import socket
import threading
import os
import Queue
import random
import logging
import types
import re
import select
## locks
outlock = thread.allocate_lock()
outlocked = lockdec(outlock)
## exceptions
class Irc(BotBase):
""" the irc class, provides interface to irc related stuff. """
def __init__(self, cfg=None, users=None, plugs=None, *args, **kwargs):
BotBase.__init__(self, cfg, users, plugs, *args, **kwargs)
#BotBase.setstate(self)
self.type = 'irc'
self.fsock = None
self.oldsock = None
self.sock = None
self.reconnectcount = 0
self.pongcheck = 0
self.nickchanged = False
self.noauto433 = False
if self.state:
if not self.state.has_key('alternick'): self.state['alternick'] = self.cfg['alternick']
if not self.state.has_key('no-op'): self.state['no-op'] = []
self.nicks401 = []
self.cfg.port = self.cfg.port or 6667
self.connecttime = 0
self.encoding = 'utf-8'
self.blocking = 1
self.lastoutput = 0
self.splitted = []
if not self.cfg.server: self.cfg.server = self.cfg.host or "localhost"
assert self.cfg.port
assert self.cfg.server
def _raw(self, txt):
""" send raw text to the server. """
if not txt or self.stopped or not self.sock:
logging.debug("%s - bot is stopped .. not sending." % self.cfg.name)
return 0
try:
self.lastoutput = time.time()
itxt = toenc(txt, self.encoding)
if not self.sock: logging.debug("%s - socket disappeared - not sending." % self.cfg.name) ; return
if not txt.startswith("PONG"): logging.warn("> %s (%s)" % (itxt, self.cfg.name))
else: logging.info("> %s (%s)" % (itxt, self.cfg.name))
if self.cfg.has_key('ssl') and self.cfg['ssl']: self.sock.write(itxt + '\n')
else: self.sock.send(itxt[:500] + '\n')
except Exception, ex: logging.error("%s - can't send: %s" % (self.cfg.name, str(ex)))
def _connect(self):
""" connect to server/port using nick. """
self.stopped = False
self.connecting = True
self.connectok.clear()
if self.cfg.ipv6:
self.oldsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
self.oldsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
assert self.oldsock
assert self.cfg.server
assert self.cfg.port
server = self.bind()
logging.warn('connecting to %s - %s - %s (%s)' % (server, self.cfg.server, self.cfg.port, self.cfg.name))
self.oldsock.settimeout(30)
self.oldsock.connect((server, int(str(self.cfg.port))))
self.blocking = 1
self.oldsock.setblocking(self.blocking)
logging.warn('connected! (%s)' % self.cfg.name)
self.connected = True
self.fsock = self.oldsock.makefile("r")
self.fsock._sock.setblocking(self.blocking)
if self.blocking:
socktimeout = self.cfg['socktimeout']
if not socktimeout:
socktimeout = 301.0
else:
socktimeout = float(socktimeout)
self.oldsock.settimeout(socktimeout)
self.fsock._sock.settimeout(socktimeout)
if self.cfg.has_key('ssl') and self.cfg['ssl']:
logging.warn('ssl enabled (%s)' % self.cfg.name)
self.sock = socket.ssl(self.oldsock)
else: self.sock = self.oldsock
try:
self.outputlock.release()
except thread.error:
pass
self.connecttime = time.time()
return True
def bind(self):
server = self.cfg.server
elite = self.cfg['bindhost'] or getmainconfig()['bindhost']
if elite:
logging.warn("trying to bind to %s" % elite)
try:
self.oldsock.bind((elite, 0))
except socket.gaierror:
logging.debug("%s - can't bind to %s" % (self.cfg.name, elite))
if not server:
try: socket.inet_pton(socket.AF_INET6, self.cfg.server)
except socket.error: pass
else: server = self.cfg.server
if not server:
try: socket.inet_pton(socket.AF_INET, self.cfg.server)
except socket.error: pass
else: server = self.cfg.server
if not server:
ips = []
try:
for item in socket.getaddrinfo(self.cfg.server, None):
if item[0] in [socket.AF_INET, socket.AF_INET6] and item[1] == socket.SOCK_STREAM:
ip = item[4][0]
if ip not in ips: ips.append(ip)
except socket.error: pass
else: server = random.choice(ips)
return server
def _readloop(self):
""" loop on the socketfile. """
self.stopreadloop = False
self.stopped = False
doreconnect = True
timeout = 1
logging.debug('%s - starting readloop' % self.cfg.name)
prevtxt = ""
while not self.stopped and not self.stopreadloop and self.sock and self.fsock:
try:
time.sleep(0.01)
if self.cfg.has_key('ssl') and self.cfg['ssl']: intxt = inputmorphs.do(self.sock.read()).split('\n')
else: intxt = inputmorphs.do(self.fsock.readline()).split('\n')
if self.stopreadloop or self.stopped:
doreconnect = 0
break
if intxt == ["",]:
logging.error("remote disconnected")
doreconnect = 1
break
if prevtxt:
intxt[0] = prevtxt + intxt[0]
prevtxt = ""
if intxt[-1] != '':
prevtxt = intxt[-1]
intxt = intxt[:-1]
for r in intxt:
if not r: continue
try:
r = strippedtxt(r.rstrip(), ["\001", "\002", "\003"])
rr = unicode(fromenc(r.rstrip(), self.encoding))
except UnicodeDecodeError:
if not r: logging.warn("decode error - ignoring (%s)" % self.cfg.name) ; continue
rr = r
if not rr: continue
res = rr
try:
ievent = IrcEvent().parse(self, res)
except Exception, ex:
handle_exception()
continue
try:
if int(ievent.cmnd) > 400: logging.error(u"< %s (%s)" % (res, self.cfg.name))
elif int(ievent.cmnd) >= 300: logging.info(u"< %s (%s)" % (res, self.cfg.name))
except ValueError:
if not res.startswith("PING") and not res.startswith("NOTICE"): logging.warn(u"< %s (%s)" % (res, self.cfg.name))
else: logging.info(u"< %s (%s)" % (res, self.cfg.name))
if ievent: self.handle_ievent(ievent)
timeout = 1
except UnicodeError:
handle_exception()
continue
except socket.timeout, ex:
logging.warn("socket timeout (%s)" % self.cfg.name)
self.error = str(ex)
if self.stopped or self.stopreadloop: break
timeout += 1
if timeout > 2:
doreconnect = 1
logging.warn('no pong received (%s)' % self.cfg.name)
break
pingsend = self.ping()
if not pingsend:
doreconnect = 1
break
continue
except socket.sslerror, ex:
self.error = str(ex)
if self.stopped or self.stopreadloop: break
if not 'timed out' in str(ex):
handle_exception()
doreconnect = 1
break
timeout += 1
if timeout > 2:
doreconnect = 1
logging.warn('no pong received (%s)' % self.cfg.name)
break
logging.warn("socket timeout (%s)" % self.cfg.name)
pingsend = self.ping()
if not pingsend:
doreconnect = 1
break
continue
except IOError, ex:
self.error = str(ex)
if self.blocking and 'temporarily' in str(ex):
logging.warn("iorror: %s (%s)" % (self.error, self.cfg.name))
time.sleep(1)
continue
if not self.stopped:
logging.error('connecting error: %s (%s)' % (str(ex), self.cfg.name))
handle_exception()
doreconnect = 1
break
except socket.error, ex:
self.error = str(ex)
if self.blocking and 'temporarily' in str(ex):
logging.warn("ioerror: %s (%s)" % (self.error, self.cfg.name))
time.sleep(0.5)
continue
if not self.stopped:
logging.error('connecting error: %s (%s)' % (str(ex), self.cfg.name))
doreconnect = 1
break
except Exception, ex:
self.error = str(ex)
if self.stopped or self.stopreadloop:
break
logging.error("%s - error in readloop: %s" % (self.cfg.name, str(ex)))
doreconnect = 1
break
logging.debug('%s - readloop stopped - %s' % (self.cfg.name, self.error))
self.connectok.clear()
self.connected = False
if doreconnect and not self.stopped:
time.sleep(2)
self.reconnect()
def logon(self):
""" log on to the network. """
time.sleep(2)
if self.cfg.password:
logging.debug('%s - sending password' % self.cfg.name)
self._raw("PASS %s" % self.cfg.password)
logging.warn('registering with %s using nick %s (%s)' % (self.cfg.server, self.cfg.nick, self.cfg.name))
logging.warn('%s - this may take a while' % self.cfg.name)
username = self.cfg.username or "jsb"
realname = self.cfg.realname or "jsonbot"
time.sleep(1)
self._raw("NICK %s" % self.cfg.nick)
time.sleep(1)
self._raw("USER %s localhost %s :%s" % (username, self.cfg.server, realname))
def _onconnect(self):
""" overload this to run after connect. """
on = self.cfg.onconnect
logging.debug("onconnect is %s" % on)
if on: time.sleep(2) ; self._raw(on)
m = self.cfg.servermodes
if m:
time.sleep(2)
logging.debug("sending servermodes %s" % m)
self._raw("MODE %s %s" % (self.cfg.nick, m))
def _resume(self, data, botname, reto=None):
""" resume to server/port using nick. """
try:
if data['ssl']:
self.exit()
time.sleep(3)
self.start()
return 1
except KeyError:
pass
self.stopped = False
try:
fd = int(data['fd'])
except (KeyError, TypeError, ValueError):
fd = None
logging.error("%s - can't determine file descriptor" % self.cfg.name)
return 0
logging.warn("resume - file descriptor is %s (%s)" % (fd, data.name))
# create socket
if self.cfg.ipv6: self.oldsock = socket.fromfd(fd , socket.AF_INET6, socket.SOCK_STREAM)
else: self.oldsock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
assert self.oldsock
self.oldsock.settimeout(30)
self.fsock = self.oldsock.makefile("r")
self.oldsock.setblocking(self.blocking)
if self.blocking:
socktimeout = self.cfg['socktimeout']
if not socktimeout: socktimeout = 301.0
else: socktimeout = float(socktimeout)
self.oldsock.settimeout(socktimeout)
self.sock = self.oldsock
self.nickchanged = 0
self.connecting = False
time.sleep(2)
self._raw('PING :RESUME %s' % str(time.time()))
self.dostart(self.cfg.name, self.type)
self.connectok.set()
self.connected = True
self.reconnectcount = 0
if reto: self.say(reto, 'rebooting done')
logging.warn("rebooting done (%s)" % self.cfg.name)
return True
def outnocb(self, printto, what, how='msg', *args, **kwargs):
what = fix_format(what)
what = self.normalize(what)
if 'socket' in repr(printto) and self.sock:
printto.send(unicode(what) + u"\n")
return True
if not printto: self._raw(what)
elif how == 'notice': self.notice(printto, what)
elif how == 'ctcp': self.ctcp(printto, what)
else: self.privmsg(printto, what)
def broadcast(self, txt):
""" broadcast txt to all joined channels. """
for i in self.state['joinedchannels']:
self.say(i, txt, speed=1)
def normalize(self, what):
txt = strippedtxt(what, ["\001", "\002", "\003"])
txt = txt.replace("<b>", "\002")
txt = txt.replace("</b>", "\002")
txt = txt.replace("<i>", "\0032")
txt = txt.replace("</i>", "\003")
txt = txt.replace("<li>", "\0033*\003 ")
txt = txt.replace("</li>", "")
txt = txt.replace("<br><br>", " [!] ")
txt = txt.replace("<br>", " - ")
txt = txt.replace("<b>", "\002")
txt = txt.replace("</b>", "\002")
txt = txt.replace("<i>", "\003")
txt = txt.replace("</i>", "")
txt = txt.replace("<h2>", "\0033")
txt = txt.replace("</h2>", "\003")
txt = txt.replace("<h3>", "\0034")
txt = txt.replace("</h3>", "\003")
txt = txt.replace("<li>", "\0034")
txt = txt.replace("</li>", "\003")
return txt
def save(self):
""" save state data. """
if self.state: self.state.save()
def connect(self, reconnect=True):
"""
connect to server/port using nick .. connect can timeout so catch
exception .. reconnect if enabled.
"""
res = self._connect()
logging.info("%s - starting logon" % self.cfg.name)
self.logon()
time.sleep(1)
self.nickchanged = 0
self.reconnectcount = 0
self._onconnect()
self.connected = True
self.connecting = False
return res
def shutdown(self):
""" shutdown the bot. """
logging.warn('shutdown (%s)' % self.cfg.name)
self.stopoutputloop = 1
#self.close()
self.connecting = False
self.connected = False
self.connectok.clear()
def close(self):
""" close the connection. """
try:
if self.cfg.has_key('ssl') and self.cfg['ssl']: self.oldsock.shutdown(2)
else: self.sock.shutdown(2)
except:
pass
try:
if self.cfg.has_key('ssl') and self.cfg['ssl']: self.oldsock.close()
else: self.sock.close()
self.fsock.close()
except:
pass
def handle_pong(self, ievent):
""" set pongcheck on received pong. """
logging.debug('%s - received server pong' % self.cfg.name)
self.pongcheck = 1
def sendraw(self, txt):
""" send raw text to the server. """
if self.stopped: return
logging.debug(u'%s - sending %s' % (self.cfg.name, txt))
self._raw(txt)
def fakein(self, txt):
""" do a fake ircevent. """
if not txt: return
logging.debug('%s - fakein - %s' % (self.cfg.name, txt))
self.handle_ievent(IrcEvent().parse(self, txt))
def donick(self, nick, setorig=False, save=False, whois=False):
""" change nick .. optionally set original nick and/or save to config. """
if not nick: return
self.noauto433 = True
nick = nick[:16]
self._raw('NICK %s\n' % nick)
self.noauto433 = False
def join(self, channel, password=None):
""" join channel with optional password. """
if not channel: return
if password:
self._raw('JOIN %s %s' % (channel, password))
else: self._raw('JOIN %s' % channel)
if self.state:
if channel not in self.state.data.joinedchannels:
self.state.data.joinedchannels.append(channel)
self.state.save()
def part(self, channel):
""" leave channel. """
if not channel: return
self._raw('PART %s' % channel)
try:
self.state.data['joinedchannels'].remove(channel)
self.state.save()
except (KeyError, ValueError), ex:
logging.error("error removing %s from joinedchannels: %s" % (channel, str(ex)))
if self.cfg.channels and channel in self.cfg.channels: self.cfg.channels.remove(channel) ; self.cfg.save()
def who(self, who):
""" send who query. """
if not who: return
self.putonqueue(4, None, 'WHO %s' % who.strip())
def names(self, channel):
""" send names query. """
if not channel: return
self.putonqueue(4, None, 'NAMES %s' % channel)
def whois(self, who):
""" send whois query. """
if not who: return
self.putonqueue(4, None, 'WHOIS %s' % who)
def privmsg(self, printto, what):
""" send privmsg to irc server. """
if not printto or not what: return
self.send('PRIVMSG %s :%s' % (printto, what))
@outlocked
def send(self, txt):
""" send text to irc server. """
if not txt: return
if self.stopped: return
try:
#lock_object(self)
now = time.time()
txt = txt.rstrip()
self._raw(txt)
if self.cfg.sleepsec: timetosleep = float(self.cfg.sleepsec) - float(now - self.lastoutput)
else: timetosleep = 4.0 - float(now - self.lastoutput)
if timetosleep > 0 and not self.cfg.nolimiter and not (time.time() - self.connecttime < 5):
logging.info('%s - flood protect - sleeping %s seconds' % (self.cfg.name, timetosleep))
time.sleep(timetosleep)
logging.info('%s - fp done' % self.cfg.name)
except Exception, ex:
logging.error('%s - send error: %s' % (self.cfg.name, str(ex)))
handle_exception()
#finally: release_object(self)
def voice(self, channel, who):
""" give voice. """
if not channel or not who: return
self.putonqueue(9, None, 'MODE %s +v %s' % (channel, who))
def doop(self, channel, who):
""" give ops. """
if not channel or not who: return
self._raw('MODE %s +o %s' % (channel, who))
def delop(self, channel, who):
""" de-op user. """
if not channel or not who: return
self._raw('MODE %s -o %s' % (channel, who))
def quit(self, reason='http://jsonbot.googlecode.com'):
""" send quit message. """
logging.warn('sending quit - %s (%s)' % (reason, self.cfg.name))
self._raw('QUIT :%s' % reason)
def notice(self, printto, what):
""" send notice. """
if not printto or not what: return
self.send('NOTICE %s :%s' % (printto, what))
def ctcp(self, printto, what):
""" send ctcp privmsg. """
if not printto or not what: return
self.send("PRIVMSG %s :\001%s\001" % (printto, what))
def ctcpreply(self, printto, what):
""" send ctcp notice. """
if not printto or not what: return
self.send("NOTICE %s :\001%s\001" % (printto, what))
def action(self, printto, what, event=None, *args, **kwargs):
""" do action. """
if not printto or not what: return
self.send("PRIVMSG %s :\001ACTION %s\001" % (printto, what))
def handle_ievent(self, ievent):
""" handle ircevent .. dispatch to 'handle_command' method. """
try:
if ievent.cmnd == 'JOIN' or ievent.msg:
if ievent.nick in self.nicks401:
self.nicks401.remove(ievent.nick)
logging.debug('%s - %s joined .. unignoring' % (self.cfg.name, ievent.nick))
ievent.bind(self)
method = getattr(self,'handle_' + ievent.cmnd.lower())
if method:
try:
method(ievent)
except:
handle_exception()
except AttributeError:
pass
except UserIgnored:
pass
except NoSuchUser:
pass
def handle_432(self, ievent):
""" erroneous nick. """
self.handle_433(ievent)
def handle_433(self, ievent):
""" handle nick already taken. """
if self.noauto433:
return
nick = ievent.arguments[1]
alternick = self.state['alternick']
if alternick and not self.nickchanged:
logging.debug('%s - using alternick %s' % (self.cfg.name, alternick))
self.donick(alternick)
self.nickchanged = 1
return
randomnick = getrandomnick()
self._raw("NICK %s" % randomnick)
self.cfg.wantnick = self.cfg.nick
self.cfg.nick = randomnick
logging.warn('ALERT: nick %s already in use/unavailable .. using randomnick %s (%s)' % (nick, randomnick, self.cfg.name))
self.nickchanged = 1
def handle_ping(self, ievent):
""" send pong response. """
if not ievent.txt: logging.debug("no txt set") ; return
self._raw('PONG :%s' % ievent.txt)
def handle_001(self, ievent):
""" we are connected. """
time.sleep(1)
self._onconnect()
self.connectok.set()
self.connected = True
self.whois(self.cfg.nick)
def handle_privmsg(self, ievent):
""" check if msg is ctcp or not .. return 1 on handling. """
if ievent.txt and ievent.txt[0] == '\001':
self.handle_ctcp(ievent)
return 1
def handle_notice(self, ievent):
""" handle notice event .. check for version request. """
if ievent.txt and ievent.txt.find('VERSION') != -1:
from jsb.version import getversion
self.say(ievent.nick, getversion(), None, 'notice')
return 1
ievent.cbtype == "NOTICE"
def handle_ctcp(self, ievent):
""" handle client to client request .. version and ping. """
if ievent.txt.find('VERSION') != -1:
from jsb.version import getversion
self.ctcpreply(ievent.nick, 'VERSION %s' % getversion())
if ievent.txt.find('PING') != -1:
try:
pingtime = ievent.txt.split()[1]
pingtijsb = ievent.txt.split()[2]
if pingtime:
self.ctcpreply(ievent.nick, 'PING ' + pingtime + ' ' + pingtijsb)
except IndexError:
pass
def handle_error(self, ievent):
""" show error. """
txt = ievent.txt
if 'Closing' in txt:
if "banned" in txt.lower() or "-lined" in txt.lower():
logging.error("WE ARE BANNED !! - %s - %s" % (self.cfg.server, txt))
self.exit()
else: logging.error("%s - %s" % (self.cfg.name, txt))
else: logging.error("%s - %s - %s" % (self.cfg.name.upper(), ", ".join(ievent.arguments[1:]), txt))
def ping(self):
""" ping the irc server. """
logging.debug('%s - sending ping' % self.cfg.name)
try:
self._raw('PING :%s' % self.cfg.server)
return 1
except Exception, ex:
logging.warn("can't send ping: %s (%s)" % (str(ex), self.cfg.name))
return 0
def handle_401(self, ievent):
""" handle 401 .. nick not available. """
pass
def handle_700(self, ievent):
""" handle 700 .. encoding request of the server. """
try:
self.encoding = ievent.arguments[1]
logging.warn('700 encoding now is %s (%s)' % (self.encoding, self.cfg.name))
except:
pass
def handle_465(self, ievent):
""" we are banned.. exit the bot. """
self.exit()
|
|
# -*- coding: utf-8 -*-
"""
This module provides helpers for C++11+ projects using pybind11.
LICENSE:
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# IMPORTANT: If you change this file in the pybind11 repo, also review
# setup_helpers.pyi for matching changes.
#
# If you copy this file in, you don't
# need the .pyi file; it's just an interface file for static type checkers.
import contextlib
import os
import shutil
import sys
import tempfile
import threading
import platform
import warnings
try:
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools import Extension as _Extension
except ImportError:
from distutils.command.build_ext import build_ext as _build_ext
from distutils.extension import Extension as _Extension
import distutils.errors
import distutils.ccompiler
WIN = sys.platform.startswith("win32")
PY2 = sys.version_info[0] < 3
MACOS = sys.platform.startswith("darwin")
STD_TMPL = "/std:c++{}" if WIN else "-std=c++{}"
# It is recommended to use PEP 518 builds if using this module. However, this
# file explicitly supports being copied into a user's project directory
# standalone, and pulling pybind11 with the deprecated setup_requires feature.
# If you copy the file, remember to add it to your MANIFEST.in, and add the current
# directory into your path if it sits beside your setup.py.
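# Minimal usage sketch (a hypothetical setup.py; the module and source names
# are placeholders). If this file was copied into your project, import from it
# directly instead of from ``pybind11.setup_helpers``:
#
#   from setuptools import setup
#   from pybind11.setup_helpers import Pybind11Extension, build_ext
#
#   ext_modules = [Pybind11Extension("example", ["src/example.cpp"])]
#   setup(name="example", ext_modules=ext_modules,
#         cmdclass={"build_ext": build_ext})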
class Pybind11Extension(_Extension):
"""
Build a C++11+ Extension module with pybind11. This automatically adds the
recommended flags when you init the extension and assumes C++ sources - you
can further modify the options yourself.
The customizations are:
* ``/EHsc`` and ``/bigobj`` on Windows
* ``stdlib=libc++`` on macOS
* ``visibility=hidden`` and ``-g0`` on Unix
Finally, you can set ``cxx_std`` via constructor or afterwards to enable
flags for C++ std, and a few extra helper flags related to the C++ standard
level. It is _highly_ recommended you either set this, or use the provided
``build_ext``, which will search for the highest supported extension for
you if the ``cxx_std`` property is not set. Do not set the ``cxx_std``
property more than once, as flags are added when you set it. Set the
property to None to disable the addition of C++ standard flags.
If you want to add pybind11 headers manually, for example for an exact
git checkout, then set ``include_pybind11=False``.
Warning: do not use property-based access to the instance on Python 2 -
this is an ugly old-style class due to Distutils.
"""
def _add_cflags(self, *flags):
for flag in flags:
if flag not in self.extra_compile_args:
self.extra_compile_args.append(flag)
def _add_lflags(self, *flags):
for flag in flags:
if flag not in self.extra_link_args:
self.extra_link_args.append(flag)
def __init__(self, *args, **kwargs):
self._cxx_level = 0
cxx_std = kwargs.pop("cxx_std", 0)
if "language" not in kwargs:
kwargs["language"] = "c++"
include_pybind11 = kwargs.pop("include_pybind11", True)
# Can't use super here because distutils has old-style classes in
# Python 2!
_Extension.__init__(self, *args, **kwargs)
# Include the installed package pybind11 headers
if include_pybind11:
# If using setup_requires, this fails the first time - that's okay
try:
import pybind11
pyinc = pybind11.get_include()
if pyinc not in self.include_dirs:
self.include_dirs.append(pyinc)
except ImportError:
pass
# Have to use the accessor manually to support Python 2 distutils
Pybind11Extension.cxx_std.__set__(self, cxx_std)
if WIN:
self._add_cflags("/EHsc", "/bigobj")
else:
self._add_cflags("-fvisibility=hidden", "-g0")
if MACOS:
self._add_cflags("-stdlib=libc++")
self._add_lflags("-stdlib=libc++")
@property
def cxx_std(self):
"""
The CXX standard level. If set, will add the required flags. If left
at 0, it will trigger an automatic search when pybind11's build_ext
is used. If None, will have no effect. Besides just the flags, this
may add a register warning/error fix for Python 2 or a macOS
minimum-version flag (10.9 or 10.14).
"""
return self._cxx_level
@cxx_std.setter
def cxx_std(self, level):
if self._cxx_level:
warnings.warn("You cannot safely change the cxx_level after setting it!")
# MSVC 2015 Update 3 and later only have 14 (and later 17) modes, so
# force a valid flag here.
if WIN and level == 11:
level = 14
self._cxx_level = level
if not level:
return
self.extra_compile_args.append(STD_TMPL.format(level))
if MACOS and "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
# C++17 requires a higher min version of macOS. An earlier version
# (10.12 or 10.13) can be set manually via environment variable if
# you are careful in your feature usage, but 10.14 is the safest
# setting for general use. However, never set higher than the
# current macOS version!
current_macos = tuple(int(x) for x in platform.mac_ver()[0].split(".")[:2])
desired_macos = (10, 9) if level < 17 else (10, 14)
macos_string = ".".join(str(x) for x in min(current_macos, desired_macos))
macosx_min = "-mmacosx-version-min=" + macos_string
self.extra_compile_args.append(macosx_min)
self.extra_link_args.append(macosx_min)
if PY2:
if WIN:
# Will be ignored on MSVC 2015, where C++17 is not supported so
# this flag is not valid.
self.extra_compile_args.append("/wd5033")
elif level >= 17:
self.extra_compile_args.append("-Wno-register")
elif level >= 14:
self.extra_compile_args.append("-Wno-deprecated-register")
# Just in case someone clever tries to multithread
tmp_chdir_lock = threading.Lock()
cpp_cache_lock = threading.Lock()
@contextlib.contextmanager
def tmp_chdir():
"Prepare and enter a temporary directory, cleanup when done"
# Threadsafe
with tmp_chdir_lock:
olddir = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
yield tmpdir
finally:
os.chdir(olddir)
shutil.rmtree(tmpdir)
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flag):
"""
Return True if the given flag name is supported by the specified
compiler, otherwise False.
"""
with tmp_chdir():
fname = "flagcheck.cpp"
with open(fname, "w") as f:
# Don't trigger -Wunused-parameter.
f.write("int main (int, char **) { return 0; }")
try:
compiler.compile([fname], extra_postargs=[flag])
except distutils.errors.CompileError:
return False
return True
# Every call will cache the result
cpp_flag_cache = None
def auto_cpp_level(compiler):
"""
Return the max supported C++ std level (17, 14, or 11). Returns latest on Windows.
"""
if WIN:
return "latest"
global cpp_flag_cache
# If this has been previously calculated with the same args, return that
with cpp_cache_lock:
if cpp_flag_cache:
return cpp_flag_cache
levels = [17, 14, 11]
for level in levels:
if has_flag(compiler, STD_TMPL.format(level)):
with cpp_cache_lock:
cpp_flag_cache = level
return level
msg = "Unsupported compiler -- at least C++11 support is needed!"
raise RuntimeError(msg)
class build_ext(_build_ext): # noqa: N801
"""
Customized build_ext that allows an auto-search for the highest supported
C++ level for Pybind11Extension. This is only needed for the auto-search
for now, and is completely optional otherwise.
"""
def build_extensions(self):
"""
Build extensions, injecting C++ std for Pybind11Extension if needed.
"""
for ext in self.extensions:
if hasattr(ext, "_cxx_level") and ext._cxx_level == 0:
# Python 2 syntax - old-style distutils class
ext.__class__.cxx_std.__set__(ext, auto_cpp_level(self.compiler))
# Python 2 doesn't allow super here, since distutils uses old-style
# classes!
_build_ext.build_extensions(self)
def naive_recompile(obj, src):
"""
This will recompile only if the source file changes. It does not check
header files, so a more advanced function or Ccache is better if you have
editable header files in your package.
"""
return os.stat(obj).st_mtime < os.stat(src).st_mtime
def no_recompile(obj, src):
"""
This is the safest but slowest choice (and is the default) - will always
recompile sources.
"""
return True
# Optional parallel compile utility
# inspired by: http://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils
# and: https://github.com/tbenthompson/cppimport/blob/stable/cppimport/build_module.py
# and NumPy's parallel distutils module:
# https://github.com/numpy/numpy/blob/master/numpy/distutils/ccompiler.py
class ParallelCompile(object):
"""
Make a parallel compile function. Inspired by
numpy.distutils.ccompiler.CCompiler_compile and cppimport.
This takes several arguments that allow you to customize the compile
function created:
envvar:
Set an environment variable to control the compilation threads, like
NPY_NUM_BUILD_JOBS
default:
0 will automatically multithread, or 1 will only multithread if the
envvar is set.
max:
The limit for automatic multithreading if non-zero
needs_recompile:
A function of (obj, src) that returns True when recompile is needed. No
effect in isolated mode; use ccache instead, see
https://github.com/matplotlib/matplotlib/issues/1507/
To use::
ParallelCompile("NPY_NUM_BUILD_JOBS").install()
or::
with ParallelCompile("NPY_NUM_BUILD_JOBS"):
setup(...)
By default, this assumes all files need to be recompiled. A smarter
function can be provided via needs_recompile. If the output has not yet
been generated, the compile will always run, and this function is not
called.
"""
__slots__ = ("envvar", "default", "max", "_old", "needs_recompile")
def __init__(self, envvar=None, default=0, max=0, needs_recompile=no_recompile):
self.envvar = envvar
self.default = default
self.max = max
self.needs_recompile = needs_recompile
self._old = []
def function(self):
"""
Builds a function object usable as distutils.ccompiler.CCompiler.compile.
"""
def compile_function(
compiler,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None,
):
# These lines are directly from distutils.ccompiler.CCompiler
macros, objects, extra_postargs, pp_opts, build = compiler._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs
)
cc_args = compiler._get_cc_args(pp_opts, debug, extra_preargs)
# The number of threads; start with default.
threads = self.default
# If an environment variable was configured, it overrides the default thread count.
if self.envvar is not None:
threads = int(os.environ.get(self.envvar, self.default))
def _single_compile(obj):
try:
src, ext = build[obj]
except KeyError:
return
if not os.path.exists(obj) or self.needs_recompile(obj, src):
compiler._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
try:
import multiprocessing
from multiprocessing.pool import ThreadPool
except ImportError:
threads = 1
if threads == 0:
try:
threads = multiprocessing.cpu_count()
threads = self.max if self.max and self.max < threads else threads
except NotImplementedError:
threads = 1
if threads > 1:
for _ in ThreadPool(threads).imap_unordered(_single_compile, objects):
pass
else:
for ob in objects:
_single_compile(ob)
return objects
return compile_function
def install(self):
distutils.ccompiler.CCompiler.compile = self.function()
return self
def __enter__(self):
self._old.append(distutils.ccompiler.CCompiler.compile)
return self.install()
def __exit__(self, *args):
distutils.ccompiler.CCompiler.compile = self._old.pop()
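# Illustrative only (hypothetical setup.py snippet): install the parallel
# compile hook before calling setup(), reading the job count from an
# environment variable and recompiling only objects whose sources changed:
#
#   ParallelCompile("NPY_NUM_BUILD_JOBS", needs_recompile=naive_recompile).install()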
|
|
"""The tests for the demo climate component."""
import unittest
from homeassistant.util.unit_system import (
METRIC_SYSTEM
)
from homeassistant.bootstrap import setup_component
from homeassistant.components import climate
from tests.common import get_test_home_assistant
ENTITY_CLIMATE = 'climate.hvac'
ENTITY_ECOBEE = 'climate.ecobee'
ENTITY_HEATPUMP = 'climate.heatpump'
class TestDemoClimate(unittest.TestCase):
"""Test the demo climate hvac."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.units = METRIC_SYSTEM
self.assertTrue(setup_component(self.hass, climate.DOMAIN, {
'climate': {
'platform': 'demo',
}}))
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_params(self):
"""Test the inititial parameters."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(21, state.attributes.get('temperature'))
self.assertEqual('on', state.attributes.get('away_mode'))
self.assertEqual(22, state.attributes.get('current_temperature'))
self.assertEqual("On High", state.attributes.get('fan_mode'))
self.assertEqual(67, state.attributes.get('humidity'))
self.assertEqual(54, state.attributes.get('current_humidity'))
self.assertEqual("Off", state.attributes.get('swing_mode'))
self.assertEqual("cool", state.attributes.get('operation_mode'))
self.assertEqual('off', state.attributes.get('aux_heat'))
def test_default_setup_params(self):
"""Test the setup with default parameters."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(7, state.attributes.get('min_temp'))
self.assertEqual(35, state.attributes.get('max_temp'))
self.assertEqual(30, state.attributes.get('min_humidity'))
self.assertEqual(99, state.attributes.get('max_humidity'))
def test_set_only_target_temp_bad_attr(self):
"""Test setting the target temperature without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(21, state.attributes.get('temperature'))
climate.set_temperature(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
self.assertEqual(21, state.attributes.get('temperature'))
def test_set_only_target_temp(self):
"""Test the setting of the target temperature."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(21, state.attributes.get('temperature'))
climate.set_temperature(self.hass, 30, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(30.0, state.attributes.get('temperature'))
def test_set_only_target_temp_with_convert(self):
"""Test the setting of the target temperature."""
state = self.hass.states.get(ENTITY_HEATPUMP)
self.assertEqual(20, state.attributes.get('temperature'))
climate.set_temperature(self.hass, 21, ENTITY_HEATPUMP)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_HEATPUMP)
self.assertEqual(21.0, state.attributes.get('temperature'))
def test_set_target_temp_range(self):
"""Test the setting of the target temperature with range."""
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('temperature'))
self.assertEqual(21.0, state.attributes.get('target_temp_low'))
self.assertEqual(24.0, state.attributes.get('target_temp_high'))
climate.set_temperature(self.hass, target_temp_high=25,
target_temp_low=20, entity_id=ENTITY_ECOBEE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('temperature'))
self.assertEqual(20.0, state.attributes.get('target_temp_low'))
self.assertEqual(25.0, state.attributes.get('target_temp_high'))
def test_set_target_temp_range_bad_attr(self):
"""Test setting the target temperature range without required
attribute."""
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('temperature'))
self.assertEqual(21.0, state.attributes.get('target_temp_low'))
self.assertEqual(24.0, state.attributes.get('target_temp_high'))
climate.set_temperature(self.hass, temperature=None,
entity_id=ENTITY_ECOBEE, target_temp_low=None,
target_temp_high=None)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('temperature'))
self.assertEqual(21.0, state.attributes.get('target_temp_low'))
self.assertEqual(24.0, state.attributes.get('target_temp_high'))
def test_set_target_humidity_bad_attr(self):
"""Test setting the target humidity without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(67, state.attributes.get('humidity'))
climate.set_humidity(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(67, state.attributes.get('humidity'))
def test_set_target_humidity(self):
"""Test the setting of the target humidity."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(67, state.attributes.get('humidity'))
climate.set_humidity(self.hass, 64, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(64.0, state.attributes.get('humidity'))
def test_set_fan_mode_bad_attr(self):
"""Test setting fan mode without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("On High", state.attributes.get('fan_mode'))
climate.set_fan_mode(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("On High", state.attributes.get('fan_mode'))
def test_set_fan_mode(self):
"""Test setting of new fan mode."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("On High", state.attributes.get('fan_mode'))
climate.set_fan_mode(self.hass, "On Low", ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("On Low", state.attributes.get('fan_mode'))
def test_set_swing_mode_bad_attr(self):
"""Test setting swing mode without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("Off", state.attributes.get('swing_mode'))
climate.set_swing_mode(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("Off", state.attributes.get('swing_mode'))
def test_set_swing(self):
"""Test setting of new swing mode."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("Off", state.attributes.get('swing_mode'))
climate.set_swing_mode(self.hass, "Auto", ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("Auto", state.attributes.get('swing_mode'))
def test_set_operation_bad_attr_and_state(self):
"""Test setting operation mode without required attribute, and
check the state."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("cool", state.attributes.get('operation_mode'))
self.assertEqual("cool", state.state)
climate.set_operation_mode(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("cool", state.attributes.get('operation_mode'))
self.assertEqual("cool", state.state)
def test_set_operation(self):
"""Test setting of new operation mode."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("cool", state.attributes.get('operation_mode'))
self.assertEqual("cool", state.state)
climate.set_operation_mode(self.hass, "heat", ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("heat", state.attributes.get('operation_mode'))
self.assertEqual("heat", state.state)
def test_set_away_mode_bad_attr(self):
"""Test setting the away mode without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('on', state.attributes.get('away_mode'))
climate.set_away_mode(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
self.assertEqual('on', state.attributes.get('away_mode'))
def test_set_away_mode_on(self):
"""Test setting the away mode on/true."""
climate.set_away_mode(self.hass, True, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('on', state.attributes.get('away_mode'))
def test_set_away_mode_off(self):
"""Test setting the away mode off/false."""
climate.set_away_mode(self.hass, False, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('off', state.attributes.get('away_mode'))
def test_set_aux_heat_bad_attr(self):
"""Test setting the auxillary heater without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('off', state.attributes.get('aux_heat'))
climate.set_aux_heat(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
self.assertEqual('off', state.attributes.get('aux_heat'))
def test_set_aux_heat_on(self):
"""Test setting the axillary heater on/true."""
climate.set_aux_heat(self.hass, True, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('on', state.attributes.get('aux_heat'))
def test_set_aux_heat_off(self):
"""Test setting the auxillary heater off/false."""
climate.set_aux_heat(self.hass, False, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('off', state.attributes.get('aux_heat'))
|
|
"""Test the onboarding views."""
import asyncio
import os
from unittest.mock import patch
import pytest
from homeassistant.components import onboarding
from homeassistant.components.onboarding import const, views
from homeassistant.const import HTTP_FORBIDDEN
from homeassistant.helpers import area_registry as ar
from homeassistant.setup import async_setup_component
from . import mock_storage
from tests.common import CLIENT_ID, CLIENT_REDIRECT_URI, register_auth_provider
from tests.components.met.conftest import mock_weather # noqa: F401
@pytest.fixture(autouse=True)
def always_mock_weather(mock_weather): # noqa: F811
"""Mock the Met weather provider."""
pass
@pytest.fixture(autouse=True)
def auth_active(hass):
"""Ensure auth is always active."""
hass.loop.run_until_complete(
register_auth_provider(hass, {"type": "homeassistant"})
)
@pytest.fixture(name="rpi")
async def rpi_fixture(hass, aioclient_mock, mock_supervisor):
"""Mock core info with rpi."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "machine": "raspberrypi3"},
},
)
assert await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
@pytest.fixture(name="no_rpi")
async def no_rpi_fixture(hass, aioclient_mock, mock_supervisor):
"""Mock core info with rpi."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "machine": "odroid-n2"},
},
)
assert await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
@pytest.fixture(name="mock_supervisor")
async def mock_supervisor_fixture(hass, aioclient_mock):
"""Mock supervisor."""
aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"})
with patch.dict(os.environ, {"HASSIO": "127.0.0.1"}), patch(
"homeassistant.components.hassio.HassIO.is_connected",
return_value=True,
), patch(
"homeassistant.components.hassio.HassIO.get_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_host_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_store",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value={"diagnostics": True},
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_ingress_panels",
return_value={"panels": {}},
), patch.dict(
os.environ, {"HASSIO_TOKEN": "123456"}
):
yield
async def test_onboarding_progress(hass, hass_storage, hass_client_no_auth):
"""Test fetching progress."""
mock_storage(hass_storage, {"done": ["hello"]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
with patch.object(views, "STEPS", ["hello", "world"]):
resp = await client.get("/api/onboarding")
assert resp.status == 200
data = await resp.json()
assert len(data) == 2
assert data[0] == {"step": "hello", "done": True}
assert data[1] == {"step": "world", "done": False}
async def test_onboarding_user_already_done(hass, hass_storage, hass_client_no_auth):
"""Test creating a new user when user step already done."""
mock_storage(hass_storage, {"done": [views.STEP_USER]})
with patch.object(onboarding, "STEPS", ["hello", "world"]):
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test Name",
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == HTTP_FORBIDDEN
async def test_onboarding_user(hass, hass_storage, hass_client_no_auth):
"""Test creating a new user."""
assert await async_setup_component(hass, "person", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test Name",
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == 200
assert const.STEP_USER in hass_storage[const.DOMAIN]["data"]["done"]
data = await resp.json()
assert "auth_code" in data
users = await hass.auth.async_get_users()
assert len(users) == 1
user = users[0]
assert user.name == "Test Name"
assert len(user.credentials) == 1
assert user.credentials[0].data["username"] == "test-user"
assert len(hass.data["person"][1].async_items()) == 1
# Validate refresh token 1
resp = await client.post(
"/auth/token",
data={
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code": data["auth_code"],
},
)
assert resp.status == 200
tokens = await resp.json()
assert (
await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
)
# Validate created areas
area_registry = ar.async_get(hass)
assert len(area_registry.areas) == 3
assert sorted(area.name for area in area_registry.async_list_areas()) == [
"Bedroom",
"Kitchen",
"Living Room",
]
async def test_onboarding_user_invalid_name(hass, hass_storage, hass_client_no_auth):
"""Test not providing name."""
mock_storage(hass_storage, {"done": []})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == 400
async def test_onboarding_user_race(hass, hass_storage, hass_client_no_auth):
"""Test race condition on creating new user."""
mock_storage(hass_storage, {"done": ["hello"]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp1 = client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test 1",
"username": "1-user",
"password": "1-pass",
"language": "en",
},
)
resp2 = client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test 2",
"username": "2-user",
"password": "2-pass",
"language": "es",
},
)
res1, res2 = await asyncio.gather(resp1, resp2)
assert sorted([res1.status, res2.status]) == [200, HTTP_FORBIDDEN]
async def test_onboarding_integration(hass, hass_storage, hass_client, hass_admin_user):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": CLIENT_REDIRECT_URI},
)
assert resp.status == 200
data = await resp.json()
assert "auth_code" in data
# Validate refresh token
resp = await client.post(
"/auth/token",
data={
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code": data["auth_code"],
},
)
assert resp.status == 200
assert const.STEP_INTEGRATION in hass_storage[const.DOMAIN]["data"]["done"]
tokens = await resp.json()
assert (
await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
)
# Onboarding refresh token and new refresh token
for user in await hass.auth.async_get_users():
assert len(user.refresh_tokens) == 2, user
async def test_onboarding_integration_missing_credential(
hass, hass_storage, hass_client, hass_access_token
):
"""Test that we fail integration step if user is missing credentials."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
refresh_token.credential = None
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": CLIENT_REDIRECT_URI},
)
assert resp.status == 403
async def test_onboarding_integration_invalid_redirect_uri(
hass, hass_storage, hass_client
):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": "http://invalid-redirect.uri"},
)
assert resp.status == 400
# We will still mark the last step as done because there is nothing left.
assert const.STEP_INTEGRATION in hass_storage[const.DOMAIN]["data"]["done"]
# Only refresh token from onboarding should be there
for user in await hass.auth.async_get_users():
assert len(user.refresh_tokens) == 1, user
async def test_onboarding_integration_requires_auth(
hass, hass_storage, hass_client_no_auth
):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/integration", json={"client_id": CLIENT_ID}
)
assert resp.status == 401
async def test_onboarding_core_sets_up_met(hass, hass_storage, hass_client):
"""Test finishing the core step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("weather")) == 1
async def test_onboarding_core_sets_up_rpi_power(
hass, hass_storage, hass_client, aioclient_mock, rpi
):
"""Test that the core step sets up rpi_power on RPi."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.rpi_power.config_flow.new_under_voltage"
), patch("homeassistant.components.rpi_power.binary_sensor.new_under_voltage"):
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
rpi_power_state = hass.states.get("binary_sensor.rpi_power_status")
assert rpi_power_state
async def test_onboarding_core_no_rpi_power(
hass, hass_storage, hass_client, aioclient_mock, no_rpi
):
"""Test that the core step do not set up rpi_power on non RPi."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.rpi_power.config_flow.new_under_voltage"
), patch("homeassistant.components.rpi_power.binary_sensor.new_under_voltage"):
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
rpi_power_state = hass.states.get("binary_sensor.rpi_power_status")
assert not rpi_power_state
async def test_onboarding_analytics(hass, hass_storage, hass_client, hass_admin_user):
"""Test finishing analytics step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post("/api/onboarding/analytics")
assert resp.status == 200
assert const.STEP_ANALYTICS in hass_storage[const.DOMAIN]["data"]["done"]
resp = await client.post("/api/onboarding/analytics")
assert resp.status == 403
async def test_onboarding_installation_type(hass, hass_storage, hass_client):
"""Test returning installation type during onboarding."""
mock_storage(hass_storage, {"done": []})
await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.onboarding.views.async_get_system_info",
return_value={"installation_type": "Home Assistant Core"},
):
resp = await client.get("/api/onboarding/installation_type")
assert resp.status == 200
resp_content = await resp.json()
assert resp_content["installation_type"] == "Home Assistant Core"
async def test_onboarding_installation_type_after_done(hass, hass_storage, hass_client):
"""Test raising for installation type after onboarding."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/onboarding/installation_type")
assert resp.status == 401
|
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Histogram class which lets you build your histograms just passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
try:
import scipy.special
_is_scipy = True
except ImportError:
_is_scipy = False
import numpy as np
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import Line, Quad
from ...properties import Bool, Float, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(values, bins, mu=None, sigma=None, density=True, **kws):
""" Create a histogram chart using :class:`HistogramBuilder <bokeh.charts.builder.histogram_builder.HistogramBuilder>`
to render the geometry from values, bins, sigma and density.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
bins (int): number of bins to use in the Histogram building.
mu (float, optional): theoretical mean value for the normal
distribution. (default: None)
sigma (float, optional): theoretical sigma value for the
normal distribution. (default: None)
density (bool, optional): If False, the result will contain
the number of samples in each bin. If True, the result
is the value of the probability *density* function at
the bin, normalized such that the *integral* over the
range is 1. For more info check numpy.histogram
function documentation. (default: True)
In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.charts import Histogram, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = pd.DataFrame(dict(normal=[1, 2, 3, 1], lognormal=[5, 4, 4, 1]))
hm = Histogram(xyvalues, bins=5, title='Histogram')
output_file('histogram.html')
show(hm)
"""
return create_and_build(
HistogramBuilder, values, bins=bins, mu=mu, sigma=sigma, density=density,
**kws
)
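# A hedged follow-up to the docstring example above: when both ``mu`` and
# ``sigma`` are supplied (and scipy is installed), the chart also draws the
# theoretical pdf/cdf lines computed in HistogramBuilder._process_data. The
# values of mu/sigma below are illustrative only.
#
#   import pandas as pd
#   from bokeh.charts import Histogram, output_file, show
#   xyvalues = pd.DataFrame(dict(normal=[1, 2, 3, 1], lognormal=[5, 4, 4, 1]))
#   hm = Histogram(xyvalues, bins=5, mu=2.0, sigma=1.0, title='Histogram + pdf/cdf')
#   output_file('histogram_pdf.html')
#   show(hm)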
class HistogramBuilder(Builder):
"""This is the Histogram class and it is in charge of plotting
histograms in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (quads and lines) taking the
references from the source.
"""
bins = Int(10, help="""
Number of bins to use for the histogram. (default: 10)
""")
mu = Float(help="""
Theoretical mean value for the normal distribution. (default: None)
""")
sigma = Float(help="""
Theoretical standard deviation value for the normal distribution.
(default: None)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
def _process_data(self):
"""Take the Histogram data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad and line glyphs inside the ``_yield_renderers`` method.
"""
# list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# fill the data dictionary with the proper values
for i, (val, values) in enumerate(self._values.items()):
self.set_and_get("", val, values)
# build the histogram using the configured number of bins
hist, edges = np.histogram(
np.array(values), density=self.density, bins=self.bins
)
self.set_and_get("hist", val, hist)
self.set_and_get("edges", val, edges)
self.set_and_get("left", val, edges[:-1])
self.set_and_get("right", val, edges[1:])
self.set_and_get("bottom", val, np.zeros(len(hist)))
self._mu_and_sigma = False
if self.mu is not None and self.sigma is not None:
if _is_scipy:
self._mu_and_sigma = True
self.set_and_get("x", val, np.linspace(-2, 2, len(self._data[val])))
den = 2 * self.sigma ** 2
x_val = self._data["x" + val]
x_val_mu = x_val - self.mu
sigsqr2pi = self.sigma * np.sqrt(2 * np.pi)
pdf = 1 / (sigsqr2pi) * np.exp(-x_val_mu ** 2 / den)
self.set_and_get("pdf", val, pdf)
self._groups.append("pdf")
cdf = (1 + scipy.special.erf(x_val_mu / np.sqrt(den))) / 2
self.set_and_get("cdf", val, cdf)
self._groups.append("cdf")
else:
print("You need scipy to get the theoretical probability distributions.")
def _set_sources(self):
"""Push the Histogram data into the ColumnDataSource and calculate
the proper ranges."""
self._source = ColumnDataSource(data=self._data)
if not self._mu_and_sigma:
x_names, y_names = self._attr[2::6], self._attr[1::6]
else:
x_names, y_names = self._attr[2::9], self._attr[1::9]
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx))
endy = max(max(self._data[i]) for i in y_names)
self.y_range = Range1d(start=0, end=1.1 * endy)
def _yield_renderers(self):
"""Use the several glyphs to display the Histogram and pdf/cdf.
It uses the quad (and line) glyphs to display the Histogram
bars, taking as reference points the data loaded at the
ColumnDataSource.
"""
if not self._mu_and_sigma:
sextets = list(chunk(self._attr, 6))
colors = cycle_colors(sextets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# sextet: values, hist, edges, left, right, bottom
for i, sextet in enumerate(sextets):
glyph = Quad(
top=sextet[1], bottom=sextet[5], left=sextet[3], right=sextet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
else:
nonets = list(chunk(self._attr, 9))
colors = cycle_colors(nonets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# nonet: values, hist, edges, left, right, bottom, x, pdf, cdf
for i, nonet in enumerate(nonets):
glyph = Quad(
top=nonet[1], bottom=nonet[5], left=nonet[3], right=nonet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
glyph = Line(x=nonet[6], y=nonet[7], line_color="black")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
glyph = Line(x=nonet[6], y=nonet[8], line_color="blue")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Cloud Spanner Database."""
import re
import google.auth.credentials
from google.gax.errors import GaxError
from google.gax.grpc import exc_to_code
from google.cloud.gapic.spanner.v1.spanner_client import SpannerClient
from grpc import StatusCode
import six
# pylint: disable=ungrouped-imports
from google.cloud.exceptions import Conflict
from google.cloud.exceptions import NotFound
from google.cloud.spanner import __version__
from google.cloud.spanner._helpers import _options_with_prefix
from google.cloud.spanner.batch import Batch
from google.cloud.spanner.session import Session
from google.cloud.spanner.pool import BurstyPool
from google.cloud.spanner.snapshot import Snapshot
from google.cloud.spanner.pool import SessionCheckout
# pylint: enable=ungrouped-imports
SPANNER_DATA_SCOPE = 'https://www.googleapis.com/auth/spanner.data'
_DATABASE_NAME_RE = re.compile(
r'^projects/(?P<project>[^/]+)/'
r'instances/(?P<instance_id>[a-z][-a-z0-9]*)/'
r'databases/(?P<database_id>[a-z][a-z0-9_\-]*[a-z0-9])$'
)
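# For reference, a fully-qualified database name that matches the pattern
# above (the project/instance/database IDs here are hypothetical):
#
#   projects/my-project/instances/my-instance/databases/my-database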
class Database(object):
"""Representation of a Cloud Spanner Database.
We can use a :class:`Database` to:
* :meth:`create` the database
* :meth:`reload` the database
* :meth:`update` the database
* :meth:`drop` the database
:type database_id: str
:param database_id: The ID of the database.
:type instance: :class:`~google.cloud.spanner.instance.Instance`
:param instance: The instance that owns the database.
:type ddl_statements: list of string
:param ddl_statements: (Optional) DDL statements, excluding the
CREATE DATABASE statement.
:type pool: concrete subclass of
:class:`~google.cloud.spanner.pool.AbstractSessionPool`.
:param pool: (Optional) session pool to be used by database. If not
passed, the database will construct an instance of
:class:`~google.cloud.spanner.pool.BurstyPool`.
"""
_spanner_api = None
def __init__(self, database_id, instance, ddl_statements=(), pool=None):
self.database_id = database_id
self._instance = instance
self._ddl_statements = _check_ddl_statements(ddl_statements)
if pool is None:
pool = BurstyPool()
self._pool = pool
pool.bind(self)
@classmethod
def from_pb(cls, database_pb, instance, pool=None):
"""Creates an instance of this class from a protobuf.
:type database_pb:
:class:`google.spanner.v2.spanner_instance_admin_pb2.Instance`
:param database_pb: An instance protobuf object.
:type instance: :class:`~google.cloud.spanner.instance.Instance`
:param instance: The instance that owns the database.
:type pool: concrete subclass of
:class:`~google.cloud.spanner.pool.AbstractSessionPool`.
:param pool: (Optional) session pool to be used by database.
:rtype: :class:`Database`
:returns: The database parsed from the protobuf response.
:raises ValueError:
if the instance name does not match the expected format
or if the parsed project ID does not match the project ID
on the instance's client, or if the parsed instance ID does
not match the instance's ID.
"""
match = _DATABASE_NAME_RE.match(database_pb.name)
if match is None:
raise ValueError('Database protobuf name was not in the '
'expected format.', database_pb.name)
if match.group('project') != instance._client.project:
raise ValueError('Project ID on database does not match the '
'project ID on the instance\'s client')
instance_id = match.group('instance_id')
if instance_id != instance.instance_id:
raise ValueError('Instance ID on database does not match the '
'Instance ID on the instance')
database_id = match.group('database_id')
return cls(database_id, instance, pool=pool)
@property
def name(self):
"""Database name used in requests.
.. note::
This property will not change if ``database_id`` does not, but the
return value is not cached.
The database name is of the form
``"projects/../instances/../databases/{database_id}"``
:rtype: str
:returns: The database name.
"""
return self._instance.name + '/databases/' + self.database_id
@property
def ddl_statements(self):
"""DDL Statements used to define database schema.
See
cloud.google.com/spanner/docs/data-definition-language
:rtype: sequence of string
:returns: the statements
"""
return self._ddl_statements
@property
def spanner_api(self):
"""Helper for session-related API calls."""
if self._spanner_api is None:
credentials = self._instance._client.credentials
if isinstance(credentials, google.auth.credentials.Scoped):
credentials = credentials.with_scopes((SPANNER_DATA_SCOPE,))
self._spanner_api = SpannerClient(
lib_name='gccl',
lib_version=__version__,
credentials=credentials,
)
return self._spanner_api
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.database_id == self.database_id and
other._instance == self._instance)
def __ne__(self, other):
return not self.__eq__(other)
def create(self):
"""Create this database within its instance
Includes any configured schema assigned to :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase
:rtype: :class:`~google.cloud.future.operation.Operation`
:returns: a future used to poll the status of the create request
:raises Conflict: if the database already exists
:raises NotFound: if the instance owning the database does not exist
:raises GaxError:
for errors other than ``ALREADY_EXISTS`` returned from the call
"""
api = self._instance._client.database_admin_api
options = _options_with_prefix(self.name)
db_name = self.database_id
if '-' in db_name:
db_name = '`%s`' % (db_name,)
try:
future = api.create_database(
parent=self._instance.name,
create_statement='CREATE DATABASE %s' % (db_name,),
extra_statements=list(self._ddl_statements),
options=options,
)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.ALREADY_EXISTS:
raise Conflict(self.name)
elif exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound('Instance not found: {name}'.format(
name=self._instance.name,
))
raise
return future
def exists(self):
"""Test whether this database exists.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL
:rtype: bool
:returns: True if the database exists, else False.
:raises GaxError:
for errors other than ``NOT_FOUND`` returned from the call
"""
api = self._instance._client.database_admin_api
options = _options_with_prefix(self.name)
try:
api.get_database_ddl(self.name, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
return False
raise
return True
def reload(self):
"""Reload this database.
Refresh any configured schema into :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL
:raises NotFound: if the database does not exist
:raises GaxError:
for errors other than ``NOT_FOUND`` returned from the call
"""
api = self._instance._client.database_admin_api
options = _options_with_prefix(self.name)
try:
response = api.get_database_ddl(self.name, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(self.name)
raise
self._ddl_statements = tuple(response.statements)
def update_ddl(self, ddl_statements):
"""Update DDL for this database.
Apply any configured schema from :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase
:rtype: :class:`google.cloud.future.operation.Operation`
:returns: an operation instance
:raises NotFound: if the database does not exist
:raises GaxError:
for errors other than ``NOT_FOUND`` returned from the call
"""
client = self._instance._client
api = client.database_admin_api
options = _options_with_prefix(self.name)
try:
future = api.update_database_ddl(
self.name, ddl_statements, '', options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(self.name)
raise
return future
def drop(self):
"""Drop this database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
"""
api = self._instance._client.database_admin_api
options = _options_with_prefix(self.name)
try:
api.drop_database(self.name, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(self.name)
raise
def session(self):
"""Factory to create a session for this database.
:rtype: :class:`~google.cloud.spanner.session.Session`
:returns: a session bound to this database.
"""
return Session(self)
def read(self, table, columns, keyset, index='', limit=0,
resume_token=b''):
"""Perform a ``StreamingRead`` API request for rows in a table.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type limit: int
:param limit: (Optional) maximum number of rows to return
:type resume_token: bytes
:param resume_token: token for resuming previously-interrupted read
:rtype: :class:`~google.cloud.spanner.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
with SessionCheckout(self._pool) as session:
return session.read(
table, columns, keyset, index, limit, resume_token)
def execute_sql(self, sql, params=None, param_types=None, query_mode=None,
resume_token=b''):
"""Perform an ``ExecuteStreamingSql`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types:
dict, {str -> :class:`google.spanner.v1.type_pb2.TypeCode`}
:param param_types: (Optional) explicit types for one or more param
values; overrides default type detection on the
back-end.
:type query_mode:
:class:`google.spanner.v1.spanner_pb2.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan. See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1
:type resume_token: bytes
:param resume_token: token for resuming previously-interrupted query
:rtype: :class:`~google.cloud.spanner.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
with SessionCheckout(self._pool) as session:
return session.execute_sql(
sql, params, param_types, query_mode, resume_token)
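# A hedged usage sketch for the two read helpers above. The table and column
# names are hypothetical; ``KeySet`` is assumed to come from
# ``google.cloud.spanner.keyset`` in this version of the library, and
# ``param_types`` may also be needed where the backend cannot infer a type
# (see the docstring above).
#
#   from google.cloud.spanner.keyset import KeySet
#   rows = database.read('players', ['player_id', 'score'], KeySet(all_=True))
#   results = database.execute_sql(
#       'SELECT player_id, score FROM players WHERE score > @min_score',
#       params={'min_score': 100})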
def run_in_transaction(self, func, *args, **kw):
"""Perform a unit of work in a transaction, retrying on abort.
:type func: callable
:param func: takes a required positional argument, the transaction,
and additional positional / keyword arguments as supplied
by the caller.
:type args: tuple
:param args: additional positional arguments to be passed to ``func``.
:type kw: dict
:param kw: optional keyword arguments to be passed to ``func``.
If passed, "timeout_secs" will be removed and used to
override the default timeout.
:rtype: :class:`datetime.datetime`
:returns: timestamp of committed transaction
"""
with SessionCheckout(self._pool) as session:
return session.run_in_transaction(func, *args, **kw)
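# A hedged sketch of the contract described above: ``func`` receives the
# transaction as its first positional argument and may be re-run if the
# transaction aborts, so it should be safe to retry. The table/column names
# are hypothetical, and the transaction object is assumed to expose the same
# mutation helpers (``insert`` etc.) as ``Batch``.
#
#   def record_signup(transaction, user_id):
#       transaction.insert('signups', ['user_id'], [[user_id]])
#
#   commit_timestamp = database.run_in_transaction(record_signup, 123)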
def batch(self):
"""Return an object which wraps a batch.
The wrapper *must* be used as a context manager, with the batch
as the value returned by the wrapper.
:rtype: :class:`~google.cloud.spanner.database.BatchCheckout`
:returns: new wrapper
"""
return BatchCheckout(self)
def snapshot(self, **kw):
"""Return an object which wraps a snapshot.
The wrapper *must* be used as a context manager, with the snapshot
as the value returned by the wrapper.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.TransactionOptions.ReadOnly
:type kw: dict
:param kw:
Passed through to
:class:`~google.cloud.spanner.snapshot.Snapshot` constructor.
:rtype: :class:`~google.cloud.spanner.database.SnapshotCheckout`
:returns: new wrapper
"""
return SnapshotCheckout(self, **kw)
class BatchCheckout(object):
"""Context manager for using a batch from a database.
Inside the context manager, checks out a session from the database,
creates a batch from it, making the batch available.
Caller must *not* use the batch to perform API requests outside the scope
of the context manager.
:type database: :class:`~google.cloud.spanner.database.Database`
:param database: database to use
"""
def __init__(self, database):
self._database = database
self._session = self._batch = None
def __enter__(self):
"""Begin ``with`` block."""
session = self._session = self._database._pool.get()
batch = self._batch = Batch(session)
return batch
def __exit__(self, exc_type, exc_val, exc_tb):
"""End ``with`` block."""
try:
if exc_type is None:
self._batch.commit()
finally:
self._database._pool.put(self._session)
class SnapshotCheckout(object):
"""Context manager for using a snapshot from a database.
Inside the context manager, checks out a session from the database,
creates a snapshot from it, making the snapshot available.
Caller must *not* use the snapshot to perform API requests outside the
scope of the context manager.
:type database: :class:`~google.cloud.spanner.database.Database`
:param database: database to use
:type kw: dict
:param kw:
Passed through to
:class:`~google.cloud.spanner.snapshot.Snapshot` constructor.
"""
def __init__(self, database, **kw):
self._database = database
self._session = None
self._kw = kw
def __enter__(self):
"""Begin ``with`` block."""
session = self._session = self._database._pool.get()
return Snapshot(session, **self._kw)
def __exit__(self, exc_type, exc_val, exc_tb):
"""End ``with`` block."""
self._database._pool.put(self._session)
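# A hedged illustration of the two checkout wrappers above, obtained via
# ``Database.batch()`` and ``Database.snapshot()``. The table, columns and
# ``keyset`` value are hypothetical (a keyset can be built as in the read()
# sketch earlier in this module).
#
#   with database.batch() as batch:
#       batch.insert('signups', ['user_id'], [[456]])
#
#   with database.snapshot() as snapshot:
#       for row in snapshot.read('signups', ['user_id'], keyset):
#           print(row)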
def _check_ddl_statements(value):
"""Validate DDL Statements used to define database schema.
See
https://cloud.google.com/spanner/docs/data-definition-language
:type value: list of string
:param value: DDL statements, excluding the 'CREATE DATABASE' statement
:rtype: tuple
:returns: tuple of validated DDL statement strings.
:raises ValueError:
if elements in ``value`` are not strings, or if ``value`` contains
a ``CREATE DATABASE`` statement.
"""
if not all(isinstance(line, six.string_types) for line in value):
raise ValueError("Pass a list of strings")
if any('create database' in line.lower() for line in value):
raise ValueError("Do not pass a 'CREATE DATABASE' statement")
return tuple(value)
|
|
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import subprocess
import shutil
import unittest
import functools
import imath
import IECore
import Gaffer
import GafferTest
import GafferOSL
import GafferOSLTest
class OSLCodeTest( GafferOSLTest.OSLTestCase ) :
def testPlugTypes( self ) :
oslCode = GafferOSL.OSLCode()
code = ""
for i, plugType in enumerate( [
Gaffer.IntPlug,
Gaffer.FloatPlug,
functools.partial( Gaffer.V3fPlug, interpretation = IECore.GeometricData.Interpretation.Vector ),
Gaffer.Color3fPlug,
Gaffer.M44fPlug,
Gaffer.StringPlug,
GafferOSL.ClosurePlug,
] ) :
inName = "in%d" % i
outName = "out%d" % i
oslCode["parameters"][inName] = plugType( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["out"][outName] = plugType( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
code += "%s = %s;\n" % ( outName, inName )
oslCode["code"].setValue( code )
# The OSLCode node will have generated a shader from
# the code and parameters we gave it. Load this onto
# a regular OSLShader node to check it.
oslShader = GafferOSL.OSLShader()
oslShader.loadShader( self.__osoFileName( oslCode ) )
self.assertEqual( oslShader["parameters"].keys(), oslCode["parameters"].keys() )
self.assertEqual( oslShader["out"].keys(), oslCode["out"].keys() )
for p in oslShader["parameters"].children() :
p.setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( p ), repr( oslCode["parameters"][p.getName()] ) )
for p in oslShader["out"].children() :
p.setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( p ), repr( oslCode["out"][p.getName()] ) )
def testParseError( self ) :
n = GafferOSL.OSLCode()
self.__assertError( n, n["code"].setValue, "oops" )
def testParseErrorDoesntDestroyExistingPlugs( self ) :
n = GafferOSL.OSLCode()
n["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["out"]["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
originalPlugs = n["parameters"].children() + n["out"].children()
self.__assertError( n, n["code"].setValue, "oops" )
self.assertEqual( n["parameters"].children() + n["out"].children(), originalPlugs )
def testChildAddedSignalNotSuppressedByError( self ) :
n = GafferOSL.OSLCode()
self.__assertError( n, n["code"].setValue, "oops" )
cs = GafferTest.CapturingSlot( n["parameters"].childAddedSignal() )
n["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( len( cs ), 1 )
def testEmpty( self ) :
# We want empty shaders to still output a
# shader so that the ShaderView picks it
# up, ready to update when an output is
# added.
n = GafferOSL.OSLCode()
self.assertTrue( self.__osoFileName( n ) )
self.assertEqual( n["type"].getValue(), "osl:shader" )
n["code"].setValue( "//" )
self.assertTrue( self.__osoFileName( n ) )
self.assertEqual( n["type"].getValue(), "osl:shader" )
n["code"].setValue( "" )
self.assertTrue( self.__osoFileName( n ) )
self.assertEqual( n["type"].getValue(), "osl:shader" )
def testMissingSemiColon( self ) :
n1 = GafferOSL.OSLCode()
n1["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n1["out"]["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n2 = GafferOSL.OSLCode()
n2["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n2["out"]["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
# The OSLCode node will often be used to throw in a one-liner,
# and omitting a semicolon is an easy mistake that we should
# correct automatically.
n1["code"].setValue( "out = in * 2" )
n2["code"].setValue( "out = in * 2;" )
self.assertEqual( self.__osoFileName( n1 ), self.__osoFileName( n2 ) )
def testAddingAndRemovingPlugsUpdatesShader( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["out"]["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslShader = GafferOSL.OSLShader()
oslShader.loadShader( self.__osoFileName( oslCode ) )
self.assertTrue( "in" in oslShader["parameters"] )
self.assertTrue( "out" in oslShader["out"] )
def testObjectProcessingFunctions( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["out"]["out"] = Gaffer.FloatPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.__assertNoError( oslCode, oslCode["code"].setValue, 'out = inFloat( "s", 0 );' )
def testImageProcessingFunctions( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["out"]["out"] = Gaffer.FloatPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.__assertNoError( oslCode, oslCode["code"].setValue, 'out = inChannel( "R", 0 );' )
def testColorSpline( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["parameters"]["sp"] = Gaffer.SplinefColor3fPlug(
defaultValue = IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
)
),
flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
)
oslCode["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["code"].setValue( "o = colorSpline( spPositions, spValues, spBasis, u );" )
# Load the generated shader onto an OSLShader
# node to verify it.
oslShader = GafferOSL.OSLShader()
oslShader.loadShader( self.__osoFileName( oslCode ) )
oslShader["parameters"]["sp"].setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( oslShader["parameters"]["sp"] ), repr( oslCode["parameters"]["sp"] ) )
def testShaderNameMatchesFileName( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["code"].setValue( "o = color( 0, 1, 0 );" )
info = subprocess.check_output( [ "oslinfo", self.__osoFileName( oslCode ) ], universal_newlines = True )
self.assertTrue(
info.startswith( "shader \"{0}\"".format( os.path.basename( self.__osoFileName( oslCode ) ) ) )
)
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["o"] = GafferOSL.OSLCode()
s["o"]["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["code"].setValue( "o = i * color( u, v, 0 );")
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( self.__osoFileName( s2["o"] ), self.__osoFileName( s["o"] ) )
def testUndo( self ) :
s = Gaffer.ScriptNode()
s["o"] = GafferOSL.OSLCode()
f1 = self.__osoFileName( s["o"] )
with Gaffer.UndoScope( s ) :
s["o"]["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
f2 = self.__osoFileName( s["o"] )
with Gaffer.UndoScope( s ) :
s["o"]["code"].setValue( "o = i * color( u, v, 0 );")
f3 = self.__osoFileName( s["o"] )
s.undo()
self.assertEqual( self.__osoFileName( s["o"] ), f2 )
s.undo()
self.assertEqual( self.__osoFileName( s["o"] ), f1 )
s.redo()
self.assertEqual( self.__osoFileName( s["o"] ), f2 )
s.redo()
self.assertEqual( self.__osoFileName( s["o"] ), f3 )
def testSource( self ) :
# Make a shader using the OSLCode node.
oslCode = GafferOSL.OSLCode()
oslCode["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["code"].setValue( "o = i * color( u, v, 0 );")
# Export it to a .osl file and compile it.
oslFileName = os.path.join( self.temporaryDirectory(), "test.osl" )
with open( oslFileName, "w" ) as f :
f.write( oslCode.source( "test") )
shader = self.compileShader( oslFileName )
# Load that onto an OSLShader and check that
# it matches.
oslShader = GafferOSL.OSLShader()
oslShader.loadShader( shader )
self.assertEqual( oslShader["parameters"].keys(), oslCode["parameters"].keys() )
self.assertEqual( oslShader["out"].keys(), oslCode["out"].keys() )
for p in oslShader["parameters"].children() :
p.setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( p ), repr( oslCode["parameters"][p.getName()] ) )
for p in oslShader["out"].children() :
p.setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( p ), repr( oslCode["out"][p.getName()] ) )
def testSourceUsesRequestedName( self ) :
oslCode = GafferOSL.OSLCode()
source = oslCode.source( "test" )
self.assertTrue( "shader test" in source )
def testParameterRenaming( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.__assertError( oslCode, oslCode["code"].setValue, "o = in" )
cs = GafferTest.CapturingSlot( oslCode.plugDirtiedSignal() )
self.__assertNoError( oslCode, oslCode["parameters"]["i"].setName, "in" )
self.assertTrue( oslCode["out"] in [ x[0] for x in cs ] )
self.__assertError( oslCode, oslCode["parameters"]["in"].setName, "i" )
def testMoveCodeDirectory( self ) :
oslCodeDir = os.environ.get( "GAFFEROSL_CODE_DIRECTORY" )
if oslCodeDir :
self.addCleanup( os.environ.__setitem__, "GAFFEROSL_CODE_DIRECTORY", oslCodeDir )
else :
self.addCleanup( os.environ.__delitem__, "GAFFEROSL_CODE_DIRECTORY" )
# Make an OSL shader in a specific code directory.
os.environ["GAFFEROSL_CODE_DIRECTORY"] = os.path.join( self.temporaryDirectory(), "codeDirectoryA" )
s = Gaffer.ScriptNode()
s["o"] = GafferOSL.OSLCode()
s["o"]["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["code"].setValue( "o = i * color( u, v, 0 );")
self.assertTrue( self.__osoFileName( s["o"] ).startswith( os.environ["GAFFEROSL_CODE_DIRECTORY"] ) )
# Now simulate the loading of that script in a different environment,
# with a different code directory.
ss = s.serialise()
shutil.rmtree( os.environ["GAFFEROSL_CODE_DIRECTORY"] )
os.environ["GAFFEROSL_CODE_DIRECTORY"] = os.path.join( self.temporaryDirectory(), "codeDirectoryB" )
s2 = Gaffer.ScriptNode()
s2.execute( ss )
self.assertTrue( self.__osoFileName( s2["o"] ).startswith( os.environ["GAFFEROSL_CODE_DIRECTORY"] ) )
def __osoFileName( self, oslCode ) :
# Right now we could get this information by
# getting the value directly from the "name" plug
# on the OSLCode node, but we're getting it from
# the computed shader instead, in the hope that
# one day we can refactor things so that it's the
# generation of the shader network that also generates
# the file on disk. It might be that the
# `GafferScene::Shader` base class shouldn't even
# mandate the existence of "name" and "type" plugs.
return oslCode.attributes()["osl:shader"].outputShader().name
def __assertError( self, oslCode, fn, *args, **kw ) :
cs = GafferTest.CapturingSlot( oslCode.errorSignal() )
fn( *args, **kw )
self.__osoFileName( oslCode )
self.assertEqual( len( cs ), 1 )
def __assertNoError( self, oslCode, fn, *args, **kw ) :
cs = GafferTest.CapturingSlot( oslCode.errorSignal() )
fn( *args, **kw )
self.__osoFileName( oslCode )
self.assertEqual( len( cs ), 0 )
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.enable_mocktime()
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
#Viacoin: Disabled RBF
#self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
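# Per BIP125, a transaction signals replaceability when any of its inputs has
# nSequence below 0xfffffffe; tx3 below opts in by setting nSequence = 0,
# while the default final sequence (0xffffffff) does not signal.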
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
|
|
"""
These test the method maybe_promote from core/dtypes/cast.py
"""
import datetime
import numpy as np
import pytest
from pandas._libs.tslibs import NaT
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
is_complex_dtype,
is_datetime64_dtype,
is_datetime_or_timedelta_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
import pandas as pd
@pytest.fixture(
params=[
bool,
"uint8",
"int32",
"uint64",
"float32",
"float64",
"complex64",
"complex128",
"M8[ns]",
"m8[ns]",
str,
bytes,
object,
]
)
def any_numpy_dtype_reduced(request):
"""
Parameterized fixture for numpy dtypes, reduced from any_numpy_dtype.
* bool
* 'uint8'
* 'int32'
* 'uint64'
* 'float32'
* 'float64'
* 'complex64'
* 'complex128'
* 'M8[ns]'
* 'm8[ns]'
* str
* bytes
* object
"""
return request.param
def _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar=None):
"""
Auxiliary function to unify testing of scalar/array promotion.
Parameters
----------
dtype : dtype
The value to pass on as the first argument to maybe_promote.
fill_value : scalar
The value to pass on as the second argument to maybe_promote as
a scalar.
expected_dtype : dtype
The expected dtype returned by maybe_promote (by design this is the
same regardless of whether fill_value was passed as a scalar or in an
array!).
exp_val_for_scalar : scalar
The expected value for the (potentially upcast) fill_value returned by
maybe_promote.
"""
assert is_scalar(fill_value)
# here, we pass on fill_value as a scalar directly; the expected value
# returned from maybe_promote is fill_value, potentially upcast to the
# returned dtype.
result_dtype, result_fill_value = maybe_promote(dtype, fill_value)
expected_fill_value = exp_val_for_scalar
assert result_dtype == expected_dtype
_assert_match(result_fill_value, expected_fill_value)
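# Illustrative sketch (not executed as part of the suite): a direct call such as
#     maybe_promote(np.dtype("int8"), fill_value=np.nan)
# is expected to return (np.dtype("float64"), np.nan), because integer dtypes
# cannot hold NaN and are upcast to float64; _check_promote wraps exactly this
# call together with the dtype and fill-value assertions above.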
def _assert_match(result_fill_value, expected_fill_value):
# GH#23982/25425 require the same type in addition to equality/NA-ness
res_type = type(result_fill_value)
ex_type = type(expected_fill_value)
if hasattr(result_fill_value, "dtype"):
# Compare types in a way that is robust to platform-specific
# idiosyncrasies where e.g. sometimes we get "ulonglong" as an alias
# for "uint64" or "intc" as an alias for "int32"
assert result_fill_value.dtype.kind == expected_fill_value.dtype.kind
assert result_fill_value.dtype.itemsize == expected_fill_value.dtype.itemsize
else:
# On some builds, type comparison fails, e.g. np.int32 != np.int32
assert res_type == ex_type or res_type.__name__ == ex_type.__name__
match_value = result_fill_value == expected_fill_value
# Note: type check above ensures that we have the _same_ NA value
# for missing values, None == None (which is checked
# through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT
match_missing = isna(result_fill_value) and isna(expected_fill_value)
assert match_value or match_missing
@pytest.mark.parametrize(
"dtype, fill_value, expected_dtype",
[
# size 8
("int8", 1, "int8"),
("int8", np.iinfo("int8").max + 1, "int16"),
("int8", np.iinfo("int16").max + 1, "int32"),
("int8", np.iinfo("int32").max + 1, "int64"),
("int8", np.iinfo("int64").max + 1, "object"),
("int8", -1, "int8"),
("int8", np.iinfo("int8").min - 1, "int16"),
("int8", np.iinfo("int16").min - 1, "int32"),
("int8", np.iinfo("int32").min - 1, "int64"),
("int8", np.iinfo("int64").min - 1, "object"),
# keep signed-ness as long as possible
("uint8", 1, "uint8"),
("uint8", np.iinfo("int8").max + 1, "uint8"),
("uint8", np.iinfo("uint8").max + 1, "uint16"),
("uint8", np.iinfo("int16").max + 1, "uint16"),
("uint8", np.iinfo("uint16").max + 1, "uint32"),
("uint8", np.iinfo("int32").max + 1, "uint32"),
("uint8", np.iinfo("uint32").max + 1, "uint64"),
("uint8", np.iinfo("int64").max + 1, "uint64"),
("uint8", np.iinfo("uint64").max + 1, "object"),
# max of uint8 cannot be contained in int8
("uint8", -1, "int16"),
("uint8", np.iinfo("int8").min - 1, "int16"),
("uint8", np.iinfo("int16").min - 1, "int32"),
("uint8", np.iinfo("int32").min - 1, "int64"),
("uint8", np.iinfo("int64").min - 1, "object"),
# size 16
("int16", 1, "int16"),
("int16", np.iinfo("int8").max + 1, "int16"),
("int16", np.iinfo("int16").max + 1, "int32"),
("int16", np.iinfo("int32").max + 1, "int64"),
("int16", np.iinfo("int64").max + 1, "object"),
("int16", -1, "int16"),
("int16", np.iinfo("int8").min - 1, "int16"),
("int16", np.iinfo("int16").min - 1, "int32"),
("int16", np.iinfo("int32").min - 1, "int64"),
("int16", np.iinfo("int64").min - 1, "object"),
("uint16", 1, "uint16"),
("uint16", np.iinfo("int8").max + 1, "uint16"),
("uint16", np.iinfo("uint8").max + 1, "uint16"),
("uint16", np.iinfo("int16").max + 1, "uint16"),
("uint16", np.iinfo("uint16").max + 1, "uint32"),
("uint16", np.iinfo("int32").max + 1, "uint32"),
("uint16", np.iinfo("uint32").max + 1, "uint64"),
("uint16", np.iinfo("int64").max + 1, "uint64"),
("uint16", np.iinfo("uint64").max + 1, "object"),
("uint16", -1, "int32"),
("uint16", np.iinfo("int8").min - 1, "int32"),
("uint16", np.iinfo("int16").min - 1, "int32"),
("uint16", np.iinfo("int32").min - 1, "int64"),
("uint16", np.iinfo("int64").min - 1, "object"),
# size 32
("int32", 1, "int32"),
("int32", np.iinfo("int8").max + 1, "int32"),
("int32", np.iinfo("int16").max + 1, "int32"),
("int32", np.iinfo("int32").max + 1, "int64"),
("int32", np.iinfo("int64").max + 1, "object"),
("int32", -1, "int32"),
("int32", np.iinfo("int8").min - 1, "int32"),
("int32", np.iinfo("int16").min - 1, "int32"),
("int32", np.iinfo("int32").min - 1, "int64"),
("int32", np.iinfo("int64").min - 1, "object"),
("uint32", 1, "uint32"),
("uint32", np.iinfo("int8").max + 1, "uint32"),
("uint32", np.iinfo("uint8").max + 1, "uint32"),
("uint32", np.iinfo("int16").max + 1, "uint32"),
("uint32", np.iinfo("uint16").max + 1, "uint32"),
("uint32", np.iinfo("int32").max + 1, "uint32"),
("uint32", np.iinfo("uint32").max + 1, "uint64"),
("uint32", np.iinfo("int64").max + 1, "uint64"),
("uint32", np.iinfo("uint64").max + 1, "object"),
("uint32", -1, "int64"),
("uint32", np.iinfo("int8").min - 1, "int64"),
("uint32", np.iinfo("int16").min - 1, "int64"),
("uint32", np.iinfo("int32").min - 1, "int64"),
("uint32", np.iinfo("int64").min - 1, "object"),
# size 64
("int64", 1, "int64"),
("int64", np.iinfo("int8").max + 1, "int64"),
("int64", np.iinfo("int16").max + 1, "int64"),
("int64", np.iinfo("int32").max + 1, "int64"),
("int64", np.iinfo("int64").max + 1, "object"),
("int64", -1, "int64"),
("int64", np.iinfo("int8").min - 1, "int64"),
("int64", np.iinfo("int16").min - 1, "int64"),
("int64", np.iinfo("int32").min - 1, "int64"),
("int64", np.iinfo("int64").min - 1, "object"),
("uint64", 1, "uint64"),
("uint64", np.iinfo("int8").max + 1, "uint64"),
("uint64", np.iinfo("uint8").max + 1, "uint64"),
("uint64", np.iinfo("int16").max + 1, "uint64"),
("uint64", np.iinfo("uint16").max + 1, "uint64"),
("uint64", np.iinfo("int32").max + 1, "uint64"),
("uint64", np.iinfo("uint32").max + 1, "uint64"),
("uint64", np.iinfo("int64").max + 1, "uint64"),
("uint64", np.iinfo("uint64").max + 1, "object"),
("uint64", -1, "object"),
("uint64", np.iinfo("int8").min - 1, "object"),
("uint64", np.iinfo("int16").min - 1, "object"),
("uint64", np.iinfo("int32").min - 1, "object"),
("uint64", np.iinfo("int64").min - 1, "object"),
],
)
def test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype):
dtype = np.dtype(dtype)
expected_dtype = np.dtype(expected_dtype)
# output is not a generic int, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_int_with_float(any_int_dtype, float_dtype):
dtype = np.dtype(any_int_dtype)
fill_dtype = np.dtype(float_dtype)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling int with float always upcasts to float64
expected_dtype = np.float64
# fill_value can be different float type
exp_val_for_scalar = np.float64(fill_value)
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_float_with_int(float_dtype, any_int_dtype):
dtype = np.dtype(float_dtype)
fill_dtype = np.dtype(any_int_dtype)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling float with int always keeps float dtype
# because: np.finfo('float32').max > np.iinfo('uint64').max
expected_dtype = dtype
# output is not a generic float, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize(
"dtype, fill_value, expected_dtype",
[
# float filled with float
("float32", 1, "float32"),
("float32", np.finfo("float32").max * 1.1, "float64"),
("float64", 1, "float64"),
("float64", np.finfo("float32").max * 1.1, "float64"),
# complex filled with float
("complex64", 1, "complex64"),
("complex64", np.finfo("float32").max * 1.1, "complex128"),
("complex128", 1, "complex128"),
("complex128", np.finfo("float32").max * 1.1, "complex128"),
# float filled with complex
("float32", 1 + 1j, "complex64"),
("float32", np.finfo("float32").max * (1.1 + 1j), "complex128"),
("float64", 1 + 1j, "complex128"),
("float64", np.finfo("float32").max * (1.1 + 1j), "complex128"),
# complex filled with complex
("complex64", 1 + 1j, "complex64"),
("complex64", np.finfo("float32").max * (1.1 + 1j), "complex128"),
("complex128", 1 + 1j, "complex128"),
("complex128", np.finfo("float32").max * (1.1 + 1j), "complex128"),
],
)
def test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype):
dtype = np.dtype(dtype)
expected_dtype = np.dtype(expected_dtype)
# output is not a generic float, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_bool_with_any(any_numpy_dtype_reduced):
dtype = np.dtype(bool)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling bool with anything but bool casts to object
expected_dtype = np.dtype(object) if fill_dtype != bool else fill_dtype
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced):
dtype = np.dtype(any_numpy_dtype_reduced)
fill_value = True
# filling anything but bool with bool casts to object
expected_dtype = np.dtype(object) if dtype != bool else dtype
# output is not a generic bool, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(bytes_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# we never use bytes dtype internally, always promote to object
expected_dtype = np.dtype(np.object_)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype):
dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype
fill_value = b"abc"
# we never use bytes dtype internally, always promote to object
expected_dtype = np.dtype(np.object_)
# output is not a generic bytes, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_datetime64_with_any(datetime64_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(datetime64_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling datetime with anything but datetime casts to object
if is_datetime64_dtype(fill_dtype):
expected_dtype = dtype
# for datetime dtypes, scalar values get cast to np.datetime64
# (via pd.Timestamp(...).to_datetime64())
exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize(
"fill_value",
[
pd.Timestamp("now"),
np.datetime64("now"),
datetime.datetime.now(),
datetime.date.today(),
],
ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
)
def test_maybe_promote_any_with_datetime64(
any_numpy_dtype_reduced, datetime64_dtype, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
# filling datetime with anything but datetime casts to object
if is_datetime64_dtype(dtype):
expected_dtype = dtype
# for datetime dtypes, scalar values get cast to np.datetime64
# (via pd.Timestamp(...).to_datetime64())
exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_datetimetz_with_any_numpy_dtype(
tz_aware_fixture, any_numpy_dtype_reduced
):
dtype = DatetimeTZDtype(tz=tz_aware_fixture)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling datetimetz with any numpy dtype casts to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_datetimetz_with_datetimetz(tz_aware_fixture, tz_aware_fixture2):
dtype = DatetimeTZDtype(tz=tz_aware_fixture)
fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture2)
# create array of given dtype; casts "1" to correct dtype
fill_value = pd.Series([10 ** 9], dtype=fill_dtype)[0]
# filling datetimetz with datetimetz casts to object, unless tz matches
exp_val_for_scalar = fill_value
if dtype.tz == fill_dtype.tz:
expected_dtype = dtype
else:
expected_dtype = np.dtype(object)
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize("fill_value", [None, np.nan, NaT])
def test_maybe_promote_datetimetz_with_na(tz_aware_fixture, fill_value):
dtype = DatetimeTZDtype(tz=tz_aware_fixture)
expected_dtype = dtype
exp_val_for_scalar = NaT
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize(
"fill_value",
[
pd.Timestamp("now"),
np.datetime64("now"),
datetime.datetime.now(),
datetime.date.today(),
],
ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
)
def test_maybe_promote_any_numpy_dtype_with_datetimetz(
any_numpy_dtype_reduced, tz_aware_fixture, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
# filling any numpy dtype with datetimetz casts to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_timedelta64_with_any(timedelta64_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(timedelta64_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling timedelta with anything but timedelta casts to object
if is_timedelta64_dtype(fill_dtype):
expected_dtype = dtype
# for timedelta dtypes, scalar values get cast to np.timedelta64
# (via pd.Timedelta(...).to_timedelta64())
exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize(
"fill_value",
[pd.Timedelta(days=1), np.timedelta64(24, "h"), datetime.timedelta(1)],
ids=["pd.Timedelta", "np.timedelta64", "datetime.timedelta"],
)
def test_maybe_promote_any_with_timedelta64(
any_numpy_dtype_reduced, timedelta64_dtype, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
# filling anything but timedelta with timedelta casts to object
if is_timedelta64_dtype(dtype):
expected_dtype = dtype
# for timedelta dtypes, scalar values get cast to np.timedelta64
# (via pd.Timedelta(...).to_timedelta64())
exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(string_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling string with anything casts to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_string(any_numpy_dtype_reduced, string_dtype):
dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype
fill_value = "abc"
# filling anything with a string casts to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_object_with_any(object_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(object_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
# filling object with anything stays object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_object(any_numpy_dtype_reduced, object_dtype):
dtype = np.dtype(any_numpy_dtype_reduced)
# create array of object dtype from a scalar value (i.e. passing
# dtypes.common.is_scalar), which, however, cannot be cast to int/float etc.
fill_value = pd.DateOffset(1)
# filling object with anything stays object
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize("fill_value", [None, np.nan, NaT])
def test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype_reduced, fill_value):
dtype = np.dtype(any_numpy_dtype_reduced)
if is_integer_dtype(dtype) and fill_value is not NaT:
# integer + other missing value (np.nan / None) casts to float
expected_dtype = np.float64
exp_val_for_scalar = np.nan
elif is_object_dtype(dtype) and fill_value is NaT:
# inserting into object does not cast the value
# but *does* cast None to np.nan
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
elif is_datetime_or_timedelta_dtype(dtype):
# datetime / timedelta cast all missing values to dtyped-NaT
expected_dtype = dtype
exp_val_for_scalar = dtype.type("NaT", "ns")
elif fill_value is NaT:
# NaT upcasts everything that's not datetime/timedelta to object
expected_dtype = np.dtype(object)
exp_val_for_scalar = NaT
elif is_float_dtype(dtype) or is_complex_dtype(dtype):
# float / complex + missing value (!= NaT) stays the same
expected_dtype = dtype
exp_val_for_scalar = np.nan
else:
# all other cases cast to object, and use np.nan as missing value
expected_dtype = np.dtype(object)
exp_val_for_scalar = np.nan
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
@pytest.mark.parametrize("dim", [0, 2, 3])
def test_maybe_promote_dimensions(any_numpy_dtype_reduced, dim):
dtype = np.dtype(any_numpy_dtype_reduced)
# create 0-dim array of given dtype; casts "1" to correct dtype
fill_array = np.array(1, dtype=dtype)
# expand to desired dimension:
for _ in range(dim):
fill_array = np.expand_dims(fill_array, 0)
if dtype != object:
# test against 1-dimensional case
with pytest.raises(ValueError, match="fill_value must be a scalar"):
maybe_promote(dtype, np.array([1], dtype=dtype))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
maybe_promote(dtype, fill_array)
else:
expected_dtype, expected_missing_value = maybe_promote(
dtype, np.array([1], dtype=dtype)
)
result_dtype, result_missing_value = maybe_promote(dtype, fill_array)
assert result_dtype == expected_dtype
_assert_match(result_missing_value, expected_missing_value)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
import numpy
import six
from tensorflow.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.timeseries.examples import lstm as lstm_example
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import head as ts_head_lib
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.core.example import example_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import adam
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import training as train
class HeadTest(test.TestCase):
def test_labels_provided_error(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL,
estimator_lib.ModeKeys.PREDICT]:
with self.assertRaisesRegexp(ValueError, "received a `labels`"):
model_fn(features={}, labels={"a": "b"}, mode=mode)
with self.assertRaisesRegexp(ValueError, "received a `labels`"):
model_fn(features={}, labels=array_ops.zeros([]), mode=mode)
def test_unknown_mode(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Unknown mode 'Not a mode'"):
model_fn(features={}, labels={}, mode="Not a mode")
class _TickerModel(object):
num_features = 1
dtype = dtypes.float32
def initialize_graph(self, input_statistics):
pass
def define_loss(self, features, mode):
del mode # unused
return model.ModelOutputs(
loss=features["ticker"],
end_state=(features["ticker"], features["ticker"]),
prediction_times=array_ops.zeros(()),
predictions={"ticker": features["ticker"]})
class EvaluationMetricsTests(test.TestCase):
def test_metrics_consistent(self):
# Tests that the identity metrics used to report in-sample predictions match
# the behavior of standard metrics.
g = ops.Graph()
with g.as_default():
features = {
feature_keys.TrainEvalFeatures.TIMES:
array_ops.zeros((1, 1)),
feature_keys.TrainEvalFeatures.VALUES:
array_ops.zeros((1, 1, 1)),
"ticker":
array_ops.reshape(
math_ops.cast(
variables.Variable(
name="ticker",
initial_value=0,
dtype=dtypes.int64,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
.count_up_to(10),
dtype=dtypes.float32), (1, 1, 1))
}
model_fn = ts_head_lib.TimeSeriesRegressionHead(
model=_TickerModel(),
state_manager=state_management.PassthroughStateManager(),
optimizer=train.GradientDescentOptimizer(0.001)).create_estimator_spec
outputs = model_fn(
features=features, labels=None, mode=estimator_lib.ModeKeys.EVAL)
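# Each entry in eval_metric_ops is a (value_tensor, update_op) pair; collect
# the update ops so every metric can be advanced in a single session run.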
metric_update_ops = [
metric[1] for metric in outputs.eval_metric_ops.values()]
loss_mean, loss_update = metrics.mean(outputs.loss)
metric_update_ops.append(loss_update)
with self.test_session() as sess:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(sess, coord=coordinator)
variables.local_variables_initializer().run()
sess.run(metric_update_ops)
loss_evaled, metric_evaled, nested_metric_evaled = sess.run(
(loss_mean, outputs.eval_metric_ops["ticker"][0],
outputs.eval_metric_ops[feature_keys.FilteringResults.STATE_TUPLE][
0][0]))
# The custom model_utils metrics for in-sample predictions should be in
# sync with the Estimator's mean metric for model loss.
self.assertAllClose(0., loss_evaled)
self.assertAllClose((((0.,),),), metric_evaled)
self.assertAllClose((((0.,),),), nested_metric_evaled)
coordinator.request_stop()
coordinator.join()
def test_custom_metrics(self):
"""Tests that the custom metrics can be applied to the estimator."""
model_dir = self.get_temp_dir()
estimator = ts_estimators.TimeSeriesRegressor(
model=lstm_example._LSTMModel(num_features=1, num_units=4),
optimizer=adam.AdamOptimizer(0.001),
config=estimator_lib.RunConfig(tf_random_seed=4),
model_dir=model_dir)
def input_fn():
return {
feature_keys.TrainEvalFeatures.TIMES: [[1, 2, 3], [7, 8, 9]],
feature_keys.TrainEvalFeatures.VALUES:
numpy.array([[[0.], [1.], [0.]], [[2.], [3.], [2.]]])
}
def metrics_fn(predictions, features):
# checking that the inputs are properly passed.
predict = predictions["mean"]
target = features[feature_keys.TrainEvalFeatures.VALUES][:, -1, 0]
return {
"plain_boring_metric386":
(math_ops.reduce_mean(math_ops.abs(predict - target)),
control_flow_ops.no_op()),
"fun_metric101": (math_ops.reduce_sum(predict + target),
control_flow_ops.no_op()),
}
# Evaluation without training is enough for testing custom metrics.
estimator = extenders.add_metrics(estimator, metrics_fn)
evaluation = estimator.evaluate(input_fn, steps=1)
self.assertIn("plain_boring_metric386", evaluation)
self.assertIn("fun_metric101", evaluation)
# The values are deterministic because of fixed tf_random_seed.
# However, if they become flaky, remove such exact comparisons.
self.assertAllClose(evaluation["plain_boring_metric386"], 1.130380)
self.assertAllClose(evaluation["fun_metric101"], 10.435442)
class _StubModel(object):
num_features = 3
dtype = dtypes.float64
def initialize_graph(self, input_statistics):
del input_statistics # unused
def _stub_model_fn():
return ts_head_lib.TimeSeriesRegressionHead(
model=_StubModel(),
state_manager=state_management.PassthroughStateManager(),
optimizer=train.AdamOptimizer(0.001)).create_estimator_spec
class TrainEvalFeatureCheckingTests(test.TestCase):
def test_no_time_feature(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.TrainEvalFeatures.TIMES)):
model_fn(
features={feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]},
labels=None,
mode=mode)
def test_no_value_feature(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={feature_keys.TrainEvalFeatures.TIMES: [[1]]},
labels=None,
mode=mode)
def test_bad_time_rank(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.TIMES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[[1]]],
feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]
},
labels=None,
mode=mode)
def test_bad_value_rank(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[1.]]
},
labels=None,
mode=mode)
def test_bad_value_num_features(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(
ValueError, "Expected shape.*, 3.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]
},
labels=None,
mode=mode)
def test_bad_exogenous_shape(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(
ValueError,
"Features must have shape.*for feature 'exogenous'"):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[[1., 2., 3.]]],
"exogenous": [[1], [2]]
},
labels=None,
mode=mode)
class PredictFeatureCheckingTests(test.TestCase):
def test_no_time_feature(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.PredictionFeatures.TIMES)):
model_fn(
features={
feature_keys.PredictionFeatures.STATE_TUPLE: ([[[1.]]], 1.)
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_no_start_state_feature(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.PredictionFeatures.STATE_TUPLE)):
model_fn(
features={feature_keys.PredictionFeatures.TIMES: [[1]]},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_bad_time_rank(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.PredictionFeatures.TIMES)):
model_fn(
features={
feature_keys.PredictionFeatures.TIMES: 1,
feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.))
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_bad_exogenous_shape(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(
ValueError,
"Features must have shape.*for feature 'exogenous'"):
model_fn(
features={
feature_keys.PredictionFeatures.TIMES: [[1]],
feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.)),
"exogenous": 1.
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def _custom_time_series_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.TimeSeriesRegressor(
model=lstm_example._LSTMModel(
num_features=5, num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=adam.AdamOptimizer(0.001),
config=estimator_lib.RunConfig(tf_random_seed=4),
state_manager=state_management.ChainingStateManager(),
head_type=head_type,
model_dir=model_dir)
def _structural_ensemble_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.StructuralEnsembleRegressor(
periodicities=None,
num_features=5,
exogenous_feature_columns=exogenous_feature_columns,
head_type=head_type,
model_dir=model_dir)
def _ar_lstm_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.TimeSeriesRegressor(
model=ar_model.ARModel(
periodicities=10, input_window_size=10, output_window_size=6,
num_features=5,
exogenous_feature_columns=exogenous_feature_columns,
prediction_model_factory=functools.partial(
ar_model.LSTMPredictionModel,
num_units=10)),
head_type=head_type,
model_dir=model_dir)
class OneShotTests(parameterized.TestCase):
@parameterized.named_parameters(
{"testcase_name": "ar_lstm_regressor",
"estimator_factory": _ar_lstm_regressor},
{"testcase_name": "custom_time_series_regressor",
"estimator_factory": _custom_time_series_regressor},
{"testcase_name": "structural_ensemble_regressor",
"estimator_factory": _structural_ensemble_regressor})
def test_one_shot_prediction_head_export(self, estimator_factory):
def _new_temp_dir():
return os.path.join(test.get_temp_dir(), str(ops.uid()))
model_dir = _new_temp_dir()
categorical_column = feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = estimator_factory(
model_dir=model_dir,
exogenous_feature_columns=exogenous_feature_columns,
head_type=ts_head_lib.OneShotPredictionHead)
train_features = {
feature_keys.TrainEvalFeatures.TIMES: numpy.arange(
20, dtype=numpy.int64),
feature_keys.TrainEvalFeatures.VALUES: numpy.tile(numpy.arange(
20, dtype=numpy.float32)[:, None], [1, 5]),
"2d_exogenous_feature": numpy.ones([20, 2]),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 20)[:, None]
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(train_features), shuffle_seed=2,
num_threads=1, batch_size=16, window_size=16)
estimator.train(input_fn=train_input_fn, steps=5)
result = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertNotIn(feature_keys.State.STATE_TUPLE, result)
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_savedmodel(_new_temp_dir(),
input_receiver_fn)
graph = ops.Graph()
with graph.as_default():
with session_lib.Session() as session:
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
self.assertEqual([feature_keys.SavedModelLabels.PREDICT],
list(signatures.signature_def.keys()))
predict_signature = signatures.signature_def[
feature_keys.SavedModelLabels.PREDICT]
six.assertCountEqual(
self,
[feature_keys.FilteringFeatures.TIMES,
feature_keys.FilteringFeatures.VALUES,
"2d_exogenous_feature",
"categorical_exogenous_feature"],
predict_signature.inputs.keys())
features = {
feature_keys.TrainEvalFeatures.TIMES: numpy.tile(
numpy.arange(35, dtype=numpy.int64)[None, :], [2, 1]),
feature_keys.TrainEvalFeatures.VALUES: numpy.tile(numpy.arange(
20, dtype=numpy.float32)[None, :, None], [2, 1, 5]),
"2d_exogenous_feature": numpy.ones([2, 35, 2]),
"categorical_exogenous_feature": numpy.tile(numpy.array(
["strkey"] * 35)[None, :, None], [2, 1, 1])
}
feeds = {
graph.as_graph_element(input_value.name): features[input_key]
for input_key, input_value in predict_signature.inputs.items()}
fetches = {output_key: graph.as_graph_element(output_value.name)
for output_key, output_value
in predict_signature.outputs.items()}
output = session.run(fetches, feed_dict=feeds)
self.assertEqual((2, 15, 5), output["mean"].shape)
# Build a parsing input function, then make a tf.Example for it to parse.
export_location = estimator.export_savedmodel(
_new_temp_dir(),
estimator.build_one_shot_parsing_serving_input_receiver_fn(
filtering_length=20, prediction_length=15))
graph = ops.Graph()
with graph.as_default():
with session_lib.Session() as session:
example = example_pb2.Example()
times = example.features.feature[feature_keys.TrainEvalFeatures.TIMES]
values = example.features.feature[feature_keys.TrainEvalFeatures.VALUES]
times.int64_list.value.extend(range(35))
for i in range(20):
values.float_list.value.extend(
[float(i) * 2. + feature_number
for feature_number in range(5)])
real_feature = example.features.feature["2d_exogenous_feature"]
categorical_feature = example.features.feature[
"categorical_exogenous_feature"]
for i in range(35):
real_feature.float_list.value.extend([1, 1])
categorical_feature.bytes_list.value.append(b"strkey")
# Serialize the tf.Example for feeding to the Session
examples = [example.SerializeToString()] * 2
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
predict_signature = signatures.signature_def[
feature_keys.SavedModelLabels.PREDICT]
((_, input_value),) = predict_signature.inputs.items()
feeds = {graph.as_graph_element(input_value.name): examples}
fetches = {output_key: graph.as_graph_element(output_value.name)
for output_key, output_value
in predict_signature.outputs.items()}
output = session.run(fetches, feed_dict=feeds)
self.assertEqual((2, 15, 5), output["mean"].shape)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tempfile
import numpy as np
import six
from tensorflow.contrib import learn
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver
_X_KEY = 'my_x_key'
_X_COLUMN = feature_column.real_valued_column(_X_KEY, dimension=1)
def _training_input_fn():
x = random_ops.random_uniform(shape=(1,), minval=0.0, maxval=1000.0)
y = 2 * x + 3
return {_X_KEY: x}, y
class ExportTest(test.TestCase):
def _get_default_signature(self, export_meta_filename):
""" Gets the default signature from the export.meta file. """
with session.Session():
save = saver.import_meta_graph(export_meta_filename)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
signatures_any = collection_def['serving_signatures'].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
return default_signature
def _assert_export(self, export_monitor, export_dir, expected_signature):
self.assertTrue(gfile.Exists(export_dir))
# Only the written checkpoints are exported.
self.assertTrue(
saver.checkpoint_exists(os.path.join(export_dir, '00000001', 'export')),
'Exported checkpoint expected but not found: %s' %
os.path.join(export_dir, '00000001', 'export'))
self.assertTrue(
saver.checkpoint_exists(os.path.join(export_dir, '00000010', 'export')),
'Exported checkpoint expected but not found: %s' %
os.path.join(export_dir, '00000010', 'export'))
self.assertEquals(
six.b(os.path.join(export_dir, '00000010')),
export_monitor.last_export_dir)
# Validate the signature
signature = self._get_default_signature(
os.path.join(export_dir, '00000010', 'export.meta'))
self.assertTrue(signature.HasField(expected_signature))
def testExportMonitor_EstimatorProvidesSignature(self):
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
cont_features = [feature_column.real_valued_column('', dimension=1)]
regressor = learn.LinearRegressor(feature_columns=cont_features)
export_dir = os.path.join(tempfile.mkdtemp(), 'export')
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1, export_dir=export_dir, exports_to_keep=2)
regressor.fit(x, y, steps=10, monitors=[export_monitor])
self._assert_export(export_monitor, export_dir, 'regression_signature')
def testExportMonitor(self):
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
cont_features = [feature_column.real_valued_column('', dimension=1)]
export_dir = os.path.join(tempfile.mkdtemp(), 'export')
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=2,
signature_fn=export.generic_signature_fn)
regressor = learn.LinearRegressor(feature_columns=cont_features)
regressor.fit(x, y, steps=10, monitors=[export_monitor])
self._assert_export(export_monitor, export_dir, 'generic_signature')
def testExportMonitorInputFeatureKeyMissing(self):
random.seed(42)
def _serving_input_fn():
return {
_X_KEY:
random_ops.random_uniform(
shape=(1,), minval=0.0, maxval=1000.0)
}, None
input_feature_key = 'my_example_key'
monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=os.path.join(tempfile.mkdtemp(), 'export'),
input_fn=_serving_input_fn,
input_feature_key=input_feature_key,
exports_to_keep=2,
signature_fn=export.generic_signature_fn)
regressor = learn.LinearRegressor(feature_columns=[_X_COLUMN])
with self.assertRaisesRegexp(KeyError, input_feature_key):
regressor.fit(input_fn=_training_input_fn, steps=10, monitors=[monitor])
def testExportMonitorInputFeatureKeyNoneNoFeatures(self):
random.seed(42)
input_feature_key = 'my_example_key'
def _serving_input_fn():
return {input_feature_key: None}, None
monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=os.path.join(tempfile.mkdtemp(), 'export'),
input_fn=_serving_input_fn,
input_feature_key=input_feature_key,
exports_to_keep=2,
signature_fn=export.generic_signature_fn)
regressor = learn.LinearRegressor(feature_columns=[_X_COLUMN])
with self.assertRaisesRegexp(ValueError,
'features or examples must be defined'):
regressor.fit(input_fn=_training_input_fn, steps=10, monitors=[monitor])
def testExportMonitorInputFeatureKeyNone(self):
random.seed(42)
input_feature_key = 'my_example_key'
def _serving_input_fn():
return {
input_feature_key:
None,
_X_KEY:
random_ops.random_uniform(
shape=(1,), minval=0.0, maxval=1000.0)
}, None
monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=os.path.join(tempfile.mkdtemp(), 'export'),
input_fn=_serving_input_fn,
input_feature_key=input_feature_key,
exports_to_keep=2,
signature_fn=export.generic_signature_fn)
regressor = learn.LinearRegressor(feature_columns=[_X_COLUMN])
with self.assertRaisesRegexp(ValueError, 'examples cannot be None'):
regressor.fit(input_fn=_training_input_fn, steps=10, monitors=[monitor])
def testExportMonitorInputFeatureKeyNoFeatures(self):
random.seed(42)
input_feature_key = 'my_example_key'
def _serving_input_fn():
return {
input_feature_key:
array_ops.placeholder(
dtype=dtypes.string, shape=(1,))
}, None
monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=os.path.join(tempfile.mkdtemp(), 'export'),
input_fn=_serving_input_fn,
input_feature_key=input_feature_key,
exports_to_keep=2,
signature_fn=export.generic_signature_fn)
regressor = learn.LinearRegressor(feature_columns=[_X_COLUMN])
with self.assertRaisesRegexp(KeyError, _X_KEY):
regressor.fit(input_fn=_training_input_fn, steps=10, monitors=[monitor])
def testExportMonitorInputFeature(self):
random.seed(42)
input_feature_key = 'my_example_key'
def _serving_input_fn():
return {
input_feature_key:
array_ops.placeholder(
dtype=dtypes.string, shape=(1,)),
_X_KEY:
random_ops.random_uniform(
shape=(1,), minval=0.0, maxval=1000.0)
}, None
export_dir = os.path.join(tempfile.mkdtemp(), 'export')
monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
input_fn=_serving_input_fn,
input_feature_key=input_feature_key,
exports_to_keep=2,
signature_fn=export.generic_signature_fn)
regressor = learn.LinearRegressor(feature_columns=[_X_COLUMN])
regressor.fit(input_fn=_training_input_fn, steps=10, monitors=[monitor])
self._assert_export(monitor, export_dir, 'generic_signature')
def testExportMonitorRegressionSignature(self):
def _regression_signature(examples, unused_features, predictions):
signatures = {}
signatures['regression'] = (exporter.regression_signature(examples,
predictions))
return signatures['regression'], signatures
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
cont_features = [feature_column.real_valued_column('', dimension=1)]
regressor = learn.LinearRegressor(feature_columns=cont_features)
export_dir = os.path.join(tempfile.mkdtemp(), 'export')
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=1,
signature_fn=_regression_signature)
regressor.fit(x, y, steps=10, monitors=[export_monitor])
self.assertTrue(gfile.Exists(export_dir))
with self.assertRaises(errors.NotFoundError):
saver.checkpoint_exists(os.path.join(export_dir, '00000000', 'export'))
self.assertTrue(
saver.checkpoint_exists(os.path.join(export_dir, '00000010', 'export')))
# Validate the signature
signature = self._get_default_signature(
os.path.join(export_dir, '00000010', 'export.meta'))
self.assertTrue(signature.HasField('regression_signature'))
if __name__ == '__main__':
test.main()
|
|
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import abc
import errno
import logging as std_logging
import os
import signal
import sys
import time
import eventlet
from eventlet.green import socket
from eventlet.green import ssl
import eventlet.greenio
import eventlet.wsgi
import functools
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from paste import deploy
import routes
import routes.middleware
import six
import webob.dec
import webob.exc
from heat.api.aws import exception as aws_exception
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import serializers
LOG = logging.getLogger(__name__)
URL_LENGTH_LIMIT = 50000
api_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_('Address to bind the server. Useful when '
'selecting a particular network interface.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('bind_port', default=8004, min=1, max=65535,
help=_('The port on which the server will listen.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('backlog', default=4096,
help=_("Number of backlog requests "
"to configure the socket with."),
deprecated_group='DEFAULT'),
cfg.StrOpt('cert_file',
help=_("Location of the SSL certificate file "
"to use for SSL mode."),
deprecated_group='DEFAULT'),
cfg.StrOpt('key_file',
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
deprecated_group='DEFAULT'),
cfg.IntOpt('workers', default=processutils.get_worker_count(),
help=_("Number of workers for Heat service."),
deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs).')),
cfg.IntOpt('tcp_keepidle', default=600,
help=_('The value for the socket option TCP_KEEPIDLE. This is '
'the time in seconds that the connection must be idle '
'before TCP starts sending keepalive probes.')),
]
api_group = cfg.OptGroup('heat_api')
cfg.CONF.register_group(api_group)
cfg.CONF.register_opts(api_opts,
group=api_group)
api_cfn_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_('Address to bind the server. Useful when '
'selecting a particular network interface.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('bind_port', default=8000, min=1, max=65535,
help=_('The port on which the server will listen.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('backlog', default=4096,
help=_("Number of backlog requests "
"to configure the socket with."),
deprecated_group='DEFAULT'),
cfg.StrOpt('cert_file',
help=_("Location of the SSL certificate file "
"to use for SSL mode."),
deprecated_group='DEFAULT'),
cfg.StrOpt('key_file',
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
deprecated_group='DEFAULT'),
cfg.IntOpt('workers', default=0,
help=_("Number of workers for Heat service."),
deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs).')),
cfg.IntOpt('tcp_keepidle', default=600,
help=_('The value for the socket option TCP_KEEPIDLE. This is '
'the time in seconds that the connection must be idle '
'before TCP starts sending keepalive probes.')),
]
api_cfn_group = cfg.OptGroup('heat_api_cfn')
cfg.CONF.register_group(api_cfn_group)
cfg.CONF.register_opts(api_cfn_opts,
group=api_cfn_group)
api_cw_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_('Address to bind the server. Useful when '
'selecting a particular network interface.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('bind_port', default=8003, min=1, max=65535,
help=_('The port on which the server will listen.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('backlog', default=4096,
help=_("Number of backlog requests "
"to configure the socket with."),
deprecated_group='DEFAULT'),
cfg.StrOpt('cert_file',
help=_("Location of the SSL certificate file "
"to use for SSL mode."),
deprecated_group='DEFAULT'),
cfg.StrOpt('key_file',
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
deprecated_group='DEFAULT'),
cfg.IntOpt('workers', default=0,
help=_("Number of workers for Heat service."),
deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs).')),
cfg.IntOpt('tcp_keepidle', default=600,
help=_('The value for the socket option TCP_KEEPIDLE. This is '
'the time in seconds that the connection must be idle '
'before TCP starts sending keepalive probes.')),
]
api_cw_group = cfg.OptGroup('heat_api_cloudwatch')
cfg.CONF.register_group(api_cw_group)
cfg.CONF.register_opts(api_cw_opts,
group=api_cw_group)
wsgi_elt_opts = [
cfg.BoolOpt('wsgi_keep_alive',
default=True,
help=_("If False, closes the client socket connection "
"explicitly.")),
cfg.IntOpt('client_socket_timeout', default=900,
help=_("Timeout for client connections' socket operations. "
"If an incoming connection is idle for this number of "
"seconds it will be closed. A value of '0' means "
"wait forever.")),
]
wsgi_elt_group = cfg.OptGroup('eventlet_opts')
cfg.CONF.register_group(wsgi_elt_group)
cfg.CONF.register_opts(wsgi_elt_opts,
group=wsgi_elt_group)
json_size_opt = cfg.IntOpt('max_json_body_size',
default=1048576,
help=_('Maximum raw byte size of JSON request body.'
' Should be larger than max_template_size.'))
cfg.CONF.register_opt(json_size_opt)
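# Illustrative heat.conf snippet (hypothetical values) exercising the option
# groups registered above; real deployments would tune these per host:
#
#     [heat_api]
#     bind_host = 0.0.0.0
#     bind_port = 8004
#     workers = 4
#
#     [eventlet_opts]
#     wsgi_keep_alive = True
#     client_socket_timeout = 900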
def list_opts():
yield None, [json_size_opt]
yield 'heat_api', api_opts
yield 'heat_api_cfn', api_cfn_opts
yield 'heat_api_cloudwatch', api_cw_opts
yield 'eventlet_opts', wsgi_elt_opts
def get_bind_addr(conf, default_port=None):
"""Return the host and port to bind to."""
return (conf.bind_host, conf.bind_port or default_port)
def get_socket(conf, default_port):
"""Bind socket to bind ip:port in conf.
Note: Mostly comes from Swift with a few small changes...
:param conf: a cfg.ConfigOpts object
:param default_port: port to bind to if none is specified in conf
:returns: a socket object as returned from socket.listen or
ssl.wrap_socket if conf specifies cert_file
"""
bind_addr = get_bind_addr(conf, default_port)
# TODO(jaypipes): eventlet's greened socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
cert_file = conf.cert_file
key_file = conf.key_file
use_ssl = cert_file or key_file
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
sock = None
retry_until = time.time() + 30
while not sock and time.time() < retry_until:
try:
sock = eventlet.listen(bind_addr,
backlog=conf.backlog,
family=address_family)
except socket.error as err:
if err.args[0] != errno.EADDRINUSE:
raise
eventlet.sleep(0.1)
if not sock:
raise RuntimeError(_("Could not bind to %(bind_addr)s"
"after trying for 30 seconds")
% {'bind_addr': bind_addr})
return sock
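# Typical usage (illustrative sketch, assuming the heat_api_cfn option group
# registered above): bind the CFN API socket, falling back to the default
# port when none is configured:
#     sock = get_socket(cfg.CONF.heat_api_cfn, default_port=8000)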
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, LOG, level=std_logging.DEBUG):
self.LOG = LOG
self.level = level
def write(self, msg):
self.LOG.log(self.level, msg.rstrip("\n"))
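# Instances of this wrapper act as the file-like `log` target that
# eventlet.wsgi expects (see self._wsgi_logger below), so per-request output
# is routed through the configured logger instead of going to stderr.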
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, name, conf, threads=1000):
os.umask(0o27) # ensure files are created with the correct privileges
self._logger = logging.getLogger("eventlet.wsgi.server")
self._wsgi_logger = WritableLogger(self._logger)
self.name = name
self.threads = threads
self.children = set()
self.stale_children = set()
self.running = True
self.pgid = os.getpid()
self.conf = conf
try:
os.setpgid(self.pgid, self.pgid)
except OSError:
self.pgid = 0
def kill_children(self, *args):
"""Kills the entire process group."""
LOG.error(_LE('SIGTERM received'))
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.running = False
os.killpg(0, signal.SIGTERM)
def hup(self, *args):
"""Reloads configuration files with zero down time."""
LOG.error(_LE('SIGHUP received'))
signal.signal(signal.SIGHUP, signal.SIG_IGN)
raise exception.SIGHUPInterrupt
def start(self, application, default_port):
"""Run a WSGI server with the given application.
:param application: The application to run in the WSGI server
:param default_port: Port to bind to if none is specified in conf
"""
eventlet.wsgi.MAX_HEADER_LINE = self.conf.max_header_line
self.application = application
self.default_port = default_port
self.configure_socket()
self.start_wsgi()
def start_wsgi(self):
if self.conf.workers == 0:
# Useful for profiling, test, debug etc.
self.pool = eventlet.GreenPool(size=self.threads)
self.pool.spawn_n(self._single_run, self.application, self.sock)
return
LOG.info(_LI("Starting %d workers"), self.conf.workers)
signal.signal(signal.SIGTERM, self.kill_children)
signal.signal(signal.SIGINT, self.kill_children)
signal.signal(signal.SIGHUP, self.hup)
while len(self.children) < self.conf.workers:
self.run_child()
def wait_on_children(self):
while self.running:
try:
pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
self._remove_children(pid)
self._verify_and_respawn_children(pid, status)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
except KeyboardInterrupt:
LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
os.killpg(0, signal.SIGTERM)
break
except exception.SIGHUPInterrupt:
self.reload()
continue
eventlet.greenio.shutdown_safe(self.sock)
self.sock.close()
LOG.debug('Exited')
def configure_socket(self, old_conf=None, has_changed=None):
"""Ensure a socket exists and is appropriately configured.
This function is called on start up, and can also be
called in the event of a configuration reload.
When called for the first time a new socket is created.
        If reloading and either bind_host or bind_port have been
        changed, the existing socket must be closed and a new
socket opened (laws of physics).
In all other cases (bind_host/bind_port have not changed)
the existing socket is reused.
:param old_conf: Cached old configuration settings (if any)
        :param has_changed: callable to determine if a parameter has changed
"""
# Do we need a fresh socket?
new_sock = (old_conf is None or (
has_changed('bind_host') or
has_changed('bind_port')))
# Will we be using https?
use_ssl = not (not self.conf.cert_file or not self.conf.key_file)
# Were we using https before?
old_use_ssl = (old_conf is not None and not (
not old_conf.get('key_file') or
not old_conf.get('cert_file')))
# Do we now need to perform an SSL wrap on the socket?
wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock)
# Do we now need to perform an SSL unwrap on the socket?
unwrap_sock = use_ssl is False and old_use_ssl is True
if new_sock:
self._sock = None
if old_conf is not None:
self.sock.close()
_sock = get_socket(self.conf, self.default_port)
_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE, 1)
self._sock = _sock
if wrap_sock:
self.sock = ssl.wrap_socket(self._sock,
certfile=self.conf.cert_file,
keyfile=self.conf.key_file)
if unwrap_sock:
self.sock = self._sock
if new_sock and not use_ssl:
self.sock = self._sock
# Pick up newly deployed certs
if old_conf is not None and use_ssl is True and old_use_ssl is True:
if has_changed('cert_file'):
self.sock.certfile = self.conf.cert_file
if has_changed('key_file'):
self.sock.keyfile = self.conf.key_file
if new_sock or (old_conf is not None and has_changed('tcp_keepidle')):
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
self.conf.tcp_keepidle)
if old_conf is not None and has_changed('backlog'):
self.sock.listen(self.conf.backlog)
def _remove_children(self, pid):
if pid in self.children:
self.children.remove(pid)
LOG.info(_LI('Removed dead child %s'), pid)
elif pid in self.stale_children:
self.stale_children.remove(pid)
LOG.info(_LI('Removed stale child %s'), pid)
else:
            LOG.warning(_LW('Unrecognised child %s'), pid)
def _verify_and_respawn_children(self, pid, status):
if len(self.stale_children) == 0:
LOG.debug('No stale children')
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
LOG.error(_LE('Not respawning child %d, cannot '
'recover from termination'), pid)
if not self.children and not self.stale_children:
LOG.info(
_LI('All workers have terminated. Exiting'))
self.running = False
else:
if len(self.children) < self.conf.workers:
self.run_child()
def stash_conf_values(self):
"""Make a copy of some of the current global CONF's settings.
Allows determining if any of these values have changed when the config
is reloaded.
"""
conf = {}
conf['bind_host'] = self.conf.bind_host
conf['bind_port'] = self.conf.bind_port
conf['backlog'] = self.conf.backlog
conf['key_file'] = self.conf.key_file
conf['cert_file'] = self.conf.cert_file
return conf
def reload(self):
"""Reload and re-apply configuration settings.
Existing child processes are sent a SIGHUP signal
and will exit after completing existing requests.
New child processes, which will have the updated
        configuration, are spawned. This avoids interrupting
        the service.
"""
def _has_changed(old, new, param):
old = old.get(param)
new = getattr(new, param)
return (new != old)
old_conf = self.stash_conf_values()
has_changed = functools.partial(_has_changed, old_conf, self.conf)
cfg.CONF.reload_config_files()
os.killpg(self.pgid, signal.SIGHUP)
self.stale_children = self.children
self.children = set()
# Ensure any logging config changes are picked up
logging.setup(cfg.CONF, self.name)
self.configure_socket(old_conf, has_changed)
self.start_wsgi()
def wait(self):
"""Wait until all servers have completed running."""
try:
if self.children:
self.wait_on_children()
else:
self.pool.waitall()
except KeyboardInterrupt:
pass
def run_child(self):
def child_hup(*args):
"""Shuts down child processes, existing requests are handled."""
signal.signal(signal.SIGHUP, signal.SIG_IGN)
eventlet.wsgi.is_accepting = False
self.sock.close()
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, child_hup)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
# ignore the interrupt signal to avoid a race whereby
# a child worker receives the signal before the parent
# and is respawned unnecessarily as a result
signal.signal(signal.SIGINT, signal.SIG_IGN)
# The child has no need to stash the unwrapped
# socket, and the reference prevents a clean
# exit on sighup
self._sock = None
self.run_server()
LOG.info(_LI('Child %d exiting normally'), os.getpid())
# self.pool.waitall() is now called in wsgi's server so
# it's safe to exit here
sys.exit(0)
else:
LOG.info(_LI('Started child %s'), pid)
self.children.add(pid)
def run_server(self):
"""Run a WSGI server."""
eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
eventlet.hubs.use_hub('poll')
eventlet.patcher.monkey_patch(all=False, socket=True)
self.pool = eventlet.GreenPool(size=self.threads)
socket_timeout = cfg.CONF.eventlet_opts.client_socket_timeout or None
try:
eventlet.wsgi.server(
self.sock,
self.application,
custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT,
log=self._wsgi_logger,
debug=cfg.CONF.debug,
keepalive=cfg.CONF.eventlet_opts.wsgi_keep_alive,
socket_timeout=socket_timeout)
except socket.error as err:
            if err.args[0] != errno.EINVAL:
raise
self.pool.waitall()
def _single_run(self, application, sock):
"""Start a WSGI server in a new green thread."""
LOG.info(_LI("Starting single process server"))
eventlet.wsgi.server(sock, application,
custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT,
log=self._wsgi_logger,
debug=cfg.CONF.debug)
class Middleware(object):
"""Base WSGI middleware wrapper.
These classes require an application to be initialized that will be called
next. By default the middleware will simply call its wrapped app, or you
can override __call__ to customize its behavior.
"""
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
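# A minimal sketch (not part of the original module) of the override points
# described in the Middleware docstring above. The class name and header are
# hypothetical; process_request() could equally short-circuit the pipeline by
# returning a response instead of None.
class _ExampleHeaderMiddleware(Middleware):
    """Hypothetical middleware that stamps every response with a header."""
    def process_response(self, response):
        response.headers['X-Example'] = 'handled'
        return response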
class Debug(Middleware):
"""Helper class to get information about the request and response.
Helper class that can be inserted into any WSGI application chain
to get information about the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
print('')
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in six.iteritems(resp.headers):
print(key, "=", value)
print('')
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Prints the contents of a wrapper string iterator when iterated."""
print(("*" * 40) + " BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print('')
def debug_filter(app, conf, **local_conf):
return Debug(app)
class DefaultMethodController(object):
"""Controller that handles the OPTIONS request method.
This controller handles the OPTIONS request method and any of the HTTP
methods that are not explicitly implemented by the application.
"""
def options(self, req, allowed_methods, *args, **kwargs):
"""Return a response that includes the 'Allow' header.
Return a response that includes the 'Allow' header listing the methods
that are implemented. A 204 status code is used for this response.
"""
raise webob.exc.HTTPNoContent(headers=[('Allow', allowed_methods)])
def reject(self, req, allowed_methods, *args, **kwargs):
"""Return a 405 method not allowed error.
As a convenience, the 'Allow' header with the list of implemented
methods is included in the response as well.
"""
raise webob.exc.HTTPMethodNotAllowed(
headers=[('Allow', allowed_methods)])
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be a wsgi.Controller, who will route
the request to the action method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, "/svrlist", controller=sc, action="list")
# Actions are all implicitly defined
mapper.resource("server", "servers", controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify
def _dispatch(req):
"""Returns controller after matching the incoming request to a route.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404 or the
routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
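# A minimal sketch (not part of the original module) of wiring a Router from a
# routes.Mapper, mirroring the docstring examples above. The path and action
# are hypothetical; the controller would normally be a Resource instance
# (defined later in this module).
def _example_build_router(controller):
    """Build a Router that maps /servers to the controller's index action."""
    mapper = routes.Mapper()
    mapper.connect(None, "/servers", controller=controller, action="index")
    return Router(mapper)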
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def best_match_content_type(self):
"""Determine the requested response content-type."""
supported = ('application/json',)
bm = self.accept.best_match(supported)
return bm or 'application/json'
def get_content_type(self, allowed_content_types):
"""Determine content type of the request body."""
if "Content-Type" not in self.headers:
raise exception.InvalidContentType(content_type=None)
content_type = self.content_type
if content_type not in allowed_content_types:
raise exception.InvalidContentType(content_type=content_type)
else:
return content_type
def best_match_language(self):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
all_languages = i18n.get_available_languages('heat')
return self.accept_language.best_match(all_languages)
def is_json_content_type(request):
if request.method == 'GET':
try:
aws_content_type = request.params.get("ContentType")
except Exception:
aws_content_type = None
# respect aws_content_type when both available
content_type = aws_content_type or request.content_type
else:
content_type = request.content_type
# bug #1887882
# for back compatible for null or plain content type
if not content_type or content_type.startswith('text/plain'):
content_type = 'application/json'
if (content_type in ('JSON', 'application/json')
and request.body.startswith(b'{')):
return True
return False
class JSONRequestDeserializer(object):
def has_body(self, request):
"""Returns whether a Webob.Request object will possess an entity body.
:param request: Webob.Request object
"""
if (int(request.content_length or 0) > 0 and
is_json_content_type(request)):
return True
return False
def from_json(self, datastring):
try:
if len(datastring) > cfg.CONF.max_json_body_size:
msg = _('JSON body size (%(len)s bytes) exceeds maximum '
'allowed size (%(limit)s bytes).'
) % {'len': len(datastring),
'limit': cfg.CONF.max_json_body_size}
raise exception.RequestLimitExceeded(message=msg)
return jsonutils.loads(datastring)
except ValueError as ex:
raise webob.exc.HTTPBadRequest(six.text_type(ex))
def default(self, request):
if self.has_body(request):
return {'body': self.from_json(request.body)}
else:
return {}
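# A minimal sketch (not part of the original module) showing how the
# deserializer above is used: for a JSON request with a body it returns
# {'body': <parsed dict>}, otherwise {}. Body size is bounded by the
# max_json_body_size option registered earlier in this module.
def _example_deserialize(request):
    """Return {'body': parsed_json} for a JSON request with a body, else {}."""
    return JSONRequestDeserializer().default(request)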
class Resource(object):
"""WSGI app that handles (de)serialization and controller dispatch.
Reads routing information supplied by RoutesMiddleware and calls
the requested action method upon its deserializer, controller,
and serializer. Those three objects may implement any of the basic
controller action methods (create, update, show, index, delete)
along with any that may be specified in the api router. A 'default'
method may also be implemented to be used in place of any
non-implemented actions. Deserializer methods must accept a request
argument and return a dictionary. Controller methods must accept a
request argument. Additionally, they must also accept keyword
arguments that represent the keys returned by the Deserializer. They
may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
"""
def __init__(self, controller, deserializer, serializer=None):
"""Initialisation of the WSGI app.
:param controller: object that implement methods created by routes lib
:param deserializer: object that supports webob request deserialization
through controller-like actions
:param serializer: object that supports webob response serialization
through controller-like actions
"""
self.controller = controller
self.deserializer = deserializer
self.serializer = serializer
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
# From reading the boto code, and observation of real AWS api responses
        # it seems that the AWS api ignores the content-type in the HTTP header
# Instead it looks at a "ContentType" GET query parameter
# This doesn't seem to be documented in the AWS cfn API spec, but it
# would appear that the default response serialization is XML, as
# described in the API docs, but passing a query parameter of
# ContentType=JSON results in a JSON serialized response...
content_type = request.params.get("ContentType")
try:
deserialized_request = self.dispatch(self.deserializer,
action, request)
action_args.update(deserialized_request)
LOG.debug(('Calling %(controller)s : %(action)s'),
{'controller': self.controller, 'action': action})
action_result = self.dispatch(self.controller, action,
request, **action_args)
except TypeError as err:
LOG.error(_LE('Exception handling resource: %s'), err)
msg = _('The server could not comply with the request since '
'it is either malformed or otherwise incorrect.')
err = webob.exc.HTTPBadRequest(msg)
http_exc = translate_exception(err, request.best_match_language())
# NOTE(luisg): We disguise HTTP exceptions, otherwise they will be
# treated by wsgi as responses ready to be sent back and they
# won't make it into the pipeline app that serializes errors
raise exception.HTTPExceptionDisguise(http_exc)
except webob.exc.HTTPException as err:
if isinstance(err, aws_exception.HeatAPIException):
# The AWS compatible API's don't use faultwrap, so
# we want to detect the HeatAPIException subclasses
# and raise rather than wrapping in HTTPExceptionDisguise
raise
if not isinstance(err, webob.exc.HTTPError):
# Some HTTPException are actually not errors, they are
# responses ready to be sent back to the users, so we don't
# error log, disguise or translate those
raise
if isinstance(err, webob.exc.HTTPServerError):
LOG.error(
_LE("Returning %(code)s to user: %(explanation)s"),
{'code': err.code, 'explanation': err.explanation})
http_exc = translate_exception(err, request.best_match_language())
raise exception.HTTPExceptionDisguise(http_exc)
except exception.HeatException as err:
raise translate_exception(err, request.best_match_language())
except Exception as err:
log_exception(err, sys.exc_info())
raise translate_exception(err, request.best_match_language())
# Here we support either passing in a serializer or detecting it
# based on the content type.
try:
serializer = self.serializer
if serializer is None:
if content_type == "JSON":
serializer = serializers.JSONResponseSerializer()
else:
serializer = serializers.XMLResponseSerializer()
response = webob.Response(request=request)
self.dispatch(serializer, action, response, action_result)
return response
# return unserializable result (typically an exception)
except Exception:
# Here we should get API exceptions derived from HeatAPIException
# these implement get_unserialized_body(), which allow us to get
# a dict containing the unserialized error response.
# We only need to serialize for JSON content_type, as the
# exception body is pre-serialized to the default XML in the
# HeatAPIException constructor
# If we get something else here (e.g a webob.exc exception),
# this will fail, and we just return it without serializing,
# which will not conform to the expected AWS error response format
if content_type == "JSON":
try:
err_body = action_result.get_unserialized_body()
serializer.default(action_result, err_body)
except Exception:
LOG.warning(_LW("Unable to serialize exception "
"response"))
return action_result
def dispatch(self, obj, action, *args, **kwargs):
"""Find action-specific method on self and call it."""
try:
method = getattr(obj, action)
except AttributeError:
method = getattr(obj, 'default')
return method(*args, **kwargs)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except Exception:
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
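# A minimal sketch (not part of the original module) of composing a Resource
# around a hypothetical controller. Passing serializer=None lets the
# ContentType query parameter select JSON or XML at request time, as described
# in __call__ above.
def _example_make_resource(controller):
    """Wrap a controller with JSON deserialization and per-request
    serializer selection."""
    return Resource(controller, JSONRequestDeserializer(), serializer=None)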
def log_exception(err, exc_info):
args = {'exc_info': exc_info} if cfg.CONF.verbose or cfg.CONF.debug else {}
LOG.error(_LE("Unexpected error occurred serving API: %s"), err,
**args)
def translate_exception(exc, locale):
"""Translates all translatable elements of the given exception."""
if isinstance(exc, exception.HeatException):
exc.message = i18n.translate(exc.message, locale)
else:
exc.message = i18n.translate(six.text_type(exc), locale)
if isinstance(exc, webob.exc.HTTPError):
exc.explanation = i18n.translate(exc.explanation, locale)
exc.detail = i18n.translate(getattr(exc, 'detail', ''), locale)
return exc
@six.add_metaclass(abc.ABCMeta)
class BasePasteFactory(object):
"""A base class for paste app and filter factories.
Sub-classes must override the KEY class attribute and provide
a __call__ method.
"""
KEY = None
def __init__(self, conf):
self.conf = conf
@abc.abstractmethod
def __call__(self, global_conf, **local_conf):
return
def _import_factory(self, local_conf):
"""Import an app/filter class.
Lookup the KEY from the PasteDeploy local conf and import the
class named there. This class can then be used as an app or
filter factory.
Note we support the <module>:<class> format.
Note also that if you do e.g.
key =
value
then ConfigParser returns a value with a leading newline, so
we strip() the value before using it.
"""
class_name = local_conf[self.KEY].replace(':', '.').strip()
return importutils.import_class(class_name)
class AppFactory(BasePasteFactory):
"""A Generic paste.deploy app factory.
This requires heat.app_factory to be set to a callable which returns a
WSGI app when invoked. The format of the name is <module>:<callable> e.g.
[app:apiv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cfn.v1:API
The WSGI app constructor must accept a ConfigOpts object and a local config
dict as its two arguments.
"""
KEY = 'heat.app_factory'
def __call__(self, global_conf, **local_conf):
"""The actual paste.app_factory protocol method."""
factory = self._import_factory(local_conf)
return factory(self.conf, **local_conf)
class FilterFactory(AppFactory):
"""A Generic paste.deploy filter factory.
This requires heat.filter_factory to be set to a callable which returns a
WSGI filter when invoked. The format is <module>:<callable> e.g.
[filter:cache]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.middleware.cache:CacheFilter
The WSGI filter constructor must accept a WSGI app, a ConfigOpts object and
a local config dict as its three arguments.
"""
KEY = 'heat.filter_factory'
def __call__(self, global_conf, **local_conf):
"""The actual paste.filter_factory protocol method."""
factory = self._import_factory(local_conf)
def filter(app):
return factory(app, self.conf, **local_conf)
return filter
def setup_paste_factories(conf):
"""Set up the generic paste app and filter factories.
Set things up so that:
paste.app_factory = heat.common.wsgi:app_factory
and
paste.filter_factory = heat.common.wsgi:filter_factory
work correctly while loading PasteDeploy configuration.
The app factories are constructed at runtime to allow us to pass a
ConfigOpts object to the WSGI classes.
:param conf: a ConfigOpts object
"""
global app_factory, filter_factory
app_factory = AppFactory(conf)
filter_factory = FilterFactory(conf)
def teardown_paste_factories():
"""Reverse the effect of setup_paste_factories()."""
global app_factory, filter_factory
del app_factory
del filter_factory
def paste_deploy_app(paste_config_file, app_name, conf):
"""Load a WSGI app from a PasteDeploy configuration.
Use deploy.loadapp() to load the app from the PasteDeploy configuration,
ensuring that the supplied ConfigOpts object is passed to the app and
filter constructors.
:param paste_config_file: a PasteDeploy config file
:param app_name: the name of the app/pipeline to load from the file
:param conf: a ConfigOpts object to supply to the app and its filters
:returns: the WSGI app
"""
setup_paste_factories(conf)
try:
return deploy.loadapp("config:%s" % paste_config_file, name=app_name)
finally:
teardown_paste_factories()
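# A minimal sketch (not part of the original module) of loading an API pipeline
# through paste_deploy_app(). Both the config path and the pipeline name are
# hypothetical placeholders; real deployments take them from heat.conf and the
# installed api-paste.ini.
def _example_load_api(conf):
    """Load a hypothetical 'heat-api' pipeline from an api-paste.ini file."""
    return paste_deploy_app('/etc/heat/api-paste.ini', 'heat-api', conf)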
|
|
"""
nav.py
Icons used in this script were created by
oxygenicons (http://www.oxygen-icons.org/)
and distributed at the IconArchive (http://www.iconarchive.com) under
the GNU Lesser General Public License.
"""
from __future__ import print_function
# Load the needed packages
from functools import partial
import os, glob
import numpy as np
import pyart
import time
from ..core import (Component, Variable, common, QtWidgets, QtCore, QtGui,
log)
class FileNavigator(Component):
'''
    Interface for navigating between and opening radar and grid files.
'''
Vradar = None #: see :ref:`shared_variable`
Vgrid = None #: see :ref:`shared_variable`
Vfilelist = None #: see :ref:`shared_variable`
@classmethod
def guiStart(self, parent=None):
'''Graphical interface for starting this class.'''
kwargs, independent = \
common._SimplePluginStart("FileNavigator").startDisplay()
kwargs['parent'] = parent
return self(**kwargs), independent
def __init__(self, pathDir=None, filename=None, Vradar=None, Vgrid=None,
Vfilelist=None, name="FileNavigator", parent=None):
'''Initialize the class to create the interface.
Parameters
----------
[Optional]
pathDir : string
Input directory path to open. If None user current directory
filename : string, False or None
            File to open first. None will open a file dialog; False will
            open no file.
Vradar : :py:class:`~artview.core.core.Variable` instance
Radar signal variable.
A value of None initializes an empty Variable.
Vgrid : :py:class:`~artview.core.core.Variable` instance
Grid signal variable.
A value of None initializes an empty Variable.
        Vfilelist : :py:class:`~artview.core.core.Variable` instance
            File list signal variable.
            A value of None initializes an empty file list Variable.
name : string
Menu name.
parent : PyQt instance
Parent instance to associate to menu.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
'''
super(FileNavigator, self).__init__(name=name, parent=parent)
self.central_widget = QtWidgets.QWidget()
self.setCentralWidget(self.central_widget)
self.layout = QtWidgets.QGridLayout(self.central_widget)
if pathDir is None:
pathDir = os.getcwd()
self.fileindex = 0
# Set up signal, so that DISPLAY can react to
        # changes in radar, grid or filelist shared variables
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
if Vgrid is None:
self.Vgrid = Variable(None)
else:
self.Vgrid = Vgrid
if Vfilelist is None:
self.Vfilelist = Variable([])
else:
self.Vfilelist = Vfilelist
self.sharedVariables = {"Vradar": self.NewFile,
"Vgrid": self.NewFile,
"Vfilelist": self.NewFilelist}
# Connect the components
self.connectAllVariables()
# Set up the Display layout
self.createUI()
self.filename = ''
if Vradar is None and Vgrid is None:
if filename is None:
self._openfile(filename)
elif filename is not False:
self._openfile(filename)
self.directoryAction.setText(pathDir)
self.NewFile(self.Vradar, True)
self.NewFilelist(self.Vfilelist, True)
self.raise_()
self.setWindowState(QtCore.Qt.WindowActive)
self.show()
######################
# Layout Methods #
######################
def createUI(self):
'''Mount the navigation layout.'''
parentdir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
pixfirst = QtGui.QPixmap(os.sep.join([parentdir, 'icons',
"arrow_go_first_icon.png"]))
pixprev = QtGui.QPixmap(os.sep.join([parentdir, 'icons',
"arrow_go_previous_icon.png"]))
pixnext = QtGui.QPixmap(os.sep.join([parentdir, 'icons',
"arrow_go_next_icon.png"]))
pixlast = QtGui.QPixmap(os.sep.join([parentdir, 'icons',
"arrow_go_last_icon.png"]))
pixsave = QtGui.QPixmap(os.sep.join(
[parentdir, 'icons',
"save_icon.png"]))
pixopen = QtGui.QPixmap(os.sep.join(
[parentdir, 'icons',
"open_icon.png"]))
self.openButton = QtWidgets.QPushButton(QtGui.QIcon(pixopen),"open")
self.layout.addWidget(self.openButton, 0, 0)
self.saveButton = QtWidgets.QPushButton(QtGui.QIcon(pixsave),"save")
self.layout.addWidget(self.saveButton, 0, 1)
self.act_first = QtWidgets.QToolButton()
self.act_first.setIcon(QtGui.QIcon(pixfirst))
self.act_first.clicked.connect(self.goto_first_file)
self.layout.addWidget(self.act_first, 0, 2)
self.act_prev = QtWidgets.QToolButton()
self.act_prev.setIcon(QtGui.QIcon(pixprev))
self.act_prev.clicked.connect(self.goto_prev_file)
self.layout.addWidget(self.act_prev, 0, 3)
self.act_next = QtWidgets.QToolButton()
self.act_next.setIcon(QtGui.QIcon(pixnext))
self.act_next.clicked.connect(self.goto_next_file)
self.layout.addWidget(self.act_next, 0, 4)
self.act_last = QtWidgets.QToolButton()
self.act_last.setIcon(QtGui.QIcon(pixlast))
self.act_last.clicked.connect(self.goto_last_file)
self.layout.addWidget(self.act_last, 0, 5)
self.openMenu = QtWidgets.QMenu()
self.openButton.setMenu(self.openMenu)
self.directoryMenu = self.openMenu.addMenu("Directory:")
self.directoryAction = self.directoryMenu.addAction("")
self.directoryAction.triggered.connect(lambda: self._openfile())
self.fileMenu = self.openMenu.addMenu("File:")
self.fileAction = self.fileMenu.addAction("",)
self.fileAction.triggered.connect(lambda: self._openfile())
action = QtWidgets.QAction("Open", self,
triggered=lambda: self._openfile())
self.openMenu.addAction(action)
self.saveMenu = QtWidgets.QMenu()
self.saveButton.setMenu(self.saveMenu)
self.saveRadarAction = QtWidgets.QAction("Save Radar", self,
triggered=self.saveRadar)
if self.Vradar.value is None:
self.saveRadarAction.setEnabled(False)
self.saveMenu.addAction(self.saveRadarAction)
self.saveGridAction = QtWidgets.QAction("Save Grid", self,
triggered=self.saveGrid)
if self.Vgrid.value is None:
self.saveGridAction.setEnabled(False)
self.saveMenu.addAction(self.saveGridAction)
action = QtWidgets.QAction("Help", self,
triggered=self._show_help)
self.openMenu.addAction(action)
self.layout.addItem(QtWidgets.QSpacerItem(
0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding),
0, 5)
######################
# Update Methods #
######################
def _show_help(self):
helptext = ("Use Icons above for navigation.<br>"
"By linking/unliking the radar variables in the<br>"
"LinkSharedVariables menu for various components, "
"you can<br>"
"control which Display is navigated."
)
common.ShowLongText(helptext)
def _update_tools(self):
'''Update the navigation button.'''
filelist = self.Vfilelist.value
if filelist is None:
return
if self.filename in filelist:
self.fileindex = filelist.index(self.filename)
else:
self.fileindex = 0
if self.fileindex > 0 and self.fileindex < len(filelist):
self.act_prev.setEnabled(True)
self.act_prev.setToolTip(
'Previous file: %s' %
os.path.basename(filelist[self.fileindex - 1]))
else:
self.act_prev.setEnabled(False)
self.act_prev.setToolTip('Previous file:')
if self.fileindex >= 0 and self.fileindex < len(filelist) - 1:
self.act_next.setEnabled(True)
self.act_next.setToolTip(
'Next file: %s' %
os.path.basename(filelist[self.fileindex + 1]))
else:
self.act_next.setEnabled(False)
self.act_next.setToolTip('Next file:')
if filelist:
self.act_first.setEnabled(True)
self.act_first.setToolTip(
"First file: %s" %
os.path.basename(filelist[0]))
self.act_last.setEnabled(True)
self.act_last.setToolTip(
"Last file: %s" %
os.path.basename(filelist[-1]))
else:
self.act_first.setEnabled(False)
self.act_first.setToolTip("First file:")
self.act_last.setEnabled(False)
self.act_last.setToolTip("Last file:")
#########################
# Selection Methods #
#########################
def AdvanceFileSelect(self, findex):
'''Captures a selection and open file.'''
if findex > (len(self.Vfilelist.value) - 1):
msg = "End of directory, cannot advance!"
common.ShowWarning(msg)
findex = (len(self.Vfilelist.value) - 1)
return
elif findex < 0:
msg = "Beginning of directory, must move forward!"
common.ShowWarning(msg)
findex = 0
return
self.fileindex = findex
self.filename = self.Vfilelist.value[findex]
self._openfile(self.filename)
def goto_first_file(self):
self.fileindex = 0
self.AdvanceFileSelect(self.fileindex)
def goto_last_file(self):
self.fileindex = len(self.Vfilelist.value) - 1
self.AdvanceFileSelect(self.fileindex)
def goto_prev_file(self):
self.fileindex = self.fileindex - 1
self.AdvanceFileSelect(self.fileindex)
def goto_next_file(self):
self.fileindex = self.fileindex + 1
self.AdvanceFileSelect(self.fileindex)
def _openfile(self, filename=None):
'''Open a file via a file selection window.'''
if filename is None:
dirIn = str(self.directoryAction.text())
filename = QtWidgets.QFileDialog.getOpenFileName(
None, 'Open file', dirIn)
if isinstance(filename, tuple): # PyQt5
filename = filename[0]
if filename == '':
return
filename = str(filename)
self.filename = filename
print("Opening file " + filename, file=log.info)
# Read the data from file
radar_warning = False
grid_warning = False
try:
radar = pyart.io.read(filename, delay_field_loading=True)
# Add the filename for Display
radar.filename = filename
self.replaceRadar(radar)
return
        except Exception:
try:
radar = pyart.io.read(filename)
# Add the filename for Display
radar.filename = filename
self.replaceRadar(radar)
return
            except Exception:
import traceback
print(traceback.format_exc(), file=log.error)
radar_warning = True
try:
grid = pyart.io.read_grid(
filename, delay_field_loading=True)
self.replaceGrid(grid)
return
        except Exception:
try:
grid = pyart.io.read_grid(filename)
self.replaceGrid(grid)
return
            except Exception:
import traceback
print(traceback.format_exc(), file=log.error)
grid_warning = True
if grid_warning or radar_warning:
msg = "Py-ART didn't recognize this file!"
common.ShowWarning(msg)
else:
msg = "Could not open file, invalid mode!"
common.ShowWarning(msg)
return
def NewFilelist(self, variable, strong):
'''respond to change in filelist.'''
if strong:
self._update_tools()
def NewFile(self, variable, strong):
'''Respond to change in a container (radar or grid).'''
if hasattr(variable.value, 'filename'):
            # Update the info label.
self.filename = variable.value.filename
dirIn = os.path.dirname(self.filename)
self.directoryAction.setText(dirIn)
self.fileAction.setText(os.path.basename(self.filename))
if (self.Vfilelist.value is None or
self.filename not in self.Vfilelist.value):
filelist = [path for path in glob.glob(os.path.join(dirIn, '*'))
if os.path.isfile(path)]
filelist.sort()
self.fileindex = filelist.index(self.filename)
self.Vfilelist.change(filelist)
else:
self.fileindex = self.Vfilelist.value.index(self.filename)
self._update_tools()
if variable == self.Vradar:
self.saveRadarAction.setEnabled(variable.value is not None)
else:
self.saveGridAction.setEnabled(variable.value is not None)
def replaceRadar(self, radar):
'''Replace current radar, warning for data lost.'''
if hasattr(self.Vradar.value, 'changed') and self.Vradar.value.changed:
resp = common.ShowQuestionYesNo("Save changes before moving to next File?")
if resp == QtWidgets.QMessageBox.Yes:
self.Vradar.change(radar)
elif resp != QtWidgets.QMessageBox.No:
return
else:
self.Vradar.change(radar)
def replaceGrid(self, grid):
'''Replace current grid, warning for data lost.'''
if hasattr(self.Vgrid.value, 'changed') and self.Vgrid.value.changed:
resp = common.ShowQuestionYesNo("Save changes before moving to next File?")
if resp == QtWidgets.QMessageBox.Yes:
self.Vgrid.change(grid)
elif resp != QtWidgets.QMessageBox.No:
return
else:
self.Vgrid.change(grid)
def saveRadar(self):
        '''Open a dialog box to save radar file.'''
filename = QtWidgets.QFileDialog.getSaveFileName(
self, 'Save Radar File', str(self.directoryAction.text()))
if isinstance(filename, tuple): # PyQt5
filename = filename[0]
filename = str(filename)
if filename == '' or self.Vradar.value is None:
return
else:
pyart.io.write_cfradial(filename, self.Vradar.value)
print("Saved %s" % (filename), file=log.info)
def saveGrid(self):
'''Open a dialog box to save grid file.'''
filename = QtWidgets.QFileDialog.getSaveFileName(
self, 'Save grid File', str(self.directoryAction.text()))
if isinstance(filename, tuple): # PyQt5
filename = filename[0]
filename = str(filename)
if filename == '' or self.Vgrid.value is None:
return
else:
pyart.io.write_grid(filename, self.Vgrid.value)
_plugins = [FileNavigator]
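# A minimal sketch (not part of the original module) of starting the navigator
# on its own. It assumes a running Qt application and that this module is
# importable in that context; filename=False suppresses the initial open
# dialog, and the shared Variables default to empty ones.
def _example_start_navigator(path):
    """Create a standalone FileNavigator browsing the given directory."""
    return FileNavigator(pathDir=path, filename=False)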
|
|
#!/usr/bin/env vpython
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provisions Android devices with settings required for bots.
Usage:
./provision_devices.py [-d <device serial number>]
"""
import argparse
import datetime
import json
import logging
import os
import posixpath
import re
import subprocess
import sys
import time
# Import _strptime before threaded code. datetime.datetime.strptime is
# threadsafe except for the initial import of the _strptime module.
# See crbug.com/584730 and https://bugs.python.org/issue7980.
import _strptime # pylint: disable=unused-import
import devil_chromium
from devil.android import battery_utils
from devil.android import device_denylist
from devil.android import device_errors
from devil.android import device_temp_file
from devil.android import device_utils
from devil.android.sdk import keyevent
from devil.android.sdk import version_codes
from devil.constants import exit_codes
from devil.utils import run_tests_helper
from devil.utils import timeout_retry
from pylib import constants
from pylib import device_settings
from pylib.constants import host_paths
_SYSTEM_WEBVIEW_PATHS = ['/system/app/webview', '/system/app/WebViewGoogle']
_CHROME_PACKAGE_REGEX = re.compile('.*chrom.*')
_TOMBSTONE_REGEX = re.compile('tombstone.*')
class _DEFAULT_TIMEOUTS(object):
# L can take a while to reboot after a wipe.
LOLLIPOP = 600
PRE_LOLLIPOP = 180
HELP_TEXT = '{}s on L, {}s on pre-L'.format(LOLLIPOP, PRE_LOLLIPOP)
class _PHASES(object):
WIPE = 'wipe'
PROPERTIES = 'properties'
FINISH = 'finish'
ALL = [WIPE, PROPERTIES, FINISH]
def ProvisionDevices(args):
denylist = (device_denylist.Denylist(args.denylist_file)
if args.denylist_file else None)
devices = [
d for d in device_utils.DeviceUtils.HealthyDevices(denylist)
if not args.emulators or d.adb.is_emulator
]
if args.device:
devices = [d for d in devices if d == args.device]
if not devices:
raise device_errors.DeviceUnreachableError(args.device)
parallel_devices = device_utils.DeviceUtils.parallel(devices)
if args.emulators:
parallel_devices.pMap(SetProperties, args)
else:
parallel_devices.pMap(ProvisionDevice, denylist, args)
if args.auto_reconnect:
_LaunchHostHeartbeat()
denylisted_devices = denylist.Read() if denylist else []
if args.output_device_denylist:
with open(args.output_device_denylist, 'w') as f:
json.dump(denylisted_devices, f)
if all(d in denylisted_devices for d in devices):
raise device_errors.NoDevicesError
return 0
def ProvisionDevice(device, denylist, options):
def should_run_phase(phase_name):
return not options.phases or phase_name in options.phases
def run_phase(phase_func, reboot_timeout, reboot=True):
try:
device.WaitUntilFullyBooted(timeout=reboot_timeout, retries=0)
except device_errors.CommandTimeoutError:
logging.error('Device did not finish booting. Will try to reboot.')
device.Reboot(timeout=reboot_timeout)
phase_func(device, options)
if reboot:
device.Reboot(False, retries=0)
device.adb.WaitForDevice()
try:
if options.reboot_timeout:
reboot_timeout = options.reboot_timeout
elif device.build_version_sdk >= version_codes.LOLLIPOP:
reboot_timeout = _DEFAULT_TIMEOUTS.LOLLIPOP
else:
reboot_timeout = _DEFAULT_TIMEOUTS.PRE_LOLLIPOP
if should_run_phase(_PHASES.WIPE):
if (options.chrome_specific_wipe or device.IsUserBuild() or
device.build_version_sdk >= version_codes.MARSHMALLOW):
run_phase(WipeChromeData, reboot_timeout)
else:
run_phase(WipeDevice, reboot_timeout)
if should_run_phase(_PHASES.PROPERTIES):
run_phase(SetProperties, reboot_timeout)
if should_run_phase(_PHASES.FINISH):
run_phase(FinishProvisioning, reboot_timeout, reboot=False)
if options.chrome_specific_wipe:
package = "com.google.android.gms"
version_name = device.GetApplicationVersion(package)
logging.info("Version name for %s is %s", package, version_name)
CheckExternalStorage(device)
except device_errors.CommandTimeoutError:
logging.exception('Timed out waiting for device %s. Adding to denylist.',
str(device))
if denylist:
denylist.Extend([str(device)], reason='provision_timeout')
except (device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logging.exception('Failed to provision device %s. Adding to denylist.',
str(device))
if denylist:
denylist.Extend([str(device)], reason='provision_failure')
def CheckExternalStorage(device):
"""Checks that storage is writable and if not makes it writable.
Arguments:
device: The device to check.
"""
try:
with device_temp_file.DeviceTempFile(
device.adb, suffix='.sh', dir=device.GetExternalStoragePath()) as f:
device.WriteFile(f.name, 'test')
except device_errors.CommandFailedError:
logging.info('External storage not writable. Remounting / as RW')
device.RunShellCommand(['mount', '-o', 'remount,rw', '/'],
check_return=True, as_root=True)
device.EnableRoot()
with device_temp_file.DeviceTempFile(
device.adb, suffix='.sh', dir=device.GetExternalStoragePath()) as f:
device.WriteFile(f.name, 'test')
def WipeChromeData(device, options):
"""Wipes chrome specific data from device
(1) uninstall any app whose name matches *chrom*, except
com.android.chrome, which is the chrome stable package. Doing so also
removes the corresponding dirs under /data/data/ and /data/app/
(2) remove any dir under /data/app-lib/ whose name matches *chrom*
(3) remove any files under /data/tombstones/ whose name matches "tombstone*"
(4) remove /data/local.prop if there is any
(5) remove /data/local/chrome-command-line if there is any
(6) remove anything under /data/local/.config/ if the dir exists
(this is telemetry related)
(7) remove anything under /data/local/tmp/
Arguments:
device: the device to wipe
"""
if options.skip_wipe:
return
try:
if device.IsUserBuild():
_UninstallIfMatch(device, _CHROME_PACKAGE_REGEX,
constants.PACKAGE_INFO['chrome_stable'].package)
device.RunShellCommand('rm -rf %s/*' % device.GetExternalStoragePath(),
check_return=True)
device.RunShellCommand('rm -rf /data/local/tmp/*', check_return=True)
else:
device.EnableRoot()
_UninstallIfMatch(device, _CHROME_PACKAGE_REGEX,
constants.PACKAGE_INFO['chrome_stable'].package)
_WipeUnderDirIfMatch(device, '/data/app-lib/', _CHROME_PACKAGE_REGEX)
_WipeUnderDirIfMatch(device, '/data/tombstones/', _TOMBSTONE_REGEX)
_WipeFileOrDir(device, '/data/local.prop')
_WipeFileOrDir(device, '/data/local/chrome-command-line')
_WipeFileOrDir(device, '/data/local/.config/')
_WipeFileOrDir(device, '/data/local/tmp/')
device.RunShellCommand('rm -rf %s/*' % device.GetExternalStoragePath(),
check_return=True)
except device_errors.CommandFailedError:
logging.exception('Possible failure while wiping the device. '
'Attempting to continue.')
def WipeDevice(device, options):
"""Wipes data from device, keeping only the adb_keys for authorization.
After wiping data on a device that has been authorized, adb can still
communicate with the device, but after reboot the device will need to be
re-authorized because the adb keys file is stored in /data/misc/adb/.
Thus, adb_keys file is rewritten so the device does not need to be
re-authorized.
Arguments:
device: the device to wipe
"""
if options.skip_wipe:
return
try:
device.EnableRoot()
device_authorized = device.FileExists(constants.ADB_KEYS_FILE)
if device_authorized:
adb_keys = device.ReadFile(constants.ADB_KEYS_FILE,
as_root=True).splitlines()
device.RunShellCommand(['wipe', 'data'],
as_root=True, check_return=True)
device.adb.WaitForDevice()
if device_authorized:
adb_keys_set = set(adb_keys)
for adb_key_file in options.adb_key_files or []:
try:
with open(adb_key_file, 'r') as f:
adb_public_keys = f.readlines()
adb_keys_set.update(adb_public_keys)
except IOError:
logging.warning('Unable to find adb keys file %s.', adb_key_file)
_WriteAdbKeysFile(device, '\n'.join(adb_keys_set))
except device_errors.CommandFailedError:
logging.exception('Possible failure while wiping the device. '
'Attempting to continue.')
def _WriteAdbKeysFile(device, adb_keys_string):
dir_path = posixpath.dirname(constants.ADB_KEYS_FILE)
device.RunShellCommand(['mkdir', '-p', dir_path],
as_root=True, check_return=True)
device.RunShellCommand(['restorecon', dir_path],
as_root=True, check_return=True)
device.WriteFile(constants.ADB_KEYS_FILE, adb_keys_string, as_root=True)
device.RunShellCommand(['restorecon', constants.ADB_KEYS_FILE],
as_root=True, check_return=True)
def SetProperties(device, options):
try:
device.EnableRoot()
except device_errors.CommandFailedError as e:
logging.warning(str(e))
if not device.IsUserBuild():
_ConfigureLocalProperties(device, options.enable_java_debug)
else:
logging.warning('Cannot configure properties in user builds.')
device_settings.ConfigureContentSettings(
device, device_settings.DETERMINISTIC_DEVICE_SETTINGS)
if options.disable_location:
device_settings.ConfigureContentSettings(
device, device_settings.DISABLE_LOCATION_SETTINGS)
else:
device_settings.ConfigureContentSettings(
device, device_settings.ENABLE_LOCATION_SETTINGS)
if options.disable_mock_location:
device_settings.ConfigureContentSettings(
device, device_settings.DISABLE_MOCK_LOCATION_SETTINGS)
else:
device_settings.ConfigureContentSettings(
device, device_settings.ENABLE_MOCK_LOCATION_SETTINGS)
device_settings.SetLockScreenSettings(device)
if options.disable_network:
device_settings.ConfigureContentSettings(
device, device_settings.NETWORK_DISABLED_SETTINGS)
if device.build_version_sdk >= version_codes.MARSHMALLOW:
# Ensure that NFC is also switched off.
device.RunShellCommand(['svc', 'nfc', 'disable'],
as_root=True, check_return=True)
if options.disable_system_chrome:
# The system chrome version on the device interferes with some tests.
device.RunShellCommand(['pm', 'disable', 'com.android.chrome'],
check_return=True)
if options.remove_system_webview:
if any(device.PathExists(p) for p in _SYSTEM_WEBVIEW_PATHS):
logging.info('System WebView exists and needs to be removed')
if device.HasRoot():
        # Disable Marshmallow's Verity security feature
if device.build_version_sdk >= version_codes.MARSHMALLOW:
device.adb.DisableVerity()
device.Reboot()
device.WaitUntilFullyBooted()
device.EnableRoot()
# This is required, e.g., to replace the system webview on a device.
device.adb.Remount()
device.RunShellCommand(['stop'], check_return=True)
device.RunShellCommand(['rm', '-rf'] + _SYSTEM_WEBVIEW_PATHS,
check_return=True)
device.RunShellCommand(['start'], check_return=True)
else:
logging.warning('Cannot remove system webview from a non-rooted device')
else:
logging.info('System WebView already removed')
# Some device types can momentarily disappear after setting properties.
device.adb.WaitForDevice()
def _ConfigureLocalProperties(device, java_debug=True):
"""Set standard readonly testing device properties prior to reboot."""
local_props = [
'persist.sys.usb.config=adb',
'ro.monkey=1',
'ro.test_harness=1',
'ro.audio.silent=1',
'ro.setupwizard.mode=DISABLED',
]
if java_debug:
local_props.append(
'%s=all' % device_utils.DeviceUtils.JAVA_ASSERT_PROPERTY)
local_props.append('debug.checkjni=1')
try:
device.WriteFile(
device.LOCAL_PROPERTIES_PATH,
'\n'.join(local_props), as_root=True)
# Android will not respect the local props file if it is world writable.
device.RunShellCommand(
['chmod', '644', device.LOCAL_PROPERTIES_PATH],
as_root=True, check_return=True)
except device_errors.CommandFailedError:
logging.exception('Failed to configure local properties.')
def FinishProvisioning(device, options):
# The lockscreen can't be disabled on user builds, so send a keyevent
# to unlock it.
if device.IsUserBuild():
device.SendKeyEvent(keyevent.KEYCODE_MENU)
if options.min_battery_level is not None:
battery = battery_utils.BatteryUtils(device)
try:
battery.ChargeDeviceToLevel(options.min_battery_level)
except device_errors.DeviceChargingError:
device.Reboot()
battery.ChargeDeviceToLevel(options.min_battery_level)
if options.max_battery_temp is not None:
try:
battery = battery_utils.BatteryUtils(device)
battery.LetBatteryCoolToTemperature(options.max_battery_temp)
except device_errors.CommandFailedError:
logging.exception('Unable to let battery cool to specified temperature.')
def _set_and_verify_date():
if device.build_version_sdk >= version_codes.MARSHMALLOW:
date_format = '%m%d%H%M%Y.%S'
set_date_command = ['date', '-u']
get_date_command = ['date', '-u']
else:
date_format = '%Y%m%d.%H%M%S'
set_date_command = ['date', '-s']
get_date_command = ['date']
# TODO(jbudorick): This is wrong on pre-M devices -- get/set are
# dealing in local time, but we're setting based on GMT.
strgmtime = time.strftime(date_format, time.gmtime())
set_date_command.append(strgmtime)
device.RunShellCommand(set_date_command, as_root=True, check_return=True)
get_date_command.append('+"%Y%m%d.%H%M%S"')
device_time = device.RunShellCommand(
get_date_command, as_root=True, single_line=True).replace('"', '')
device_time = datetime.datetime.strptime(device_time, "%Y%m%d.%H%M%S")
correct_time = datetime.datetime.strptime(strgmtime, date_format)
tdelta = (correct_time - device_time).seconds
if tdelta <= 1:
logging.info('Date/time successfully set on %s', device)
return True
else:
logging.error('Date mismatch. Device: %s Correct: %s',
device_time.isoformat(), correct_time.isoformat())
return False
# Sometimes the date is not set correctly on the devices. Retry on failure.
if device.IsUserBuild():
# TODO(bpastene): Figure out how to set the date & time on user builds.
pass
else:
if not timeout_retry.WaitFor(
_set_and_verify_date, wait_period=1, max_tries=2):
raise device_errors.CommandFailedError(
'Failed to set date & time.', device_serial=str(device))
props = device.RunShellCommand('getprop', check_return=True)
for prop in props:
logging.info(' %s', prop)
if options.auto_reconnect:
_PushAndLaunchAdbReboot(device, options.target)
def _UninstallIfMatch(device, pattern, app_to_keep):
installed_packages = device.RunShellCommand(['pm', 'list', 'packages'])
installed_system_packages = [
pkg.split(':')[1] for pkg in device.RunShellCommand(['pm', 'list',
'packages', '-s'])]
for package_output in installed_packages:
package = package_output.split(":")[1]
if pattern.match(package) and not package == app_to_keep:
if not device.IsUserBuild() or package not in installed_system_packages:
device.Uninstall(package)
def _WipeUnderDirIfMatch(device, path, pattern):
for filename in device.ListDirectory(path):
if pattern.match(filename):
_WipeFileOrDir(device, posixpath.join(path, filename))
def _WipeFileOrDir(device, path):
if device.PathExists(path):
device.RunShellCommand(['rm', '-rf', path], check_return=True)
def _PushAndLaunchAdbReboot(device, target):
"""Pushes and launches the adb_reboot binary on the device.
Arguments:
device: The DeviceUtils instance for the device to which the adb_reboot
binary should be pushed.
target: The build target (example, Debug or Release) which helps in
locating the adb_reboot binary.
"""
logging.info('Will push and launch adb_reboot on %s', str(device))
# Kill if adb_reboot is already running.
device.KillAll('adb_reboot', blocking=True, timeout=2, quiet=True)
# Push adb_reboot
logging.info(' Pushing adb_reboot ...')
adb_reboot = os.path.join(host_paths.DIR_SOURCE_ROOT,
'out/%s/adb_reboot' % target)
device.PushChangedFiles([(adb_reboot, '/data/local/tmp/')])
# Launch adb_reboot
logging.info(' Launching adb_reboot ...')
device.RunShellCommand(
['/data/local/tmp/adb_reboot'],
check_return=True)
def _LaunchHostHeartbeat():
# Kill if existing host_heartbeat
KillHostHeartbeat()
# Launch a new host_heartbeat
logging.info('Spawning host heartbeat...')
subprocess.Popen([os.path.join(host_paths.DIR_SOURCE_ROOT,
'build/android/host_heartbeat.py')])
def KillHostHeartbeat():
ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
stdout, _ = ps.communicate()
matches = re.findall('\\n.*host_heartbeat.*', stdout)
for match in matches:
    logging.info('An instance of host_heartbeat is running... will kill')
pid = re.findall(r'(\S+)', match)[1]
subprocess.call(['kill', str(pid)])
def main():
# Recommended options on perf bots:
# --disable-network
# TODO(tonyg): We eventually want network on. However, currently radios
# can cause perfbots to drain faster than they charge.
# --min-battery-level 95
# Some perf bots run benchmarks with USB charging disabled which leads
# to gradual draining of the battery. We must wait for a full charge
# before starting a run in order to keep the devices online.
parser = argparse.ArgumentParser(
description='Provision Android devices with settings required for bots.')
parser.add_argument('-d', '--device', metavar='SERIAL',
help='the serial number of the device to be provisioned'
' (the default is to provision all devices attached)')
parser.add_argument('--adb-path',
help='Absolute path to the adb binary to use.')
parser.add_argument('--denylist-file', help='Device denylist JSON file.')
parser.add_argument('--phase', action='append', choices=_PHASES.ALL,
dest='phases',
help='Phases of provisioning to run. '
'(If omitted, all phases will be run.)')
parser.add_argument('--skip-wipe', action='store_true', default=False,
help="don't wipe device data during provisioning")
parser.add_argument('--reboot-timeout', metavar='SECS', type=int,
help='when wiping the device, max number of seconds to'
' wait after each reboot '
'(default: %s)' % _DEFAULT_TIMEOUTS.HELP_TEXT)
parser.add_argument('--min-battery-level', type=int, metavar='NUM',
help='wait for the device to reach this minimum battery'
' level before trying to continue')
parser.add_argument('--disable-location', action='store_true',
help='disable Google location services on devices')
parser.add_argument('--disable-mock-location', action='store_true',
default=False, help='Set ALLOW_MOCK_LOCATION to false')
parser.add_argument('--disable-network', action='store_true',
help='disable network access on devices')
parser.add_argument('--disable-java-debug', action='store_false',
dest='enable_java_debug', default=True,
help='disable Java property asserts and JNI checking')
parser.add_argument('--disable-system-chrome', action='store_true',
help='Disable the system chrome from devices.')
parser.add_argument('--remove-system-webview', action='store_true',
help='Remove the system webview from devices.')
parser.add_argument('-t', '--target', default='Debug',
help='the build target (default: %(default)s)')
parser.add_argument('-r', '--auto-reconnect', action='store_true',
help='push binary which will reboot the device on adb'
' disconnections')
parser.add_argument('--adb-key-files', type=str, nargs='+',
help='list of adb keys to push to device')
parser.add_argument('-v', '--verbose', action='count', default=1,
help='Log more information.')
parser.add_argument('--max-battery-temp', type=int, metavar='NUM',
help='Wait for the battery to have this temp or lower.')
parser.add_argument('--output-device-denylist',
help='Json file to output the device denylist.')
parser.add_argument('--chrome-specific-wipe', action='store_true',
help='only wipe chrome specific data during provisioning')
parser.add_argument('--emulators', action='store_true',
help='provision only emulators and ignore usb devices')
args = parser.parse_args()
constants.SetBuildType(args.target)
run_tests_helper.SetLogLevel(args.verbose)
devil_chromium.Initialize(adb_path=args.adb_path)
try:
return ProvisionDevices(args)
except (device_errors.DeviceUnreachableError, device_errors.NoDevicesError):
logging.exception('Unable to provision local devices.')
return exit_codes.INFRA
if __name__ == '__main__':
sys.exit(main())
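# Illustrative invocations (not part of the original script), using only flags
# defined above; the serial number is a placeholder:
#   ./provision_devices.py -d 0123456789ABCDEF --skip-wipe
#   ./provision_devices.py --phase properties --disable-network \
#       --min-battery-level 95 --target Release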
|
|
# Copyright (C) 2011 - Soren Hansen
# Copyright (C) 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import datetime
import getpass
import logging
import optparse
import sys
from reviewstats import utils
def sec_to_period_string(seconds):
    days = seconds // (3600 * 24)
    hours = (seconds // 3600) - (days * 24)
    minutes = (seconds // 60) - (days * 24 * 60) - (hours * 60)
return '%d days, %d hours, %d minutes' % (days, hours, minutes)
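# Hedged usage sketch (illustrative only, not part of the original module):
#   >>> sec_to_period_string(90061)   # 1 day + 1 hour + 1 minute + 1 second
#   '1 days, 1 hours, 1 minutes'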
def average_age(changes, key='age'):
if not changes:
return 0
total_seconds = 0
for change in changes:
total_seconds += change[key]
    avg_age = total_seconds // len(changes)
return sec_to_period_string(avg_age)
def quartile_age(changes, quartile=2, key='age'):
"""Quartile age
quartile 1: 25%
quartile 2: 50% (median) default
quartile 3: 75%
"""
if not changes:
return 0
changes = sorted(changes, key=lambda change: change[key])
    quartile_age = changes[len(changes) * quartile // 4][key]
return sec_to_period_string(quartile_age)
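# Hedged illustration (not part of the original module): with four changes the
# median (quartile=2) picks index 4 * 2 // 4 == 2, i.e. the third-oldest change.
#   >>> quartile_age([{'age': s} for s in (60, 120, 180, 240)])
#   '0 days, 0 hours, 3 minutes'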
def number_waiting_more_than(changes, seconds, key='age'):
index = 0
for change in changes:
if change[key] < seconds:
return index
index += 1
return len(changes)
def format_url(url, options):
return '%s%s%s' % ('<a href="' if options.html else '',
url,
('">%s</a>' % url) if options.html else '')
def gen_stats(projects, waiting_on_reviewer, waiting_on_submitter, options):
age_sorted = sorted(waiting_on_reviewer,
key=lambda change: change['age'], reverse=True)
age2_sorted = sorted(waiting_on_reviewer,
key=lambda change: change['age2'], reverse=True)
age3_sorted = sorted(waiting_on_reviewer,
key=lambda change: change['age3'], reverse=True)
result = []
result.append(('Projects', '%s' % [project['name']
for project in projects]))
stats = []
stats.append(('Total Open Reviews', '%d'
% (len(waiting_on_reviewer) + len(waiting_on_submitter))))
stats.append(('Waiting on Submitter', '%d' % len(waiting_on_submitter)))
stats.append(('Waiting on Reviewer', '%d' % len(waiting_on_reviewer)))
latest_rev_stats = []
latest_rev_stats.append(('Average wait time', '%s'
% (average_age(waiting_on_reviewer))))
latest_rev_stats.append(('1st quartile wait time', '%s'
% (quartile_age(waiting_on_reviewer,
quartile=1))))
latest_rev_stats.append(('Median wait time', '%s'
% (quartile_age(waiting_on_reviewer))))
latest_rev_stats.append(('3rd quartile wait time', '%s'
% (quartile_age(waiting_on_reviewer,
quartile=3))))
latest_rev_stats.append((
'Number waiting more than %i days' % options.waiting_more,
'%i' % (number_waiting_more_than(
age_sorted, 60 * 60 * 24 * options.waiting_more))))
stats.append(('Stats since the latest revision', latest_rev_stats))
last_without_nack_stats = []
last_without_nack_stats.append(('Average wait time', '%s'
% (average_age(waiting_on_reviewer,
key='age3'))))
last_without_nack_stats.append(('1st quartile wait time', '%s'
% (quartile_age(waiting_on_reviewer,
quartile=1,
key='age3'))))
last_without_nack_stats.append(('Median wait time', '%s'
% (quartile_age(waiting_on_reviewer,
key='age3'))))
last_without_nack_stats.append(('3rd quartile wait time', '%s'
% (quartile_age(waiting_on_reviewer,
quartile=3,
key='age3'))))
stats.append(('Stats since the last revision without -1 or -2 ',
last_without_nack_stats))
first_rev_stats = []
first_rev_stats.append(('Average wait time', '%s'
% (average_age(waiting_on_reviewer, key='age2'))))
first_rev_stats.append(('1st quartile wait time', '%s'
% (quartile_age(waiting_on_reviewer, quartile=1,
key='age2'))))
first_rev_stats.append(('Median wait time', '%s'
% (quartile_age(waiting_on_reviewer, key='age2'))))
first_rev_stats.append(('3rd quartile wait time', '%s'
% (quartile_age(waiting_on_reviewer, quartile=3,
key='age2'))))
stats.append(('Stats since the first revision (total age)',
first_rev_stats))
changes = []
for change in age_sorted[:options.longest_waiting]:
changes.append('%s %s (%s)' % (sec_to_period_string(change['age']),
format_url(change['url'], options),
change['subject']))
stats.append(('Longest waiting reviews (based on latest revision)',
changes))
changes = []
for change in age3_sorted[:options.longest_waiting]:
changes.append('%s %s (%s)' % (sec_to_period_string(change['age3']),
format_url(change['url'], options),
change['subject']))
stats.append(('Longest waiting reviews (based on oldest rev without -1 or'
' -2)', changes))
changes = []
for change in age2_sorted[:options.longest_waiting]:
changes.append('%s %s (%s)' % (sec_to_period_string(change['age2']),
format_url(change['url'], options),
change['subject']))
stats.append(('Oldest reviews (time since first revision)',
changes))
result.append(stats)
return result
def print_stats_txt(stats, f=sys.stdout):
def print_list_txt(l, level):
for item in l:
if not isinstance(item, list):
f.write('%s> ' % ('--' * level))
print_item_txt(item, level)
def print_item_txt(item, level):
if isinstance(item, str):
f.write('%s\n' % item.encode('utf-8'))
elif isinstance(item, list):
print_list_txt(item, level + 1)
elif isinstance(item, tuple):
f.write('%s: ' % item[0])
if isinstance(item[1], list):
f.write('\n')
print_item_txt(item[1], level)
else:
raise Exception('Unhandled type')
print_list_txt(stats, 0)
def print_stats_html(stats, f=sys.stdout):
def print_list_html(l, level):
if level:
f.write('<%s>\n' % ('ul' if level == 1 else 'ol'))
for item in l:
if level:
f.write('%s<li>' % (' ' * level))
print_item_html(item, level)
if level:
f.write('</li>\n')
if level:
f.write('</%s>\n' % ('ul' if level == 1 else 'ol'))
def print_item_html(item, level):
if isinstance(item, str):
f.write('%s' % item.encode('utf-8'))
elif isinstance(item, list):
print_list_html(item, level + 1)
elif isinstance(item, tuple):
f.write('%s: ' % item[0])
if isinstance(item[1], list):
f.write('\n')
print_item_html(item[1], level)
else:
raise Exception('Unhandled type')
f.write('<html>\n')
f.write('<head><title>Open Reviews for %s</title></head>\n' % stats[0][1])
print_list_html(stats, 0)
f.write('</html>\n')
def find_oldest_no_nack(change):
last_patch = None
for patch in reversed(change['patchSets']):
nacked = False
for review in patch.get('approvals', []):
if review['value'] in ('-1', '-2'):
nacked = True
break
if nacked:
break
last_patch = patch
return last_patch
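# Hedged note (not in the original module): the loop above walks patch sets from
# newest to oldest and stops at the first one carrying a -1/-2 review, so the
# returned patch set is the oldest one in the unbroken "no nack" run ending at
# the latest revision, or None if the latest revision itself was nacked.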
def main(argv=None):
if argv is None:
argv = sys.argv
optparser = optparse.OptionParser()
optparser.add_option(
'-p', '--project', default='projects/nova.json',
help='JSON file describing the project to generate stats for')
optparser.add_option(
'-a', '--all', action='store_true',
help='Generate stats across all known projects (*.json)')
optparser.add_option(
'-u', '--user', default=getpass.getuser(), help='gerrit user')
optparser.add_option(
'-k', '--key', default=None, help='ssh key for gerrit')
optparser.add_option(
'-s', '--stable', action='store_true',
help='Include stable branch commits')
optparser.add_option(
'-l', '--longest-waiting', type='int', default=5,
        help='Show the n changesets that have waited the longest')
optparser.add_option(
'-m', '--waiting-more', type='int', default=7,
        help='Show the number of changesets that have waited more than n days')
optparser.add_option(
'-H', '--html', action='store_true',
help='Use HTML output instead of plain text')
optparser.add_option(
'--server', default='review.opendev.org',
help='Gerrit server to connect to')
optparser.add_option(
'--debug', action='store_true', help='Show extra debug output')
optparser.add_option(
'--projects-dir', default='./projects',
help='Directory where to locate the project files')
optparser.add_option(
'--output', '-o', default='-',
help="Where to write output. - for stdout. The file will be appended"
" if it exists.")
options, args = optparser.parse_args()
logging.basicConfig(level=logging.ERROR)
if options.debug:
logging.root.setLevel(logging.DEBUG)
projects = utils.get_projects_info(options.project, options.all,
base_dir=options.projects_dir)
if not projects:
print("Please specify a project.")
sys.exit(1)
changes = utils.get_changes(projects, options.user, options.key,
only_open=True, server=options.server)
waiting_on_submitter = []
waiting_on_reviewer = []
now = datetime.datetime.utcnow()
now_ts = calendar.timegm(now.timetuple())
for change in changes:
if 'rowCount' in change:
continue
if not options.stable and 'stable' in change['branch']:
continue
if utils.is_workinprogress(change):
# Filter out WORKINPROGRESS
continue
latest_patch = change['patchSets'][-1]
if utils.patch_set_approved(latest_patch):
# Ignore patches already approved and just waiting to merge
continue
waiting_for_review = True
approvals = latest_patch.get('approvals', [])
approvals.sort(key=lambda a: a['grantedOn'])
for review in approvals:
if review['type'] not in ('CRVW', 'VRIF',
'Code-Review', 'Verified'):
continue
if review['value'] in ('-1', '-2'):
waiting_for_review = False
break
change['age'] = utils.get_age_of_patch(latest_patch, now_ts)
change['age2'] = utils.get_age_of_patch(change['patchSets'][0], now_ts)
patch = find_oldest_no_nack(change)
change['age3'] = utils.get_age_of_patch(patch, now_ts) if patch else 0
if waiting_for_review:
waiting_on_reviewer.append(change)
else:
waiting_on_submitter.append(change)
stats = gen_stats(projects, waiting_on_reviewer, waiting_on_submitter,
options)
if options.output == '-':
output = sys.stdout
else:
output = open(options.output, 'at')
try:
if options.html:
print_stats_html(stats, f=output)
else:
print_stats_txt(stats, f=output)
finally:
if output is not sys.stdout:
output.close()
|
|
"""
Convenience functions for creating a context and evaluating nodes
for a range of dates and collecting the results.
"""
from .context import MDFContext, NodeOrBuilderTimer, _profiling_is_enabled
from .nodes import MDFNode
from datetime import datetime
import numpy as np
import pandas as pa
import logging
import inspect
import sys
import atexit
import time
import multiprocessing.util
from multiprocessing import Process, Pipe
from matplotlib import cm
import matplotlib.pyplot as pp
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the "3d" projection used below
from .builders import CSVWriter, DataFrameBuilder, FinalValueCollector
try:
import win32gui
except ImportError:
win32gui = None
_logger = logging.getLogger(__name__)
def _create_context(date, values={}, ctx=None, **kwargs):
if ctx is None:
ctx = MDFContext(date)
ctx.set_date(date)
for node, value in values.items():
ctx.set_value(node, value)
for key, value in kwargs.items():
ctx.set_value(key, value)
return ctx
def _localize(dt, tzinfo):
"""
:return: A localized datetime
    :param dt: Naive datetime
:param tzinfo: A pytz or python time zone object
Attempts to localize the datetime using pytz if possible. Falls back to the builtin
timezone handling otherwise.
"""
try:
return tzinfo.localize(dt)
except AttributeError:
return dt.replace(tzinfo=tzinfo)
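# Hedged usage sketch (assumes pytz is installed; illustrative only):
#   >>> import pytz
#   >>> _localize(datetime(2020, 1, 1), pytz.utc)
#   datetime.datetime(2020, 1, 1, 0, 0, tzinfo=<UTC>)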
def run(date_range,
callbacks=[],
values={},
shifts=None,
filter=None,
ctx=None,
num_processes=0,
tzinfo=None,
**kwargs):
"""
creates a context and iterates through the dates in the
date range updating the context and calling the callbacks
for each date.
If the context needs some initial values set they can be
passed in the values dict or as kwargs.
For running the same calculation but with different inputs
shifts can be set to a list of dictionaries of (node -> value)
shifts.
If shifts is not None and num_processes is greater than 0 then that many
child processes will be spawned and the shifts will be processed in parallel.
    Any time-dependent nodes are reset before starting by setting the context's
    date to a minimal sentinel date (1900-01-01, localized if a time zone is known).
"""
unshifted_ctx = _create_context(date_range[0], values, ctx, **kwargs)
contexts = [unshifted_ctx]
callbacks_per_ctx = {}
generators_per_ctx = {}
profiling_enabled = _profiling_is_enabled()
# The time to use for resetting time dependent nodes.
    # Note: the strftime() method requires year >= 1900
adj_datetime_min = datetime(1900, 1, 1)
# Attempt to guess the tzinfo from the date range if one isn't specified explicitly
if tzinfo is None:
if isinstance(date_range, pa.DatetimeIndex):
# pa.DatetimeIndex has a tzinfo attribute
tzinfo = date_range.tzinfo
elif isinstance(date_range, (list, tuple)):
# In a list of dates, look at the first item
tzinfo = date_range[0].tzinfo
# ensure that any time-dependent nodes are reset before running through
# the date range by setting the current date on the context to the default.
unshifted_ctx.set_date(adj_datetime_min if tzinfo is None else _localize(adj_datetime_min, tzinfo))
if shifts:
if num_processes > 0:
return _run_multiprocess(date_range, callbacks, shifts, filter, num_processes, unshifted_ctx)
# get each shift set as a sorted list so when the shifts are
# applied they're always done in the same order
shift_sets = [sorted(x.items()) for x in shifts]
contexts = []
for shift_set in shift_sets:
# create the shifted context and add it to the list
shifted_ctx = unshifted_ctx.shift(shift_set)
contexts.append(shifted_ctx)
for ctx in contexts:
callbacks_per_ctx[ctx.get_id()] = list(callbacks)
for date in date_range:
unshifted_ctx.set_date(date)
for ctx in contexts:
# skip dates where the filter doesn't return True
if filter is not None:
if not ctx.get_value(filter):
_logger.debug("Skipping %s" % date)
continue
_logger.debug("Processing %s %s" % (date, ctx))
ctx_id = ctx.get_id()
# advance the generators
generators = generators_per_ctx.setdefault(ctx_id, [])
for callback, generator in generators:
with ctx._profile_builder(callback):
generator.send(date)
# call the callbacks
found_generator = False
callbacks = callbacks_per_ctx[ctx_id]
for i, callback in enumerate(callbacks):
with ctx._profile_builder(callback):
result = callback(date, ctx)
# if the result is a generator remove this callback from
# the list of callbacks and add the generator to be advanced
# next time
if inspect.isgenerator(result):
generator = result
callbacks[i] = None
generators.append((callback, generator))
found_generator = True
# advance to the first yield statement
generator.next()
# if any of the callbacks are actually generators remove them
if found_generator:
callbacks_per_ctx[ctx_id] = [x for x in callbacks if x is not None]
if shifts:
return contexts
return unshifted_ctx
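# Hedged usage sketch (node names such as my_node/my_input are hypothetical,
# not part of this module):
#   >>> import pandas as pa
#   >>> dates = pa.date_range("2016-01-01", "2016-01-31")
#   >>> builder = DataFrameBuilder([my_node])
#   >>> ctx = run(dates, callbacks=[builder])
#   >>> df = builder.dataframe
# With shifts=[{my_input: 1.0}, {my_input: 2.0}] the same call would instead
# return one shifted context per shift set.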
def _start_remote_server(argv, pipe):
"""
function for use with multiprocessing.Process object for creating
a Pyro server
"""
from .remote import start_server
start_server(pipe=pipe)
def _run_multiprocess(date_range, callbacks, shifts, filter, num_processes, unshifted_ctx):
"""
process each context in a pool of processes - called from run
"""
from .remote import Pyro4, SerializedContext, get_daemon, messaging
import select
for callback in callbacks:
if not hasattr(callback, "combine_result"):
raise Exception("All callback objects must have a 'combine_result' method")
num_shifts = len(shifts)
num_processes = min(num_processes, num_shifts)
# batch the shifts into a set per-process
i = 0
shifts_per_process = {}
for shift_set in shifts:
shifts_per_process.setdefault(i, []).append(shift_set)
i = (i + 1) % num_processes
def start_proc_thread_func():
# start a child process running a pyro server and get the uri
parent_conn, child_conn = Pipe()
process = Process(target=_start_remote_server, args=(sys.argv, child_conn))
process.daemon = True
process.start()
timeout = time.clock() + 60
while process.is_alive() and time.clock() < timeout:
if parent_conn.poll(1):
break
else:
raise Exception("failed to start sub-process")
uri = parent_conn.recv()
server = Pyro4.Proxy(uri)
server._pyroOneway.add("shutdown")
return process, server
    # multiprocessing expects sys.executable to accept the --multiprocessing-fork
    # option when sys.frozen is True, but for us it's always python.exe even if frozen is set
sys_frozen = getattr(sys, "frozen", False)
sys.frozen = False
try:
# create the pool of processes and pyro servers
promises = [Pyro4.Future(start_proc_thread_func)() for _ in range(num_processes)]
processes = map(lambda promise: promise.value, promises)
finally:
sys.frozen = sys_frozen
try:
# serialize the context once as it could be quite large
serialized_context = SerializedContext(unshifted_ctx)
# start running in each process
batches = []
for i, (process, remote_api) in enumerate(processes):
shift_set = shifts_per_process[i]
batch = Pyro4.batch(remote_api)
batch.run(date_range,
callbacks=callbacks,
shifts=shift_set,
filter=filter,
ctx=serialized_context)
batches.append(batch)
# kick off all the asynchronous runs
future_results = []
for batch in batches:
future_results.append(batch(async=True))
# poll the daemon for this process in case anything is using it while
# waiting for the results
while True:
for batch in future_results:
if not batch.ready:
break
else:
break
# poll the daemon for this process while we wait for results
daemon = get_daemon()
read_sockets, unused, unused = select.select(daemon.sockets, [], [], 0.02)
if read_sockets:
daemon.events(read_sockets)
# poll the remote message loop
messaging.poll_messages()
# everything's done, get the results
try:
results = [x.value.next() for x in future_results]
except:
_logger.error("".join(Pyro4.util.getPyroTraceback()))
raise
# re-order the results into the order as the original shifts
ordered_results = []
for i, shift_set in enumerate(shifts):
proc_index = i % num_processes
remote_ctxs, remote_cbs = results[proc_index]
ordered_results.append((remote_ctxs.pop(0), remote_cbs, shift_set))
# combine the results into the local callback objects
shifted_ctxs = []
for remote_ctx, remote_callbacks, shift_set in ordered_results:
local_ctx = unshifted_ctx.shift(shift_set)
shifted_ctxs.append(local_ctx)
with remote_ctx:
for local_cb, remote_cb in zip(callbacks, remote_callbacks):
local_cb.combine_result(remote_cb, remote_ctx, local_ctx)
return shifted_ctxs
finally:
# shutdown the child processes
for process, remote_api in processes:
with remote_api:
remote_api.shutdown()
@atexit.register
def _multiprocessing_exit():
"""
The atexit function registered by multiprocessing.util
is a bit buggy so this one's used instead
"""
multiprocessing.util._exiting = True
multiprocessing.util._run_finalizers(0)
for p in multiprocessing.util.active_children():
if p._daemonic:
p._popen.terminate()
for p in multiprocessing.util.active_children():
p.join()
multiprocessing.util._run_finalizers()
def to_csv(fh, date_range, nodes, columns=None, values={}, filter=None, ctx=None, tzinfo=None, **kwargs):
"""
evaluates a list of nodes for each date in date_range
and writes the results to a csv file.
"""
writer = CSVWriter(fh, nodes, columns)
return run(date_range, [writer], values=values, filter=filter, ctx=ctx, tzinfo=tzinfo, **kwargs)
def build_dataframe(date_range, nodes, values={}, filter=None, ctx=None, tzinfo=None, **kwargs):
"""
evaluates a list of nodes for each date in date_range
and returns a dataframe of results
"""
builder = DataFrameBuilder(nodes)
run(date_range, [builder], values=values, filter=filter, ctx=ctx, tzinfo=tzinfo, **kwargs)
return builder.dataframe
def plot(date_range, nodes, values={}, filter=None, ctx=None, tzinfo=None, show=True, plot_args={}, **kwargs):
"""
evaluates a list of nodes for each date in date_range
and plots the results using matplotlib.
"""
builder = DataFrameBuilder(nodes)
run(date_range, [builder], values=values, filter=filter, ctx=ctx, tzinfo=tzinfo, **kwargs)
builder.plot(show=show, **plot_args)
def get_final_values(date_range, nodes, values={}, filter=None, ctx=None, tzinfo=None, **kwargs):
"""
evaluates a list of nodes for each date in date_range and
returns a list of final values in the same order as nodes.
"""
return_as_list = True
if isinstance(nodes, MDFNode):
nodes = [nodes]
return_as_list = False
collector = FinalValueCollector(nodes)
ctx = run(date_range, [collector], values=values, filter=filter, ctx=ctx, tzinfo=tzinfo, **kwargs)
values = collector.get_values(ctx)
if return_as_list:
return values
return values[0]
def scenario(date_range,
result_node,
x_node, x_shifts,
y_node, y_shifts,
values={}, filter=None, ctx=None, dtype=float, tzinfo=None, **kwargs):
"""
evaluates a single result_node for each date in date_range and gets
its final value for each shift in x_shifts and y_shifts.
x_shifts and y_shifts are values for x_node and y_node respectively.
result_node should evaluate to a single float, and the result is a 2d nparray
"""
collector = FinalValueCollector([result_node])
# build a list of shifts from the x and y shifts
shifts = []
for y_value in y_shifts:
for x_value in x_shifts:
shifts.append({
x_node: x_value,
y_node: y_value
})
# collect the values for result_node for all the shifts
contexts = run(date_range,
[collector],
shifts=shifts,
values=values,
filter=filter,
ctx=ctx,
tzinfo=tzinfo,
**kwargs)
# build a numpy array of the results
array = np.ndarray(shape=(len(y_shifts), len(x_shifts)), dtype=dtype)
ctx_iter = iter(contexts)
for y in range(len(y_shifts)):
for x in range(len(x_shifts)):
ctx = ctx_iter.next()
array[y][x] = collector.get_values(ctx)[0]
return array
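# Hedged note (illustrative): the returned array has shape
# (len(y_shifts), len(x_shifts)); array[i][j] holds the final value of
# result_node with y_node shifted to y_shifts[i] and x_node to x_shifts[j].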
def plot_surface(date_range,
result_node,
x_node, x_shifts,
y_node, y_shifts,
values={}, filter=None, ctx=None, dtype=float, tzinfo=None, **kwargs):
"""
evaluates a single result_node for each date in date_range and gets
its final value for each shift in x_shifts and y_shifts.
x_shifts and y_shifts are values for x_node and y_node respectively.
result_node should evaluate to a single float.
The results are plotted as a 3d graph and returned as a 2d numpy array.
"""
results = scenario(date_range,
result_node,
x_node, x_shifts,
y_node, y_shifts,
values=values,
filter=filter,
ctx=ctx,
tzinfo=tzinfo,
dtype=dtype,
**kwargs)
try:
X, Y = np.meshgrid(x_shifts, y_shifts)
except ValueError:
X, Y = np.meshgrid(range(len(x_shifts)), range(len(y_shifts)))
fig = pp.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_surface(X, Y, results, rstride=1, cstride=1, cmap=cm.jet)
ax.set_xlabel(x_node.name)
ax.set_ylabel(y_node.name)
ax.set_zlabel(result_node.name)
pp.show()
return results
def heatmap(date_range,
result_node,
x_node, x_shifts,
y_node, y_shifts,
values={}, filter=None, ctx=None, dtype=float, tzinfo=None, **kwargs):
"""
evaluates a single result_node for each date in date_range and gets
its final value for each shift in x_shifts and y_shifts.
x_shifts and y_shifts are values for x_node and y_node respectively.
result_node should evaluate to a single float.
The results are plotted as a heat map and returned as a 2d numpy array.
"""
results = scenario(date_range,
result_node,
x_node, x_shifts,
y_node, y_shifts,
values=values,
filter=filter,
ctx=ctx,
tzinfo=tzinfo,
dtype=dtype,
**kwargs)
try:
X, Y = map(float, x_shifts), map(float, y_shifts)
except ValueError:
X, Y = range(len(x_shifts)), range(len(y_shifts))
pp.figure()
pp.xlabel(x_node.name)
pp.xticks(range(len(X)), X)
pp.ylabel(y_node.name)
pp.yticks(range(len(Y)), Y)
pp.imshow(results, interpolation="bicubic")
pp.grid(True)
pp.show()
return results
|
|
#!/usr/bin/env python
"""A keyword index of client machines.
An index of client machines, associating likely identifiers to client IDs.
"""
import functools
import operator
from typing import Mapping, Iterable, Sequence
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.util import precondition
from grr_response_server import data_store
from grr_response_server.rdfvalues import objects as rdf_objects
def GetClientIDsForHostnames(
hostnames: Iterable[str]) -> Mapping[str, Sequence[str]]:
"""Gets all client_ids for a given list of hostnames or FQDNS.
Args:
hostnames: A list of hostnames / FQDNs.
Returns:
A dict with a list of all known GRR client_ids for each hostname.
"""
index = ClientIndex()
keywords = set()
for hostname in hostnames:
if hostname.startswith("host:"):
keywords.add(hostname)
else:
keywords.add("host:%s" % hostname)
results = index.ReadClientPostingLists(keywords)
result = {}
for keyword, hits in results.items():
result[keyword[len("host:"):]] = hits
return result
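# Hedged usage sketch (hostname and client id are illustrative):
#   >>> GetClientIDsForHostnames(["host1.example.com"])
#   {"host1.example.com": ["C.1234567890abcdef"]}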
class ClientIndex(object):
"""An index of client machines."""
START_TIME_PREFIX = "start_date:"
START_TIME_PREFIX_LEN = len(START_TIME_PREFIX)
def _NormalizeKeyword(self, keyword):
return str(keyword).lower()
def _AnalyzeKeywords(self, keywords):
"""Extracts a start time from a list of keywords if present."""
start_time = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
180, rdfvalue.DAYS)
filtered_keywords = []
for k in keywords:
if k.startswith(self.START_TIME_PREFIX):
try:
start_time = rdfvalue.RDFDatetime.FromHumanReadable(
k[self.START_TIME_PREFIX_LEN:])
except ValueError:
pass
else:
filtered_keywords.append(k)
if not filtered_keywords:
filtered_keywords.append(".")
return start_time, filtered_keywords
def LookupClients(self, keywords: Iterable[str]) -> Sequence[str]:
"""Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable.
"""
if isinstance(keywords, str):
raise ValueError(
"Keywords should be an iterable, not a string (got %s)." % keywords)
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
keyword_map = data_store.REL_DB.ListClientsForKeywords(
list(map(self._NormalizeKeyword, filtered_keywords)),
start_time=start_time)
relevant_set = functools.reduce(operator.and_, map(set,
keyword_map.values()))
return sorted(relevant_set)
def ReadClientPostingLists(
self, keywords: Iterable[str]) -> Mapping[str, Sequence[str]]:
"""Looks up all clients associated with any of the given keywords.
Args:
keywords: A list of keywords we are interested in.
Returns:
A dict mapping each keyword to a list of matching clients.
"""
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
return data_store.REL_DB.ListClientsForKeywords(
filtered_keywords, start_time=start_time)
def AnalyzeClient(self, client: rdf_objects.ClientSnapshot) -> Sequence[str]:
"""Finds the client_id and keywords for a client.
Args:
client: A ClientSnapshot object record to find keywords for.
Returns:
      A set of keywords related to the client.
"""
# Start with a universal keyword, used to find all clients.
#
# TODO(user): Remove the universal keyword once we have a better way
# to do this, i.e., once we have a storage library which can list all
# clients directly.
keywords = set(["."])
def TryAppend(prefix, keyword):
precondition.AssertType(prefix, str)
precondition.AssertType(keyword, str)
if keyword:
keyword_string = self._NormalizeKeyword(keyword)
keywords.add(keyword_string)
if prefix:
keywords.add(prefix + ":" + keyword_string)
def TryAppendPrefixes(prefix, keyword, delimiter):
TryAppend(prefix, keyword)
segments = keyword.split(delimiter)
for i in range(1, len(segments)):
TryAppend(prefix, delimiter.join(segments[0:i]))
return len(segments)
def TryAppendIP(ip):
TryAppend("ip", ip)
      # IPv4?
if TryAppendPrefixes("ip", str(ip), ".") == 4:
return
      # IPv6?
TryAppendPrefixes("ip", str(ip), ":")
def TryAppendMac(mac):
TryAppend("mac", mac)
if len(mac) == 12:
# If looks like a mac address without ":" symbols, also add the keyword
# with them.
TryAppend("mac", ":".join([mac[i:i + 2] for i in range(0, 12, 2)]))
TryAppend("host", client.knowledge_base.fqdn)
host = client.knowledge_base.fqdn.split(".", 1)[0]
TryAppendPrefixes("host", host, "-")
TryAppendPrefixes("host", client.knowledge_base.fqdn, ".")
TryAppend("", client.knowledge_base.os)
TryAppend("", client.Uname())
TryAppend("", client.os_release)
TryAppend("", client.os_version)
TryAppend("", client.kernel)
TryAppend("", client.arch)
TryAppend("serial_number", client.hardware_info.serial_number)
TryAppend("system_uuid", client.hardware_info.system_uuid)
kb = client.knowledge_base
if kb:
for user in kb.users:
TryAppend("user", user.username)
TryAppend("", user.full_name)
if user.full_name:
for name in user.full_name.split():
# full_name often includes nicknames and similar, wrapped in
# punctuation, e.g. "Thomas 'TJ' Jones". We remove the most common
# wrapping characters.
TryAppend("", name.strip("\"'()"))
for ip in client.GetIPAddresses():
TryAppendIP(ip)
for mac in client.GetMacAddresses():
TryAppendMac(mac)
client_info = client.startup_info.client_info
if client_info:
TryAppend("client", client_info.client_name)
TryAppend("client", str(client_info.client_version))
if client_info.labels:
for label in client_info.labels:
TryAppend("label", label)
return keywords
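  # Hedged illustration (values are hypothetical): for a client whose fqdn is
  # "host1.example.com" and whose OS is "Linux", AnalyzeClient emits keywords
  # such as ".", "host1.example.com", "host:host1.example.com", "host:host1"
  # and "linux" (all keywords are lower-cased).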
def AddClient(self, client: rdf_objects.ClientSnapshot):
"""Adds a client to the index.
Args:
client: A ClientSnapshot object record.
"""
keywords = self.AnalyzeClient(client)
keywords.add(self._NormalizeKeyword(client.client_id))
data_store.REL_DB.AddClientKeywords(client.client_id, keywords)
def AddClientLabels(self, client_id: str, labels: Iterable[str]):
precondition.AssertIterableType(labels, str)
keywords = set()
for label in labels:
keyword_string = self._NormalizeKeyword(label)
keywords.add(keyword_string)
keywords.add("label:" + keyword_string)
data_store.REL_DB.AddClientKeywords(client_id, keywords)
def RemoveAllClientLabels(self, client_id: str):
"""Removes all labels for a given client.
Args:
client_id: The client_id.
"""
labels_to_remove = set(
[l.name for l in data_store.REL_DB.ReadClientLabels(client_id)])
self.RemoveClientLabels(client_id, labels_to_remove)
def RemoveClientLabels(self, client_id: str, labels: Iterable[str]):
"""Removes all labels for a given client.
Args:
client_id: The client_id.
labels: A list of labels to remove.
"""
for label in labels:
keyword = self._NormalizeKeyword(label)
# This might actually delete a keyword with the same name as the label (if
# there is one).
data_store.REL_DB.RemoveClientKeyword(client_id, keyword)
data_store.REL_DB.RemoveClientKeyword(client_id, "label:%s" % keyword)
|
|
#!/usr/bin/env python
"""Command-line interface to gen_client."""
import contextlib
import json
import logging
import os
import pkgutil
import sys
from google.apputils import appcommands
import gflags as flags
from apitools.base.py import exceptions
from apitools.gen import gen_client_lib
from apitools.gen import util
flags.DEFINE_string(
'infile', '',
'Filename for the discovery document. Mutually exclusive with '
'--discovery_url.')
flags.DEFINE_string(
'discovery_url', '',
'URL (or "name.version") of the discovery document to use. '
'Mutually exclusive with --infile.')
flags.DEFINE_string(
'base_package',
'apitools.base.py',
'Base package path of apitools (defaults to '
'apitools.base.py)'
)
flags.DEFINE_string(
'outdir', '',
'Directory name for output files. (Defaults to the API name.)')
flags.DEFINE_boolean(
'overwrite', False,
'Only overwrite the output directory if this flag is specified.')
flags.DEFINE_string(
'root_package', '',
'Python import path for where these modules should be imported from.')
flags.DEFINE_multistring(
'strip_prefix', [],
'Prefix to strip from type names in the discovery document. (May '
'be specified multiple times.)')
flags.DEFINE_string(
'api_key', None,
'API key to use for API access.')
flags.DEFINE_string(
'client_json', None,
'Use the given file downloaded from the dev. console for client_id '
'and client_secret.')
flags.DEFINE_string(
'client_id', '1042881264118.apps.googleusercontent.com',
'Client ID to use for the generated client.')
flags.DEFINE_string(
'client_secret', 'x_Tw5K8nnjoRAqULM9PFAC2b',
'Client secret for the generated client.')
flags.DEFINE_multistring(
'scope', [],
'Scopes to request in the generated client. May be specified more than '
'once.')
flags.DEFINE_string(
'user_agent', '',
'User agent for the generated client. Defaults to <api>-generated/0.1.')
flags.DEFINE_boolean(
'generate_cli', True, 'If True, a CLI is also generated.')
flags.DEFINE_list(
'unelidable_request_methods', [],
'Full method IDs of methods for which we should NOT try to elide '
'the request type. (Should be a comma-separated list.)')
flags.DEFINE_boolean(
'experimental_capitalize_enums', False,
'Dangerous: attempt to rewrite enum values to be uppercase.')
flags.DEFINE_enum(
'experimental_name_convention', util.Names.DEFAULT_NAME_CONVENTION,
util.Names.NAME_CONVENTIONS,
'Dangerous: use a particular style for generated names.')
flags.DEFINE_boolean(
'experimental_proto2_output', False,
'Dangerous: also output a proto2 message file.')
FLAGS = flags.FLAGS
flags.RegisterValidator(
'infile', lambda i: not (i and FLAGS.discovery_url),
'Cannot specify both --infile and --discovery_url')
flags.RegisterValidator(
'discovery_url', lambda i: not (i and FLAGS.infile),
'Cannot specify both --infile and --discovery_url')
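# Hedged example invocation (discovery document name and output directory are
# illustrative; 'client' is the appcommands subcommand registered below):
#   gen_client --discovery_url=storage.v1 --outdir=storage_v1 --overwrite client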
def _CopyLocalFile(filename):
with contextlib.closing(open(filename, 'w')) as out:
src_data = pkgutil.get_data(
'apitools.base.py', filename)
if src_data is None:
raise exceptions.GeneratedClientError(
'Could not find file %s' % filename)
out.write(src_data)
_DISCOVERY_DOC = None
def _GetDiscoveryDocFromFlags():
"""Get the discovery doc from flags."""
global _DISCOVERY_DOC # pylint: disable=global-statement
if _DISCOVERY_DOC is None:
if FLAGS.discovery_url:
try:
discovery_doc = util.FetchDiscoveryDoc(FLAGS.discovery_url)
except exceptions.CommunicationError:
raise exceptions.GeneratedClientError(
'Could not fetch discovery doc')
else:
infile = os.path.expanduser(FLAGS.infile) or '/dev/stdin'
discovery_doc = json.load(open(infile))
_DISCOVERY_DOC = discovery_doc
return _DISCOVERY_DOC
def _GetCodegenFromFlags():
"""Create a codegen object from flags."""
discovery_doc = _GetDiscoveryDocFromFlags()
names = util.Names(
FLAGS.strip_prefix,
FLAGS.experimental_name_convention,
FLAGS.experimental_capitalize_enums)
if FLAGS.client_json:
try:
with open(FLAGS.client_json) as client_json:
f = json.loads(client_json.read())
web = f.get('installed', f.get('web', {}))
client_id = web.get('client_id')
client_secret = web.get('client_secret')
except IOError:
raise exceptions.NotFoundError(
'Failed to open client json file: %s' % FLAGS.client_json)
else:
client_id = FLAGS.client_id
client_secret = FLAGS.client_secret
if not client_id:
logging.warning('No client ID supplied')
client_id = ''
if not client_secret:
logging.warning('No client secret supplied')
client_secret = ''
client_info = util.ClientInfo.Create(
discovery_doc, FLAGS.scope, client_id, client_secret,
FLAGS.user_agent, names, FLAGS.api_key)
outdir = os.path.expanduser(FLAGS.outdir) or client_info.default_directory
if os.path.exists(outdir) and not FLAGS.overwrite:
raise exceptions.ConfigurationValueError(
'Output directory exists, pass --overwrite to replace '
'the existing files.')
if not os.path.exists(outdir):
os.makedirs(outdir)
root_package = FLAGS.root_package or util.GetPackage(outdir)
return gen_client_lib.DescriptorGenerator(
discovery_doc, client_info, names, root_package, outdir,
base_package=FLAGS.base_package,
generate_cli=FLAGS.generate_cli,
use_proto2=FLAGS.experimental_proto2_output,
unelidable_request_methods=FLAGS.unelidable_request_methods)
# TODO(craigcitro): Delete this if we don't need this functionality.
def _WriteBaseFiles(codegen):
with util.Chdir(codegen.outdir):
_CopyLocalFile('app2.py')
_CopyLocalFile('base_api.py')
_CopyLocalFile('base_cli.py')
_CopyLocalFile('credentials_lib.py')
_CopyLocalFile('exceptions.py')
def _WriteIntermediateInit(codegen):
with open('__init__.py', 'w') as out:
codegen.WriteIntermediateInit(out)
def _WriteProtoFiles(codegen):
with util.Chdir(codegen.outdir):
with open(codegen.client_info.messages_proto_file_name, 'w') as out:
codegen.WriteMessagesProtoFile(out)
with open(codegen.client_info.services_proto_file_name, 'w') as out:
codegen.WriteServicesProtoFile(out)
def _WriteGeneratedFiles(codegen):
if codegen.use_proto2:
_WriteProtoFiles(codegen)
with util.Chdir(codegen.outdir):
with open(codegen.client_info.messages_file_name, 'w') as out:
codegen.WriteMessagesFile(out)
with open(codegen.client_info.client_file_name, 'w') as out:
codegen.WriteClientLibrary(out)
if FLAGS.generate_cli:
with open(codegen.client_info.cli_file_name, 'w') as out:
codegen.WriteCli(out)
os.chmod(codegen.client_info.cli_file_name, 0o755)
def _WriteInit(codegen):
with util.Chdir(codegen.outdir):
with open('__init__.py', 'w') as out:
codegen.WriteInit(out)
def _WriteSetupPy(codegen):
with open('setup.py', 'w') as out:
codegen.WriteSetupPy(out)
class GenerateClient(appcommands.Cmd):
"""Driver for client code generation."""
def Run(self, _):
"""Create a client library."""
codegen = _GetCodegenFromFlags()
if codegen is None:
logging.error('Failed to create codegen, exiting.')
return 128
_WriteGeneratedFiles(codegen)
_WriteInit(codegen)
class GeneratePipPackage(appcommands.Cmd):
"""Generate a client as a pip-installable tarball."""
def Run(self, _):
"""Create a client in a pip package."""
discovery_doc = _GetDiscoveryDocFromFlags()
package = discovery_doc['name']
original_outdir = os.path.expanduser(FLAGS.outdir)
FLAGS.outdir = os.path.join(
FLAGS.outdir, 'apitools/clients/%s' % package)
FLAGS.root_package = 'apitools.clients.%s' % package
FLAGS.generate_cli = False
codegen = _GetCodegenFromFlags()
if codegen is None:
logging.error('Failed to create codegen, exiting.')
return 1
_WriteGeneratedFiles(codegen)
_WriteInit(codegen)
with util.Chdir(original_outdir):
_WriteSetupPy(codegen)
with util.Chdir('apitools'):
_WriteIntermediateInit(codegen)
with util.Chdir('clients'):
_WriteIntermediateInit(codegen)
class GenerateProto(appcommands.Cmd):
"""Generate just the two proto files for a given API."""
def Run(self, _):
"""Create proto definitions for an API."""
codegen = _GetCodegenFromFlags()
_WriteProtoFiles(codegen)
# pylint:disable=invalid-name
def run_main():
"""Function to be used as setuptools script entry point."""
# Put the flags for this module somewhere the flags module will look
# for them.
# pylint:disable=protected-access
new_name = flags._GetMainModule()
sys.modules[new_name] = sys.modules['__main__']
for flag in FLAGS.FlagsByModuleDict().get(__name__, []):
FLAGS._RegisterFlagByModule(new_name, flag)
for key_flag in FLAGS.KeyFlagsByModuleDict().get(__name__, []):
FLAGS._RegisterKeyFlagForModule(new_name, key_flag)
# pylint:enable=protected-access
# Now set __main__ appropriately so that appcommands will be
# happy.
sys.modules['__main__'] = sys.modules[__name__]
appcommands.Run()
sys.modules['__main__'] = sys.modules.pop(new_name)
def main(_):
appcommands.AddCmd('client', GenerateClient)
appcommands.AddCmd('pip_package', GeneratePipPackage)
appcommands.AddCmd('proto', GenerateProto)
if __name__ == '__main__':
appcommands.Run()
|
|
from builtins import str
from builtins import range
import calendar
import json
import re
import traceback
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable
from datetime import datetime
import six
from jsonpath_rw import parse
from .exceptions import FuncException, StopCCEIteration, QuitJobError
from .pipemgr import PipeManager
from ..common import util, log
_logger = log.get_cc_logger()
def regex_search(pattern, source, flags=0):
"""Search substring in source through regex"""
if not isinstance(source, six.string_types):
_logger.warning('Cannot apply regex search on non-string: %s', type(source))
return {}
try:
matches = re.search(pattern=pattern, string=source, flags=flags)
except Exception:
_logger.warning('Unable to search pattern=%s and flags=%s in string, error=%s',
pattern, flags, traceback.format_exc())
return {}
else:
return matches.groupdict() if matches else {}
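# Hedged usage sketch (pattern and source are illustrative):
#   >>> regex_search(r'(?P<user>\w+)@(?P<host>[\w.]+)', 'alice@example.com')
#   {'user': 'alice', 'host': 'example.com'}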
def regex_match(pattern, source, flags=0):
"""
Determine whether a string is match a regex pattern.
:param pattern: regex pattern
:param source: candidate to match regex
:param flags: flags for regex match
:return: `True` if candidate match pattern else `False`
"""
try:
return re.match(pattern, source, flags) is not None
except Exception:
_logger.warning(
'Unable to match source with pattern=%s, cause=%s',
pattern,
traceback.format_exc()
)
return False
def regex_not_match(pattern, source, flags=0):
"""
Determine whether a string is not match a regex pattern.
:param pattern: regex expression
:param source: candidate to match regex
:param flags: flags for regex match
:return: `True` if candidate not match pattern else `False`
"""
return not regex_match(pattern, source, flags)
def json_path(source, json_path_expr):
""" Extract value from string with JSONPATH expression.
:param json_path_expr: JSONPATH expression
:param source: string to extract value
:return: A `list` contains all values extracted
"""
if not source:
_logger.debug('source to apply JSONPATH is empty, return empty.')
return ''
if isinstance(source, six.string_types):
_logger.debug(
            'source is expected to be JSON, not %s. Attempting to'
            ' convert it to JSON',
type(source)
)
try:
source = json.loads(source)
except Exception as ex:
_logger.warning(
'Unable to load JSON from source: %s. '
'Attempt to apply JSONPATH "%s" on source directly.',
ex,
json_path_expr
)
try:
expression = parse(json_path_expr)
results = [match.value for match in expression.find(source)]
_logger.debug(
'Got %s elements extracted with JSONPATH expression "%s"',
len(results), json_path_expr
)
if not results:
return ''
        return (results[0] or '') if len(results) == 1 else results
except Exception as ex:
_logger.warning(
'Unable to apply JSONPATH expression "%s" on source,'
' message=%s cause=%s',
json_path_expr,
ex,
traceback.format_exc()
)
return ''
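# Hedged usage sketch (payloads are illustrative):
#   >>> json_path('{"items": [{"id": 1}, {"id": 2}]}', '$.items[*].id')
#   [1, 2]
#   >>> json_path('{"items": [{"id": 1}]}', '$.items[*].id')
#   1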
def splunk_xml(candidates,
time=None,
index=None,
host=None,
source=None,
sourcetype=None):
""" Wrap a event with splunk xml format.
:param candidates: data used to wrap as event
:param time: timestamp which must be empty or a valid float
:param index: index name for event
:param host: host for event
:param source: source for event
:param sourcetype: sourcetype for event
    :return: A wrapped event in Splunk XML format.
"""
if not isinstance(candidates, (list, tuple)):
candidates = [candidates]
time = time or None
if time:
try:
time = float(time)
except ValueError:
_logger.warning(
'"time" %s is expected to be a float, set "time" to None',
time
)
time = None
xml_events = util.format_events(
candidates,
time=time,
index=index,
host=host,
source=source,
sourcetype=sourcetype
)
_logger.info(
"[%s] events are formated as splunk stream xml",
len(candidates)
)
return xml_events
def std_output(candidates):
""" Output a string to stdout.
    :param candidates: List of strings to output to stdout, or a single string.
"""
if isinstance(candidates, six.string_types):
candidates = [candidates]
all_str = True
for candidate in candidates:
if all_str and not isinstance(candidate, six.string_types):
all_str = False
_logger.debug(
                'The data to print has type "%s" rather than %s',
type(candidate),
str(six.string_types)
)
try:
candidate = json.dumps(candidate)
            except Exception:
                _logger.exception('The data to print has type "%s"'
                                  ' rather than %s',
type(candidate),
str(six.string_types))
if not PipeManager().write_events(candidate):
raise FuncException('Fail to output data to stdout. The event'
' writer is stopped or encountered exception')
_logger.debug('Writing events to stdout finished.')
return True
def _parse_json(source, json_path_expr=None):
if not source:
_logger.debug('Unable to parse JSON from empty source, return empty.')
return {}
if json_path_expr:
_logger.debug(
'Try to extract JSON from source with JSONPATH expression: %s, ',
json_path_expr
)
source = json_path(source, json_path_expr)
elif isinstance(source, six.string_types):
source = json.loads(source)
return source
def json_empty(source, json_path_expr=None):
"""Check whether a JSON is empty, return True only if the JSON to
check is a valid JSON and is empty.
    :param json_path_expr: An optional JSONPATH expression
:param source: source to extract JSON
:return: `True` if the result JSON is empty
"""
try:
data = _parse_json(source, json_path_expr)
if isinstance(data, (list, tuple)):
return all(len(ele) == 0 for ele in data)
return len(data) == 0
except Exception as ex:
_logger.warning(
'Unable to determine whether source is json_empty, treat it as '
'not json_empty: %s', ex
)
return False
def json_not_empty(source, json_path_expr=None):
"""Check if a JSON object is not empty, return True only if the
source is a valid JSON object and the value leading by
json_path_expr is empty.
:param json_path_expr: A optional JSONPATH expression
:param source: source to extract JSON
:return: `True` if the result JSON is not empty
"""
try:
data = _parse_json(source, json_path_expr)
if isinstance(data, (list, tuple)):
return any(len(ele) > 0 for ele in data)
return len(data) > 0
except Exception as ex:
_logger.warning(
'Unable to determine whether source is json_not_empty, '
'treat it as not json_not_empty: %s',
ex
)
return False
def set_var(value):
"""Set a variable which name should be specified in `output` with value"""
return value
def _fix_microsecond_format(fmt, micros):
"""
    Implement %Nf so that the user can control how many digits of the
    microsecond value are kept.
    If the number of % characters is even, don't do the replacement.
    If N is not in [1-6], don't do the replacement.
    The microsecond value is zero-padded to 6 digits before the first N
    digits are taken.
"""
micros = str(micros).zfill(6)
def do_replacement(x, micros):
if int(x.group(1)) in range(1, 7) and len(x.group()) % 2:
return x.group().replace('%' + x.group(1) + 'f',
micros[:min(int(x.group(1)), len(micros))])
return x.group()
return re.sub(r'%+([1-6])f', lambda x: do_replacement(x, micros), fmt)
def _fix_timestamp_format(fmt, timestamp):
"""Replace '%s' in time format with timestamp if the number
of '%' before 's' is odd."""
return re.sub(
r'%+s',
(
lambda x:
x.group() if len(x.group()) % 2 else x.group().replace('%s',
timestamp)
),
fmt
)
def time_str2str(date_string, from_format, to_format):
"""Convert a date string with given format to another format. Return
the original date string if it's type is not string or failed to parse or
convert it with format."""
if not isinstance(date_string, six.string_types):
_logger.warning(
'"date_string" must be a string type, found %s,'
' return the original date_string directly.',
type(date_string)
)
return date_string
try:
dt = datetime.strptime(date_string, from_format)
# Need to pre process '%s' in to_format here because '%s' is not
# available on all platforms. Even on supported platforms, the
# result may be different because it depends on implementation on each
# platform. Replace it with UTC timestamp here directly.
if to_format:
timestamp = calendar.timegm(dt.timetuple())
to_format = _fix_timestamp_format(to_format, str(timestamp))
to_format = _fix_microsecond_format(to_format, str(dt.microsecond))
return dt.strftime(to_format)
except Exception:
_logger.warning(
'Unable to convert date_string "%s" from format "%s" to "%s",'
' return the original date_string, cause=%s',
date_string,
from_format,
to_format,
traceback.format_exc()
)
return date_string
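# Hedged usage sketch (formats are illustrative):
#   >>> time_str2str('2020-01-02 03:04:05', '%Y-%m-%d %H:%M:%S', '%d/%m/%Y')
#   '02/01/2020'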
def is_true(value):
"""Determine whether value is True"""
return str(value).strip().lower() == 'true'
def exit_if_true(value):
"""Raise a StopCCEIteration exception if value is True"""
if is_true(value):
raise StopCCEIteration
def exit_job_if_true(value):
"""Raise a QuitJob exception if value is True"""
if is_true(value):
raise QuitJobError
def assert_true(value, message=None):
"""Assert value is True"""
if not is_true(value):
raise AssertionError(
message or '"{value}" is not true'.format(value=value)
)
def split_by(source, target, separator=None):
"""Split the source to multiple values by the separator"""
try:
if not source:
return []
elif isinstance(source, six.string_types) and separator:
values = source.split(separator)
return [{target: value.strip()} for value in values]
elif isinstance(source, six.string_types):
return [{target: source}]
elif isinstance(source, Iterable):
return [{target: value} for value in source]
else:
return [{target: source}]
except Exception as ex:
_logger.warning("split_by method encountered exception "
"source=%s message=%s cause=%s", source, ex,
traceback.format_exc())
return []
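# Hedged usage sketch (field name is illustrative):
#   >>> split_by('a, b', 'item', separator=',')
#   [{'item': 'a'}, {'item': 'b'}]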
_extension_functions = {
'assert_true': assert_true,
'exit_if_true': exit_if_true,
'exit_job_if_true': exit_job_if_true,
'is_true': is_true,
'regex_match': regex_match,
'regex_not_match': regex_not_match,
'regex_search': regex_search,
'set_var': set_var,
'splunk_xml': splunk_xml,
'std_output': std_output,
'json_path': json_path,
'json_empty': json_empty,
'json_not_empty': json_not_empty,
'time_str2str': time_str2str,
'split_by': split_by
}
def lookup_method(name):
""" Find a predefined function with given function name.
:param name: function name.
:return: A function with given name.
"""
return _extension_functions.get(name)
|
|
#! /usr/bin/pythonw
# The Keccak sponge function, designed by Guido Bertoni, Joan Daemen,
# Michaël Peeters and Gilles Van Assche. For more information, feedback or
# questions, please refer to our website: http://keccak.noekeon.org/
#
# Implementation by Renaud Bauvin,
# hereby denoted as "the implementer".
#
# To the extent possible under law, the implementer has waived all copyright
# and related or neighboring rights to the source code in this file.
# http://creativecommons.org/publicdomain/zero/1.0/
#https://github.com/monero-project/mininero/blob/master/Keccak.py
import math
class KeccakError(Exception):
"""Class of error used in the Keccak implementation
Use: raise KeccakError.KeccakError("Text to be displayed")"""
def __init__(self, value):
self.value = value
def __str__(self):
        return repr(self.value)

# Alias so that the "raise KeccakError.KeccakError(...)" spelling documented in
# the class docstring (and used throughout this file) resolves to the class
# itself instead of raising AttributeError.
KeccakError.KeccakError = KeccakError
class Keccak:
"""
Class implementing the Keccak sponge function
"""
def __init__(self, b=1600):
"""Constructor:
b: parameter b, must be 25, 50, 100, 200, 400, 800 or 1600 (default value)"""
self.setB(b)
def setB(self,b):
"""Set the value of the parameter b (and thus w,l and nr)
b: parameter b, must be choosen among [25, 50, 100, 200, 400, 800, 1600]
"""
if b not in [25, 50, 100, 200, 400, 800, 1600]:
raise KeccakError.KeccakError('b value not supported - use 25, 50, 100, 200, 400, 800 or 1600')
# Update all the parameters based on the used value of b
self.b=b
self.w=b//25
self.l=int(math.log(self.w,2))
self.nr=12+2*self.l
# Constants
## Round constants
RC=[0x0000000000000001,
0x0000000000008082,
0x800000000000808A,
0x8000000080008000,
0x000000000000808B,
0x0000000080000001,
0x8000000080008081,
0x8000000000008009,
0x000000000000008A,
0x0000000000000088,
0x0000000080008009,
0x000000008000000A,
0x000000008000808B,
0x800000000000008B,
0x8000000000008089,
0x8000000000008003,
0x8000000000008002,
0x8000000000000080,
0x000000000000800A,
0x800000008000000A,
0x8000000080008081,
0x8000000000008080,
0x0000000080000001,
0x8000000080008008]
## Rotation offsets
r=[[0, 36, 3, 41, 18] ,
[1, 44, 10, 45, 2] ,
[62, 6, 43, 15, 61] ,
[28, 55, 25, 21, 56] ,
[27, 20, 39, 8, 14] ]
## Generic utility functions
def rot(self,x,n):
"""Bitwise rotation (to the left) of n bits considering the \
string of bits is w bits long"""
n = n%self.w
return ((x>>(self.w-n))+(x<<n))%(1<<self.w)
def fromHexStringToLane(self, string):
"""Convert a string of bytes written in hexadecimal to a lane value"""
#Check that the string has an even number of characters i.e. whole number of bytes
if len(string)%2!=0:
raise KeccakError.KeccakError("The provided string does not end with a full byte")
#Perform the modification
temp=''
nrBytes=len(string)//2
for i in range(nrBytes):
offset=(nrBytes-i-1)*2
temp+=string[offset:offset+2]
return int(temp, 16)
def fromLaneToHexString(self, lane):
"""Convert a lane value to a string of bytes written in hexadecimal"""
laneHexBE = (("%%0%dX" % (self.w//4)) % lane)
#Perform the modification
temp=''
nrBytes=len(laneHexBE)//2
for i in range(nrBytes):
offset=(nrBytes-i-1)*2
temp+=laneHexBE[offset:offset+2]
return temp.upper()
def printState(self, state, info):
"""Print on screen the state of the sponge function preceded by \
string info
state: state of the sponge function
info: a string of characters used as identifier"""
print("Current value of state: %s" % (info))
for y in range(5):
line=[]
for x in range(5):
line.append(hex(state[x][y]))
print('\t%s' % line)
### Conversion functions String <-> Table (and vice-versa)
def convertStrToTable(self,string):
        #Check the input parameters
        if self.w%8!= 0:
            raise KeccakError.KeccakError("w is not a multiple of 8")
if len(string)!=2*(self.b)//8:
raise KeccakError.KeccakError("string can't be divided in 25 blocks of w bits\
i.e. string must have exactly b bits")
#Convert
output=[[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0]]
for x in range(5):
for y in range(5):
offset=2*((5*y+x)*self.w)//8
output[x][y]=self.fromHexStringToLane(string[offset:offset+(2*self.w//8)])
return output
def convertTableToStr(self,table):
#Check input format
if self.w%8!= 0:
raise KeccakError.KeccakError("w is not a multiple of 8")
if (len(table)!=5) or (False in [len(row)==5 for row in table]):
raise KeccakError.KeccakError("table must b")
#Convert
output=['']*25
for x in range(5):
for y in range(5):
output[5*y+x]=self.fromLaneToHexString(table[x][y])
output =''.join(output).upper()
return output
def Round(self,A,RCfixed):
"""Perform one round of computation as defined in the Keccak-f permutation
"""
#Initialisation of temporary variables
B=[[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0]]
C= [0,0,0,0,0]
D= [0,0,0,0,0]
#Theta step
for x in range(5):
C[x] = A[x][0]^A[x][1]^A[x][2]^A[x][3]^A[x][4]
for x in range(5):
D[x] = C[(x-1)%5]^self.rot(C[(x+1)%5],1)
for x in range(5):
for y in range(5):
A[x][y] = A[x][y]^D[x]
#Rho and Pi steps
for x in range(5):
for y in range(5):
B[y][(2*x+3*y)%5] = self.rot(A[x][y], self.r[x][y])
#Chi step
for x in range(5):
for y in range(5):
A[x][y] = B[x][y]^((~B[(x+1)%5][y]) & B[(x+2)%5][y])
#Iota step
A[0][0] = A[0][0]^RCfixed
return A
def KeccakF(self,A, verbose=False):
"""Perform Keccak-f function on the state A
verbose: a boolean flag activating the printing of intermediate computations
"""
if verbose:
self.printState(A,"Before first round")
for i in range(self.nr):
#NB: result is truncated to lane size
A = self.Round(A,self.RC[i]%(1<<self.w))
if verbose:
self.printState(A,"Satus end of round #%d/%d" % (i+1,self.nr))
return A
### Padding rule
def pad10star1(self, M, n):
"""Pad M with the pad10*1 padding rule to reach a length multiple of r bits
M: message pair (length in bits, string of hex characters ('9AFC...')
n: length in bits (must be a multiple of 8)
Example: pad10star1([60, 'BA594E0FB9EBBD30'],8) returns 'BA594E0FB9EBBD93'
"""
[my_string_length, my_string]=M
# Check the parameter n
if n%8!=0:
raise KeccakError.KeccakError("n must be a multiple of 8")
# Check the length of the provided string
if len(my_string)%2!=0:
#Pad with one '0' to reach correct length (don't know test
#vectors coding)
my_string=my_string+'0'
if my_string_length>(len(my_string)//2*8):
raise KeccakError.KeccakError("the string is too short to contain the number of bits announced")
nr_bytes_filled=my_string_length//8
nbr_bits_filled=my_string_length%8
l = my_string_length % n
if ((n-8) <= l <= (n-2)):
if (nbr_bits_filled == 0):
my_byte = 0
else:
my_byte=int(my_string[nr_bytes_filled*2:nr_bytes_filled*2+2],16)
my_byte=(my_byte>>(8-nbr_bits_filled))
my_byte=my_byte+2**(nbr_bits_filled)+2**7
my_byte="%02X" % my_byte
my_string=my_string[0:nr_bytes_filled*2]+my_byte
else:
if (nbr_bits_filled == 0):
my_byte = 0
else:
my_byte=int(my_string[nr_bytes_filled*2:nr_bytes_filled*2+2],16)
my_byte=(my_byte>>(8-nbr_bits_filled))
my_byte=my_byte+2**(nbr_bits_filled)
my_byte="%02X" % my_byte
my_string=my_string[0:nr_bytes_filled*2]+my_byte
while((8*len(my_string)//2)%n < (n-8)):
my_string=my_string+'00'
my_string = my_string+'80'
return my_string
    def Keccak(self,M,r=1024,c=576,n=1024,verbose=False):
        """Compute the Keccak[r,c,d] sponge function on message M
        M: message pair (length in bits, string of hex characters ('9AFC...'))
        r: bitrate in bits (default: 1024)
        c: capacity in bits (default: 576)
        n: length of output in bits (default: 1024),
        verbose: print the details of computations (default: False)
        """
#Check the inputs
if (r<0) or (r%8!=0):
raise KeccakError.KeccakError('r must be a multiple of 8 in this implementation')
if (n%8!=0):
raise KeccakError.KeccakError('outputLength must be a multiple of 8')
self.setB(r+c)
if verbose:
print("Create a Keccak function with (r=%d, c=%d (i.e. w=%d))" % (r,c,(r+c)//25))
#Compute lane length (in bits)
w=(r+c)//25
# Initialisation of state
S=[[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0]]
#Padding of messages
P = self.pad10star1(M, r)
if verbose:
print("String ready to be absorbed: %s (will be completed by %d x '00')" % (P, c//8))
#Absorbing phase
for i in range((len(P)*8//2)//r):
Pi=self.convertStrToTable(P[i*(2*r//8):(i+1)*(2*r//8)]+'00'*(c//8))
for y in range(5):
for x in range(5):
S[x][y] = S[x][y]^Pi[x][y]
S = self.KeccakF(S, verbose)
if verbose:
print("Value after absorption : %s" % (self.convertTableToStr(S)))
#Squeezing phase
Z = ''
outputLength = n
while outputLength>0:
string=self.convertTableToStr(S)
Z = Z + string[:r*2//8]
outputLength -= r
if outputLength>0:
S = self.KeccakF(S, verbose)
# NB: done by block of length r, could have to be cut if outputLength
# is not a multiple of r
if verbose:
print("Value after squeezing : %s" % (self.convertTableToStr(S)))
return Z[:2*n//8]
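# Hedged usage sketch (Keccak-256-style parameters as used by mininero; the
# resulting hex digest is not reproduced here):
#   >>> k = Keccak(1600)
#   >>> message = '41FB'                    # two bytes, i.e. 16 bits
#   >>> digest_hex = k.Keccak((16, message), r=1088, c=512, n=256)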
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal tilera driver."""
import os
from oslo.config import cfg
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import tilera
from nova.virt.baremetal import utils as bm_utils
from nova.virt.disk import api as disk_api
from nova.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.tilera.Tilera',
instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
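# Note: both dicts are unpacked into self.flags(**...) in setUp(); the 'group'
# key in BAREMETAL_FLAGS scopes the remaining options to the [baremetal]
# config group rather than the default one.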
class BareMetalTileraTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalTileraTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = tilera.Tilera(fake_virt.FakeVirtAPI())
fake_image.stub_out_image_service(self.stubs)
self.addCleanup(fake_image.FakeImageService_reset)
self.context = utils.get_test_admin_context()
self.test_block_device_info = None,
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info(),
self.node_info = bm_db_utils.new_bm_node(
service_host='test_host',
cpus=4,
memory_mb=2048,
prov_mac_address='11:11:11:11:11:11',
)
self.nic_info = [
{'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
'port_no': 1},
{'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
'port_no': 2},
]
def _create_node(self):
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
self.spawn_params = dict(
admin_password='test_pass',
block_device_info=self.test_block_device_info,
context=self.context,
image_meta=utils.get_test_image_info(None,
self.instance),
injected_files=[('/fake/path', 'hello world')],
instance=self.instance,
network_info=self.test_network_info,
)
class TileraClassMethodsTestCase(BareMetalTileraTestCase):
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = tilera.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = tilera.build_network_config(net)
self.assertIn('eth0', config)
self.assertIn('eth1', config)
def test_build_network_config_dhcp(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0][1]['ips'][0]['ip'] = '1.2.3.4'
config = tilera.build_network_config(net)
self.assertIn('iface eth0 inet dhcp', config)
self.assertNotIn('address 1.2.3.4', config)
def test_build_network_config_static(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0][1]['ips'][0]['ip'] = '1.2.3.4'
config = tilera.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('address 1.2.3.4', config)
def test_image_dir_path(self):
self.assertEqual(
tilera.get_image_dir_path(self.instance),
os.path.join(CONF.instances_path, 'instance-00000001'))
def test_image_file_path(self):
self.assertEqual(
tilera.get_image_file_path(self.instance),
os.path.join(
CONF.instances_path, 'instance-00000001', 'disk'))
def test_tilera_nfs_path(self):
self._create_node()
self.node['id'] = '123'
tilera_nfs_dir = "fs_" + self.node['id']
self.assertEqual(
tilera.get_tilera_nfs_path(self.node['id']),
os.path.join(CONF.baremetal.tftp_root,
tilera_nfs_dir))
def test_get_partition_sizes(self):
# default "kinda.big" instance
sizes = tilera.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1024)
def test_swap_not_zero(self):
# override swap to 0
instance_type = utils.get_test_instance_type(self.context)
instance_type['swap'] = 0
self.instance = utils.get_test_instance(self.context, instance_type)
sizes = tilera.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1)
def test_get_tftp_image_info(self):
# Tilera case needs only kernel_id.
self.instance['kernel_id'] = 'aaaa'
self.instance['uuid'] = 'fake-uuid'
# Here, we confirm both that kernel_id was set
# and that the proper paths are getting set for all of them
base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
res = tilera.get_tftp_image_info(self.instance)
expected = {
'kernel': ['aaaa', os.path.join(base, 'kernel')],
}
self.assertEqual(res, expected)
class TileraPrivateMethodsTestCase(BareMetalTileraTestCase):
def test_collect_mac_addresses(self):
self._create_node()
address_list = [nic['address'] for nic in self.nic_info]
address_list.append(self.node_info['prov_mac_address'])
address_list.sort()
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(macs, address_list)
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
image_info = tilera.get_tftp_image_info(self.instance)
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(os.path.join(CONF.baremetal.tftp_root,
self.instance['uuid'])).AndReturn(True)
for uuid, path in [image_info[label] for label in image_info]:
os.path.exists(path).AndReturn(True)
self.mox.ReplayAll()
self.driver._cache_tftp_images(
self.context, self.instance, image_info)
self.mox.VerifyAll()
def test_cache_image(self):
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(tilera.get_image_dir_path(self.instance)).\
AndReturn(True)
os.path.exists(tilera.get_image_file_path(self.instance)).\
AndReturn(True)
self.mox.ReplayAll()
image_meta = utils.get_test_image_info(
self.context, self.instance)
self.driver._cache_image(
self.context, self.instance, image_meta)
self.mox.VerifyAll()
def test_inject_into_image(self):
self._create_node()
files = []
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
net_info = utils.get_test_network_info(1)
net = tilera.build_network_config(net_info)
admin_password = 'fake password'
self.mox.StubOutWithMock(disk_api, 'inject_data')
disk_api.inject_data(
admin_password=admin_password,
image=tilera.get_image_file_path(self.instance),
key='fake ssh key',
metadata=None,
partition=None,
net=net,
files=files,
).AndReturn(True)
self.mox.ReplayAll()
self.driver._inject_into_image(
self.context, self.node, self.instance,
network_info=net_info,
admin_password=admin_password,
injected_files=None)
self.mox.VerifyAll()
class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
def test_cache_images(self):
self._create_node()
self.mox.StubOutWithMock(tilera, "get_tftp_image_info")
self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
self.mox.StubOutWithMock(self.driver, "_cache_image")
self.mox.StubOutWithMock(self.driver, "_inject_into_image")
tilera.get_tftp_image_info(self.instance).AndReturn([])
self.driver._cache_tftp_images(self.context, self.instance, [])
self.driver._cache_image(self.context, self.instance, [])
self.driver._inject_into_image(self.context, self.node, self.instance,
self.test_network_info, None, '')
self.mox.ReplayAll()
self.driver.cache_images(
self.context, self.node, self.instance,
admin_password='',
image_meta=[],
injected_files=None,
network_info=self.test_network_info,
)
self.mox.VerifyAll()
def test_destroy_images(self):
self._create_node()
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
bm_utils.unlink_without_raise(tilera.get_image_file_path(
self.instance))
bm_utils.rmtree_without_raise(tilera.get_image_dir_path(self.instance))
self.mox.ReplayAll()
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_bootloader_passes_details(self):
self._create_node()
macs = [nic['address'] for nic in self.nic_info]
macs.append(self.node_info['prov_mac_address'])
macs.sort()
image_info = {
'kernel': [None, 'cccc'],
}
self.instance['uuid'] = 'fake-uuid'
iqn = "iqn-%s" % self.instance['uuid']
tilera_config = 'this is a fake tilera config'
self.instance['uuid'] = 'fake-uuid'
tilera_path = tilera.get_tilera_nfs_path(self.instance)
image_path = tilera.get_image_file_path(self.instance)
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
self.mox.StubOutWithMock(tilera, 'get_partition_sizes')
tilera.get_tftp_image_info(self.instance).AndReturn(image_info)
tilera.get_partition_sizes(self.instance).AndReturn((0, 0))
self.mox.ReplayAll()
self.driver.activate_bootloader(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_and_deactivate_bootloader(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
tilera_path = tilera.get_tilera_nfs_path(self.instance)
image_path = tilera.get_image_file_path(self.instance)
self.mox.ReplayAll()
# activate and deactivate the bootloader
# and check the deployment task_state in the database
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
self.driver.activate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is not None)
self.driver.deactivate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
self.node['id'] = 'fake-node-id'
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
tilera_path = tilera.get_tilera_nfs_path(self.node['id'])
tilera.get_tftp_image_info(self.instance).\
AndRaise(exception.NovaException)
self.driver._collect_mac_addresses(self.context, self.node).\
AndRaise(db_exc.DBError)
self.mox.ReplayAll()
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYING,
'instance_uuid': 'fake-uuid'})
# test DEPLOYDONE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance)
# test node gone
db.bm_node_destroy(self.context, 1)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
|
|
#---------------------------------------------------------------------------
# Copyright 2013 PwC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
## @package REG_Suite001
## Registration Package Test (Suite)
'''
These are the Registration package Suite001 tests, implemented as Python functions.
Each test has a unique name derived from the test method name.
Each test has a unique result log with a filename derived from the testname and a datestamp.
There is a parent resultlog that is also used for pass/fail logging.
In general each test establishes a connection to the target application (VistA),
signs on as a user, provider, or programmer and then performs a set of test functions.
When testing is complete the connection is closed and a pass/fail indication is written
to the resultlog.
Created on November 2012
@author: pbradley
@copyright PwC
@license http://www.apache.org/licenses/LICENSE-2.0
'''
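# The tests below all follow the same basic shape (sketched here for reference;
# names ending in NNN are placeholders, and the concrete ADT actions vary):
#
#   def reg_testNNN(test_suite_details):
#       testname = sys._getframe().f_code.co_name
#       test_driver = TestHelper.TestDriver(testname)
#       test_driver.pre_test_run(test_suite_details)
#       try:
#           VistA1 = test_driver.connect_VistA(test_suite_details)
#           reg = ADTActions(VistA1, user='fakedoc1', code='1Doc!@#$')
#           reg.signon()
#           # ... exercise the Registration / ADT functions under test ...
#           reg.signoff()
#           test_driver.post_test_run(test_suite_details)
#       except TestHelper.TestError, e:
#           test_driver.exception_handling(test_suite_details, e)
#       else:
#           test_driver.try_else_handling(test_suite_details)
#       finally:
#           test_driver.finally_handling(test_suite_details)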
import sys
sys.path = ['./Functional/RAS/lib'] + ['./dataFiles'] + ['./Python/vista'] + sys.path
from ADTActions import ADTActions
from Actions import Actions
import datetime
import TestHelper
import time
def reg_test001(test_suite_details):
'''Test admission of 4 patients, then verify, then discharge '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = ADTActions(VistA1, user='fakedoc1', code='1Doc!@#$')
reg.signon()
reg.admit_a_patient(ssn='888776666', bed='1-A')
reg.roster_list(vlist=['TWO,PATIENT B', '1-A'])
reg.det_inpatient_inquiry(ssn='888776666', item='1', vlist=['DIRECT', '1-A', 'ALEXANDER,ROBER', 'SMITH,MARY'])
reg.switch_bed(ssn='888776666', bed='1-B')
reg.admit_a_patient(ssn='656451234', bed='1-A')
reg.roster_list(vlist=['SIX,PATIENT F', '1-A'])
reg.switch_bed(ssn='656451234', bed='2-A', badbed='1-B')
reg.admit_a_patient(ssn='656771234', bed='1-A')
reg.roster_list(vlist=['SEVEN,PATIENT G', '1-A'])
reg.admit_a_patient(ssn='444678924', bed='2-B')
reg.roster_list(vlist=['FOURTEEN,PATIENT', '2-B'])
time.sleep(10)
reg.seriously_ill_list(ssnlist=['888776666', '656451234', '656771234', '444678924'],
vlist1=['FOURTEEN,PATIENT', 'SEVEN,PATIENT', 'SIX,PATIENT', 'TWO,PATIENT'],
vlist2=[['TWO,PATIENT', '888776666'],
['SIX,PATIENT', '656451234'],
['SEVEN,PATIENT', '656771234'],
['FOURTEEN,PATIENT', '444678924']])
reg.treating_spcl_trans(ssn='888776666', spcl='CARDIAC SURGERY')
time.sleep(10)
reg.discharge_patient(ssn='888776666', dtime='NOW+1')
reg.discharge_patient(ssn='656451234', dtime='NOW+10')
reg.discharge_patient(ssn='656771234', dtime='NOW+100')
reg.discharge_patient(ssn='444678924', dtime='NOW+1000')
reg.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def reg_test002(test_suite_details):
'''Test to Schedule, Unschedule, and Transfer Patients '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = ADTActions(VistA1, user='fakedoc1', code='1Doc!@#$')
reg.signon()
reg.admit_a_patient(ssn='888776666', bed='1-A')
reg.roster_list(vlist=['TWO,PATIENT B', '1-A'])
reg.det_inpatient_inquiry(ssn='888776666', item='1', vlist=['DIRECT', '1-A', 'ALEXANDER,ROBER', 'SMITH,MARY'])
reg.schedule_admission(ssn='656451234')
reg.schedule_admission(ssn='656771234')
reg.scheduled_admit_list(vlist=['SEVEN,PATIENT G', 'SIX,PATIENT F'])
time.sleep(10)
reg.provider_change(ssn='888776666')
time.sleep(10)
reg.transfer_patient(ssn='888776666')
reg.cancel_scheduled_admission(ssn='656451234')
reg.cancel_scheduled_admission(ssn='656771234')
reg.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def reg_test003(test_suite_details):
'''Test for Wait list entries '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = ADTActions(VistA1)
reg.signon()
reg.gotoADTmenu()
reg.waiting_list_entry(ssn='323554567')
reg.signon()
reg.gotoADTmenu()
reg.waiting_list_entry(ssn='123455678')
reg.signon()
reg.gotoADTmenu()
reg.waiting_list_output(vlist=['TWENTYFOUR,PATIENT', 'TWENTYTHREE,PATIENT'])
reg.signon()
reg.gotoADTmenu()
reg.delete_waiting_list_entry(ssn='323554567')
reg.signon()
reg.gotoADTmenu()
reg.delete_waiting_list_entry(ssn='123455678')
reg.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def reg_test004(test_suite_details):
'''Test for Lodger checkin / checkout '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = ADTActions(VistA1, user='fakedoc1', code='1Doc!@#$')
reg.signon()
reg.checkin_lodger(ssn='323554567', bed='1-A')
reg.checkin_lodger(ssn='123455678', bed='1-B')
time.sleep(10)
reg.lodger_checkout(ssn='323554567')
reg.lodger_checkout(ssn='123455678')
# DRG Calculation
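# Each dlist entry below pairs a list of expected prompt patterns with the list
# of responses to send; e.g. [['Option:'], ['bed control menu']] waits for the
# 'Option:' prompt and answers 'bed control menu', while an entry with an empty
# response list (such as the 'Diagnosis Related Group' line) only verifies output.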
reg.wwgeneric(dlist=[[['Option:'], ['bed control menu']],
[['Option:'], ['DRG Calculation']],
[['Effective Date:'], ['t']],
[['Choose Patient from PATIENT file'], ['Yes']],
[['Select PATIENT NAME:'], ['123455678']],
[['Transfer to an acute care facility'], ['No']],
[['Discharged against medical advice'], ['No']],
[['Enter PRINCIPAL diagnosis:'], ['787.1']],
[['YES//'], ['YES']],
[['Enter SECONDARY diagnosis'], ['786.50']],
[['YES//'], ['YES']],
[['Enter SECONDARY diagnosis'], ['']],
[['Enter Operation/Procedure'], ['31.93']],
[['Yes//'], ['YES']],
[['Enter Operation/Procedure'], ['']],
[['Diagnosis Related Group: +[0-9]+', 'Average Length of Stay\(ALOS\): +[0-9.]+', 'Weight: +[0-9.]+', 'Low Day\(s\): +[0-9]+', 'High Days: +[0-9]+', '392- ESOPHAGITIS'], []],
[['Effective Date'], ['']],
[['Choose Patient from PATIENT file'], ['']],
[['Select PATIENT NAME:'], ['']],
[['Bed Control Menu'], ['']]])
reg.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def reg_test005(test_suite_details):
'''This is a basic ADT Menu Smoke Tests '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = ADTActions(VistA1, user='fakedoc1', code='1Doc!@#$')
reg.signon()
reg.adt_menu_smoke(ssn='323554567')
reg.signoff()
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def reg_test006(test_suite_details):
'''Discharge previously discharged patient (break test, REF-221 ticket) and then perform Detailed Inpatient Inquire (REF-268) '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = ADTActions(VistA1)
reg.signon()
reg.gotoADTmenu()
reg.discharge_patient(ssn='444678924', dtime='NOW')
reg.det_inpatient_inquiry(ssn='444678924', item='1', vlist=['DIRECT', '2-B', 'ALEXANDER,ROBER', 'SMITH,MARY'])
reg.signoff()
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def reg_test007(test_suite_details):
'''Add a new doctor, wait 2 minutes, add another doctor, then attempt to add doctor during patient admitting using a prior date (REF-218) '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = Actions(VistA1, user='SM1234', code='SM1234!!!')
reg.signon()
reg.adduser(name='JONES,JOE', ssn='000000050', gender='M', initials='JJ', acode='fakejoe1', vcode1='1SWUSH1234!!')
VistA1 = test_driver.connect_VistA(testname + '_01', result_dir, namespace)
reg = Actions(VistA1)
reg.sigsetup(acode='fakejoe1', vcode1='1SWUSH1234!!', vcode2='1SWUSH12345!!', sigcode='JOEJ123')
VistA1 = test_driver.connect_VistA(testname + '_02', result_dir, namespace)
reg = Actions(VistA1, user='SM1234', code='SM1234!!!')
reg.signon()
reg.adduser(name='BURKE,BARBARA', ssn='000000051', gender='F', initials='BB', acode='fakebar1', vcode1='1OIG1234!!')
VistA1 = test_driver.connect_VistA(testname + '_03', result_dir, namespace)
reg = Actions(VistA1)
reg.sigsetup(acode='fakebar1', vcode1='1OIG1234!!', vcode2='1OGI12345!!', sigcode='BARB123')
reg.signoff()
VistA1 = test_driver.connect_VistA(testname + '_04', result_dir, namespace)
reg = ADTActions(VistA1, user='fakedoc1', code='1Doc!@#$')
reg.signon()
reg.admit_a_patient(ssn='666551234', bed='2-B', time='t-1@01am', doctorlist=['BURKE', 'Alexander', 'JONES', 'Alexander'])
reg.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def reg_logflow(test_suite_details):
'''Use XTFCR to log flow to file. Not a test, just creates flow diagrams. '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = ADTActions(VistA1)
reg.logflow(['DGPMV', 'DGSWITCH'])
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def setup_ward(test_suite_details):
''' Set up ward for ADT testing '''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
reg = ADTActions(VistA1)
reg.signon()
reg.adt_setup()
reg.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
def startmon(test_suite_details):
'''This starts the Coverage Monitor'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
VistA1 = None
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
VistA1.startCoverage(routines=['DGPMV', 'DGSWITCH', 'DGSCHAD', 'DGPMEX', 'DGWAIT', 'DGSILL'])
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
'''
Close Vista
'''
VistA1.write('^\r^\r^\r')
VistA1.write('h\r')
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
def stopmon(test_suite_details):
'''This stops the Coverage Monitor'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
VistA1 = None
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
path = (test_suite_details.result_dir + '/' + TestHelper.timeStamped('ADT_coverage.txt'))
VistA1.stopCoverage(path, test_suite_details.coverage_type)
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
'''
Close Vista
'''
VistA1.write('^\r^\r^\r')
VistA1.write('h\r')
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
|
|
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Generation of OAuth2 token for a service account.
Supports three ways to generate OAuth2 tokens:
* app_identity.get_access_token(...) to use native GAE service account.
* OAuth flow with JWT token, for @*.iam.gserviceaccount.com service
accounts (the one with a private key).
* Acting as another service account (via signJwt IAM RPC).
"""
import base64
import collections
import hashlib
import json
import logging
import os
import random
from six.moves import urllib
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.runtime import apiproxy_errors
from . import project_tokens
from . import exceptions
from components import utils
# Part of public API of 'auth' component, exposed by this module.
__all__ = [
'get_access_token',
'get_access_token_async',
'get_project_access_token',
'get_project_access_token_async',
'AccessTokenError',
'ServiceAccountKey',
]
# Information about an @*.iam.gserviceaccount.com service account. Field values can be extracted
# from corresponding fields in JSON file produced by "Generate new JSON key"
# button in "Credentials" section of any Cloud Console project.
ServiceAccountKey = collections.namedtuple('ServiceAccountKey', [
# Service account email.
'client_email',
# Service account PEM encoded private key.
'private_key',
# Service account key fingerprint, a unique identifier of this key.
'private_key_id',
])
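# Illustration only: the fields correspond one-to-one to the JSON key file,
# so a key can be built roughly like this (file name is hypothetical):
#
#   with open('service-account-key.json') as f:
#       blob = json.load(f)
#   key = ServiceAccountKey(
#       client_email=blob['client_email'],
#       private_key=blob['private_key'],
#       private_key_id=blob['private_key_id'])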
class AccessTokenError(Exception):
"""Raised by get_access_token() on fatal or transient errors."""
def __init__(self, msg, transient=False):
super(AccessTokenError, self).__init__(msg)
self.transient = transient
# Do not log AccessTokenError exception raised from a tasklet.
ndb.add_flow_exception(AccessTokenError)
@ndb.tasklet
def authenticated_request_async(url, method='GET', payload=None, params=None):
"""Sends an authenticated JSON API request, returns deserialized response.
Raises:
TokenCreationError if request failed or response is malformed.
TokenAuthorizationError on HTTP 401 or 403 response from service.
"""
scope = 'https://www.googleapis.com/auth/userinfo.email'
access_token = get_access_token(scope)[0]
headers = {
'Accept': 'application/json; charset=utf-8',
'Authorization': 'Bearer %s' % access_token,
}
if payload is not None:
assert method in ('CREATE', 'POST', 'PUT'), method
headers['Content-Type'] = 'application/json; charset=utf-8'
payload = utils.encode_to_json(payload)
if utils.is_local_dev_server():
protocols = ('http://', 'https://')
else:
protocols = ('https://',)
assert url.startswith(protocols) and '?' not in url, url
if params:
url += '?' + urllib.parse.urlencode(params)
try:
res = yield _urlfetch_async(
url=url,
payload=payload,
method=method,
headers=headers,
follow_redirects=False,
deadline=10,
validate_certificate=True)
except (apiproxy_errors.DeadlineExceededError, urlfetch.Error) as e:
raise exceptions.TokenCreationError(str(e))
if res.status_code == 404:
logging.warning('Token server HTTP %d: %s', res.status_code, res.content)
raise exceptions.NotFoundError(
'HTTP %d: %s' % (res.status_code, res.content))
if res.status_code in (401, 403):
logging.error('Token server HTTP %d: %s', res.status_code, res.content)
raise exceptions.TokenAuthorizationError(
'HTTP %d: %s' % (res.status_code, res.content))
if res.status_code >= 300:
logging.error('Token server HTTP %d: %s', res.status_code, res.content)
raise exceptions.TokenCreationError(
'HTTP %d: %s' % (res.status_code, res.content))
try:
content = res.content
if content.startswith(")]}'\n"):
content = content[5:]
json_res = json.loads(content)
except ValueError as e:
raise exceptions.TokenCreationError('Bad JSON response: %s' % e)
raise ndb.Return(json_res)
def authenticated_request(**kwargs):
"""Blocking version of authenticated_request_async."""
return authenticated_request_async(**kwargs).get_result()
@ndb.tasklet
def get_project_access_token_async(
project_id, scopes, min_lifetime_sec=5*60):
"""Returns an OAuth2 access token for a project.
Args:
project_id: id of the LUCI project to obtain a token for.
scopes: list of OAuth2 scopes to grant on the token.
min_lifetime_sec: desired minimal lifetime of the produced token.
Returns:
Tuple (access token, expiration time in seconds since the epoch).
Raises:
AccessTokenError on errors.
"""
# Limit min_lifetime_sec, since requesting very long-lived tokens reduces
# efficiency of the cache (we need to constantly update it to keep tokens
# fresh).
if min_lifetime_sec <= 0 or min_lifetime_sec > 30 * 60:
raise ValueError(
'"min_lifetime_sec" should be in range (0; 1800], actual: %d'
% min_lifetime_sec)
# Accept a single string to mimic app_identity.get_access_token behavior.
if isinstance(scopes, basestring):
scopes = [scopes]
scopes = sorted(scopes)
# Cache key for the token.
cache_key = _memcache_key(
method='tokenserver',
email=project_id,
scopes=scopes,
key_id=None)
# We need token only on cache miss, so generate it lazily.
token = yield _get_or_mint_token_async(
cache_key,
min_lifetime_sec,
lambda: project_tokens.project_token_async(
project_id,
scopes,
authenticated_request_async,
min_lifetime_sec,
))
raise ndb.Return(token)
def get_project_access_token(*args, **kwargs):
"""Blocking version of get_project_access_token_async."""
return get_project_access_token_async(*args, **kwargs).get_result()
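# Usage sketch (illustrative; the project id and scope are placeholders):
#
#   token, exp_ts = get_project_access_token(
#       'some-luci-project',
#       ['https://www.googleapis.com/auth/cloud-platform'])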
@ndb.tasklet
def get_access_token_async(
scopes, service_account_key=None, act_as=None, min_lifetime_sec=5*60):
"""Returns an OAuth2 access token for a service account.
If 'service_account_key' is specified, will use it to generate access token
for corresponding @*iam.gserviceaccount.com account. Otherwise will invoke
app_identity.get_access_token(...) to use app's @appspot.gserviceaccount.com
account.
If 'act_as' is specified, will return an access token for this account with
given scopes, generating it through a call to IAM:generateAccessToken, using
IAM-scoped access token of a primary service account (an appspot one, or the
one specified via 'service_account_key'). In this case the primary service
account should have 'serviceAccountTokenCreator' role in the service account
it acts as.
See https://cloud.google.com/iam/docs/service-accounts.
If using 'act_as' or 'service_account_key', the returned token will be valid
for at least approximately 'min_lifetime_sec' (5 min by default), but possibly
longer (up to 1h). If both 'act_as' and 'service_account_key' are None,
'min_lifetime_sec' is ignored and the returned token should be assumed
short-lived (<5 min).
Args:
scopes: the requested API scope string, or a list of strings.
service_account_key: optional instance of ServiceAccountKey.
act_as: email of an account to impersonate.
min_lifetime_sec: desired minimal lifetime of the produced token.
Returns:
Tuple (access token, expiration time in seconds since the epoch).
Raises:
AccessTokenError on errors.
"""
# Limit min_lifetime_sec, since requesting very long-lived tokens reduces
# efficiency of the cache (we need to constantly update it to keep tokens
# fresh).
if min_lifetime_sec <= 0 or min_lifetime_sec > 30 * 60:
raise ValueError(
'"min_lifetime_sec" should be in range (0; 1800], actual: %d'
% min_lifetime_sec)
# Accept a single string to mimic app_identity.get_access_token behavior.
if isinstance(scopes, basestring):
scopes = [scopes]
scopes = sorted(scopes)
# When acting as another account, grab an IAM-scoped token of the primary account first,
# and use it to sign JWT when making a token for the target account.
if act_as:
# Cache key for the target token! Not the IAM-scoped one. The key ID is not
# known in advance when using signJwt RPC.
cache_key = _memcache_key(
method='iam',
email=act_as,
scopes=scopes,
key_id=None)
# We need IAM-scoped token only on cache miss, so generate it lazily.
iam_token_factory = (
lambda: get_access_token_async(
scopes=['https://www.googleapis.com/auth/iam'],
service_account_key=service_account_key,
act_as=None,
min_lifetime_sec=5*60))
# On cache miss or if the cached token expires too soon, mint a token that
# lives ~1h, so we can cache and reuse it.
token = yield _get_or_mint_token_async(
cache_key,
min_lifetime_sec,
lambda: _mint_oauth_token_async(
iam_token_factory,
act_as,
scopes,
lifetime_sec=3600)
)
raise ndb.Return(token)
# Generate a token directly from the service account key.
if service_account_key:
# Empty private_key_id probably means that the app is not configured yet.
if not service_account_key.private_key_id:
raise AccessTokenError('Service account secret key is not initialized')
cache_key = _memcache_key(
method='pkey',
email=service_account_key.client_email,
scopes=scopes,
key_id=service_account_key.private_key_id)
token = yield _get_or_mint_token_async(
cache_key,
min_lifetime_sec,
lambda: _mint_jwt_based_token_async(
scopes,
_LocalSigner(service_account_key))
)
raise ndb.Return(token)
# TODO(vadimsh): Use app_identity.make_get_access_token_call to make it async.
raise ndb.Return(app_identity.get_access_token(scopes))
def get_access_token(*args, **kwargs):
"""Blocking version of get_access_token_async."""
return get_access_token_async(*args, **kwargs).get_result()
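# Usage sketches (illustrative; emails and scopes below are placeholders):
#
#   # Native GAE service account (short-lived token, min_lifetime_sec ignored):
#   token, exp_ts = get_access_token(
#       'https://www.googleapis.com/auth/userinfo.email')
#
#   # Act as another service account via the IAM credentials flow:
#   token, exp_ts = get_access_token(
#       ['https://www.googleapis.com/auth/cloud-platform'],
#       act_as='robot@example-project.iam.gserviceaccount.com')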
## Private stuff.
_MEMCACHE_NS = 'access_tokens'
def _memcache_key(method, email, scopes, key_id=None):
"""Returns a string to use as a memcache key for a token.
Args:
method: 'pkey' or 'iam'.
email: identity (usually service account email) we are getting a token for.
scopes: list of strings with scopes.
key_id: private key ID used (if known).
"""
blob = utils.encode_to_json({
'method': method,
'email': email,
'scopes': scopes,
'key_id': key_id,
})
return hashlib.sha256(blob).hexdigest()
@ndb.tasklet
def _get_or_mint_token_async(
cache_key,
min_lifetime_sec,
minter,
namespace=_MEMCACHE_NS):
"""Gets an access token from the cache or triggers mint flow."""
# Randomize refresh time to avoid the thundering herd effect when token
# expires. Also add 5 seconds extra to make sure callers will get the token
# that lives for at least min_lifetime_sec even taking into account possible
# delays in propagating the token up the stack. We can't give any strict
# guarantees here though (we'd need to be able to stop time to do that).
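# For example, with the default min_lifetime_sec of 5 min the cached token is
# reused only if its 'exp_ts' lies beyond a randomized threshold somewhere in
# [now + 305, now + 605] seconds; otherwise a fresh token is minted below.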
token_info = yield _memcache_get(cache_key, namespace=namespace)
min_allowed_exp = (
utils.time_time() +
_randint(min_lifetime_sec + 5, min_lifetime_sec + 305))
if not token_info or token_info['exp_ts'] < min_allowed_exp:
token_info = yield minter()
yield _memcache_set(cache_key, token_info,
token_info['exp_ts'], namespace=namespace)
raise ndb.Return(token_info['access_token'], token_info['exp_ts'])
@ndb.tasklet
def _mint_jwt_based_token_async(scopes, signer):
"""Creates new access token given a JWT signer."""
# For more info see:
# * https://developers.google.com/accounts/docs/OAuth2ServiceAccount.
# Prepare a claim set to be signed by the service account key. Note that
# Google backends seem to ignore 'exp' field and always give one-hour long
# tokens, so we just always request 1h long token too.
#
# Also shift the issue time slightly into the past, for the sake of machines
# whose clocks are not perfectly in sync with global time. If the client
# machine's clock is in the future according to Google's servers, the access
# token request will be denied. A slightly slow clock is not a problem though.
logging.info(
'Refreshing the access token for %s with scopes %s',
signer.email, scopes)
now = int(utils.time_time()) - 5
jwt = yield signer.sign_claimset_async({
'aud': 'https://www.googleapis.com/oauth2/v4/token',
'exp': now + 3600,
'iat': now,
'iss': signer.email,
'jti': _b64_encode(os.urandom(16)),
'scope': ' '.join(scopes),
})
# URL encoded body of a token request.
request_body = urllib.parse.urlencode({
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': jwt,
})
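# On success the token endpoint replies with the standard OAuth2 JSON body,
# roughly {'access_token': '...', 'expires_in': 3600, 'token_type': 'Bearer'};
# only 'access_token' and 'expires_in' are consumed below.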
# Exchange signed claimset for an access token.
token = yield _call_async(
url='https://www.googleapis.com/oauth2/v4/token',
payload=request_body,
method='POST',
headers={
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
})
raise ndb.Return({
'access_token': str(token['access_token']),
'exp_ts': int(utils.time_time() + token['expires_in'])
})
@ndb.tasklet
def _mint_oauth_token_async(
token_factory, email, scopes, lifetime_sec=0, delegates=None):
"""Creates a new access token using IAM credentials API."""
# Query IAM credentials generateAccessToken API to obtain an OAuth token for
# a given service account. Maximum lifetime is 1 hour. And can be obtained
# through a chain of delegates.
logging.info(
'Refreshing the access token for %s with scopes %s',
email, scopes
)
request_body = {'scope': scopes}
if delegates:
request_body['delegates'] = delegates
if lifetime_sec > 0:
# The API accepts the lifetime as a number of seconds with a trailing 's'
request_body['lifetime'] = '%ds' % lifetime_sec
http_auth, _ = yield token_factory()
response = yield _call_async(
url='https://iamcredentials.googleapis.com/v1/projects/-/'
'serviceAccounts/%s:generateAccessToken' % urllib.parse.quote_plus(email),
method='POST',
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % http_auth,
'Content-Type': 'application/json; charset=utf-8',
},
payload=utils.encode_to_json(request_body),
)
expired_at = int(utils.datetime_to_timestamp(
utils.parse_rfc3339_datetime(response['expireTime'])) / 1e6)
raise ndb.Return({
'access_token': response['accessToken'],
'exp_ts': expired_at,
})
@ndb.tasklet
def _call_async(url, payload, method, headers):
"""Makes URL fetch call aggressively retrying on errors a bunch of times.
On success returns deserialized JSON response body.
On failure raises AccessTokenError.
"""
attempt = 0
while attempt < 4:
if attempt:
logging.info('Retrying...')
attempt += 1
logging.info('%s %s', method, url)
try:
response = yield _urlfetch(
url=url,
payload=payload,
method=method,
headers=headers,
follow_redirects=False,
deadline=5, # all RPCs we do should be fast
validate_certificate=True)
except (apiproxy_errors.DeadlineExceededError, urlfetch.Error) as e:
# Transient network error or URL fetch service RPC deadline.
logging.warning('%s %s failed: %s', method, url, e)
continue
# Transient error on the other side.
if response.status_code >= 500:
logging.warning(
'%s %s failed with HTTP %d: %r',
method, url, response.status_code, response.content)
continue
# Non-transient error.
if 300 <= response.status_code < 500:
logging.warning(
'%s %s failed with HTTP %d: %r',
method, url, response.status_code, response.content)
raise AccessTokenError(
'Failed to call %s: HTTP %d' % (url, response.status_code))
# Success.
try:
body = json.loads(response.content)
except ValueError:
logging.error('Non-JSON response from %s: %r', url, response.content)
raise AccessTokenError('Non-JSON response from %s' % url)
raise ndb.Return(body)
# All our attempts failed with transient errors. Perhaps some later retry
# can help, so set transient to True.
raise AccessTokenError(
'Failed to call %s after multiple attempts' % url, transient=True)
def _randint(*args, **kwargs):
"""To be mocked in tests."""
return random.randint(*args, **kwargs)
def _urlfetch(**kwargs):
"""To be mocked in tests."""
return ndb.get_context().urlfetch(**kwargs)
def _memcache_get(*args, **kwargs):
"""To be mocked in tests."""
return ndb.get_context().memcache_get(*args, **kwargs)
def _memcache_set(*args, **kwargs):
"""To be mocked in tests."""
return ndb.get_context().memcache_set(*args, **kwargs)
def _is_json_object(blob):
"""True if blob is valid JSON object, i.e '{...}'."""
try:
return isinstance(json.loads(blob), dict)
except ValueError:
return False
def _log_jwt(email, method, jwt):
"""Logs information about the signed JWT.
Does some minimal validation which fails only if Google backends misbehave,
which should not happen. Logs broken JWTs, assuming they are unusable.
"""
parts = jwt.split('.')
if len(parts) != 3:
logging.error(
'Got broken JWT (not <hdr>.<claims>.<sig>): by=%s method=%s jwt=%r',
email, method, jwt)
raise AccessTokenError('Got broken JWT, see logs')
try:
hdr = _b64_decode(parts[0]) # includes key ID
claims = _b64_decode(parts[1]) # includes scopes and timestamp
sig = parts[2][:12] # only 9 bytes of the signature
except (TypeError, ValueError):
logging.error(
'Got broken JWT (can\'t base64-decode): by=%s method=%s jwt=%r',
email, method, jwt)
raise AccessTokenError('Got broken JWT, see logs')
if not _is_json_object(hdr):
logging.error(
'Got broken JWT (the header is not JSON dict): by=%s method=%s jwt=%r',
email, method, jwt)
raise AccessTokenError('Got broken JWT, see logs')
if not _is_json_object(claims):
logging.error(
'Got broken JWT (claims are not JSON dict): by=%s method=%s jwt=%r',
email, method, jwt)
raise AccessTokenError('Got broken JWT, see logs')
logging.info(
'signed_jwt: by=%s method=%s hdr=%s claims=%s sig_prefix=%s fp=%s',
email, method, hdr, claims, sig, utils.get_token_fingerprint(jwt))
def _b64_encode(data):
return base64.urlsafe_b64encode(data).rstrip('=')
def _b64_decode(data):
mod = len(data) % 4
if mod:
data += '=' * (4 - mod)
return base64.urlsafe_b64decode(data)
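# For example, a 22-character urlsafe blob has len % 4 == 2, so '==' is
# re-appended here before decoding (the encoder above strips that padding).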
## Signers implementation.
class _LocalSigner(object):
"""Knows how to sign JWTs with local private key."""
def __init__(self, service_account_key):
self._key = service_account_key
@property
def email(self):
return self._key.client_email
@ndb.tasklet
def sign_claimset_async(self, claimset):
# Prepare JWT header and claimset as base 64.
header_b64 = _b64_encode(utils.encode_to_json({
'alg': 'RS256',
'kid': self._key.private_key_id,
'typ': 'JWT',
}))
claimset_b64 = _b64_encode(utils.encode_to_json(claimset))
# Sign <header>.<claimset> with account's private key.
signature_b64 = _b64_encode(self._rsa_sign(
'%s.%s' % (header_b64, claimset_b64), self._key.private_key))
jwt = '%s.%s.%s' % (header_b64, claimset_b64, signature_b64)
_log_jwt(self.email, 'local', jwt)
raise ndb.Return(jwt)
@staticmethod
def _rsa_sign(blob, private_key_pem):
"""Byte blob + PEM key => RSA-SHA256 signature byte blob."""
# Lazy import crypto. It is not available in unit tests outside of sandbox.
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
pkey = RSA.importKey(private_key_pem)
return PKCS1_v1_5.new(pkey).sign(SHA256.new(blob))
def _urlfetch_async(**kwargs):
"""To be mocked in tests."""
return ndb.get_context().urlfetch(**kwargs)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.utils.data import get_pkg_data_filename
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.tests.helper import assert_quantity_allclose
CSV_FILE = get_pkg_data_filename('data/binned.csv')
def test_empty_initialization():
ts = BinnedTimeSeries()
ts['time_bin_start'] = Time([1, 2, 3], format='mjd')
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = BinnedTimeSeries()
with pytest.raises(ValueError) as exc:
ts['flux'] = [1, 2, 3]
assert exc.value.args[0] == ("BinnedTimeSeries object is invalid - expected "
"'time_bin_start' as the first column but found 'flux'")
def test_initialization_time_bin_invalid():
# Make sure things crash when time_bin_* is passed incorrectly.
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data=[[1, 4, 3]])
assert exc.value.args[0] == ("'time_bin_start' has not been specified")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31', data=[[1, 4, 3]])
assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified")
def test_initialization_time_bin_both():
# Make sure things crash when time_bin_* is passed twice.
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time_bin_start": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31")
assert exc.value.args[0] == ("'time_bin_start' has been given both in the table "
"and as a keyword argument")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time_bin_size": ["2016-03-22T12:30:31"]},
time_bin_size=[1]*u.s)
assert exc.value.args[0] == ("'time_bin_size' has been given both in the table "
"and as a keyword argument")
def test_initialization_time_bin_size():
# Make sure things crash when time_bin_size has no units
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
time_bin_size=1)
assert exc.value.args[0] == ("'time_bin_size' should be a Quantity or a TimeDelta")
# TimeDelta for time_bin_size
ts = BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
time_bin_size=TimeDelta(1))
assert isinstance(ts.time_bin_size, u.quantity.Quantity)
def test_initialization_time_bin_start_scalar():
# Make sure things crash when time_bin_start is a scalar with no time_bin_size
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=Time(1, format='mjd'),
time_bin_end=Time(1, format='mjd'))
assert exc.value.args[0] == ("'time_bin_start' is scalar, so 'time_bin_size' is required")
def test_initialization_n_bins():
# Make sure things crash with incorrect n_bins
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=Time(1, format='mjd'),
time_bin_size=1*u.s,
time_bin_end=Time(1, format='mjd'),
n_bins=10)
assert exc.value.args[0] == ("'n_bins' has been given and it is not the "
"same length as the input data.")
def test_initialization_non_scalar_time():
# Make sure things crash with incorrect size of time_bin_start
with pytest.raises(ValueError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=["2016-03-22T12:30:31", "2016-03-22T12:30:32"],
time_bin_size=1*u.s,
time_bin_end=Time(1, format='mjd'))
assert exc.value.args[0] == ("Length of 'time_bin_start' (2) should match table length (1)")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=["2016-03-22T12:30:31"],
time_bin_size=None,
time_bin_end=None)
assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified")
def test_even_contiguous():
# Initialize a ``BinnedTimeSeries`` with even contiguous bins by specifying
# the bin width:
ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
time_bin_size=3 * u.s, data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:34.000',
'2016-03-22T12:30:37.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:32.500',
'2016-03-22T12:30:35.500',
'2016-03-22T12:30:38.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:34.000',
'2016-03-22T12:30:37.000',
'2016-03-22T12:30:40.000'])
def test_uneven_contiguous():
# Initialize a ``BinnedTimeSeries`` with uneven contiguous bins by giving an
# end time:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:32',
'2016-03-22T12:30:40'],
time_bin_end='2016-03-22T12:30:55',
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:32.000',
'2016-03-22T12:30:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500',
'2016-03-22T12:30:36.000',
'2016-03-22T12:30:47.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000',
'2016-03-22T12:30:40.000',
'2016-03-22T12:30:55.000'])
def test_uneven_non_contiguous():
# Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins with
# lists of start times, bin sizes and data:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:38',
'2016-03-22T12:34:40'],
time_bin_size=[5, 100, 2]*u.s,
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:38.000',
'2016-03-22T12:34:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:33.500',
'2016-03-22T12:31:28.000',
'2016-03-22T12:34:41.000'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:36.000',
'2016-03-22T12:32:18.000',
'2016-03-22T12:34:42.000'])
def test_uneven_non_contiguous_full():
# Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins by
# specifying the start and end times for the bins:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:33',
'2016-03-22T12:30:40'],
time_bin_end=['2016-03-22T12:30:32',
'2016-03-22T12:30:35',
'2016-03-22T12:30:41'],
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:33.000',
'2016-03-22T12:30:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500',
'2016-03-22T12:30:34.000',
'2016-03-22T12:30:40.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000',
'2016-03-22T12:30:35.000',
'2016-03-22T12:30:41.000'])
def test_read_empty():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, format='csv')
assert exc.value.args[0] == '``time_bin_start_column`` should be provided since the default Table readers are being used.'
def test_read_no_size_end():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', format='csv')
assert exc.value.args[0] == 'Either `time_bin_end_column` or `time_bin_size_column` should be provided.'
def test_read_both_extra_bins():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column='END', time_bin_size_column='bin_size', format='csv')
assert exc.value.args[0] == "Cannot specify both `time_bin_end_column` and `time_bin_size_column`."
def test_read_size_no_unit():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column='bin_size', format='csv')
assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``."
def test_read_start_time_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='abc', time_bin_size_column='bin_size', time_bin_size_unit=u.second, format='csv')
assert exc.value.args[0] == "Bin start time column 'abc' not found in the input data."
def test_read_end_time_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column="missing", format='csv')
assert exc.value.args[0] == "Bin end time column 'missing' not found in the input data."
def test_read_size_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column="missing", time_bin_size_unit=u.second, format='csv')
assert exc.value.args[0] == "Bin size column 'missing' not found in the input data."
def test_read_time_unit_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column="bin_size", format='csv')
assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``."
def test_read():
timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
time_bin_end_column='time_end', format='csv')
assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'bin_size', 'A', 'B', 'C', 'D', 'E', 'F']
assert len(timeseries) == 10
assert timeseries['B'].sum() == 1151.54
timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
time_bin_size_column='bin_size',
time_bin_size_unit=u.second, format='csv')
assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'time_end', 'A', 'B', 'C', 'D', 'E', 'F']
assert len(timeseries) == 10
assert timeseries['B'].sum() == 1151.54
@pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
time_bin_size=3 * u.s, data=[[1, 4, 3], [3, 4, 3]], names=['a', 'b'])
p1 = cls.from_timeseries(ts, 'a')
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time_bin_center.jd)
assert_equal(p1.y, ts['a'])
assert p1.dy is None
p2 = cls.from_timeseries(ts, 'a', uncertainty='b')
assert_quantity_allclose(p2.dy, ts['b'])
p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
|
# -*- coding: utf-8 -*-
##########################################################################
# Copyright 2018 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
from __future__ import print_function
import argparse
import aerospike
from aerospike import exception as as_exceptions
'''
This provides a rough implementation of an expandable list for Aerospike
It utilizes a metadata record to provide information about associated subrecords.
It should be considered roughly accurate, as disruptions and timeouts can
cause stale data and pointers to be referenced.
The rough algorithm to add an item is:
1. Read the top level metadata record to find the subrecord into which an item should be stored.
If this fails, create the top level record.
2. Try to store the items in the appropriate subrecord.
3. If step 2 fails because the subrecord is too big, create a new subrecord with the given items,
and increment the metadata record's reference count.
Caveats:
1. Due to records potentially being stored on different nodes, it is possible that the
metadata record exists, and is reachable, but certain subrecords are not reachable.
2. In case of disruptions it is possible to have a subrecord created and not have a reference
to it stored in the top level metadata record.
3. No ordering can be assumed, as various delays between additions of items may cause them to be
stored out of order.
4. Fetching all subrecords will only fetch subrecords known about at the time of the request. Subrecords created after
the metadata record has been read will not be returned.
5. This design does not facilitate a way to remove items from the conceptual list.
6. This design does not allow for writing a single record larger than the write-block-size.
The top level record is roughly
{
'subrecord_count': #
}
Where # may be greater than, less than, or equal to the actual number of subrecords
each subrecord is roughly:
{
'items': [item10, item2, item5, item0, item100]
}
'''
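# Illustrative key layout for base_key 'person1' with the default bin names
# (values shown are examples only):
#
#   metadata record: ('test', 'demo', 'person1')   -> {'sr_count': 2}
#   subrecord 1:     ('test', 'demo', 'person1-1') -> {'items': [...]}
#   subrecord 2:     ('test', 'demo', 'person1-2') -> {'items': [...]}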
class ASMetadataRecordTooLarge(Exception):
pass
class ClientSideBigList(object):
'''
Abstraction around an unbounded size list for Aerospike. Relies on a top level record
containing metadata about subrecords. When a subrecord fills up, a new subrecord is created
and the metadata is updated.
'''
def __init__(self, client, base_key, ns='test', setname='demo',
subrecourd_count_name='sr_count', subrecord_list_bin='items'):
'''
Args:
client (Aerospike.client): a connected client to be used to talk with the database
base_key (string): The base key around which all record keys will be constructed
if base_key is 'person1', the top record will have the key (ns, set, 'person1')
and subrecords will be of the form (ns, set, 'person1-1'), (ns, set, 'person1-2')...
ns (string): The namespace into which the records will be stored.
setname (string): The set into which the records will be stored.
subrecord_count_name (string): The name of the bin in the metadata record which will store the count
of subrecords.
subrecord_list_bin (string): The name of the list bin in each of the subrecords.
'''
self.ns = ns
self.set = setname
self.client = client
self.base_key = base_key
self.metadata_key = (self.ns, self.set, self.base_key)
# This is the name of the bin in the top record which holds the count of subrecords.
# There are at least this many subrecords, unless the value is 1, in which case there
# may be no subrecords yet.
self.subrecord_count_name = subrecord_count_name
# This is the name of the bin containing items in each of the subrecords
self.subrecord_list_bin = subrecord_list_bin
def get_metadata_record(self):
'''
Fetch the top level record containing metadata about this list.
Returns:
Tuple: The record tuple (key, meta, bins) for the metadata record.
Raises:
RecordNotFound: If the metadata record for this list does not yet exist.
AerospikeError: If the operation fails for any other reason.
'''
return self.client.get(self.metadata_key)
def add_item(self, item):
'''
Add a given item to this conceptual group of lists. If a top level
record has not yet been created, this operation will create it.
Args:
item: The item to be stored into the list.
Raises:
AerospikeError:
If communication with the cluster fails.
'''
try:
_, meta, bins = self.get_metadata_record()
generation = meta['gen']
subrecord_count = bins[self.subrecord_count_name]
subrecord_count = max(1, subrecord_count)  # Ensure the count is at least 1
self._create_or_update_subrecord(item, subrecord_count, generation)
except as_exceptions.RecordNotFound as e:
try:
# If this fails for a reason other than it already existing, error out
self._create_metadata_record()
userkey_1 = self._make_user_key(1)
# Create the first subrecord
self.client.put((self.ns, self.set, userkey_1), {self.subrecord_list_bin: []})
# Metadata record has just been created. Add the first subrecord
self._create_or_update_subrecord(item, 1, 1)
except as_exceptions.RecordExistsError:
# If the metadata record already exists, try to insert into the correct subrecord by recursing.
self.add_item(item)
def get_all_entries(self, extended_search=False):
'''
Get all of the entries from all subrecords flattened and buffered into a list
Args:
extended_search(bool): Whether to attempt to fetch records beyond the count of known subrecords
Returns:
list: A list of all of the items from all subrecords for this record
None: If the metadata record does not exist.
Raises:
AerospikeError:
If any of the fetch operations fail.
'''
try:
_, _, bins = self.get_metadata_record()
min_count = bins[self.subrecord_count_name]
keys = []
for i in range(1, min_count + 1):
key = (
self.ns,
self.set,
self._make_user_key(i)
)
keys.append(key)
subrecords = self.client.get_many(keys)
entries = self._get_items_from_subrecords(subrecords)
# Try to get subrecords beyond the listed amount.
# It is possible but not guaranteed that they exist.
if extended_search:
record_number = min_count + 1
while True:
key = (
self.ns,
self.set,
self._make_user_key(record_number)
)
try:
_, _, sr_bins = self.client.get(key)
entries.extend(sr_bins.get(self.subrecord_list_bin, []))
record_number += 1
except as_exceptions.RecordNotFound:
break
return entries
except as_exceptions.RecordNotFound:
return None
def _make_user_key(self, record_number):
'''
Returns a formatted string to be used as the userkey portion of a key.
Args:
record_number (int): Integer >= 1 specifying which subrecord for which to create a key.
Returns:
string: A formatted string of the form: 'base-#'
'''
return "{}-{}".format(self.base_key, record_number)
def _create_metadata_record(self):
'''
Create the top level information about the key.
Raises:
RecordExistsError:
If the metadata record already exists.
AerospikeError:
If the operation fails for any other reason
'''
# Only create the metadata record if it does not exist
policy = {'exists': aerospike.POLICY_EXISTS_CREATE}
self.client.put(self.metadata_key, {self.subrecord_count_name: 1}, policy=policy)
def _create_or_update_subrecord(self, item, subrecord_number, generation, retries_remaining=3):
'''
Create a new subrecord for the item.
1. Create or append an item to the given specified subrecord
2. Update the top level metadata record to mark this subrecord's existence
If the update causes the specified record to be too large, the operation
is retried with a new subrecord number
Args:
item: The item to be inserted into a subrecord
subrecord_number (int): An integer >= 1 indicating which subrecord to insert into.
generation (int): The generation of the metadata record.
retries_remaining (int): Number of retries remaining for an error caused by inserting
a record which by itself is greater than the write block size.
Raises:
Subclass of AerospikeError if an operation fails for a reason
other than the update causing the record to be too large.
'''
subrecord_userkey = self._make_user_key(subrecord_number)
subrecord_record_key = (self.ns, self.set, subrecord_userkey)
try:
self.client.list_append(
subrecord_record_key, self.subrecord_list_bin, item)
except as_exceptions.RecordTooBig as e:
if retries_remaining == 0:
raise e
# The insert overflowed the size capacity of the record, increment the top level record metadata.
self._update_metadata_record(generation)
self._create_or_update_subrecord(item, subrecord_number + 1, generation, retries_remaining=(retries_remaining - 1))
def _update_metadata_record(self, generation):
'''
Increment the metadata record's count of subrecords. This is only safe to do if the generation of the metadata matches
the expected value. Ignore if this fails.
'''
update_policy = {'gen': aerospike.POLICY_GEN_EQ}
meta = {'gen': generation}
try:
self.client.increment(self.metadata_key, self.subrecord_count_name, 1, meta=meta, policy=update_policy)
except as_exceptions.RecordTooBig:
raise ASMetadataRecordTooLarge
except as_exceptions.RecordGenerationError:
# This means that somebody else has updated the record count already. Don't risk updating again.
pass
def _get_items_from_subrecords(self, subrecords):
'''
Extract only the items from a list of subrecord tuples
given records = [
(key, meta, {'items': [1, 2, 3]}),
(key, meta, {'items': [4, 5, 6]}),
(key, meta, {'items': [7, 8, 9]})
]
returns [1, 2, 3, 4, 5, 6, 7, 8, 9]
Args:
subrecords (list<(k, m, b)>): A list of record tuples.
Returns:
list: A flattened list of items. Containing all items from the subrecords.
'''
entries = []
# If a subrecord was included in the header of the top level record, but the matching subrecord
# was not found, ignore it.
for _, _, sr_bins in subrecords:
if sr_bins:
entries.extend(sr_bins[self.subrecord_list_bin])
return entries
def subrecord_iterator(self):
_, _, bins = self.get_metadata_record()
count = bins[self.subrecord_count_name]
def sr_iter():
'''
Generator which fetches one subrecord at a time, and yields it to caller
'''
for i in range(1, count + 1):
key = (
self.ns,
self.set,
self._make_user_key(i)
)
try:
yield self.client.get(key)
except as_exceptions.RecordNotFound:
continue
# Instantiate the generator and return it.
return sr_iter()
def main():
'''
Simple tests demonstrating the functionality.
If the database is set up with a small enough write block-size, several subrecords
will be created.
'''
optparser = argparse.ArgumentParser()
optparser.add_argument(
"--host", type=str, default="127.0.0.1", metavar="<ADDRESS>",
help="Address of Aerospike server.")
optparser.add_argument(
"--port", type=int, default=3000, metavar="<PORT>",
help="Port of the Aerospike server.")
optparser.add_argument(
"--namespace", type=str, default="test", metavar="<NS>",
help="Namespace to use for this example")
optparser.add_argument(
"-s", "--set", type=str, default="demo", metavar="<SET>",
help="Set to use for this example")
optparser.add_argument(
"-i", "--items", type=int, default=1000, metavar="<ITEMS>",
help="Number of items to store into the big list")
options = optparser.parse_args()
print(options)
client = aerospike.client({'hosts': [(options.host, options.port)]}).connect()
ldt = ClientSideBigList(client, 'person1_friends')
for i in range(options.items):
# Store a reasonably large item
ldt.add_item('friend{}'.format(i) * 100)
print("Stored {} items".format(options.items))
items = ldt.get_all_entries()
_, _, bins = ldt.get_metadata_record()
print(bins)
print("Known subrecord count is: {}".format(bins['sr_count']))
print("Fetched {} items:".format(len(items)))
count = 0
for sr in ldt.subrecord_iterator():
if sr:
count = count + 1
print("Records yielded: {}".format(count))
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.translate_v3.types import translation_service
from google.longrunning import operations_pb2 # type: ignore
from .base import TranslationServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import TranslationServiceGrpcTransport
class TranslationServiceGrpcAsyncIOTransport(TranslationServiceTransport):
"""gRPC AsyncIO backend transport for TranslationService.
Provides natural language translation operations.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "translate.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "translate.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def translate_text(
self,
) -> Callable[
[translation_service.TranslateTextRequest],
Awaitable[translation_service.TranslateTextResponse],
]:
r"""Return a callable for the translate text method over gRPC.
Translates input text and returns translated text.
Returns:
Callable[[~.TranslateTextRequest],
Awaitable[~.TranslateTextResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "translate_text" not in self._stubs:
self._stubs["translate_text"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/TranslateText",
request_serializer=translation_service.TranslateTextRequest.serialize,
response_deserializer=translation_service.TranslateTextResponse.deserialize,
)
return self._stubs["translate_text"]
@property
def detect_language(
self,
) -> Callable[
[translation_service.DetectLanguageRequest],
Awaitable[translation_service.DetectLanguageResponse],
]:
r"""Return a callable for the detect language method over gRPC.
Detects the language of text within a request.
Returns:
Callable[[~.DetectLanguageRequest],
Awaitable[~.DetectLanguageResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "detect_language" not in self._stubs:
self._stubs["detect_language"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/DetectLanguage",
request_serializer=translation_service.DetectLanguageRequest.serialize,
response_deserializer=translation_service.DetectLanguageResponse.deserialize,
)
return self._stubs["detect_language"]
@property
def get_supported_languages(
self,
) -> Callable[
[translation_service.GetSupportedLanguagesRequest],
Awaitable[translation_service.SupportedLanguages],
]:
r"""Return a callable for the get supported languages method over gRPC.
Returns a list of supported languages for
translation.
Returns:
Callable[[~.GetSupportedLanguagesRequest],
Awaitable[~.SupportedLanguages]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_supported_languages" not in self._stubs:
self._stubs["get_supported_languages"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/GetSupportedLanguages",
request_serializer=translation_service.GetSupportedLanguagesRequest.serialize,
response_deserializer=translation_service.SupportedLanguages.deserialize,
)
return self._stubs["get_supported_languages"]
@property
def translate_document(
self,
) -> Callable[
[translation_service.TranslateDocumentRequest],
Awaitable[translation_service.TranslateDocumentResponse],
]:
r"""Return a callable for the translate document method over gRPC.
Translates documents in synchronous mode.
Returns:
Callable[[~.TranslateDocumentRequest],
Awaitable[~.TranslateDocumentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "translate_document" not in self._stubs:
self._stubs["translate_document"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/TranslateDocument",
request_serializer=translation_service.TranslateDocumentRequest.serialize,
response_deserializer=translation_service.TranslateDocumentResponse.deserialize,
)
return self._stubs["translate_document"]
@property
def batch_translate_text(
self,
) -> Callable[
[translation_service.BatchTranslateTextRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch translate text method over gRPC.
Translates a large volume of text in asynchronous
batch mode. This function provides real-time output as
the inputs are being processed. If the caller cancels a
request, the partial results (for an input file, it's
all or nothing) may still be available on the specified
output location.
This call returns immediately and you can
use google.longrunning.Operation.name to poll the status
of the call.
Returns:
Callable[[~.BatchTranslateTextRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_translate_text" not in self._stubs:
self._stubs["batch_translate_text"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/BatchTranslateText",
request_serializer=translation_service.BatchTranslateTextRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_translate_text"]
@property
def batch_translate_document(
self,
) -> Callable[
[translation_service.BatchTranslateDocumentRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch translate document method over gRPC.
Translates a large volume of documents in asynchronous
batch mode. This function provides real-time output as
the inputs are being processed. If the caller cancels a
request, the partial results (for an input file, it's
all or nothing) may still be available on the specified
output location.
This call returns immediately and you can use
google.longrunning.Operation.name to poll the status of
the call.
Returns:
Callable[[~.BatchTranslateDocumentRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_translate_document" not in self._stubs:
self._stubs["batch_translate_document"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/BatchTranslateDocument",
request_serializer=translation_service.BatchTranslateDocumentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_translate_document"]
@property
def create_glossary(
self,
) -> Callable[
[translation_service.CreateGlossaryRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create glossary method over gRPC.
Creates a glossary and returns the long-running operation.
Returns NOT_FOUND, if the project doesn't exist.
Returns:
Callable[[~.CreateGlossaryRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_glossary" not in self._stubs:
self._stubs["create_glossary"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/CreateGlossary",
request_serializer=translation_service.CreateGlossaryRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_glossary"]
@property
def list_glossaries(
self,
) -> Callable[
[translation_service.ListGlossariesRequest],
Awaitable[translation_service.ListGlossariesResponse],
]:
r"""Return a callable for the list glossaries method over gRPC.
Lists glossaries in a project. Returns NOT_FOUND, if the project
doesn't exist.
Returns:
Callable[[~.ListGlossariesRequest],
Awaitable[~.ListGlossariesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_glossaries" not in self._stubs:
self._stubs["list_glossaries"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/ListGlossaries",
request_serializer=translation_service.ListGlossariesRequest.serialize,
response_deserializer=translation_service.ListGlossariesResponse.deserialize,
)
return self._stubs["list_glossaries"]
@property
def get_glossary(
self,
) -> Callable[
[translation_service.GetGlossaryRequest],
Awaitable[translation_service.Glossary],
]:
r"""Return a callable for the get glossary method over gRPC.
Gets a glossary. Returns NOT_FOUND, if the glossary doesn't
exist.
Returns:
Callable[[~.GetGlossaryRequest],
Awaitable[~.Glossary]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_glossary" not in self._stubs:
self._stubs["get_glossary"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/GetGlossary",
request_serializer=translation_service.GetGlossaryRequest.serialize,
response_deserializer=translation_service.Glossary.deserialize,
)
return self._stubs["get_glossary"]
@property
def delete_glossary(
self,
) -> Callable[
[translation_service.DeleteGlossaryRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete glossary method over gRPC.
Deletes a glossary, or cancels glossary construction if the
glossary isn't created yet. Returns NOT_FOUND, if the glossary
doesn't exist.
Returns:
Callable[[~.DeleteGlossaryRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_glossary" not in self._stubs:
self._stubs["delete_glossary"] = self.grpc_channel.unary_unary(
"/google.cloud.translation.v3.TranslationService/DeleteGlossary",
request_serializer=translation_service.DeleteGlossaryRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_glossary"]
def close(self):
return self.grpc_channel.close()
__all__ = ("TranslationServiceGrpcAsyncIOTransport",)
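# A minimal usage sketch (not part of the generated module). The transport is
# usually constructed indirectly by the async client; the explicit form below
# assumes application default credentials are available:
#
#   from google.cloud import translate_v3
#   client = translate_v3.TranslationServiceAsyncClient(transport="grpc_asyncio")
#
#   # or, passing an explicitly constructed transport instance:
#   transport = TranslationServiceGrpcAsyncIOTransport(host="translate.googleapis.com")
#   client = translate_v3.TranslationServiceAsyncClient(transport=transport)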
|
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Name: finance
# Purpose: International identifiers for tradable financial assets
#
# Author: Michael Amrhein (mamrhein@users.sourceforge.net)
#
# Copyright: (c) 2016 Michael Amrhein
# License: This program is part of a larger application. For license
# details please read the file LICENSE.TXT provided together
# with the application.
# ---------------------------------------------------------------------------
# $Source$
# $Revision$
"""International identifiers for tradable financial assets"""
# standard library imports
from string import ascii_uppercase, digits
from typing import Tuple
# third-party imports
from iso3166 import countries
# local imports
from .identifier import Identifier
from .luhn import luhn
from .micutils import get_mic_record
_ALPHABET = digits + ascii_uppercase
class MIC(Identifier):
"""Market Identifier Code
A unique identification code used to identify securities trading
exchanges, regulated and non-regulated trading markets.
Each MIC is a four alpha character code, defined in ISO 10383.
"""
__slots__ = ()
# noinspection PyMissingConstructor
def __init__(self, mic: str) -> None:
"""
Args:
mic (str): string representation of the MIC
Returns:
:class:`MIC` instance
Raises:
TypeError: given `mic` is not an instance of str
ValueError: given `mic` not found in the registry
"""
if not isinstance(mic, str):
raise TypeError("Argument must be instance of 'str'.")
mic = mic.strip()
try:
get_mic_record(mic)
except KeyError:
raise ValueError(f"Unknown MIC: '{mic}'.")
self._id = mic
def __str__(self) -> str:
"""str(self)"""
return self._id
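# A minimal sketch of MIC validation ('XNYS', the ISO 10383 code for the New York
# Stock Exchange, is used purely as a well-known illustration):
#
#   mic = MIC('XNYS')
#   assert str(mic) == 'XNYS'
#   MIC('ZZZZ')    # raises ValueError for a code not in the registry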
class ISIN(Identifier):
"""International Securities Identification Number
An International Securities Identification Number uniquely identifies a
tradable financial asset, a.k.a security.
As defined in ISO 6166, each ISIN consists of a two-letter ISO 3166-1
Country Code for the issuing country, followed by nine alpha-numeric
characters (the National Securities Identifying Number, or NSIN, which
identifies the security), and one numerical check digit, calculated by the
Luhn algorithm.
"""
__slots__ = ()
@staticmethod
def calc_check_digit(country_code: str, nsin: str) -> str:
"""Calculate ISIN check digit."""
return str(luhn(country_code + nsin))
@property
def country_code(self) -> str:
"""Return the ISIN's Country Code."""
return self._id[:2]
@property
def check_digit(self) -> str:
"""Return the ISIN's check digits."""
return self._id[-1]
@property
def nsin(self) -> str:
"""Return the ISIN's National Securities Identifying Number."""
return self._id[2:-1]
def elements(self) -> Tuple[str, str, str]:
"""Return the ISIN's Country Code, National Securities Identifying
Number and check digit as tuple."""
return self.country_code, self.nsin, self.check_digit
# noinspection PyMissingConstructor
def __init__(self, *args: str) -> None:
"""Instances of :class:`ISIN` can be created in two ways, by providing
a Unicode string representation of an ISIN or by providing a country
code and a national securities identifying number.
**1. Form**
Args:
isin (str): string representation of an ISIN
Returns:
instance of :class:`ISIN`
Raises:
TypeError: given `isin` is not a `Unicode string`
ValueError: given `isin` contains an unknown country code
ValueError: given `isin` contains a wrong check digit
ValueError: given `isin` must be 12 characters long
ValueError: given `isin` contains invalid character(s)
**2. Form**
Args:
country_code (str): 2-character country code
according to ISO 3166
nsin (str): national securities identifying
number
Returns:
instance of :class:`ISIN`
Raises:
TypeError: invalid number of arguments
TypeError: given `country_code` is not a `Unicode string`
ValueError: given `country_code` contains an invalid or unknown
country code
TypeError: given `nsin` is not a `Unicode string`
ValueError: length of given `nsin` not valid
ValueError: given `nsin` contains invalid character(s)
"""
n_args = len(args)
if n_args == 1:
arg0 = args[0]
if not isinstance(arg0, str):
raise TypeError("Argument must be instance of 'str'.")
arg0 = arg0.strip()
if len(arg0) != 12:
raise ValueError('Invalid ISIN format: '
'given string must be 12 characters long.')
country_code = arg0[:2]
try:
countries.get(country_code)
except KeyError:
raise ValueError(f"Unknown country code: '{country_code}'.")
nsin = arg0[2:-1]
check_digit = self.__class__.calc_check_digit(country_code, nsin)
if check_digit != arg0[-1]:
raise ValueError("Wrong check digit; should be "
f"'{check_digit}'.")
self._id = arg0
elif n_args == 2:
arg0 = args[0]
if not isinstance(arg0, str):
raise TypeError("Country code must be instance of 'str'.")
if len(arg0) != 2:
raise ValueError("Country code must be a 2-character string.")
country_code = arg0
try:
countries.get(country_code)
except KeyError:
raise ValueError(f"Unknown country code: '{country_code}'.")
arg1 = args[1]
if isinstance(arg1, str):
len_nsin = len(arg1)
if len_nsin == 9:
nsin = arg1
elif 6 <= len_nsin < 9:
nsin = arg1.rjust(9, '0')
else:
raise ValueError("Given NSIN must contain between 6 and 9"
" characters.")
else:
raise TypeError("Given nsin must be instance of 'str'.")
check_digit = self.__class__.calc_check_digit(country_code, nsin)
self._id = ''.join((country_code, nsin, check_digit))
else:
raise TypeError('Invalid number of arguments.')
def __str__(self) -> str:
"""str(self)"""
return self._id
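# A minimal sketch of the two construction forms described above (the ISIN
# 'US0378331005' is a publicly known identifier, used here only to illustrate
# the check-digit arithmetic):
#
#   isin = ISIN('US', '037833100')          # country code + NSIN; check digit computed
#   assert str(isin) == 'US0378331005'
#   assert isin.elements() == ('US', '037833100', '5')
#   ISIN('US0378331005')                    # full string form; check digit verified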
|
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401
import distutils.dir_util
import optparse
import os
import shutil
import sys
import zipfile
from common_function import RemoveUnusedFilesInReleaseMode
from xml.dom.minidom import Document
XWALK_CORE_SHELL_APK = 'xwalk_core_shell_apk'
def AddGeneratorOptions(option_parser):
option_parser.add_option('-s', dest='source',
help='Source directory of project root.',
type='string')
option_parser.add_option('-t', dest='target',
help='Product out target directory.',
type='string')
option_parser.add_option('--shared', action='store_true',
default=False,
help='Generate shared library', )
option_parser.add_option('--src-package', action='store_true',
default=False,
help='Use java sources instead of java libs.')
option_parser.add_option('--use-lzma', action='store_true',
default=False,
help='Use LZMA compress native library when specified')
def CleanLibraryProject(out_project_dir):
if os.path.exists(out_project_dir):
for item in os.listdir(out_project_dir):
sub_path = os.path.join(out_project_dir, item)
if os.path.isdir(sub_path):
shutil.rmtree(sub_path)
elif os.path.isfile(sub_path):
os.remove(sub_path)
def CopyProjectFiles(project_source, out_project_dir, shared):
print('Copying library project files...')
if shared :
template_dir = os.path.join(project_source, 'xwalk', 'build', 'android',
'xwalk_shared_library_template')
else :
template_dir = os.path.join(project_source, 'xwalk', 'build', 'android',
'xwalkcore_library_template')
files_to_copy = [
# AndroidManifest.xml from template.
'AndroidManifest.xml',
# Eclipse project properties from template.
'project.properties',
# Ant build file.
'build.xml',
# Ant properties file.
'ant.properties',
]
for f in files_to_copy:
source_file = os.path.join(template_dir, f)
target_file = os.path.join(out_project_dir, f)
shutil.copy2(source_file, target_file)
def CopyJSBindingFiles(project_source, out_project_dir):
print('Copying js binding files...')
jsapi_dir = os.path.join(out_project_dir, 'res', 'raw')
if not os.path.exists(jsapi_dir):
os.makedirs(jsapi_dir)
jsfiles_to_copy = [
'xwalk/experimental/launch_screen/launch_screen_api.js',
'xwalk/experimental/presentation/presentation_api.js',
'xwalk/runtime/android/core_internal/src/org/xwalk/core/'
+ 'internal/extension/api/contacts/contacts_api.js',
'xwalk/runtime/android/core_internal/src/org/xwalk/core/'
+ 'internal/extension/api/device_capabilities/device_capabilities_api.js',
'xwalk/runtime/android/core_internal/src/org/xwalk/core/'
+ 'internal/extension/api/messaging/messaging_api.js',
'xwalk/experimental/wifidirect/wifidirect_api.js'
]
# Copy JS binding file to assets/jsapi folder.
for jsfile in jsfiles_to_copy:
source_file = os.path.join(project_source, jsfile)
target_file = os.path.join(jsapi_dir, os.path.basename(source_file))
shutil.copyfile(source_file, target_file)
def CopyBinaries(out_dir, out_project_dir, src_package, shared):
# Copy jar files to libs.
libs_dir = os.path.join(out_project_dir, 'libs')
if not os.path.exists(libs_dir):
os.mkdir(libs_dir)
if shared:
libs_to_copy = ['xwalk_core_library_java_app_part.jar']
elif src_package:
libs_to_copy = ['jsr_305_javalib.jar', ]
else:
libs_to_copy = ['xwalk_core_library_java.jar', ]
for lib in libs_to_copy:
source_file = os.path.join(out_dir, 'lib.java', lib)
target_file = os.path.join(libs_dir, lib)
shutil.copyfile(source_file, target_file)
if shared:
return
print('Copying binaries...')
# Copy assets.
res_raw_dir = os.path.join(out_project_dir, 'res', 'raw')
res_value_dir = os.path.join(out_project_dir, 'res', 'values')
if not os.path.exists(res_raw_dir):
os.mkdir(res_raw_dir)
if not os.path.exists(res_value_dir):
os.mkdir(res_value_dir)
paks_to_copy = [
'icudtl.dat',
# Please refer to XWALK-3516, disable v8 use external startup data,
# reopen it if needed later.
# 'natives_blob.bin',
# 'snapshot_blob.bin',
'xwalk.pak',
]
pak_list_xml = Document()
resources_node = pak_list_xml.createElement('resources')
string_array_node = pak_list_xml.createElement('string-array')
string_array_node.setAttribute('name', 'xwalk_resources_list')
pak_list_xml.appendChild(resources_node)
resources_node.appendChild(string_array_node)
for pak in paks_to_copy:
source_file = os.path.join(out_dir, pak)
target_file = os.path.join(res_raw_dir, pak)
shutil.copyfile(source_file, target_file)
item_node = pak_list_xml.createElement('item')
item_node.appendChild(pak_list_xml.createTextNode(pak))
string_array_node.appendChild(item_node)
pak_list_file = open(os.path.join(res_value_dir,
'xwalk_resources_list.xml'), 'w')
pak_list_xml.writexml(pak_list_file, newl='\n', encoding='utf-8')
pak_list_file.close()
# Copy native libraries.
source_dir = os.path.join(out_dir, XWALK_CORE_SHELL_APK, 'libs')
distutils.dir_util.copy_tree(source_dir, libs_dir)
def CopyDirAndPrefixDuplicates(input_dir, output_dir, prefix, blacklist=None):
""" Copy the files into the output directory. If one file in input_dir folder
doesn't exist, copy it directly. If a file exists, copy it and rename the
file so that the resources won't be overrided. So all of them could be
packaged into the xwalk core library.
"""
blacklist = blacklist or []
for root, _, files in os.walk(input_dir):
for f in files:
if f in blacklist:
continue
src_file = os.path.join(root, f)
relative_path = os.path.relpath(src_file, input_dir)
target_file = os.path.join(output_dir, relative_path)
target_dir_name = os.path.dirname(target_file)
if not os.path.exists(target_dir_name):
os.makedirs(target_dir_name)
# If the file exists, copy it and rename it with another name to
# avoid overwriting the existing one.
if os.path.exists(target_file):
target_base_name = os.path.basename(target_file)
target_base_name = prefix + '_' + target_base_name
target_file = os.path.join(target_dir_name, target_base_name)
shutil.copyfile(src_file, target_file)
def MoveImagesToNonMdpiFolders(res_root):
"""Move images from drawable-*-mdpi-* folders to drawable-* folders.
Why? http://crbug.com/289843
Copied from build/android/gyp/package_resources.py.
"""
for src_dir_name in os.listdir(res_root):
src_components = src_dir_name.split('-')
if src_components[0] != 'drawable' or 'mdpi' not in src_components:
continue
src_dir = os.path.join(res_root, src_dir_name)
if not os.path.isdir(src_dir):
continue
dst_components = [c for c in src_components if c != 'mdpi']
assert dst_components != src_components
dst_dir_name = '-'.join(dst_components)
dst_dir = os.path.join(res_root, dst_dir_name)
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
for src_file_name in os.listdir(src_dir):
if not src_file_name.endswith('.png'):
continue
src_file = os.path.join(src_dir, src_file_name)
dst_file = os.path.join(dst_dir, src_file_name)
assert not os.path.lexists(dst_file)
shutil.move(src_file, dst_file)
def ReplaceCrunchedImage(project_source, filename, filepath):
"""Replace crunched images with source images.
"""
search_dir = [
'components/web_contents_delegate_android/android/java/res',
'content/public/android/java/res',
'ui/android/java/res'
]
pathname = os.path.basename(filepath)
#replace crunched 9-patch image resources.
for search in search_dir:
absdir = os.path.join(project_source, search)
for dirname, _, files in os.walk(absdir):
if filename in files:
relativedir = os.path.basename(dirname)
if (pathname == 'drawable' and relativedir == 'drawable-mdpi') or \
relativedir == pathname:
source_file = os.path.abspath(os.path.join(dirname, filename))
target_file = os.path.join(filepath, filename)
shutil.copyfile(source_file, target_file)
return
def CopyResources(project_source, out_dir, out_project_dir, shared):
print('Copying resources...')
res_dir = os.path.join(out_project_dir, 'res')
temp_dir = os.path.join(out_project_dir, 'temp')
if os.path.exists(res_dir):
shutil.rmtree(res_dir)
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
# All resources should be in specific folders in res_directory.
# Since there might be some resource files with same names from
# different folders like ui_java, content_java and others,
# it's necessary to rename some files to avoid overriding.
res_to_copy = [
# zip file list
'content_java.zip',
'content_strings_grd.zip',
'ui_java.zip',
'ui_strings_grd.zip',
'web_contents_delegate_android_java.zip',
'xwalk_core_internal_java.zip',
'xwalk_core_java.zip',
'xwalk_core_strings.zip',
'xwalk_app_strings.zip'
]
for res_zip in res_to_copy:
zip_file = os.path.join(out_dir, 'res.java', res_zip)
zip_name = os.path.splitext(res_zip)[0]
if not os.path.isfile(zip_file):
raise Exception('Resource zip not found: ' + zip_file)
subdir = os.path.join(temp_dir, zip_name)
if os.path.isdir(subdir):
raise Exception('Resource zip name conflict: ' + zip_name)
os.makedirs(subdir)
with zipfile.ZipFile(zip_file) as z:
z.extractall(path=subdir)
CopyDirAndPrefixDuplicates(subdir, res_dir, zip_name,
blacklist=['OWNERS'])
MoveImagesToNonMdpiFolders(res_dir)
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
# Search for 9-patch images, then replace them with the uncrunched images.
for dirname, _, files in os.walk(res_dir):
for filename in files:
if filename.endswith('.9.png'):
ReplaceCrunchedImage(project_source, filename, dirname)
def main(argv):
option_parser = optparse.OptionParser()
AddGeneratorOptions(option_parser)
options, _ = option_parser.parse_args(argv)
if not os.path.exists(options.source):
print('Source project does not exist, please provide correct directory.')
sys.exit(1)
out_dir = options.target
if options.src_package:
if options.shared :
out_project_dir = os.path.join(out_dir, 'xwalk_shared_library_src')
else :
out_project_dir = os.path.join(out_dir, 'xwalk_core_library_src')
else:
if options.shared :
out_project_dir = os.path.join(out_dir, 'xwalk_shared_library')
else :
out_project_dir = os.path.join(out_dir, 'xwalk_core_library')
# Clean directory for project first.
CleanLibraryProject(out_project_dir)
if not os.path.exists(out_project_dir):
os.mkdir(out_project_dir)
# Copy Eclipse project files of library project.
CopyProjectFiles(options.source, out_project_dir, options.shared)
# Copy binaries and resources.
CopyResources(options.source, out_dir, out_project_dir, options.shared)
CopyBinaries(out_dir, out_project_dir, options.src_package, options.shared)
# Copy JS API binding files.
CopyJSBindingFiles(options.source, out_project_dir)
# Remove unused files.
mode = os.path.basename(os.path.normpath(out_dir))
RemoveUnusedFilesInReleaseMode(mode,
os.path.join(out_project_dir, 'libs'))
# Create empty src directory
src_dir = os.path.join(out_project_dir, 'src')
if not os.path.isdir(src_dir):
os.mkdir(src_dir)
readme = os.path.join(src_dir, 'README.md')
open(readme, 'w').write(
"# Source folder for xwalk library\n"
"## Why it's empty\n"
"xwalk library doesn't contain java sources.\n"
"## Why put me here\n"
"To make archives keep the folder, "
"the src directory is needed to build an apk by ant.")
print('Your Android library project has been created at %s' % out_project_dir)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
"""Numerically integrate the dynamics of a hierarchical triple."""
import json
import time
from math import sqrt, cos, sin, pi, acos
import numpy as np
from scipy.integrate import ode
from ._kozai_constants import au
from ._kozai_constants import c
from ._kozai_constants import G
from ._kozai_constants import M_sun
from ._kozai_constants import R_sun
from ._kozai_constants import yr2s
class TripleDelaunay:
"""Evolve a hierarchical triple using the Delaunay orbital elements.
This class handles triples in which all objects are massive. To integrate
in the test particle approximation use the Triple_vector class.
Args:
a1: Semi-major axis of inner binary in AU.
a2: Semi-major axis of outer binary in AU.
e1: Eccentricity of inner binary.
e2: Eccentricity of outer binary.
inc: Inclination between inner and outer binaries in degrees.
g1: Argument of periapsis of the inner binary in degrees.
g2: Argument of periapsis of the outer binary in degrees.
m1: Mass of component 1 of the inner binary in solar masses.
m2: Mass of component 2 of the inner binary in solar masses.
m3: Mass of the tertiary in solar masses.
r1: Radius of component 1 of the inner binary in solar radii.
r2: Radius of component 2 of the inner binary in solar radii.
Attributes:
tstop: The time to integrate in years.
cputstop: The wall time to integrate in seconds.
outfreq: The number of steps between saving output.
atol: Absolute tolerance of the integrator.
rtol: Relative tolerance of the integrator.
quadrupole: Toggle the quadrupole term.
octupole: Toggle the octupole term.
hexadecapole: Toggle the hexadecapole term.
gr: Toggle GR effects.
algo: Set the integration algorithm (see the scipy.ode docs).
"""
def __init__(
self,
a1=1,
a2=20,
e1=0.1,
e2=0.3,
inc=80,
g1=0,
g2=0,
m1=1.0,
m2=1.0,
m3=1.0,
r1=0,
r2=0,
):
self._H = None
self.a1 = a1
self.a2 = a2
self.e1 = e1
self.e2 = e2
self.g1 = g1
self.g2 = g2
self.m1 = m1
self.m2 = m2
self.m3 = m3
self.r1 = r1
self.r2 = r2
self.inc = inc
self.t = 0
# Default integrator parameters
self.tstop = None
self.cputstop = 300
self.outfreq = 1
self.atol = 1e-9
self.rtol = 1e-9
self.quadrupole = True
self.octupole = True
self.hexadecapole = False
self.gr = False
self.algo = 'vode'
self.maxoutput = int(1e6)
self.collision = False
# Store the initial state
self.save_as_initial()
# Unit conversions & variable definitions.
#
# Properties beginning with an underscore are stored in radians or SI
# units. Most calculations are much easier when done in SI, but it is
# inconvenient for the user to deal with SI units. Thus, the properties
# can be set using AU, M_sun, degrees, yr, or whatever other units are
# appropriate.
# Times
@property
def t(self):
"""Time in yr"""
return self._t / yr2s
@t.setter
def t(self, val):
"""Set time in yr"""
self._t = val * yr2s
# Masses
@property
def m1(self):
"""m1 in solar masses"""
return self._m1 / M_sun
@m1.setter
def m1(self, val):
"""Set m1 in solar masses"""
if self._H is not None:
inc = self.inc
self._m1 = val * M_sun
if self._H is not None:
self.inc = inc # Reset the total ang. momentum.
@property
def m2(self):
"""m2 in solar masses"""
return self._m2 / M_sun
@m2.setter
def m2(self, val):
"""Set m2 in solar masses"""
if self._H is not None:
inc = self.inc
self._m2 = val * M_sun
if self._H is not None:
self.inc = inc # Reset the total ang. momentum.
@property
def m3(self):
"""m3 in solar masses"""
return self._m3 / M_sun
@m3.setter
def m3(self, val):
"""Set m3 in solar masses"""
if self._H is not None:
inc = self.inc
self._m3 = val * M_sun
if self._H is not None:
self.inc = inc # Reset the total ang. momentum.
# Distances
@property
def a1(self):
"""a1 in AU"""
return self._a1 / au
@a1.setter
def a1(self, val):
"""Set a1 in AU"""
if self._H is not None:
inc = self.inc
self._a1 = val * au
if self._H is not None:
self.inc = inc # Reset the total ang. momentum.
@property
def a2(self):
"""a2 in AU"""
return self._a2 / au
@a2.setter
def a2(self, val):
"""Set a2 in AU"""
if self._H is not None:
inc = self.inc
self._a2 = val * au
if self._H is not None:
self.inc = inc # Reset the total ang. momentum.
@property
def r1(self):
"""r1 in R_sun"""
return self._r1 / R_sun
@r1.setter
def r1(self, val):
"""r1 in R_sun"""
self._r1 = val * R_sun
@property
def r2(self):
"""r2 in R_sun"""
return self._r2 / R_sun
@r2.setter
def r2(self, val):
"""r2 in R_sun"""
self._r2 = val * R_sun
# Angles
@property
def g1(self):
"""g1 in degrees"""
return self._g1 * 180 / pi
@g1.setter
def g1(self, val):
self._g1 = val * pi / 180
@property
def g2(self):
"""g2 in degrees"""
return self._g2 * 180 / pi
@g2.setter
def g2(self, val):
self._g2 = val * pi / 180
@property
def cosphi(self):
"""Calculate the angle between the arguments of periapsis.
See Eq. 23 of Blaes et al. (2002).
"""
return -cos(self._g1) * cos(self._g2) - self.th * sin(self._g1) * sin(
self._g2
)
@property
def th(self):
"""Calculate the cosine of the inclination.
See Eq. 22 of Blaes et al. (2002).
"""
return (self._H**2 - self._G1**2 - self._G2**2) / (
2 * self._G1 * self._G2
)
@property
def _inc(self):
"""The mutual inclination in radians."""
return acos(self.th)
@property
def inc(self):
"""Calculate the mutual inclination."""
return self._inc * 180 / pi
@inc.setter
def inc(self, val):
"""Set the inclination.
This really sets _H and inc is recalculated.
"""
self._H = sqrt(
self._G1**2
+ self._G2**2
+ 2 * self._G1 * self._G2 * cos(val * pi / 180)
)
# Angular momenta
@property
def _G1(self):
"""Calculate G1. See Eq. 6 of Blaes et al. (2002)."""
return (
self._m1
* self._m2
* sqrt(G * self._a1 * (1 - self.e1**2) / (self._m1 + self._m2))
)
@property
def _G2(self):
"""Calculate G2. See Eq. 7 of Blaes et al. (2002)."""
return (
(self._m1 + self._m2)
* self._m3
* sqrt(
G
* self._a2
* (1 - self.e2**2)
/ (self._m1 + self._m2 + self._m3)
)
)
# Energies
@property
def C2(self):
"""Calculate C2. See Eq. 18 of Blaes et al., (2002)."""
return (
G
* self._m1
* self._m2
* self._m3
/ (
16
* (self._m1 + self._m2)
* self._a2
* (1 - self.e2**2)**(3.0 / 2)
)
* (self._a1 / self._a2)**2
)
@property
def C3(self):
"""Calculate C3. See Eq. 19 of Blaes et al., (2002)."""
return (
15
* G
* self._m1
* self._m2
* self._m3
* (self._m1 - self._m2)
/ (
64
* (self._m1 + self._m2)**2
* self._a2
* (1 - self.e2**2)**(5.0 / 2)
)
* (self._a1 / self._a2)**3
)
# Other parameters
@property
def epsoct(self):
return self.e2 / (1 - self.e2**2) * (self.a1 / self.a2)
@property
def Th(self):
"""Calculate Kozai's integral."""
return (1 - self.e1**2) * cos(self._inc)**2
@property
def CKL(self):
"""Calculate the libration constant."""
return self.e1**2 * (
1 - 5.0 / 2 * sin(self._inc)**2 * sin(self._g1)**2
)
@property
def Hhatquad(self):
"""The normalized quadrupole term of the Hamiltonian"""
return (2 + 3 * self.e1**2) * (
1 - 3 * self.th**2
) - 15 * self.e1**2 * (1 - self.th**2) * cos(2 * self._g1)
@property
def outfreq(self):
return self._outfreq
@outfreq.setter
def outfreq(self, val):
self._outfreq = int(val)
def save_as_initial(self):
"""Set the current parameters as the initial parameters."""
self.initial_state = {}
self.initial_state['a1'] = self.a1
self.initial_state['a2'] = self.a2
self.initial_state['e1'] = self.e1
self.initial_state['e2'] = self.e2
self.initial_state['g1'] = self.g1
self.initial_state['g2'] = self.g2
self.initial_state['m1'] = self.m1
self.initial_state['m2'] = self.m2
self.initial_state['m3'] = self.m3
self.initial_state['r1'] = self.r1
self.initial_state['r2'] = self.r2
self.initial_state['inc'] = self.inc
# Integration routines
def _deriv(self, t, y):
"""The EOMs. See Eqs. 11 -- 17 of Blaes et al. (2002)."""
# Unpack the values.
a1, e1, g1, e2, g2, H = y
# Calculate trig functions only once.
sing1 = sin(g1)
sing2 = sin(g2)
cosg1 = cos(g1)
cosg2 = cos(g2)
m1 = self._m1
m2 = self._m2
m3 = self._m3
a2 = self._a2
# TODO
# Are these necessary now that we are calculating them dynamically?
G1 = m1 * m2 * sqrt(G * a1 * (1 - e1**2) / (m1 + m2))
G2 = (m1 + m2) * m3 * sqrt(G * a2 * (1 - e2**2) / (m1 + m2 + m3))
# Python's `black` is very bad for long equations, so we temporarily
# disable it here.
# fmt: off
C2 = (
G * m1 * m2 * m3 /
(16 * (m1 + m2) * a2 * (1 - e2**2)**(3.0 / 2)) * (a1 / a2)**2
)
C3 = (
15 * G * m1 * m2 * m3 * (m2 - m1) /
(64 * (m1 + m2)**2 * a2 * (1 - e2**2)**(5.0 / 2)) * (a1 / a2)**3
)
th = (H**2 - G1**2 - G2**2) / (2 * G1 * G2)
cosphi = cosg1 * cosg2 - th * sing1 * sing2
B = 2 + 5 * e1**2 - 7 * e1**2 * cos(2 * g1)
A = 4 + 3 * e1**2 - 5 / 2.0 * (1 - th**2) * B
# Eq. 11 of Blaes et al. (2002).
da1dt = 0.0
if self.gr:
da1dt += -(
64 * G**3 * m1 * m2 * (m1 + m2) /
(5 * c**5 * a1**3 * sqrt((1 - e1**2)**7)) *
(1 + 73 / 24.0 * e1**2 + 37 / 96.0 * e1**4)
)
# Eq. 12 of Blaes et al. (2002).
dg1dt = 0.
if self.quadrupole:
dg1dt += (
6 * C2 * (1 / G1 * (4 * th**2 + (5 * cos(2 * g1) - 1) * (1 -
e1**2 - th**2)) + th /
G2 * (2 + e1**2 * (3 - 5 * cos(2 * g1))))
)
if self.octupole:
dg1dt += (
C3 * e2 * e1 * (1 / G2 + th / G1) *
(sing1 * sing2 * (A + 10 * (3 * th**2 - 1) * (1 - e1**2)) -
5 * th * B * cosphi) -
C3 * e2 * (1 - e1**2) / (e1 * G1) *
(10 * th * (1 - th**2) * (1 - 3 * e1**2) * sing1 * sing2 +
cosphi * (3 * A - 10 * th**2 + 2))
)
if self.gr:
dg1dt += ((3 / (c**2 * a1 * (1 - e1**2)) *
sqrt((G * (m1 + m2) / a1)**3)))
if self.hexadecapole:
dg1dt += (
1 / (4096. * a2**5 * sqrt(1 - e1**2) * (m1 + m2)**5) * 45 *
a1**3 * sqrt(a1 * G * (m1 + m2)) * (-1 / ((e2**2 - 1)**4 *
sqrt(a2 * G * (m1 + m2 + m3))) * (m1**2 - m1 * m2 + m2**2) *
(sqrt(1 - e2**2) * m2**2 * m3 *
sqrt(a2 * G * (m1 + m2 + m3)) * th + m1**2 *
(sqrt(1 - e1**2) * m2 * sqrt(a1 * G * (m1 + m2)) +
sqrt(1 - e2**2) * m3 * sqrt(a2 * G * (m1 + m2 + m3)) * th) +
m1 * m2 * (sqrt(1 - e1**2) * m2 * sqrt(a1 * G * (m1 + m2)) +
sqrt(1 - e1**2) * sqrt(a1 * G * (m1 + m2)) * m3 +
2 * sqrt(1 - e2**2) * m3 *
sqrt(a2 * G * (m1 + m2 + m3)) * th)) *
(96 * th + 480 * e1**2 * th + 180 * e1**4 * th +
144 * e2**2 * th + 720 * e1**2 * e2**2 * th +
270 * e1**4 * e2**2 * th - 224 * th**3 - 1120 * e1**2 * th**3 -
420 * e1**4 * th**3 - 336 * e2**2 * th**3 -
1680 * e1**2 * e2**2 * th**3 - 630 * e1**4 * e2**2 * th**3 +
56 * e1**2 * (2 + e1**2) * (2 + 3 * e2**2) * th *
(7 * th**2 - 4) * cos(2 * g1) -
294 * e1**4 * (2 + 3 * e2**2) * th * (th**2 - 1) *
cos(4 * g1) -
147 * e1**4 * e2**2 * cos(4 * g1 - 2 * g2) +
441 * e1**4 * e2**2 * th**2 * cos(4 * g1 - 2 * g2) +
294 * e1**4 * e2**2 * th**3 * cos(4 * g1 - 2 * g2) +
140 * e1**2 * e2**2 * cos(2 * (g1 - g2)) +
70 * e1**4 * e2**2 * cos(2 * (g1 - g2)) +
336 * e1**2 * e2**3 * th * cos(2 * (g1 - g2)) +
168 * e1**4 * e2**2 * th * cos(2 * (g1 - g2)) -
588 * e1**2 * e2**2 * th**2 * cos(2 * (g1 - g2)) -
294 * e1**4 * e2**2 * th**2 * cos(2 * (g1 - g2)) -
784 * e1**2 * e2**2 * th**3 * cos(2 * (g1 - g2)) -
392 * e1**4 * e2**2 * th**3 * cos(2 * (g1 - g2)) -
128 * e2**2 * th * cos(2 * g2) -
640 * e1**2 * e2**2 * th * cos(2 * g2) -
240 * e1**4 * e2**2 * th * cos(2 * g2) +
224 * e2**2 * th**3 * cos(2 * g2) +
1120 * e1**2 * e2**2 * th**3 * cos(2 * g2) +
420 * e1**4 * e2**2 * th**3 * cos(2 * g2) -
140 * e1**2 * e2**2 * cos(2 * (g1 + g2)) -
70 * e1**4 * e2**2 * cos(2 * (g1 + g2)) +
336 * e1**2 * e2**2 * th * cos(2 * (g1 + g2)) +
168 * e1**4 * e2**2 * th * cos(2 * (g1 + g2)) +
588 * e1**2 * e2**2 * th**2 * cos(2 * (g1 + g2)) +
294 * e1**4 * e2**2 * th**2 * cos(2 * (g1 + g2)) -
784 * e1**2 * e2**2 * th**3 * cos(2 * (g1 + g2)) -
392 * e1**4 * e2**2 * th**3 * cos(2 * (g1 + g2)) +
147 * e1**4 * e2**2 * cos(2 * (2 * g1 + g2)) -
441 * e1**4 * e2**2 * th**2 * cos(2 * (2 * g1 + g2)) +
294 * e1**4 * e2**2 * th**3 * cos(2 * (2 * g1 + g2))) +
1 / (e1 * sqrt((1 - e2**2)**7)) * 2 * (1 - e1**2) * (m1 + m2) *
(m1**3 + m2**3) * m3 * (e1 * (4 + 3 * e1**2) *
(2 + 3 * e2**2) * (3 - 30 * th**2 + 35 * th**4) -
28 * (e1 + e1**3) * (2 + 3 * e2**2) *
(1 - 8 * th**2 + 7 * th**4) * cos(2 * g1) +
147 * e1**3 * (2 + 3 * e2**2) * (th**2 - 1)**2 * cos(4 * g1) -
10 * e1 * (4 + 3 * e1**2) * e2**2 *
(1 - 8 * th**2 + 7 * th**4) * cos(2 * g2) +
28 * (e1 + e1**3) * e2**2 * ((1 + th)**2 *
(1 - 7 * th + 7 * th**2) * cos(2 * (g1 - g2)) +
(th - 1)**2 * (1 + 7 * th + 7 * th**2) * cos(2 * (g1 + g2))) -
147 * e1**3 * e2**2 * (th**2 - 1) * ((1 + th)**2 *
cos(4 * g1 - 2 * g2) +
(th - 1)**2 * cos(2 * (2 * g1 + g2)))))
)
# Eq. 13 of Blaes et al. (2002).
de1dt = 0.
if self.quadrupole:
de1dt += (
30 * C2 * e1 * (1 - e1**2) / G1 * (1 - th**2) * sin(2 * g1)
)
if self.octupole:
de1dt += (
-C3 * e2 * (1 - e1**2) / G1 * (35 * cosphi * (1 - th**2) *
e1**2 * sin(2 * g1) - 10 * th * (1 - e1**2) * (1 - th**2) *
cosg1 * sing2 - A * (sing1 * cosg2 - th * cosg1 * sing2))
)
if self.gr:
de1dt += (
-304 * G**3 * m1 * m2 * (m1 + m2) * e1 / (15 * c**4 * a1**4 *
sqrt((1 - e1**2)**5)) * (1 + 121 / 304. * e1**2)
)
if self.hexadecapole:
de1dt += (
-(315 * a1**3 * e1 * sqrt(1 - e1**2) * sqrt(a1 * G * (m1 + m2))
* (m1**2 - m1 * m2 + m2**2) * m3 * (2 * (2 + e1**2) *
(2 + 3 * e2**2) * (1 - 8 * th**2 + 7 * th**4) * sin(2 * g1) -
21 * e1**2 * (2 + 3 * e2**2) * (th**2 - 1)**2 * sin(4 * g1) +
e2**2 * (21 * e1**2 * (th - 1) * (1 + th)**3 *
sin(4 * g1 - 2 * g2) -
2 * (2 + e1**2) * (1 + th)**2 * (1 - 7 * th + 7 * th**2) *
sin(2 * (g1 - g2)) -
(th - 1)**2 * (2 * (2 + e1**2) * (1 + 7 * th + 7 * th**2) *
sin(2 * (g1 + g2)) -
21 * e1**2 * (th**2 - 1) * sin(2 * (2 * g1 + g2)))))) /
(2048 * a2**5 * sqrt((1 - e2**2)**7) * (m1 + m2)**3)
)
dg2dt = 0.
if self.quadrupole:
dg2dt += (
3 * C2 * (2 * th / G1 * (2 + e1**2 * (3 - 5 * cos(2 * g1))) +
1 / G2 * (4 + 6 * e1**2 + (5 * th**2 - 3) *
(2 + 3 * e1**2 - 5 * e1**2 * cos(2 * g1))))
)
if self.octupole:
dg2dt += (
-C3 * e1 * sing1 * sing2 * ((4 * e2**2 + 1) / (e2 * G2) * 10 *
th * (1 - th**2) * (1 - e1**2) - e2 * (1 / G1 + th / G2) *
(A + 10 * (3 * th**2 - 1) * (1 - e1**2))) -
C3 * e1 * cosphi * (5 * B * th * e2 * (1 / G1 + th / G2) +
(4 * e2**2 + 1) / (e2 * G2) * A)
)
if self.hexadecapole:
dg2dt += (
(9 * a1**3 * (-1 / sqrt(1 - e1**2) * 10 * a2 * sqrt(a1 * G *
(m1 + m2)) * (m1**2 - m1 * m2 + m2**2) * (sqrt(1 - e2**2) *
m2**2 * m3 * sqrt(a2 * G * (m1 + m2 + m3)) + m1**2 *
(sqrt(1 - e2**2) * m3 * sqrt(a2 * G * (m1 + m2 + m3)) +
sqrt(1 - e1**2) * m2 * sqrt(a1 * G * (m1 + m2)) * th) +
m1 * m2 * (2 * sqrt(1 - e2**2) * m3 *
sqrt(a2 * G * (m1 + m2 + m3)) + sqrt(1 - e1**2) * m2 *
sqrt(a1 * G * (m1 + m2)) * th + sqrt(1 - e1**2) *
sqrt(a1 * G * (m1 + m2)) * m3 * th)) *
(96 * th + 480 * e1**2 * th + 180 * e1**4 * th +
144 * e2**2 * th + 720 * e1**2 * e2**2 * th +
270 * e1**4 * e2**2 * th - 224 * th**3 - 1120 * e1**2 * th**3 -
420 * e1**4 * th**3 -
336 * e2**2 * th**3 - 1680 * e1**2 * e2**2 * th**3 -
630 * e1**4 * e2**2 * th**3 +
56 * e1**2 * (2 + e1**2) * (2 + 3 * e2**2) * th *
(7 * th**2 - 4) * cos(2 * g1) -
294 * e1**4 * (2 + 3 * e2**2) * th * (th**2 - 1) *
cos(4 * g1) -
147 * e1**4 * e2**2 * cos(4 * g1 - 2 * g2) +
441 * e1**4 * e2**2 * th**2 * cos(4 * g1 - 2 * g2) +
294 * e1**4 * e2**2 * th**3 * cos(4 * g1 - 2 * g2) +
140 * e1**2 * e2**2 * cos(2 * (g1 - g2)) +
70 * e1**4 * e2**2 * cos(2 * (g1 - g2)) +
336 * e1**2 * e2**2 * th * cos(2 * (g1 - g2)) +
168 * e1**4 * e2**2 * th * cos(2 * (g1 - g2)) -
588 * e1**2 * e2**2 * th**2 * cos(2 * (g1 - g2)) -
294 * e1**4 * e2**2 * th**2 * cos(2 * (g1 - g2)) -
784 * e1**2 * e2**2 * th**3 * cos(2 * (g1 - g2)) -
392 * e1**4 * e2**2 * th**3 * cos(2 * (g1 - g2)) -
128 * e2**2 * th * cos(2 * g2) -
640 * e1**2 * e2**2 * th * cos(2 * g2) -
240 * e1**4 * e2**2 * th * cos(2 * g2) +
224 * e2**2 * th**3 * cos(2 * g2) +
1120 * e1**2 * e2**2 * th**3 * cos(2 * g2) +
420 * e1**4 * e2**2 * th**3 * cos(2 * g2) -
140 * e1**2 * e2**2 * cos(2 * (g1 + g2)) -
70 * e1**4 * e2**2 * cos(2 * (g1 + g2)) +
336 * e1**2 * e2**2 * th * cos(2 * (g1 + g2)) +
168 * e1**4 * e2**2 * th * cos(2 * (g1 + g2)) +
588 * e1**2 * e2**2 * th**2 * cos(2 * (g1 + g2)) +
294 * e1**4 * e2**2 * th**2 * cos(2 * (g1 + g2)) -
784 * e1**2 * e2**2 * th**3 * cos(2 * (g1 + g2)) -
392 * e1**4 * e2**2 * th**3 * cos(2 * (g1 + g2)) +
147 * e1**4 * e2**2 * cos(2 * (2 * g1 + g2)) -
441 * e1**4 * e2**2 * th**2 * cos(2 * (2 * g1 + g2)) +
294 * e1**4 * e2**2 * th**3 * cos(2 * (2 * g1 + g2))) +
a1 * a2 * G * m1 * m2 * (m1**3 + m2**3) * (m1 + m2 + m3) *
(-6 * (8 + 40 * e1**2 + 15 * e1**4) * (-1 + e2**2) *
(3 - 30 * th**2 + 35 * th**4) +
7 * (8 + 40 * e1**2 + 15 * e1**4) * (2 + 3 * e2**2) *
(3 - 30 * th**2 + 35 * th**4) +
840 * e1**2 * (2 + e1**2) * (-1 + e2**2) *
(1 - 8 * th**2 + 7 * th**4) * cos(2 * g1) -
980 * e1**2 * (2 + e1**2) * (2 + 3 * e2**2) *
(1 - 8 * th**2 + 7 * th**4) * cos(2 * g1) -
4410 * e1**4 * (-1 + e2**2) * (-1 + th**2)**2 * cos(4 * g1) +
5145 * e1**4 * (2 + 3 * e2**2) * (-1 + th**2)**2 *
cos(4 * g1) -
70 * (8 + 40 * e1**2 + 15 * e1**4) * e2**2 *
(1 - 8 * th**2 + 7 * th**4) * cos(2 * g2) +
20 * (8 + 40 * e1**2 + 15 * e1**4) * (-1 + e2**2) *
(1 - 8 * th**2 + 7 * th**4) * cos(2 * g2) +
980 * e1**2 * (2 + e1**2) * e2**2 * ((1 + th)**2 *
(1 - 7 * th + 7 * th**2) * cos(2 * (g1 - g2)) +
(-1 + th)**2 * (1 + 7 * th + 7 * th**2) * cos(2 * (g1 + g2))) -
280 * e1**2 * (2 + e1**2) * (-1 + e2**2) * ((1 + th)**2
* (1 - 7 * th + 7 * th**2) * cos(2 * (g1 - g2)) +
(-1 + th)**2 * (1 + 7 * th + 7 * th**2) * cos(2 * (g1 + g2))) -
1470 * e1**4 * (1 - e2**2) * (-1 + th) * (1 + th) *
((1 + th)**2 * cos(4 * g1 - 2 * g2) + (-1 + th)**2 *
cos(2 * (2 * g1 + g2))) -
5145 * e1**4 * e2**2 * (-1 + th**2) * ((1 + th)**2 *
cos(4 * g1 - 2 * g2) +
(-1 + th)**2 * cos(2 * (2 * g1 + g2)))))) /
(8192 * a2**6 * (-1 + e2**2)**4 * (m1 + m2)**5 *
sqrt(a2 * G * (m1 + m2 + m3)))
)
# Eq. 16 of Blaes et al. (2002).
de2dt = 0.
if self.octupole:
de2dt += (
C3 * e1 * (1 - e2**2) / G2 * (10 * th * (1 - th**2) *
(1 - e1**2) * sing1 * cosg2 +
A * (cosg1 * sing2 - th * sing1 * cosg2))
)
if self.hexadecapole:
de2dt += (
45 * a1**4 * e2 * m1 * m2 * (m1**2 - m1 * m2 + m2**2) *
sqrt(a2 * G * (m1 + m2 + m3)) *
(-147 * e1**4 * (-1 + th) * (1 + th)**3 *
sin(4 * g1 - 2 * g2) +
28 * e1**2 * (2 + e1**2) * (1 + th)**2 *
(1 - 7 * th + 7 * th**2) * sin(2 * (g1 - g2)) +
(-1 + th) * (2 * (8 + 40 * e1**2 + 15 * e1**4) *
(-1 - th + 7 * th**2 + 7 * th**3) * sin(2 * g2) -
7 * e1**2 * (-1 + th) * (4 * (2 + e1**2) *
(1 + 7 * th + 7 * th**2) * sin(2 * (g1 + g2)) -
21 * e1**2 * (-1 + th**2) * sin(2 * (2 * g1 + g2))))) /
(4096 * a2**6 * (-1 + e2**2)**3 * (m1 + m2)**4)
)
# Eq. 17 of Blaes et al. (2002).
dHdt = 0.
if self.gr:
dHdt += (
-32 * G**3 * m1**2 * m2**2 /
(5 * c**5 * a1**3 * (1 - e1**2)**2) *
sqrt(G * (m1 + m2) / a1) * (1 + 7 / 8. * e1**2) *
(G1 + G2 * th) / H
)
# fmt: on
der = [da1dt, de1dt, dg1dt, de2dt, dg2dt, dHdt]
return der
def _step(self):
self.solver.integrate(self.tstop, step=True)
self.nstep += 1
self._t = self.solver.t
(
self._a1,
self.e1,
self._g1,
self.e2,
self._g2,
self._H,
) = self.solver.y
self._g1 %= 2 * pi
self._g2 %= 2 * pi
def integrator_setup(self):
"""Set up the integrator."""
# Integration parameters
self.nstep = 0
self._y = [self._a1, self.e1, self._g1, self.e2, self._g2, self._H]
# Set up the integrator.
self.solver = ode(self._deriv)
self.solver.set_integrator(
self.algo, nsteps=1, atol=self.atol, rtol=self.rtol
)
self.solver.set_initial_value(self._y, self._t)
if self.algo == 'vode':
self.solver._integrator.iwork[2] = -1  # Don't print FORTRAN errors.
def reset(self):
"""Reset the triple to its initial configuration.
This resets the orbital parameters and time, but does not reset the
integration options.
"""
self.t = 0
for key in self.initial_state:
setattr(self, key, self.initial_state[key])
def evolve(self, tstop):
"""Integrate the triple in time.
Parameters:
tstop: The time to integrate in years
"""
self.tstop = tstop
n_columns = len(self.state())
self.integrator_setup()
self.integration_steps = np.zeros((self.maxoutput, n_columns))
self.integration_steps[0] = self.state()
self.tstart = time.time()
while (self.t < tstop) and (
(time.time() - self.tstart) < self.cputstop
):
self._step()
if self.nstep % self.outfreq == 0:
self.integration_steps[self.nstep // self.outfreq] = self.state()
if self._a1 * (1 - self.e1) < self._r1 + self._r2:
self.collision = True
break
laststep = (self.nstep // self.outfreq) + 1
self.integration_steps[laststep] = self.state()
return self.integration_steps[: laststep + 1]
def extrema(self, tstop):
"""Integrate the triple, but only save the eccentricity extrema.
Parameters:
tstop: The time to integrate in years
"""
self.tstop = tstop
n_columns = len(self.state())
self.integrator_setup()
self.integration_steps = np.zeros((self.maxoutput, n_columns))
e_prev = 0
e_prev2 = 0
output_index = 0
self.tstart = time.time()
while (
self.t < self.tstop and time.time() - self.tstart < self.cputstop
):
prevstate = self.state()
self._step()
if e_prev2 < e_prev > self.e1:
self.integration_steps[output_index] = prevstate
output_index += 1
elif e_prev2 > e_prev < self.e1:
self.integration_steps[output_index] = prevstate
output_index += 1
# Check for collisions.
if self.a1 * (1 - self.e1) < self.r1 + self.r2:
self.collision = True
break
e_prev2 = e_prev
e_prev = self.e1
return self.integration_steps[:output_index]
def find_flips(self, tstop):
"""Integrate the triple, but print out only when there is a flip."""
self.tstop = tstop
n_columns = len(self.state())
self.integrator_setup()
self.integration_steps = np.zeros((self.maxoutput, n_columns))
e_prev = 0
e_prev2 = 0
sign_prev = np.sign(self.th)
output_index = 0
self.tstart = time.time()
while (
self.t < self.tstop and time.time() - self.tstart < self.cputstop
):
prevstate = self.state()
self._step()
if e_prev2 < e_prev > self.e1:
if np.sign(self.th) != sign_prev:
self.integration_steps[output_index] = prevstate
output_index += 1
sign_prev = np.sign(self.th)
e_prev2 = e_prev
e_prev = self.e1
return self.integration_steps[:output_index]
def state(self):
"""Return a tuple with the dynamical state of the system.
Returns:
t: The time.
a1: The semi-major axis of the inner binary.
e1: The eccentricity of the inner binary.
g1: The argument of periapsis of the inner binary.
a2: The semi-major axis of the outer binary.
e2: The eccentricity of the outer binary.
g2: The argument of periapsis of the outer binary.
inc: The inclination.
"""
return (
self.t,
self.a1,
self.e1,
self.g1,
self.a2,
self.e2,
self.g2,
self.inc,
)
def __repr__(self):
"""Print out the initial values in JSON format."""
# Get the initial state.
json_data = self.initial_state
# Add some other properties
json_data['epsoct'] = self.epsoct
json_data['tstop'] = self.tstop
json_data['cputstop'] = self.cputstop
json_data['outfreq'] = self.outfreq
json_data['atol'] = self.atol
json_data['rtol'] = self.rtol
json_data['quadrupole'] = self.quadrupole
json_data['octupole'] = self.octupole
json_data['hexadecapole'] = self.hexadecapole
json_data['gr'] = self.gr
json_data['algo'] = self.algo
json_data['maxoutput'] = self.maxoutput
json_data['collision'] = self.collision
return json.dumps(json_data, sort_keys=True, indent=2)
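A minimal usage sketch of the secular integrator above. The constructor name `Triple` and its keyword arguments are assumptions for illustration (the real class and parameter names are defined earlier in this module); only `evolve()`, `reset()`, `extrema()`, and the column order documented in `state()` are taken from the code shown here.

# Hypothetical constructor; the actual class and argument names are defined
# earlier in this file.
triple = Triple(m1=1.0, m2=1.0, m3=1.0, a1=1.0, a2=20.0,
                e1=0.1, e2=0.3, inc=80.0)

# Integrate for 1e7 yr. evolve() returns a NumPy array whose rows are
# (t, a1, e1, g1, a2, e2, g2, inc), matching state().
history = triple.evolve(1e7)
print('maximum inner eccentricity:', history[:, 2].max())

# Record only the eccentricity extrema instead of every output step.
triple.reset()
peaks = triple.extrema(1e7)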
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
About this technique
--------------------
In Python, we define the six faces of a cuboid to draw, as well as
texture coordinates corresponding to the vertices of the cuboid.
The back faces of the cuboid are drawn (and front faces are culled)
because only the back faces are visible when the camera is inside the
volume.
In the vertex shader, we intersect the view ray with the near and far
clipping planes. In the fragment shader, we use these two points to
compute the ray direction and then compute the position of the front
cuboid surface (or near clipping plane) along the view ray.
Next we calculate the number of steps to walk from the front surface
to the back surface and iterate over these positions in a for-loop.
At each iteration, the fragment color or other voxel information is
updated depending on the selected rendering method.
It is important that the texture interpolation is 'linear', since with
'nearest' the result looks very ugly. The wrapping should be clamp_to_edge
to avoid artifacts when the ray takes a small step outside the volume.
The ray direction is established by mapping the vertex to the document
coordinate frame, adjusting z to +/-1, and mapping the coordinate back.
The ray is expressed in coordinates local to the volume (i.e. texture
coordinates).
"""
from ..gloo import Texture3D, TextureEmulated3D, VertexBuffer, IndexBuffer
from . import Visual
from .shaders import Function, ModularProgram
from ..color import get_colormap
import numpy as np
# todo: implement more render methods (port from visvis)
# todo: allow anisotropic data
# todo: what to do about lighting? ambient/diffuse/specular/shininess on each visual?
# Vertex shader
VERT_SHADER = """
attribute vec3 a_position;
attribute vec3 a_texcoord;
uniform vec3 u_shape;
varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
void main() {
v_texcoord = a_texcoord;
v_position = a_position;
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) and project back. Voila, we get our ray vector.
vec4 pos_in_cam = $viewtransformf(vec4(v_position, 1));
// intersection of ray and near clipping plane (z = -1 in clip coords)
pos_in_cam.z = -pos_in_cam.w;
v_nearpos = $viewtransformi(pos_in_cam);
// intersection of ray and far clipping plane (z = +1 in clip coords)
pos_in_cam.z = pos_in_cam.w;
v_farpos = $viewtransformi(pos_in_cam);
gl_Position = $transform(vec4(v_position, 1.0));
}
""" # noqa
# Fragment shader
FRAG_SHADER = """
// uniforms
uniform $sampler_type u_volumetex;
uniform vec3 u_shape;
uniform float u_threshold;
uniform float u_relative_step_size;
//varyings
varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
// uniforms for lighting. Hard coded until we figure out how to do lights
const vec4 u_ambient = vec4(0.2, 0.4, 0.2, 1.0);
const vec4 u_diffuse = vec4(0.8, 0.2, 0.2, 1.0);
const vec4 u_specular = vec4(1.0, 1.0, 1.0, 1.0);
const float u_shininess = 40.0;
//varying vec3 lightDirs[1];
// global holding view direction in local coordinates
vec3 view_ray;
vec4 calculateColor(vec4, vec3, vec3);
float rand(vec2 co);
void main() {{
vec3 farpos = v_farpos.xyz / v_farpos.w;
vec3 nearpos = v_nearpos.xyz / v_nearpos.w;
// Calculate unit vector pointing in the view direction through this
// fragment.
view_ray = normalize(farpos.xyz - nearpos.xyz);
// Compute the distance to the front surface or near clipping plane
float distance = dot(nearpos-v_position, view_ray);
distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,
(u_shape.x - 0.5 - v_position.x) / view_ray.x));
distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,
(u_shape.y - 0.5 - v_position.y) / view_ray.y));
distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,
(u_shape.z - 0.5 - v_position.z) / view_ray.z));
// Now we have the starting position on the front surface
vec3 front = v_position + view_ray * distance;
// Decide how many steps to take
int nsteps = int(-distance / u_relative_step_size + 0.5);
if( nsteps < 1 )
discard;
// Get starting location and step vector in texture coordinates
vec3 step = ((v_position - front) / u_shape) / nsteps;
vec3 start_loc = front / u_shape;
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//gl_FragColor = vec4(0.0, nsteps / 3.0 / u_shape.x, 1.0, 1.0);
//return;
{before_loop}
// This outer loop seems necessary on some systems for large
// datasets. Ugly, but it works ...
vec3 loc = start_loc;
int iter = 0;
while (iter < nsteps) {{
for (iter=iter; iter<nsteps; iter++)
{{
// Get sample color
vec4 color = $sample(u_volumetex, loc);
float val = color.g;
{in_loop}
// Advance location deeper into the volume
loc += step;
}}
}}
{after_loop}
/* Set depth value - from visvis TODO
int iter_depth = int(maxi);
// Calculate end position in world coordinates
vec4 position2 = vertexPosition;
position2.xyz += ray*shape*float(iter_depth);
// Project to device coordinates and set fragment depth
vec4 iproj = gl_ModelViewProjectionMatrix * position2;
iproj.z /= iproj.w;
gl_FragDepth = (iproj.z+1.0)/2.0;
*/
}}
float rand(vec2 co)
{{
// Create a pseudo-random number between 0 and 1.
// http://stackoverflow.com/questions/4200224
return fract(sin(dot(co.xy ,vec2(12.9898, 78.233))) * 43758.5453);
}}
float colorToVal(vec4 color1)
{{
return color1.g; // todo: why did I have this abstraction in visvis?
}}
vec4 calculateColor(vec4 betterColor, vec3 loc, vec3 step)
{{
// Calculate color by incorporating lighting
vec4 color1;
vec4 color2;
// View direction
vec3 V = normalize(view_ray);
// calculate normal vector from gradient
vec3 N; // normal
color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );
color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );
N[0] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );
color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );
N[1] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );
color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );
N[2] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
float gm = length(N); // gradient magnitude
N = normalize(N);
// Flip normal so it points towards viewer
float Nselect = float(dot(N,V) > 0.0);
N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;
// Get color of the texture (albedo)
color1 = betterColor;
color2 = color1;
// todo: parametrise color1_to_color2
// Init colors
vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 final_color;
// todo: allow multiple lights, define lights on viewbox or subscene
int nlights = 1;
for (int i=0; i<nlights; i++)
{{
// Get light direction (make sure to prevent zero division)
vec3 L = normalize(view_ray); //lightDirs[i];
float lightEnabled = float( length(L) > 0.0 );
L = normalize(L+(1.0-lightEnabled));
// Calculate lighting properties
float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );
vec3 H = normalize(L+V); // Halfway vector
float specularTerm = pow( max(dot(H,N),0.0), u_shininess);
// Calculate mask
float mask1 = lightEnabled;
// Calculate colors
ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;
diffuse_color += mask1 * lambertTerm;
specular_color += mask1 * specularTerm * u_specular;
}}
// Calculate final color by combining the different components
final_color = color2 * ( ambient_color + diffuse_color) + specular_color;
final_color.a = color2.a;
// Done
return final_color;
}}
""" # noqa
MIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // The maximum encountered value
int maxi = 0; // Where the maximum value was encountered
""",
in_loop="""
if( val > maxval ) {
maxval = val;
maxi = iter;
}
""",
after_loop="""
// Refine search for max value
loc = start_loc + step * (float(maxi) - 0.5);
for (int i=0; i<10; i++) {
maxval = max(maxval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
gl_FragColor = $cmap(maxval);
""",
)
MIP_FRAG_SHADER = FRAG_SHADER.format(**MIP_SNIPPETS)
TRANSLUCENT_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
""",
in_loop="""
color = $cmap(val);
float a1 = integrated_color.a;
float a2 = color.a * (1 - a1);
float alpha = max(a1 + a2, 0.001);
// Doesn't work.. GLSL optimizer bug?
//integrated_color = (integrated_color * a1 / alpha) +
// (color * a2 / alpha);
// This should be identical but does work correctly:
integrated_color *= a1 / alpha;
integrated_color += color * a2 / alpha;
integrated_color.a = alpha;
if( alpha > 0.99 ){
// stop integrating if the fragment becomes opaque
iter = nsteps;
}
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
TRANSLUCENT_FRAG_SHADER = FRAG_SHADER.format(**TRANSLUCENT_SNIPPETS)
ADDITIVE_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
""",
in_loop="""
color = $cmap(val);
integrated_color = 1.0 - (1.0 - integrated_color) * (1.0 - color);
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
ADDITIVE_FRAG_SHADER = FRAG_SHADER.format(**ADDITIVE_SNIPPETS)
ISO_SNIPPETS = dict(
before_loop="""
vec4 color3 = vec4(0.0); // final color
vec3 dstep = 1.5 / u_shape; // step to sample derivative
""",
in_loop="""
if (val > u_threshold-0.2) {
// Take the last interval in smaller steps
vec3 iloc = loc - step;
for (int i=0; i<10; i++) {
val = $sample(u_volumetex, iloc).g;
if (val > u_threshold) {
color = $cmap(val);
gl_FragColor = calculateColor(color, iloc, dstep);
iter = nsteps;
break;
}
iloc += step * 0.1;
}
}
""",
after_loop="""
""",
)
ISO_FRAG_SHADER = FRAG_SHADER.format(**ISO_SNIPPETS)
frag_dict = {'mip': MIP_FRAG_SHADER, 'iso': ISO_FRAG_SHADER,
'translucent': TRANSLUCENT_FRAG_SHADER,
'additive': ADDITIVE_FRAG_SHADER}
class VolumeVisual(Visual):
""" Displays a 3D Volume
Parameters
----------
vol : ndarray
The volume to display. Must be ndim==3.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'translucent', 'additive', 'iso'}
The render method to use. See corresponding docs for details.
Default 'mip'.
threshold : float
The threshold to use for the isosurface render method. By default
the mean of the given volume is used.
relative_step_size : float
The relative step size to step through the volume. Default 0.8.
Increase to e.g. 1.5 to increase performance, at the cost of
quality.
cmap : str
Colormap to use.
emulate_texture : bool
Use 2D textures to emulate a 3D texture. OpenGL ES 2.0 compatible,
but has lower performance on desktop platforms.
"""
def __init__(self, vol, clim=None, method='mip', threshold=None,
relative_step_size=0.8, cmap='grays',
emulate_texture=False):
Visual.__init__(self)
# Only show back faces of cuboid. This is required because if we are
# inside the volume, then the front faces are outside of the clipping
# box and will not be drawn.
self.set_gl_state('translucent', cull_face=False)
tex_cls = TextureEmulated3D if emulate_texture else Texture3D
# Storage of information of volume
self._vol_shape = ()
self._vertex_cache_id = ()
self._clim = None
# Set the colormap
self._cmap = get_colormap(cmap)
# Create gloo objects
self._vbo = None
self._tex = tex_cls((10, 10, 10), interpolation='linear',
wrapping='clamp_to_edge')
# Create program
self._program = ModularProgram(VERT_SHADER)
self._program['u_volumetex'] = self._tex
self._index_buffer = None
# Set data
self.set_data(vol, clim)
# Set params
self.method = method
self.relative_step_size = relative_step_size
self.threshold = threshold if (threshold is not None) else vol.mean()
def set_data(self, vol, clim=None):
""" Set the volume data.
Parameters
----------
vol : ndarray
The 3D volume.
clim : tuple | None
Colormap limits to use. None will use the min and max values.
"""
# Check volume
if not isinstance(vol, np.ndarray):
raise ValueError('Volume visual needs a numpy array.')
if not ((vol.ndim == 3) or (vol.ndim == 4 and vol.shape[-1] <= 4)):
raise ValueError('Volume visual needs a 3D image.')
# Handle clim
if clim is not None:
clim = np.array(clim, float)
if not (clim.ndim == 1 and clim.size == 2):
raise ValueError('clim must be a 2-element array-like')
self._clim = tuple(clim)
if self._clim is None:
self._clim = vol.min(), vol.max()
# Apply clim
vol = np.array(vol, dtype='float32', copy=False)
if self._clim[1] == self._clim[0]:
if self._clim[0] != 0.:
vol *= 1.0 / self._clim[0]
else:
vol -= self._clim[0]
vol /= self._clim[1] - self._clim[0]
# Apply to texture
self._tex.set_data(vol) # will be efficient if vol is same shape
self._program['u_shape'] = vol.shape[2], vol.shape[1], vol.shape[0]
self._vol_shape = vol.shape[:3]
# Create vertices?
if self._index_buffer is None:
self._create_vertex_data()
@property
def clim(self):
""" The contrast limits that were applied to the volume data.
Settable via set_data().
"""
return self._clim
@property
def cmap(self):
return self._cmap
@cmap.setter
def cmap(self, cmap):
self._cmap = get_colormap(cmap)
self._program.frag['cmap'] = Function(self._cmap.glsl_map)
self.update()
@property
def method(self):
"""The render method to use
Current options are:
* translucent: voxel colors are blended along the view ray until
the result is opaque.
* mip: maximum intensity projection. Cast a ray and display the
maximum value that was encountered.
* additive: voxel colors are added along the view ray until
the result is saturated.
* iso: isosurface. Cast a ray until a certain threshold is
encountered. At that location, lighting calculations are
performed to give the visual appearance of a surface.
"""
return self._method
@method.setter
def method(self, method):
# Check and save
known_methods = list(frag_dict.keys())
if method not in known_methods:
raise ValueError('Volume render method should be in %r, not %r' %
(known_methods, method))
self._method = method
# Get rid of specific variables - they may become invalid
self._program['u_threshold'] = None
self._program.frag = frag_dict[method]
#self._program.frag['calculate_steps'] = Function(calc_steps)
self._program.frag['sampler_type'] = self._tex.glsl_sampler_type
self._program.frag['sample'] = self._tex.glsl_sample
self._program.frag['cmap'] = Function(self._cmap.glsl_map)
self.update()
@property
def threshold(self):
""" The threshold value to apply for the isosurface render method.
"""
return self._threshold
@threshold.setter
def threshold(self, value):
self._threshold = float(value)
self.update()
@property
def relative_step_size(self):
""" The relative step size used during raycasting.
Larger values yield higher performance at reduced quality. If
set > 2.0 the ray skips entire voxels. Recommended values are
between 0.5 and 1.5. The amount of quality degradation depends
on the render method.
"""
return self._relative_step_size
@relative_step_size.setter
def relative_step_size(self, value):
value = float(value)
if value < 0.1:
raise ValueError('relative_step_size cannot be smaller than 0.1')
self._relative_step_size = value
def _create_vertex_data(self):
""" Create and set positions and texture coords from the given shape
The cuboid is described by its eight corner vertices, shared between the
six faces and drawn as a single triangle strip (14 indices).
"""
shape = self._vol_shape
# Do we already have this or not?
vertex_cache_id = self._vol_shape
if vertex_cache_id == self._vertex_cache_id:
return
self._vertex_cache_id = None
# Get corner coordinates. The -0.5 offset is to center
# pixels/voxels. This works correctly for anisotropic data.
x0, x1 = -0.5, shape[2] - 0.5
y0, y1 = -0.5, shape[1] - 0.5
z0, z1 = -0.5, shape[0] - 0.5
data = np.empty(8, dtype=[
('a_position', np.float32, 3),
('a_texcoord', np.float32, 3)
])
data['a_position'] = np.array([
[x0, y0, z0],
[x1, y0, z0],
[x0, y1, z0],
[x1, y1, z0],
[x0, y0, z1],
[x1, y0, z1],
[x0, y1, z1],
[x1, y1, z1],
], dtype=np.float32)
data['a_texcoord'] = np.array([
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1],
], dtype=np.float32)
"""
6-------7
/| /|
4-------5 |
| | | |
| 2-----|-3
|/ |/
0-------1
"""
# Order is chosen such that normals face outward; front faces will be
# culled.
indices = np.array([2, 6, 0, 4, 5, 6, 7, 2, 3, 0, 1, 5, 3, 7],
dtype=np.uint32)
# Get some stats
self._kb_for_texture = np.prod(self._vol_shape) / 1024
self._kb_for_vertices = (indices.nbytes + data.nbytes) / 1024
# Apply
if self._vbo is not None:
self._vbo.delete()
self._index_buffer.delete()
self._vbo = VertexBuffer(data)
self._program.bind(self._vbo)
self._index_buffer = IndexBuffer(indices)
self._vertex_cache_id = vertex_cache_id
def bounds(self, mode, axis):
"""Get the visual bounds
Parameters
----------
mode : str
The mode.
axis : int
The axis number.
Returns
-------
bounds : tuple
The lower and upper bounds.
"""
# Not sure if this is right. Do I need to take the transform of this
# node into account?
# Also, this method has no docstring, and I don't want to repeat
# the docstring here. Maybe Visual implements _bounds that subclasses
# can implement?
return 0, self._vol_shape[2-axis]
def draw(self, transforms):
"""Draw the visual
Parameters
----------
transforms : instance of TransformSystem
The transforms to use.
"""
Visual.draw(self, transforms)
full_tr = transforms.get_full_transform()
self._program.vert['transform'] = full_tr
self._program['u_relative_step_size'] = self._relative_step_size
# Get and set transforms
view_tr_f = transforms.visual_to_document
view_tr_i = view_tr_f.inverse
self._program.vert['viewtransformf'] = view_tr_f
self._program.vert['viewtransformi'] = view_tr_i
# Set attributes that are specific to certain methods
self._program.build_if_needed()
if self._method == 'iso':
self._program['u_threshold'] = self._threshold
# Draw!
self._program.draw('triangle_strip', self._index_buffer)
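A minimal usage sketch for the visual above, assuming the scene-graph wrapper `vispy.scene.visuals.Volume` that vispy generates from this Visual elsewhere in the library; the synthetic volume and camera settings are illustrative only.

import numpy as np
from vispy import app, scene

# Synthetic 64^3 volume: a soft Gaussian blob in the centre.
side = 64
x, y, z = np.mgrid[:side, :side, :side]
vol = np.exp(-((x - 32)**2 + (y - 32)**2 + (z - 32)**2) / 200.0)
vol = vol.astype(np.float32)

canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view()
# method and cmap are forwarded to VolumeVisual.__init__ defined above.
volume = scene.visuals.Volume(vol, parent=view.scene, method='mip',
                              cmap='grays')
view.camera = scene.cameras.TurntableCamera(fov=60)

if __name__ == '__main__':
    app.run()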
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class AwsList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the AwsList
:param Version version: Version that contains the resource
:returns: twilio.rest.accounts.v1.credential.aws.AwsList
:rtype: twilio.rest.accounts.v1.credential.aws.AwsList
"""
super(AwsList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Credentials/AWS'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams AwsInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.accounts.v1.credential.aws.AwsInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists AwsInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.accounts.v1.credential.aws.AwsInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of AwsInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return AwsPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of AwsInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return AwsPage(self._version, response, self._solution)
def create(self, credentials, friendly_name=values.unset,
account_sid=values.unset):
"""
Create a new AwsInstance
:param unicode credentials: A string that contains the AWS access credentials in the format <AWS_ACCESS_KEY_ID>:<AWS_SECRET_ACCESS_KEY>
:param unicode friendly_name: A string to describe the resource
:param unicode account_sid: The Subaccount this Credential should be associated with.
:returns: Newly created AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
"""
data = values.of({
'Credentials': credentials,
'FriendlyName': friendly_name,
'AccountSid': account_sid,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return AwsInstance(self._version, payload, )
def get(self, sid):
"""
Constructs an AwsContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.accounts.v1.credential.aws.AwsContext
:rtype: twilio.rest.accounts.v1.credential.aws.AwsContext
"""
return AwsContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs an AwsContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.accounts.v1.credential.aws.AwsContext
:rtype: twilio.rest.accounts.v1.credential.aws.AwsContext
"""
return AwsContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Accounts.V1.AwsList>'
class AwsPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the AwsPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.accounts.v1.credential.aws.AwsPage
:rtype: twilio.rest.accounts.v1.credential.aws.AwsPage
"""
super(AwsPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of AwsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.accounts.v1.credential.aws.AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
"""
return AwsInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Accounts.V1.AwsPage>'
class AwsContext(InstanceContext):
""" """
def __init__(self, version, sid):
"""
Initialize the AwsContext
:param Version version: Version that contains the resource
:param sid: The unique string that identifies the resource
:returns: twilio.rest.accounts.v1.credential.aws.AwsContext
:rtype: twilio.rest.accounts.v1.credential.aws.AwsContext
"""
super(AwsContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Credentials/AWS/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch an AwsInstance
:returns: Fetched AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return AwsInstance(self._version, payload, sid=self._solution['sid'], )
def update(self, friendly_name=values.unset):
"""
Update the AwsInstance
:param unicode friendly_name: A string to describe the resource
:returns: Updated AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
"""
data = values.of({'FriendlyName': friendly_name, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return AwsInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
"""
Deletes the AwsInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Accounts.V1.AwsContext {}>'.format(context)
class AwsInstance(InstanceResource):
""" """
def __init__(self, version, payload, sid=None):
"""
Initialize the AwsInstance
:returns: twilio.rest.accounts.v1.credential.aws.AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
"""
super(AwsInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'friendly_name': payload.get('friendly_name'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AwsContext for this AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsContext
"""
if self._context is None:
self._context = AwsContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: The string that you assigned to describe the resource
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def date_created(self):
"""
:returns: The RFC 2822 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The RFC 2822 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The URI for this resource, relative to `https://accounts.twilio.com`
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch an AwsInstance
:returns: Fetched AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
"""
return self._proxy.fetch()
def update(self, friendly_name=values.unset):
"""
Update the AwsInstance
:param unicode friendly_name: A string to describe the resource
:returns: Updated AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
"""
return self._proxy.update(friendly_name=friendly_name, )
def delete(self):
"""
Deletes the AwsInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Accounts.V1.AwsInstance {}>'.format(context)
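A minimal usage sketch for the resource classes above via the Twilio REST client. The client path `client.accounts.v1.credentials.aws` is an assumption based on this module's package path; the account SID, auth token, and AWS key strings are placeholders.

from twilio.rest import Client

# Placeholder credentials; real values come from the Twilio console.
client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')

# AwsList.create -> POST /Credentials/AWS
aws = client.accounts.v1.credentials.aws.create(
    credentials='AKIAXXXXXXXXXXXXXXXX:secret_access_key',
    friendly_name='build-server-credential',
)
print(aws.sid, aws.date_created)

# AwsContext / AwsInstance: fetch, update, and delete by SID.
fetched = client.accounts.v1.credentials.aws(aws.sid).fetch()
client.accounts.v1.credentials.aws(aws.sid).update(friendly_name='renamed')
client.accounts.v1.credentials.aws(aws.sid).delete()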
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Search Ads operators.
"""
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.marketing_platform.hooks.search_ads import GoogleSearchAdsHook
from airflow.utils.decorators import apply_defaults
class GoogleSearchAdsInsertReportOperator(BaseOperator):
"""
Inserts a report request into the reporting system.
.. seealso::
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/request
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsInsertReportOperator`
:param report: Report to be generated
:type report: Dict[str, Any]
:param api_version: The version of the api that will be requested for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to work, the service account making the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report",)
template_ext = (".json",)
@apply_defaults
def __init__(
self,
report: Dict[str, Any],
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.report = report
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def execute(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
)
self.log.info("Generating Search Ads report")
response = hook.insert_report(report=self.report)
report_id = response.get("id")
self.xcom_push(context, key="report_id", value=report_id)
self.log.info("Report generated, id: %s", report_id)
return response
class GoogleSearchAdsDownloadReportOperator(BaseOperator):
"""
Downloads a report to GCS bucket.
.. seealso::
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/getFile
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsGetfileReportOperator`
:param report_id: ID of the report.
:type report_id: str
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param report_name: The report name to set when uploading the local file. If not provided then
report_id is used.
:type report_name: str
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
:param api_version: The version of the api that will be requested for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to work, the service account making the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report_name", "report_id", "bucket_name")
@apply_defaults
def __init__(
self,
report_id: str,
bucket_name: str,
report_name: Optional[str] = None,
gzip: bool = True,
chunk_size: int = 10 * 1024 * 1024,
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.report_id = report_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.chunk_size = chunk_size
self.gzip = gzip
self.bucket_name = self._set_bucket_name(bucket_name)
self.report_name = report_name
def _resolve_file_name(self, name: str) -> str:
csv = ".csv"
gzip = ".gz"
if not name.endswith(csv):
name += csv
if self.gzip:
name += gzip
return name
@staticmethod
def _set_bucket_name(name: str) -> str:
bucket = name if not name.startswith("gs://") else name[5:]
return bucket.strip("/")
@staticmethod
def _handle_report_fragment(fragment: bytes) -> bytes:
fragment_records = fragment.split(b"\n", 1)
if len(fragment_records) > 1:
return fragment_records[1]
return b""
def execute(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to
)
# Resolve file name of the report
report_name = self.report_name or self.report_id
report_name = self._resolve_file_name(report_name)
response = hook.get(report_id=self.report_id)
if not response['isReportReady']:
raise AirflowException('Report {} is not ready yet'.format(self.report_id))
# Resolve report fragments
fragments_count = len(response["files"])
# Download chunks of report's data
self.log.info("Downloading Search Ads report %s", self.report_id)
with NamedTemporaryFile() as temp_file:
for i in range(fragments_count):
byte_content = hook.get_file(
report_fragment=i, report_id=self.report_id
)
fragment = (
byte_content
if i == 0
else self._handle_report_fragment(byte_content)
)
temp_file.write(fragment)
temp_file.flush()
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=report_name,
gzip=self.gzip,
filename=temp_file.name,
)
self.xcom_push(context, key="file_name", value=report_name)
|