#!/usr/bin/python
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from QXAction import *
from QXApplication import *
from QXStaticConfig import *
from QXToolBar import *
import platform
import sys
import re
import quick_ui_res
import json
class QXSingleDocMainWindow(QMainWindow):
def __init__(self,mainWidget = None,parent=None):
QMainWindow.__init__(self,parent)
self.initDefaultUI(mainWidget)
def initDefaultUI(self,mainWidget = None,hasToolBar = True,hasMenuBar = True):
d = QApplication.desktop()
screenWidget = d.screen(d.primaryScreen())
        w = screenWidget.size().width() * 0.6
        h = screenWidget.size().height() * 0.6
        self.resize(int(w), int(h))
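        # Place the window at a random point in the upper-left quadrant of the
        # leftover screen space, seeding the RNG from the OS entropy pool.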
import random,os
random.seed(os.urandom(128))
self.move(QPoint(
random.randint(0,int((screenWidget.size().width() - w) / 2)) ,
random.randint(0,int((screenWidget.size().height() - h) / 2)))
)
self.appName = QXApplication.appName()
self.setWindowTitle("Untitled[*] - %s" % self.appName)
self._fileOpenSuffix = "All Files (*.*)"
self._fileSaveAsSuffix = "All Files (*.*)"
self.setFileReadOnly(False)
self._fileName = None
self.setFileCreateByMe(False)
self.actionDocumentNew = QXAction('&New',self,triggered=self.ActionFileNew)
self.actionDocumentOpen = QXAction('&Open',self,triggered=self.ActionFileOpen)
self.actionDocumentSave = QXAction('&Save',self,triggered=self.ActionFileSave)
self.actionDocumentSaveAs = QXAction('Save &As',self,triggered=self.ActionFileSaveAs)
self.actionConfigure = QXAction('Preferences...',self,triggered=self.ActionPreferences)
self.actionApplicationExit = QXAction('Quit',self,triggered=self.close)
self.actionEditUndo = QXAction('&Undo',self,triggered=self.onEditUndo)
self.actionEditRedo = QXAction('&Redo',self,triggered=self.onEditRedo)
self.actionEditCut = QXAction('Cu&t',self,triggered=self.onEditCut)
self.actionEditCopy = QXAction('&Copy',self,triggered=self.onEditCopy)
self.actionEditPaste = QXAction('&Paste',self,triggered=self.onEditPaste)
#toolbar
if hasToolBar:
self.tbrMain = self.addToolBar("Main")
self.tbrMain.addAction(self.actionDocumentNew)
self.tbrMain.addAction(self.actionDocumentOpen)
self.tbrMain.addAction(self.actionDocumentSave)
self.tbrMain.addSeparator()
self.tbrMain.addAction(self.actionEditUndo)
self.tbrMain.addAction(self.actionEditRedo)
self.tbrMain.addSeparator()
self.tbrMain.addAction(self.actionEditCut)
self.tbrMain.addAction(self.actionEditCopy)
self.tbrMain.addAction(self.actionEditPaste)
self.tbrMain.addSeparator()
self.tbrMain.addAction(self.actionConfigure)
self.setUnifiedTitleAndToolBarOnMac(True)
if hasMenuBar:
self.mnuMain = self.menuBar()
mnuFile = self.mnuMain.addMenu('&File')
mnuFile.addAction(self.actionDocumentNew)
mnuFile.addAction(self.actionDocumentOpen)
mnuFile.addAction(self.actionDocumentSave)
mnuFile.addAction(self.actionDocumentSaveAs)
mnuFile.addSeparator()
if platform.system() == 'Linux':
mnuFile.addSeparator()
mnuFile.addAction(self.actionConfigure)
mnuFile.addAction(self.actionApplicationExit)
mnuEdit = self.mnuMain.addMenu('&Edit')
mnuEdit.addAction(self.actionEditUndo)
mnuEdit.addAction(self.actionEditRedo)
mnuEdit.addSeparator()
mnuEdit.addAction(self.actionEditCut)
mnuEdit.addAction(self.actionEditCopy)
mnuEdit.addAction(self.actionEditPaste)
if platform.system() == 'Windows':
mnuEdit.addSeparator()
mnuEdit.addAction(self.actionConfigure)
self.preferenceDialog = QXStaticConfig()
self.preferenceDialog.setWindowTitle("%s - Preferences" % QXApplication.appName())
system_items = [
{'item_title' : 'Style',
'item_type' : QXStaticConfig.List ,
'item_default' : QXApplication.getStyleList() ,
'call_back' : QXApplication.getStyleCallBack() },
{'item_title' : 'Icon Theme',
'item_type' : QXStaticConfig.List ,
'item_default' : QXApplication.getIconThemeList() ,
'call_back' : QXApplication.getIconThemeCallBack() }
]
if mainWidget is None:
mainWidget = QWidget()
self.setCentralWidget(mainWidget)
mainWidget.setFocus()
        try:
            mainWidget.undoAvailable.connect(self.actionEditUndo.setEnabled)
            mainWidget.redoAvailable.connect(self.actionEditRedo.setEnabled)
        except AttributeError:
            # The central widget does not expose undo/redo signals.
            pass
        try:
            system_items.append({'item_title' : 'Font',
                                 'item_type' : QXStaticConfig.Font ,
                                 'item_default' : mainWidget.currentFont() ,
                                 'call_back' : mainWidget.setCurrentFont
                                 })
        except AttributeError:
            # The central widget has no current-font API; omit the Font item.
            pass
self.addPreferencePage('General',QXApplication.findIcon('configure','default',self.preferenceDialog.cloGetActionSetIcon(0)),
[{'group_title' : '' ,
'items' : [
{'section_title' : 'Theme', 'items' : system_items}
]}
])
self.setEditUndoRedoStatus(False,False)
def addPreferencePage(self,title,icon,conf):
self.preferenceDialog.addConfigPage(
{
'title' : title,
'icon' : icon,
'items' : conf
}
)
def setFileDialogSuffix(self,s):
allFormat = []
for i in str(s).split(';;'):
m = re.match(r'.*\(([^\)]+)\)\s*$',i)
if m:
for j in re.split(r'\s+',m.group(1)):
allFormat.append(j)
        allFormatStr = 'All supported formats (%s)' % " ".join(allFormat)
self._fileOpenSuffix = allFormatStr + ';;' + s
if not re.match(r'.*;;\s*$',s):
s = s + ';;'
self._fileSaveAsSuffix = s + allFormatStr
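    # Illustrative example (hypothetical filter string): passing
    # "Text Files (*.txt);;Python (*.py)" sets the open filter to
    # "All supported formats (*.txt *.py);;Text Files (*.txt);;Python (*.py)"
    # and appends the combined entry to the save-as filter as well.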
def setFileSaveAsSuffix(self,s):
self._fileSaveAsSuffix = s
def setFileCreateByMe(self,t):
self._fileCreateByMe = t
def fileCreateByMe(self):
return self._fileCreateByMe
def setFileName(self,f):
        self._fileName = f
def setFileReadOnly(self,t):
self._fileReadOnly = t
def loadFinished(self,success = False):
if not success:
self.setFileName(None)
self.setWindowTitle("Untitled[*] - %s" % self.appName)
return
if not self._fileReadOnly:
self.setWindowTitle("%s[*] - %s" % (self._fileName,self.appName))
else:
self.setWindowTitle("%s (Read Only) - %s" % (self._fileName,self.appName))
def fileName(self):
return self._fileName
def ActionFileNew(self):
QXApplication.invokeSelf()
def ActionFileOpen(self):
fileName = QFileDialog.getOpenFileName(self,"Open",QDir.currentPath(),self._fileOpenSuffix)
        if fileName is not None and fileName != '':
self.setFileCreateByMe(False)
self.ActionFileLoad(fileName)
def ActionFileLoad(self,f):
self.updateStatusBarMessage("Loading %s" % f)
self.setFileName(f)
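        # Defer the actual read with a single-shot timer so the event loop can
        # repaint the "Loading ..." status message before the (potentially
        # blocking) work in onFileLoad() starts.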
self.t = QTimer()
self.t.setSingleShot(True)
self.t.timeout.connect(self.onFileLoad)
self.t.start(100)
def ActionFileSave(self):
if self._fileName is None or self._fileReadOnly:
self.setFileCreateByMe(True)
self.ActionFileSaveAs()
else:
self.onFileSave(self.fileName())
def ActionFileSaveAs(self):
fileName = QFileDialog.getSaveFileName(self,"Save As",QDir.currentPath(),self._fileSaveAsSuffix)
        if fileName is not None and fileName != '':
if self.onFileSaveAs(fileName):
self.ActionFileLoad(fileName)
def ActionPreferences(self):
self.preferenceDialog.setAttribute(Qt.WA_DeleteOnClose,False)
self.preferenceDialog.show()
def onFileLoad(self):
suc = False
mainWidget = self.centralWidget()
mainWidget.setFocus()
        try:
            mainWidget.load(self.fileName())
            suc = True
        except Exception:
            fin = QFile(self._fileName)
            if fin.open(QIODevice.ReadOnly):
                s = str(fin.readAll())
                try:
                    mainWidget.setText(s)
                    suc = True
                except Exception:
                    try:
                        mainWidget.setPlainText(s)
                        suc = True
                    except Exception:
                        pass
                fin.close()
self.loadFinished(suc)
self.updateStatusBarMessage('Ready')
    def onDefaultSaveBeh(self,fileName):
        mainWidget = self.centralWidget()
        mainWidget.setFocus()
        try:
            mainWidget.save(fileName)
            return True
        except Exception:
            # Fall back to writing the widget's plain text to the target file.
            fout = QFile(fileName)
            if not fout.open(QIODevice.WriteOnly):
                return False
            try:
                try:
                    text = mainWidget.text()
                except Exception:
                    try:
                        text = mainWidget.toPlainText()
                    except Exception:
                        return False
                fout.write(str(text))
                return True
            finally:
                fout.close()
def onFileSaveAs(self,fileName):
return self.onDefaultSaveBeh(fileName)
def onFileSave(self,fileName):
return self.onDefaultSaveBeh(fileName)
def setEditUndoRedoStatus(self,canUndo,canRedo):
self.actionEditUndo.setEnabled(canUndo)
self.actionEditRedo.setEnabled(canRedo)
    def onEditUndo(self):
        mainWidget = self.centralWidget()
        try:
            mainWidget.undo()
        except AttributeError:
            pass
    def onEditRedo(self):
        mainWidget = self.centralWidget()
        try:
            mainWidget.redo()
        except AttributeError:
            pass
    def onEditCut(self):
        mainWidget = self.centralWidget()
        try:
            mainWidget.cut()
        except AttributeError:
            pass
    def onEditCopy(self):
        mainWidget = self.centralWidget()
        try:
            mainWidget.copy()
        except AttributeError:
            pass
    def onEditPaste(self):
        mainWidget = self.centralWidget()
        try:
            mainWidget.paste()
        except AttributeError:
            pass
def updateStatusBarMessage(self,s):
self.statusBar().showMessage(s)
def closeEvent(self,e):
        if self.isWindowModified():
msgBox = QMessageBox(self)
msgBox.setIcon(QMessageBox.Question)
msgBox.setWindowTitle(self.appName)
msgBox.setText("The document %s has been modified." % self.fileName())
msgBox.setInformativeText("Do you want to save your changes?")
msgBox.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
msgBox.setDefaultButton(QMessageBox.Save)
ret = msgBox.exec_()
if ret == QMessageBox.Cancel:
e.ignore()
return
if ret == QMessageBox.Save:
self.ActionFileSave()
e.accept()
return
e.accept()
return
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from pyramid.registry import Registry
from pyramid.response import Response
from pyramid.testing import DummyRequest
from opencensus.common.transports import sync
from opencensus.ext.pyramid import pyramid_middleware
from opencensus.ext.zipkin import trace_exporter as zipkin_exporter
from opencensus.trace import execution_context, print_exporter, samplers
from opencensus.trace import span as span_module
from opencensus.trace.blank_span import BlankSpan
from opencensus.trace.propagation import trace_context_http_header_format
from opencensus.trace.tracers import noop_tracer
class TestPyramidMiddleware(unittest.TestCase):
    def tearDown(self):
        execution_context.clear()
def test_constructor(self):
pyramid_trace_header = 'traceparent'
trace_id = '2dd43a1d6b2549c6bc2a1a54c2fc0b05'
span_id = '6e0c63257de34c92'
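        # The W3C traceparent value built below is "00-<trace id>-<span id>-00":
        # version 00, a 32-hex-digit trace id, a 16-hex-digit parent span id,
        # and trace flags 00.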
pyramid_trace_id = '00-{}-{}-00'.format(trace_id, span_id)
response = Response()
def dummy_handler(request):
return response
mock_registry = mock.Mock(spec=Registry)
mock_registry.settings = {
'OPENCENSUS': {
'TRACE': {
'EXPORTER': print_exporter.PrintExporter(),
}
}
}
middleware = pyramid_middleware.OpenCensusTweenFactory(
dummy_handler,
mock_registry,
)
assert isinstance(middleware.sampler, samplers.AlwaysOnSampler)
assert isinstance(
middleware.exporter, print_exporter.PrintExporter)
assert isinstance(
middleware.propagator,
trace_context_http_header_format.TraceContextPropagator)
# Just a smoke test to make sure things work
request = DummyRequest(
registry=mock_registry,
path='/',
headers={pyramid_trace_header: pyramid_trace_id},
)
assert middleware(request) == response
def test_constructor_zipkin(self):
service_name = 'test_service'
host_name = 'test_hostname'
port = 2333
response = Response()
def dummy_handler(request):
return response
exporter = zipkin_exporter.ZipkinExporter(
service_name=service_name,
host_name=host_name,
port=port,
transport=sync.SyncTransport
)
mock_registry = mock.Mock(spec=Registry)
mock_registry.settings = {
'OPENCENSUS': {
'TRACE': {
'EXPORTER': exporter,
}
}
}
middleware = pyramid_middleware.OpenCensusTweenFactory(
dummy_handler,
mock_registry,
)
assert isinstance(middleware.sampler, samplers.AlwaysOnSampler)
assert isinstance(
middleware.exporter, zipkin_exporter.ZipkinExporter)
assert isinstance(
middleware.propagator,
trace_context_http_header_format.TraceContextPropagator)
self.assertEqual(middleware.exporter.service_name, service_name)
self.assertEqual(middleware.exporter.host_name, host_name)
self.assertEqual(middleware.exporter.port, port)
def test__before_request(self):
pyramid_trace_header = 'traceparent'
trace_id = '2dd43a1d6b2549c6bc2a1a54c2fc0b05'
span_id = '6e0c63257de34c92'
pyramid_trace_id = '00-{}-{}-00'.format(trace_id, span_id)
response = Response()
def dummy_handler(request):
return response
mock_registry = mock.Mock(spec=Registry)
mock_registry.settings = {}
middleware = pyramid_middleware.OpenCensusTweenFactory(
dummy_handler,
mock_registry,
)
request = DummyRequest(
registry=mock_registry,
path='/',
headers={pyramid_trace_header: pyramid_trace_id},
)
middleware._before_request(request)
tracer = execution_context.get_opencensus_tracer()
self.assertIsNotNone(tracer)
span = tracer.current_span()
expected_attributes = {
'http.host': u'http://example.com',
'http.method': 'GET',
'http.path': u'/',
'http.route': u'/',
'http.url': u'http://example.com',
}
self.assertEqual(span.span_kind, span_module.SpanKind.SERVER)
self.assertEqual(span.attributes, expected_attributes)
self.assertEqual(span.parent_span.span_id, span_id)
span_context = tracer.span_context
self.assertEqual(span_context.trace_id, trace_id)
def test__before_request_excludelist(self):
pyramid_trace_header = 'traceparent'
trace_id = '2dd43a1d6b2549c6bc2a1a54c2fc0b05'
span_id = '6e0c63257de34c92'
pyramid_trace_id = '00-{}-{}-00'.format(trace_id, span_id)
response = Response()
def dummy_handler(request):
return response
mock_registry = mock.Mock(spec=Registry)
mock_registry.settings = {}
middleware = pyramid_middleware.OpenCensusTweenFactory(
dummy_handler,
mock_registry,
)
request = DummyRequest(
registry=mock_registry,
path='/_ah/health',
headers={pyramid_trace_header: pyramid_trace_id},
)
middleware._before_request(request)
tracer = execution_context.get_opencensus_tracer()
assert isinstance(tracer, noop_tracer.NoopTracer)
span = tracer.current_span()
assert isinstance(span, BlankSpan)
def test__after_request(self):
pyramid_trace_header = 'traceparent'
trace_id = '2dd43a1d6b2549c6bc2a1a54c2fc0b05'
span_id = '6e0c63257de34c92'
pyramid_trace_id = '00-{}-{}-00'.format(trace_id, span_id)
response = Response(status=200)
def dummy_handler(request):
return response
mock_registry = mock.Mock(spec=Registry)
mock_registry.settings = {}
middleware = pyramid_middleware.OpenCensusTweenFactory(
dummy_handler,
mock_registry,
)
request = DummyRequest(
registry=mock_registry,
path='/',
headers={pyramid_trace_header: pyramid_trace_id},
)
middleware._before_request(request)
tracer = execution_context.get_opencensus_tracer()
self.assertIsNotNone(tracer)
span = tracer.current_span()
expected_attributes = {
'http.host': u'http://example.com',
'http.method': 'GET',
'http.path': u'/',
'http.route': u'/',
'http.url': u'http://example.com',
'http.status_code': 200,
}
self.assertEqual(span.parent_span.span_id, span_id)
middleware._after_request(request, response)
self.assertEqual(span.attributes, expected_attributes)
def test__after_request_excludelist(self):
pyramid_trace_header = 'traceparent'
trace_id = '2dd43a1d6b2549c6bc2a1a54c2fc0b05'
span_id = '6e0c63257de34c92'
pyramid_trace_id = '00-{}-{}-00'.format(trace_id, span_id)
response = Response()
def dummy_handler(request):
return response
mock_registry = mock.Mock(spec=Registry)
mock_registry.settings = {}
middleware = pyramid_middleware.OpenCensusTweenFactory(
dummy_handler,
mock_registry,
)
request = DummyRequest(
registry=mock_registry,
path='/_ah/health',
headers={pyramid_trace_header: pyramid_trace_id},
)
middleware._before_request(request)
tracer = execution_context.get_opencensus_tracer()
assert isinstance(tracer, noop_tracer.NoopTracer)
span = tracer.current_span()
middleware._after_request(request, response)
assert isinstance(span, BlankSpan)
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import itertools
import threading
import time
from infra_libs.ts_mon.common import errors
def default_modify_fn(name):
def _modify_fn(value, delta):
if delta < 0:
raise errors.MonitoringDecreasingValueError(name, None, delta)
return value + delta
return _modify_fn
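# Illustrative behavior of the default modify function (not part of the API):
#
#   fn = default_modify_fn('my/metric')
#   fn(10, 5)   # -> 15
#   fn(10, -1)  # raises errors.MonitoringDecreasingValueError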
class MetricStore(object):
"""A place to store values for each metric.
    Several methods take "a normalized field tuple".  This is a tuple of
    (key, value) tuples sorted by key.  (A tuple is used instead of a dict
    because tuples are hashable and so can serve as dict keys; dicts cannot.)
The MetricStore is also responsible for keeping the start_time of each metric.
This is what goes into the start_timestamp_us field in the MetricsData proto
for cumulative metrics and distributions, and helps Monarch identify when a
counter was reset. This is the MetricStore's job because an implementation
might share counter values across multiple instances of a task (like on
Appengine), so the start time must be associated with that value so that it
can be reset for all tasks at once when the value is reset.
External metric stores (like those backed by memcache) may be cleared (either
wholly or partially) at any time. When this happens the MetricStore *must*
generate a new start_time for all the affected metrics.
Metrics can specify their own explicit start time if they are mirroring the
value of some external counter that started counting at a known time.
Otherwise the MetricStore's time_fn (defaults to time.time()) is called the
first time a metric is set or incremented, or after it is cleared externally.
"""
def __init__(self, state, time_fn=None):
self._state = state
self._time_fn = time_fn or time.time
def get(self, name, fields, target_fields, default=None):
"""Fetches the current value for the metric.
Args:
name (string): the metric's name.
fields (tuple): a normalized field tuple.
target_fields (dict or None): target fields to override.
default: the value to return if the metric has no value of this set of
field values.
"""
raise NotImplementedError
def get_all(self):
"""Returns an iterator over all the metrics present in the store.
The iterator yields 5-tuples:
(target, metric, start_time, end_time, field_values)
"""
raise NotImplementedError
def set(self, name, fields, target_fields, value, enforce_ge=False):
"""Sets the metric's value.
Args:
name: the metric's name.
fields: a normalized field tuple.
target_fields (dict or None): target fields to override.
value: the new value for the metric.
enforce_ge: if this is True, raise an exception if the new value is
less than the old value.
Raises:
MonitoringDecreasingValueError: if enforce_ge is True and the new value is
smaller than the old value.
"""
raise NotImplementedError
def incr(self, name, fields, target_fields, delta, modify_fn=None):
"""Increments the metric's value.
Args:
name: the metric's name.
fields: a normalized field tuple.
target_fields (dict or None): target fields to override.
delta: how much to increment the value by.
modify_fn: this function is called with the original value and the delta
as its arguments and is expected to return the new value. The
function must be idempotent as it may be called multiple times.
"""
raise NotImplementedError
def reset_for_unittest(self, name=None):
"""Clears the values metrics. Useful in unittests.
Args:
name: the name of an individual metric to reset, or if None resets all
metrics.
"""
raise NotImplementedError
def _start_time(self, name):
if name in self._state.metrics:
ret = self._state.metrics[name].start_time
if ret is not None:
return ret
return self._time_fn()
class _TargetFieldsValues(object):
"""Holds all values for a single metric.
Values are keyed by metric fields and target fields (which override the
default target fields configured globally for the process).
"""
def __init__(self, start_time):
self.start_time = start_time
# {normalized_target_fields: {normalized_metric_fields: value}}
self._values = collections.defaultdict(dict)
def _get_target_values(self, target_fields):
# Normalize the target fields by converting them into a hashable tuple.
if not target_fields:
target_fields = {}
key = tuple(sorted(target_fields.items()))
return self._values[key]
def get_value(self, fields, target_fields, default=None):
return self._get_target_values(target_fields).get(
fields, default)
def set_value(self, fields, target_fields, value):
self._get_target_values(target_fields)[fields] = value
def iter_targets(self, default_target):
for target_fields, fields_values in self._values.items():
if target_fields:
target = copy.copy(default_target)
target.update({k: v for k, v in target_fields})
else:
target = default_target
yield target, fields_values
def __deepcopy__(self, memo_dict):
ret = _TargetFieldsValues(self.start_time)
ret._values = copy.deepcopy(self._values, memo_dict)
return ret
class InProcessMetricStore(MetricStore):
"""A thread-safe metric store that keeps values in memory."""
def __init__(self, state, time_fn=None):
super(InProcessMetricStore, self).__init__(state, time_fn=time_fn)
self._values = {}
self._thread_lock = threading.Lock()
def _entry(self, name):
if name not in self._values:
self._reset(name)
return self._values[name]
def get(self, name, fields, target_fields, default=None):
return self._entry(name).get_value(fields, target_fields, default)
def iter_field_values(self, name):
return itertools.chain.from_iterable(
x.items() for _, x
in self._entry(name).iter_targets(self._state.target))
def get_all(self):
# Make a copy of the metric values in case another thread (or this
# generator's consumer) modifies them while we're iterating.
with self._thread_lock:
values = copy.deepcopy(self._values)
end_time = self._time_fn()
for name, metric_values in values.items():
if name not in self._state.metrics:
continue
start_time = metric_values.start_time
for target, fields_values in metric_values.iter_targets(
self._state.target):
yield (target, self._state.metrics[name], start_time, end_time,
fields_values)
def set(self, name, fields, target_fields, value, enforce_ge=False):
with self._thread_lock:
if enforce_ge:
old_value = self._entry(name).get_value(fields, target_fields, 0)
if value < old_value:
raise errors.MonitoringDecreasingValueError(name, old_value, value)
self._entry(name).set_value(fields, target_fields, value)
def incr(self, name, fields, target_fields, delta, modify_fn=None):
if delta < 0:
raise errors.MonitoringDecreasingValueError(name, None, delta)
if modify_fn is None:
modify_fn = default_modify_fn(name)
with self._thread_lock:
self._entry(name).set_value(fields, target_fields, modify_fn(
self.get(name, fields, target_fields, 0), delta))
def reset_for_unittest(self, name=None):
if name is not None:
self._reset(name)
else:
for name in self._values.keys():
self._reset(name)
def _reset(self, name):
self._values[name] = _TargetFieldsValues(self._start_time(name))
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo import messaging
from nova import context
from nova.db import base
from nova import exception
from nova.network import rpcapi as network_rpcapi
from nova.objects import dns_domain as dns_domain_obj
from nova.objects import fixed_ip as fixed_ip_obj
from nova.objects import floating_ip as floating_ip_obj
from nova.objects import instance as instance_obj
from nova.objects import network as network_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import uuidutils
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
floating_opts = [
cfg.StrOpt('default_floating_pool',
default='nova',
help='Default pool for floating IPs'),
cfg.BoolOpt('auto_assign_floating_ip',
default=False,
                help='Automatically assign a floating IP to each VM'),
cfg.StrOpt('floating_ip_dns_manager',
default='nova.network.noop_dns_driver.NoopDNSDriver',
help='Full class name for the DNS Manager for floating IPs'),
cfg.StrOpt('instance_dns_manager',
default='nova.network.noop_dns_driver.NoopDNSDriver',
help='Full class name for the DNS Manager for instance IPs'),
cfg.StrOpt('instance_dns_domain',
default='',
help='Full class name for the DNS Zone for instance IPs'),
]
CONF = cfg.CONF
CONF.register_opts(floating_opts)
CONF.import_opt('public_interface', 'nova.network.linux_net')
CONF.import_opt('network_topic', 'nova.network.rpcapi')
class FloatingIP(object):
"""Mixin class for adding floating IP functionality to a manager."""
servicegroup_api = None
def init_host_floating_ips(self):
"""Configures floating ips owned by host."""
admin_context = context.get_admin_context()
try:
floating_ips = floating_ip_obj.FloatingIPList.get_by_host(
admin_context, self.host)
except exception.NotFound:
return
for floating_ip in floating_ips:
if floating_ip.fixed_ip_id:
try:
fixed_ip = floating_ip.fixed_ip
except exception.FixedIpNotFound:
msg = _('Fixed ip %s not found') % floating_ip.fixed_ip_id
LOG.debug(msg)
continue
interface = CONF.public_interface or floating_ip.interface
try:
self.l3driver.add_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
except processutils.ProcessExecutionError:
LOG.debug('Interface %s not found', interface)
raise exception.NoFloatingIpInterface(interface=interface)
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the floating IP resources for an instance.
calls super class allocate_for_instance() as well
rpc.called by network_api
"""
instance_uuid = kwargs.get('instance_id')
if not uuidutils.is_uuid_like(instance_uuid):
instance_uuid = kwargs.get('instance_uuid')
project_id = kwargs.get('project_id')
requested_networks = kwargs.get('requested_networks')
# call the next inherited class's allocate_for_instance()
# which is currently the NetworkManager version
# do this first so fixed ip is already allocated
nw_info = super(FloatingIP, self).allocate_for_instance(context,
**kwargs)
if CONF.auto_assign_floating_ip:
# allocate a floating ip
floating_address = self.allocate_floating_ip(context, project_id,
True)
LOG.debug("floating IP allocation for instance "
"|%s|", floating_address,
instance_uuid=instance_uuid, context=context)
# get the first fixed address belonging to the instance
fixed_ips = nw_info.fixed_ips()
fixed_address = fixed_ips[0]['address']
# associate the floating ip to fixed_ip
self.associate_floating_ip(context,
floating_address,
fixed_address,
affect_auto_assigned=True)
# create a fresh set of network info that contains the floating ip
nw_info = self.get_instance_nw_info(context, **kwargs)
return nw_info
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating floating IP resources for an instance.
calls super class deallocate_for_instance() as well.
rpc.called by network_api
"""
if 'instance' in kwargs:
instance_uuid = kwargs['instance'].uuid
else:
instance_uuid = kwargs['instance_id']
if not uuidutils.is_uuid_like(instance_uuid):
# NOTE(francois.charlier): in some cases the instance might be
# deleted before the IPs are released, so we need to get
# deleted instances too
instance = instance_obj.Instance.get_by_id(
context.elevated(read_deleted='yes'), instance_uuid)
instance_uuid = instance.uuid
try:
fixed_ips = fixed_ip_obj.FixedIPList.get_by_instance_uuid(
context, instance_uuid)
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
# add to kwargs so we can pass to super to save a db lookup there
kwargs['fixed_ips'] = fixed_ips
for fixed_ip in fixed_ips:
fixed_id = fixed_ip.id
floating_ips = floating_ip_obj.FloatingIPList.get_by_fixed_ip_id(
context, fixed_id)
# disassociate floating ips related to fixed_ip
for floating_ip in floating_ips:
address = str(floating_ip.address)
try:
self.disassociate_floating_ip(context,
address,
affect_auto_assigned=True)
except exception.FloatingIpNotAssociated:
LOG.info(_("Floating IP %s is not associated. Ignore."),
address)
# deallocate if auto_assigned
if floating_ip.auto_assigned:
self.deallocate_floating_ip(context, address,
affect_auto_assigned=True)
# call the next inherited class's deallocate_for_instance()
# which is currently the NetworkManager version
# call this after so floating IPs are handled first
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def _floating_ip_owned_by_project(self, context, floating_ip):
"""Raises if floating ip does not belong to project."""
if context.is_admin:
return
if floating_ip.project_id != context.project_id:
if floating_ip.project_id is None:
LOG.warn(_('Address |%(address)s| is not allocated'),
{'address': floating_ip.address})
raise exception.Forbidden()
else:
LOG.warn(_('Address |%(address)s| is not allocated to your '
'project |%(project)s|'),
{'address': floating_ip.address,
'project': context.project_id})
raise exception.Forbidden()
def allocate_floating_ip(self, context, project_id, auto_assigned=False,
pool=None):
"""Gets a floating ip from the pool."""
# NOTE(tr3buchet): all network hosts in zone now use the same pool
pool = pool or CONF.default_floating_pool
use_quota = not auto_assigned
# Check the quota; can't put this in the API because we get
# called into from other places
try:
if use_quota:
reservations = QUOTAS.reserve(context, floating_ips=1,
project_id=project_id)
except exception.OverQuota:
LOG.warn(_("Quota exceeded for %s, tried to allocate "
"floating IP"), context.project_id)
raise exception.FloatingIpLimitExceeded()
try:
floating_ip = floating_ip_obj.FloatingIP.allocate_address(
context, project_id, pool, auto_assigned=auto_assigned)
payload = dict(project_id=project_id, floating_ip=floating_ip)
self.notifier.info(context,
'network.floating_ip.allocate', payload)
# Commit the reservations
if use_quota:
QUOTAS.commit(context, reservations, project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
if use_quota:
QUOTAS.rollback(context, reservations,
project_id=project_id)
return floating_ip
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def deallocate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Returns a floating ip to the pool."""
floating_ip = floating_ip_obj.FloatingIP.get_by_address(context,
address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
return
use_quota = not floating_ip.auto_assigned
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is not associated
if floating_ip.fixed_ip_id:
floating_address = floating_ip.address
raise exception.FloatingIpAssociated(address=floating_address)
# clean up any associated DNS entries
self._delete_all_entries_for_ip(context,
floating_ip.address)
payload = dict(project_id=floating_ip.project_id,
floating_ip=str(floating_ip.address))
self.notifier.info(context, 'network.floating_ip.deallocate', payload)
project_id = floating_ip.project_id
# Get reservations...
try:
if use_quota:
reservations = QUOTAS.reserve(context,
project_id=project_id,
floating_ips=-1)
else:
reservations = None
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"floating IP"))
floating_ip_obj.FloatingIP.deallocate(context, address)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.

        Makes sure everything makes sense, then calls _associate_floating_ip,
        RPC'ing to the correct host if this host is not it.
Access to the floating_address is verified but access to the
        fixed_address is not verified. This assumes that the calling
side has already verified that the fixed_address is legal by
checking access to the instance.
"""
floating_ip = floating_ip_obj.FloatingIP.get_by_address(
context, floating_address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
return
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# disassociate any already associated
orig_instance_uuid = None
if floating_ip.fixed_ip_id:
# find previously associated instance
fixed_ip = floating_ip.fixed_ip
if str(fixed_ip.address) == fixed_address:
# NOTE(vish): already associated to this address
return
orig_instance_uuid = fixed_ip.instance_uuid
self.disassociate_floating_ip(context, floating_address)
fixed_ip = fixed_ip_obj.FixedIP.get_by_address(context,
fixed_address)
# send to correct host, unless i'm the correct host
network = network_obj.Network.get_by_id(context.elevated(),
fixed_ip.network_id)
if network.multi_host:
instance = instance_obj.Instance.get_by_uuid(
context, fixed_ip.instance_uuid)
host = instance.host
else:
host = network.host
interface = floating_ip.interface
if host == self.host:
# i'm the correct host
self._associate_floating_ip(context, floating_address,
fixed_address, interface,
fixed_ip.instance_uuid)
else:
# send to correct host
self.network_rpcapi._associate_floating_ip(context,
floating_address, fixed_address, interface, host,
fixed_ip.instance_uuid)
return orig_instance_uuid
def _associate_floating_ip(self, context, floating_address, fixed_address,
interface, instance_uuid):
"""Performs db and driver calls to associate floating ip & fixed ip."""
interface = CONF.public_interface or interface
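        # The per-address lock below serializes concurrent associate and
        # disassociate calls for the same floating address on this host.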
@utils.synchronized(unicode(floating_address))
def do_associate():
# associate floating ip
floating = floating_ip_obj.FloatingIP.associate(context,
floating_address,
fixed_address,
self.host)
fixed = floating.fixed_ip
if not fixed:
# NOTE(vish): ip was already associated
return
try:
# gogo driver time
self.l3driver.add_floating_ip(floating_address, fixed_address,
interface, fixed['network'])
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception() as exc_ctxt:
try:
floating_ip_obj.FloatingIP.disassociate(
context, floating_address)
except Exception:
                        LOG.warn(_('Failed to disassociate floating '
                                   'address: %s'), floating_address)
if "Cannot find device" in str(e):
try:
LOG.error(_('Interface %s not found'), interface)
except Exception:
pass
raise exception.NoFloatingIpInterface(
interface=interface)
payload = dict(project_id=context.project_id,
instance_id=instance_uuid,
floating_ip=floating_address)
self.notifier.info(context,
'network.floating_ip.associate', payload)
do_associate()
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from its fixed ip.

        Makes sure everything makes sense, then calls _disassociate_floating_ip,
        RPC'ing to the correct host if this host is not it.
"""
floating_ip = floating_ip_obj.FloatingIP.get_by_address(context,
address)
# handle auto assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
raise exception.CannotDisassociateAutoAssignedFloatingIP()
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is associated
if not floating_ip.fixed_ip_id:
floating_address = floating_ip.address
raise exception.FloatingIpNotAssociated(address=floating_address)
fixed_ip = fixed_ip_obj.FixedIP.get_by_id(context,
floating_ip.fixed_ip_id)
# send to correct host, unless i'm the correct host
network = network_obj.Network.get_by_id(context.elevated(),
fixed_ip.network_id)
interface = floating_ip.interface
if network.multi_host:
instance = instance_obj.Instance.get_by_uuid(
context, fixed_ip.instance_uuid)
service = service_obj.Service.get_by_host_and_topic(
context.elevated(), instance.host, CONF.network_topic)
if service and self.servicegroup_api.service_is_up(service):
host = instance.host
else:
# NOTE(vish): if the service is down just deallocate the data
# locally. Set the host to local so the call will
# not go over rpc and set interface to None so the
# teardown in the driver does not happen.
host = self.host
interface = None
else:
host = network.host
if host == self.host:
# i'm the correct host
self._disassociate_floating_ip(context, address, interface,
fixed_ip.instance_uuid)
else:
# send to correct host
self.network_rpcapi._disassociate_floating_ip(context, address,
interface, host, fixed_ip.instance_uuid)
def _disassociate_floating_ip(self, context, address, interface,
instance_uuid):
"""Performs db and driver calls to disassociate floating ip."""
interface = CONF.public_interface or interface
@utils.synchronized(unicode(address))
def do_disassociate():
# NOTE(vish): Note that we are disassociating in the db before we
# actually remove the ip address on the host. We are
# safe from races on this host due to the decorator,
# but another host might grab the ip right away. We
# don't worry about this case because the minuscule
# window where the ip is on both hosts shouldn't cause
# any problems.
floating = floating_ip_obj.FloatingIP.disassociate(context,
address)
fixed = floating.fixed_ip
if not fixed:
# NOTE(vish): ip was already disassociated
return
if interface:
# go go driver time
self.l3driver.remove_floating_ip(address, fixed.address,
interface, fixed.network)
payload = dict(project_id=context.project_id,
instance_id=instance_uuid,
floating_ip=address)
self.notifier.info(context,
'network.floating_ip.disassociate', payload)
do_disassociate()
@messaging.expected_exceptions(exception.FloatingIpNotFound)
def get_floating_ip(self, context, id):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return dict(floating_ip_obj.FloatingIP.get_by_id(
context, id).iteritems())
def get_floating_pools(self, context):
"""Returns list of floating pools."""
# NOTE(maurosr) This method should be removed in future, replaced by
# get_floating_ip_pools. See bug #1091668
return self.get_floating_ip_pools(context)
def get_floating_ip_pools(self, context):
"""Returns list of floating ip pools."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
pools = floating_ip_obj.FloatingIP.get_pool_names(context)
return [dict(name=name) for name in pools]
def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
# NOTE(danms): Not converting to objects since it's not used
return dict(self.db.floating_ip_get_by_address(context,
address).iteritems())
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
# NOTE(danms): Not converting to objects since it's not used
ips = self.db.floating_ip_get_all_by_project(context,
context.project_id)
return [dict(ip.iteritems()) for ip in ips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
# NOTE(danms): Not converting to objects since it's not used
floating_ips = self.db.floating_ip_get_by_fixed_address(context,
fixed_address)
return [floating_ip['address'] for floating_ip in floating_ips]
def _is_stale_floating_ip_address(self, context, floating_ip):
try:
self._floating_ip_owned_by_project(context, floating_ip)
except exception.Forbidden:
return True
        return not floating_ip.get('fixed_ip_id')
def migrate_instance_start(self, context, instance_uuid,
floating_addresses,
rxtx_factor=None, project_id=None,
source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
if not floating_addresses or (source and source == dest):
return
LOG.info(_("Starting migration network for instance %s"),
instance_uuid)
for address in floating_addresses:
floating_ip = floating_ip_obj.FloatingIP.get_by_address(context,
address)
if self._is_stale_floating_ip_address(context, floating_ip):
LOG.warn(_("Floating ip address |%(address)s| no longer "
"belongs to instance %(instance_uuid)s. Will not "
"migrate it "),
{'address': address, 'instance_uuid': instance_uuid})
continue
interface = CONF.public_interface or floating_ip.interface
fixed_ip = floating_ip.fixed_ip
self.l3driver.remove_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
# NOTE(ivoks): Destroy conntrack entries on source compute
# host.
self.l3driver.clean_conntrack(fixed_ip.address)
            # NOTE(wenjianhn): Make sure this address will not be bound to the
            # public interface when nova-network restarts on the dest compute node
floating_ip.host = None
floating_ip.save()
def migrate_instance_finish(self, context, instance_uuid,
floating_addresses, host=None,
rxtx_factor=None, project_id=None,
source=None, dest=None):
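        # Some callers may pass the destination via `host` rather than `dest`;
        # when `dest` is absent, treat `host` as the destination.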
# We only care if floating_addresses are provided and we're
# switching hosts
if host and not dest:
dest = host
if not floating_addresses or (source and source == dest):
return
LOG.info(_("Finishing migration network for instance %s"),
instance_uuid)
for address in floating_addresses:
floating_ip = floating_ip_obj.FloatingIP.get_by_address(context,
address)
if self._is_stale_floating_ip_address(context, floating_ip):
LOG.warn(_("Floating ip address |%(address)s| no longer "
"belongs to instance %(instance_uuid)s. Will not"
"setup it."),
{'address': address, 'instance_uuid': instance_uuid})
continue
floating_ip.host = dest
floating_ip.save()
interface = CONF.public_interface or floating_ip.interface
fixed_ip = floating_ip.fixed_ip
self.l3driver.add_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
def _prepare_domain_entry(self, context, domainref):
scope = domainref.scope
if scope == 'private':
this_domain = {'domain': domainref.domain,
'scope': scope,
'availability_zone': domainref.availability_zone}
else:
this_domain = {'domain': domainref.domain,
'scope': scope,
'project': domainref.project_id}
return this_domain
def get_dns_domains(self, context):
domains = []
domain_list = dns_domain_obj.DNSDomainList.get_all(context)
floating_driver_domain_list = self.floating_dns_manager.get_domains()
instance_driver_domain_list = self.instance_dns_manager.get_domains()
for dns_domain in domain_list:
if (dns_domain.domain in floating_driver_domain_list or
dns_domain.domain in instance_driver_domain_list):
domain_entry = self._prepare_domain_entry(context,
dns_domain)
if domain_entry:
domains.append(domain_entry)
else:
LOG.warn(_('Database inconsistency: DNS domain |%s| is '
'registered in the Nova db but not visible to '
'either the floating or instance DNS driver. It '
'will be ignored.'), dns_domain.domain)
return domains
def add_dns_entry(self, context, address, name, dns_type, domain):
self.floating_dns_manager.create_entry(name, address,
dns_type, domain)
def modify_dns_entry(self, context, address, name, domain):
self.floating_dns_manager.modify_address(name, address,
domain)
def delete_dns_entry(self, context, name, domain):
self.floating_dns_manager.delete_entry(name, domain)
def _delete_all_entries_for_ip(self, context, address):
domain_list = self.get_dns_domains(context)
for domain in domain_list:
names = self.get_dns_entries_by_address(context,
address,
domain['domain'])
for name in names:
self.delete_dns_entry(context, name, domain['domain'])
def get_dns_entries_by_address(self, context, address, domain):
return self.floating_dns_manager.get_entries_by_address(address,
domain)
def get_dns_entries_by_name(self, context, name, domain):
return self.floating_dns_manager.get_entries_by_name(name,
domain)
def create_private_dns_domain(self, context, domain, av_zone):
dns_domain_obj.DNSDomain.register_for_zone(context, domain, av_zone)
try:
self.instance_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warn(_('Domain |%(domain)s| already exists, '
'changing zone to |%(av_zone)s|.'),
{'domain': domain, 'av_zone': av_zone})
def create_public_dns_domain(self, context, domain, project):
dns_domain_obj.DNSDomain.register_for_project(context, domain, project)
try:
self.floating_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warn(_('Domain |%(domain)s| already exists, '
'changing project to |%(project)s|.'),
{'domain': domain, 'project': project})
def delete_dns_domain(self, context, domain):
dns_domain_obj.DNSDomain.delete_by_domain(context, domain)
self.floating_dns_manager.delete_domain(domain)
class LocalManager(base.Base, FloatingIP):
def __init__(self):
super(LocalManager, self).__init__()
# NOTE(vish): setting the host to none ensures that the actual
# l3driver commands for l3 are done via rpc.
self.host = None
self.servicegroup_api = servicegroup.API()
self.network_rpcapi = network_rpcapi.NetworkAPI()
self.floating_dns_manager = importutils.import_object(
CONF.floating_ip_dns_manager)
self.instance_dns_manager = importutils.import_object(
CONF.instance_dns_manager)
self.notifier = rpc.get_notifier('network', CONF.host)
import numpy as np
from vesper.tests.test_case import TestCase
import vesper.util.time_frequency_analysis_utils as tfa_utils
'''
TODO: Add test cases for which window size differs from DFT size,
and for which window is not rectangular.
'''
'''
TODO: Given that we need to test FFTs and spectrograms implemented
in various programming languages, it might make sense to prepare a
set of test cases in a language-portable format like JSON that can
be used by test code in the different languages.
'''
class TimeFrequencyAnalysisUtilsTests(TestCase):
def test_get_dft_analysis_data(self):
cases = [
(1000, 4, None, 4, [0, 250, 500])
]
for sample_rate, window_size, dft_size, expected_dft_size, \
expected_freqs in cases:
expected_freqs = np.array(expected_freqs)
actual_dft_size, actual_freqs = tfa_utils.get_dft_analysis_data(
sample_rate, window_size, dft_size)
self.assertEqual(actual_dft_size, expected_dft_size)
self.assertTrue(np.array_equal(actual_freqs, expected_freqs))
def test_get_dft_size(self):
cases = [
(1, 1),
(2, 2),
(3, 4),
(4, 4),
(5, 8),
(6, 8),
(7, 8),
(8, 8),
(9, 16)
]
for window_size, expected in cases:
actual = tfa_utils.get_dft_size(window_size)
self.assertEqual(actual, expected)
def test_get_dft_freqs(self):
cases = [
(1000, 1, [0]),
(1000, 2, [0, 500]),
(1000, 4, [0, 250, 500]),
(2000, 8, [0, 250, 500, 750, 1000])
]
for sample_rate, dft_size, expected in cases:
expected = np.array(expected)
actual = tfa_utils.get_dft_freqs(sample_rate, dft_size)
self.assertTrue(np.array_equal(actual, expected))
def test_get_dft_bin_num(self):
cases = [
((0, 8000, 8), 0),
((4000, 8000, 8), 4),
((1000, 8000, 8), 1),
((499, 8000, 8), 0),
((501, 8000, 8), 1),
((11024.5, 22050., 8), 4)
]
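        # Bins are spaced sample_rate / dft_size apart, and a frequency maps
        # to the nearest of the dft_size // 2 + 1 nonnegative-frequency bins,
        # e.g. 499 Hz with 1000 Hz spacing falls in bin 0, 501 Hz in bin 1.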
for args, expected in cases:
actual = tfa_utils.get_dft_bin_num(*args)
self.assertEqual(actual, expected)
def test_get_num_analysis_records(self):
cases = [
(0, 8, 4, 0),
(8, 8, 4, 1),
(16, 8, 4, 3),
(17, 8, 4, 3),
(18, 8, 4, 3),
(19, 8, 4, 3),
(20, 8, 4, 4),
(20, 8, 3, 5),
(21, 8, 3, 5),
(22, 8, 3, 5),
(23, 8, 3, 6)
]
for num_samples, window_size, hop_size, expected in cases:
actual = tfa_utils.get_num_analysis_records(
num_samples, window_size, hop_size)
self.assertEqual(actual, expected)
def test_get_num_analysis_records_errors(self):
cases = [
# record size zero
(0, 0, 1),
# hop size zero
(0, 1, 0),
# hop size exceeds record size
(0, 1, 2)
]
for args in cases:
self._assert_raises(
ValueError, tfa_utils.get_num_analysis_records, *args)
def test_get_analysis_records_1d(self):
"""Tests `get_analysis_records` with 1-dimensional input."""
samples = np.arange(8)
cases = [
# record size and hop size equal
(1, 1, [[0], [1], [2], [3], [4], [5], [6], [7]]),
(2, 2, [[0, 1], [2, 3], [4, 5], [6, 7]]),
(3, 3, [[0, 1, 2], [3, 4, 5]]),
(4, 4, [[0, 1, 2, 3], [4, 5, 6, 7]]),
(5, 5, [[0, 1, 2, 3, 4]]),
(8, 8, [[0, 1, 2, 3, 4, 5, 6, 7]]),
# record size and hop size not equal
(2, 1, [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]]),
(3, 2, [[0, 1, 2], [2, 3, 4], [4, 5, 6]]),
(4, 2, [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]]),
(4, 3, [[0, 1, 2, 3], [3, 4, 5, 6]]),
]
self._test_get_analysis_records(samples, cases)
def _test_get_analysis_records(self, samples, cases):
for record_size, hop_size, expected in cases:
expected = np.array(expected)
actual = tfa_utils._get_analysis_records(
samples, record_size, hop_size)
self._assert_arrays_equal(actual, expected)
def test_get_analysis_records_2d(self):
"""Tests `get_analysis_records` with 2-dimensional input."""
samples = np.arange(8).reshape((2, 4))
cases = [
# record size and hop size equal
(1, 1, [[[0], [1], [2], [3]], [[4], [5], [6], [7]]]),
(2, 2, [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
(3, 3, [[[0, 1, 2]], [[4, 5, 6]]]),
(4, 4, [[[0, 1, 2, 3]], [[4, 5, 6, 7]]]),
# record size and hop size not equal
(2, 1, [[[0, 1], [1, 2], [2, 3]], [[4, 5], [5, 6], [6, 7]]]),
(3, 1, [[[0, 1, 2], [1, 2, 3]], [[4, 5, 6], [5, 6, 7]]]),
(3, 2, [[[0, 1, 2]], [[4, 5, 6]]])
]
self._test_get_analysis_records(samples, cases)
def test_compute_spectrogram(self):
# This tests that our spectrogram function produces the
# expected output for an input comprising a single channel
# with a single window's worth of cosine samples. We use
# a rectangular window so the expected output spectrum has
# a particularly simple form.
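        # With a rectangular window of length N and a cosine centered on bin
        # k, the squared-magnitude DFT is N**2 at bins 0 and N/2 and N**2 / 4
        # at interior bins (the energy splits between bins k and N - k); see
        # _get_expected_spectrum below.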
for num_channels in [1, 2]:
for dft_size in [1, 2, 4, 8, 16]:
if dft_size == 1:
hop_sizes = [1]
else:
hop_sizes = [dft_size // 2, dft_size]
for hop_size in hop_sizes:
for bin_num in range(dft_size // 2 + 1):
self._test_compute_spectrogram(
num_channels, dft_size, hop_size, bin_num)
def _test_compute_spectrogram(
self, num_channels, dft_size, hop_size, bin_num):
num_samples = dft_size * 2
samples = self._create_test_signal(
num_channels, num_samples, dft_size, bin_num)
window = np.ones(dft_size)
spectra = tfa_utils.compute_spectrogram(
samples, window, hop_size, dft_size)
expected = self._get_expected_spectra(
num_channels, num_samples, hop_size, dft_size, bin_num)
self.assertTrue(np.allclose(spectra, expected))
def _create_test_signal(
self, num_channels, num_samples, dft_size, bin_num):
phase_factor = 2 * np.pi * bin_num / dft_size
samples = np.cos(phase_factor * np.arange(num_samples))
if num_channels == 2:
samples = np.stack((samples, np.ones(num_samples)))
return samples
def _get_expected_spectra(
self, num_channels, num_samples, hop_size, dft_size, bin_num):
num_spectra = tfa_utils.get_num_analysis_records(
num_samples, dft_size, hop_size)
spectrum = self._get_expected_spectrum(dft_size, bin_num)
spectra = np.ones((num_spectra, 1)) * spectrum
if num_channels == 2:
spectrum = self._get_expected_spectrum(dft_size, 0)
spectra_1 = np.ones((num_spectra, 1)) * spectrum
spectra = np.stack((spectra, spectra_1))
return spectra
def _get_expected_spectrum(self, dft_size, bin_num):
num_bins = dft_size // 2 + 1
spectrum = np.zeros(num_bins)
spectrum[bin_num] = dft_size ** 2
if bin_num != 0 and bin_num != num_bins - 1:
spectrum[bin_num] /= 4
return spectrum.reshape((1, len(spectrum)))
def test_scale_spectrogram(self):
cases = [
# empty
(np.zeros((0, 1)), np.zeros((0, 1))),
(np.zeros((0, 3)), np.zeros((0, 3))),
# mono
([[1], [2]], [[1], [2]]),
([[1, 2], [3, 4]], [[.5, 1], [1.5, 2]]),
([[1, 2, 3]], [[.25, 1, .75]]),
# stereo
([[[1], [2]], [[3], [4]]], [[[1], [2]], [[3], [4]]]),
([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[.5, 1], [1.5, 2]], [[2.5, 3], [3.5, 4]]]),
([[[1, 2, 3]], [[4, 5, 6]]], [[[.25, 1, .75]], [[1, 2.5, 1.5]]])
]
for spectra, expected in cases:
spectra = np.array(spectra, dtype='float64')
expected = np.array(expected, dtype='float64')
self._test_op(expected, tfa_utils.scale_spectrogram, spectra)
def _test_op(self, expected, op, input, *args, **kwargs):
# out of place, result allocated by op
actual = op(input, *args, **kwargs)
self.assertFalse(actual is input)
self._assert_arrays_equal(actual, expected)
# out of place, result preallocated
actual = np.zeros_like(expected)
kwargs_ = dict(kwargs, out=actual)
actual = op(input, *args, **kwargs_)
self.assertFalse(actual is input)
self._assert_arrays_equal(actual, expected)
# in place
kwargs_ = dict(kwargs, out=input)
actual = op(input, *args, **kwargs_)
self.assertTrue(actual is input)
self._assert_arrays_equal(actual, expected)
def test_linear_to_log(self):
minus_infinity = tfa_utils.SMALL_POWER_DB
cases = [
# empty
(np.zeros((0, 1)), np.zeros((0, 1))),
(np.zeros((0, 3)), np.zeros((0, 3))),
# mono
([[0], [1], [10]], [[minus_infinity], [0], [10]]),
([[0, 1], [1, 10]], [[minus_infinity, 0], [0, 10]]),
# stereo
([[[0, 1], [1, 10]], [[1, 10], [10, 100]]],
[[[minus_infinity, 0], [0, 10]], [[0, 10], [10, 20]]])
]
# default reference power
for spectra, expected in cases:
spectra = np.array(spectra, dtype='float64')
expected = np.array(expected, dtype='float64')
self._test_op(expected, tfa_utils.linear_to_log, spectra)
# explicit reference power
reference_power = 10
reference_power_db = 10 * np.log10(reference_power)
for spectra, expected in cases:
spectra = np.array(spectra, dtype='float64')
expected = np.array(expected, dtype='float64')
expected[expected != minus_infinity] -= reference_power_db
self._test_op(
expected, tfa_utils.linear_to_log, spectra, reference_power)
def test_log_to_linear(self):
cases = [
# empty
(np.zeros((0, 1)), np.zeros((0, 1))),
(np.zeros((0, 3)), np.zeros((0, 3))),
# mono
([[-10], [0], [10]], [[.1], [1], [10]]),
([[-10, 0], [0, 10]], [[.1, 1], [1, 10]]),
# stereo
([[[-10, 0], [0, 10]], [[0, 10], [10, 20]]],
[[[.1, 1], [1, 10]], [[1, 10], [10, 100]]])
]
# default reference power
for spectra, expected in cases:
spectra = np.array(spectra, dtype='float64')
expected = np.array(expected, dtype='float64')
self._test_op(expected, tfa_utils.log_to_linear, spectra)
# explicit reference power
reference_power = 10
for spectra, expected in cases:
spectra = np.array(spectra, dtype='float64')
expected = np.array(expected, dtype='float64')
expected *= reference_power
self._test_op(
expected, tfa_utils.log_to_linear, spectra, reference_power)
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import numpy.testing as npt
from scipy import integrate
from scipy import stats
from scipy.special import betainc
from common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, NUMPY_BELOW_1_7,
check_edge_support, check_named_args,
check_random_state_property,
check_meth_dtype, check_ppf_dtype, check_cmplx_deriv,
check_pickling)
from scipy.stats._distr_params import distcont
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test.  This provides safe parameters for each
distribution so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
# Note that you need to add new distributions you want tested
# to _distr_params
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
# Last four of these fail all around. Need to be checked
distcont_extra = [
['betaprime', (100, 86)],
['fatiguelife', (5,)],
['mielke', (4.6420495492121487, 0.59707419545516938)],
['invweibull', (0.58847112119264788,)],
# burr: sample mean test fails still for c<1
['burr', (0.94839838075366045, 4.3820284068855795)],
# genextreme: sample mean test, sf-logsf test fail
['genextreme', (3.3184017469423535,)],
]
distmissing = ['wald', 'gausshyper', 'genexpon', 'rv_continuous',
'loglaplace', 'rdist', 'semicircular', 'invweibull', 'ksone',
'cosine', 'kstwobign', 'truncnorm', 'mielke', 'recipinvgauss',
'levy', 'johnsonsu', 'levy_l', 'powernorm', 'wrapcauchy',
'johnsonsb', 'truncexpon', 'invgauss', 'invgamma',
'powerlognorm']
distmiss = [[dist, args] for dist, args in distcont if dist in distmissing]
distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon',
'vonmises', 'vonmises_line', 'mielke', 'semicircular',
'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign']
# distslow are sorted by speed (very slow to slow)
# These distributions fail the complex derivative test below.
# Here 'fail' means producing wrong results and/or raising exceptions, depending
# on the implementation details of the corresponding special functions.
# cf https://github.com/scipy/scipy/pull/4979 for a discussion.
fails_cmplx = set(['alpha', 'beta', 'betaprime', 'burr12', 'chi', 'chi2', 'dgamma',
'dweibull', 'erlang', 'expon', 'exponnorm', 'exponpow',
'exponweib', 'f', 'fatiguelife', 'foldnorm', 'frechet_l',
'frechet_r', 'gamma', 'gausshyper', 'genexpon',
'genextreme', 'gengamma', 'genlogistic', 'gennorm',
'genpareto', 'gilbrat', 'gompertz', 'halfcauchy',
'halfgennorm', 'halflogistic', 'halfnorm', 'invgamma',
'invgauss', 'johnsonsb', 'johnsonsu', 'ksone', 'kstwobign',
'levy_l', 'loggamma', 'logistic', 'lognorm', 'lomax',
'maxwell', 'nakagami', 'ncf', 'nct', 'ncx2', 'norm',
'pearson3', 'powerlognorm', 'powernorm', 'rayleigh',
'recipinvgauss', 'rice', 'skewnorm', 't', 'truncexpon', 'truncnorm',
'tukeylambda', 'vonmises', 'vonmises_line', 'wald',
'weibull_min'])
# NB: not needed anymore?
def _silence_fp_errors(func):
    # warning: don't apply to test_ functions as-is, or nose will skip them
def wrap(*a, **kw):
olderr = np.seterr(all='ignore')
try:
return func(*a, **kw)
finally:
np.seterr(**olderr)
wrap.__name__ = func.__name__
return wrap
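# Illustrative usage (a hedged sketch; the decorator is currently unused here):
#
#     @_silence_fp_errors
#     def _quiet_stats(distfn, arg):
#         return distfn.stats(*arg, moments='mvsk')
#
# Per the warning above, do not apply it to test_* functions directly.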
def test_cont_basic():
# this test skips slow distributions
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
category=integrate.IntegrationWarning)
for distname, arg in distcont[:]:
if distname in distslow:
continue
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
np.random.seed(765456)
sn = 500
rvs = distfn.rvs(size=sn, *arg)
sm = rvs.mean()
sv = rvs.var()
m, v = distfn.stats(*arg)
            yield (check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn,
                   distname + ' sample mean test')
yield check_cdf_ppf, distfn, arg, distname
yield check_sf_isf, distfn, arg, distname
yield check_pdf, distfn, arg, distname
yield check_pdf_logpdf, distfn, arg, distname
yield check_cdf_logcdf, distfn, arg, distname
yield check_sf_logsf, distfn, arg, distname
if distname in distmissing:
alpha = 0.01
yield check_distribution_rvs, distname, arg, alpha, rvs
locscale_defaults = (0, 1)
meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5,
'pareto': 1.5, 'tukeylambda': 0.3}
x = spec_x.get(distname, 0.5)
yield check_named_args, distfn, x, arg, locscale_defaults, meths
yield check_random_state_property, distfn, arg
yield check_pickling, distfn, arg
# Entropy
skp = npt.dec.skipif
yield check_entropy, distfn, arg, distname
if distfn.numargs == 0:
yield skp(NUMPY_BELOW_1_7)(check_vecentropy), distfn, arg
if distfn.__class__._entropy != stats.rv_continuous._entropy:
yield check_private_entropy, distfn, arg, stats.rv_continuous
yield check_edge_support, distfn, arg
yield check_meth_dtype, distfn, arg, meths
yield check_ppf_dtype, distfn, arg
yield skp(distname in fails_cmplx)(check_cmplx_deriv), distfn, arg
knf = npt.dec.knownfailureif
yield (knf(distname == 'truncnorm')(check_ppf_private), distfn,
arg, distname)
@npt.dec.slow
def test_cont_basic_slow():
# same as above for slow distributions
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
category=integrate.IntegrationWarning)
for distname, arg in distcont[:]:
if distname not in distslow:
continue
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
np.random.seed(765456)
sn = 500
rvs = distfn.rvs(size=sn, *arg)
sm = rvs.mean()
sv = rvs.var()
m, v = distfn.stats(*arg)
            yield (check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn,
                   distname + ' sample mean test')
yield check_cdf_ppf, distfn, arg, distname
yield check_sf_isf, distfn, arg, distname
yield check_pdf, distfn, arg, distname
yield check_pdf_logpdf, distfn, arg, distname
yield check_cdf_logcdf, distfn, arg, distname
yield check_sf_logsf, distfn, arg, distname
# yield check_oth, distfn, arg # is still missing
if distname in distmissing:
alpha = 0.01
yield check_distribution_rvs, distname, arg, alpha, rvs
locscale_defaults = (0, 1)
meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
x = 0.5
if distname == 'invweibull':
arg = (1,)
elif distname == 'ksone':
arg = (3,)
yield check_named_args, distfn, x, arg, locscale_defaults, meths
yield check_random_state_property, distfn, arg
yield check_pickling, distfn, arg
# Entropy
skp = npt.dec.skipif
ks_cond = distname in ['ksone', 'kstwobign']
yield skp(ks_cond)(check_entropy), distfn, arg, distname
if distfn.numargs == 0:
yield skp(NUMPY_BELOW_1_7)(check_vecentropy), distfn, arg
if distfn.__class__._entropy != stats.rv_continuous._entropy:
yield check_private_entropy, distfn, arg, stats.rv_continuous
yield check_edge_support, distfn, arg
yield check_meth_dtype, distfn, arg, meths
yield check_ppf_dtype, distfn, arg
yield skp(distname in fails_cmplx)(check_cmplx_deriv), distfn, arg
@npt.dec.slow
def test_moments():
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
category=integrate.IntegrationWarning)
knf = npt.dec.knownfailureif
fail_normalization = set(['vonmises', 'ksone'])
fail_higher = set(['vonmises', 'ksone', 'ncf'])
for distname, arg in distcont[:]:
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
m, v, s, k = distfn.stats(*arg, moments='mvsk')
cond1 = distname in fail_normalization
cond2 = distname in fail_higher
msg = distname + ' fails moments'
yield knf(cond1, msg)(check_normalization), distfn, arg, distname
yield knf(cond2, msg)(check_mean_expect), distfn, arg, m, distname
yield (knf(cond2, msg)(check_var_expect), distfn, arg, m, v,
distname)
yield (knf(cond2, msg)(check_skew_expect), distfn, arg, m, v, s,
distname)
yield (knf(cond2, msg)(check_kurt_expect), distfn, arg, m, v, k,
distname)
yield check_loc_scale, distfn, arg, m, v, distname
yield check_moment, distfn, arg, m, v, distname
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
# this did not work, skipped silently by nose
if np.isfinite(m):
check_sample_mean(sm, sv, sn, m)
if np.isfinite(v):
check_sample_var(sv, sn, v)
def check_sample_mean(sm, v, n, popmean):
# from stats.stats.ttest_1samp(a, popmean):
# Calculates the t-obtained for the independent samples T-test on ONE group
# of scores a, given a population mean.
#
# Returns: t-value, two-tailed prob
df = n-1
svar = ((n-1)*v) / float(df) # looks redundant
t = (sm-popmean) / np.sqrt(svar*(1.0/n))
prob = betainc(0.5*df, 0.5, df/(df + t*t))
# return t,prob
npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
(t, prob, popmean, sm))
def check_sample_var(sv, n, popvar):
# two-sided chisquare test for sample variance equal to
# hypothesized variance
df = n-1
    chi2 = (n-1)*sv/float(popvar)
    pval = stats.distributions.chi2.sf(chi2, df) * 2
    npt.assert_(pval > 0.01, 'var fail, chi2, pval = %f, %f, v, sv=%f, %f' %
                (chi2, pval, popvar, sv))
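# Minimal self-check of the two helpers above (a hedged sketch, not part of
# the generated test suite): draw a normal sample with known mean/variance
# and confirm that neither assertion fires.
def _demo_sample_checks():
    rng = np.random.RandomState(1234)  # fixed seed for reproducibility
    sample = rng.normal(loc=2.0, scale=3.0, size=500)
    check_sample_mean(sample.mean(), sample.var(ddof=1), sample.size, 2.0)
    check_sample_var(sample.var(ddof=1), sample.size, 9.0)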
def check_cdf_ppf(distfn, arg, msg):
values = [0.001, 0.5, 0.999]
npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),
values, decimal=DECIMAL, err_msg=msg +
' - cdf-ppf roundtrip')
def check_sf_isf(distfn, arg, msg):
npt.assert_almost_equal(distfn.sf(distfn.isf([0.1, 0.5, 0.9], *arg), *arg),
[0.1, 0.5, 0.9], decimal=DECIMAL, err_msg=msg +
' - sf-isf roundtrip')
npt.assert_almost_equal(distfn.cdf([0.1, 0.9], *arg),
1.0 - distfn.sf([0.1, 0.9], *arg),
decimal=DECIMAL, err_msg=msg +
' - cdf-sf relationship')
def check_pdf(distfn, arg, msg):
# compares pdf at median with numerical derivative of cdf
median = distfn.ppf(0.5, *arg)
eps = 1e-6
pdfv = distfn.pdf(median, *arg)
if (pdfv < 1e-4) or (pdfv > 1e4):
# avoid checking a case where pdf is close to zero or
# huge (singularity)
median = median + 0.1
pdfv = distfn.pdf(median, *arg)
cdfdiff = (distfn.cdf(median + eps, *arg) -
distfn.cdf(median - eps, *arg))/eps/2.0
# replace with better diff and better test (more points),
# actually, this works pretty well
msg += ' - cdf-pdf relationship'
npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL, err_msg=msg)
def check_pdf_logpdf(distfn, args, msg):
# compares pdf at several points with the log of the pdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
pdf = distfn.pdf(vals, *args)
logpdf = distfn.logpdf(vals, *args)
pdf = pdf[pdf != 0]
logpdf = logpdf[np.isfinite(logpdf)]
msg += " - logpdf-log(pdf) relationship"
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
def check_sf_logsf(distfn, args, msg):
# compares sf at several points with the log of the sf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
sf = distfn.sf(vals, *args)
logsf = distfn.logsf(vals, *args)
sf = sf[sf != 0]
logsf = logsf[np.isfinite(logsf)]
msg += " - logsf-log(sf) relationship"
npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg)
def check_cdf_logcdf(distfn, args, msg):
# compares cdf at several points with the log of the cdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
cdf = distfn.cdf(vals, *args)
logcdf = distfn.logcdf(vals, *args)
cdf = cdf[cdf != 0]
logcdf = logcdf[np.isfinite(logcdf)]
msg += " - logcdf-log(cdf) relationship"
npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg)
def check_distribution_rvs(dist, args, alpha, rvs):
# test from scipy.stats.tests
# this version reuses existing random variables
D, pval = stats.kstest(rvs, dist, args=args, N=1000)
if (pval < alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
"; alpha = " + str(alpha) + "\nargs = " + str(args))
def check_vecentropy(distfn, args):
npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args))
@npt.dec.skipif(NUMPY_BELOW_1_7)
def check_loc_scale(distfn, arg, m, v, msg):
loc, scale = 10.0, 10.0
mt, vt = distfn.stats(loc=loc, scale=scale, *arg)
npt.assert_allclose(m*scale + loc, mt)
npt.assert_allclose(v*scale*scale, vt)
def check_ppf_private(distfn, arg, msg):
# fails by design for truncnorm self.nb not defined
ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg)
npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
if __name__ == "__main__":
npt.run_module_suite()
|
|
"""Tests for Sentry integration."""
import logging
import pytest
from homeassistant.components.sentry import get_channel, process_before_send
from homeassistant.components.sentry.const import (
CONF_DSN,
CONF_ENVIRONMENT,
CONF_EVENT_CUSTOM_COMPONENTS,
CONF_EVENT_HANDLED,
CONF_EVENT_THIRD_PARTY_PACKAGES,
CONF_TRACING,
CONF_TRACING_SAMPLE_RATE,
DOMAIN,
)
from homeassistant.const import __version__ as current_version
from homeassistant.core import HomeAssistant
from tests.async_mock import MagicMock, Mock, patch
from tests.common import MockConfigEntry
async def test_setup_entry(hass: HomeAssistant) -> None:
"""Test integration setup from entry."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_DSN: "http://public@example.com/1", CONF_ENVIRONMENT: "production"},
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.sentry.AioHttpIntegration"
) as sentry_aiohttp_mock, patch(
"homeassistant.components.sentry.SqlalchemyIntegration"
) as sentry_sqlalchemy_mock, patch(
"homeassistant.components.sentry.LoggingIntegration"
) as sentry_logging_mock, patch(
"homeassistant.components.sentry.sentry_sdk"
) as sentry_mock:
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
# Test CONF_ENVIRONMENT is migrated to entry options
assert CONF_ENVIRONMENT not in entry.data
assert CONF_ENVIRONMENT in entry.options
assert entry.options[CONF_ENVIRONMENT] == "production"
assert sentry_logging_mock.call_count == 1
        sentry_logging_mock.assert_called_once_with(
            level=logging.WARNING, event_level=logging.WARNING
        )
assert sentry_aiohttp_mock.call_count == 1
assert sentry_sqlalchemy_mock.call_count == 1
assert sentry_mock.init.call_count == 1
call_args = sentry_mock.init.call_args[1]
assert set(call_args) == {
"dsn",
"environment",
"integrations",
"release",
"before_send",
}
assert call_args["dsn"] == "http://public@example.com/1"
assert call_args["environment"] == "production"
assert call_args["integrations"] == [
sentry_logging_mock.return_value,
sentry_aiohttp_mock.return_value,
sentry_sqlalchemy_mock.return_value,
]
assert call_args["release"] == current_version
assert call_args["before_send"]
async def test_setup_entry_with_tracing(hass: HomeAssistant) -> None:
"""Test integration setup from entry with tracing enabled."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_DSN: "http://public@example.com/1"},
options={CONF_TRACING: True, CONF_TRACING_SAMPLE_RATE: 0.5},
)
entry.add_to_hass(hass)
with patch("homeassistant.components.sentry.AioHttpIntegration"), patch(
"homeassistant.components.sentry.SqlalchemyIntegration"
), patch("homeassistant.components.sentry.LoggingIntegration"), patch(
"homeassistant.components.sentry.sentry_sdk"
) as sentry_mock:
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
call_args = sentry_mock.init.call_args[1]
assert set(call_args) == {
"dsn",
"environment",
"integrations",
"release",
"before_send",
"traceparent_v2",
"traces_sample_rate",
}
assert call_args["traces_sample_rate"] == 0.5
assert call_args["traceparent_v2"]
@pytest.mark.parametrize(
"version,channel",
[
("0.115.0.dev20200815", "nightly"),
("0.115.0", "stable"),
("0.115.0b4", "beta"),
("0.115.0dev0", "dev"),
],
)
async def test_get_channel(version, channel) -> None:
"""Test if channel detection works from Home Assistant version number."""
assert get_channel(version) == channel
async def test_process_before_send(hass: HomeAssistant):
"""Test regular use of the Sentry process before sending function."""
hass.config.components.add("puppies")
hass.config.components.add("a_integration")
# These should not show up in the result.
hass.config.components.add("puppies.light")
hass.config.components.add("auth")
result = process_before_send(
hass,
options={},
channel="test",
huuid="12345",
system_info={"installation_type": "pytest"},
custom_components=["ironing_robot", "fridge_opener"],
event={},
hint={},
)
assert result
assert result["tags"]
assert result["contexts"]
assert result["contexts"]
ha_context = result["contexts"]["Home Assistant"]
assert ha_context["channel"] == "test"
assert ha_context["custom_components"] == "fridge_opener\nironing_robot"
assert ha_context["integrations"] == "a_integration\npuppies"
tags = result["tags"]
assert tags["channel"] == "test"
assert tags["uuid"] == "12345"
assert tags["installation_type"] == "pytest"
user = result["user"]
assert user["id"] == "12345"
async def test_event_with_platform_context(hass: HomeAssistant):
"""Test extraction of platform context information during Sentry events."""
current_platform_mock = Mock()
current_platform_mock.get().platform_name = "hue"
current_platform_mock.get().domain = "light"
with patch(
"homeassistant.components.sentry.entity_platform.current_platform",
new=current_platform_mock,
):
result = process_before_send(
hass,
options={},
channel="test",
huuid="12345",
system_info={"installation_type": "pytest"},
custom_components=["ironing_robot"],
event={},
hint={},
)
assert result
assert result["tags"]["integration"] == "hue"
assert result["tags"]["platform"] == "light"
assert result["tags"]["custom_component"] == "no"
current_platform_mock.get().platform_name = "ironing_robot"
current_platform_mock.get().domain = "switch"
with patch(
"homeassistant.components.sentry.entity_platform.current_platform",
new=current_platform_mock,
):
result = process_before_send(
hass,
options={CONF_EVENT_CUSTOM_COMPONENTS: True},
channel="test",
huuid="12345",
system_info={"installation_type": "pytest"},
custom_components=["ironing_robot"],
event={},
hint={},
)
assert result
assert result["tags"]["integration"] == "ironing_robot"
assert result["tags"]["platform"] == "switch"
assert result["tags"]["custom_component"] == "yes"
@pytest.mark.parametrize(
"logger,tags",
[
("adguard", {"package": "adguard"}),
(
"homeassistant.components.hue.coordinator",
{"integration": "hue", "custom_component": "no"},
),
(
"homeassistant.components.hue.light",
{"integration": "hue", "platform": "light", "custom_component": "no"},
),
(
"homeassistant.components.ironing_robot.switch",
{
"integration": "ironing_robot",
"platform": "switch",
"custom_component": "yes",
},
),
(
"homeassistant.components.ironing_robot",
{"integration": "ironing_robot", "custom_component": "yes"},
),
("homeassistant.helpers.network", {"helpers": "network"}),
("tuyapi.test", {"package": "tuyapi"}),
],
)
async def test_logger_event_extraction(hass: HomeAssistant, logger, tags):
"""Test extraction of information from Sentry logger events."""
result = process_before_send(
hass,
options={
CONF_EVENT_CUSTOM_COMPONENTS: True,
CONF_EVENT_THIRD_PARTY_PACKAGES: True,
},
channel="test",
huuid="12345",
system_info={"installation_type": "pytest"},
custom_components=["ironing_robot"],
event={"logger": logger},
hint={},
)
assert result
assert result["tags"] == {
"channel": "test",
"uuid": "12345",
"installation_type": "pytest",
**tags,
}
@pytest.mark.parametrize(
"logger,options,event",
[
("adguard", {CONF_EVENT_THIRD_PARTY_PACKAGES: True}, True),
("adguard", {CONF_EVENT_THIRD_PARTY_PACKAGES: False}, False),
(
"homeassistant.components.ironing_robot.switch",
{CONF_EVENT_CUSTOM_COMPONENTS: True},
True,
),
(
"homeassistant.components.ironing_robot.switch",
{CONF_EVENT_CUSTOM_COMPONENTS: False},
False,
),
],
)
async def test_filter_log_events(hass: HomeAssistant, logger, options, event):
"""Test filtering of events based on configuration options."""
result = process_before_send(
hass,
options=options,
channel="test",
huuid="12345",
system_info={"installation_type": "pytest"},
custom_components=["ironing_robot"],
event={"logger": logger},
hint={},
)
if event:
assert result
else:
assert result is None
@pytest.mark.parametrize(
"handled,options,event",
[
("yes", {CONF_EVENT_HANDLED: True}, True),
("yes", {CONF_EVENT_HANDLED: False}, False),
("no", {CONF_EVENT_HANDLED: False}, True),
("no", {CONF_EVENT_HANDLED: True}, True),
],
)
async def test_filter_handled_events(hass: HomeAssistant, handled, options, event):
"""Tests filtering of handled events based on configuration options."""
event_mock = MagicMock()
event_mock.__iter__ = ["tags"]
event_mock.__contains__ = lambda _, val: val == "tags"
event_mock.tags = {"handled": handled}
result = process_before_send(
hass,
options=options,
channel="test",
huuid="12345",
system_info={"installation_type": "pytest"},
custom_components=[],
event=event_mock,
hint={},
)
if event:
assert result
else:
assert result is None
|
|
#!/usr/bin/env python
# Purpose : Python Boot Camp - Basemap Teaching Program 3.
# Ensure that environment variable PYTHONUNBUFFERED=yes
# This allows STDOUT and STDERR to both be logged in chronological order
import sys # platform, args, run tools
import os # platform, args, run tools
import argparse # For parsing command line
import datetime # For date/time processing
import numpy as np
import h5py
import matplotlib as mpl
mpl.use('Agg', warn=False)
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show, subplots
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm as bm_cm
import matplotlib.cm as mpl_cm
#########################################################################
# Command Line Parameters Class
#########################################################################
class Bcbm3CP():
def bcbm3_cp(self, bcbm3_cmd_line):
description = ("Python Boot Camp - Basemap Teaching Program 3")
parser = argparse.ArgumentParser(description=description)
help_text = ("Input file name")
parser.add_argument('input_file_name',
metavar='input_file_name',
#type=string,
help=help_text)
help_text = ("Display processing messages to STDOUT " +
"(DEFAULT=NO)")
parser.add_argument("-v", "--verbose",
default=False,
help=help_text,
action="store_true",
dest="verbose")
help_text = ("Run program in test mode " +
"(DEFAULT=NO)")
parser.add_argument("-t", "--test_mode",
default=False,
help=help_text,
action="store_true",
dest="test_mode")
self.args = parser.parse_args(bcbm3_cmd_line)
if (self.args.verbose):
sys.stdout.write("BCBM3 : bcbm3_cmd_line = " + str(bcbm3_cmd_line) + "\n")
# Return
return(0)
#########################################################################
# Main Program
#########################################################################
class Bcbm3():
def bcbm3(self, bcbm3_cmd_line):
# Start time
self.start_time = datetime.datetime.today()
# Parse input parameters from cmd line
bcbm3_cp1 = Bcbm3CP()
bcbm3_cp1_ret = bcbm3_cp1.bcbm3_cp(bcbm3_cmd_line)
self.bcbm3_cmd_line = bcbm3_cmd_line
if (len(self.bcbm3_cmd_line) == 0):
self.bcbm3_cmd_line = " "
if (bcbm3_cp1_ret):
return(bcbm3_cp1_ret)
self.verbose = bcbm3_cp1.args.verbose
self.test_mode = bcbm3_cp1.args.test_mode
self.input_file_name = bcbm3_cp1.args.input_file_name
if (self.test_mode):
self.timestamp = "Test Mode Date/Time Stamp"
if (self.verbose):
sys.stdout.write("BCBM3 : Running in test mode\n")
sys.stdout.write("BCBM3 : sys.version = " + str(sys.version) + "\n")
else:
self.timestamp = datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S")
if (self.verbose):
sys.stdout.write("BCBM3 : Program started : " + str(self.start_time) + "\n")
sys.stdout.write("BCBM3 : sys.version = " + str(sys.version) + "\n")
        if (self.verbose):
            sys.stdout.write("BCBM3 : self.verbose = " + str(self.verbose) + "\n")
            sys.stdout.write("BCBM3 : self.test_mode = " + str(self.test_mode) + "\n")
            sys.stdout.write("BCBM3 : self.input_file_name = " + str(self.input_file_name) + "\n")
# Call functions
bcbm3_f11_ret = self.read_omps_data()
if (bcbm3_f11_ret):
return(bcbm3_f11_ret)
bcbm3_f11_ret = self.make_mercator_projection()
if (bcbm3_f11_ret):
return(bcbm3_f11_ret)
# End program
self.end_time = datetime.datetime.today()
self.run_time = self.end_time - self.start_time
if (self.verbose):
if (self.test_mode):
pass
else:
sys.stdout.write("BCBM3 : Program ended : " + str(self.end_time) + "\n")
sys.stdout.write("BCBM3 : Run time : " + str(self.run_time) + "\n")
if (self.verbose):
sys.stdout.write("BCBM3 : Program completed normally\n")
return(0)
# Define functions
#------------------------------------------------------------------------------
def read_omps_data(self):
if (self.verbose):
sys.stdout.write("BCBM3 : read_omps_data ACTIVATED\n")
# Open input HDF5 file
self.input_file = h5py.File(self.input_file_name, "r")
sys.stdout.write("BCBM3 : self.input_file = " + str(self.input_file) + "\n")
self.o3 = self.input_file["ColumnAmountO3"]
self.lat = self.input_file["Latitude"]
self.lon = self.input_file["Longitude"]
# Convert from Numpy objects to list arrays
#self.o3 = self.o3[:,:]
#self.lat = self.lat[:]
#self.lon = self.lon[:]
sys.stdout.write("BCBM3 : self.o3 = " + str(self.o3) + "\n")
sys.stdout.write("BCBM3 : self.lat = " + str(self.lat) + "\n")
sys.stdout.write("BCBM3 : self.lon = " + str(self.lon) + "\n")
sys.stdout.write("BCBM3 : len(self.o3) = " + str(len(self.o3)) + "\n")
sys.stdout.write("BCBM3 : len(self.lat) = " + str(len(self.lat)) + "\n")
sys.stdout.write("BCBM3 : len(self.lon) = " + str(len(self.lon)) + "\n")
return(0)
#------------------------------------------------------------------------------
def make_mercator_projection(self):
if (self.verbose):
sys.stdout.write("BCBM3 : make_mercator_projection ACTIVATED\n")
# Set up figure in Matplotlib
self.current_figure = mpl.pyplot.figure(1, figsize=(14.0, 10.0))
self.current_figure.suptitle("Basemap - Mercator Map\n" +
self.timestamp)
self.current_figure.text(0.05, 0.95, "A Mercator Projection of the Earth")
self.current_figure.subplots_adjust(left=0.05,
right=0.95,
top=0.80,
bottom=0.05,
wspace=0.2,
hspace=0.4)
self.current_plot = self.current_figure.add_subplot(1, 1, 1)
# Plot figure
self.map = Basemap(projection='merc',
lat_0=0,
lon_0=0,
llcrnrlat=-80,
urcrnrlat=80,
llcrnrlon=-180,
urcrnrlon=180,
resolution='c')
#self.map.drawmapboundary(fill_color='aqua')
#self.map.fillcontinents(color='coral',lake_color='aqua')
self.map.drawcoastlines()
#self.map.drawcountries()
#self.map.drawrivers()
#self.map.drawstates()
self.map.drawparallels(np.arange( -90.0, 90.0, 20.0))
self.map.drawmeridians(np.arange(-180.0, 181.0, 20.0))
# Write the output to a graphic file
self.current_figure.savefig("bcbm3_plot1")
mpl.pyplot.close(self.current_figure)
# Plot colour scale and contour maps
#self.plot_colour_scale()
#self.plot_colour_contours()
return(0)
#------------------------------------------------------------------------------
def plot_colour_scale(self):
if (self.verbose):
sys.stdout.write("BCBM3 : plot_colour_scale ACTIVATED\n")
# Set up mesh for plotting
self.xmesh, self.ymesh = self.map(*np.meshgrid(self.lon, self.lat))
#sys.stdout.write("BCBM3 : self.xmesh = " + str(self.xmesh) + "\n")
#sys.stdout.write("BCBM3 : self.ymesh = " + str(self.ymesh) + "\n")
# Set colour map and levels
self.colormap = mpl_cm.jet
self.colormap.set_under(color='k',
alpha=1.0)
self.colormap.set_over(color='k',
alpha=1.0)
# Set colour levels
self.plot_colormap_level_lower = 200
self.plot_colormap_level_upper = 525
self.plot_colormap_level_space = 25
self.color_levels = np.arange(self.plot_colormap_level_lower,
self.plot_colormap_level_upper,
self.plot_colormap_level_space)
        # Plot data outside the colour map range in black (the default).
        # If this is not done then matplotlib will take the first and last
        # colours of the colormap and apply them to out-of-range data,
        # i.e. it effectively shortens the colour scale by two colours and
        # rescales everything else to fit that shortened scale.
        # Normalize: "clip" must be set to False.
        # "alpha" controls transparency: alpha=1 is solid, alpha=0 is
        # transparent, and 0 < alpha < 1 gives partial transparency.
self.norm_range = mpl.colors.Normalize(vmin=self.plot_colormap_level_lower,
vmax=(self.plot_colormap_level_upper-self.plot_colormap_level_space),
clip=False)
# Plot map
self.map_contourf = self.map.contourf(self.xmesh,
self.ymesh,
self.o3,
self.color_levels,
colors=None,
cmap=self.colormap,
#norm=self.norm_range,
extend="both")
# Add colour scale to output
self.current_colorbar = mpl.pyplot.colorbar(orientation="horizontal",
fraction=0.05,
pad=0.15,
aspect=60.0,
shrink=1.0,
extend="both",
spacing="uniform",
ticks=self.color_levels,
#ticks=None,
format=None)
self.current_colorbar.set_label("Ozone scale in Dobson Units",
fontsize=20)
# Write the output to a graphic file
self.current_figure.savefig("bcbm3_plot2")
mpl.pyplot.close(self.current_figure)
return(0)
#------------------------------------------------------------------------------
def plot_colour_contours(self):
if (self.verbose):
sys.stdout.write("BCBM3 : plot_colour_contours ACTIVATED\n")
# Set up mesh for plotting
self.xmesh, self.ymesh = self.map(*np.meshgrid(self.lon, self.lat))
#sys.stdout.write("BCBM3 : self.xmesh = " + str(self.xmesh) + "\n")
#sys.stdout.write("BCBM3 : self.ymesh = " + str(self.ymesh) + "\n")
# Set colour map and levels
self.colormap = mpl_cm.jet
self.colormap.set_under(color='k',
alpha=1.0)
self.colormap.set_over(color='k',
alpha=1.0)
# Set colour levels
self.plot_colormap_level_lower = 200
self.plot_colormap_level_upper = 525
self.plot_colormap_level_space = 25
self.color_levels = np.arange(self.plot_colormap_level_lower,
self.plot_colormap_level_upper,
self.plot_colormap_level_space)
        # Plot data outside the colour map range in black (the default).
        # If this is not done then matplotlib will take the first and last
        # colours of the colormap and apply them to out-of-range data,
        # i.e. it effectively shortens the colour scale by two colours and
        # rescales everything else to fit that shortened scale.
        # Normalize: "clip" must be set to False.
        # "alpha" controls transparency: alpha=1 is solid, alpha=0 is
        # transparent, and 0 < alpha < 1 gives partial transparency.
self.norm_range = mpl.colors.Normalize(vmin=self.plot_colormap_level_lower,
vmax=(self.plot_colormap_level_upper-self.plot_colormap_level_space),
clip=False)
# Plot map
self.map_contour = self.map.contour(self.xmesh,
self.ymesh,
self.o3,
self.color_levels,
colors=None,
cmap=self.colormap,
#norm=self.norm_range,
extend="both")
# Label the contours
self.map_contour_label = mpl.pyplot.clabel(self.map_contour,
self.color_levels,
inline=1,
fontsize=9,
fmt='%1.0f',)
# Write the output to a graphic file
self.current_figure.savefig("bcbm3_plot3")
mpl.pyplot.close(self.current_figure)
return(0)
#------------------------------------------------------------------------------
####################################################
def main(argv=None): # When run as a script
    if argv is None:
        argv = sys.argv[1:]
    bcbm3 = Bcbm3()
    return bcbm3.bcbm3(argv)
if __name__ == '__main__':
sys.exit(main())
|
|
# -*- coding: utf-8 -*-
# classes/models/client.py
# class:: Client
from datetime import datetime, timedelta
from flask import current_app
from swtstore.classes.database import db
from swtstore.classes.models import User
from swtstore.classes import oauth
class Client(db.Model):
"""
The third-party application registering with the platform
"""
__tablename__ = 'clients'
id = db.Column(db.String(40), primary_key=True)
client_secret = db.Column(db.String(55), nullable=False)
name = db.Column(db.String(60), nullable=False)
description = db.Column(db.String(400))
# creator of the client application
user_id = db.Column(db.ForeignKey('users.id'))
creator = db.relationship('User')
_is_private = db.Column(db.Boolean)
_host_url = db.Column(db.String(255))
_redirect_uris = db.Column(db.Text)
_default_scopes = db.Column(db.Text)
@property
def client_id(self):
return self.id
@property
def client_type(self):
if self._is_private:
return 'private'
return 'public'
@property
def host_url(self):
return self._host_url
@property
def redirect_uris(self):
if self._redirect_uris:
return self._redirect_uris.split()
return []
@property
def default_redirect_uri(self):
return self.redirect_uris[0]
@property
def default_scopes(self):
if self._default_scopes:
return self._default_scopes.split()
return []
def __repr__(self):
return '<Client: %s :: ID: %s>' % (self.name, self.id)
def __str__(self):
return '<Client: %s :: ID: %s>' % (self.name, self.id)
# create and persist the client to the database
def persist(self):
db.session.add(self)
db.session.commit()
@staticmethod
def getClientsByCreator(user_id):
clients = Client.query.filter_by(user_id=user_id)
return [each for each in clients]
class Grant(db.Model):
"""
    A grant token is created in the authorization flow and destroyed once
    the authorization finishes. Since it is short-lived, it would be better
    to store this data in a cache, which would give better performance.
"""
#TODO: this would perform better if its only in the cache. and not in a db.
__tablename__ = 'grants'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id',
ondelete='CASCADE'))
user = db.relationship('User')
client_id = db.Column(db.String(40), db.ForeignKey('clients.id'),
nullable=False)
client = db.relationship('Client')
code = db.Column(db.String(255), index=True, nullable=False)
redirect_uri = db.Column(db.String(255))
expires = db.Column(db.DateTime)
_scopes = db.Column(db.Text)
@property
def scopes(self):
if self._scopes:
return self._scopes.split()
return []
def delete(self):
db.session.delete(self)
db.session.commit()
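# Hedged sketch of the cache idea from the Grant docstring/TODO above: keep
# short-lived grants in an in-process dict keyed by (client_id, code) instead
# of a table. `_grant_cache`, `cache_grant` and `pop_cached_grant` are
# illustrative names, not part of the swtstore API.
_grant_cache = {}

def cache_grant(client_id, code, grant, ttl=100):
    # Remember the grant together with its expiry time.
    _grant_cache[(client_id, code)] = (
        grant, datetime.utcnow() + timedelta(seconds=ttl))

def pop_cached_grant(client_id, code):
    # Return the grant if present and unexpired; drop it either way.
    grant, expires = _grant_cache.pop((client_id, code), (None, None))
    if grant is not None and expires > datetime.utcnow():
        return grant
    return None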
class Token(db.Model):
"""
The final token to be used by a client
"""
__tablename__ = 'tokens'
id = db.Column(db.Integer, primary_key=True)
client_id = db.Column(db.String(40), db.ForeignKey('clients.id'),
nullable=False)
client = db.relationship('Client')
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
user = db.relationship('User')
token_type = db.Column(db.String(40))
access_token = db.Column(db.String(255), unique=True)
refresh_token = db.Column(db.String(255), unique=True)
expires = db.Column(db.DateTime)
_scopes = db.Column(db.Text)
@property
def scopes(self):
if self._scopes:
return self._scopes.split()
return []
#TODO: find out how to better structure the following code
# OAuthLib decorators used by OAuthLib in the OAuth flow
@oauth.clientgetter
def loadClient(client_id):
current_app.logger.debug('@oauth.clientgetter')
#return Client.query.filter_by(id=client_id).first()
return Client.query.get(client_id)
@oauth.grantgetter
def loadGrant(client_id, code):
current_app.logger.debug('@oauth.grantgetter')
return Grant.query.filter_by(client_id=client_id, code=code).first()
@oauth.grantsetter
def saveGrant(client_id, code, request, *args, **kwargs):
current_app.logger.debug('@oauth.grantsetter')
expires = datetime.utcnow() + timedelta(seconds=100)
grant = Grant(
client_id=client_id,
code=code['code'],
redirect_uri=request.redirect_uri,
_scopes=' '.join(request.scopes),
user=User.getCurrentUser(),
expires=expires
)
db.session.add(grant)
db.session.commit()
return grant
@oauth.tokengetter
def loadToken(access_token=None, refresh_token=None):
current_app.logger.debug('@oauth.tokengetter')
if access_token:
return Token.query.filter_by(access_token=access_token).first()
elif refresh_token:
return Token.query.filter_by(refresh_token=refresh_token).first()
@oauth.tokensetter
def saveToken(token, request, *args, **kwargs):
current_app.logger.debug('@oauth.tokensetter')
toks = Token.query.filter_by(client_id=request.client.id,
user_id=request.user.id)
# make sure that every client has only one token connected to a user
for t in toks:
db.session.delete(t)
expires_in = token.pop('expires_in')
expires = datetime.utcnow() + timedelta(seconds=expires_in)
tok = Token(
access_token=token['access_token'],
refresh_token=token['refresh_token'],
token_type=token['token_type'],
_scopes=token['scope'],
expires=expires,
client_id=request.client.id,
user=request.user
)
db.session.add(tok)
db.session.commit()
return tok
@oauth.usergetter
def getUser():
return User.getCurrentUser()
# Authorized Clients
class AuthorizedClients(db.Model):
"""
The clients authorized by users
"""
__tablename__ = 'authorized_clients'
id = db.Column(db.Integer, primary_key=True)
client_id = db.Column(db.String(40), db.ForeignKey('clients.id'),
nullable=False)
client = db.relationship('Client')
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
user = db.relationship('User')
def persist(self):
db.session.add(self)
db.session.commit()
@staticmethod
def revoke(**kwargs):
user = kwargs.get('user')
client = kwargs.get('client')
authorization = AuthorizedClients.query.filter_by(user_id=user.id,
client_id=client.client_id).first()
current_app.logger.debug('authorization to be revoked-- %s',
authorization)
db.session.delete(authorization)
db.session.commit()
@staticmethod
def getByUser(user):
authorized_clients = [row.client for row in
AuthorizedClients.query.filter_by(user_id=user.id).all()]
current_app.logger.debug('authorized clients %s', authorized_clients)
return authorized_clients
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
import scipy.constants as const
from pymatgen.core.structure import Structure
from pymatgen.util.coord import get_linear_interpolated_value
from monty.json import MSONable
from monty.functools import lazy_property
"""
This module defines classes to represent the phonon density of states, etc.
"""
BOLTZ_THZ_PER_K = const.value("Boltzmann constant in Hz/K") / const.tera # Boltzmann constant in THz/K
THZ_TO_J = const.value("hertz-joule relationship") * const.tera
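# Quick unit check (a sketch): BOLTZ_THZ_PER_K is about 0.0208 THz/K, so at
# 300 K the thermal frequency k_B*T/h is roughly 6.2 THz; THZ_TO_J converts
# a frequency in THz to the corresponding energy h*nu in joules
# (about 6.626e-22 J per THz).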
class PhononDos(MSONable):
"""
Basic DOS object. All other DOS objects are extended versions of this
object.
Args:
        frequencies: A sequence of frequencies in THz
densities: A list representing the density of states.
"""
def __init__(self, frequencies, densities):
self.frequencies = np.array(frequencies)
self.densities = np.array(densities)
def get_smeared_densities(self, sigma):
"""
Returns the densities, but with a Gaussian smearing of
std dev sigma applied.
Args:
sigma: Std dev of Gaussian smearing function.
Returns:
Gaussian-smeared densities.
"""
from scipy.ndimage.filters import gaussian_filter1d
diff = [self.frequencies[i + 1] - self.frequencies[i]
for i in range(len(self.frequencies) - 1)]
avgdiff = sum(diff) / len(diff)
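        # sigma is given in frequency units (THz); dividing by the average
        # grid spacing converts it into grid-point units, which is what
        # gaussian_filter1d expects.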
smeared_dens = gaussian_filter1d(self.densities, sigma / avgdiff)
return smeared_dens
def __add__(self, other):
"""
Adds two DOS together. Checks that frequency scales are the same.
Otherwise, a ValueError is thrown.
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
if not all(np.equal(self.frequencies, other.frequencies)):
raise ValueError("Frequencies of both DOS are not compatible!")
densities = self.densities + other.densities
return PhononDos(self.frequencies, densities)
def __radd__(self, other):
"""
Reflected addition of two DOS objects
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
return self.__add__(other)
def get_interpolated_value(self, frequency):
"""
Returns interpolated density for a particular frequency.
Args:
frequency: frequency to return the density for.
"""
return get_linear_interpolated_value(self.frequencies,
self.densities, frequency)
def __str__(self):
"""
Returns a string which can be easily plotted (using gnuplot).
"""
stringarray = ["#{:30s} {:30s}".format("Frequency", "Density")]
for i, frequency in enumerate(self.frequencies):
stringarray.append("{:.5f} {:.5f}"
.format(frequency, self.densities[i]))
return "\n".join(stringarray)
@classmethod
def from_dict(cls, d):
"""
Returns PhononDos object from dict representation of PhononDos.
"""
return cls(d["frequencies"], d["densities"])
def as_dict(self):
"""
Json-serializable dict representation of PhononDos.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"frequencies": list(self.frequencies),
"densities": list(self.densities)}
@lazy_property
def ind_zero_freq(self):
"""
        Index of the first point for which the frequencies are greater than
        or equal to zero.
"""
ind = np.searchsorted(self.frequencies, 0)
if ind >= len(self.frequencies):
raise ValueError("No positive frequencies found")
return ind
@lazy_property
def _positive_frequencies(self):
"""
Numpy array containing the list of positive frequencies
"""
return self.frequencies[self.ind_zero_freq:]
@lazy_property
def _positive_densities(self):
"""
Numpy array containing the list of densities corresponding to positive frequencies
"""
return self.densities[self.ind_zero_freq:]
def cv(self, t, structure=None):
"""
Constant volume specific heat C_v at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
        Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, Avogadro's
        number times the number of atoms in a unit cell. To compare with experimental data the result
        should be divided by the number of formula units in the cell. If the structure is provided
        the division is performed internally and the result is in J/(K*mol).
        Args:
            t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
                formula units
Returns:
Constant volume specific heat C_v
"""
if t == 0:
return 0
freqs = self._positive_frequencies
dens = self._positive_densities
csch2 = lambda x: 1.0 / (np.sinh(x) ** 2)
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
cv = np.trapz(wd2kt ** 2 * csch2(wd2kt) * dens, x=freqs)
cv *= const.Boltzmann * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
cv /= formula_units
return cv
def entropy(self, t, structure=None):
"""
Vibrational entropy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
        Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, Avogadro's
        number times the number of atoms in a unit cell. To compare with experimental data the result
        should be divided by the number of formula units in the cell. If the structure is provided
        the division is performed internally and the result is in J/(K*mol).
        Args:
            t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
                formula units
Returns:
Vibrational entropy
"""
if t == 0:
return 0
freqs = self._positive_frequencies
dens = self._positive_densities
coth = lambda x: 1.0 / np.tanh(x)
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
s = np.trapz((wd2kt * coth(wd2kt) - np.log(2 * np.sinh(wd2kt))) * dens, x=freqs)
s *= const.Boltzmann * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
s /= formula_units
return s
def internal_energy(self, t, structure=None):
"""
Phonon contribution to the internal energy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
        Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, Avogadro's
        number times the number of atoms in a unit cell. To compare with experimental data the result
        should be divided by the number of formula units in the cell. If the structure is provided
        the division is performed internally and the result is in J/mol.
        Args:
            t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
                formula units
Returns:
Phonon contribution to the internal energy
"""
        if t == 0:
return self.zero_point_energy(structure=structure)
freqs = self._positive_frequencies
dens = self._positive_densities
coth = lambda x: 1.0 / np.tanh(x)
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
e = np.trapz(freqs * coth(wd2kt) * dens, x=freqs) / 2
e *= THZ_TO_J * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
e /= formula_units
return e
def helmholtz_free_energy(self, t, structure=None):
"""
Phonon contribution to the Helmholtz free energy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
        Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, Avogadro's
        number times the number of atoms in a unit cell. To compare with experimental data the result
        should be divided by the number of formula units in the cell. If the structure is provided
        the division is performed internally and the result is in J/mol.
        Args:
            t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
                formula units
Returns:
Phonon contribution to the Helmholtz free energy
"""
        if t == 0:
return self.zero_point_energy(structure=structure)
freqs = self._positive_frequencies
dens = self._positive_densities
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
f = np.trapz(np.log(2 * np.sinh(wd2kt)) * dens, x=freqs)
f *= const.Boltzmann * const.Avogadro * t
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
f /= formula_units
return f
def zero_point_energy(self, structure=None):
"""
        Zero point energy of the system. Only positive frequencies will be used.
        Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, Avogadro's
        number times the number of atoms in a unit cell. To compare with experimental data the result
        should be divided by the number of formula units in the cell. If the structure is provided
        the division is performed internally and the result is in J/mol.
        Args:
            structure: the structure of the system. If not None it will be used to determine the number of
                formula units
        Returns:
            Zero point energy of the system
"""
freqs = self._positive_frequencies
dens = self._positive_densities
zpe = 0.5 * np.trapz(freqs * dens, x=freqs)
zpe *= THZ_TO_J * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
zpe /= formula_units
return zpe
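# Illustrative usage of the thermodynamic helpers above (a hedged sketch with
# made-up numbers, not output of any real calculation):
#
#     dos = PhononDos([1.0, 2.0, 3.0], [0.2, 0.5, 0.3])  # THz, states/THz
#     dos.zero_point_energy()  # J/mol-c: 0.5 * integral of freq * density
#     dos.cv(300)              # J/(K*mol-c) at 300 K
#     dos.entropy(300)         # J/(K*mol-c) at 300 K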
class CompletePhononDos(PhononDos):
"""
This wrapper class defines a total dos, and also provides a list of PDos.
Args:
structure: Structure associated with this particular DOS.
total_dos: total Dos for structure
pdoss: The pdoss are supplied as an {Site: Densities}
.. attribute:: pdos
Dict of partial densities of the form {Site:Densities}
"""
def __init__(self, structure, total_dos, pdoss):
super(CompletePhononDos, self).__init__(
frequencies=total_dos.frequencies, densities=total_dos.densities)
self.pdos = {s: np.array(d) for s, d in pdoss.items()}
self.structure = structure
def get_site_dos(self, site):
"""
Get the Dos for a site.
Args:
site: Site in Structure associated with CompletePhononDos.
Returns:
PhononDos containing summed orbital densities for site.
"""
return PhononDos(self.frequencies, self.pdos[site])
def get_element_dos(self):
"""
Get element projected Dos.
Returns:
dict of {Element: Dos}
"""
el_dos = {}
for site, atom_dos in self.pdos.items():
el = site.specie
if el not in el_dos:
el_dos[el] = np.array(atom_dos)
else:
el_dos[el] += np.array(atom_dos)
return {el: PhononDos(self.frequencies, densities)
for el, densities in el_dos.items()}
@classmethod
def from_dict(cls, d):
"""
Returns CompleteDos object from dict representation.
"""
tdos = PhononDos.from_dict(d)
struct = Structure.from_dict(d["structure"])
pdoss = {}
for at, pdos in zip(struct, d["pdos"]):
pdoss[at] = pdos
return cls(struct, tdos, pdoss)
def as_dict(self):
"""
Json-serializable dict representation of CompletePhononDos.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"frequencies": list(self.frequencies),
"densities": list(self.densities),
"pdos": []}
if len(self.pdos) > 0:
for at in self.structure:
d["pdos"].append(list(self.pdos[at]))
return d
def __str__(self):
return "Complete phonon DOS for " + str(self.structure)
|
|
"""Setup CLI configuration."""
# :license: MIT, see LICENSE for more details.
import webbrowser
import configparser
import json
import os.path
import requests
import click
import SoftLayer
from SoftLayer.CLI import config
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
from SoftLayer.consts import USER_AGENT
from SoftLayer import utils
def get_api_key(client, username, secret): # pylint: disable=inconsistent-return-statements
"""Attempts API-Key and password auth to get an API key.
This will also generate an API key if one doesn't exist
"""
# Try to use a client with username/api key
if len(secret) == 64 or username == 'apikey':
try:
client['Account'].getCurrentUser()
return secret
except SoftLayer.SoftLayerAPIError as ex:
if 'invalid api token' not in ex.faultString.lower():
raise
else:
if isinstance(client, SoftLayer.API.IAMClient):
client.authenticate_with_iam_token(secret)
else:
# Try to use a client with username/password
client.authenticate_with_password(username, secret)
user_record = client.call('Account', 'getCurrentUser', mask='id, apiAuthenticationKeys')
api_keys = user_record['apiAuthenticationKeys']
if len(api_keys) == 0:
return client.call('User_Customer', 'addApiAuthenticationKey', id=user_record['id'])
return api_keys[0]['authenticationKey']
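# Usage sketch (hedged; the credentials below are placeholders, not real keys):
#
#     client = SoftLayer.Client(username='apikey', api_key='<secret>')
#     key = get_api_key(client, 'apikey', '<secret>')
#
# A 64-character secret is treated as a classic API key; anything else goes
# through the password/IAM path, which may create a key if none exists.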
@click.command()
@click.option('-a', '--auth', type=click.Choice(['ibmid', 'cloud_key', 'classic_key', 'sso']),
help="Select a method of authentication.", default='classic_key', show_default=True)
@environment.pass_env
def cli(env, auth):
"""Setup the ~/.softlayer file with username and apikey.
[Auth Types]
ibmid: Requires your cloud.ibm.com username and password, and generates a classic infrastructure API key.
cloud_key: A 32 character API key. Username will be 'apikey'
classic_key: A 64 character API key used in the Softlayer/Classic Infrastructure systems.
sso: For users with @ibm.com email addresses.
"""
username = None
api_key = None
timeout = 0
defaults = config.get_settings_from_client(env.client)
endpoint_url = get_endpoint_url(env, defaults.get('endpoint_url', 'public'))
    # Get the username and API key
if auth == 'ibmid':
username, api_key = ibmid_login(env)
elif auth == 'cloud_key':
username = 'apikey'
        secret = env.getpass('Classic Infrastructure API Key', default=defaults['api_key'])
new_client = SoftLayer.Client(username=username, api_key=secret, endpoint_url=endpoint_url, timeout=timeout)
api_key = get_api_key(new_client, username, secret)
elif auth == 'sso':
username, api_key = sso_login(env)
else:
        username = env.input('Classic Infrastructure Username', default=defaults['username'])
        secret = env.getpass('Classic Infrastructure API Key', default=defaults['api_key'])
new_client = SoftLayer.Client(username=username, api_key=secret, endpoint_url=endpoint_url, timeout=timeout)
api_key = get_api_key(new_client, username, secret)
# Ask for timeout, convert to float, then to int
timeout = int(float(env.input('Timeout', default=defaults['timeout'] or 0)))
path = '~/.softlayer'
if env.config_file:
path = env.config_file
config_path = os.path.expanduser(path)
env.out(env.fmt(config.config_table({'username': username,
'api_key': api_key,
'endpoint_url': endpoint_url,
'timeout': timeout})))
if not formatting.confirm('Are you sure you want to write settings to "%s"?' % config_path, default=True):
raise exceptions.CLIAbort('Aborted.')
# Persist the config file. Read the target config file in before
# setting the values to avoid clobbering settings
parsed_config = configparser.RawConfigParser()
parsed_config.read(config_path)
try:
parsed_config.add_section('softlayer')
except configparser.DuplicateSectionError:
pass
parsed_config.set('softlayer', 'username', username)
parsed_config.set('softlayer', 'api_key', api_key)
parsed_config.set('softlayer', 'endpoint_url', endpoint_url)
    parsed_config.set('softlayer', 'timeout', str(timeout))
config_fd = os.fdopen(os.open(config_path, (os.O_WRONLY | os.O_CREAT | os.O_TRUNC), 0o600), 'w')
try:
parsed_config.write(config_fd)
finally:
config_fd.close()
env.fout("Configuration Updated Successfully")
def get_endpoint_url(env, endpoint='public'):
"""Gets the Endpoint to use."""
endpoint_type = env.input('Endpoint (public|private|custom)', default=endpoint)
endpoint_type = endpoint_type.lower()
if endpoint_type == 'public':
endpoint_url = SoftLayer.API_PUBLIC_ENDPOINT
elif endpoint_type == 'private':
endpoint_url = SoftLayer.API_PRIVATE_ENDPOINT
else:
if endpoint_type == 'custom':
endpoint_url = env.input('Endpoint URL', default=endpoint)
else:
endpoint_url = endpoint_type
return endpoint_url
def ibmid_login(env):
"""Uses an IBMid and Password to get an access token, and that access token to get an API key"""
email = env.input("Email").strip()
password = env.getpass("Password").strip()
client = SoftLayer.API.IAMClient(config_file=env.config_file)
# STEP 1: Get the base IAM Token with a username/password
tokens = client.authenticate_with_password(email, password)
# STEP 2: Figure out which account we want to use
account = get_accounts(env, tokens['access_token'])
# STEP 3: Refresh Token, using a specific account this time.
tokens = client.refresh_iam_token(tokens['refresh_token'], account['account_id'], account['ims_id'])
# STEP 4: Get or create the Classic Infrastructure API key
user = client.call('SoftLayer_Account', 'getCurrentUser', mask="mask[id,username,apiAuthenticationKeys]")
if len(user.get('apiAuthenticationKeys', [])) == 0:
env.fout("Creating a Classic Infrastrucutre API key for {}".format(user['username']))
api_key = client.call('User_Customer', 'addApiAuthenticationKey', id=user['id'])
else:
api_key = user['apiAuthenticationKeys'][0]['authenticationKey']
return user.get('username'), api_key
def get_accounts(env, a_token):
"""Gets account list from accounts.cloud.ibm.com/v1/accounts"""
iam_client = requests.Session()
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': USER_AGENT,
'Accept': 'application/json'
}
headers['Authorization'] = 'Bearer {}'.format(a_token)
response = iam_client.request(
'GET',
'https://accounts.cloud.ibm.com/v1/accounts',
headers=headers
)
response.raise_for_status()
accounts = json.loads(response.text)
selected = None
ims_id = None
if accounts.get('total_results', 0) == 1:
selected = accounts['resources'][0]
else:
env.fout("Select an Account...")
counter = 1
for selected in accounts.get('resources', []):
links = utils.lookup(selected, 'metadata', 'linked_accounts') or []
for link in links:
if link.get('origin') == "IMS":
ims_id = link.get('id')
if ims_id is None:
ims_id = "Unlinked"
env.fout("{}: {} ({})".format(counter, utils.lookup(selected, 'entity', 'name'), ims_id))
counter = counter + 1
            ims_id = None  # Reset so one account's IMS id doesn't leak into the next iteration
choice = click.prompt('Enter a number', type=int)
        if choice < 1 or choice > len(accounts.get('resources', [])):
            raise exceptions.CLIAbort('Account selection out of range.')
        selected = accounts['resources'][choice - 1]
account_id = utils.lookup(selected, 'metadata', 'guid')
links = utils.lookup(selected, 'metadata', 'linked_accounts') or []
for link in links:
if link.get('origin') == "IMS":
ims_id = link.get('id')
print("Using account {}".format(utils.lookup(selected, 'entity', 'name')))
return {"account_id": account_id, "ims_id": ims_id}
def get_sso_url():
"""Gets the URL for using SSO Tokens"""
iam_client = requests.Session()
headers = {
'Content-Type': 'application/json',
'User-Agent': USER_AGENT,
'Accept': 'application/json'
}
response = iam_client.request(
'GET',
'https://iam.cloud.ibm.com/identity/.well-known/openid-configuration',
headers=headers
)
response.raise_for_status()
data = json.loads(response.text)
return data.get('passcode_endpoint')
def sso_login(env):
"""Uses a SSO token to get a SL apikey"""
passcode_url = get_sso_url()
env.fout("Get a one-time code from {} to proceed.".format(passcode_url))
open_browser = env.input("Open the URL in the default browser? [Y/n]", default='Y')
if open_browser.lower() == 'y':
webbrowser.open(passcode_url)
passcode = env.input("One-time code")
client = SoftLayer.API.IAMClient(config_file=env.config_file)
# STEP 1: Get the base IAM Token with a username/password
tokens = client.authenticate_with_passcode(passcode)
# STEP 2: Figure out which account we want to use
account = get_accounts(env, tokens['access_token'])
# STEP 3: Refresh Token, using a specific account this time.
tokens = client.refresh_iam_token(tokens['refresh_token'], account['account_id'], account['ims_id'])
# STEP 4: Get or create the Classic Infrastructure API key
# client.authenticate_with_iam_token(tokens['access_token'])
user = client.call('SoftLayer_Account', 'getCurrentUser', mask="mask[id,username,apiAuthenticationKeys]")
if len(user.get('apiAuthenticationKeys', [])) == 0:
env.fout("Creating a Classic Infrastrucutre API key for {}".format(user['username']))
api_key = client.call('User_Customer', 'addApiAuthenticationKey', id=user['id'])
else:
api_key = user['apiAuthenticationKeys'][0]['authenticationKey']
return user.get('username'), api_key
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import time
from devil.android import device_errors
from devil.android import flag_changer
from devil.utils import reraiser_thread
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.local.device import local_device_test_run
TIMEOUT_ANNOTATIONS = [
('Manual', 10 * 60 * 60),
('IntegrationTest', 30 * 60),
('External', 10 * 60),
('EnormousTest', 10 * 60),
('LargeTest', 5 * 60),
('MediumTest', 3 * 60),
('SmallTest', 1 * 60),
]
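# Example (illustrative): a test annotated with @LargeTest is allotted
# 5 * 60 seconds before it is treated as hung. The list is ordered from most
# to least permissive, and lookups presumably take the first matching entry.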
# TODO(jbudorick): Make this private once the instrumentation test_runner is
# deprecated.
def DidPackageCrashOnDevice(package_name, device):
# Dismiss any error dialogs. Limit the number in case we have an error
# loop or we are failing to dismiss.
try:
for _ in xrange(10):
package = device.DismissCrashDialogIfNeeded()
if not package:
return False
# Assume test package convention of ".test" suffix
if package in package_name:
return True
except device_errors.CommandFailedError:
logging.exception('Error while attempting to dismiss crash dialog.')
return False
_CURRENT_FOCUS_CRASH_RE = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
class LocalDeviceInstrumentationTestRun(
local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
super(LocalDeviceInstrumentationTestRun, self).__init__(env, test_instance)
self._flag_changers = {}
def TestPackage(self):
return None
def SetUp(self):
def substitute_external_storage(d, external_storage):
if not d:
return external_storage
elif isinstance(d, list):
return '/'.join(p if p else external_storage for p in d)
else:
return d
@local_device_test_run.handle_shard_failures_with(
self._env.BlacklistDevice)
def individual_device_set_up(dev, host_device_tuples):
def install_apk():
if self._test_instance.apk_under_test_incremental_install_script:
local_device_test_run.IncrementalInstall(
dev,
self._test_instance.apk_under_test,
self._test_instance.apk_under_test_incremental_install_script)
else:
permissions = self._test_instance.apk_under_test.GetPermissions()
dev.Install(self._test_instance.apk_under_test,
permissions=permissions)
if self._test_instance.test_apk_incremental_install_script:
local_device_test_run.IncrementalInstall(
dev,
self._test_instance.test_apk,
self._test_instance.test_apk_incremental_install_script)
else:
permissions = self._test_instance.test_apk.GetPermissions()
dev.Install(self._test_instance.test_apk, permissions=permissions)
for apk in self._test_instance.additional_apks:
dev.Install(apk)
def push_test_data():
external_storage = dev.GetExternalStoragePath()
host_device_tuples_substituted = [
(h, substitute_external_storage(d, external_storage))
for h, d in host_device_tuples]
logging.info('instrumentation data deps:')
for h, d in host_device_tuples_substituted:
logging.info('%r -> %r', h, d)
dev.PushChangedFiles(host_device_tuples_substituted)
def create_flag_changer():
if self._test_instance.flags:
if not self._test_instance.package_info:
logging.error("Couldn't set flags: no package info")
elif not self._test_instance.package_info.cmdline_file:
logging.error("Couldn't set flags: no cmdline_file")
else:
self._CreateFlagChangerIfNeeded(dev)
logging.debug('Attempting to set flags: %r',
self._test_instance.flags)
self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)
valgrind_tools.SetChromeTimeoutScale(
dev, self._test_instance.timeout_scale)
steps = (install_apk, push_test_data, create_flag_changer)
if self._env.concurrent_adb:
reraiser_thread.RunAsync(steps)
else:
for step in steps:
step()
self._env.parallel_devices.pMap(
individual_device_set_up,
self._test_instance.GetDataDependencies())
def TearDown(self):
def individual_device_tear_down(dev):
if str(dev) in self._flag_changers:
self._flag_changers[str(dev)].Restore()
valgrind_tools.SetChromeTimeoutScale(dev, None)
self._env.parallel_devices.pMap(individual_device_tear_down)
def _CreateFlagChangerIfNeeded(self, device):
if not str(device) in self._flag_changers:
self._flag_changers[str(device)] = flag_changer.FlagChanger(
device, self._test_instance.package_info.cmdline_file)
#override
def _CreateShards(self, tests):
return tests
#override
def _GetTests(self):
return self._test_instance.GetTests()
#override
def _GetTestName(self, test):
return '%s#%s' % (test['class'], test['method'])
def _GetTestNameForDisplay(self, test):
display_name = self._GetTestName(test)
flags = test['flags']
if flags.add:
display_name = '%s with {%s}' % (display_name, ' '.join(flags.add))
if flags.remove:
display_name = '%s without {%s}' % (display_name, ' '.join(flags.remove))
return display_name
#override
def _RunTest(self, device, test):
extras = {}
flags = None
test_timeout_scale = None
if isinstance(test, list):
if not self._test_instance.driver_apk:
raise Exception('driver_apk does not exist. '
'Please build it and try again.')
def name_and_timeout(t):
n = self._GetTestName(t)
i = self._GetTimeoutFromAnnotations(t['annotations'], n)
return (n, i)
test_names, timeouts = zip(*(name_and_timeout(t) for t in test))
test_name = ','.join(test_names)
test_display_name = test_name
target = '%s/%s' % (
self._test_instance.driver_package,
self._test_instance.driver_name)
extras.update(
self._test_instance.GetDriverEnvironmentVars(
test_list=test_names))
timeout = sum(timeouts)
else:
test_name = self._GetTestName(test)
test_display_name = test_name
target = '%s/%s' % (
self._test_instance.test_package, self._test_instance.test_runner)
extras['class'] = test_name
if 'flags' in test:
flags = test['flags']
test_display_name = self._GetTestNameForDisplay(test)
timeout = self._GetTimeoutFromAnnotations(
test['annotations'], test_display_name)
test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
test['annotations'])
if test_timeout_scale and test_timeout_scale != 1:
valgrind_tools.SetChromeTimeoutScale(
device, test_timeout_scale * self._test_instance.timeout_scale)
logging.info('preparing to run %s: %s', test_display_name, test)
if flags:
self._CreateFlagChangerIfNeeded(device)
self._flag_changers[str(device)].PushFlags(
add=flags.add, remove=flags.remove)
try:
time_ms = lambda: int(time.time() * 1e3)
start_ms = time_ms()
output = device.StartInstrumentation(
target, raw=True, extras=extras, timeout=timeout, retries=0)
duration_ms = time_ms() - start_ms
finally:
if flags:
self._flag_changers[str(device)].Restore()
if test_timeout_scale:
valgrind_tools.SetChromeTimeoutScale(
device, self._test_instance.timeout_scale)
# TODO(jbudorick): Make instrumentation tests output a JSON so this
# doesn't have to parse the output.
logging.debug('output from %s:', test_display_name)
for l in output:
logging.debug(' %s', l)
result_code, result_bundle, statuses = (
self._test_instance.ParseAmInstrumentRawOutput(output))
results = self._test_instance.GenerateTestResults(
result_code, result_bundle, statuses, start_ms, duration_ms)
if flags:
for r in results:
if r.GetName() == test_name:
r.SetName(test_display_name)
if DidPackageCrashOnDevice(self._test_instance.test_package, device):
for r in results:
if r.GetType() == base_test_result.ResultType.UNKNOWN:
r.SetType(base_test_result.ResultType.CRASH)
# TODO(jbudorick): ClearApplicationState on failure before switching
# instrumentation tests to platform mode (but respect --skip-clear-data).
return results
#override
def _ShouldShard(self):
return True
@classmethod
def _GetTimeoutScaleFromAnnotations(cls, annotations):
try:
return int(annotations.get('TimeoutScale', 1))
except ValueError as e:
logging.warning("Non-integer value of TimeoutScale ignored. (%s)", str(e))
return 1
@classmethod
def _GetTimeoutFromAnnotations(cls, annotations, test_name):
for k, v in TIMEOUT_ANNOTATIONS:
if k in annotations:
timeout = v
break
else:
logging.warning('Using default 1 minute timeout for %s', test_name)
timeout = 60
timeout *= cls._GetTimeoutScaleFromAnnotations(annotations)
return timeout
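# Illustrative sketch of the annotation-driven timeout computation above; the
# annotation dict and test name are hypothetical.
def _demo_timeout_from_annotations():
  annotations = {'LargeTest': None, 'TimeoutScale': 2}
  # 'LargeTest' maps to 5 * 60 seconds, then TimeoutScale=2 doubles it.
  return LocalDeviceInstrumentationTestRun._GetTimeoutFromAnnotations(
      annotations, 'org.chromium.Example#testFoo')  # -> 600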
|
|
from __future__ import unicode_literals
import uuid
from moto.core import BaseModel
from moto.core.utils import camelcase_to_underscores, unix_time
from ..constants import DECISIONS_FIELDS
from ..exceptions import (
SWFDefaultUndefinedFault,
SWFValidationException,
SWFDecisionValidationException,
)
from ..utils import decapitalize
from .activity_task import ActivityTask
from .activity_type import ActivityType
from .decision_task import DecisionTask
from .history_event import HistoryEvent
from .timeout import Timeout
# TODO: extract decision related logic into a Decision class
class WorkflowExecution(BaseModel):
    # NB: the list is ordered exactly as in SWF validation exceptions so we can
    # mimic error messages closely; don't reorder it without checking SWF.
KNOWN_DECISION_TYPES = [
"CompleteWorkflowExecution",
"StartTimer",
"RequestCancelExternalWorkflowExecution",
"SignalExternalWorkflowExecution",
"CancelTimer",
"RecordMarker",
"ScheduleActivityTask",
"ContinueAsNewWorkflowExecution",
"ScheduleLambdaFunction",
"FailWorkflowExecution",
"RequestCancelActivityTask",
"StartChildWorkflowExecution",
"CancelWorkflowExecution",
]
def __init__(self, domain, workflow_type, workflow_id, **kwargs):
self.domain = domain
self.workflow_id = workflow_id
self.run_id = uuid.uuid4().hex
# WorkflowExecutionInfo
self.cancel_requested = False
# TODO: check valid values among:
# COMPLETED | FAILED | CANCELED | TERMINATED | CONTINUED_AS_NEW | TIMED_OUT
# TODO: implement them all
self.close_cause = None
self.close_status = None
self.close_timestamp = None
self.execution_status = "OPEN"
self.latest_activity_task_timestamp = None
self.latest_execution_context = None
self.parent = None
self.start_timestamp = None
self.tag_list = kwargs.get("tag_list", None) or []
self.timeout_type = None
self.workflow_type = workflow_type
# args processing
        # NB: the order follows boto/SWF order of exceptions appearance (if no
        # param is set, SWF will raise DefaultUndefinedFault errors in the
        # same order as the few lines that follow)
self._set_from_kwargs_or_workflow_type(
kwargs, "execution_start_to_close_timeout"
)
self._set_from_kwargs_or_workflow_type(kwargs, "task_list", "task_list")
self._set_from_kwargs_or_workflow_type(kwargs, "task_start_to_close_timeout")
self._set_from_kwargs_or_workflow_type(kwargs, "child_policy")
self.input = kwargs.get("input")
# counters
self.open_counts = {
"openTimers": 0,
"openDecisionTasks": 0,
"openActivityTasks": 0,
"openChildWorkflowExecutions": 0,
"openLambdaFunctions": 0,
}
# events
self._events = []
# child workflows
self.child_workflow_executions = []
self._previous_started_event_id = None
def __repr__(self):
return "WorkflowExecution(run_id: {0})".format(self.run_id)
def _set_from_kwargs_or_workflow_type(
self, kwargs, local_key, workflow_type_key=None
):
if workflow_type_key is None:
workflow_type_key = "default_" + local_key
value = kwargs.get(local_key)
if not value and hasattr(self.workflow_type, workflow_type_key):
value = getattr(self.workflow_type, workflow_type_key)
if not value:
raise SWFDefaultUndefinedFault(local_key)
setattr(self, local_key, value)
@property
def _configuration_keys(self):
return [
"executionStartToCloseTimeout",
"childPolicy",
"taskPriority",
"taskStartToCloseTimeout",
]
def to_short_dict(self):
return {"workflowId": self.workflow_id, "runId": self.run_id}
def to_medium_dict(self):
hsh = {
"execution": self.to_short_dict(),
"workflowType": self.workflow_type.to_short_dict(),
"startTimestamp": 1420066800.123,
"executionStatus": self.execution_status,
"cancelRequested": self.cancel_requested,
}
if hasattr(self, "tag_list") and self.tag_list:
hsh["tagList"] = self.tag_list
return hsh
def to_full_dict(self):
hsh = {
"executionInfo": self.to_medium_dict(),
"executionConfiguration": {"taskList": {"name": self.task_list}},
}
# info
if self.execution_status == "CLOSED":
hsh["executionInfo"]["closeStatus"] = self.close_status
hsh["executionInfo"]["closeTimestamp"] = self.close_timestamp
# configuration
for key in self._configuration_keys:
attr = camelcase_to_underscores(key)
if not hasattr(self, attr):
continue
if not getattr(self, attr):
continue
hsh["executionConfiguration"][key] = getattr(self, attr)
# counters
hsh["openCounts"] = self.open_counts
# latest things
if self.latest_execution_context:
hsh["latestExecutionContext"] = self.latest_execution_context
if self.latest_activity_task_timestamp:
hsh["latestActivityTaskTimestamp"] = self.latest_activity_task_timestamp
return hsh
def to_list_dict(self):
hsh = {
"execution": {"workflowId": self.workflow_id, "runId": self.run_id},
"workflowType": self.workflow_type.to_short_dict(),
"startTimestamp": self.start_timestamp,
"executionStatus": self.execution_status,
"cancelRequested": self.cancel_requested,
}
if self.tag_list:
hsh["tagList"] = self.tag_list
if self.parent:
hsh["parent"] = self.parent
if self.close_status:
hsh["closeStatus"] = self.close_status
if self.close_timestamp:
hsh["closeTimestamp"] = self.close_timestamp
return hsh
def _process_timeouts(self):
"""
SWF timeouts can happen on different objects (workflow executions,
activity tasks, decision tasks) and should be processed in order.
A specific timeout can change the workflow execution state and have an
impact on other timeouts: for instance, if the workflow execution
timeouts, subsequent timeouts on activity or decision tasks are
irrelevant ; if an activity task timeouts, other timeouts on this task
are irrelevant, and a new decision is fired, which could well timeout
before the end of the workflow.
So the idea here is to find the earliest timeout that would have been
triggered, process it, then make the workflow state progress and repeat
the whole process.
"""
timeout_candidates = []
# workflow execution timeout
timeout_candidates.append(self.first_timeout())
# decision tasks timeouts
for task in self.decision_tasks:
timeout_candidates.append(task.first_timeout())
# activity tasks timeouts
for task in self.activity_tasks:
timeout_candidates.append(task.first_timeout())
# remove blank values (foo.first_timeout() is a Timeout or None)
timeout_candidates = list(filter(None, timeout_candidates))
# now find the first timeout to process
first_timeout = None
if timeout_candidates:
first_timeout = min(timeout_candidates, key=lambda t: t.timestamp)
if first_timeout:
should_schedule_decision_next = False
if isinstance(first_timeout.obj, WorkflowExecution):
self.timeout(first_timeout)
elif isinstance(first_timeout.obj, DecisionTask):
self.timeout_decision_task(first_timeout)
should_schedule_decision_next = True
elif isinstance(first_timeout.obj, ActivityTask):
self.timeout_activity_task(first_timeout)
should_schedule_decision_next = True
else:
raise NotImplementedError("Unhandled timeout object")
# schedule decision task if needed
if should_schedule_decision_next:
self.schedule_decision_task()
# the workflow execution progressed, let's see if another
# timeout should be processed
self._process_timeouts()
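    # Illustrative sketch (not part of moto): how the earliest pending timeout
    # is selected, as the docstring above describes. first_timeout() on the
    # execution and on each task returns a Timeout or None; the candidate with
    # the smallest timestamp is processed first.
    def _demo_earliest_timeout(self):
        candidates = [self.first_timeout()]
        candidates += [t.first_timeout() for t in self.decision_tasks]
        candidates += [t.first_timeout() for t in self.activity_tasks]
        candidates = [c for c in candidates if c is not None]
        return min(candidates, key=lambda t: t.timestamp) if candidates else None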
def events(self, reverse_order=False):
if reverse_order:
return reversed(self._events)
else:
return self._events
def next_event_id(self):
event_ids = [evt.event_id for evt in self._events]
return max(event_ids or [0]) + 1
def _add_event(self, *args, **kwargs):
evt = HistoryEvent(self.next_event_id(), *args, **kwargs)
self._events.append(evt)
return evt
def start(self):
self.start_timestamp = unix_time()
self._add_event(
"WorkflowExecutionStarted",
child_policy=self.child_policy,
execution_start_to_close_timeout=self.execution_start_to_close_timeout,
# TODO: fix this hardcoded value
parent_initiated_event_id=0,
task_list=self.task_list,
task_start_to_close_timeout=self.task_start_to_close_timeout,
workflow_type=self.workflow_type,
input=self.input,
)
self.schedule_decision_task()
def _schedule_decision_task(self):
evt = self._add_event(
"DecisionTaskScheduled",
start_to_close_timeout=self.task_start_to_close_timeout,
task_list=self.task_list,
)
self.domain.add_to_decision_task_list(
self.task_list, DecisionTask(self, evt.event_id)
)
self.open_counts["openDecisionTasks"] += 1
def schedule_decision_task(self):
self._schedule_decision_task()
    # Shortcut for tests: auto-starts the scheduled decision task when needed
def schedule_and_start_decision_task(self, identity=None):
self._schedule_decision_task()
decision_task = self.decision_tasks[-1]
self.start_decision_task(decision_task.task_token, identity=identity)
@property
def decision_tasks(self):
return [t for t in self.domain.decision_tasks if t.workflow_execution == self]
@property
def activity_tasks(self):
return [t for t in self.domain.activity_tasks if t.workflow_execution == self]
def _find_decision_task(self, task_token):
for dt in self.decision_tasks:
if dt.task_token == task_token:
return dt
raise ValueError("No decision task with token: {0}".format(task_token))
def start_decision_task(self, task_token, identity=None):
dt = self._find_decision_task(task_token)
evt = self._add_event(
"DecisionTaskStarted",
scheduled_event_id=dt.scheduled_event_id,
identity=identity,
)
dt.start(evt.event_id, self._previous_started_event_id)
self._previous_started_event_id = evt.event_id
def complete_decision_task(
self, task_token, decisions=None, execution_context=None
):
# 'decisions' can be None per boto.swf defaults, so replace it with something iterable
if not decisions:
decisions = []
# In case of a malformed or invalid decision task, SWF will raise an error and
# it won't perform any of the decisions in the decision set.
self.validate_decisions(decisions)
dt = self._find_decision_task(task_token)
evt = self._add_event(
"DecisionTaskCompleted",
scheduled_event_id=dt.scheduled_event_id,
started_event_id=dt.started_event_id,
execution_context=execution_context,
)
dt.complete()
self.should_schedule_decision_next = False
self.handle_decisions(evt.event_id, decisions)
if self.should_schedule_decision_next:
self.schedule_decision_task()
self.latest_execution_context = execution_context
def _check_decision_attributes(self, kind, value, decision_id):
problems = []
constraints = DECISIONS_FIELDS.get(kind, {})
for key, constraint in constraints.items():
if constraint["required"] and not value.get(key):
problems.append(
{
"type": "null_value",
"where": "decisions.{0}.member.{1}.{2}".format(
decision_id, kind, key
),
}
)
return problems
def validate_decisions(self, decisions):
"""
Performs some basic validations on decisions. The real SWF service
seems to break early and *not* process any decision if there's a
validation problem, such as a malformed decision for instance. I didn't
find an explicit documentation for that though, so criticisms welcome.
"""
problems = []
        # check that any close decision is the last decision in the list
        # (the real SWF service behaves the same way if you provide two close decisions)
        close_decision_types = [
            "CompleteWorkflowExecution",
            "FailWorkflowExecution",
            "CancelWorkflowExecution",
        ]
        for dcs in decisions[:-1]:
            if dcs["decisionType"] in close_decision_types:
                raise SWFValidationException("Close must be last decision in list")
        for decision_number, dcs in enumerate(decisions, start=1):
            # check the decision type's mandatory attributes
            # NB: the real SWF service seems to check attributes even for attribute
            # lists that do not match the decisionType, so we do the same
attrs_to_check = [d for d in dcs.keys() if d.endswith("DecisionAttributes")]
if dcs["decisionType"] in self.KNOWN_DECISION_TYPES:
decision_type = dcs["decisionType"]
decision_attr = "{0}DecisionAttributes".format(
decapitalize(decision_type)
)
attrs_to_check.append(decision_attr)
for attr in attrs_to_check:
problems += self._check_decision_attributes(
attr, dcs.get(attr, {}), decision_number
)
# check decision type is correct
if dcs["decisionType"] not in self.KNOWN_DECISION_TYPES:
problems.append(
{
"type": "bad_decision_type",
"value": dcs["decisionType"],
"where": "decisions.{0}.member.decisionType".format(
decision_number
),
"possible_values": ", ".join(self.KNOWN_DECISION_TYPES),
}
)
# raise if any problem
if any(problems):
raise SWFDecisionValidationException(problems)
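    # Hypothetical sketch of what validate_decisions() rejects: an unknown
    # decisionType is collected as a problem and raised in one batch.
    def _demo_validate_decisions(self):
        try:
            self.validate_decisions([{"decisionType": "TotallyUnknown"}])
        except SWFDecisionValidationException:
            return True  # the bad decision type was caught
        return False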
def handle_decisions(self, event_id, decisions):
"""
Handles a Decision according to SWF docs.
See: http://docs.aws.amazon.com/amazonswf/latest/apireference/API_Decision.html
"""
# handle each decision separately, in order
for decision in decisions:
decision_type = decision["decisionType"]
attributes_key = "{0}DecisionAttributes".format(decapitalize(decision_type))
attributes = decision.get(attributes_key, {})
if decision_type == "CompleteWorkflowExecution":
self.complete(event_id, attributes.get("result"))
elif decision_type == "FailWorkflowExecution":
self.fail(event_id, attributes.get("details"), attributes.get("reason"))
elif decision_type == "ScheduleActivityTask":
self.schedule_activity_task(event_id, attributes)
else:
# TODO: implement Decision type: CancelTimer
# TODO: implement Decision type: CancelWorkflowExecution
# TODO: implement Decision type: ContinueAsNewWorkflowExecution
# TODO: implement Decision type: RecordMarker
# TODO: implement Decision type: RequestCancelActivityTask
# TODO: implement Decision type: RequestCancelExternalWorkflowExecution
# TODO: implement Decision type: ScheduleLambdaFunction
# TODO: implement Decision type: SignalExternalWorkflowExecution
# TODO: implement Decision type: StartChildWorkflowExecution
# TODO: implement Decision type: StartTimer
raise NotImplementedError(
"Cannot handle decision: {0}".format(decision_type)
)
# finally decrement counter if and only if everything went well
self.open_counts["openDecisionTasks"] -= 1
def complete(self, event_id, result=None):
self.execution_status = "CLOSED"
self.close_status = "COMPLETED"
self.close_timestamp = unix_time()
self._add_event(
"WorkflowExecutionCompleted",
decision_task_completed_event_id=event_id,
result=result,
)
def fail(self, event_id, details=None, reason=None):
# TODO: implement length constraints on details/reason
self.execution_status = "CLOSED"
self.close_status = "FAILED"
self.close_timestamp = unix_time()
self._add_event(
"WorkflowExecutionFailed",
decision_task_completed_event_id=event_id,
details=details,
reason=reason,
)
def schedule_activity_task(self, event_id, attributes):
# Helper function to avoid repeating ourselves in the next sections
def fail_schedule_activity_task(_type, _cause):
# TODO: implement other possible failure mode: OPEN_ACTIVITIES_LIMIT_EXCEEDED
# NB: some failure modes are not implemented and probably won't be implemented in
# the future, such as ACTIVITY_CREATION_RATE_EXCEEDED or
# OPERATION_NOT_PERMITTED
self._add_event(
"ScheduleActivityTaskFailed",
activity_id=attributes["activityId"],
activity_type=_type,
cause=_cause,
decision_task_completed_event_id=event_id,
)
self.should_schedule_decision_next = True
activity_type = self.domain.get_type(
"activity",
attributes["activityType"]["name"],
attributes["activityType"]["version"],
ignore_empty=True,
)
if not activity_type:
fake_type = ActivityType(
attributes["activityType"]["name"],
attributes["activityType"]["version"],
)
fail_schedule_activity_task(fake_type, "ACTIVITY_TYPE_DOES_NOT_EXIST")
return
if activity_type.status == "DEPRECATED":
fail_schedule_activity_task(activity_type, "ACTIVITY_TYPE_DEPRECATED")
return
if any(
at
for at in self.activity_tasks
if at.activity_id == attributes["activityId"]
):
fail_schedule_activity_task(activity_type, "ACTIVITY_ID_ALREADY_IN_USE")
return
# find task list or default task list, else fail
task_list = attributes.get("taskList", {}).get("name")
if not task_list and activity_type.task_list:
task_list = activity_type.task_list
if not task_list:
fail_schedule_activity_task(activity_type, "DEFAULT_TASK_LIST_UNDEFINED")
return
# find timeouts or default timeout, else fail
timeouts = {}
for _type in [
"scheduleToStartTimeout",
"scheduleToCloseTimeout",
"startToCloseTimeout",
"heartbeatTimeout",
]:
default_key = "default_task_" + camelcase_to_underscores(_type)
default_value = getattr(activity_type, default_key)
timeouts[_type] = attributes.get(_type, default_value)
if not timeouts[_type]:
error_key = default_key.replace("default_task_", "default_")
fail_schedule_activity_task(
activity_type, "{0}_UNDEFINED".format(error_key.upper())
)
return
# Only add event and increment counters now that nothing went wrong
evt = self._add_event(
"ActivityTaskScheduled",
activity_id=attributes["activityId"],
activity_type=activity_type,
control=attributes.get("control"),
decision_task_completed_event_id=event_id,
heartbeat_timeout=attributes.get("heartbeatTimeout"),
input=attributes.get("input"),
schedule_to_close_timeout=attributes.get("scheduleToCloseTimeout"),
schedule_to_start_timeout=attributes.get("scheduleToStartTimeout"),
start_to_close_timeout=attributes.get("startToCloseTimeout"),
task_list=task_list,
task_priority=attributes.get("taskPriority"),
)
task = ActivityTask(
activity_id=attributes["activityId"],
activity_type=activity_type,
input=attributes.get("input"),
scheduled_event_id=evt.event_id,
workflow_execution=self,
timeouts=timeouts,
)
self.domain.add_to_activity_task_list(task_list, task)
self.open_counts["openActivityTasks"] += 1
self.latest_activity_task_timestamp = unix_time()
def _find_activity_task(self, task_token):
for task in self.activity_tasks:
if task.task_token == task_token:
return task
raise ValueError("No activity task with token: {0}".format(task_token))
def start_activity_task(self, task_token, identity=None):
task = self._find_activity_task(task_token)
evt = self._add_event(
"ActivityTaskStarted",
scheduled_event_id=task.scheduled_event_id,
identity=identity,
)
task.start(evt.event_id)
def complete_activity_task(self, task_token, result=None):
task = self._find_activity_task(task_token)
self._add_event(
"ActivityTaskCompleted",
scheduled_event_id=task.scheduled_event_id,
started_event_id=task.started_event_id,
result=result,
)
task.complete()
self.open_counts["openActivityTasks"] -= 1
# TODO: ensure we don't schedule multiple decisions at the same time!
self.schedule_decision_task()
def fail_activity_task(self, task_token, reason=None, details=None):
task = self._find_activity_task(task_token)
self._add_event(
"ActivityTaskFailed",
scheduled_event_id=task.scheduled_event_id,
started_event_id=task.started_event_id,
reason=reason,
details=details,
)
task.fail()
self.open_counts["openActivityTasks"] -= 1
# TODO: ensure we don't schedule multiple decisions at the same time!
self.schedule_decision_task()
def terminate(self, child_policy=None, details=None, reason=None):
        # TODO: handle child policy for child workflows here
        # TODO: handle cause="CHILD_POLICY_APPLIED"
        # Until then, we set the cause manually to "OPERATOR_INITIATED"
cause = "OPERATOR_INITIATED"
if not child_policy:
child_policy = self.child_policy
self._add_event(
"WorkflowExecutionTerminated",
cause=cause,
child_policy=child_policy,
details=details,
reason=reason,
)
self.execution_status = "CLOSED"
self.close_status = "TERMINATED"
self.close_cause = "OPERATOR_INITIATED"
def signal(self, signal_name, input):
self._add_event(
"WorkflowExecutionSignaled", signal_name=signal_name, input=input
)
self.schedule_decision_task()
def first_timeout(self):
if not self.open or not self.start_timestamp:
return None
start_to_close_at = self.start_timestamp + int(
self.execution_start_to_close_timeout
)
_timeout = Timeout(self, start_to_close_at, "START_TO_CLOSE")
if _timeout.reached:
return _timeout
def timeout(self, timeout):
# TODO: process child policy on child workflows here or in the
# triggering function
self.execution_status = "CLOSED"
self.close_status = "TIMED_OUT"
self.timeout_type = timeout.kind
self._add_event(
"WorkflowExecutionTimedOut",
child_policy=self.child_policy,
event_timestamp=timeout.timestamp,
timeout_type=self.timeout_type,
)
def timeout_decision_task(self, _timeout):
task = _timeout.obj
task.timeout(_timeout)
self._add_event(
"DecisionTaskTimedOut",
event_timestamp=_timeout.timestamp,
scheduled_event_id=task.scheduled_event_id,
started_event_id=task.started_event_id,
timeout_type=task.timeout_type,
)
def timeout_activity_task(self, _timeout):
task = _timeout.obj
task.timeout(_timeout)
self._add_event(
"ActivityTaskTimedOut",
details=task.details,
event_timestamp=_timeout.timestamp,
scheduled_event_id=task.scheduled_event_id,
started_event_id=task.started_event_id,
timeout_type=task.timeout_type,
)
@property
def open(self):
return self.execution_status == "OPEN"
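# A minimal, hypothetical sketch of the decision loop modeled above; `domain`
# and `workflow_type` stand in for fully configured moto SWF objects (the
# workflow type must provide the default timeouts/task list/child policy).
def _demo_workflow_lifecycle(domain, workflow_type):
    execution = WorkflowExecution(domain, workflow_type, "wf-001")
    execution.start()  # records WorkflowExecutionStarted and schedules a decision task
    execution.schedule_and_start_decision_task(identity="demo-worker")
    task_token = execution.decision_tasks[-1].task_token
    execution.complete_decision_task(task_token, decisions=[{
        "decisionType": "CompleteWorkflowExecution",
        "completeWorkflowExecutionDecisionAttributes": {"result": "done"},
    }])
    return execution.execution_status  # -> "CLOSED"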
|
|
from PyQt4 import Qt, QtCore, QtGui
import vqt.main as vq_main
import vqt.tree as vq_tree
import envi.threads as e_threads
import cobra.remoteapp as c_remoteapp
import vivisect.remote.server as viv_server
from vqt.basics import *
class WorkspaceListModel(vq_tree.VQTreeModel):
columns = ('Name',)
class WorkspaceListView(vq_tree.VQTreeView):
def __init__(self, workspaces, parent=None):
vq_tree.VQTreeView.__init__(self, parent=parent)
model = WorkspaceListModel(parent=self)
self.setModel(model)
for wsname in workspaces:
model.append((wsname,))
class VivServerDialog(QtGui.QDialog):
def __init__(self, workspaces, parent=None):
QtGui.QDialog.__init__(self, parent=parent)
self.setWindowTitle('Select a workspace...')
self.wsname = None
self.wslist = WorkspaceListView(workspaces, parent=self)
self.buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
self.buttons.accepted.connect( self.accept )
self.buttons.rejected.connect( self.reject )
layout = VBox()
layout.addWidget(self.wslist)
layout.addWidget(self.buttons)
self.setLayout(layout)
self.wslist.doubleClicked.connect( self.workspaceActivated )
def getWorkspaceName(self):
self.exec_()
return self.wsname
def workspaceActivated(self, idx):
self.accept()
def accept(self):
for idx in self.wslist.selectedIndexes():
row = idx.internalPointer()
if row:
self.wsname = row.rowdata[0]
break
return QtGui.QDialog.accept(self)
class VivSaveServerDialog(QtGui.QDialog):
def __init__(self, vw, parent=None):
QtGui.QDialog.__init__(self, parent=parent)
self.setWindowTitle('Save to Workspace Server...')
self.vw = vw
try:
server = vw.config.remote.server
except AttributeError:
server = "visi.kenshoto.com"
self.wsname = QtGui.QLineEdit(vw.getMeta('StorageName',''), parent=self)
self.wsserver = QtGui.QLineEdit(server, parent=self)
self.setdef = QtGui.QCheckBox(parent=self)
self.buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
self.buttons.accepted.connect( self.accept )
self.buttons.rejected.connect( self.reject )
serverlayout = QtGui.QHBoxLayout()
serverlayout.addWidget(self.wsserver)
serverlayout.addWidget(QtGui.QLabel('Make Default:'))
serverlayout.addWidget(self.setdef)
layout = QtGui.QFormLayout()
layout.addRow('Workspace Name', self.wsname)
layout.addRow('Workspace Server', serverlayout)
layout.addWidget(self.buttons)
self.setLayout(layout)
def getNameAndServer(self):
if not self.exec_():
return (None,None)
wsname = str(self.wsname.text())
wsserver = str(self.wsserver.text())
return (wsname,wsserver)
def accept(self, *args, **kwargs):
QtGui.QDialog.accept(self, *args, **kwargs)
if self.setdef.isChecked():
cfg = self.vw.config.getSubConfig("remote")
cfg['server'] = str(self.wsserver.text())
self.vw.config.saveConfigFile()
# FIXME: should we combine VivConnectServerDialog with VivSaveServerDialog? They differ by only about 10 lines.
class VivConnectServerDialog(QtGui.QDialog):
def __init__(self, vw, parent=None):
QtGui.QDialog.__init__(self, parent=parent)
self.setWindowTitle('Workspace Server...')
self.vw = vw
try:
server = vw.config.remote.server
except AttributeError:
server = "visi.kenshoto.com"
self.wsserver = QtGui.QLineEdit(server, parent=self)
self.setdef = QtGui.QCheckBox(parent=self)
self.buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
self.buttons.accepted.connect( self.accept )
self.buttons.rejected.connect( self.reject )
serverlayout = QtGui.QHBoxLayout()
serverlayout.addWidget(self.wsserver)
serverlayout.addWidget(QtGui.QLabel('Make Default:'))
serverlayout.addWidget(self.setdef)
layout = QtGui.QFormLayout()
layout.addRow('Workspace Server', serverlayout)
layout.addWidget(self.buttons)
self.setLayout(layout)
def getServer(self):
if not self.exec_():
return None
wsserver = str(self.wsserver.text())
return wsserver
def accept(self, *args, **kwargs):
QtGui.QDialog.accept(self, *args, **kwargs)
if self.setdef.isChecked():
cfg = self.vw.config.getSubConfig("remote")
cfg['server'] = str(self.wsserver.text())
self.vw.config.saveConfigFile()
@vq_main.idlethread
def openServerAndWorkspace(vw, parent=None):
dia = VivConnectServerDialog(vw, parent=parent)
host = dia.getServer()
    if host is None:
return
connServerAndWorkspace(vw, str(host), parent=parent)
@vq_main.workthread
def connServerAndWorkspace(vw, host, parent=None):
# NOTE: do *not* touch parent (or qt) in here!
try:
server = viv_server.connectToServer(host)
wslist = server.listWorkspaces()
selectServerWorkspace(vw, server, wslist, parent=parent)
    except Exception as e:
vw.vprint('Server Error: %s' % e)
return
@vq_main.idlethread
def selectServerWorkspace(vw, server, workspaces, parent=None):
dia = VivServerDialog(workspaces, parent=parent)
workspace = dia.getWorkspaceName()
    if workspace is None:
return
loadServerWorkspace(vw, server, workspace)
@vq_main.workthread
def loadServerWorkspace(oldvw, server, workspace):
oldvw.vprint('Loading Workspace: %s' % workspace)
vw = viv_server.getServerWorkspace(server, workspace)
import vivisect.qt.main as viv_q_main
viv_q_main.runqt(vw, closeme=oldvw.getVivGui())
@vq_main.idlethread
def saveToServer(vw, parent=None):
dia = VivSaveServerDialog(vw, parent=parent)
    wsname, wsserver = dia.getNameAndServer()
    if wsname is None:
        return
    vw.vprint('Saving to Workspace Server: %s (%s)' % (wsserver, wsname))
    sendServerWorkspace(vw, wsname, wsserver)
@e_threads.firethread
def sendServerWorkspace(vw, wsname, wsserver):
try:
events = vw.exportWorkspace()
server = viv_server.connectToServer(wsserver)
server.addNewWorkspace(wsname, events)
    except Exception as e:
vw.vprint('Workspace Server Error: %s' % e)
return
vw.setMeta('WorkspaceServer', wsserver)
def openSharedWorkspace(vw, parent=None):
'''
Open a workspace shared by a vivisect peer.
'''
hostport, ok = QtGui.QInputDialog.getText(parent, 'Shared Workspace...', 'host:port')
if not ok:
return
uri = 'cobra://%s/vivisect.remote.client?msgpack=1' % hostport
c_remoteapp.execRemoteApp(uri)
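# Hypothetical usage sketch: the typical entry points a Vivisect GUI would
# wire to menu actions; `vw` is an assumed, fully initialized workspace.
def _demo_remote_actions(vw, parent=None):
    # Connect to a server, pick a workspace from the list, and load it.
    openServerAndWorkspace(vw, parent=parent)
    # Or push the current workspace up to a (possibly default) server.
    saveToServer(vw, parent=parent)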
|
|
from __future__ import unicode_literals
import sys
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.template.base import TOKEN_TEXT, TOKEN_VAR, render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import six, translation
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = lang_code
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = languages
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop, asvar=None,
message_context=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, six.string_types):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None, message_context=None, trimmed=False):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
self.message_context = message_context
self.trimmed = trimmed
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents.replace('%', '%%'))
elif token.token_type == TOKEN_VAR:
result.append('%%(%s)s' % token.contents)
vars.append(token.contents)
msg = ''.join(result)
if self.trimmed:
msg = translation.trim_whitespace(msg)
return msg, vars
def render(self, context, nested=False):
if self.message_context:
message_context = self.message_context.resolve(context)
else:
message_context = None
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
        # update() works like a push(), so the corresponding context.pop() is at
        # the end of the function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
if message_context:
result = translation.npgettext(message_context, singular,
plural, count)
else:
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
if message_context:
result = translation.pgettext(message_context, singular)
else:
result = translation.ugettext(singular)
default_value = context.template.engine.string_if_invalid
def render_value(key):
if key in context:
val = context[key]
else:
val = default_value % key if '%s' in default_value else default_value
return render_value_in_context(val, context)
data = {v: render_value(v) for v in vars}
context.pop()
try:
result = result % data
except (KeyError, ValueError):
if nested:
# Either string is malformed, or it's a bug
raise TemplateSyntaxError("'blocktrans' is unable to format "
"string returned by gettext: %r using %r" % (result, data))
with translation.override(None):
result = self.render(context, nested=True)
return result
class LanguageNode(Node):
def __init__(self, nodelist, language):
self.nodelist = nodelist
self.language = language
def render(self, context):
with translation.override(self.language.resolve(context)):
output = self.nodelist.render(context)
return output
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
    This will just pull the LANGUAGES setting from
    your settings file (or the default settings) and
    put it into the named variable.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style list (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])
@register.filter
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
@register.filter
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
@register.filter
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
    This will fetch the currently active language and
    put its value into the ``language`` context
    variable.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
    This will fetch the currently active language's layout and
    put its value into the ``bidi`` context variable.
    True indicates right-to-left layout; otherwise it is left-to-right.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
message_string = parser.compile_filter(bits[1])
remaining = bits[2:]
noop = False
asvar = None
message_context = None
seen = set()
invalid_context = {'as', 'noop'}
while remaining:
option = remaining.pop(0)
if option in seen:
raise TemplateSyntaxError(
"The '%s' option was specified more than once." % option,
)
elif option == 'noop':
noop = True
elif option == 'context':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the context option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
if value in invalid_context:
raise TemplateSyntaxError(
"Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]),
)
message_context = parser.compile_filter(value)
elif option == 'as':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the as option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError(
"Unknown argument for '%s' tag: '%s'. The only options "
"available are 'noop', 'context' \"xxx\", and 'as VAR'." % (
bits[0], option,
)
)
seen.add(option)
return TranslateNode(message_string, noop, asvar, message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
msg = (
'"context" in %r tag expected '
'exactly one argument.') % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
elif option == "trimmed":
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = list(six.iteritems(options['count']))[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
trimmed = options.get("trimmed", False)
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context, trimmed=trimmed)
@register.tag
def language(parser, token):
"""
This will enable the given language just for this block.
Usage::
{% language "de" %}
This is {{ bar }} and {{ boo }}.
{% endlanguage %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
language = parser.compile_filter(bits[1])
nodelist = parser.parse(('endlanguage',))
parser.delete_first_token()
return LanguageNode(nodelist, language)
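# Illustrative sketch (not part of Django): exercising the tags above through a
# standalone template; assumes Django settings are already configured.
def _demo_render_trans():
    from django.template import Context, Template
    template = Template(
        "{% load i18n %}{% trans 'this is a test' as greeting %}{{ greeting }}"
    )
    return template.render(Context({}))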
|
|
#!/usr/bin/env python
# coding: utf-8
DESCRIPTION="This script applies clustering"
import numpy as np
import argparse
import glob2
import logging
import re
from os.path import basename
from os.path import dirname
import sys
sys.path.append(dirname(__file__))
from my_target_counter import TargetCounter
logger = logging.getLogger(__file__)
# HOW TO ADD A NEW ALGORITHM
# 1. Add a branch for your algorithm to 'get_model'.
# 2. Add a branch to 'my_fit_predict' if the algorithm has an outlier cluster
#    and its outlier label is not 0.
#from memory_profiler import profile
#@profile
def main(args):
src_dir = args.src_dir
dest_dir = args.dest_dir
src_pat = "W_(\d{3}).csv$"
tar_template = "y_%s.dat"
tc=TargetCounter(src_pat,tar_template,src_dir,dest_dir)
target_ids,src_files = tc.listup_targets()
n_targets = len(target_ids)
    if args.count_targets:
        print(n_targets)
        sys.exit()
    if n_targets == 0:
        logger.warning("There are no unprocessed source files in '%s'" % src_dir)
        sys.exit()
model = get_model(args)
    epsilon = 0.1 ** 100
    for target_id, src_file in zip(target_ids, src_files):
        dest_file = "%s/%s" % (args.dest_dir, tc.id2destfile(target_id))
print(src_file)
W = np.loadtxt(src_file,delimiter=",")
if 'affinity' in src_dir:
W[W<epsilon]=epsilon
y = my_fit_predict(model,W,args)
np.savetxt(dest_file,y,fmt="%d")
def my_fit_predict(model,X,args):
alg = args.algorithm
y = model.fit_predict(X)
if alg == 'DBSCAN':
        y = y + 1  # DBSCAN labels outliers -1; shift so that outliers get label 0.
return y
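# Tiny illustrative check of the DBSCAN label shift above: raw labels use -1
# for outliers, and the shift re-bases them so outliers become cluster 0.
def _demo_dbscan_label_shift():
    raw = np.array([-1, 0, 0, 1, -1])
    return raw + 1  # -> array([0, 1, 1, 2, 0])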
def get_model(args):
alg = args.algorithm
if alg=='SC':
from sklearn.cluster import SpectralClustering
model = SpectralClustering(\
n_clusters=args.n_clusters,\
eigen_solver='arpack',\
random_state=None,\
affinity='precomputed',\
assign_labels='discretize',
n_jobs=1)
elif alg=='IDC':
from isolated_dense_clustering import IsolatedDenseClustering
search_range = range(args.min_clusters,args.max_clusters)
model = IsolatedDenseClustering(\
search_range=search_range, \
affinity='precomputed', \
assign_labels='discretize',\
n_jobs=1,\
eigen_solver='arpack', \
random_state=None)
elif alg=='SG':
import spectral_gap
model = spectral_gap.SpectralClusteringSG(
max_clusters=args.max_clusters,\
eigen_solver='arpack',\
random_state=None,\
affinity='precomputed',\
assign_labels='discretize')
elif alg=='STSC':
import stsc_wrapper
model = stsc_wrapper.SelfTuningSpectralClustering(n_clusters_max=args.max_clusters)
    elif alg=='MODULARITY':
        raise NotImplementedError("MODULARITY clustering is not implemented yet.")
    elif alg=='SEA':
        raise NotImplementedError("SEA clustering is not implemented yet.")
elif alg=='DBSCAN':
from sklearn.cluster import DBSCAN
model = DBSCAN(
eps=args.eps,
min_samples=args.min_samples,
metric="precomputed")
    else:
        logger.warning("Unknown algorithm '%s' was specified." % alg)
        sys.exit(1)
return model
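# Hypothetical sketch: building a model the same way main() does, from an
# argparse-style namespace (only the fields used by the chosen branch matter).
def _demo_get_model():
    ns = argparse.Namespace(algorithm='DBSCAN', eps=0.5, min_samples=3)
    return get_model(ns)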
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('algorithm', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=str, \
choices=None, \
help='Clustering algorithm (SC|IDC|SG|STSC|MODULARITY|SEA|DBSCAN).', \
metavar=None)
parser.add_argument('src_dir', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=str, \
choices=None, \
help='Directory path where the source data are located.', \
metavar=None)
parser.add_argument('dest_dir', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=str, \
choices=None, \
help='Directory path where the formatted data will be located.', \
metavar=None)
parser.add_argument('--n_clusters', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=int, \
choices=None, \
help='Number of clusters.', \
metavar=None)
parser.add_argument('--min_clusters',\
action='store', \
nargs=None, \
const=None, \
default=3, \
type=int, \
choices=None, \
help='Minimum number of clusters to set the search range.', \
metavar=None
)
parser.add_argument('--max_clusters',\
action='store', \
nargs=None, \
const=None, \
default=20, \
type=int, \
choices=None, \
help='Maximum number of clusters to set the search range.', \
metavar=None
)
parser.add_argument('--eps',\
action='store', \
nargs=None, \
const=None, \
default=0.5, \
type=float, \
choices=None, \
help='eps for DBSCAN.', \
metavar=None
)
parser.add_argument('--min_samples',\
action='store', \
nargs=None, \
const=None, \
default=3, \
type=int, \
choices=None, \
help='min_samples for DBSCAN.', \
metavar=None
)
parser.add_argument('--count_targets',\
action="store_true", default=False, help='count processing targets, and exit.')
if __name__ == '__main__':
args = parser.parse_args()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
logger.addHandler(sh)
main(args)
|
|
# Copyright (c) 2016-2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from functools import cmp_to_key
from urwid import *
from .decor_widgets import PatternBox
from .graphics import fz_box_pattern
class TableRowsListWalker(ListWalker):
def __init__(self, table, sort=None):
self.table = table
self.sort = sort
self.focus = 0
self.rows = []
super().__init__()
def __getitem__(self, position):
if position < 0 or position >= len(self.rows):
raise IndexError
return self.rows[position]
def __delitem__(self, index):
if -1 < index < len(self.rows):
del self.rows[index]
self._modified()
def __len__(self):
return len(self.rows)
def add(self, item):
self.rows.append(item)
self._modified()
def insert(self, *args):
self.rows.insert(*args)
self._modified()
def clear(self):
self.focus = 0
del self.rows[:]
def remove(self, value):
self.rows.remove(value)
def next_position(self, position):
index = position + 1
if position >= len(self.rows):
raise IndexError
return index
def prev_position(self, position):
index = position - 1
if position < 0:
raise IndexError
return index
def set_focus(self, position):
self.rows[self.focus].unhighlight()
self.focus = position
self.rows[self.focus].highlight()
def set_sort_column(self, column, **kwargs):
self._modified()
# The original version contained two columns: the row contents and a scrollbar.
class ScrollingListBox(WidgetWrap):
signals = ['select', 'load_more']
def __init__(self, body, infinite=False):
self.infinite = infinite
self.requery = False
self.height = 0
self.listbox = ListBox(body)
self.body = self.listbox.body
self.ends_visible = self.listbox.ends_visible
super().__init__(self.listbox)
def keypress(self, size, key):
if key == 'home':
if self.body: # len(self.body) != 0
self.focus_position = 0
self._invalidate()
return key
if key == 'end':
if self.body: # len(self.body) != 0
self.focus_position = len(self.body) - 1
self._invalidate()
return key
if key in ['page down', 'down'] and self.infinite and self.focus_position == len(self.body) - 1:
self.requery = True
self._invalidate()
return None
if key == 'enter':
if self.body: # len(self.body) != 0
emit_signal(self, 'select', self, self.selection)
return None
if key == 'left':
return None
return super().keypress(size, key)
def render(self, size, focus=False):
maxcol, maxrow = size
if self.requery and 'bottom' in self.ends_visible((maxcol, maxrow)):
self.requery = False
emit_signal(self, 'load_more', len(self.body))
self.height = maxrow
return super().render((maxcol, maxrow), focus)
@property
def focus(self):
return self.listbox.focus
@property
def focus_position(self):
if self.listbox.body: # len(self.listbox.body) != 0
return self.listbox.focus_position
return 0
@focus_position.setter
def focus_position(self, value):
self.listbox.focus_position = value
self.listbox._invalidate()
@property
def row_count(self):
return len(self.listbox.body)
@property
def selection(self):
if self.body: # len(self.body) != 0
return self.body[self.focus_position]
return None
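# Minimal illustrative construction of the list box above; SimpleListWalker and
# Text come from the wildcard urwid import.
def _demo_scrolling_listbox():
    body = SimpleListWalker([Text('row %d' % i) for i in range(3)])
    return ScrollingListBox(body)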
class TableColumn(object):
align = 'left'
wrap = 'space'
padding = None
def __init__(self, name, label=None, width=('weight', 1),
format_fn=None,
sort_key=None, sort_fn=None, sort_reverse=False):
self.name = name
self.label = label if label else name
self.format_fn = format_fn
self.sort_key = sort_key
self.sort_fn = sort_fn
self.sort_reverse = sort_reverse
self.sizing, self.width = width
def _format(self, v):
if isinstance(v, str):
return Text(v, align=self.align, wrap=self.wrap)
# First, call the format function for the column, if there is one
if self.format_fn:
try:
v = self.format_fn(v)
except TypeError:
return Text('', align=self.align, wrap=self.wrap)
return self.format(v)
def format(self, v):
# Do our best to make the value into something presentable
if v is None:
v = ''
elif isinstance(v, int):
v = '%d' % v
elif isinstance(v, float):
v = '%.03f' % v
        # If v doesn't match any of the previous options, then it might be a Widget.
if not isinstance(v, Widget):
return Text(v, align=self.align, wrap=self.wrap)
return v
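# Quick illustrative check of TableColumn formatting: plain values are coerced
# into urwid Text widgets, while existing widgets pass through unchanged.
def _demo_format_column():
    col = TableColumn('count', label='Count')
    return col.format(3.14159)  # -> Text widget rendering '3.142'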
class HeaderColumns(Columns):
def __init__(self, contents):
self.selected_column = None
super().__init__(contents)
def __setitem__(self, i, v):
self.contents[i * 2] = (v, self.contents[i * 2][1])
class BodyColumns(Columns):
def __init__(self, contents, header=None):
self.header = header
super().__init__(contents)
@property
def selected_column(self):
return self.header.selected_column
@selected_column.setter
def selected_column(self, value):
self.header.selected_column = value
class TableCell(WidgetWrap):
signals = ['click', 'select']
def __init__(self, table, column, row, value):
self.table = table
self.column = column
self.row = row
self.value = value
self.contents = self.column._format(self.value)
padding = self.column.padding or self.table.padding
self.padding = Padding(self.contents, left=padding, right=padding)
self.attr = AttrMap(self.padding, attr_map=row.attr_map, focus_map=row.focus_map)
super().__init__(self.attr)
def selectable(self):
return isinstance(self.row, TableBodyRow)
def highlight(self):
self.attr.set_attr_map(self.row.focus_map)
def unhighlight(self):
self.attr.set_attr_map(self.row.attr_map)
def set_attr_map(self, attr_map):
self.attr.set_attr_map(attr_map)
def set_focus_map(self, focus_map):
self.attr.set_focus_map(focus_map)
    def keypress(self, size, key):
        if key == 'enter':
            emit_signal(self, 'select')
        # Always return the key so parent widgets can process it as well;
        # swallowing unhandled keys here would break list navigation.
        return key
    # Override mouse_event; the parameter list is fixed by the urwid Widget API.
def mouse_event(self, size, event, button, col, row, focus):
if event == 'mouse press':
emit_signal(self, 'click')
class TableRow(WidgetWrap):
attr_map = {}
focus_map = {}
border_char = ' '
column_class = Columns # To be redefined by subclasses.
decorate = True
_selectable = True
def __init__(self, table, data,
header=None,
cell_click=None, cell_select=None,
attr_map=None, focus_map=None):
self.table = table
if isinstance(data, (list, tuple)):
self.data = dict(zip([c.name for c in self.table.columns], data))
elif isinstance(data, dict):
self.data = data
self.header = header
self.cell_click = cell_click
self.cell_select = cell_select
self.contents = []
        if self.decorate:
            if attr_map:
                self.attr_map = attr_map
            elif table.attr_map:
                # Copy first so the shared class-level dict is not mutated.
                self.attr_map = dict(self.attr_map)
                self.attr_map.update(table.attr_map)
            if focus_map:
                self.focus_map = focus_map
            elif table.focus_map:
                self.focus_map = dict(self.focus_map)
                self.focus_map.update(table.focus_map)
        # Build the (sizing, cell) tuples that describe each column.
for i, col in enumerate(self.table.columns):
lst = []
if col.sizing == 'weight':
lst.extend([col.sizing, col.width])
else:
lst.append(col.width)
cell = TableCell(self.table, col, self, self.data.get(col.name, None))
if self.cell_click:
connect_signal(cell, 'click', self.cell_click, i * 2)
if self.cell_select:
connect_signal(cell, 'select', self.cell_select, i * 2)
lst.append(cell)
self.contents.append(tuple(lst))
if isinstance(table.border, tuple):
border_width = table.border[0]
elif isinstance(table.border, int):
border_width = table.border
else:
raise Exception('Invalid border specification: %s' % table.border)
self.row = self.column_class(self.contents)
if self.header:
self.row.header = self.header
self.row.selected_column = None
# content sep content sep ...
self.row.contents = sum(([x, (Divider(self.border_char), ('given', border_width, False))] for x in self.row.contents), [])
self.attr = AttrMap(self.row, attr_map=self.attr_map, focus_map=self.focus_map)
super().__init__(self.attr)
def __len__(self):
return len(self.contents)
def __getitem__(self, key):
return self.data.get(key, None)
def __iter__(self):
return iter(self.data)
def __setitem__(self, i, v):
self.row.contents[i * 2] = (v, self.row.options(self.table.columns[i].sizing,
self.table.columns[i].width))
@property
def focus(self):
return self.row.focus
def set_attr_map(self, attr_map):
self.attr.set_attr_map(attr_map)
def set_focus_map(self, focus_map):
self.attr.set_focus_map(focus_map)
def get(self, key, default):
if key in self:
return self[key]
return default
    def _key(self):
        # key_columns may hold TableColumn objects; row data is keyed by name.
        return frozenset([self.get(getattr(c, 'name', c), None)
                          for c in self.table.key_columns])
def cell(self, i):
return self.row[i * 2]
def highlight(self):
for x in self.contents:
x[-1].highlight()
def unhighlight(self):
for x in self.contents:
x[-1].unhighlight()
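# Illustrative: TableRow supports the mapping protocol over its row data,
# e.g. row['name'] or row.get('size', 0), while cell(i) returns the i-th
# TableCell widget.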
class TableBodyRow(TableRow):
column_class = BodyColumns
attr_map = {None: 'default'}
focus_map = {None: 'selected'}
class TableHeaderRow(TableRow):
signals = ['column_click']
column_class = HeaderColumns
decorate = False
def __init__(self, table, *args, **kwargs):
self.row = None
self.attr_map = {None: 'table_head'}
self.focus_map = {None: 'table_head'}
self.table = table
self.contents = [str(x.label) for x in self.table.columns]
super().__init__(
self.table,
self.contents,
cell_click=self.header_clicked,
cell_select=self.header_clicked,
*args, **kwargs)
@property
def selected_column(self):
return self.row.selected_column
@selected_column.setter
def selected_column(self, value):
self.row.selected_column = value
def header_clicked(self, index):
emit_signal(self, 'column_click', index)
def highlight_column(self, index):
if self.selected_column is not None:
self.row[self.selected_column].unhighlight()
self.row[index].highlight()
self.selected_column = index
class Table(WidgetWrap):
signals = ['select', 'refresh', 'focus', 'delete']
attr_map = {}
focus_map = {}
row_dict = {}
title = ''
columns = []
query_data = []
key_columns = None
sort_field = None
_selectable = True
    def __init__(self, initial_sort=None, limit=None):
        # Use a per-instance dict; the class-level row_dict would be shared
        # across every Table instance.
        self.row_dict = {}
        self.border = (1, ' ', 'table_border')
        self.padding = 1
self.initial_sort = initial_sort
self.limit = limit
if not self.key_columns:
self.key_columns = self.columns
self.walker = TableRowsListWalker(self, sort=self.initial_sort)
self.listbox = ScrollingListBox(self.walker, infinite=self.limit)
self.selected_column = None
self.sort_reverse = False
# Forward 'select' signal to the caller of table.
connect_signal(self.listbox, 'select',
lambda source, selection: emit_signal(self, 'select', self, selection))
if self.limit:
connect_signal(self.listbox, 'load_more', self.load_more)
self.offset = 0
self.header = TableHeaderRow(self)
self.pile = Pile([('pack', self.header),
('weight', 1, self.listbox)])
self.pattern_box = PatternBox(self.pile, title=['[', ('border_title', ' {title} (0) '.format(title=self.title)), ']'], **fz_box_pattern())
self.attr = AttrMap(self.pattern_box, attr_map=self.attr_map)
super().__init__(self.attr)
connect_signal(self.header, 'column_click', lambda index: self.sort_by_column(index, toggle=True))
if self.initial_sort and self.initial_sort in [c.name for c in self.columns]:
self.sort_by_column(self.initial_sort, toggle=False)
else:
self.requery(self.query_data)
def update_header(self):
self.pattern_box.set_title(['[', ('border_title', ' {title} ({cnt}) '.format(title=self.title, cnt=len(self.walker))), ']'])
def __delitem__(self, i):
del self.body[i]
def __iter__(self):
return iter(self.body)
def __len__(self):
return len(self.body)
def __getitem__(self, i):
return self.body[i]
def __setitem__(self, i, v):
self.body[i] = v
def insert(self, i, v):
self.body.insert(i, v)
@property
def body(self):
return self.listbox.body
@property
def contents(self):
return self.listbox.listbox.contents
@property
def focus(self):
return self.listbox.focus
@property
def height(self):
        return len(self.body) + 1  # data rows plus the header row
@property
def focus_position(self):
return self.listbox.focus_position
@focus_position.setter
def focus_position(self, value):
self.listbox.focus_position = value
@property
def selection(self):
if self.body: # len(self.body) != 0
return self.body[self.focus_position]
return None
    def add_row(self, data, position=None, attr_map=None, focus_map=None):
        row = TableBodyRow(self, data, header=self.header.row, attr_map=attr_map, focus_map=focus_map)
        if '_id' in data:
            self.row_dict[data['_id']] = row
        if position is None:
            self.walker.add(row)
        else:
            self.walker.insert(position, row)
self.update_header()
    def update_row_style(self, row_id, attr_map, focus_map):
        row = self.row_dict[row_id]
        if not self.attr_map:
            row.attr_map = attr_map
        else:
            # Copy the table-wide map before applying row-specific overrides,
            # so the shared dict is not mutated.
            row.attr_map = dict(self.attr_map)
            row.attr_map.update(attr_map)
        if not self.focus_map:
            row.focus_map = focus_map
        else:
            row.focus_map = dict(self.focus_map)
            row.focus_map.update(focus_map)
        row._wrapped_widget.set_attr_map(row.attr_map)
        row._wrapped_widget.set_focus_map(row.focus_map)
def clear(self):
self.listbox.body.clear()
def highlight_column(self, index):
self.header.highlight_column(index)
    def load_more(self, offset):
        self.requery(self.query_data, offset)
        self._invalidate()
        self.listbox._invalidate()
    # These two methods may be overridden in subclasses.
def query(self, data, sort=(None, None), offset=None):
sort_field, sort_reverse = sort
if sort_field:
def sort_natural_none_last(a, b):
if a is None:
return 1
if b is None:
return -1
return (a > b) - (a < b)
            def sort_reverse_none_last(a, b):
                if a is None:
                    return 1
                if b is None:
                    return -1
                # Reversed comparison; None still sorts last.
                return (b > a) - (b < a)
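            # For example, sorting [5, None, 2] on this field yields
            # [2, 5, None] in natural order and [5, 2, None] in reverse:
            # None is always last, so missing cells never float to the top.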
if not sort_reverse:
sort_fn = cmp_to_key(sort_natural_none_last)
else:
sort_fn = cmp_to_key(sort_reverse_none_last)
data.sort(key=lambda x: sort_fn(x[sort_field]))
if offset is not None:
r = data[offset:offset + self.limit]
else:
r = data
for d in r:
yield d
def requery(self, data, offset=0):
kwargs = {'sort': (self.sort_field, self.sort_reverse)}
if self.limit:
kwargs['offset'] = offset
if not offset:
self.clear()
if self.selected_column is not None:
self.highlight_column(self.selected_column)
for r in self.query(data, **kwargs):
if isinstance(r, (tuple, list)):
r = dict(zip([c.name for c in self.columns], r))
self.add_row(r)
self.update_header()
def sort_by_column(self, index=None, reverse=None, toggle=False):
if index is None:
if self.sort_field is None:
return
index = self.sort_field
if isinstance(index, str):
sort_field = index
for i, col in enumerate(self.columns):
if col.name == sort_field:
index = i * 2
break
else:
sort_field = self.columns[index // 2].name
if not isinstance(index, int):
raise Exception('invalid column index: %s' % index)
if reverse is not None:
self.sort_reverse = reverse ^ self.columns[index // 2].sort_reverse
elif not toggle or sort_field != self.sort_field:
self.sort_reverse = self.columns[index // 2].sort_reverse
else:
self.sort_reverse = not self.sort_reverse
self.sort_field = sort_field
self.selected_column = index
self.walker.set_sort_column(self.columns[index // 2], reverse=self.sort_reverse)
self.requery(self.query_data)
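# Illustrative sketch (not part of the original module): a minimal concrete
# Table. The column names and rows below are hypothetical; a real subclass
# would point query_data at its own data source.
class _ExampleProcessTable(Table):
    title = 'Processes'
    columns = [
        TableColumn('pid', label='PID', width=('given', 8)),
        TableColumn('name', label='Name'),
    ]
    query_data = [
        {'pid': 1, 'name': 'init'},
        {'pid': 42, 'name': 'worker'},
    ]
    key_columns = ['pid']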
|
|
"""
sentry.web.forms.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from datetime import datetime
import pytz
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from sentry import options
from sentry.auth import password_validation
from sentry.app import ratelimiter, newsletter
from sentry.constants import LANGUAGES
from sentry.models import (
Organization, OrganizationStatus, User, UserOption, UserOptionValue
)
from sentry.security import capture_security_activity
from sentry.utils.auth import find_users, logger
from sentry.web.forms.fields import ReadOnlyTextField
from six.moves import range
def _get_timezone_choices():
results = []
for tz in pytz.common_timezones:
now = datetime.now(pytz.timezone(tz))
offset = now.strftime('%z')
results.append((int(offset), tz, '(GMT%s) %s' % (offset, tz)))
results.sort()
for i in range(len(results)):
results[i] = results[i][1:]
return results
TIMEZONE_CHOICES = _get_timezone_choices()
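# Each entry pairs the tz database name with a display label, ordered by UTC
# offset at import time, e.g. ('Europe/Berlin', '(GMT+0100) Europe/Berlin');
# the offset shown varies with DST.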
class AuthenticationForm(forms.Form):
username = forms.CharField(
label=_('Account'), max_length=128, widget=forms.TextInput(
attrs={'placeholder': _('username or email'),
}),
)
password = forms.CharField(
label=_('Password'), widget=forms.PasswordInput(
attrs={'placeholder': _('password'),
}),
)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'rate_limited': _("You have made too many failed authentication "
"attempts. Please try again later."),
'no_cookies': _("Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if not self.fields['username'].label:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean_username(self):
value = (self.cleaned_data.get('username') or '').strip()
if not value:
return
return value.lower()
def is_rate_limited(self):
if self._is_ip_rate_limited():
return True
if self._is_user_rate_limited():
return True
return False
def _is_ip_rate_limited(self):
limit = options.get('auth.ip-rate-limit')
if not limit:
return False
ip_address = self.request.META['REMOTE_ADDR']
return ratelimiter.is_limited(
'auth:ip:{}'.format(ip_address),
limit,
)
def _is_user_rate_limited(self):
limit = options.get('auth.user-rate-limit')
if not limit:
return False
username = self.cleaned_data.get('username')
if not username:
return False
return ratelimiter.is_limited(
u'auth:username:{}'.format(username),
limit,
)
def clean(self):
username = self.cleaned_data.get('username')
if self.is_rate_limited():
logger.info('user.auth.rate-limited', extra={
'ip_address': self.request.META['REMOTE_ADDR'],
'username': username,
})
raise forms.ValidationError(self.error_messages['rate_limited'])
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'] % {
'username': self.username_field.verbose_name
})
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
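# Illustrative sketch (not part of the original module): how a login view
# might drive AuthenticationForm. The wiring below is hypothetical; Sentry's
# real login view performs additional bookkeeping.
def _example_login_view(request):
    form = AuthenticationForm(request, request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # clean() has already authenticated and rate-limit-checked the user.
        return form.get_user()
    # Required by check_for_test_cookie() on the next POST.
    request.session.set_test_cookie()
    return form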
class RegistrationForm(forms.ModelForm):
username = forms.EmailField(
label=_('Email'), max_length=128,
widget=forms.TextInput(attrs={'placeholder': 'you@example.com'}))
password = forms.CharField(
widget=forms.PasswordInput(attrs={'placeholder': 'something super secret'}))
subscribe = forms.BooleanField(
label=_('Subscribe to product updates newsletter'),
required=False,
initial=True,
)
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
if not newsletter.enabled:
del self.fields['subscribe']
class Meta:
fields = ('username',)
model = User
def clean_username(self):
value = (self.cleaned_data.get('username') or '').strip()
if not value:
return
if User.objects.filter(username__iexact=value).exists():
raise forms.ValidationError(_('An account is already registered with that email address.'))
return value.lower()
def clean_password(self):
password = self.cleaned_data['password']
password_validation.validate_password(password)
return password
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.email = user.username
user.set_password(self.cleaned_data['password'])
if commit:
user.save()
if self.cleaned_data.get('subscribe'):
newsletter.create_or_update_subscription(
user, list_id=newsletter.DEFAULT_LIST_ID)
return user
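# Illustrative: RegistrationForm keeps username and email in sync, so saving
# {'username': 'dev@example.com', 'password': ...} yields a User whose
# username and email are both 'dev@example.com'.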
class RecoverPasswordForm(forms.Form):
user = forms.CharField(label=_('Username or email'))
def clean_user(self):
value = (self.cleaned_data.get('user') or '').strip()
if not value:
return
users = find_users(value, with_valid_password=False)
if not users:
raise forms.ValidationError(_("We were unable to find a matching user."))
users = [u for u in users if not u.is_managed]
if not users:
raise forms.ValidationError(_("The account you are trying to recover is managed and does not support password recovery."))
if len(users) > 1:
raise forms.ValidationError(_("Multiple accounts were found matching this email address."))
return users[0]
class ChangePasswordRecoverForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput())
def clean_password(self):
password = self.cleaned_data['password']
password_validation.validate_password(password)
return password
class EmailForm(forms.Form):
primary_email = forms.EmailField(label=_('Primary Email'))
alt_email = forms.EmailField(
label=_('New Email'),
required=False,
help_text='Designate an alternative email for this account',
)
password = forms.CharField(
label=_('Current password'),
widget=forms.PasswordInput(),
help_text=_('You will need to enter your current account password to make changes.'),
required=True,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(EmailForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if not needs_password:
del self.fields['password']
def save(self, commit=True):
if self.cleaned_data['primary_email'] != self.user.email:
new_username = self.user.email == self.user.username
else:
new_username = False
self.user.email = self.cleaned_data['primary_email']
if new_username and not User.objects.filter(username__iexact=self.user.email).exists():
self.user.username = self.user.email
if commit:
self.user.save()
return self.user
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError(_('The password you entered is not correct.'))
elif not value:
raise forms.ValidationError(_('You must confirm your current password to make changes.'))
return value
class AccountSettingsForm(forms.Form):
name = forms.CharField(required=True, label=_('Name'), max_length=30)
username = forms.CharField(label=_('Username'), max_length=128)
email = forms.EmailField(label=_('Email'))
new_password = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput(),
required=False,
# help_text=password_validation.password_validators_help_text_html(),
)
password = forms.CharField(
label=_('Current password'),
widget=forms.PasswordInput(),
help_text='You will need to enter your current account password to make changes.',
required=False,
)
def __init__(self, user, request, *args, **kwargs):
self.user = user
self.request = request
super(AccountSettingsForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if self.user.is_managed:
# username and password always managed, email and
# name optionally managed
for field in ('email', 'name', 'username'):
if field == 'username' or field in settings.SENTRY_MANAGED_USER_FIELDS:
self.fields[field] = ReadOnlyTextField(label=self.fields[field].label)
if field == 'email':
needs_password = False
del self.fields['new_password']
# don't show username field if its the same as their email address
if self.user.email == self.user.username:
del self.fields['username']
if not needs_password:
del self.fields['password']
def is_readonly(self):
if self.user.is_managed:
return set(('email', 'name')) == set(settings.SENTRY_MANAGED_USER_FIELDS)
return False
def _clean_managed_field(self, field):
if self.user.is_managed and (field == 'username' or
field in settings.SENTRY_MANAGED_USER_FIELDS):
return getattr(self.user, field)
return self.cleaned_data[field]
def clean_email(self):
return self._clean_managed_field('email')
def clean_name(self):
return self._clean_managed_field('name')
def clean_username(self):
value = self._clean_managed_field('username')
if User.objects.filter(username__iexact=value).exclude(id=self.user.id).exists():
raise forms.ValidationError(_("That username is already in use."))
return value
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError('The password you entered is not correct.')
elif not value and (
self.cleaned_data.get('email', self.user.email) != self.user.email
or self.cleaned_data.get('new_password')
):
raise forms.ValidationError('You must confirm your current password to make changes.')
return value
def clean_new_password(self):
new_password = self.cleaned_data.get('new_password')
if new_password:
password_validation.validate_password(new_password)
return new_password
def save(self, commit=True):
if self.cleaned_data.get('new_password'):
self.user.set_password(self.cleaned_data['new_password'])
self.user.refresh_session_nonce(self.request)
capture_security_activity(
account=self.user,
type='password-changed',
actor=self.request.user,
ip_address=self.request.META['REMOTE_ADDR'],
send_email=True,
)
self.user.name = self.cleaned_data['name']
if self.cleaned_data['email'] != self.user.email:
new_username = self.user.email == self.user.username
else:
new_username = False
self.user.email = self.cleaned_data['email']
if self.cleaned_data.get('username'):
self.user.username = self.cleaned_data['username']
elif new_username and not User.objects.filter(username__iexact=self.user.email).exists():
self.user.username = self.user.email
if commit:
self.user.save()
return self.user
class AppearanceSettingsForm(forms.Form):
language = forms.ChoiceField(
label=_('Language'), choices=LANGUAGES, required=False,
widget=forms.Select(attrs={'class': 'input-xlarge'}))
stacktrace_order = forms.ChoiceField(
label=_('Stacktrace order'), choices=(
('-1', _('Default (let Sentry decide)')),
('1', _('Most recent call last')),
('2', _('Most recent call first')),
), help_text=_('Choose the default ordering of frames in stacktraces.'),
required=False,
widget=forms.Select(attrs={'class': 'input-xlarge'}))
timezone = forms.ChoiceField(
label=_('Time zone'), choices=TIMEZONE_CHOICES, required=False,
widget=forms.Select(attrs={'class': 'input-xxlarge'}))
clock_24_hours = forms.BooleanField(
label=_('Use a 24-hour clock'),
required=False,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AppearanceSettingsForm, self).__init__(*args, **kwargs)
def save(self):
# Save user language
UserOption.objects.set_value(
user=self.user,
project=None,
key='language',
value=self.cleaned_data['language'],
)
# Save stacktrace options
UserOption.objects.set_value(
user=self.user,
project=None,
key='stacktrace_order',
value=self.cleaned_data['stacktrace_order'],
)
# Save time zone options
UserOption.objects.set_value(
user=self.user,
project=None,
key='timezone',
value=self.cleaned_data['timezone'],
)
# Save clock 24 hours option
UserOption.objects.set_value(
user=self.user,
project=None,
key='clock_24_hours',
value=self.cleaned_data['clock_24_hours'],
)
return self.user
class NotificationReportSettingsForm(forms.Form):
organizations = forms.ModelMultipleChoiceField(
queryset=Organization.objects.none(),
required=False,
widget=forms.CheckboxSelectMultiple(),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(NotificationReportSettingsForm, self).__init__(*args, **kwargs)
org_queryset = Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
member_set__user=user,
)
disabled_orgs = set(UserOption.objects.get_value(
user=user,
project=None,
key='reports:disabled-organizations',
default=[],
))
self.fields['organizations'].queryset = org_queryset
self.fields['organizations'].initial = [
o.id for o in org_queryset
if o.id not in disabled_orgs
]
def save(self):
enabled_orgs = set((
o.id for o in self.cleaned_data.get('organizations')
))
all_orgs = set(self.fields['organizations'].queryset.values_list('id', flat=True))
UserOption.objects.set_value(
user=self.user,
project=None,
key='reports:disabled-organizations',
value=list(all_orgs.difference(enabled_orgs)),
)
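# Illustrative: a user in organizations {1, 2, 3} who checks only {1, 3}
# gets 'reports:disabled-organizations' stored as [2] (all orgs minus the
# enabled ones).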
class NotificationSettingsForm(forms.Form):
alert_email = forms.EmailField(
label=_('Email'),
help_text=_('Designate an alternative email address to send email notifications to.'),
required=False
)
subscribe_by_default = forms.BooleanField(
label=_('Automatically subscribe to alerts for new projects'),
help_text=_("When enabled, you'll automatically subscribe to alerts when you create or join a project."),
required=False,
)
workflow_notifications = forms.BooleanField(
label=_('Automatically subscribe to workflow notifications for new projects'),
help_text=_("When enabled, you'll automatically subscribe to workflow notifications when you create or join a project."),
required=False,
)
self_notifications = forms.BooleanField(
label=_('Receive notifications about my own activity'),
help_text=_('Enable this if you wish to receive emails for your own actions, as well as others.'),
required=False,
)
self_assign_issue = forms.BooleanField(
label=_('Claim unassigned issues when resolving them'),
help_text=_("When enabled, you'll automatically be assigned to unassigned issues when marking them as resolved."),
required=False,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(NotificationSettingsForm, self).__init__(*args, **kwargs)
self.fields['alert_email'].initial = UserOption.objects.get_value(
user=self.user,
project=None,
key='alert_email',
default=user.email,
)
self.fields['subscribe_by_default'].initial = (
UserOption.objects.get_value(
user=self.user,
project=None,
key='subscribe_by_default',
default='1',
) == '1'
)
self.fields['workflow_notifications'].initial = (
UserOption.objects.get_value(
user=self.user,
project=None,
key='workflow:notifications',
default=UserOptionValue.all_conversations,
) == UserOptionValue.all_conversations
)
self.fields['self_notifications'].initial = UserOption.objects.get_value(
user=self.user,
project=None,
key='self_notifications',
default='0'
) == '1'
self.fields['self_assign_issue'].initial = UserOption.objects.get_value(
user=self.user,
project=None,
key='self_assign_issue',
default='0'
) == '1'
def get_title(self):
return "General"
def save(self):
UserOption.objects.set_value(
user=self.user,
project=None,
key='alert_email',
value=self.cleaned_data['alert_email'],
)
UserOption.objects.set_value(
user=self.user,
project=None,
key='subscribe_by_default',
value='1' if self.cleaned_data['subscribe_by_default'] else '0',
)
UserOption.objects.set_value(
user=self.user,
project=None,
key='self_notifications',
value='1' if self.cleaned_data['self_notifications'] else '0',
)
UserOption.objects.set_value(
user=self.user,
project=None,
key='self_assign_issue',
value='1' if self.cleaned_data['self_assign_issue'] else '0',
)
if self.cleaned_data.get('workflow_notifications') is True:
UserOption.objects.set_value(
user=self.user,
project=None,
key='workflow:notifications',
value=UserOptionValue.all_conversations,
)
else:
UserOption.objects.set_value(
user=self.user,
project=None,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
class ProjectEmailOptionsForm(forms.Form):
alert = forms.BooleanField(required=False)
workflow = forms.BooleanField(required=False)
email = forms.ChoiceField(label="", choices=(), required=False,
widget=forms.Select())
def __init__(self, project, user, *args, **kwargs):
self.project = project
self.user = user
super(ProjectEmailOptionsForm, self).__init__(*args, **kwargs)
has_alerts = project.is_user_subscribed_to_mail_alerts(user)
has_workflow = project.is_user_subscribed_to_workflow(user)
# This allows users who have entered an alert_email value or have specified an email
# for notifications to keep their settings
emails = [e.email for e in user.get_verified_emails()]
alert_email = UserOption.objects.get_value(user=self.user, project=None, key='alert_email', default=None)
specified_email = UserOption.objects.get_value(user, project, 'mail:email', None)
emails.extend([user.email, alert_email, specified_email])
choices = [(email, email) for email in set(emails) if email is not None]
self.fields['email'].choices = choices
self.fields['alert'].initial = has_alerts
self.fields['workflow'].initial = has_workflow
self.fields['email'].initial = specified_email or alert_email or user.email
def save(self):
UserOption.objects.set_value(
self.user, self.project, 'mail:alert',
int(self.cleaned_data['alert']),
)
UserOption.objects.set_value(
self.user, self.project, 'workflow:notifications',
UserOptionValue.all_conversations if self.cleaned_data['workflow'] else UserOptionValue.participating_only,
)
if self.cleaned_data['email']:
UserOption.objects.set_value(
self.user, self.project, 'mail:email',
self.cleaned_data['email'],
)
else:
UserOption.objects.unset_value(
self.user, self.project, 'mail:email')
class TwoFactorForm(forms.Form):
otp = forms.CharField(
label=_('One-time password'), max_length=20, widget=forms.TextInput(
attrs={'placeholder': _('Code from authenticator'),
'autofocus': True,
}),
)
class ConfirmPasswordForm(forms.Form):
password = forms.CharField(
label=_('Sentry account password'),
widget=forms.PasswordInput(),
help_text='You will need to enter your current Sentry account password to make changes.',
required=True,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(ConfirmPasswordForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if not needs_password:
del self.fields['password']
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError(_('The password you entered is not correct.'))
elif not value:
raise forms.ValidationError(_('You must confirm your current password to make changes.'))
return value
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import fixtures
import mock
import six
import testtools
import uuid
import webob
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from sqlalchemy.orm import exc as sqla_exc
from neutron.callbacks import registry
from neutron.common import constants
from neutron.common import exceptions as exc
from neutron.common import utils
from neutron import context
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2 as base_plugin
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import external_net
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron.i18n import _
from neutron import manager
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers import type_vlan
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.services.qos import qos_consts
from neutron.tests import base
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
from neutron.tests.unit.db import test_allowedaddresspairs_db as test_pair
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts
from neutron.tests.unit.plugins.ml2.drivers import mechanism_logger as \
mech_logger
from neutron.tests.unit.plugins.ml2.drivers import mechanism_test as mech_test
config.cfg.CONF.import_opt('network_vlan_ranges',
'neutron.plugins.ml2.drivers.type_vlan',
group='ml2_type_vlan')
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
DEVICE_OWNER_COMPUTE = 'compute:None'
HOST = 'fake_host'
# TODO(marun) - Move to somewhere common for reuse
class PluginConfFixture(fixtures.Fixture):
"""Plugin configuration shared across the unit and functional tests."""
def __init__(self, plugin_name, parent_setup=None):
super(PluginConfFixture, self).__init__()
self.plugin_name = plugin_name
self.parent_setup = parent_setup
def _setUp(self):
if self.parent_setup:
self.parent_setup()
class Ml2ConfFixture(PluginConfFixture):
def __init__(self, parent_setup=None):
super(Ml2ConfFixture, self).__init__(PLUGIN_NAME, parent_setup)
class Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_mechanism_drivers = ['logger', 'test']
l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatServicePlugin')
def setup_parent(self):
"""Perform parent setup with the common plugin configuration class."""
service_plugins = {'l3_plugin_name': self.l3_plugin}
# Ensure that the parent setup can be called without arguments
# by the common configuration setUp.
parent_setup = functools.partial(
super(Ml2PluginV2TestCase, self).setUp,
plugin=PLUGIN_NAME,
service_plugins=service_plugins,
)
self.useFixture(Ml2ConfFixture(parent_setup))
self.port_create_status = 'DOWN'
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
self._mechanism_drivers,
group='ml2')
self.physnet = 'physnet1'
self.vlan_range = '1:100'
self.vlan_range2 = '200:300'
self.physnet2 = 'physnet2'
self.phys_vrange = ':'.join([self.physnet, self.vlan_range])
self.phys2_vrange = ':'.join([self.physnet2, self.vlan_range2])
config.cfg.CONF.set_override('network_vlan_ranges',
[self.phys_vrange, self.phys2_vrange],
group='ml2_type_vlan')
self.setup_parent()
self.driver = ml2_plugin.Ml2Plugin()
self.context = context.get_admin_context()
class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase):
_mechanism_drivers = ['logger', 'test']
def test_bulk_enabled_with_bulk_drivers(self):
self.assertFalse(self._skip_native_bulk)
class TestMl2SupportedQosRuleTypes(Ml2PluginV2TestCase):
def test_empty_driver_list(self, *mocks):
mech_drivers_mock = mock.PropertyMock(return_value=[])
with mock.patch.object(self.driver.mechanism_manager,
'ordered_mech_drivers',
new_callable=mech_drivers_mock):
self.assertEqual(
[], self.driver.mechanism_manager.supported_qos_rule_types)
def test_no_rule_types_in_common(self):
self.assertEqual(
[], self.driver.mechanism_manager.supported_qos_rule_types)
@mock.patch.object(mech_logger.LoggerMechanismDriver,
'supported_qos_rule_types',
new_callable=mock.PropertyMock,
create=True)
@mock.patch.object(mech_test.TestMechanismDriver,
'supported_qos_rule_types',
new_callable=mock.PropertyMock,
create=True)
def test_rule_type_in_common(self, *mocks):
# make sure both plugins have the same supported qos rule types
for mock_ in mocks:
mock_.return_value = qos_consts.VALID_RULE_TYPES
self.assertEqual(
qos_consts.VALID_RULE_TYPES,
self.driver.mechanism_manager.supported_qos_rule_types)
class TestMl2BasicGet(test_plugin.TestBasicGet,
Ml2PluginV2TestCase):
pass
class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse,
Ml2PluginV2TestCase):
pass
class TestMl2NetworksV2(test_plugin.TestNetworksV2,
Ml2PluginV2TestCase):
def setUp(self, plugin=None):
super(TestMl2NetworksV2, self).setUp()
# provider networks
self.pnets = [{'name': 'net1',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'},
{'name': 'net2',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
pnet.SEGMENTATION_ID: 210,
'tenant_id': 'tenant_one'},
{'name': 'net3',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
pnet.SEGMENTATION_ID: 220,
'tenant_id': 'tenant_one'}
]
# multiprovider networks
self.mp_nets = [{'name': 'net4',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
pnet.SEGMENTATION_ID: 202}],
'tenant_id': 'tenant_one'}
]
self.nets = self.mp_nets + self.pnets
def test_port_delete_helper_tolerates_failure(self):
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(plugin, "delete_port",
side_effect=exc.PortNotFound(port_id="123")):
plugin._delete_ports(mock.MagicMock(), [mock.MagicMock()])
with mock.patch.object(plugin, "delete_port",
side_effect=sqla_exc.ObjectDeletedError(None)):
plugin._delete_ports(mock.MagicMock(), [mock.MagicMock()])
def test_subnet_delete_helper_tolerates_failure(self):
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(plugin, "delete_subnet",
side_effect=exc.SubnetNotFound(subnet_id="1")):
plugin._delete_subnets(mock.MagicMock(), [mock.MagicMock()])
with mock.patch.object(plugin, "delete_subnet",
side_effect=sqla_exc.ObjectDeletedError(None)):
plugin._delete_subnets(mock.MagicMock(), [mock.MagicMock()])
def _create_and_verify_networks(self, networks):
for net_idx, net in enumerate(networks):
# create
req = self.new_create_request('networks',
{'network': net})
# verify
network = self.deserialize(self.fmt,
req.get_response(self.api))['network']
if mpnet.SEGMENTS not in net:
for k, v in six.iteritems(net):
self.assertEqual(net[k], network[k])
self.assertNotIn(mpnet.SEGMENTS, network)
else:
segments = network[mpnet.SEGMENTS]
expected_segments = net[mpnet.SEGMENTS]
self.assertEqual(len(expected_segments), len(segments))
for expected, actual in zip(expected_segments, segments):
self.assertEqual(expected, actual)
def _lookup_network_by_segmentation_id(self, seg_id, num_expected_nets):
params_str = "%s=%s" % (pnet.SEGMENTATION_ID, seg_id)
net_req = self.new_list_request('networks', None,
params=params_str)
networks = self.deserialize(self.fmt, net_req.get_response(self.api))
if num_expected_nets:
self.assertIsNotNone(networks)
self.assertEqual(num_expected_nets, len(networks['networks']))
else:
self.assertIsNone(networks)
return networks
def test_list_networks_with_segmentation_id(self):
self._create_and_verify_networks(self.pnets)
# verify we can find the network that we expect
lookup_vlan_id = 1
expected_net = [n for n in self.pnets
if n[pnet.SEGMENTATION_ID] == lookup_vlan_id].pop()
networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 1)
# verify all provider attributes
network = networks['networks'][0]
for attr in pnet.ATTRIBUTES:
self.assertEqual(expected_net[attr], network[attr])
def test_list_mpnetworks_with_segmentation_id(self):
self._create_and_verify_networks(self.nets)
# get all networks with seg_id=1 (including multisegment networks)
lookup_vlan_id = 1
networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 2)
# get the mpnet
networks = [n for n in networks['networks'] if mpnet.SEGMENTS in n]
network = networks.pop()
# verify attributes of the looked up item
segments = network[mpnet.SEGMENTS]
expected_segments = self.mp_nets[0][mpnet.SEGMENTS]
self.assertEqual(len(expected_segments), len(segments))
for expected, actual in zip(expected_segments, segments):
self.assertEqual(expected, actual)
def test_create_network_segment_allocation_fails(self):
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(plugin.type_manager, 'create_network_segments',
side_effect=db_exc.RetryRequest(ValueError())) as f:
self.assertRaises(ValueError,
plugin.create_network,
context.get_admin_context(),
{'network': {'tenant_id': 'sometenant',
'name': 'dummy',
'admin_state_up': True,
'shared': False}})
self.assertEqual(db_api.MAX_RETRIES + 1, f.call_count)
class TestExternalNetwork(Ml2PluginV2TestCase):
def _create_external_network(self):
data = {'network': {'name': 'net1',
'router:external': 'True',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
return network
def test_external_network_type_none(self):
config.cfg.CONF.set_default('external_network_type',
None,
group='ml2')
network = self._create_external_network()
        # For an external network, the network type is expected to fall back
        # to tenant_network_types, which defaults to 'local'.
self.assertEqual(p_const.TYPE_LOCAL,
network['network'][pnet.NETWORK_TYPE])
# No physical network specified, expected 'None'.
self.assertIsNone(network['network'][pnet.PHYSICAL_NETWORK])
# External network will not have a segmentation id.
self.assertIsNone(network['network'][pnet.SEGMENTATION_ID])
# External network will not have multiple segments.
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_external_network_type_vlan(self):
config.cfg.CONF.set_default('external_network_type',
p_const.TYPE_VLAN,
group='ml2')
network = self._create_external_network()
# For external network, expected network type to be 'vlan'.
self.assertEqual(p_const.TYPE_VLAN,
network['network'][pnet.NETWORK_TYPE])
# Physical network is expected.
self.assertIsNotNone(network['network'][pnet.PHYSICAL_NETWORK])
# External network will have a segmentation id.
self.assertIsNotNone(network['network'][pnet.SEGMENTATION_ID])
# External network will not have multiple segments.
self.assertNotIn(mpnet.SEGMENTS, network['network'])
class TestMl2NetworksWithVlanTransparencyAndMTU(TestMl2NetworksV2):
def setUp(self, plugin=None):
config.cfg.CONF.set_override('path_mtu', 1000, group='ml2')
config.cfg.CONF.set_override('segment_mtu', 1000, group='ml2')
config.cfg.CONF.set_override('advertise_mtu', True)
config.cfg.CONF.set_override('vlan_transparent', True)
super(TestMl2NetworksWithVlanTransparencyAndMTU, self).setUp(plugin)
def test_create_network_vlan_transparent_and_mtu(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
network = self.deserialize(self.fmt, res)['network']
        self.assertEqual(1000, network['mtu'])
self.assertIn('vlan_transparent', network)
class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
Ml2PluginV2TestCase):
def test_delete_subnet_race_with_dhcp_port_creation(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
subnet_id = subnet['subnet']['id']
attempt = [0]
def check_and_create_ports(context, subnet_id):
"""A method to emulate race condition.
Adds dhcp port in the middle of subnet delete
"""
if attempt[0] > 0:
return False
attempt[0] += 1
data = {'port': {'network_id': network['network']['id'],
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'device_owner':
constants.DEVICE_OWNER_DHCP,
'fixed_ips': [{'subnet_id': subnet_id}]}}
port_req = self.new_create_request('ports', data)
port_res = port_req.get_response(self.api)
self.assertEqual(201, port_res.status_int)
return (context.session.query(models_v2.IPAllocation).
filter_by(subnet_id=subnet_id).
join(models_v2.Port).first())
plugin = manager.NeutronManager.get_plugin()
                # Mock _subnet_check_ip_allocations with a method that creates
                # a DHCP port 'in the middle' of subnet_delete, triggering a
                # retry; the subnet is then deleted on the second attempt.
with mock.patch.object(plugin, '_subnet_check_ip_allocations',
side_effect=check_and_create_ports):
req = self.new_delete_request('subnets', subnet_id)
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
def test_update_port_status_build(self):
with self.port() as port:
self.assertEqual('DOWN', port['port']['status'])
self.assertEqual('DOWN', self.port_create_status)
def test_update_port_status_short_id(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with self.port() as port:
with mock.patch.object(ml2_db, 'get_binding_levels',
return_value=[]) as mock_gbl:
port_id = port['port']['id']
short_id = port_id[:11]
plugin.update_port_status(ctx, short_id, 'UP')
mock_gbl.assert_called_once_with(mock.ANY, port_id, mock.ANY)
def test_update_port_fixed_ip_changed(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with self.port() as port, mock.patch.object(
plugin.notifier,
'security_groups_member_updated') as sg_member_update:
port['port']['fixed_ips'][0]['ip_address'] = '10.0.0.3'
plugin.update_port(ctx, port['port']['id'], port)
self.assertTrue(sg_member_update.called)
def test_update_port_status_with_network(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with self.port() as port:
net = plugin.get_network(ctx, port['port']['network_id'])
with mock.patch.object(plugin, 'get_network') as get_net:
plugin.update_port_status(ctx, port['port']['id'], 'UP',
network=net)
self.assertFalse(get_net.called)
def test_update_port_mac(self):
self.check_update_port_mac(
host_arg={portbindings.HOST_ID: HOST},
arg_list=(portbindings.HOST_ID,))
def test_update_non_existent_port(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
data = {'port': {'admin_state_up': False}}
self.assertRaises(exc.PortNotFound, plugin.update_port, ctx,
'invalid-uuid', data)
def test_delete_non_existent_port(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(ml2_plugin.LOG, 'debug') as log_debug:
plugin.delete_port(ctx, 'invalid-uuid', l3_port_check=False)
log_debug.assert_has_calls([
mock.call(_("Deleting port %s"), 'invalid-uuid'),
mock.call(_("The port '%s' was deleted"), 'invalid-uuid')
])
def test_l3_cleanup_on_net_delete(self):
l3plugin = manager.NeutronManager.get_service_plugins().get(
p_const.L3_ROUTER_NAT)
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
with self.network(**kwargs) as n:
with self.subnet(network=n, cidr='200.0.0.0/22'):
l3plugin.create_floatingip(
context.get_admin_context(),
{'floatingip': {'floating_network_id': n['network']['id'],
'tenant_id': n['network']['tenant_id']}}
)
self._delete('networks', n['network']['id'])
flips = l3plugin.get_floatingips(context.get_admin_context())
self.assertFalse(flips)
def test_create_ports_bulk_port_binding_failure(self):
ctx = context.get_admin_context()
with self.network() as net:
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(plugin, '_bind_port_if_needed',
side_effect=ml2_exc.MechanismDriverError(
method='create_port_bulk')) as _bind_port_if_needed:
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx)
self.assertTrue(_bind_port_if_needed.called)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'ports', webob.exc.HTTPServerError.code)
def test_create_ports_bulk_with_sec_grp(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with self.network() as net,\
mock.patch.object(plugin.notifier,
'security_groups_member_updated') as m_upd,\
mock.patch.object(plugin.notifier,
'security_groups_provider_updated') as p_upd:
res = self._create_port_bulk(self.fmt, 3, net['network']['id'],
'test', True, context=ctx)
ports = self.deserialize(self.fmt, res)
used_sg = ports['ports'][0]['security_groups']
m_upd.assert_called_once_with(ctx, used_sg)
self.assertFalse(p_upd.called)
def _check_security_groups_provider_updated_args(self, p_upd_mock, net_id):
query_params = "network_id=%s" % net_id
network_ports = self._list('ports', query_params=query_params)
network_ports_ids = [port['id'] for port in network_ports['ports']]
self.assertTrue(p_upd_mock.called)
p_upd_args = p_upd_mock.call_args
ports_ids = p_upd_args[0][1]
self.assertEqual(sorted(network_ports_ids), sorted(ports_ids))
def test_create_ports_bulk_with_sec_grp_member_provider_update(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with self.network() as net,\
mock.patch.object(plugin.notifier,
'security_groups_member_updated') as m_upd,\
mock.patch.object(plugin.notifier,
'security_groups_provider_updated') as p_upd:
net_id = net['network']['id']
data = [{
'network_id': net_id,
'tenant_id': self._tenant_id
},
{
'network_id': net_id,
'tenant_id': self._tenant_id,
'device_owner': constants.DEVICE_OWNER_DHCP
}
]
res = self._create_bulk_from_list(self.fmt, 'port',
data, context=ctx)
ports = self.deserialize(self.fmt, res)
used_sg = ports['ports'][0]['security_groups']
m_upd.assert_called_once_with(ctx, used_sg)
self._check_security_groups_provider_updated_args(p_upd, net_id)
m_upd.reset_mock()
p_upd.reset_mock()
data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP
self._create_bulk_from_list(self.fmt, 'port',
data, context=ctx)
self.assertFalse(m_upd.called)
self._check_security_groups_provider_updated_args(p_upd, net_id)
def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
fake_prefix = '2001:db8::/64'
fake_gateway = 'fe80::1'
with self.network() as net:
with self.subnet(net,
gateway_ip=fake_gateway,
cidr=fake_prefix,
ip_version=6) as snet_v6,\
mock.patch.object(
plugin.notifier,
'security_groups_member_updated') as m_upd,\
mock.patch.object(
plugin.notifier,
'security_groups_provider_updated') as p_upd:
net_id = net['network']['id']
data = [{
'network_id': net_id,
'tenant_id': self._tenant_id,
'fixed_ips': [{'subnet_id': snet_v6['subnet']['id']}],
'device_owner': constants.DEVICE_OWNER_ROUTER_INTF
}
]
self._create_bulk_from_list(self.fmt, 'port',
data, context=ctx)
self.assertFalse(m_upd.called)
self._check_security_groups_provider_updated_args(
p_upd, net_id)
def test_delete_port_no_notify_in_disassociate_floatingips(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
l3plugin = manager.NeutronManager.get_service_plugins().get(
p_const.L3_ROUTER_NAT)
with self.port() as port,\
mock.patch.object(
l3plugin,
'disassociate_floatingips') as disassociate_floatingips,\
mock.patch.object(registry, 'notify') as notify:
port_id = port['port']['id']
plugin.delete_port(ctx, port_id)
# check that no notification was requested while under
# transaction
disassociate_floatingips.assert_has_calls([
mock.call(ctx, port_id, do_notify=False)
])
# check that notifier was still triggered
            self.assertTrue(notify.call_count)
def test_check_if_compute_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced('compute:None'))
def test_check_if_lbaas_vip_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced(
constants.DEVICE_OWNER_LOADBALANCER))
def test_check_if_dhcp_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced(constants.DEVICE_OWNER_DHCP))
def test_check_if_port_not_serviced_by_dvr(self):
self.assertFalse(utils.is_dvr_serviced(
constants.DEVICE_OWNER_ROUTER_INTF))
def test_disassociate_floatingips_do_notify_returns_nothing(self):
ctx = context.get_admin_context()
l3plugin = manager.NeutronManager.get_service_plugins().get(
p_const.L3_ROUTER_NAT)
with self.port() as port:
port_id = port['port']['id']
# check that nothing is returned when notifications are handled
# by the called method
self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id))
def test_create_port_tolerates_db_deadlock(self):
ctx = context.get_admin_context()
with self.network() as net:
with self.subnet(network=net) as subnet:
segments = ml2_db.get_network_segments(ctx.session,
net['network']['id'])
with mock.patch('neutron.plugins.ml2.plugin.'
'db.get_network_segments') as get_seg_mock:
get_seg_mock.side_effect = [db_exc.DBDeadlock, segments,
segments, segments]
with self.port(subnet=subnet) as port:
self.assertTrue(port['port']['id'])
self.assertEqual(4, get_seg_mock.call_count)
def test_delete_port_tolerates_db_deadlock(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with self.port() as port:
port_db, binding = ml2_db.get_locked_port_and_binding(
ctx.session, port['port']['id'])
with mock.patch('neutron.plugins.ml2.plugin.'
'db.get_locked_port_and_binding') as lock:
lock.side_effect = [db_exc.DBDeadlock,
(port_db, binding)]
req = self.new_delete_request('ports', port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
self.assertEqual(2, lock.call_count)
self.assertRaises(
exc.PortNotFound, plugin.get_port, ctx, port['port']['id'])
class TestMl2PluginOnly(Ml2PluginV2TestCase):
"""For testing methods that don't call drivers"""
def _test_check_mac_update_allowed(self, vif_type, expect_change=True):
plugin = manager.NeutronManager.get_plugin()
port = {'mac_address': "fake_mac", 'id': "fake_id"}
if expect_change:
new_attrs = {"mac_address": "dummy_mac"}
else:
new_attrs = {"mac_address": port['mac_address']}
binding = mock.Mock()
binding.vif_type = vif_type
mac_changed = plugin._check_mac_update_allowed(port, new_attrs,
binding)
self.assertEqual(expect_change, mac_changed)
def test_check_mac_update_allowed_if_no_mac_change(self):
self._test_check_mac_update_allowed(portbindings.VIF_TYPE_UNBOUND,
expect_change=False)
def test_check_mac_update_allowed_unless_bound(self):
with testtools.ExpectedException(exc.PortBound):
self._test_check_mac_update_allowed(portbindings.VIF_TYPE_OVS)
def test__device_to_port_id_prefix_names(self):
input_output = [('sg-abcdefg', 'abcdefg'),
('tap123456', '123456'),
('qvo567890', '567890')]
for device, expected in input_output:
self.assertEqual(expected,
ml2_plugin.Ml2Plugin._device_to_port_id(
self.context, device))
def test__device_to_port_id_mac_address(self):
with self.port() as p:
mac = p['port']['mac_address']
port_id = p['port']['id']
self.assertEqual(port_id,
ml2_plugin.Ml2Plugin._device_to_port_id(
self.context, mac))
def test__device_to_port_id_not_uuid_not_mac(self):
dev = '1234567'
self.assertEqual(dev, ml2_plugin.Ml2Plugin._device_to_port_id(
self.context, dev))
def test__device_to_port_id_UUID(self):
port_id = uuidutils.generate_uuid()
self.assertEqual(port_id, ml2_plugin.Ml2Plugin._device_to_port_id(
self.context, port_id))
class TestMl2DvrPortsV2(TestMl2PortsV2):
def setUp(self):
super(TestMl2DvrPortsV2, self).setUp()
extensions = ['router',
constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS]
self.plugin = manager.NeutronManager.get_plugin()
self.l3plugin = mock.Mock()
type(self.l3plugin).supported_extension_aliases = (
mock.PropertyMock(return_value=extensions))
self.service_plugins = {'L3_ROUTER_NAT': self.l3plugin}
def _test_delete_dvr_serviced_port(self, device_owner, floating_ip=False):
ns_to_delete = {'host': 'myhost', 'agent_id': 'vm_l3_agent',
'router_id': 'my_router'}
fip_set = set()
if floating_ip:
fip_set.add(ns_to_delete['router_id'])
with mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value=self.service_plugins),\
self.port(device_owner=device_owner) as port,\
mock.patch.object(registry, 'notify') as notify,\
mock.patch.object(self.l3plugin,
'disassociate_floatingips',
return_value=fip_set),\
mock.patch.object(
self.l3plugin,
'dvr_deletens_if_no_port',
return_value=[ns_to_delete]) as dvr_delns_ifno_port:
port_id = port['port']['id']
self.plugin.delete_port(self.context, port_id)
self.assertTrue(notify.call_count)
dvr_delns_ifno_port.assert_called_once_with(self.context,
port['port']['id'])
def test_delete_last_vm_port(self):
self._test_delete_dvr_serviced_port(device_owner='compute:None')
def test_delete_last_vm_port_with_floatingip(self):
self._test_delete_dvr_serviced_port(device_owner='compute:None',
floating_ip=True)
def test_delete_lbaas_vip_port(self):
self._test_delete_dvr_serviced_port(
device_owner=constants.DEVICE_OWNER_LOADBALANCER)
def test_concurrent_csnat_port_delete(self):
plugin = manager.NeutronManager.get_service_plugins()[
p_const.L3_ROUTER_NAT]
r = plugin.create_router(
self.context,
{'router': {'name': 'router', 'admin_state_up': True}})
with self.subnet() as s:
p = plugin.add_router_interface(self.context, r['id'],
{'subnet_id': s['subnet']['id']})
# lie to turn the port into an SNAT interface
with self.context.session.begin():
rp = self.context.session.query(l3_db.RouterPort).filter_by(
port_id=p['port_id']).first()
rp.port_type = constants.DEVICE_OWNER_ROUTER_SNAT
# take the port away before csnat gets a chance to delete it
# to simulate a concurrent delete
orig_get_ports = plugin._core_plugin.get_ports
def get_ports_with_delete_first(*args, **kwargs):
plugin._core_plugin.delete_port(self.context,
p['port_id'],
l3_port_check=False)
return orig_get_ports(*args, **kwargs)
plugin._core_plugin.get_ports = get_ports_with_delete_first
# This should be able to handle a concurrent delete without raising
# an exception
router = plugin._get_router(self.context, r['id'])
plugin.delete_csnat_router_interface_ports(self.context, router)
class TestMl2PortBinding(Ml2PluginV2TestCase,
test_bindings.PortBindingsTestCase):
# Test case does not set binding:host_id, so ml2 does not attempt
# to bind port
VIF_TYPE = portbindings.VIF_TYPE_UNBOUND
HAS_PORT_FILTER = False
ENABLE_SG = True
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER
def setUp(self, firewall_driver=None):
test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
config.cfg.CONF.set_override(
'enable_security_group', self.ENABLE_SG,
group='SECURITYGROUP')
super(TestMl2PortBinding, self).setUp()
def _check_port_binding_profile(self, port, profile=None):
self.assertIn('id', port)
self.assertIn(portbindings.PROFILE, port)
value = port[portbindings.PROFILE]
self.assertEqual(profile or {}, value)
def test_create_port_binding_profile(self):
self._test_create_port_binding_profile({'a': 1, 'b': 2})
def test_update_port_binding_profile(self):
self._test_update_port_binding_profile({'c': 3})
def test_create_port_binding_profile_too_big(self):
s = 'x' * 5000
profile_arg = {portbindings.PROFILE: {'d': s}}
try:
with self.port(expected_res_status=400,
arg_list=(portbindings.PROFILE,),
**profile_arg):
pass
except webob.exc.HTTPClientError:
pass
def test_remove_port_binding_profile(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
profile_arg = {portbindings.PROFILE: None}
port = self._update('ports', port_id,
{'port': profile_arg})['port']
self._check_port_binding_profile(port)
port = self._show('ports', port_id)['port']
self._check_port_binding_profile(port)
def test_return_on_concurrent_delete_and_binding(self):
# create a port and delete it so we have an expired mechanism context
with self.port() as port:
plugin = manager.NeutronManager.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context.session,
port['port']['id'])[1]
binding['host'] = 'test'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
plugin.get_network(self.context, port['port']['network_id']),
binding, None)
with mock.patch(
                'neutron.plugins.ml2.plugin.db.get_locked_port_and_binding',
return_value=(None, None)) as glpab_mock,\
mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
'_make_port_dict') as mpd_mock:
plugin._bind_port_if_needed(mech_context)
# called during deletion to get port
self.assertTrue(glpab_mock.mock_calls)
# should have returned before calling _make_port_dict
self.assertFalse(mpd_mock.mock_calls)
def test_port_binding_profile_not_changed(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
state_arg = {'admin_state_up': True}
port = self._update('ports', port_id,
{'port': state_arg})['port']
self._check_port_binding_profile(port, profile)
port = self._show('ports', port_id)['port']
self._check_port_binding_profile(port, profile)
def test_update_port_binding_host_id_none(self):
with self.port() as port:
plugin = manager.NeutronManager.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context.session,
port['port']['id'])[1]
binding['host'] = 'test'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
plugin.get_network(self.context, port['port']['network_id']),
binding, None)
with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
'_update_port_dict_binding') as update_mock:
attrs = {portbindings.HOST_ID: None}
plugin._process_port_binding(mech_context, attrs)
self.assertTrue(update_mock.mock_calls)
self.assertEqual('', binding.host)
def test_update_port_binding_host_id_not_changed(self):
with self.port() as port:
plugin = manager.NeutronManager.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context.session,
port['port']['id'])[1]
binding['host'] = 'test'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
plugin.get_network(self.context, port['port']['network_id']),
binding, None)
with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
'_update_port_dict_binding') as update_mock:
attrs = {portbindings.PROFILE: {'e': 5}}
plugin._process_port_binding(mech_context, attrs)
self.assertTrue(update_mock.mock_calls)
self.assertEqual('test', binding.host)
def test_process_dvr_port_binding_update_router_id(self):
host_id = 'host'
binding = models.DVRPortBinding(
port_id='port_id',
host=host_id,
router_id='old_router_id',
vif_type=portbindings.VIF_TYPE_OVS,
vnic_type=portbindings.VNIC_NORMAL,
status=constants.PORT_STATUS_DOWN)
plugin = manager.NeutronManager.get_plugin()
mock_network = {'id': 'net_id'}
mock_port = {'id': 'port_id'}
context = mock.Mock()
new_router_id = 'new_router'
attrs = {'device_id': new_router_id, portbindings.HOST_ID: host_id}
with mock.patch.object(plugin, '_update_port_dict_binding'):
with mock.patch.object(ml2_db, 'get_network_segments',
return_value=[]):
mech_context = driver_context.PortContext(
self, context, mock_port, mock_network, binding, None)
plugin._process_dvr_port_binding(mech_context, context, attrs)
self.assertEqual(new_router_id,
mech_context._binding.router_id)
self.assertEqual(host_id, mech_context._binding.host)
def test_update_dvr_port_binding_on_non_existent_port(self):
plugin = manager.NeutronManager.get_plugin()
port = {
'id': 'foo_port_id',
'binding:host_id': 'foo_host',
}
with mock.patch.object(ml2_db, 'ensure_dvr_port_binding') as mock_dvr:
plugin.update_dvr_port_binding(
self.context, 'foo_port_id', {'port': port})
self.assertFalse(mock_dvr.called)
class TestMl2PortBindingNoSG(TestMl2PortBinding):
HAS_PORT_FILTER = False
ENABLE_SG = False
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER
class TestMl2PortBindingHost(Ml2PluginV2TestCase,
test_bindings.PortBindingsHostTestCaseMixin):
pass
class TestMl2PortBindingVnicType(Ml2PluginV2TestCase,
test_bindings.PortBindingsVnicTestCaseMixin):
pass
class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def setUp(self, plugin=None):
super(TestMultiSegmentNetworks, self).setUp()
def test_allocate_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0)
segment2 = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.SEGMENTATION_ID: 1234,
driver_api.PHYSICAL_NETWORK: 'physnet3'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment2)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
segmentation_id='1234')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet3',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertEqual(dynamic_segment[driver_api.SEGMENTATION_ID], 1234)
def test_allocate_dynamic_segment_multiple_physnets(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
self.assertTrue(dynamic_segmentation_id > 0)
dynamic_segment1 = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
dynamic_segment1_id = dynamic_segment1[driver_api.SEGMENTATION_ID]
self.assertEqual(dynamic_segmentation_id, dynamic_segment1_id)
segment2 = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet2'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment2)
dynamic_segment2 = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet2')
dynamic_segmentation2_id = dynamic_segment2[driver_api.SEGMENTATION_ID]
self.assertNotEqual(dynamic_segmentation_id, dynamic_segmentation2_id)
def test_allocate_release_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
self.assertTrue(dynamic_segmentation_id > 0)
self.driver.type_manager.release_dynamic_segment(
self.context.session, dynamic_segment[driver_api.ID])
self.assertIsNone(ml2_db.get_dynamic_segment(
self.context.session, network_id, 'physnet1'))
def test_create_network_provider(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_create_network_single_multiprovider(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
net_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_create_network_multiprovider(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 2}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segments = network['network'][mpnet.SEGMENTS]
for segment_index, segment in enumerate(data['network']
[mpnet.SEGMENTS]):
for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID]:
self.assertEqual(segment.get(field),
segments[segment_index][field])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
network = self.deserialize(self.fmt, net_req.get_response(self.api))
segments = network['network'][mpnet.SEGMENTS]
for segment_index, segment in enumerate(data['network']
[mpnet.SEGMENTS]):
for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID]:
self.assertEqual(segment.get(field),
segments[segment_index][field])
def test_create_network_with_provider_and_multiprovider_fail(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_create_network_duplicate_full_segments(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_create_network_duplicate_partial_segments(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
def test_release_network_segments(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet2'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet2')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet2',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0)
with mock.patch.object(type_vlan.VlanTypeDriver,
'release_segment') as rs:
req = self.new_delete_request('networks', network_id)
res = req.get_response(self.api)
self.assertEqual(2, rs.call_count)
self.assertEqual(ml2_db.get_network_segments(
self.context.session, network_id), [])
self.assertIsNone(ml2_db.get_dynamic_segment(
self.context.session, network_id, 'physnet2'))
def test_release_segment_no_type_driver(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
segment = {driver_api.NETWORK_TYPE: 'faketype',
driver_api.PHYSICAL_NETWORK: 'physnet1',
driver_api.ID: 1}
with mock.patch('neutron.plugins.ml2.managers.LOG') as log:
with mock.patch('neutron.plugins.ml2.managers.db') as db:
db.get_network_segments.return_value = (segment,)
self.driver.type_manager.release_network_segments(
self.context.session, network_id)
log.error.assert_called_once_with(
"Failed to release segment '%s' because "
"network type is not supported.", segment)
def test_create_provider_fail(self):
segment = {pnet.NETWORK_TYPE: None,
pnet.PHYSICAL_NETWORK: 'phys_net',
pnet.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.type_manager._process_provider_create(segment)
def test_create_network_plugin(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
def raise_mechanism_exc(*args, **kwargs):
raise ml2_exc.MechanismDriverError(
method='create_network_postcommit')
with mock.patch('neutron.plugins.ml2.managers.MechanismManager.'
'create_network_precommit', new=raise_mechanism_exc):
with testtools.ExpectedException(ml2_exc.MechanismDriverError):
self.driver.create_network(self.context, data)
def test_extend_dictionary_no_segments(self):
network = dict(name='net_no_segment', id='5', tenant_id='tenant_one')
self.driver.type_manager.extend_network_dict_provider(self.context,
network)
self.assertIsNone(network[pnet.NETWORK_TYPE])
self.assertIsNone(network[pnet.PHYSICAL_NETWORK])
self.assertIsNone(network[pnet.SEGMENTATION_ID])
class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase,
test_pair.TestAllowedAddressPairs):
_extension_drivers = ['port_security']
def setUp(self, plugin=None):
config.cfg.CONF.set_override('extension_drivers',
self._extension_drivers,
group='ml2')
super(test_pair.TestAllowedAddressPairs, self).setUp(
plugin=PLUGIN_NAME)
class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt):
def setUp(self, plugin=None):
super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp(
plugin=PLUGIN_NAME)
class Ml2PluginV2FaultyDriverTestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
['test', 'logger'],
group='ml2')
super(Ml2PluginV2FaultyDriverTestCase, self).setUp(PLUGIN_NAME)
self.port_create_status = 'DOWN'
class TestFaultyMechanismDriver(Ml2PluginV2FaultyDriverTestCase):
def test_create_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
tenant_id = str(uuid.uuid4())
data = {'network': {'name': 'net1',
'tenant_id': tenant_id}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "tenant_id=%s" % tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
def test_delete_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'delete_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_network_postcommit') as dnp:
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
network = self.deserialize(self.fmt, network_res)
net_id = network['network']['id']
req = self.new_delete_request('networks', net_id)
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
# Test if other mechanism driver was called
self.assertTrue(dnp.called)
self._show('networks', net_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_update_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_network_postcommit') as unp:
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
network = self.deserialize(self.fmt, network_res)
net_id = network['network']['id']
new_name = 'a_brand_new_name'
data = {'network': {'name': new_name}}
req = self.new_update_request('networks', data, net_id)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
# Test if other mechanism driver was called
self.assertTrue(unp.called)
net = self._show('networks', net_id)
self.assertEqual(new_name, net['network']['name'])
self._delete('networks', net_id)
def test_create_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with self.network() as network:
net_id = network['network']['id']
data = {'subnet': {'network_id': net_id,
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "network_id=%s" % net_id
subnets = self._list('subnets', query_params=query_params)
self.assertFalse(subnets['subnets'])
def test_delete_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'delete_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_subnet_postcommit') as dsp:
with self.network() as network:
data = {'subnet': {'network_id':
network['network']['id'],
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
self.assertEqual(201, subnet_res.status_int)
subnet = self.deserialize(self.fmt, subnet_res)
subnet_id = subnet['subnet']['id']
req = self.new_delete_request('subnets', subnet_id)
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
# Test if other mechanism driver was called
self.assertTrue(dsp.called)
self._show('subnets', subnet_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_update_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_subnet_postcommit') as usp:
with self.network() as network:
data = {'subnet': {'network_id':
network['network']['id'],
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
self.assertEqual(201, subnet_res.status_int)
subnet = self.deserialize(self.fmt, subnet_res)
subnet_id = subnet['subnet']['id']
new_name = 'a_brand_new_name'
data = {'subnet': {'name': new_name}}
req = self.new_update_request('subnets', data, subnet_id)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
# Test if other mechanism driver was called
self.assertTrue(usp.called)
subnet = self._show('subnets', subnet_id)
self.assertEqual(new_name, subnet['subnet']['name'])
self._delete('subnets', subnet['subnet']['id'])
def test_create_port_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_port_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with self.network() as network:
net_id = network['network']['id']
data = {'port': {'network_id': net_id,
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
req = self.new_create_request('ports', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "network_id=%s" % net_id
ports = self._list('ports', query_params=query_params)
self.assertFalse(ports['ports'])
def test_update_port_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_port_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_port_postcommit') as upp:
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
port_res = port_req.get_response(self.api)
self.assertEqual(201, port_res.status_int)
port = self.deserialize(self.fmt, port_res)
port_id = port['port']['id']
new_name = 'a_brand_new_name'
data = {'port': {'name': new_name}}
req = self.new_update_request('ports', data, port_id)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
# Test if other mechanism driver was called
self.assertTrue(upp.called)
port = self._show('ports', port_id)
self.assertEqual(new_name, port['port']['name'])
self._delete('ports', port['port']['id'])
def test_update_dvr_router_interface_port(self):
"""Test validate dvr router interface update succeeds."""
host_id = 'host'
binding = models.DVRPortBinding(
port_id='port_id',
host=host_id,
router_id='old_router_id',
vif_type=portbindings.VIF_TYPE_OVS,
vnic_type=portbindings.VNIC_NORMAL,
status=constants.PORT_STATUS_DOWN)
with mock.patch.object(
mech_test.TestMechanismDriver,
'update_port_postcommit',
side_effect=ml2_exc.MechanismDriverError) as port_post,\
mock.patch.object(
mech_test.TestMechanismDriver,
'update_port_precommit') as port_pre,\
mock.patch.object(ml2_db,
'get_dvr_port_bindings') as dvr_bindings:
dvr_bindings.return_value = [binding]
port_pre.return_value = True
with self.network() as network:
with self.subnet(network=network) as subnet:
subnet_id = subnet['subnet']['id']
data = {'port': {
'network_id': network['network']['id'],
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'device_owner':
'network:router_interface_distributed',
'admin_state_up': 1,
'fixed_ips':
[{'subnet_id': subnet_id}]}}
port_req = self.new_create_request('ports', data)
port_res = port_req.get_response(self.api)
self.assertEqual(201, port_res.status_int)
port = self.deserialize(self.fmt, port_res)
port_id = port['port']['id']
new_name = 'a_brand_new_name'
data = {'port': {'name': new_name}}
req = self.new_update_request('ports', data, port_id)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
self.assertTrue(dvr_bindings.called)
self.assertTrue(port_pre.called)
self.assertTrue(port_post.called)
port = self._show('ports', port_id)
self.assertEqual(new_name, port['port']['name'])
class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase):
def setUp(self):
super(TestMl2PluginCreateUpdateDeletePort, self).setUp()
self.context = mock.MagicMock()
self.notify_p = mock.patch('neutron.callbacks.registry.notify')
self.notify = self.notify_p.start()
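    # note: self.context is a MagicMock, so session.begin(...) returns the
    # same mock transaction object on every call; comparing its __enter__ and
    # __exit__ call counts below verifies that every transaction the plugin
    # opened had been closed by the time registry.notify fired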
def _ensure_transaction_is_closed(self):
transaction = self.context.session.begin(subtransactions=True)
enter = transaction.__enter__.call_count
exit = transaction.__exit__.call_count
self.assertEqual(enter, exit)
def _create_plugin_for_create_update_port(self, new_host_port):
plugin = ml2_plugin.Ml2Plugin()
plugin.extension_manager = mock.Mock()
plugin.type_manager = mock.Mock()
plugin.mechanism_manager = mock.Mock()
plugin.notifier = mock.Mock()
plugin._get_host_port_if_changed = mock.Mock(
return_value=new_host_port)
plugin._check_mac_update_allowed = mock.Mock(return_value=True)
self.notify.side_effect = (
lambda r, e, t, **kwargs: self._ensure_transaction_is_closed())
return plugin
def _test__get_host_port_if_changed(
self, mech_context, attrs=None, expected_retval=None):
with mock.patch.object(ml2_plugin.Ml2Plugin,
'__init__',
return_value=None):
plugin = ml2_plugin.Ml2Plugin()
test_return = plugin._get_host_port_if_changed(mech_context, attrs)
self.assertEqual(expected_retval, test_return)
def test__get_host_port_if_changed_no_attrs(self):
mech_context = mock.Mock()
mech_context._binding.host = 'Host-1'
self._test__get_host_port_if_changed(
mech_context, attrs=None, expected_retval=None)
def test__get_host_port_if_changed_no_binding_change(self):
mech_context = mock.Mock()
mech_context._binding.host = 'Host-1'
mech_context.current = {
'id': 'fake-id',
'mac_address': '2a:2b:2c:2d:2e:2f'
}
attrs = {'mac_address': '0a:0b:0c:0d:0e:0f'}
self._test__get_host_port_if_changed(
mech_context, attrs=attrs, expected_retval=None)
attrs = {
portbindings.HOST_ID: 'Host-1',
'mac_address': '0a:0b:0c:0d:0e:0f',
}
self._test__get_host_port_if_changed(
mech_context, attrs=attrs, expected_retval=None)
def test__get_host_port_if_changed_with_binding_removed(self):
expected_return = {
'id': 'fake-id',
portbindings.HOST_ID: None,
'mac_address': '2a:2b:2c:2d:2e:2f'
}
mech_context = mock.Mock()
mech_context._binding.host = 'Host-1'
mech_context.current = expected_return
attrs = {portbindings.HOST_ID: None}
self._test__get_host_port_if_changed(
mech_context, attrs=attrs, expected_retval=expected_return)
def test__get_host_port_if_changed_with_binding_added(self):
expected_return = {
'id': 'fake-id',
portbindings.HOST_ID: 'host-1',
'mac_address': '2a:2b:2c:2d:2e:2f'
}
mech_context = mock.Mock()
mech_context.current = expected_return
attrs = {portbindings.HOST_ID: 'host-1'}
self._test__get_host_port_if_changed(
mech_context, attrs=attrs, expected_retval=expected_return)
def test_create_port_rpc_outside_transaction(self):
with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\
mock.patch.object(base_plugin.NeutronDbPluginV2,
'create_port'):
init.return_value = None
new_host_port = mock.Mock()
plugin = self._create_plugin_for_create_update_port(new_host_port)
plugin.create_port(self.context, mock.MagicMock())
kwargs = {'context': self.context, 'port': new_host_port}
self.notify.assert_called_once_with('port', 'after_create',
plugin, **kwargs)
def test_update_port_rpc_outside_transaction(self):
port_id = 'fake_id'
net_id = 'mynet'
original_port_db = models_v2.Port(
id=port_id,
tenant_id='tenant',
network_id=net_id,
mac_address='08:00:01:02:03:04',
admin_state_up=True,
status='ACTIVE',
device_id='vm_id',
device_owner='compute:None')
binding = mock.Mock()
binding.port_id = port_id
binding.host = 'vm_host'
binding.vnic_type = portbindings.VNIC_NORMAL
binding.profile = ''
binding.vif_type = ''
binding.vif_details = ''
with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\
mock.patch.object(ml2_db, 'get_locked_port_and_binding',
return_value=(original_port_db, binding)),\
mock.patch.object(base_plugin.NeutronDbPluginV2,
'update_port'):
init.return_value = None
new_host_port = mock.Mock()
plugin = self._create_plugin_for_create_update_port(new_host_port)
original_port = plugin._make_port_dict(original_port_db)
plugin.update_port(self.context, port_id, mock.MagicMock())
kwargs = {
'context': self.context,
'port': new_host_port,
'mac_address_updated': True,
'original_port': original_port,
}
self.notify.assert_called_once_with('port', 'after_update',
plugin, **kwargs)
def test_notify_outside_of_delete_transaction(self):
self.notify.side_effect = (
lambda r, e, t, **kwargs: self._ensure_transaction_is_closed())
l3plugin = mock.Mock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
with mock.patch.object(ml2_plugin.Ml2Plugin,
'__init__',
return_value=None),\
mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value={'L3_ROUTER_NAT': l3plugin}):
plugin = self._create_plugin_for_create_update_port(mock.Mock())
# Set backend manually here since __init__ was mocked
plugin.set_ipam_backend()
# deleting the port will call registry.notify, which will
# run the transaction balancing function defined in this test
plugin.delete_port(self.context, 'fake_id')
self.assertTrue(self.notify.call_count)
|
|
"""
make an OpenFMRI-format distribution (ds031) for the myconnectome dataset
"""
import glob,os,shutil
import pickle
import json
import mvpa2.misc.fsl
def makedir(d):
""" make a directory if it doesn't already exist"""
if not os.path.exists(d):
os.makedirs(d)
def get_infodict(series_type,dcmhdr):
    infodict={}
    for k in dcmhdr.keys():
        h=dcmhdr[k]
        # if several series match, the last one in the header dict wins;
        # we assume the good resting scan was the last one in the session
        if h.SeriesDescription.startswith(series_type):
for hk in h.keys():
try:
# filter out non-ascii entries
h[hk].value.decode('ascii')
infodict[h[hk].name]=h[hk].value
except:
pass
return infodict
def get_infodict_by_num(n,dcmhdr):
infodict={}
k='%d'%n
h=dcmhdr[k]
    # unlike get_infodict, this looks up a single series directly by number
for hk in h.keys():
try:
# filter out non-ascii entries
h[hk].value.decode('ascii')
infodict[h[hk].name]=h[hk].value
except:
pass
return infodict
def logged_copy(infile,outfile,logfile='/scratch/01329/poldrack/selftracking/ds031/file_log.txt'):
shutil.copy(infile,outfile)
f=open(logfile,'a')
f.write('%s\t%s\n'%(infile,outfile))
f.close()
overwrite=False
task_descriptors=['N-','dot','face','super','loc','Breath']
basedir='/corral-repl/utexas/poldracklab/data/selftracking'
outdir='/scratch/01329/poldrack/selftracking/ds031/sub00001'
anatbase='/corral-repl/utexas/poldracklab/data/selftracking/anatomy_dicoms'
subdirs=glob.glob(os.path.join(basedir,'sub[0-1]*'))
subdirs.sort()
for i in range(len(subdirs)):
subdir=subdirs[i]
subcode=os.path.basename(subdir)
sesscode=subcode.replace('sub','ses')
print subcode,sesscode
sessdir=os.path.join(outdir,sesscode)
makedir(sessdir)
headerpklfile=os.path.join(subdir,'logs/dicom_headers.pkl')
try:
dcmhdr=pickle.load(open(headerpklfile,'rb'))
except:
dcmhdr={}
orig_boldfile=glob.glob(os.path.join(subdir,'BOLD/Resting*/bold.nii.gz'))
    if len(orig_boldfile)!=1:
print 'no good bold file for',subcode
else:
bolddir=os.path.join(sessdir,'functional')
makedir(bolddir)
boldfile=os.path.join(bolddir,'sub00001_%s_task001_run001_bold.nii.gz'%sesscode)
if not os.path.exists(boldfile) or overwrite:
print 'copying %s to %s'%(orig_boldfile[0],boldfile)
logged_copy(orig_boldfile[0],boldfile)
# dump dicom header to json
# need to figure out which session it was
infofile=boldfile.replace('.nii.gz','.json')
if not os.path.exists(infofile) or overwrite:
infodict=get_infodict('Resting',dcmhdr)
f=open(infofile,'w')
f.write(json.dumps(infodict,indent=4))
f.close()
# get anatomies
if os.path.exists(os.path.join(anatbase,'t1w/%s_t1.nii.gz'%subcode)):
t1file=os.path.join(anatbase,'t1w/%s_t1.nii.gz'%subcode)
else:
t1file=None
if os.path.exists(os.path.join(anatbase,'t2w/%s_t2.nii.gz'%subcode)):
t2file=os.path.join(anatbase,'t2w/%s_t2.nii.gz'%subcode)
else:
t2file=None
anatsessdir=os.path.join(sessdir,'anatomy')
t1outfile=os.path.join(anatsessdir,'sub00001_%s_T1w_001.nii.gz'%sesscode)
t2outfile=os.path.join(anatsessdir,'sub00001_%s_T2w_001.nii.gz'%sesscode)
if t1file and not os.path.exists(t1outfile):
makedir(anatsessdir)
logged_copy(t1file,t1outfile)
infofile=t1outfile.replace('.nii.gz','.json')
if t1file and (not os.path.exists(infofile) or overwrite):
infodict=get_infodict('T1w',dcmhdr)
f=open(infofile,'w')
f.write(json.dumps(infodict,indent=4))
f.close()
if t2file and not os.path.exists(t2outfile):
makedir(anatsessdir)
logged_copy(t2file,t2outfile)
infofile=t2outfile.replace('.nii.gz','.json')
if t2file and (not os.path.exists(infofile) or overwrite):
infodict=get_infodict('T2w',dcmhdr)
f=open(infofile,'w')
f.write(json.dumps(infodict,indent=4))
f.close()
# get diffusion
diffusion_files=glob.glob(os.path.join(subdir,'DTI/DTI_[1,2].nii.gz'))
diffusion_files.sort()
if len(diffusion_files)>0:
diffdir=os.path.join(sessdir,'diffusion')
makedir(diffdir)
        # find the R-L and L-R diffusion series numbers in the header pickle
        diffusion_series=[-1,-1]
        for sk in dcmhdr.keys():
            if dcmhdr[sk].SeriesDescription=='MBEPI with MDDW R-L':
                diffusion_series[0]=int(sk)
            elif dcmhdr[sk].SeriesDescription=='MBEPI with MDDW L-R':
                diffusion_series[1]=int(sk)
        print 'diffusion series',diffusion_series
        # series left at -1 were not found; the json dump below skips those
        assert len(diffusion_files)<=len(diffusion_series)
for i in range(len(diffusion_files)):
fnum=int(os.path.basename(diffusion_files[i]).split('_')[1].split('.')[0])
outfile=os.path.join(diffdir,'sub00001_%s_dwi_%03d.nii.gz'%(sesscode,fnum))
if not os.path.exists(outfile) or overwrite:
logged_copy(diffusion_files[i],outfile)
outfile=os.path.join(diffdir,'sub00001_%s_dwi_%03d.bval'%(sesscode,fnum))
if not os.path.exists(outfile) or overwrite:
logged_copy(diffusion_files[i].replace('.nii.gz','.bval'),outfile)
outfile=os.path.join(diffdir,'sub00001_%s_dwi_%03d.bvec'%(sesscode,fnum))
if not os.path.exists(outfile) or overwrite:
logged_copy(diffusion_files[i].replace('.nii.gz','.bvec'),outfile)
infofile=os.path.join(diffdir,'sub00001_%s_dwi_%03d.json'%(sesscode,fnum))
if diffusion_series[i]>-1 and (not os.path.exists(infofile) or overwrite):
infodict=get_infodict_by_num(diffusion_series[i],dcmhdr)
f=open(infofile,'w')
f.write(json.dumps(infodict,indent=4))
f.close()
# get task data
for t in range(len(task_descriptors)):
td=task_descriptors[t]
taskfiles=glob.glob(os.path.join(subdir,'BOLD/%s*/bold.nii.gz'%td))
if len(taskfiles)==0:
continue
for tf in range(len(taskfiles)):
taskfile=taskfiles[tf]
seriesnum=int(os.path.dirname(taskfile).split('_')[-1])
infodict=get_infodict_by_num(seriesnum,dcmhdr)
print infodict['Series Description']
outfile=os.path.join(outdir,'%s/functional/sub00001_%s_task%03d_run%03d_bold.nii.gz'%(sesscode,sesscode,t+2,tf+1))
if not os.path.exists(outfile) or overwrite:
logged_copy(taskfile,outfile)
infofile=outfile.replace('nii.gz','json')
if not os.path.exists(infofile) or overwrite:
f=open(infofile,'w')
f.write(json.dumps(infodict,indent=4))
f.close()
# get onset info from model
modelfile=os.path.join(subdir,'model/model%03d/task%03d_run%03d_333.feat/design.fsf'%(t+1,t+1,tf+1))
if os.path.exists(modelfile):
print 'found modelfile',modelfile
design=mvpa2.misc.fsl.read_fsl_design(modelfile)
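                    # note: the parsed design is only loaded here; writing the
                    # onsets out to the distro is not implemented in this script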
# get field maps
fmfile_orig=glob.glob(os.path.join(subdir,'fieldmap/fieldmap_mag.nii.gz'))
if len(fmfile_orig)>0:
fmdir=os.path.join(sessdir,'fieldmap')
makedir(fmdir)
outfile_mag=os.path.join(fmdir,'sub00001_%s_fieldmap_001_magnitude.nii.gz'%sesscode)
if not os.path.exists(outfile_mag):
logged_copy(fmfile_orig[0],outfile_mag)
outfile_phase=os.path.join(fmdir,'sub00001_%s_fieldmap_001_phase.nii.gz'%sesscode)
if not os.path.exists(outfile_phase):
logged_copy(fmfile_orig[0].replace('mag','phase'),outfile_phase)
infofile=os.path.join(fmdir,'sub00001_%s_fieldmap_001_scan.json'%sesscode)
if not os.path.exists(infofile):
fm_series=[]
for k in dcmhdr.iterkeys():
if dcmhdr[k].SeriesDescription=='gre_field_mapping':
fm_series.append(int(k))
fm_series.sort()
infodict=get_infodict_by_num(fm_series[0],dcmhdr)
f=open(infofile,'w')
f.write(json.dumps(infodict,indent=4))
f.close()
|
|
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
# Copyright (C) 2012 Simon Horman <horms ad verge net au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import itertools
from ryu import exception
from ryu.lib import mac
from ryu.lib.pack_utils import msg_pack_into
from . import ofproto_v1_0
from . import inet
import logging
LOG = logging.getLogger('ryu.ofproto.nx_match')
UINT64_MAX = (1 << 64) - 1
UINT32_MAX = (1 << 32) - 1
UINT16_MAX = (1 << 16) - 1
FWW_IN_PORT = 1 << 0
FWW_DL_TYPE = 1 << 4
FWW_NW_PROTO = 1 << 5
# No corresponding OFPFW_* bits
FWW_NW_DSCP = 1 << 1
FWW_NW_ECN = 1 << 2
FWW_ARP_SHA = 1 << 3
FWW_ARP_THA = 1 << 6
FWW_IPV6_LABEL = 1 << 7
FWW_NW_TTL = 1 << 8
FWW_ALL = (1 << 13) - 1
FLOW_NW_FRAG_ANY = 1 << 0
FLOW_NW_FRAG_LATER = 1 << 1
FLOW_NW_FRAG_MASK = FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER
IP_ECN_MASK = 0x03
IP_DSCP_MASK = 0xfc
MF_PACK_STRING_BE64 = '!Q'
MF_PACK_STRING_BE32 = '!I'
MF_PACK_STRING_BE16 = '!H'
MF_PACK_STRING_8 = '!B'
MF_PACK_STRING_MAC = '!6s'
MF_PACK_STRING_IPV6 = '!8H'
_MF_FIELDS = {}
FLOW_N_REGS = 8 # ovs 1.5
class Flow(object):
def __init__(self):
self.in_port = 0
self.dl_vlan = 0
self.dl_vlan_pcp = 0
self.dl_src = mac.DONTCARE
self.dl_dst = mac.DONTCARE
self.dl_type = 0
self.tp_dst = 0
self.tp_src = 0
self.nw_tos = 0
self.vlan_tci = 0
self.nw_ttl = 0
self.nw_proto = 0
self.arp_sha = 0
self.arp_tha = 0
self.nw_src = 0
self.nw_dst = 0
self.tun_id = 0
self.arp_spa = 0
self.arp_tpa = 0
self.ipv6_src = []
self.ipv6_dst = []
self.nd_target = []
self.nw_frag = 0
self.regs = [0] * FLOW_N_REGS
self.ipv6_label = 0
class FlowWildcards(object):
def __init__(self):
self.dl_src_mask = 0
self.dl_dst_mask = 0
self.tp_src_mask = 0
self.tp_dst_mask = 0
self.nw_src_mask = 0
self.nw_dst_mask = 0
self.tun_id_mask = 0
self.arp_spa_mask = 0
self.arp_tpa_mask = 0
self.vlan_tci_mask = 0
self.ipv6_src_mask = []
self.ipv6_dst_mask = []
self.nd_target_mask = []
self.nw_frag_mask = 0
self.regs_bits = 0
self.regs_mask = [0] * FLOW_N_REGS
self.wildcards = ofproto_v1_0.OFPFW_ALL
class ClsRule(object):
"""describe a matching rule for OF 1.0 OFPMatch (and NX).
"""
def __init__(self):
self.wc = FlowWildcards()
self.flow = Flow()
def set_in_port(self, port):
self.wc.wildcards &= ~FWW_IN_PORT
self.flow.in_port = port
def set_dl_vlan(self, dl_vlan):
self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
self.flow.dl_vlan = dl_vlan
def set_dl_vlan_pcp(self, dl_vlan_pcp):
self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
self.flow.dl_vlan_pcp = dl_vlan_pcp
def set_dl_dst(self, dl_dst):
self.flow.dl_dst = dl_dst
def set_dl_dst_masked(self, dl_dst, mask):
self.wc.dl_dst_mask = mask
# bit-wise and of the corresponding elements of dl_dst and mask
self.flow.dl_dst = mac.haddr_bitand(dl_dst, mask)
def set_dl_src(self, dl_src):
self.flow.dl_src = dl_src
def set_dl_src_masked(self, dl_src, mask):
self.wc.dl_src_mask = mask
self.flow.dl_src = mac.haddr_bitand(dl_src, mask)
def set_dl_type(self, dl_type):
self.wc.wildcards &= ~FWW_DL_TYPE
self.flow.dl_type = dl_type
def set_dl_tci(self, tci):
self.set_dl_tci_masked(tci, UINT16_MAX)
def set_dl_tci_masked(self, tci, mask):
self.wc.vlan_tci_mask = mask
self.flow.vlan_tci = tci
def set_tp_src(self, tp_src):
self.set_tp_src_masked(tp_src, UINT16_MAX)
def set_tp_src_masked(self, tp_src, mask):
self.wc.tp_src_mask = mask
self.flow.tp_src = tp_src & mask
def set_tp_dst(self, tp_dst):
self.set_tp_dst_masked(tp_dst, UINT16_MAX)
def set_tp_dst_masked(self, tp_dst, mask):
self.wc.tp_dst_mask = mask
self.flow.tp_dst = tp_dst & mask
def set_nw_proto(self, nw_proto):
self.wc.wildcards &= ~FWW_NW_PROTO
self.flow.nw_proto = nw_proto
def set_nw_src(self, nw_src):
self.set_nw_src_masked(nw_src, UINT32_MAX)
def set_nw_src_masked(self, nw_src, mask):
self.flow.nw_src = nw_src
self.wc.nw_src_mask = mask
def set_nw_dst(self, nw_dst):
self.set_nw_dst_masked(nw_dst, UINT32_MAX)
def set_nw_dst_masked(self, nw_dst, mask):
self.flow.nw_dst = nw_dst
self.wc.nw_dst_mask = mask
def set_nw_dscp(self, nw_dscp):
self.wc.wildcards &= ~FWW_NW_DSCP
self.flow.nw_tos &= ~IP_DSCP_MASK
self.flow.nw_tos |= nw_dscp & IP_DSCP_MASK
def set_icmp_type(self, icmp_type):
self.set_tp_src(icmp_type)
def set_icmp_code(self, icmp_code):
self.set_tp_dst(icmp_code)
def set_tun_id(self, tun_id):
self.set_tun_id_masked(tun_id, UINT64_MAX)
def set_tun_id_masked(self, tun_id, mask):
self.wc.tun_id_mask = mask
self.flow.tun_id = tun_id & mask
def set_nw_ecn(self, nw_ecn):
self.wc.wildcards &= ~FWW_NW_ECN
self.flow.nw_tos &= ~IP_ECN_MASK
self.flow.nw_tos |= nw_ecn & IP_ECN_MASK
def set_nw_ttl(self, nw_ttl):
self.wc.wildcards &= ~FWW_NW_TTL
self.flow.nw_ttl = nw_ttl
def set_nw_frag(self, nw_frag):
self.wc.nw_frag_mask |= FLOW_NW_FRAG_MASK
self.flow.nw_frag = nw_frag
def set_nw_frag_masked(self, nw_frag, mask):
self.wc.nw_frag_mask = mask
self.flow.nw_frag = nw_frag & mask
def set_arp_spa(self, spa):
self.set_arp_spa_masked(spa, UINT32_MAX)
def set_arp_spa_masked(self, spa, mask):
self.flow.arp_spa = spa
self.wc.arp_spa_mask = mask
def set_arp_tpa(self, tpa):
self.set_arp_tpa_masked(tpa, UINT32_MAX)
def set_arp_tpa_masked(self, tpa, mask):
self.flow.arp_tpa = tpa
self.wc.arp_tpa_mask = mask
def set_arp_sha(self, sha):
self.wc.wildcards &= ~FWW_ARP_SHA
self.flow.arp_sha = sha
def set_arp_tha(self, tha):
self.wc.wildcards &= ~FWW_ARP_THA
self.flow.arp_tha = tha
def set_icmpv6_type(self, icmp_type):
self.set_tp_src(icmp_type)
def set_icmpv6_code(self, icmp_code):
self.set_tp_dst(icmp_code)
def set_ipv6_label(self, label):
self.wc.wildcards &= ~FWW_IPV6_LABEL
self.flow.ipv6_label = label
def set_ipv6_src_masked(self, src, mask):
self.wc.ipv6_src_mask = mask
self.flow.ipv6_src = [x & y for (x, y) in itertools.izip(src, mask)]
def set_ipv6_src(self, src):
self.flow.ipv6_src = src
def set_ipv6_dst_masked(self, dst, mask):
self.wc.ipv6_dst_mask = mask
self.flow.ipv6_dst = [x & y for (x, y) in itertools.izip(dst, mask)]
def set_ipv6_dst(self, dst):
self.flow.ipv6_dst = dst
def set_nd_target_masked(self, target, mask):
self.wc.nd_target_mask = mask
self.flow.nd_target = [x & y for (x, y) in
itertools.izip(target, mask)]
def set_nd_target(self, target):
self.flow.nd_target = target
def set_reg(self, reg_idx, value):
self.set_reg_masked(reg_idx, value, 0)
def set_reg_masked(self, reg_idx, value, mask):
self.wc.regs_mask[reg_idx] = mask
self.flow.regs[reg_idx] = value
self.wc.regs_bits |= (1 << reg_idx)
def flow_format(self):
# Tunnel ID is only supported by NXM
if self.wc.tun_id_mask != 0:
return ofproto_v1_0.NXFF_NXM
# Masking DL_DST is only supported by NXM
if self.wc.dl_dst_mask:
return ofproto_v1_0.NXFF_NXM
# Masking DL_SRC is only supported by NXM
if self.wc.dl_src_mask:
return ofproto_v1_0.NXFF_NXM
# ECN is only supported by NXM
if not self.wc.wildcards & FWW_NW_ECN:
return ofproto_v1_0.NXFF_NXM
if self.wc.regs_bits > 0:
return ofproto_v1_0.NXFF_NXM
return ofproto_v1_0.NXFF_OPENFLOW10
def match_tuple(self):
"""return a tuple which can be used as *args for
ofproto_v1_0_parser.OFPMatch.__init__().
see Datapath.send_flow_mod.
"""
assert self.flow_format() == ofproto_v1_0.NXFF_OPENFLOW10
wildcards = ofproto_v1_0.OFPFW_ALL
if not self.wc.wildcards & FWW_IN_PORT:
wildcards &= ~ofproto_v1_0.OFPFW_IN_PORT
if self.flow.dl_src != mac.DONTCARE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_SRC
if self.flow.dl_dst != mac.DONTCARE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_DST
if not self.wc.wildcards & FWW_DL_TYPE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_TYPE
if self.flow.dl_vlan != 0:
wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
if self.flow.dl_vlan_pcp != 0:
wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
if self.flow.nw_tos != 0:
wildcards &= ~ofproto_v1_0.OFPFW_NW_TOS
if self.flow.nw_proto != 0:
wildcards &= ~ofproto_v1_0.OFPFW_NW_PROTO
        if self.wc.nw_src_mask != 0 and "01" not in bin(self.wc.nw_src_mask):
            # contiguous prefix mask: bin()'s zero count (minus the '0' in
            # the '0b' prefix) is the number of wildcarded host bits
            wildcards &= ~ofproto_v1_0.OFPFW_NW_SRC_MASK
            maskbits = (bin(self.wc.nw_src_mask).count("0") - 1)
            wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
        if self.wc.nw_dst_mask != 0 and "01" not in bin(self.wc.nw_dst_mask):
            wildcards &= ~ofproto_v1_0.OFPFW_NW_DST_MASK
            maskbits = (bin(self.wc.nw_dst_mask).count("0") - 1)
            wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_DST_SHIFT)
if self.flow.tp_src != 0:
wildcards &= ~ofproto_v1_0.OFPFW_TP_SRC
if self.flow.tp_dst != 0:
wildcards &= ~ofproto_v1_0.OFPFW_TP_DST
return (wildcards, self.flow.in_port, self.flow.dl_src,
self.flow.dl_dst, self.flow.dl_vlan, self.flow.dl_vlan_pcp,
self.flow.dl_type, self.flow.nw_tos & IP_DSCP_MASK,
self.flow.nw_proto, self.flow.nw_src, self.flow.nw_dst,
self.flow.tp_src, self.flow.tp_dst)
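# A minimal usage sketch for ClsRule (illustrative values, not part of the
# module API): build a rule, check which flow format it needs, then
# serialize it for an OF 1.0 flow_mod.
#
#   rule = ClsRule()
#   rule.set_in_port(1)
#   rule.set_dl_type(0x0800)                        # IPv4
#   rule.set_nw_src_masked(0x0a000000, 0xff000000)  # 10.0.0.0/8
#   assert rule.flow_format() == ofproto_v1_0.NXFF_OPENFLOW10
#   match_args = rule.match_tuple()  # *args for OFPMatch.__init__()
#   rule.set_tun_id(42)              # tunnel id is NXM-only ...
#   assert rule.flow_format() == ofproto_v1_0.NXFF_NXM  # ... so NXM now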
def _set_nxm_headers(nxm_headers):
    '''Return a class decorator that annotates an MF field class with its
    NXM headers'''
def _set_nxm_headers_dec(self):
self.nxm_headers = nxm_headers
return self
return _set_nxm_headers_dec
def _register_make(cls):
    '''class decorator to register cls.make() in _MF_FIELDS, keyed by each
    of the class's NXM headers'''
    assert cls.nxm_headers is not None
    assert cls.nxm_headers != []
for nxm_header in cls.nxm_headers:
assert nxm_header not in _MF_FIELDS
_MF_FIELDS[nxm_header] = cls.make
return cls
def mf_from_nxm_header(nxm_header):
if nxm_header not in _MF_FIELDS:
return None
make = _MF_FIELDS.get(nxm_header)
assert make is not None
return make(nxm_header)
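# How the registration pieces fit together (commentary only; MFExample is a
# placeholder name, and the decorated classes below follow this pattern):
#
#   @_register_make                    # stores cls.make in _MF_FIELDS ...
#   @_set_nxm_headers([HEADER, ...])   # ... keyed by each listed NXM header
#   class MFExample(MFField):
#       ...
#
#   mf_from_nxm_header(HEADER)         # -> MFExample.make(HEADER), or None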
class MFField(object):
_FIELDS_HEADERS = {}
@staticmethod
def register_field_header(headers):
def _register_field_header(cls):
for header in headers:
MFField._FIELDS_HEADERS[header] = cls
return cls
return _register_field_header
def __init__(self, nxm_header, pack_str):
self.nxm_header = nxm_header
self.pack_str = pack_str
self.n_bytes = struct.calcsize(pack_str)
self.n_bits = self.n_bytes * 8
@classmethod
def parser(cls, buf, offset):
(header,) = struct.unpack_from('!I', buf, offset)
cls_ = MFField._FIELDS_HEADERS.get(header)
if cls_:
field = cls_.field_parser(header, buf, offset)
        else:
            raise ValueError('unknown NXM field type: header=0x%08x' % header)
field.length = (header & 0xff) + 4
return field
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
mask = None
if hasmask:
pack_str = '!' + cls.pack_str[1:] * 2
(value, mask) = struct.unpack_from(pack_str, buf,
offset + 4)
else:
(value,) = struct.unpack_from(cls.pack_str, buf,
offset + 4)
return cls(header, value, mask)
def _put(self, buf, offset, value):
msg_pack_into(self.pack_str, buf, offset, value)
return self.n_bytes
def putw(self, buf, offset, value, mask):
len_ = self._put(buf, offset, value)
return len_ + self._put(buf, offset + len_, mask)
def _is_all_ones(self, value):
return value == (1 << self.n_bits) - 1
def putm(self, buf, offset, value, mask):
if mask == 0:
return 0
elif self._is_all_ones(mask):
return self._put(buf, offset, value)
else:
return self.putw(buf, offset, value, mask)
def _putv6(self, buf, offset, value):
msg_pack_into(self.pack_str, buf, offset, *value)
return self.n_bytes
def putv6(self, buf, offset, value, mask):
len_ = self._putv6(buf, offset, value)
if len(mask):
return len_ + self._putv6(buf, offset + len_, mask)
return len_
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IN_PORT])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IN_PORT])
class MFInPort(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFInPort, self).__init__(header, MFInPort.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFInPort.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.in_port)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_DST, ofproto_v1_0.NXM_OF_ETH_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_DST,
ofproto_v1_0.NXM_OF_ETH_DST_W])
class MFEthDst(MFField):
pack_str = MF_PACK_STRING_MAC
def __init__(self, header, value, mask=None):
super(MFEthDst, self).__init__(header, MFEthDst.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthDst.pack_str)
def put(self, buf, offset, rule):
if rule.wc.dl_dst_mask:
return self.putw(buf, offset, rule.flow.dl_dst,
rule.wc.dl_dst_mask)
else:
return self._put(buf, offset, rule.flow.dl_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_SRC, ofproto_v1_0.NXM_OF_ETH_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_SRC,
ofproto_v1_0.NXM_OF_ETH_SRC_W])
class MFEthSrc(MFField):
pack_str = MF_PACK_STRING_MAC
def __init__(self, header, value, mask=None):
super(MFEthSrc, self).__init__(header, MFEthSrc.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthSrc.pack_str)
def put(self, buf, offset, rule):
if rule.wc.dl_src_mask:
return self.putw(buf, offset, rule.flow.dl_src,
rule.wc.dl_src_mask)
else:
return self._put(buf, offset, rule.flow.dl_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_TYPE])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_TYPE])
class MFEthType(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFEthType, self).__init__(header, MFEthType.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthType.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.dl_type)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_VLAN_TCI,
ofproto_v1_0.NXM_OF_VLAN_TCI_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_VLAN_TCI,
ofproto_v1_0.NXM_OF_VLAN_TCI_W])
class MFVlan(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFVlan, self).__init__(header, MFVlan.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFVlan.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.vlan_tci,
rule.wc.vlan_tci_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_TOS])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_TOS])
class MFIPDSCP(MFField):
pack_str = MF_PACK_STRING_8
def __init__(self, header, value, mask=None):
super(MFIPDSCP, self).__init__(header, MFIPDSCP.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFIPDSCP.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset,
rule.flow.nw_tos & IP_DSCP_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TUN_ID,
ofproto_v1_0.NXM_NX_TUN_ID_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_TUN_ID,
ofproto_v1_0.NXM_NX_TUN_ID_W])
class MFTunId(MFField):
pack_str = MF_PACK_STRING_BE64
def __init__(self, header, value, mask=None):
super(MFTunId, self).__init__(header, MFTunId.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFTunId.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tun_id, rule.wc.tun_id_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_SRC, ofproto_v1_0.NXM_OF_IP_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_SRC,
ofproto_v1_0.NXM_OF_IP_SRC_W])
class MFIPSrc(MFField):
pack_str = MF_PACK_STRING_BE32
def __init__(self, header, value, mask=None):
super(MFIPSrc, self).__init__(header, MFIPSrc.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, MFIPSrc.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.nw_src, rule.wc.nw_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_DST, ofproto_v1_0.NXM_OF_IP_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_DST,
ofproto_v1_0.NXM_OF_IP_DST_W])
class MFIPDst(MFField):
pack_str = MF_PACK_STRING_BE32
def __init__(self, header, value, mask=None):
super(MFIPDst, self).__init__(header, MFIPDst.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, MFIPDst.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.nw_dst, rule.wc.nw_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_ECN])
class MFIPECN(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset,
rule.flow.nw_tos & IP_ECN_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_TTL])
class MFIPTTL(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.nw_ttl)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_PROTO])
class MFIPProto(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.nw_proto)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_SRC, ofproto_v1_0.NXM_OF_TCP_SRC_W,
ofproto_v1_0.NXM_OF_UDP_SRC, ofproto_v1_0.NXM_OF_UDP_SRC_W])
class MFTPSRC(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tp_src, rule.wc.tp_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_DST, ofproto_v1_0.NXM_OF_TCP_DST_W,
ofproto_v1_0.NXM_OF_UDP_DST, ofproto_v1_0.NXM_OF_UDP_DST_W])
class MFTPDST(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tp_dst, rule.wc.tp_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_SPA, ofproto_v1_0.NXM_OF_ARP_SPA_W])
class MFArpSpa(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.arp_spa, rule.wc.arp_spa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_TPA, ofproto_v1_0.NXM_OF_ARP_TPA_W])
class MFArpTpa(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.arp_tpa, rule.wc.arp_tpa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_SHA])
class MFArpSha(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_MAC)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.arp_sha)
class MFIPV6(object):
pack_str = MF_PACK_STRING_IPV6
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
if hasmask:
pack_string = '!' + cls.pack_str[1:] * 2
value = struct.unpack_from(pack_string, buf, offset + 4)
return cls(header, list(value[:8]), list(value[8:]))
else:
value = struct.unpack_from(cls.pack_str, buf, offset + 4)
return cls(header, list(value))
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_SRC,
ofproto_v1_0.NXM_NX_IPV6_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_SRC,
ofproto_v1_0.NXM_NX_IPV6_SRC_W])
class MFIPV6Src(MFIPV6, MFField):
def __init__(self, header, value, mask=None):
super(MFIPV6Src, self).__init__(header, MFIPV6Src.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, cls.pack_str)
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.ipv6_src,
rule.wc.ipv6_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_DST,
ofproto_v1_0.NXM_NX_IPV6_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_DST,
ofproto_v1_0.NXM_NX_IPV6_DST_W])
class MFIPV6Dst(MFIPV6, MFField):
def __init__(self, header, value, mask=None):
super(MFIPV6Dst, self).__init__(header, MFIPV6Dst.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, cls.pack_str)
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.ipv6_dst,
rule.wc.ipv6_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ND_TARGET,
ofproto_v1_0.NXM_NX_ND_TARGET_W])
class MFNdTarget(MFField):
@classmethod
def make(cls, header):
return cls(header, '!4I')
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.nd_target,
rule.wc.nd_target_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_FRAG,
ofproto_v1_0.NXM_NX_IP_FRAG_W])
class MFIpFrag(MFField):
@classmethod
def make(cls, header):
return cls(header, '!B')
def put(self, buf, offset, rule):
if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
return self._put(buf, offset, rule.flow.nw_frag)
else:
return self.putw(buf, offset, rule.flow.nw_frag,
rule.wc.nw_frag_mask & FLOW_NW_FRAG_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_THA])
class MFArpTha(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_MAC)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.arp_tha)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_TYPE])
class MFICMPType(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_CODE])
class MFICMPCode(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_TYPE])
class MFICMPV6Type(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_CODE])
class MFICMPV6Code(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_LABEL])
class MFICMPV6Label(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.ipv6_label)
@_register_make
@_set_nxm_headers([ofproto_v1_0.nxm_nx_reg(i) for i in range(FLOW_N_REGS)]
+ [ofproto_v1_0.nxm_nx_reg_w(i) for i in range(FLOW_N_REGS)])
class MFRegister(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
for i in range(FLOW_N_REGS):
if (ofproto_v1_0.nxm_nx_reg(i) == self.nxm_header or
ofproto_v1_0.nxm_nx_reg_w(i) == self.nxm_header):
if rule.wc.regs_mask[i]:
return self.putm(buf, offset, rule.flow.regs[i],
rule.wc.regs_mask[i])
else:
return self._put(buf, offset, rule.flow.regs[i])
def serialize_nxm_match(rule, buf, offset):
old_offset = offset
if not rule.wc.wildcards & FWW_IN_PORT:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IN_PORT, rule)
# Ethernet.
if rule.flow.dl_dst != mac.DONTCARE:
if rule.wc.dl_dst_mask:
header = ofproto_v1_0.NXM_OF_ETH_DST_W
else:
header = ofproto_v1_0.NXM_OF_ETH_DST
offset += nxm_put(buf, offset, header, rule)
if rule.flow.dl_src != mac.DONTCARE:
if rule.wc.dl_src_mask:
header = ofproto_v1_0.NXM_OF_ETH_SRC_W
else:
header = ofproto_v1_0.NXM_OF_ETH_SRC
offset += nxm_put(buf, offset, header, rule)
if not rule.wc.wildcards & FWW_DL_TYPE:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ETH_TYPE, rule)
# 802.1Q
if rule.wc.vlan_tci_mask != 0:
if rule.wc.vlan_tci_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_VLAN_TCI
else:
header = ofproto_v1_0.NXM_OF_VLAN_TCI_W
offset += nxm_put(buf, offset, header, rule)
# L3
if not rule.wc.wildcards & FWW_NW_DSCP:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_TOS, rule)
if not rule.wc.wildcards & FWW_NW_ECN:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_ECN, rule)
if not rule.wc.wildcards & FWW_NW_TTL:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_TTL, rule)
if not rule.wc.wildcards & FWW_NW_PROTO:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_PROTO, rule)
if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
== inet.IPPROTO_ICMP):
if rule.wc.tp_src_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_TYPE, rule)
if rule.wc.tp_dst_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_CODE, rule)
if rule.flow.tp_src != 0:
if rule.flow.nw_proto == 6:
if rule.wc.tp_src_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_TCP_SRC
else:
header = ofproto_v1_0.NXM_OF_TCP_SRC_W
elif rule.flow.nw_proto == 17:
if rule.wc.tp_src_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_UDP_SRC
else:
header = ofproto_v1_0.NXM_OF_UDP_SRC_W
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
if rule.flow.tp_dst != 0:
if rule.flow.nw_proto == 6:
if rule.wc.tp_dst_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_TCP_DST
else:
header = ofproto_v1_0.NXM_OF_TCP_DST_W
elif rule.flow.nw_proto == 17:
if rule.wc.tp_dst_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_UDP_DST
else:
header = ofproto_v1_0.NXM_OF_UDP_DST_W
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
# IP Source and Destination
if rule.flow.nw_src != 0:
if rule.wc.nw_src_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_IP_SRC
else:
header = ofproto_v1_0.NXM_OF_IP_SRC_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.nw_dst != 0:
if rule.wc.nw_dst_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_IP_DST
else:
header = ofproto_v1_0.NXM_OF_IP_DST_W
offset += nxm_put(buf, offset, header, rule)
# IPv6
if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
== inet.IPPROTO_ICMPV6):
if rule.wc.tp_src_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_TYPE,
rule)
if rule.wc.tp_dst_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_CODE,
rule)
if not rule.wc.wildcards & FWW_IPV6_LABEL:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IPV6_LABEL, rule)
if len(rule.flow.ipv6_src):
if len(rule.wc.ipv6_src_mask):
header = ofproto_v1_0.NXM_NX_IPV6_SRC_W
else:
header = ofproto_v1_0.NXM_NX_IPV6_SRC
offset += nxm_put(buf, offset, header, rule)
if len(rule.flow.ipv6_dst):
if len(rule.wc.ipv6_dst_mask):
header = ofproto_v1_0.NXM_NX_IPV6_DST_W
else:
header = ofproto_v1_0.NXM_NX_IPV6_DST
offset += nxm_put(buf, offset, header, rule)
if len(rule.flow.nd_target):
if len(rule.wc.nd_target_mask):
header = ofproto_v1_0.NXM_NX_ND_TARGET_W
else:
header = ofproto_v1_0.NXM_NX_ND_TARGET
offset += nxm_put(buf, offset, header, rule)
# ARP
if rule.flow.arp_spa != 0:
if rule.wc.arp_spa_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_ARP_SPA
else:
header = ofproto_v1_0.NXM_OF_ARP_SPA_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.arp_tpa != 0:
if rule.wc.arp_tpa_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_ARP_TPA
else:
header = ofproto_v1_0.NXM_OF_ARP_TPA_W
offset += nxm_put(buf, offset, header, rule)
if not rule.wc.wildcards & FWW_ARP_SHA:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_SHA, rule)
if not rule.wc.wildcards & FWW_ARP_THA:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_THA, rule)
if rule.flow.nw_frag:
if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
header = ofproto_v1_0.NXM_NX_IP_FRAG
else:
header = ofproto_v1_0.NXM_NX_IP_FRAG_W
offset += nxm_put(buf, offset, header, rule)
# Tunnel Id
if rule.wc.tun_id_mask != 0:
if rule.wc.tun_id_mask == UINT64_MAX:
header = ofproto_v1_0.NXM_NX_TUN_ID
else:
header = ofproto_v1_0.NXM_NX_TUN_ID_W
offset += nxm_put(buf, offset, header, rule)
# XXX: Cookie
for i in range(FLOW_N_REGS):
if rule.wc.regs_bits & (1 << i):
if rule.wc.regs_mask[i]:
header = ofproto_v1_0.nxm_nx_reg_w(i)
else:
header = ofproto_v1_0.nxm_nx_reg(i)
offset += nxm_put(buf, offset, header, rule)
# Pad
pad_len = round_up(offset) - offset
msg_pack_into("%dx" % pad_len, buf, offset)
# The returned length, the match_len, does not include the pad
return offset - old_offset
def nxm_put(buf, offset, header, rule):
nxm = NXMatch(header)
len_ = nxm.put_header(buf, offset)
mf = mf_from_nxm_header(nxm.header)
return len_ + mf.put(buf, offset + len_, rule)
def round_up(length):
    return (length + 7) // 8 * 8  # Round up to a multiple of 8 (integer division)
class NXMatch(object):
def __init__(self, header):
self.header = header
@classmethod
def parser(cls, buf, offset, match_len):
if match_len < 4:
raise exception.OFPMalformedMessage
(header,) = struct.unpack_from(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset)
instance = cls(header)
payload_len = instance.length()
if payload_len == 0 or match_len < payload_len + 4:
raise exception.OFPMalformedMessage
return instance
def vendor(self):
return self.header >> 16
def field(self):
        return (self.header >> 9) & 0x7f
    def type(self):
        return (self.header >> 9) & 0x7fffff
def hasmask(self):
return (self.header >> 8) & 1
def length(self):
return self.header & 0xff
def show(self):
return ('%08x (vendor=%x, field=%x, hasmask=%x len=%x)' %
(self.header, self.vendor(), self.field(),
self.hasmask(), self.length()))
def put_header(self, buf, offset):
msg_pack_into(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset, self.header)
return struct.calcsize(ofproto_v1_0.NXM_HEADER_PACK_STRING)
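# A minimal sketch (hypothetical header value, not part of the original
# module) of how an NXM header decomposes: vendor in the top 16 bits,
# field in bits 9..15, the has-mask flag in bit 8, and the payload length
# in the low byte.
def _nxm_header_demo():
    header = (0x0000 << 16) | (3 << 9) | (0 << 8) | 6  # 0x00000606
    nxm = NXMatch(header)
    return nxm.show()  # -> '00000606 (vendor=0, field=3, hasmask=0 len=6)'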
|
|
# -*- coding: utf-8 -*-
# continued from pattern.py
# defining the basic object we will be working with
# Note: makeVideo does not work yet - the OpenCV issues have not been solved,
# so only the slides are being output for the moment
# 2013-09-23
##############################################################################################
#
#==== imports ================================================================================
# some of the stuff were moved to defaultParameters.py
import copy
import time
import os
import re
import numpy
import numpy as np
import numpy.ma as ma
#import matplotlib
import matplotlib.pyplot as plt
#import scipy.misc.pilutil as smp
#import numpy.fft as fft
#import shutil
#import sys
import pickle
from copy import deepcopy
try:
from scipy import signal
from scipy import interpolate
except ImportError:
#print "Scipy not installed"
pass
#==== setting up the global parameters========================================================
import defaultParameters as dp
from defaultParameters import *  # bad habit, but these variables are prefixed with "default"
# (or at least they are meant to be)
import colourbarQPESUMS # the colourbars for the Central Weather Bureau
import colourbarQPESUMSwhiteBackground # the same as above, with white backgrounds
#==== importing pattern.py====================================================================
from . import pattern
try:
from dataStreamTools import makeVideo as mv
except ImportError:
print "import error! opencv not installed(?!)"
from dataStreamTools import kongrey as kr
dbz = pattern.DBZ
ds = pattern.DBZstream
#==== defining the classes ===================================================================
#class DataStreamSets:
class DataStreamSet: # correcting a long-standing typo 2014-03-09
"""
class dataStreamSet: DSS = dataStreamSet(ds0, ds1, ds2,...dsN)
where ds0 = observations, ds1, ds2,.. = models
with the bare basic methods of analysis and output to panel of 20+ images
"""
############################################################################
# initialisation and basic function calls
def __init__(self, ds0, *args):
self.name = ds0.name + '_' + '_'.join([v.name for v in args])
self.obs = ds0
self.wrfs = list(args)
############################################################################
# simple building block functions
def getAllDataTimes(self):
"""
get the union of the sets of dataTimes for all streams
"""
dataTimes = set([v.dataTime for v in self.obs])
for wrf in self.wrfs:
dataTimes = dataTimes.union([v.dataTime for v in wrf])
dataTimes = sorted(list(dataTimes))
return dataTimes
def getCommonDataTimes(self):
"""
get the intersection of the sets of dataTimes for all streams
"""
dataTimes = set([v.dataTime for v in self.obs])
for wrf in self.wrfs:
dataTimes = dataTimes.intersection([v.dataTime for v in wrf])
dataTimes = sorted(list(dataTimes))
return dataTimes
def backupMatrices(self):
self.obs.backupMatrices()
for wrf in self.wrfs:
wrf.backupMatrices()
def restoreMatrices(self):
self.obs.restoreMatrices()
for wrf in self.wrfs:
wrf.restoreMatrices()
############################################################################
# I/O's
    def load(self, stream_key="all", verbose=False, **kwargs):
        if stream_key == "all" or stream_key == "obs":
            print "loading obs"
            self.obs.load(**kwargs)
        if stream_key == "all" or stream_key == "wrf" or stream_key == "wrfs":
            print "loading wrfs"
            for wrf in self.wrfs:
                wrf.load(**kwargs)
    def unload(self, stream_key="all", verbose=False, **kwargs):
        if stream_key == "all" or stream_key == "obs":
            print "unloading obs"
            self.obs.unload(**kwargs)
        if stream_key == "all" or stream_key == "wrf" or stream_key == "wrfs":
            print "unloading wrfs"
            for wrf in self.wrfs:
                wrf.unload(**kwargs)
def makeVideo2(self, ordering, outputFolder=''):
"""
make video, with an ordering at each dataTime
ordering = [[1,2,3,5], [3,4,6,1], ...] - first for the first dataTime, second for the second dataTime, etc
"""
return mv.makeVideo( [self.obs] + self.wrfs, # [ds0, ds1, ds2, ds3, ds4, ...], a list of armor.pattern.DBZstream objects
panel_cols = 5, # number of colums in the panel
panel_rows = 5, # no need to be filled
                            fourcc = cv.CV_FOURCC('F', 'L', 'V', '1'),  # NB: assumes OpenCV's cv module is importable (see note at top of file)
fps = defaultFps,
extension= '.avi',
#fourcc = cv.CV_FOURCC('P', 'I', 'M', '1'),
outputFileName ="",
outputFolder=outputFolder,
saveFrames = True, # saving the frames as images
useCV2 = True,
ordering = ordering, # ordering of the models
)
    def makeVideo1(self, ordering, outputFolder=''):
        """
        make video, with a single ordering for each dataStream in its entirety
        ordering = list, e.g. [2,3,4,5,1] <-- WRF2 goes first, then WRF3, WRF4, etc.
        """
        ordering = [ordering] * len(self.getAllDataTimes())
        return self.makeVideo2(ordering, outputFolder)
############################################################################
# analyses
def analyse(self, algorithm):
"""
input: algorithm
output: ordering at each dataTime
ordering = [[1,2,3,5], [3,4,6,1], ...] means WRF1, WRF2,WRF3, WRF5 for dataTime1; WRFs3,4,6,1, for the second dataTime, etc
"""
pass
def matching(self, algorithm, obsTime="", maxHourDiff=7, **kwargs):
"""
input:
algorithm - the function defining the algorithm of matching
algorithm(parameters): (obs, wrf) -> score (real number)
format of algorithm function: def alg1(a=pattern.a, ...., **kwargs):
            obsTime    - time at which obs is compared with the wrfs, e.g. "20140612.0200"
maxHourDiff - the maximal time difference (in hours) between obs and wrfs, e.g. 7 (hours)
kwargs - parameters for the algorithm
output:
ranking with scores and optimal timeshifts
2014-03-07
"""
if obsTime == "": # if the point for matching is not given, pick the first one
obsTime = self.obs[0].dataTime
ranking = []
obs = self.obs
wrfs = self.wrfs
for wrf in wrfs:
x = algorithm(obs, wrf, obsTime=obsTime, maxHourDiff=maxHourDiff, **kwargs)
score = x['score']
timeShift = x['timeShift']
ranking.append( {'wrf': wrf.name, 'timeShift': timeShift, #timeShift: in hours
'score': score,
'dataFolder': wrf.dataFolder,
'obsTime': obsTime,
'maxHourDiff': maxHourDiff # tag them along just in case
} ) #dataFolder = for potential disambiguation
ranking.sort(key=lambda v:v['score'], reverse=True)
return ranking
def filtering(self, algorithm, stream_key="all", name_key="", verbose=False, **kwargs):
"""
input:
algorithm - the function defining the algorithm of filtering
algorithm(parameters): changes a.matrix, a.name, no output given
format of algorithm function: def alg1(a=pattern.a, **kwargs):
stream_key - keyword for choosing the DBZstreams to be filtered
if it's "obs" we filter just all of the self.obs
if it's "wrf" or "wrfs" we filter just all of the self.wrfs
name_key - keyword for choosing the DBZ patterns to be filtered
kwargs - parameters for the algorithm
output:
ranking with scores and optimal timeshifts
2014-03-07
"""
obs = self.obs
wrfs = self.wrfs
# first filter the obs
if stream_key == "all" or stream_key == "obs" or stream_key == "OBS":
for a in obs:
if name_key in a.name:
algorithm(a, **kwargs) # key line
if verbose:
print a.name
if stream_key == "all" or stream_key == "wrf" or stream_key == "wrfs" \
or stream_key == "WRF" or stream_key == "WRFS" :
for wrf in wrfs:
for a in wrf:
if name_key in a.name:
algorithm(a, **kwargs) # key line
if verbose:
print a.name
############################################
# constants
DataStreamSets = DataStreamSet #alias; # correcting a long-standing typo 2014-03-09
DSS = DataStreamSet # alias
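# A minimal sketch (hypothetical, not part of the original module) of an
# algorithm function with the signature DataStreamSet.matching() expects:
# it compares one obs stream with one wrf stream around obsTime and returns
# a dict carrying a numeric 'score' and the optimal 'timeShift' in hours.
def exampleMatchingAlgorithm(obs, wrf, obsTime="", maxHourDiff=7, **kwargs):
    # a real algorithm would score the wrf frames within +/- maxHourDiff
    # hours of obs at obsTime; this placeholder just returns a neutral result
    return {'score': 0., 'timeShift': 0}
# usage sketch:  ranking = dss.matching(exampleMatchingAlgorithm, obsTime="20140311.0000")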
"""
key example: kongrey
"""
from dataStreamTools import kongrey as kr
#compref = pattern.DBZstream(dataFolder= kr.obs_folder,
# #name="COMPREF.DBZ",
# name="",
# lowerLeftCornerLatitudeLongitude = kr.obs_lowerLeft ,
# upperRightCornerLatitudeLongitude = kr.obs_upperRight ,
# outputFolder= kr.summary_folder,
# imageFolder=kr.summary_folder,
# key1="", # keywords to pick out specific files
# key2="", # used only once in the __init__
# key3="",
# preload=False,
# imageExtension = '.png',
# dataExtension = '.txt',
# )
"""
print 'loading observations'
obs = kr.constructOBSstream(dumping=False)
print 'loading models',
wrfsFolder = kr.defaultWRFdumpsFolder # '/home/k/ARMOR/data/KONG-REY/summary/WRF[regridded]'
wrfs = []
for i in range(1,21):
print i,
wrf = pickle.load(open(wrfsFolder+'dbzstream' + ('0'+str(i))[-2:] + '.pydump'))
#wrf.setDataFolder(asdfasdf) # haven't defined this function in pattern.DBZstream yet
wrfs.append(wrf)
kongreyDSS = DSS(obs, *wrfs)
"""
print 'constructing kongreyDSS'
obs = ds(name="COMPREF.DBZ", dataFolder=defaultRootFolder + 'data/KONG-REY/OBS/')
wrfs = []
for i in range(1,21):
print i,
    wrfName = 'WRF' + ('0'+str(i))[-2:]
wrf = ds(name=wrfName, key1=wrfName,
dataFolder=defaultRootFolder + 'data/KONG-REY/summary/WRF[regridded]/')
wrfs.append(wrf)
kongreyDSS = DSS(obs, *wrfs)
def constructDSS(obsFolder, wrfsFolder):
obsName = obsFolder.split("/")[-1]
wrfsName = wrfsFolder.split("/")[-1]
print 'Constructing DSS from:', obsName, ",", wrfsName
print obsFolder
print wrfsFolder
obs = ds(name=obsName, dataFolder=obsFolder)
wrfs = []
for i in range(1,21):
print i,
        wrfName = 'WRF' + ('0'+str(i))[-2:]
wrf = ds(name=wrfName, key1=wrfName,
dataFolder=wrfsFolder)
wrfs.append(wrf)
dss = DSS(obs, *wrfs)
return dss
print "constructing march11 - march13 DSS objects"
march11 = constructDSS(dp.defaultRootFolder+"data/march2014/QPESUMS/",
dp.defaultRootFolder+"data/march2014/WRFEPS[regridded]/20140311/")
march11.name = "Rainband_11_March_2014"
march11.obs.list= [v for v in march11.obs.list if '20140311' in v.dataTime]
march12 = constructDSS(dp.defaultRootFolder+"data/march2014/QPESUMS/",
dp.defaultRootFolder+"data/march2014/WRFEPS[regridded]/20140312/")
march12.name = "Rainband_12_March_2014"
march12.obs.list= [v for v in march12.obs.list if '20140312' in v.dataTime]
march13 = constructDSS(dp.defaultRootFolder+"data/march2014/QPESUMS/",
dp.defaultRootFolder+"data/march2014/WRFEPS[regridded]/20140313/")
march13.name = "Rainband_13_March_2014"
march13.obs.list= [v for v in march13.obs.list if '20140313' in v.dataTime]
print "constructing may2014 DSS objects"
may19 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS19[regridded]/")
may19.name = "Rainband_19_May_2014"
may19.obs.list= [v for v in may19.obs.list if '20140519' in v.dataTime]
may20 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS20[regridded]/")
may20.name = "Rainband_20_May_2014"
may20.obs.list= [v for v in may20.obs.list if '20140520' in v.dataTime]
may21 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS21[regridded]/")
may21.name = "Rainband_21_May_2014"
may21.obs.list= [v for v in may21.obs.list if '20140521' in v.dataTime]
may22 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS22[regridded]/")
may22.name = "Rainband_22_May_2014"
may22.obs.list= [v for v in may22.obs.list if '20140522' in v.dataTime]
may23 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS23[regridded]/")
may23.name = "Rainband_23_May_2014"
may23.obs.list= [v for v in may23.obs.list if '20140523' in v.dataTime]
|
|
# https://github.com/alexalemi/segmentation/blob/master/code/splitters.py
"""
Collect the various splitting strategies in one place
"""
import numpy as np
from scipy.ndimage import generic_filter
from scipy.spatial.distance import cdist
from numpy.random import rand
import tools
####################
# C99
####################
def rankkern(x):
    """ The kernel for the rank transformation: measures the fraction of the
    neighbours that take a value less than the middle value """
n = x.size
mid = n//2
better = ( (x >= 0) & (x<x[mid]) ).sum()
return better / ( (x>=0).sum() - 1.0)
def rankify(mat, size=11):
""" Apply the ranking transformation of a given size """
return generic_filter(mat, rankkern, size=(size,size), mode='constant', cval=-1)
def c99score(distsmat, hyp, minlength=1, maxlength=None):
    """ Compute the Choi C99 score for a hypothesis splitting """
N = distsmat.shape[0]
beta = 0.0
alpha = 0.0
for (a,b) in tools.seg_iter(hyp):
beta += distsmat[a:b,a:b].sum()
alpha += (b-a)**2
if minlength:
if (b-a) < minlength: beta += -np.inf
if maxlength:
if (b-a) > maxlength: beta += -np.inf
return -beta/(alpha+0.)
def c99split(distsmat, k, rank=0, *args, **kwargs):
    """ Do the Choi-style C99 splitting, given a matrix of distances and k
    splits to perform. If the rank keyword is positive, the ranking
    transformation is applied first, with rank giving the size of the
    ranking filter """
# perform ranking if desired
if rank:
distsmat = rankify(distsmat, rank)
N = distsmat.shape[0]
score = np.inf
splits = [N]
n = 0
while n < k:
newans = min(
( c99score( distsmat, sorted(splits+[i]), *args, **kwargs ), splits+[i] )
for i in xrange(1,N-1) if i not in set(splits) )
n += 1
splits = newans[1]
score = newans[0]
return sorted(splits), score
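# Usage sketch (made-up data; assumes tools.seg_iter yields consecutive
# (start, end) segment bounds): one C99 split of a 6x6 block-diagonal
# similarity matrix should land between the two blocks.
def _c99_demo():
    distsmat = np.ones((6, 6))
    distsmat[:3, 3:] = 0.0
    distsmat[3:, :3] = 0.0
    return c99split(distsmat, 1)  # expected: ([3, 6], -1.0)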
####################
# DP
####################
# The dynamic programming splitter
def gensig_euclidean(X,minlength=1,maxlength=None):
""" Generate the sigma for the squared difference from the mean """
cs = X.cumsum(0)
css = (X**2).sum(1).cumsum(0)
def sigma(i,j):
length = j-i
if minlength:
if length < minlength: return np.inf
if maxlength:
if length > maxlength: return np.inf
if i == 0:
return css[j-1] - 1./j * ((cs[j-1])**2).sum()
else:
return ( css[j-1]-css[i-1] ) - 1./(j-i) * ((cs[j-1] - cs[i-1])**2).sum()
return sigma
def gensig_cosine(X, minlength=1, maxlength=None):
""" Generate the sigma for the cosine similarity """
def sigma(a,b):
length = (b-a)
if minlength:
if length < minlength: return np.inf
if maxlength:
if length > maxlength: return np.inf
rep = X[a:b].mean(0)
if length < 2:
return np.inf
return (cdist( X[a:b], [ rep ], 'cosine')**2).sum()
return sigma
def gensig_model_old(X, minlength=1, maxlength=None, lam=0.0):
N,D = X.shape
over_sqrtD = 1./np.sqrt(D)
def sigma(a,b):
length = (b-a)
if minlength:
if length < minlength: return np.inf
if maxlength:
if length > maxlength: return np.inf
rep = (2*(X[a:b].sum(0)>0)-1)*over_sqrtD
return -X[a:b].dot(rep).sum()
# return -X[a:b].dot(rep).sum() + lam*np.sqrt(length)/np.log(N)
return sigma
def gensig_model(X, minlength=1, maxlength=None, lam=0.0):
N,D = X.shape
over_sqrtD = 1./np.sqrt(D)
cs = np.cumsum(X,0)
def sigma(a,b):
length = (b-a)
if minlength:
if length < minlength: return np.inf
if maxlength:
if length > maxlength: return np.inf
tot = cs[b-1].copy()
if a > 0:
tot -= cs[a-1]
signs = np.sign(tot)
return -over_sqrtD*(signs*tot).sum()
return sigma
def tiebreak():
return 1e-10*rand()
def gensig_choi(distsmat, minlength=1, maxlength=None, rank=0):
""" The two dimensional sigma function for the c99 splitting """
if rank:
distsmat = rankify(distsmat, rank)
def sigma(a,b):
length = (b-a)
beta = distsmat[a:b,a:b].sum()
alpha = (b-a)**2
if minlength:
if (b-a) < minlength: beta += np.inf
if maxlength:
if (b-a) > maxlength: beta += np.inf
return (-beta, alpha)
return sigma
def dpsplit(n,k, sig):
""" Perform the dynamic programming optimal segmentation, using the sig function
to determine the cost of a segment sig(i,j) is the cost of the i,j segment. These
are then added together
"""
# Set up the tracking tables
K = k + 1
N = n
segtable = np.zeros((n,K)) + np.nan
segtable[:,0] = [ sig(0,j+1) for j in xrange(N) ]
segindtable = np.zeros((N,K), dtype='int') - 1
# fill up the table in a clever order
for k in xrange(1,K):
for j in xrange(k,N):
#fill the j,k element
ans = min( ( (segtable[l,k-1] + sig(l+1,j+1), l+1 )
for l in xrange(k-1,j) ) )
segtable[j,k] = ans[0]
segindtable[j,k] = ans[1]
# read out the path
current_pointer = segindtable[-1,K-1]
path = [current_pointer]
for k in xrange(K-2, 0, -1):
current_pointer = segindtable[current_pointer-1, k]
path.append(current_pointer)
return sorted(path + [N]), segtable[-1,K-1]
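# Usage sketch (made-up data): optimal 2-segment split of a 1-D signal with
# one clear jump, using the squared-deviation-from-the-mean cost above.
def _dp_demo():
    X = np.array([[0.], [0.], [0.], [5.], [5.], [5.]])
    return dpsplit(6, 1, gensig_euclidean(X))  # expected: ([3, 6], 0.0)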
def dpsplit_general(n,k, sig, combine=lambda a,b: a+b, key=lambda a: a, d=1):
""" Perform the dynamic programming optimal segmentation, using the sig function
to determine the cost of a segment sig(i,j) is the cost of the i,j segment. These
are then added together using the combine function and reduced to a scalar cost with the
key function. d sets the dimensionality of the intermediary representation
"""
# Set up the tracking tables
K = k + 1
N = n
if d > 1:
segtable = np.zeros((n,K,d)) + np.nan
else:
segtable = np.zeros((n,K)) + np.nan
segtable[:,0] = [ sig(0,j+1) for j in xrange(N) ]
segindtable = np.zeros((N,K), dtype='int') - 1
# fill up the table in a clever order
for k in xrange(1,K):
for j in xrange(k,N):
#fill the j,k element
ans = min( ( ( combine(segtable[l,k-1],sig(l+1,j+1)), l+1 )
for l in xrange(k-1,j) ), key=lambda x: key(x[0]) )
segtable[j,k] = ans[0]
segindtable[j,k] = ans[1]
# read out the path
current_pointer = segindtable[-1,K-1]
path = [current_pointer]
for k in xrange(K-2, 0, -1):
current_pointer = segindtable[current_pointer-1, k]
path.append(current_pointer)
return sorted(path + [N]), key(segtable[-1,K-1])
####################
# Greedy
####################
def greedysplit(n, k, sigma):
""" Do a greedy split """
splits = [n]
s = sigma(0,n)
def score(splits, sigma):
splits = sorted(splits)
return sum( sigma(a,b) for (a,b) in tools.seg_iter(splits) )
while k > 0:
usedinds = set(splits)
new = min( ( score( splits + [i], sigma), splits + [i] )
for i in xrange(1,n) if i not in usedinds )
splits = new[1]
s = new[0]
k -= 1
return sorted(splits), s
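# Usage sketch: the greedy splitter agrees with the DP splitter on the easy
# two-cluster signal used in _dp_demo above.
def _greedy_demo():
    X = np.array([[0.], [0.], [0.], [5.], [5.], [5.]])
    return greedysplit(6, 1, gensig_euclidean(X))  # expected: ([3, 6], 0.0)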
def greedysplit_general(n, k, sigma, combine=lambda a,b: a+b, key=lambda a: a):
""" Do a greedy split """
splits = [n]
s = sigma(0,n)
def score(splits, sigma):
splits = sorted(splits)
return key( reduce( combine, (sigma(a,b) for (a,b) in tools.seg_iter(splits) ) ))
while k > 0:
usedinds = set(splits)
new = min( ( score( splits + [i], sigma), splits + [i] )
for i in xrange(1,n) if i not in usedinds )
splits = new[1]
s = new[0]
k -= 1
return sorted(splits), s
def bestsplit(low, high, sigma, minlength=1, maxlength=None):
""" Find the best split inside of a region """
length = high-low
if length < 2*minlength:
return (np.inf, np.inf, low)
best = min( ((sigma(low,j), sigma(j, high), j) for j in xrange(low+1,high)), key=lambda x: x[0]+x[1] )
return best
def greedysplit_old(n, k, sigma):
""" Do a greedy split """
k = k + 1
splits = [0,n]
costs = [sigma(0,n)]
cost = costs[0]
# path = []
while k > 0:
bestcosts = []
bsp = []
bestcost = np.inf
for j in xrange(len(splits)-1):
left, right, sp = bestsplit(splits[j], splits[j+1], sigma)
newcost = left+right + sum(costs[:j]) + sum(costs[j+1:])
if newcost < bestcost:
bestcost = newcost
bsp = splits[:j+1] + [sp] + splits[j+1:]
bestcosts = costs[:j] + [left,right] + costs[j:]
costs = bestcosts
cost = bestcost
splits = bsp
# path.append( (splits, cost, k*(d+1)*np.log(d*top) ) )
k -= 1
return splits[1:], cost
def refine(splits, sigma, n=1):
""" Given some splits, refine them a step """
oldsplits = splits[:]
counter = 0
n = n or np.inf
while counter < n:
splits = [0]+splits
n = len(splits) - 2
new = [splits[0]]
for i in xrange(n):
out = bestsplit(splits[i], splits[i+2], sigma)
new.append(out[2])
new.append(splits[-1])
splits = new[1:]
if splits == oldsplits:
break
oldsplits = splits[:]
counter += 1
return splits
def bestsplit_general(splits, pk, sigma, combine=lambda a,b: a+b, key=lambda a: a):
""" Move the pk-th split to its best location """
def score(splits, sigma):
splits = sorted(splits)
return key( reduce( combine, (sigma(a,b) for (a,b) in tools.seg_iter(splits) ) ))
if pk == 0:
left = 0
else:
left = splits[pk-1]
right = splits[pk+1]
best = min( (score( splits[:pk] + [j] + splits[pk+1:], sigma),j) for j in xrange(left+1,right) )
return best[1]
def refine_general(splits, sigma, n=1, combine=lambda a,b: a+b, key=lambda a: a):
""" Do a general refinement of up to n steps """
oldsplits = splits[:]
N = splits[-1]
counter = 0
k = len(splits)
n = n or np.inf
while counter < n:
splits = [ bestsplit_general(splits, i, sigma, combine, key) for i in xrange(k-1) ] + [N]
if splits == oldsplits:
break
oldsplits = splits[:]
counter += 1
return splits
|
|
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
# From http://code.activestate.com/recipes/576693/ :
#
# Drop-in substitute for Py2.7's new collections.OrderedDict. The recipe has
# big-oh performance that matches regular dictionaries (amortized O(1)
# insertion/deletion/lookup and O(n) iteration/repr/copy/equality_testing).
#
# License: MIT
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict (dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(self, *args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
        if len(args) > 1:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args) + 1,))
        # Make progressively weaker assumptions about "other"
        # (args excludes self here, so the iterable, if any, is args[0])
        other = args[0] if args else ()
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys(): # ignore pylint complaint
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
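# Usage sketch: insertion order is preserved, unlike a plain Python 2 dict.
def _ordereddict_demo():
    od = OrderedDict([('b', 1), ('a', 2)])
    od['c'] = 3
    return od.items()  # -> [('b', 1), ('a', 2), ('c', 3)]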
|
|
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from muntjac.ui.component import Event
from muntjac.ui.embedded import Embedded
from muntjac.ui.custom_component import CustomComponent
from muntjac.terminal.application_resource import IApplicationResource
from muntjac.terminal.uri_handler import IUriHandler
from muntjac.terminal.download_stream import DownloadStream
from muntjac.terminal.parameter_handler import IParameterHandler
from muntjac.terminal.gwt.client.application_connection import \
ApplicationConnection
class LoginEvent(Event):
"""This event is sent when login form is submitted."""
def __init__(self, params, form):
super(LoginEvent, self).__init__(form)
self._params = params
def getLoginParameter(self, name):
"""Access method to form values by field names.
@return: value in given field
"""
if name in self._params:
return self._params.get(name)
else:
return None
class ILoginListener(object):
"""Login listener is a class capable to listen LoginEvents sent from
LoginBox
"""
def onLogin(self, event):
"""This method is fired on each login form post.
"""
raise NotImplementedError
_ON_LOGIN_METHOD = getattr(ILoginListener, 'onLogin')
class LoginForm(CustomComponent):
"""LoginForm is a Muntjac component to handle common problem among Ajax
applications: browsers password managers don't fill dynamically created
forms like all those UI elements created by Muntjac.
For developer it is easy to use: add component to a desired place in you
UI and add ILoginListener to validate form input. Behind the curtain
LoginForm creates an iframe with static html that browsers detect.
Login form is by default 100% width and height, so consider using it
inside a sized L{Panel} or L{Window}.
    The login page html can be overridden by overriding the protected
    getLoginHTML method. As the login page is actually an iframe, styles must
    be handled manually. By default the component tries to guess the right
    place for the theme css.
Note, this is a new Ajax terminal specific component and is likely to
change.
"""
def __init__(self):
self._usernameCaption = 'Username'
self._passwordCaption = 'Password'
self._loginButtonCaption = 'Login'
self._iframe = Embedded()
self._window = None
super(LoginForm, self).__init__()
self._iframe.setType(Embedded.TYPE_BROWSER)
self._iframe.setSizeFull()
self.setSizeFull()
self.setCompositionRoot(self._iframe)
self.addStyleName('v-loginform')
self.loginPage = LoginPage(self)
self.parameterHandler = ParameterHandler(self)
self.uriHandler = UriHandler(self)
def getLoginHTML(self):
"""Returns byte array containing login page html. If you need to
override the login html, use the default html as basis. Login page
sets its target with javascript.
@return: byte array containing login page html
"""
appUri = str(self.getApplication().getURL()) \
+ self.getWindow().getName() + '/'
return ('<!DOCTYPE html PUBLIC \"-//W3C//DTD '
'XHTML 1.0 Transitional//EN\" '
'\"http://www.w3.org/TR/xhtml1/'
'DTD/xhtml1-transitional.dtd\">\n'
'<html>'
'<head><script type=\'text/javascript\'>'
'var setTarget = function() {'
'var uri = \''
+ appUri +
'loginHandler'
'\'; var f = document.getElementById(\'loginf\');'
'document.forms[0].action = uri;document.forms[0].username.focus();};'
'' + 'var styles = window.parent.document.styleSheets;'
'for(var j = 0; j < styles.length; j++) {\n'
'if(styles[j].href) {'
'var stylesheet = document.createElement(\'link\');\n'
'stylesheet.setAttribute(\'rel\', \'stylesheet\');\n'
'stylesheet.setAttribute(\'type\', \'text/css\');\n'
'stylesheet.setAttribute(\'href\', styles[j].href);\n'
'document.getElementsByTagName(\'head\')[0].appendChild(stylesheet);\n'
'}'
'}\n'
'function submitOnEnter(e) { var keycode = e.keyCode || e.which;'
' if (keycode == 13) {document.forms[0].submit();} } \n'
'</script>'
'</head><body onload=\'setTarget();\' style=\'margin:0;padding:0; background:transparent;\' class=\"'
+ ApplicationConnection.GENERATED_BODY_CLASSNAME +
'\">' + '<div class=\'v-app v-app-loginpage\' style=\"background:transparent;\">'
'<iframe name=\'logintarget\' style=\'width:0;height:0;'
'border:0;margin:0;padding:0;\'></iframe>'
'<form id=\'loginf\' target=\'logintarget\' onkeypress=\"submitOnEnter(event)\" method=\"post\">'
'<div>'
+ self._usernameCaption +
'</div><div >'
'<input class=\'v-textfield\' style=\'display:block;\' type=\'text\' name=\'username\'></div>'
'<div>'
+ self._passwordCaption +
'</div>'
'<div><input class=\'v-textfield\' style=\'display:block;\' type=\'password\' name=\'password\'></div>'
'<div><div onclick=\"document.forms[0].submit();\" tabindex=\"0\" class=\"v-button\" role=\"button\" ><span class=\"v-button-wrap\"><span class=\"v-button-caption\">'
+ self._loginButtonCaption +
'</span></span></div></div></form></div>'
'</body></html>').encode('utf-8')
def attach(self):
super(LoginForm, self).attach()
self.getApplication().addResource(self.loginPage)
self.getWindow().addParameterHandler(self.parameterHandler)
self._iframe.setSource(self.loginPage)
def detach(self):
self.getApplication().removeResource(self.loginPage)
self.getWindow().removeParameterHandler(self.parameterHandler)
# store window temporary to properly remove uri handler once
# response is handled. (May happen if login handler removes login
# form
self._window = self.getWindow()
if self._window.getParent() is not None:
self._window = self._window.getParent()
super(LoginForm, self).detach()
_UNDEFINED_HEIGHT = '140px'
_UNDEFINED_WIDTH = '200px'
def addListener(self, listener, iface=None):
"""Adds ILoginListener to handle login logic.
"""
if (isinstance(listener, ILoginListener) and
(iface is None or issubclass(iface, ILoginListener))):
self.registerListener(LoginEvent, listener, _ON_LOGIN_METHOD)
super(LoginForm, self).addListener(listener, iface)
def addCallback(self, callback, eventType=None, *args):
if eventType is None:
eventType = callback._eventType
if issubclass(eventType, LoginEvent):
self.registerCallback(LoginEvent, callback, None, *args)
else:
super(LoginForm, self).addCallback(callback, eventType, *args)
def removeListener(self, listener, iface=None):
"""Removes ILoginListener.
"""
if (isinstance(listener, ILoginListener) and
(iface is None or issubclass(iface, ILoginListener))):
self.withdrawListener(LoginEvent, listener, _ON_LOGIN_METHOD)
super(LoginForm, self).removeListener(listener, iface)
def removeCallback(self, callback, eventType=None):
if eventType is None:
eventType = callback._eventType
if issubclass(eventType, LoginEvent):
self.withdrawCallback(LoginEvent, callback)
else:
super(LoginForm, self).removeCallback(callback, eventType)
def setWidth(self, width, unit=None):
if unit is not None:
super(LoginForm, self).setWidth(width, unit)
if self._iframe is not None:
if width < 0:
self._iframe.setWidth(self._UNDEFINED_WIDTH)
else:
self._iframe.setWidth('100%')
else:
super(LoginForm, self).setWidth(width)
def setHeight(self, height, unit=None):
if unit is not None:
super(LoginForm, self).setHeight(height, unit)
if self._iframe is not None:
if height < 0:
self._iframe.setHeight(self._UNDEFINED_HEIGHT)
else:
self._iframe.setHeight('100%')
else:
super(LoginForm, self).setHeight(height)
def getUsernameCaption(self):
"""Returns the caption for the user name field.
"""
return self._usernameCaption
def setUsernameCaption(self, usernameCaption):
"""Sets the caption to show for the user name field. The caption
cannot be changed after the form has been shown to the user.
"""
self._usernameCaption = usernameCaption
def getPasswordCaption(self):
"""Returns the caption for the password field.
"""
return self._passwordCaption
def setPasswordCaption(self, passwordCaption):
"""Sets the caption to show for the password field. The caption
cannot be changed after the form has been shown to the user.
"""
self._passwordCaption = passwordCaption
def getLoginButtonCaption(self):
"""Returns the caption for the login button.
"""
return self._loginButtonCaption
def setLoginButtonCaption(self, loginButtonCaption):
"""Sets the caption (button text) to show for the login button. The
caption cannot be changed after the form has been shown to the user.
"""
self._loginButtonCaption = loginButtonCaption
class LoginPage(IApplicationResource):
def __init__(self, form):
self._form = form
def getApplication(self):
return self._form.getApplication()
def getBufferSize(self):
return len(self._form.getLoginHTML())
def getCacheTime(self):
return -1
def getFilename(self):
return "login"
def getStream(self):
return DownloadStream(StringIO(self._form.getLoginHTML()),
self.getMIMEType(), self.getFilename())
def getMIMEType(self):
return "text/html; charset=utf-8"
class ParameterHandler(IParameterHandler):
def __init__(self, form):
self._form = form
def handleParameters(self, parameters):
if 'username' in parameters:
self._form.getWindow().addURIHandler(self._form.uriHandler)
params = dict()
# expecting single params
for key in parameters:
value = parameters.get(key)
params[key] = value
event = LoginEvent(params, self._form)
self._form.fireEvent(event)
class UriHandler(IUriHandler):
    def __init__(self, form):
        self._form = form
        self._response = ('<html><body>Login form handled.'
                + '<script type=\'text/javascript\'>top.vaadin.forceSync();'
                + '</script></body></html>')
def handleURI(self, context, relativeUri):
if relativeUri is not None and 'loginHandler' in relativeUri:
if self._form._window is not None:
self._form._window.removeURIHandler(self)
            downloadStream = DownloadStream(StringIO(self._response),
                    'text/html', 'loginSuccessful')
downloadStream.setCacheTime(-1)
return downloadStream
else:
return None
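# Usage sketch (assuming a Muntjac application with a main window `window`;
# the listener class below is hypothetical, for illustration only):
class _DemoLoginListener(ILoginListener):
    def onLogin(self, event):
        print 'login attempt by: %s' % event.getLoginParameter('username')
# form = LoginForm()
# form.addListener(_DemoLoginListener())
# window.addComponent(form)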
|
|
"""
Support for monitoring an SABnzbd NZB client.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sabnzbd/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.discovery import SERVICE_SABNZBD
from homeassistant.const import (
CONF_HOST, CONF_API_KEY, CONF_NAME, CONF_PORT, CONF_SENSORS, CONF_SSL)
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['pysabnzbd==1.1.0']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'sabnzbd'
DATA_SABNZBD = 'sabznbd'
_CONFIGURING = {}
ATTR_SPEED = 'speed'
BASE_URL_FORMAT = '{}://{}:{}/'
CONFIG_FILE = 'sabnzbd.conf'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'SABnzbd'
DEFAULT_PORT = 8080
DEFAULT_SPEED_LIMIT = '100'
DEFAULT_SSL = False
UPDATE_INTERVAL = timedelta(seconds=30)
SERVICE_PAUSE = 'pause'
SERVICE_RESUME = 'resume'
SERVICE_SET_SPEED = 'set_speed'
SIGNAL_SABNZBD_UPDATED = 'sabnzbd_updated'
SENSOR_TYPES = {
'current_status': ['Status', None, 'status'],
'speed': ['Speed', 'MB/s', 'kbpersec'],
'queue_size': ['Queue', 'MB', 'mb'],
'queue_remaining': ['Left', 'MB', 'mbleft'],
'disk_size': ['Disk', 'GB', 'diskspacetotal1'],
'disk_free': ['Disk Free', 'GB', 'diskspace1'],
'queue_count': ['Queue Count', None, 'noofslots_total'],
'day_size': ['Daily Total', 'GB', 'day_size'],
'week_size': ['Weekly Total', 'GB', 'week_size'],
'month_size': ['Monthly Total', 'GB', 'month_size'],
'total_size': ['Total', 'GB', 'total_size'],
}
SPEED_LIMIT_SCHEMA = vol.Schema({
vol.Optional(ATTR_SPEED, default=DEFAULT_SPEED_LIMIT): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SENSORS):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
}),
}, extra=vol.ALLOW_EXTRA)
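# Example configuration.yaml entry (placeholder values; keys as accepted by
# CONFIG_SCHEMA above):
#
# sabnzbd:
#   api_key: YOUR_API_KEY
#   host: localhost
#   port: 8080
#   ssl: false
#   sensors:
#     - current_status
#     - speed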
async def async_check_sabnzbd(sab_api):
"""Check if we can reach SABnzbd."""
from pysabnzbd import SabnzbdApiException
try:
await sab_api.check_available()
return True
except SabnzbdApiException:
_LOGGER.error("Connection to SABnzbd API failed")
return False
async def async_configure_sabnzbd(hass, config, use_ssl, name=DEFAULT_NAME,
api_key=None):
"""Try to configure Sabnzbd and request api key if configuration fails."""
from pysabnzbd import SabnzbdApi
host = config[CONF_HOST]
port = config[CONF_PORT]
uri_scheme = 'https' if use_ssl else 'http'
base_url = BASE_URL_FORMAT.format(uri_scheme, host, port)
if api_key is None:
conf = await hass.async_add_job(load_json,
hass.config.path(CONFIG_FILE))
api_key = conf.get(base_url, {}).get(CONF_API_KEY, '')
sab_api = SabnzbdApi(base_url, api_key,
session=async_get_clientsession(hass))
if await async_check_sabnzbd(sab_api):
async_setup_sabnzbd(hass, sab_api, config, name)
else:
async_request_configuration(hass, config, base_url)
async def async_setup(hass, config):
"""Set up the SABnzbd component."""
async def sabnzbd_discovered(service, info):
"""Handle service discovery."""
ssl = info.get('properties', {}).get('https', '0') == '1'
await async_configure_sabnzbd(hass, info, ssl)
discovery.async_listen(hass, SERVICE_SABNZBD, sabnzbd_discovered)
conf = config.get(DOMAIN)
if conf is not None:
use_ssl = conf.get(CONF_SSL)
name = conf.get(CONF_NAME)
api_key = conf.get(CONF_API_KEY)
await async_configure_sabnzbd(hass, conf, use_ssl, name, api_key)
return True
@callback
def async_setup_sabnzbd(hass, sab_api, config, name):
"""Set up SABnzbd sensors and services."""
sab_api_data = SabnzbdApiData(sab_api, name, config.get(CONF_SENSORS, {}))
if config.get(CONF_SENSORS):
hass.data[DATA_SABNZBD] = sab_api_data
hass.async_create_task(
discovery.async_load_platform(hass, 'sensor', DOMAIN, {}, config))
async def async_service_handler(service):
"""Handle service calls."""
if service.service == SERVICE_PAUSE:
await sab_api_data.async_pause_queue()
elif service.service == SERVICE_RESUME:
await sab_api_data.async_resume_queue()
elif service.service == SERVICE_SET_SPEED:
speed = service.data.get(ATTR_SPEED)
await sab_api_data.async_set_queue_speed(speed)
hass.services.async_register(DOMAIN, SERVICE_PAUSE,
async_service_handler,
schema=vol.Schema({}))
hass.services.async_register(DOMAIN, SERVICE_RESUME,
async_service_handler,
schema=vol.Schema({}))
hass.services.async_register(DOMAIN, SERVICE_SET_SPEED,
async_service_handler,
schema=SPEED_LIMIT_SCHEMA)
async def async_update_sabnzbd(now):
"""Refresh SABnzbd queue data."""
from pysabnzbd import SabnzbdApiException
try:
await sab_api.refresh_data()
async_dispatcher_send(hass, SIGNAL_SABNZBD_UPDATED, None)
except SabnzbdApiException as err:
_LOGGER.error(err)
async_track_time_interval(hass, async_update_sabnzbd, UPDATE_INTERVAL)
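# Usage sketch: the services registered in async_setup_sabnzbd above can be
# invoked from elsewhere in Home Assistant, e.g. (the speed value below is a
# placeholder):
#   hass.services.async_call(DOMAIN, SERVICE_SET_SPEED, {ATTR_SPEED: '50'})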
@callback
def async_request_configuration(hass, config, host):
"""Request configuration steps from the user."""
from pysabnzbd import SabnzbdApi
configurator = hass.components.configurator
    # We get an error if this method is called while we are still configuring
if host in _CONFIGURING:
configurator.async_notify_errors(
_CONFIGURING[host],
'Failed to register, please try again.')
return
async def async_configuration_callback(data):
"""Handle configuration changes."""
api_key = data.get(CONF_API_KEY)
sab_api = SabnzbdApi(host, api_key,
session=async_get_clientsession(hass))
if not await async_check_sabnzbd(sab_api):
return
def success():
"""Signal successful setup."""
conf = load_json(hass.config.path(CONFIG_FILE))
conf[host] = {CONF_API_KEY: api_key}
save_json(hass.config.path(CONFIG_FILE), conf)
req_config = _CONFIGURING.pop(host)
configurator.request_done(req_config)
hass.async_add_job(success)
async_setup_sabnzbd(hass, sab_api, config,
config.get(CONF_NAME, DEFAULT_NAME))
_CONFIGURING[host] = configurator.async_request_config(
DEFAULT_NAME,
async_configuration_callback,
description='Enter the API Key',
submit_caption='Confirm',
fields=[{'id': CONF_API_KEY, 'name': 'API Key', 'type': ''}]
)
class SabnzbdApiData:
"""Class for storing/refreshing sabnzbd api queue data."""
def __init__(self, sab_api, name, sensors):
"""Initialize component."""
self.sab_api = sab_api
self.name = name
self.sensors = sensors
async def async_pause_queue(self):
"""Pause Sabnzbd queue."""
from pysabnzbd import SabnzbdApiException
try:
return await self.sab_api.pause_queue()
except SabnzbdApiException as err:
_LOGGER.error(err)
return False
async def async_resume_queue(self):
"""Resume Sabnzbd queue."""
from pysabnzbd import SabnzbdApiException
try:
return await self.sab_api.resume_queue()
except SabnzbdApiException as err:
_LOGGER.error(err)
return False
async def async_set_queue_speed(self, limit):
"""Set speed limit for the Sabnzbd queue."""
from pysabnzbd import SabnzbdApiException
try:
return await self.sab_api.set_speed_limit(limit)
except SabnzbdApiException as err:
_LOGGER.error(err)
return False
def get_queue_field(self, field):
"""Return the value for the given field from the Sabnzbd queue."""
return self.sab_api.queue.get(field)
|
|
__author__ = 'Jody Shumaker'
import sys
import glob
import argparse
import os.path
from utility.mouse import *
from utility.screen import *
from utility.logconfig import *
from PIL import ImageGrab, Image
import logging
import functools
import math
import operator
import time
# Level card positions. Positions are at the top-left of each card, inside the border.
# noinspection PyPep8
card_positions = [
# Level 1
[(-307, -139), (-37, -139), (233, -139),
(-307, 111), (-37, 111), (233, 111)],
# Level 2
[(-187, -189), (113, -189),
(-287, -9), (-187, -9), (113, -9), (213, -9),
(-187, 171), (113, 171)],
# Level 3
[(-236, -139), (-36, -139), (164, -139),
(-336, 11), (-136, 11), (64, 11), (264, 11),
(-236, 161), (-36, 161), (164, 161)],
# Level 4
[(-147, -139), (-37, -139), (73, -139),
(-477, -9), (-367, -9), (-257, -9), (193, -9), (303, -9), (413, -9),
(-147, 111), (-37, 111), (73, 111)],
# Level 5
[(-507, -195), (-427, -59), (-347, 69), (-267, 182), (-167, 34), (-81, -129),
(9, -129), (93, 34), (193, 182), (273, 69), (353, -59), (433, -195)],
# Level 6
[(-187, -139), (-87, -139), (13, -139), (113, -139),
(-387, 11), (-287, 11), (213, 11), (313, 11),
(-187, 111), (-87, 111), (13, 111), (113, 111)],
# Level 7
[(-337, -139), (-237, -139), (-137, -139), (-37, -139), (63, -139), (163, -139), (263, -139),
(-337, 111), (-237, 111), (-137, 111), (-37, 111), (63, 111), (163, 111), (263, 111)],
# Level 8
[(-437, -189), (-287, -189), (-137, -189), (13, -189), (163, -189), (313, -189),
(-437, -20), (-287, -20), (-137, -20), (13, -20), (163, -20), (313, -20),
(-437, 151), (-287, 151), (-137, 151), (13, 151), (163, 151), (313, 151)],
# Level 9
[(-437, -189), (-330, -189), (-223, -189), (-116, -189), (-9, -189), (98, -189), (205, -189), (312, -189),
(-437, -19), (-330, -19), (-223, -19), (-116, -19), (-9, -19), (98, -19), (205, -19), (312, -19),
(-437, 151), (-330, 151), (-223, 151), (-116, 151), (-9, 151), (98, 151), (205, 151), (312, 151)],
# Level 10
[(-437, -189), (-330, -189), (-223, -189), (-116, -189), (-9, -189), (98, -189), (205, -189), (312, -189),
(-437, -19), (-330, -19), (-223, -19), (-116, -19), (-9, -19), (98, -19), (205, -19), (312, -19),
(-437, 151), (-330, 151), (-223, 151), (-116, 151), (-9, 151), (98, 151), (205, 151), (312, 151)],
# Level 11
[(-445, -276), (-338, -276), (-231, -276), (-124, -276), (-17, -276), (90, -276), (197, -276), (304, -276),
(-445, -116), (-338, -116), (-231, -116), (-124, -116), (-17, -116), (90, -116), (197, -116), (304, -116),
(-445, 44), (-338, 44), (-231, 44), (-124, 44), (-17, 44), (90, 44), (197, 44), (304, 44),
(-124, 204), (-17, 204)],
# Level 12
[(-445, -276), (-338, -276), (-231, -276), (-124, -276), (-17, -276), (90, -276), (197, -276), (304, -276),
(-445, -116), (-338, -116), (-231, -116), (-124, -116), (-17, -116), (90, -116), (197, -116), (304, -116),
(-445, 44), (-338, 44), (-231, 44), (-124, 44), (-17, 44), (90, 44), (197, 44), (304, 44),
(-231, 204), (-124, 204), (-17, 204), (90, 204)],
# Level 13
[(-445, -276), (-338, -276), (-231, -276), (-124, -276), (-17, -276), (90, -276), (197, -276), (304, -276),
(-445, -116), (-338, -116), (-231, -116), (-124, -116), (-17, -116), (90, -116), (197, -116), (304, -116),
(-445, 44), (-338, 44), (-231, 44), (-124, 44), (-17, 44), (90, 44), (197, 44), (304, 44),
(-338, 204), (-231, 204), (-124, 204), (-17, 204), (90, 204), (197, 204)],
# Level 14
[(-445, -276), (-338, -276), (-231, -276), (-124, -276), (-17, -276), (90, -276), (197, -276), (304, -276),
(-445, -116), (-338, -116), (-231, -116), (-124, -116), (-17, -116), (90, -116), (197, -116), (304, -116),
(-445, 44), (-338, 44), (-231, 44), (-124, 44), (-17, 44), (90, 44), (197, 44), (304, 44),
(-445, 204), (-338, 204), (-231, 204), (-124, 204), (-17, 204), (90, 204), (197, 204), (304, 204)],
]
# Number of flips gained upon completion of a level.
flips_gained = [10, 16, 18, 18, 20, 20, 24, 28, 34, 40, 44, 48, 52]
card_width = 70
card_height = 123
flips_offsetx = 345
flips_offsety = 52
level_offsetx = 749
level_offsety = 20
digit_width = 10
digit_height = 16
card_match_regions = [(10, 13, 60, 14), (10, 20, 60, 21), (10, 110, 60, 111)]
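# Illustrative helper (not used by the bot): convert a card's board-relative
# position into the absolute screen point that flip_card() clicks, assuming
# `gamecenter` is the (x, y) screen center found by TarotCards.orient().
def example_card_click_point(gamecenter, level, cardnum):
    relx, rely = card_positions[level][cardnum]
    return (gamecenter[0] + relx + int(card_width / 2),
            gamecenter[1] + rely + 15)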
class CardOnBoard:
def __init__(self, position):
self.name = None
self.matched = False
self.position = position
    def __eq__(self, other):
        # Two cards "match" when they show the same face at different board
        # positions; an unidentified card (name None) never matches an
        # identified one.
        return self.name == other.name and self.position != other.position
class TarotCards:
def __init__(self, learn=False):
self.flips_left = 0
self.level = -1
self.cards_on_board = None
self.gamepos = None
self.gamesize = None
self.gamecenter = None
self.safeclick = None
self.skipstart = False
self.learn = learn
self.learncount = 0
self.freeflips = 0
self.extraflips = 0
self.freeflipsonly = False
logging.info("Loading cards...")
self.tarot_cards = []
scriptdir = os.path.dirname(os.path.realpath(__file__))
globstring = os.path.join(scriptdir, "tarot/cards/*.png")
for file in glob.glob(globstring):
name = os.path.basename(file)
name, ext = os.path.splitext(name)
# Limit compared card size to 30x20
self.tarot_cards.append((name, Image.open(file)))
logging.debug("Loaded card: {0}".format(name))
logging.info("Loading digits...")
self.digits = []
scriptdir = os.path.dirname(os.path.realpath(__file__))
globstring = os.path.join(scriptdir, "tarot/digits/*.png")
for file in glob.glob(globstring):
name = os.path.basename(file)
name, ext = os.path.splitext(name)
self.digits.append((name, Image.open(file)))
logging.log(VERBOSE, "Loaded digit: {0}".format(name))
self.card_corners = {
1: Image.open("tarot/back1.png"),
3: Image.open("tarot/back3.png"),
5: Image.open("tarot/back5.png"),
8: Image.open("tarot/back8.png")
}
def compare_cards(self):
region_count = 0
for region in card_match_regions:
region_count += 1
logging.info("Comparing region {}".format(region_count))
worst_rms = 10000.0
worst_matchup = ''
for name1, image1 in self.tarot_cards:
for name2, image2 in self.tarot_cards:
if name1 != name2:
rms = compare_images(image1.crop(region), image2.crop(region))
if rms < worst_rms:
worst_rms = rms
worst_matchup = name1 + ' vs ' + name2
#logging.info("{0:>20} vs {1:<20}: {2:>10.3f}".format(name1, name2, rms))
logging.info("Region: {} Size: {} Worst matchup: {} rms: {:0.3f}".format(
region_count, (region[2] - region[0]) * (region[3] - region[1]), worst_matchup, worst_rms))
sys.exit(0)
def find_next(self):
# Search for next button.
logging.log(VERBOSE, "Searching for next button...")
next_image = Image.open("tarot/next.png")
searchx = self.gamecenter[0] - 45
searchy = self.gamecenter[1] + 65
best_x, best_y = image_search(ImageGrab.grab(), next_image, searchx, searchy)
next_image.close()
return best_x, best_y
def orient(self):
# Get the game window
self.gamepos = get_game_window()
self.gamesize = (self.gamepos[2] - self.gamepos[0] + 1, self.gamepos[3] - self.gamepos[1] + 1)
logging.log(VERBOSE, "Game Window position: {0},{1},{2},{3}".format(*self.gamepos))
if self.gamesize[0] < 1040:
logging.error("Game window size is too narrow, please make game window atleast 1040 pixels wide.")
sys.exit(1)
self.safeclick = (max(0, self.gamepos[0] + 2), max(0, self.gamepos[1] + 2))
Mouse.click(*self.safeclick)
time.sleep(0.25)
self.gamecenter = (self.gamepos[0] + int(self.gamesize[0] / 2),
self.gamepos[1] + int(self.gamesize[1] / 2))
# Search for start button to center.
logging.log(VERBOSE, "Searching for start button...")
start_image = Image.open("tarot/start.png")
searchx = self.gamecenter[0] - 31
searchy = self.gamecenter[1] + 97
best_x, best_y = image_search(ImageGrab.grab(), start_image, searchx, searchy)
start_image.close()
if best_x != -1:
self.gamecenter = (best_x + 31, best_y - 96)
self.level = 0
logging.log(VERBOSE, "Start button found, offset from expected: {0}, {1}".format(best_x - searchx, best_y - searchy))
else:
self.level = self.parse_level() - 1
best_x, best_y = self.find_next()
if best_x != -1:
self.gamecenter = (best_x + 45, best_y - 68)
logging.log(VERBOSE, "Next button found, offset from expected: {0}, {1}".format(best_x - searchx, best_y - searchy))
else:
logging.warning("Level appears to already be started.")
self.skipstart = True
logging.info("Center found at {0}".format(self.gamecenter))
logging.info("Starting play at level {0}".format(self.level + 1))
# Let's calibrate this with the first card which is never really covered.
card_corner = self.get_image_back_corner()
# Adjust center
screengrab = ImageGrab.grab()
logging.log(VERBOSE, "Calibrating via card 0, card back")
searchx, searchy = card_positions[self.level][0]
searchx += self.gamecenter[0] - 6
searchy += self.gamecenter[1] - 6
newx, newy = image_search(screengrab, card_corner, searchx, searchy, radius=20)
if newx == -1 and self.skipstart:
logging.log(VERBOSE, "Calibrating via card 0, flipped card")
# Card might be flipped.
searchx = self.gamecenter[0] + card_positions[self.level][0][0]
searchy = self.gamecenter[1] + card_positions[self.level][0][1]
card_name, newx, newy = detect_image(screengrab, self.tarot_cards,
searchx, searchy,
radius=20, threshold=2000.0, compare_regions=card_match_regions)
if newx == -1:
screengrab.save("failed_calibrate.png")
logging.error("Failed to calibrate, saved failed_calibrate.png")
sys.exit(1)
logging.info("First card offset: {0},{1}".format(newx - searchx, newy - searchy))
self.gamecenter = (self.gamecenter[0] + newx - searchx,
self.gamecenter[1] + newy - searchy)
logging.info("Center adjusted to {0}".format(self.gamecenter))
    def match_digit(self, i1):
        # The initial value doubles as the threshold a match must stay under
        # to be considered reliable.
        best_rms = 3000.0
        best_digit = None
        for digit, i2 in self.digits:
            rms = compare_images(i1, i2)
            logging.debug("Compare vs. {0}, rms: {1}".format(digit, rms))
            if rms < best_rms:
                best_rms = rms
                best_digit = digit
        return best_digit
def parse_flips(self):
logging.log(VERBOSE, "Parsing flips...")
value = 0
for x in range(3):
for posx, posy in search_offset(offsetx=flips_offsetx + self.gamepos[0],
offsety=flips_offsety + self.gamepos[1]):
digit_pos = (posx + (x * digit_width),
posy,
posx + (x * digit_width) + digit_width - 1,
posy + digit_height - 1)
digit_image = ImageGrab.grab(digit_pos)
digit_value = self.match_digit(digit_image)
if digit_value is not None:
logging.log(VERBOSE, "Digit found, offset from expected: {0},{1}".format(
posx - flips_offsetx - self.gamepos[0],
posy - flips_offsety - self.gamepos[1]
))
break
if digit_value == 'end':
# Last digit was read.
break
if digit_value is None:
imagefile = "digit{0}.png".format(x)
logging.warning('Failed to match digit, saving to ' + imagefile)
digit_image.save(imagefile)
else:
value = value * 10 + int(digit_value)
digit_image.close()
self.flips_left = value
def parse_level(self):
logging.log(VERBOSE, "Parsing level...")
value = -1
screengrab = ImageGrab.grab()
for x in range(2):
digit_value, digit_posx, digit_posy = detect_image(
screengrab, self.digits, level_offsetx + self.gamepos[0] + (x * digit_width),
level_offsety + self.gamepos[1])
if digit_value is not None and digit_value != 'end':
if value == -1:
value = int(digit_value)
else:
value = value * 10 + int(digit_value)
return value
def flip_card(self, cardnum, detect=False, fliptimeout=12.1, clicktimeout=1.0):
if self.flips_left <= 0:
# Let's double check the flips left.
self.parse_flips()
if self.flips_left <= 0:
logging.error("No flips remaining!")
sys.exit(1)
if self.freeflipsonly and self.freeflips <= 0:
logging.error("No freeflips remaining, only extra flips remaining.")
sys.exit(1)
cardpos = (self.gamecenter[0] + card_positions[self.level][cardnum][0] + int(card_width / 2),
self.gamecenter[1] + card_positions[self.level][cardnum][1] + 15)
logging.log(VERBOSE, "Flipping level {0} card {1} at position {2}".format(self.level, cardnum, cardpos))
timeout = time.time() + fliptimeout
card_back = True
while time.time() < timeout:
Mouse.click(*cardpos)
clicktimeout_time = time.time() + clicktimeout
# Wait for card back to go away.
while time.time() < clicktimeout_time and card_back:
card_back = self.detect_card_back(cardnum)[0] != -1
time.sleep(0.010)
if not card_back:
break
logging.warning("Card click does not appear to have registered, clicking again.")
if card_back:
logging.error("Timed out waiting for card to flip over.")
return False
# Sleep a bit to wait for the card to flip.
time.sleep(0.200)
self.flips_left -= 1
self.freeflips -= 1
if self.freeflips < 0:
self.extraflips += 1
logging.log(VERBOSE, "Flips left: {0}".format(self.flips_left))
if detect:
if self.detect_card(cardnum):
return True
elif self.find_next()[0] != -1:
logging.log(VERBOSE, "Next button is up, the level finished.")
return True
else:
logging.error("Failed to detect card {0} level {1}".format(cardnum, self.level))
ImageGrab.grab().save("failed_detection.png")
logging.info("Screenshot saved to failed_detection.png")
return False
return True
def get_image_back_corner(self):
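        # The card-back artwork differs between level layouts, so use the
        # corner image captured for the matching layout group.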
if self.level < 2:
card_corner = self.card_corners[1]
elif self.level < 4:
card_corner = self.card_corners[3]
elif self.level < 7:
card_corner = self.card_corners[5]
else:
card_corner = self.card_corners[8]
return card_corner
def detect_card_back(self, cardnum, radius=3):
card_corner = self.get_image_back_corner()
# Search for card back.
screengrab = ImageGrab.grab()
searchx, searchy = card_positions[self.level][cardnum]
searchx += self.gamecenter[0] - 6
searchy += self.gamecenter[1] - 6
newx, newy = image_search(screengrab, card_corner, searchx, searchy, radius=radius)
return newx, newy
def detect_card(self, cardnum, dumb=False, timeout=3.0):
logging.log(VERBOSE, "Attempting to detect card {0}".format(cardnum))
timeout_time = time.time() + timeout
while time.time() < timeout_time:
logging.log(VERBOSE, "Checking against cards...")
searchx = self.gamecenter[0] + card_positions[self.level][cardnum][0]
searchy = self.gamecenter[1] + card_positions[self.level][cardnum][1]
card_name, x, y = detect_image(ImageGrab.grab(), self.tarot_cards,
searchx, searchy,
radius=1, threshold=2000.0, compare_regions=card_match_regions)
if card_name is not None:
logging.info("Matched card {0:>2} offset: {2:>2},{3:<2} name: {1} ".format(
cardnum + 1, card_name, searchx - x, searchy - y))
self.cards_on_board[cardnum].name = card_name
return True
elif not dumb and self.learn:
# Wait a little longer for the card flip to definitely complete.
time.sleep(0.050)
card_image = ImageGrab.grab((searchx, searchy, searchx + 70, searchy + 123))
self.learncount += 1
card_name = "Unknown{0}".format(self.learncount)
card_image.save("tarot/cards/{0}.png".format(card_name))
logging.warning("Learned new card, saved as {0}.".format(card_name))
self.tarot_cards.append((card_name, card_image))
elif dumb:
return False
return False
    def wait_unflip(self, cardnum):
        # Poll until the card back is visible again at this position.
        while self.detect_card_back(cardnum, radius=1)[0] == -1:
            time.sleep(0.025)
def play_level(self):
logging.info("Playing level {0}".format(self.level + 1))
self.cards_on_board = []
for i in range(len(card_positions[self.level])):
self.cards_on_board.append(CardOnBoard(i))
unknownpos = 0
        matches_remaining = len(card_positions[self.level]) // 2
cards_flipped = 0
if self.skipstart:
# scan cards
logging.log(VERBOSE, "Scanning for already flipped cards.")
for cardnum in range(len(card_positions[self.level])):
self.detect_card(cardnum, dumb=True)
if self.cards_on_board[cardnum].name is not None:
self.cards_on_board[cardnum].matched = True
cards_flipped += 1
if cards_flipped == 0:
if not self.skipstart:
# Click start.
start_pos = (self.gamecenter[0], 109 + self.gamecenter[1])
next_pos = (self.gamecenter[0], 74 + self.gamecenter[1])
if self.level == 0:
Mouse.click(*start_pos)
else:
Mouse.click(*next_pos)
time.sleep(1.0)
card_corner = self.get_image_back_corner()
# Adjust card positions.
screengrab = ImageGrab.grab()
timeout = time.time() + 6.0
for i in range(len(card_positions[self.level])):
logging.log(VERBOSE, "Calibrating card {0:>2}".format(i + 1))
while True:
searchx, searchy = card_positions[self.level][i]
searchx += self.gamecenter[0] - 6
searchy += self.gamecenter[1] - 6
newx, newy = image_search(screengrab, card_corner, searchx, searchy, radius=3)
if time.time() > timeout or newx != -1:
break
if newx == -1:
# Update our screengrab.
screengrab = ImageGrab.grab()
if newx == -1:
logging.warning("Failed to calibrate card position {0}".format(i + 1))
else:
card_positions[self.level][i] = (newx + 6 - self.gamecenter[0], newy + 6 - self.gamecenter[1])
logging.log(VERBOSE, "Card {0:>2} offset: {1:>2},{2:<2}".format(i + 1, newx - searchx, newy - searchy))
else:
# Game has already started and some cards flipped.
matches_remaining -= int(cards_flipped / 2)
for card in self.cards_on_board:
if not card.matched:
unknownpos = card.position
break
if cards_flipped % 2 == 1:
# We're in the middle of matching cards. Try and match another card before main loop starts.
self.flip_card(unknownpos, detect=True)
# Check if this is a match
match = False
for card in self.cards_on_board:
if self.cards_on_board[unknownpos] == card:
self.cards_on_board[unknownpos].matched = True
card.matched = True
matches_remaining -= 1
match = True
break
if not match:
# Let's wait for the cards to flip back over
self.wait_unflip(unknownpos)
# Figure out what card flipped back over.
for cardnum in range(len(card_positions[self.level])):
if self.cards_on_board[cardnum].matched and self.detect_card_back(cardnum)[0] != -1:
logging.log(VERBOSE, "Marking card {0} as unmatched.".format(cardnum))
self.cards_on_board[cardnum].matched = False
unknownpos += 1
self.skipstart = False
# Click pairs to find cards, and eventually match.
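        # Strategy: reveal unknown cards while remembering every face seen;
        # whenever two known, unmatched cards share a face, flip that pair
        # to bank the match before revealing anything new.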
while matches_remaining > 0:
# seek past any previously matched cards.
while unknownpos < len(self.cards_on_board) and self.cards_on_board[unknownpos].name is not None:
unknownpos += 1
# Check if we know of any matches we can flip.
for c1 in range(len(self.cards_on_board)):
if self.cards_on_board[c1].name is not None and not self.cards_on_board[c1].matched:
for c2 in range(len(self.cards_on_board)):
if c1 != c2 and self.cards_on_board[c1] == self.cards_on_board[c2]:
retry = 0
retry_max = 2
while retry < retry_max:
retry += 1
if not self.flip_card(c1, detect=False):
logging.error("Error, failed to flip {0}".format(c1))
sys.exit(1)
                                if not self.flip_card(c2, detect=False):
                                    logging.warning("Warning, failed to flip {0}. Trying pair again.".format(c2))
                                    # We might have missed detection before they flipped back; try the pair again.
                                    time.sleep(2.0)
else:
break
if retry >= retry_max:
logging.error("Exhausted retries to flip pair")
sys.exit(1)
self.cards_on_board[c1].matched = True
self.cards_on_board[c2].matched = True
matches_remaining -= 1
continue
# Flip the next unknown card.
if not self.flip_card(unknownpos, detect=True):
logging.error("Error, failed to flip {0}".format(unknownpos))
sys.exit(1)
# Check if this card matches any we know.
matched = False
for i in range(len(self.cards_on_board)):
if i != unknownpos and self.cards_on_board[i] == self.cards_on_board[unknownpos]:
# We know the match for this, match it!
self.flip_card(i)
matches_remaining -= 1
self.cards_on_board[i].matched = True
self.cards_on_board[unknownpos].matched = True
matched = True
break
# Find next unknown.
unknownpos += 1
while unknownpos < len(self.cards_on_board) and self.cards_on_board[unknownpos].name is not None:
unknownpos += 1
if not matched:
# No match known, let's detect another card.
if matches_remaining == 1:
# There was only one match left, the last unknown card should be the match we need.
# We also won't get a chance to identify it as the level will end.
self.flip_card(unknownpos)
matches_remaining -= 1
else:
# Detect the card.
if not self.flip_card(unknownpos, detect=True):
logging.error("Error, failed to flip {0}".format(unknownpos))
sys.exit(1)
# Check if this is a match
if self.cards_on_board[unknownpos] == self.cards_on_board[unknownpos - 1]:
self.cards_on_board[unknownpos].matched = True
self.cards_on_board[unknownpos - 1].matched = True
matches_remaining -= 1
else:
# Let's wait for the cards to flip back over
self.wait_unflip(unknownpos)
unknownpos += 1
def play(self, buyflips=0):
self.parse_flips()
self.extraflips = 0
if self.level == 0:
self.freeflips = 15
else:
self.freeflips = flips_gained[self.level - 1]
while self.level < len(card_positions):
max_flips = int(len(card_positions[self.level]) * 1.75)
if max_flips > self.flips_left:
logging.warning("Not enough flips remaining. Bad case flips for next level: {0}".format(max_flips))
print("How do you wish to proceed?")
print("1: Continue play using only free flips, leaving extras to carry over. ")
print(" Note: Spare flips from levels before the script started are not counted.")
print("2: Use all remaining flips.")
print("3: Exit.")
                # With --force we skip the prompt and default to using all
                # remaining flips (option 2); otherwise `answer` would be
                # undefined below.
                answer = 2
                if not args.force:
                    answer = input("Choice [1]: ")
                    if answer == '':
                        answer = 1
                    else:
                        answer = int(answer)
if answer == 1:
self.freeflipsonly = True
if self.freeflips <= 0:
logging.error("No free flips available. Play most likely did not start at level 1.")
sys.exit(1)
if answer == 1 or answer == 2:
Mouse.click(*self.safeclick)
time.sleep(0.250)
# Check if flips were added
self.parse_flips()
else:
sys.exit(1)
self.play_level()
for i in range(buyflips):
logging.info('Buying flips...')
Mouse.click(self.gamepos[0] + 376, self.gamepos[1] + 60)
time.sleep(0.250)
buyflips = 0
if self.level < len(flips_gained):
self.flips_left += flips_gained[self.level]
if self.freeflips < 0:
self.freeflips = 0
self.freeflips += flips_gained[self.level]
logging.debug("Added {0} flips.".format(flips_gained[self.level]))
time.sleep(0.5)
self.parse_flips()
logging.info("Flips left: {0}".format(self.flips_left))
self.level += 1
if args.singlelevel:
sys.exit(0)
time.sleep(3.0)
logging.info("Extra flips used: {0}".format(self.extraflips))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Automatically play LoA Tarot Cards.')
parser.add_argument('--attempts', '-a', type=int, default=1, help="""
How many attempts to do. Only continues upon a successful lvl 10 completion. Defaults to 1.
""")
parser.add_argument('--buyflips', '-b', type=int, default=0, help="""
A number of flips to buy after starting first attempt. Defaults to 0.
""")
parser.add_argument('--force', action='store_true', help="""
Play a level even if not enough flips to complete it. Additionally
keep flipping even if we don't think we have enough flips left.
""")
parser.add_argument('--singlelevel', '-s', action='store_true', help="""
Only play 1 level, good for debugging.
""")
parser.add_argument('--guessflips', action='store_true', help="""
Try to parse the number of flips remaining.
""")
parser.add_argument('--learn', action='store_true', help="""
Try and learn new cards while playing.
""")
parser.add_argument('--debug', action='store_true', help="""
Send debug output to console. It is always sent to log file, so this is rarely recommended.
""")
args = parser.parse_args()
loglevel = VERBOSE
if args.debug:
loglevel = logging.DEBUG
logconfig('tarot', loglevel)
tarot = TarotCards(learn=args.learn)
#tarot.compare_cards()
#sys.exit(0)
tarot.orient()
if args.guessflips:
tarot.parse_flips()
print("Guessed {0} flips left.".format(tarot.flips_left))
sys.exit(0)
buyflips = args.buyflips
for i in range(args.attempts):
tarot.play(buyflips)
tarot.level = 0
buyflips = 0
time.sleep(1.0)
|
|
from unittest import TestCase
from argparse import Namespace
from mock import MagicMock, Mock, patch, ANY, call
from pyinotify import WatchManager
from testrunner.configurator import Config
from testrunner import default_config
@patch.object(Config, "get_values", autospec=True)
@patch.object(Config, "__init__", return_value=None, autospec=True)
class TestConfigCmdRun(TestCase):
def setUp(self):
self.test_args = [
"TEST_RUNNER", "TEST_RUNNER_OPTIONS",
"TESTS_OPTIONS", "TESTS"
]
self.suite_args = [
"TEST_RUNNER", "TEST_RUNNER_OPTIONS",
"TEST_SUITE_OPTIONS", "TEST_SUITE"
]
def test_cmd_all_available(self, init, get_values):
get_values.return_value = ["1", "2", "3", "4"]
conf = Config(None)
cmd = conf.tests_command(suite=False)
get_values.assert_called_with(conf, self.test_args)
self.assertEqual(cmd, "1 2 3 4")
def test_cmd_runner_options_missing(self, init, get_values):
get_values.return_value = ["1", None, "3", "4"]
conf = Config(None)
cmd = conf.tests_command(suite=False)
get_values.assert_called_with(conf, self.test_args)
self.assertEqual(cmd, "1 3 4")
def test_cmd_options_missing(self, init, get_values):
get_values.return_value = ["1", "2", None, "4"]
conf = Config(None)
cmd = conf.tests_command(suite=False)
get_values.assert_called_with(conf, self.test_args)
self.assertEqual(cmd, "1 2 4")
def test_cmd_both_options_missing(self, init, get_values):
get_values.return_value = ["1", None, None, "4"]
conf = Config(None)
cmd = conf.tests_command(suite=False)
get_values.assert_called_with(conf, self.test_args)
self.assertEqual(cmd, "1 4")
def test_cmd_runner_missing(self, init, get_values):
get_values.return_value = [None, "2", "3", "4"]
conf = Config(None)
cmd = conf.tests_command(suite=False)
get_values.assert_called_with(conf, self.test_args)
self.assertEqual(cmd, None)
def test_cmd_test_missing(self, init, get_values):
get_values.return_value = ["1", "2", "3", None]
conf = Config(None)
cmd = conf.tests_command(suite=False)
get_values.assert_called_with(conf, self.test_args)
self.assertEqual(cmd, None)
def test_cmd_suite_all_available(self, init, get_values):
get_values.return_value = ["1", "2", "3", "4"]
conf = Config(None)
cmd = conf.tests_command(suite=True)
get_values.assert_called_with(conf, self.suite_args)
self.assertEqual(cmd, "1 2 3 4")
def test_cmd_suite_runner_options_missing(self, init, get_values):
get_values.return_value = ["1", None, "3", "4"]
conf = Config(None)
cmd = conf.tests_command(suite=True)
get_values.assert_called_with(conf, self.suite_args)
self.assertEqual(cmd, "1 3 4")
def test_cmd_suite_options_missing(self, init, get_values):
get_values.return_value = ["1", "2", None, "4"]
conf = Config(None)
cmd = conf.tests_command(suite=True)
get_values.assert_called_with(conf, self.suite_args)
self.assertEqual(cmd, "1 2 4")
def test_cmd_suite_both_options_missing(self, init, get_values):
get_values.return_value = ["1", None, None, "4"]
conf = Config(None)
cmd = conf.tests_command(suite=True)
get_values.assert_called_with(conf, self.suite_args)
self.assertEqual(cmd, "1 4")
def test_cmd_suite_runner_missing(self, init, get_values):
get_values.return_value = [None, "2", "3", "4"]
conf = Config(None)
cmd = conf.tests_command(suite=True)
get_values.assert_called_with(conf, self.suite_args)
self.assertEqual(cmd, None)
def test_cmd_suite_missing(self, init, get_values):
get_values.return_value = ["1", "2", "3", None]
conf = Config(None)
cmd = conf.tests_command(suite=True)
get_values.assert_called_with(conf, self.suite_args)
self.assertEqual(cmd, None)
class TestConfigOther(TestCase):
"""
Some simple test cases
"""
def test_constructor_watcher_type(self):
"""
Type should be enforced
"""
with self.assertRaises(AssertionError):
Config(None)
@patch.object(Config, "parse_command_line", autospec=True)
@patch.object(Config, "load_config", autospec=True)
@patch.object(Config, "update_watch", autospec=True)
def test_constructor_setup(self, watch, load, parse):
"""
Check initial setup
"""
watch_manager = Mock(spec=WatchManager)
conf = Config(watch_manager, command_args="args")
parse.assert_called_once_with(conf, "args")
load.assert_called_once_with(conf)
watch.assert_called_once_with(conf)
@patch.object(Config, "__init__", return_value=None, autospec=True)
@patch.object(Config, "get_value", autospec=True)
def test_config_file_location(self, get_value, init):
"""
Getting location of config file
"""
conf = Config(None)
get_value.return_value = "test_conf.py"
conf_file = conf.config_file()
self.assertEqual(conf_file, "test_conf.py")
get_value.assert_called_once_with(conf, "CONFIG")
@patch.object(Config, "__init__", return_value=None, autospec=True)
class TestConfigCommandLineParser(TestCase):
@patch("testrunner.configurator.time")
@patch("testrunner.configurator.ArgumentParser.parse_args")
def test_override_existing(self, parse_args, mock_time, init):
"""
Parsing command line should
- override existing command line config
- update time of last config change
"""
mock_time.return_value = 1
conf = Config(None)
conf.command_line = "existing config"
conf.parse_command_line()
self.assertNotEqual(conf.command_line, "existing config")
self.assertEqual(conf.config_loaded_at, 1)
@patch("testrunner.configurator.ArgumentParser.parse_args")
def test_parsing_empty_args(self, parse_args, init):
"""
Result of empty args should be empty config dict
"""
parse_args.return_value = MagicMock(spec=Namespace)
conf = Config(None)
conf.parse_command_line()
self.assertEqual(conf.command_line.__dict__, {})
@patch("testrunner.configurator.ArgumentParser.parse_args")
def test_parsing_single_arg(self, parse_args, init):
"""
Command line args get mapped to config dict
"""
parser_mapping = (
("runner", "TEST_RUNNER"),
("config", "CONFIG"),
("dir", "WATCH_DIR"),
)
for arg_name, conf_name in parser_mapping:
conf = Config(None)
parsed = MagicMock(spec=Namespace, **{arg_name: arg_name})
parse_args.return_value = parsed
conf.parse_command_line()
self.assertEqual(conf.command_line.__dict__, {conf_name: arg_name})
@patch("testrunner.configurator.ArgumentParser.add_argument")
@patch("testrunner.configurator.ArgumentParser.parse_args")
def test_parsing_test_names(self, parse_args, add_argument, init):
"""
Command line test args get mapped to config dict
"""
conf = Config(None)
parsed = MagicMock(test=[1, 2], spec=Namespace)
parse_args.return_value = parsed
conf.parse_command_line()
add_argument.assert_any_call("test", nargs="*", help=ANY)
self.assertEqual(conf.command_line.__dict__, {"TESTS": [1, 2]})
@patch.object(Config, "__init__", return_value=None, autospec=True)
class TestConfigGetValue(TestCase):
def test_existing_value_default(self, init):
conf = Config(None)
conf.command_line = object()
conf.config = object()
value, source = conf.get_value("LOG_LEVEL", True)
self.assertEqual(value, default_config.LOG_LEVEL)
self.assertEqual(source, Config.CONF_DEFAULT)
def test_existing_value_config(self, init):
conf = Config(None)
conf.command_line = object()
conf.config = Mock(LOG_LEVEL="test value")
value, source = conf.get_value("LOG_LEVEL", True)
self.assertEqual(value, "test value")
self.assertEqual(source, Config.CONF_LOCAL)
def test_existing_value_command_line(self, init):
conf = Config(None)
        conf.command_line = Mock(LOG_LEVEL="value from command line")
        conf.config = Mock(LOG_LEVEL="test value")
        value, source = conf.get_value("LOG_LEVEL", True)
        self.assertEqual(value, "value from command line")
self.assertEqual(source, Config.CONF_COMMAND_LINE)
def test_unexpected_value(self, init):
conf = Config(None)
conf.command_line = object()
conf.config = object()
with self.assertRaises(ValueError):
conf.get_value("ABC", True)
def test_value_source(self, init):
conf = Config(None)
conf.command_line = object()
conf.config = object()
values_source = conf.get_value("LOG_LEVEL", True)
self.assertIsInstance(values_source, tuple)
self.assertEqual(len(values_source), 2)
value = conf.get_value("LOG_LEVEL", False)
self.assertNotIsInstance(value, tuple)
@patch.object(Config, "get_value", autospec=True)
def test_get_values(self, get_value, init):
conf = Config(None)
get_value.side_effect = (x for x in range(3))
values = conf.get_values(["a", "b", "c"])
self.assertEqual(get_value.call_count, 3)
self.assertEqual(values, [0, 1, 2])
get_value.assert_has_calls([call(conf, x, False) for x in "abc"])
@patch.object(Config, "get_value", autospec=True)
def test_get_values_with_source(self, get_value, init):
conf = Config(None)
get_value.side_effect = ((x, "source") for x in range(3))
values = conf.get_values(["a", "b", "c"], True)
self.assertEqual(get_value.call_count, 3)
self.assertEqual(values, [(0, "source"), (1, "source"), (2, "source")])
get_value.assert_has_calls([call(conf, x, True) for x in "abc"])
@patch.object(Config, "__init__", return_value=None, autospec=True)
class TestConfigFilterWrapper(TestCase):
    def test_filter_wrapper_correct(self, init):
"""
Test behaviour of wrapper given correct method
"""
conf = Config(None)
external_filter_test = Mock(return_value="X")
conf.filter_test = external_filter_test
result = conf.filter_wrapper("123")
self.assertEqual(result, "X")
external_filter_test.assert_called_once_with("123")
    def test_filter_wrapper_none(self, init):
"""
Test behaviour of wrapper without filter method
"""
conf = Config(None)
conf.filter_test = None
result = conf.filter_wrapper("123")
self.assertEqual(result, False)
    def test_filter_wrapper_not_callable(self, init):
        """
        Test behaviour of wrapper with a non-callable filter method
        """
conf = Config(None)
conf.filter_test = "test"
with self.assertRaises(TypeError):
conf.filter_wrapper("123")
@patch.object(Config, "__init__", return_value=None, autospec=True)
@patch.object(Config, "get_value", autospec=True)
@patch.object(Config, "update_watch", autospec=True)
class TestConfigLoading(TestCase):
@patch("testrunner.configurator._log")
@patch("testrunner.configurator.time")
    def test_config_defines_itself(
            self, mock_time, log, update_watch, get_value, init):
        """
        Local config should not define the location of another config file
        """
conf = Config(None)
conf.config = Mock(CONFIG="config.py")
conf.config_loaded_at = 123
get_value.return_value = ("aaa", "1")
mock_time.return_value = 321
conf.load_config()
self.assertTrue(log.warning.called)
with self.assertRaises(AttributeError):
# CONFIG attribute should be deleted at this point
conf.config.CONFIG
self.assertEqual(conf.config_loaded_at, 321)
self.assertTrue(update_watch.called)
@patch("testrunner.configurator.path.exists")
@patch("testrunner.configurator.time")
    def test_config_file_does_not_exist_default(
            self, mock_time, exists, update_watch, get_value, init):
        """
        Config file does not exist; location comes from the defaults
        """
conf = Config(None)
conf.config = None
conf.config_loaded_at = 123
get_value.return_value = ("file.py", conf.CONF_DEFAULT)
exists.return_value = False
mock_time.return_value = 321
        # no exception is raised in this case
conf.load_config()
exists.assert_called_with("file.py")
self.assertEqual(conf.config_loaded_at, 321)
self.assertTrue(update_watch.called)
@patch("testrunner.configurator.path.exists")
    def test_config_file_does_not_exist_command_line(
            self, exists, update_watch, get_value, init):
        """
        Config file does not exist; location comes from command line args
        """
conf = Config(None)
conf.config = None
conf.config_loaded_at = 123
get_value.return_value = ("file.py", conf.CONF_COMMAND_LINE)
exists.return_value = False
with self.assertRaises(ValueError):
conf.load_config()
exists.assert_called_with("file.py")
self.assertEqual(conf.config_loaded_at, 123)
self.assertFalse(update_watch.called)
@patch("testrunner.configurator.path.exists")
@patch("testrunner.configurator.time")
@patch("testrunner.configurator.imp.find_module")
@patch("testrunner.configurator.imp.load_module")
    def test_config_file_exists_command_line(
            self, load_mod, find_mod, mock_time,
            exists, update_watch, get_value, init):
        """
        Config file exists; location comes from command line args
        """
conf = Config(None)
conf.config = None
conf.config_loaded_at = 123
get_value.return_value = ("test/file.py", "source")
exists.return_value = True
mock_time.return_value = 321
find_mod.return_value = ["val_1", "val_2"]
conf.load_config()
exists.assert_called_with("test/file.py")
find_mod.assert_called_with("file", ["test"])
load_mod.assert_called_with("local_config", "val_1", "val_2")
self.assertEqual(conf.config_loaded_at, 321)
self.assertTrue(update_watch.called)
@patch("testrunner.configurator.path.exists")
    def test_config_file_exists_command_line_wrong_ext(
            self, exists, update_watch, get_value, init):
        """
        Config file exists but has the wrong extension; location comes from
        command line args
        """
conf = Config(None)
conf.config = None
conf.config_loaded_at = 123
get_value.return_value = ("file.txt", "source")
exists.return_value = True
with self.assertRaises(ValueError):
conf.load_config()
exists.assert_called_with("file.txt")
self.assertEqual(conf.config_loaded_at, 123)
self.assertFalse(update_watch.called)
@patch.object(Config, "__init__", return_value=None, autospec=True)
@patch.object(Config, "get_value", autospec=True)
class TestConfigUpdatingWatcher(TestCase):
def _helper(self):
conf = Config(None)
conf.watcher_added_at = 1
conf.config_loaded_at = 2
conf.watch_descriptors = None
conf.watch_manager = Mock()
self.conf = conf
def test_too_early_to_update(self, get_value, init):
"""
It should be safe to call update watch at any point,
however action should be executed only when needed
"""
self._helper()
self.conf.watcher_added_at = 2
self.conf.config_loaded_at = 1
self.conf.update_watch()
# Early return, no values should be accessed
self.assertFalse(get_value.called)
    def test_no_watch_descriptor(self, get_value, init):
        """
        When descriptors do not exist yet, do not try to remove them
        """
self._helper()
get_value.side_effect = iter([1, 4, "dir", None, "config"])
self.conf.update_watch()
self.assertFalse(self.conf.watch_manager.rm_watch.called)
# @expectedFailure
    def test_watch_descriptor_present(self, get_value, init):
        """
        When descriptors already exist, remove them before adding new watches
        """
self._helper()
src_descriptor = Mock()
src_descriptor.values.return_value = "src_descr"
conf_descriptor = Mock()
conf_descriptor.values.return_value = "conf_descr"
self.conf.watch_descriptors = [src_descriptor, conf_descriptor]
get_value.side_effect = iter([1, 4, "dir", None, "config"])
self.conf.update_watch()
self.conf.watch_manager.rm_watch.assert_has_calls([
call("src_descr", rec=True),
call("conf_descr", rec=True),
])
@patch("testrunner.configurator.pyinotify.ExcludeFilter")
def test_filter_present(self, in_filter, get_value, init):
"""
Test applying filters
"""
self._helper()
get_value.side_effect = iter([1, 4, "dir", "filters", "config"])
self.conf.filter_test = "some function"
in_filter.return_value = "new function"
self.conf.update_watch()
in_filter.assert_called_once_with("filters")
self.assertEqual(self.conf.filter_test, "new function")
@patch("testrunner.configurator.pyinotify.ExcludeFilter")
def test_filters_not_set(self, in_filter, get_value, init):
"""
Test filters not set
"""
self._helper()
get_value.side_effect = iter([1, 4, "dir", [], "config"])
self.conf.filter_test = "some function"
in_filter.return_value = "new function"
self.conf.update_watch()
self.assertFalse(in_filter.called)
self.assertEqual(self.conf.filter_test, None)
@patch("testrunner.configurator.time")
def test_set_new_watch(self, mock_time, get_value, init):
"""
Setting new watch for dir
"""
self._helper()
self.conf.watcher_added_at = 1
add_watch = Mock()
add_watch.side_effect = iter(["dir descriptor", "conf descriptor"])
self.conf.watch_manager.add_watch = add_watch
get_value.side_effect = iter([1, 2, "dir", None, "config"])
mock_time.return_value = 2
self.conf.update_watch()
self.assertEqual(add_watch.call_count, 2)
add_watch.assert_has_calls([
call(path="dir", mask=1 & ~2, auto_add=True,
rec=True, exclude_filter=self.conf.filter_wrapper),
call(path="config", mask=1 & ~2),
])
self.assertEqual(
self.conf.watch_descriptors, ("dir descriptor", "conf descriptor"))
self.assertEqual(self.conf.watcher_added_at, 2)
@patch("testrunner.configurator.time")
    def test_include_exclude_list(self, mock_time, get_value, init):
"""
Include/exclude should accept a list of flags
"""
self._helper()
self.conf.watcher_added_at = 1
add_watch = Mock()
add_watch.side_effect = iter(["src descr", "conf descr"])
self.conf.watch_manager.add_watch = add_watch
filter_include = [1, 4094]
filter_exclude = [8, 16, 32]
get_value.side_effect = iter([
filter_include, filter_exclude, "dir", None, "config"
])
ored_include = 4095
ored_exclude = 56
expected_mask = ored_include & ~ored_exclude
mock_time.return_value = 2
self.conf.update_watch()
self.assertEqual(add_watch.call_count, 2)
add_watch.assert_has_calls([
call(path="dir", mask=expected_mask, auto_add=True,
rec=True, exclude_filter=self.conf.filter_wrapper),
call(path="config", mask=expected_mask),
])
self.assertEqual(
self.conf.watch_descriptors, ("src descr", "conf descr"))
self.assertEqual(self.conf.watcher_added_at, 2)
@patch("testrunner.configurator.time")
    def test_include_exclude_tuple(self, mock_time, get_value, init):
"""
Include/exclude should accept a tuple of flags
"""
self._helper()
self.conf.watcher_added_at = 1
add_watch = Mock()
add_watch.side_effect = iter(["src descr", "conf descr"])
self.conf.watch_manager.add_watch = add_watch
filter_include = (1, 4094)
filter_exclude = (8, 16, 32)
get_value.side_effect = iter([
filter_include, filter_exclude, "dir", None, "config"
])
ored_include = 4095
ored_exclude = 56
expected_mask = ored_include & ~ored_exclude
mock_time.return_value = 2
self.conf.update_watch()
self.assertEqual(add_watch.call_count, 2)
add_watch.assert_has_calls([
call(path="dir", mask=expected_mask, auto_add=True,
rec=True, exclude_filter=self.conf.filter_wrapper),
call(path="config", mask=expected_mask),
])
self.assertEqual(
self.conf.watch_descriptors, ("src descr", "conf descr"))
self.assertEqual(self.conf.watcher_added_at, 2)
|
|
##
# Testing suite for the parcel select/query service (php/query.php)
#
import requests
import re
import sys
from xml.dom import minidom
from copy import copy
from . import GeoMOOSETest
class ParcelTest(GeoMOOSETest):
def setUp(self):
self.select_php = "http://" + self.host + self.geomoose_base + "/php/query.php"
self.default_params = {
"mode" : "search",
"header0" : "select_header",
"footer0" : "select_footer",
"template0" : "select_record",
"layer0" : "parcels/parcels",
"projection": "EPSG:3857",
"shape0" : "POINT(-10373109.338156 5552992.5910145)",
"shape0_layer" : "",
"shape0_buffer" : "0",
"shape0_layer_buffer" : "0"
}
super(ParcelTest,self).setUp()
def fetchHighlightImage(self, selectionId, testName):
params = {
"BBOX" : "-10375087.146263,5551951.1365041,-10371676.143877,5554320.6843807",
"FORMAT": "image/png",
"HEIGHT": "496",
"LAYERS" : "highlight",
"MAP": selectionId,
"SRS": "EPSG:3857",
"TRANSPARENT": "true",
"WIDTH": "714",
"REQUEST" : "GetMap",
"SERVICE": "WMS",
"STYLES": "",
"VERSION": "1.1.1"
}
response = self.get("http://"+self.host+self.mapserver_base, params=params)
# TODO: Test response results...
        # response.content is bytes, so write the image in binary mode.
        with open(self.temp_dir + '/' + testName + '.png', 'wb') as f:
            f.write(response.content)
return response
def check_parcels(self, paramOverrides, expectedParcels, pinPattern='(PIN:\<\/b\>\<\/td\>\<td\>)([0-9]+)'):
"""
        Prototype function for sending a set of parameters to the query
        service and checking that every expected parcel is present.
"""
params = copy(self.default_params)
params.update(paramOverrides)
results = self.post(self.select_php, params=params)
# check to make sure we got a valid response from the server.
self.assertEqual(results.status_code, 200, "Failed to get valid return form service.")
        # Replacing all the newlines was causing very odd buffer overflows
        # on long strings, so we split the response into lines and parse
        # each one individually.
html = results.text.split('\n')
# print >> sys.stderr, 'RAW', results.text
# print >> sys.stderr, 'HTML', html
#<td><b>PIN:</b></td><td>130360001026</td>
pin_re = re.compile(pinPattern, re.UNICODE)
# pull out the PIN entries
parcel_ids = []
for line in html:
found_ids = [x[1] for x in pin_re.findall(line)]
parcel_ids += found_ids
#print >> sys.stderr, 'Found Parcels', parcel_ids
# test for all the valid pins here.
# expected IDs
missing_parcels = []
for expected_id in expectedParcels:
if(expected_id not in parcel_ids):
missing_parcels.append(expected_id)
self.assertEqual(len(missing_parcels), 0, 'Parcel ID not found in results: '+';'.join(missing_parcels))
# now test that we didn't get *more* parcels
# than we were looking to find.
expected_set = set(expectedParcels)
found_set = set(parcel_ids)
diff = found_set.difference(expected_set)
#print >> sys.stderr, 'Found Length: ', len(found_set), 'Diff Length:', len(diff)
self.assertTrue(len(diff) == 0, 'More parcels returned than expected: %s' % ';'.join(list(diff)))
return results
class SelectTest(ParcelTest):
def test_ticket24(self):
"""
Check that buffered parcels are returning a complete set.
"""
p = {
"layer0" : "parcels/parcels",
"shape0_layer" : "parcels/parcels",
"shape0_buffer" : "0",
"shape0_layer_buffer" : "30.479983540808888", # "100ft" converted to meters
"shape0" : "POINT(-10372932.577528 5552764.4742582)"
}
# this list was generated with postgresql/postgis,
# on 19 October 2015, theduckylittle
expected_parcels = [
"130250001050",
"130260001201",
"130260001150",
"130260001051",
"130260001275",
"130260001175",
"130350002001",
"130350001002",
"130350001025",
"130360001026",
"130260001101",
"130260001025",
]
# this data should return correctly but there will be a missing
# parcel in the highlight
results = self.check_parcels(p, expected_parcels)
        # Extract the selection id: locate the 'map' key in the returned
        # JavaScript and take the string between the next pair of quotes.
        find_map = results.text.find("'map'")
        first_apos = results.text.find("'", find_map+5+1)
        second_apos = results.text.find("'", first_apos+1)
        selection_id = results.text[first_apos+1:second_apos]
self.fetchHighlightImage(selection_id, 'ticket24')
# TODO : Automate the viewing of this image, even though it downloads reasonably
def test_ticket31(self):
"""
Testing LAYER versus GROUP versus ALL selection.
"""
expected_parcels = ['130270001077']
layer_test = {
"shape0" : "POINT(-10374958.869833 5552691.0678879)"
}
self.check_parcels(layer_test, expected_parcels)
group_test = {
"shape0" : "POINT(-10374958.869833 5552691.0678879)",
"select0_layer" : "parcels/parcels_group"
}
self.check_parcels(group_test, expected_parcels)
all_test = {
"shape0" : "POINT(-10374958.869833 5552691.0678879)",
"select0_layer" : "parcels/all"
}
self.check_parcels(all_test, expected_parcels)
def test_ticket85(self):
"""
Test a two point polygon with no buffer (ticket #85)
"""
test_params = {
"shape" : "POLYGON((-10375742.69258 5555129.6328634,-10375734.33228 5555115.3009206,-10375742.69258 5555129.6328634))"
}
params = copy(self.default_params)
params.update(test_params)
results = self.post(self.select_php, params=params)
self.assertEqual(results.status_code, 200, "Service did not catch the exception!")
def test_ticket84(self):
"""
Ensure point selector is not over selecting (ticket #84)
"""
test_params = {
"shape0" : "POINT(-10375755.352418 5555107.2472463)"
}
self.check_parcels(test_params, ['130220003076'])
def test_header_and_footer(self):
"""
Test header0/footer0 support in query.php using select_header/footer
"""
test_params = copy(self.default_params)
test_params.update({
"shape0" : "POINT(-10373109.338156 5552992.5910145)",
"shape0_buffer" : "500",
})
res = self.post(self.select_php, params=test_params)
def test_caching(self):
"""
Test a parcel query with caching
"""
test_params = copy(self.default_params)
test_params['cache'] = 'true'
res = self.post(self.select_php, params=test_params)
#print >> sys.stderr, res.text
# geomoose cache id's start with 'gm_'
self.assertTrue('gm_' in res.text, 'Missing query contents!')
|
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# criticalstat Trace long critical sections (IRQs or preemption disabled)
# For Linux, uses BCC, eBPF. Requires kernel built with
# CONFIG_DEBUG_PREEMPT and CONFIG_PREEMPTIRQ_EVENTS
#
# USAGE: criticalstat [-h] [-p] [-i] [-d DURATION]
#
# Copyright (c) 2018, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# By Joel Fernandes <joel@joelfernandes.org>
from __future__ import print_function
from bcc import BPF
import argparse
import sys
import subprocess
import os.path
examples=""
parser = argparse.ArgumentParser(
description="Trace long critical sections",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-p", "--preemptoff", action="store_true",
help="Find long sections where preemption was off")
parser.add_argument("-i", "--irqoff", action="store_true",
help="Find long sections where IRQ was off")
parser.add_argument("-d", "--duration", default=100,
help="Duration in uS (microseconds) below which we filter")
args = parser.parse_args()
preemptoff = False
irqoff = False
if args.irqoff:
preemptoff = False
irqoff = True
elif args.preemptoff:
preemptoff = True
irqoff = False
debugfs_path = subprocess.Popen("cat /proc/mounts | grep -w debugfs" +
                                " | awk '{print $2}'",
                                shell=True,
                                stdout=subprocess.PIPE).stdout.read().split(b"\n")[0]
# stdout.read() returns bytes, so compare against a bytes literal.
if debugfs_path == b"":
    print("ERROR: Unable to find debugfs mount point")
    sys.exit(0)
trace_path = debugfs_path + b"/tracing/events/preemptirq/"
if (not os.path.exists(trace_path + b"irq_disable") or
not os.path.exists(trace_path + b"irq_enable") or
not os.path.exists(trace_path + b"preempt_disable") or
not os.path.exists(trace_path + b"preempt_enable")):
print("ERROR: required tracing events are not available\n" +
"Make sure the kernel is built with CONFIG_DEBUG_PREEMPT " +
"CONFIG_PREEMPT_TRACER " +
"and CONFIG_PREEMPTIRQ_EVENTS (CONFIG_PREEMPTIRQ_TRACEPOINTS in "
"kernel 4.19 and later) enabled. Also please disable " +
"CONFIG_PROVE_LOCKING and CONFIG_LOCKDEP on older kernels.")
sys.exit(0)
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
enum addr_offs {
START_CALLER_OFF,
START_PARENT_OFF,
END_CALLER_OFF,
END_PARENT_OFF
};
struct start_data {
u32 addr_offs[2];
u64 ts;
int idle_skip;
int active;
};
struct data_t {
u64 time;
s64 stack_id;
u32 cpu;
u64 id;
u32 addrs[4]; /* indexed by addr_offs */
char comm[TASK_COMM_LEN];
};
BPF_STACK_TRACE(stack_traces, 16384);
BPF_PERCPU_ARRAY(sts, struct start_data, 1);
BPF_PERCPU_ARRAY(isidle, u64, 1);
BPF_PERF_OUTPUT(events);
/*
* In the below code we install tracepoint probes on preempt or
* IRQ disable/enable critical sections and idle events, the cases
* are combinations of 4 different states.
* The states are defined as:
* CSenter: A critical section has been entered. Either due to
* preempt disable or irq disable.
* CSexit: A critical section has been exited. Either due to
* preempt enable or irq enable.
* Ienter: The CPU has entered an idle state.
* Iexit: The CPU has exited an idle state.
*
* The scenario we are trying to detect is if there is an overlap
* between Critical sections and idle entry/exit. If there are any
* such cases, we avoid recording those critical sections since they
* are not worth while to record and just add noise.
*/
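/*
 * Illustrative example: for the interleaving CSenter, Ienter, Iexit, CSexit,
 * the cpu_idle probe sets idle_skip on the active section, so the CSexit
 * probe discards it instead of reporting a critical section whose measured
 * time was inflated by the idle period.
 */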
TRACEPOINT_PROBE(power, cpu_idle)
{
int idx = 0;
u64 val;
struct start_data *stdp, std;
// Mark active sections as that they should be skipped
// Handle the case CSenter, Ienter, CSexit, Iexit
// Handle the case CSenter, Ienter, Iexit, CSexit
stdp = sts.lookup(&idx);
if (stdp && stdp->active) {
/*
* Due to verifier issues, we have to copy contents
* of stdp onto the stack before the update.
* Fix it to directly update once kernel patch d71962f
* becomes more widespread.
*/
std = *stdp;
std.idle_skip = 1;
sts.update(&idx, &std);
}
// Mark CPU as actively within idle or not.
if (args->state < 100) {
val = 1;
isidle.update(&idx, &val);
} else {
val = 0;
isidle.update(&idx, &val);
}
return 0;
}
static int in_idle(void)
{
u64 *idlep;
int idx = 0;
// Skip event if we're in idle loop
idlep = isidle.lookup(&idx);
if (idlep && *idlep)
return 1;
return 0;
}
static void reset_state(void)
{
int idx = 0;
struct start_data s = {};
sts.update(&idx, &s);
}
TRACEPOINT_PROBE(preemptirq, TYPE_disable)
{
int idx = 0;
struct start_data s;
// Handle the case Ienter, CSenter, CSexit, Iexit
// Handle the case Ienter, CSenter, Iexit, CSexit
if (in_idle()) {
reset_state();
return 0;
}
u64 ts = bpf_ktime_get_ns();
s.idle_skip = 0;
s.addr_offs[START_CALLER_OFF] = args->caller_offs;
s.addr_offs[START_PARENT_OFF] = args->parent_offs;
s.ts = ts;
s.active = 1;
sts.update(&idx, &s);
return 0;
}
TRACEPOINT_PROBE(preemptirq, TYPE_enable)
{
int idx = 0;
u64 start_ts, end_ts, diff;
struct start_data *stdp;
// Handle the case CSenter, Ienter, CSexit, Iexit
// Handle the case Ienter, CSenter, CSexit, Iexit
if (in_idle()) {
reset_state();
return 0;
}
stdp = sts.lookup(&idx);
if (!stdp) {
reset_state();
return 0;
}
// Handle the case Ienter, Csenter, Iexit, Csexit
if (!stdp->active) {
reset_state();
return 0;
}
// Handle the case CSenter, Ienter, Iexit, CSexit
if (stdp->idle_skip) {
reset_state();
return 0;
}
end_ts = bpf_ktime_get_ns();
start_ts = stdp->ts;
if (start_ts > end_ts) {
reset_state();
return 0;
}
diff = end_ts - start_ts;
if (diff < DURATION) {
reset_state();
return 0;
}
u64 id = bpf_get_current_pid_tgid();
struct data_t data = {};
if (bpf_get_current_comm(&data.comm, sizeof(data.comm)) == 0) {
data.addrs[START_CALLER_OFF] = stdp->addr_offs[START_CALLER_OFF];
data.addrs[START_PARENT_OFF] = stdp->addr_offs[START_PARENT_OFF];
data.addrs[END_CALLER_OFF] = args->caller_offs;
data.addrs[END_PARENT_OFF] = args->parent_offs;
data.id = id;
data.stack_id = stack_traces.get_stackid(args, 0);
data.time = diff;
data.cpu = bpf_get_smp_processor_id();
events.perf_submit(args, &data, sizeof(data));
}
reset_state();
return 0;
}
"""
bpf_text = bpf_text.replace('DURATION', '{}'.format(int(args.duration) * 1000))
if preemptoff:
bpf_text = bpf_text.replace('TYPE', 'preempt')
else:
bpf_text = bpf_text.replace('TYPE', 'irq')
b = BPF(text=bpf_text)
def get_syms(kstack):
syms = []
for addr in kstack:
s = b.ksym(addr, show_offset=True)
syms.append(s)
return syms
# process event
def print_event(cpu, data, size):
try:
global b
event = b["events"].event(data)
stack_traces = b['stack_traces']
stext = b.ksymname('_stext')
print("===================================")
print("TASK: %s (pid %5d tid %5d) Total Time: %-9.3fus\n\n" % (event.comm, \
(event.id >> 32), (event.id & 0xffffffff), float(event.time) / 1000), end="")
print("Section start: {} -> {}".format(b.ksym(stext + event.addrs[0]), b.ksym(stext + event.addrs[1])))
print("Section end: {} -> {}".format(b.ksym(stext + event.addrs[2]), b.ksym(stext + event.addrs[3])))
if event.stack_id >= 0:
kstack = stack_traces.walk(event.stack_id)
syms = get_syms(kstack)
if not syms:
return
for s in syms:
print(" ", end="")
print("%s" % s)
else:
print("NO STACK FOUND DUE TO COLLISION")
print("===================================")
print("")
except Exception:
sys.exit(0)
b["events"].open_perf_buffer(print_event, page_cnt=256)
print("Finding critical section with {} disabled for > {}us".format(
('preempt' if preemptoff else 'IRQ'), args.duration))
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
|
import copy
import itertools
import boto
import boto.ec2
import k.aws.util
import warnings
from k.aws.config import AwsCreds, connection_hash
from k.aws.config import RegionAwsCreds, region_connection_hash
def connect(creds):
"""
Connect to ec2, with user-provided options.
    :param creds: The AWS credentials, optionally including a region name.
    :type creds: k.aws.config.AwsCreds or k.aws.config.RegionAwsCreds
:rtype: boto.ec2.connection.EC2Connection
"""
if isinstance(creds, AwsCreds):
return boto.connect_ec2(**connection_hash(creds))
elif isinstance(creds, RegionAwsCreds):
return boto.ec2.connect_to_region(
**region_connection_hash(creds))
raise Exception("Unrecognized credential type: %s" % creds)
def parse_ec2_launch(lines, base):
instances = []
reservation = copy.copy(base)
for line in lines:
if line.startswith("RESERVATION"):
reservation = copy.copy(base)
parse_reservation_line(line, reservation)
elif line.startswith("INSTANCE"):
instance = parse_instance_line(line, copy.copy(reservation))
instances.append(instance)
return instances
def parse_reservation_line(line, reservation):
chunks = line.strip().split("\t")
set_field(reservation, chunks[1], 'reservation_id')
set_field(reservation, chunks[2], 'user_id')
set_field(reservation, chunks[3], 'groups')
def parse_instance_line(line, instance):
chunks = line.strip().split("\t")
if len(chunks) > 1:
        # Defect in ec2-api-tools: ec2-run-instances sometimes emits one
        # less field, so pad the missing column.
        if len(chunks) == 13:
            chunks.insert(8, '')
set_field(instance, chunks[1], 'instance_id')
set_field(instance, chunks[2], 'ami_id')
set_field(instance, chunks[3], 'external_dns')
set_field(instance, chunks[4], 'internal_dns')
set_field(instance, chunks[5], 'state')
set_field(instance, chunks[6], 'key')
instance['index'] = int(chunks[7])
set_field(instance, chunks[8], 'type')
set_field(instance, chunks[10], 'timestamp')
set_field(instance, chunks[11], 'availability_zone')
set_field(instance, chunks[12], 'aki_id')
set_field(instance, chunks[13], 'ari_id')
return instance
def set_field(instance, value, name):
if(value == ""):
instance[name] = None
else:
instance[name] = value
def get_account_id(conn):
"""
There's no direct API call for getting the account id
so get it through a security group object.
"""
rs = conn.get_all_security_groups()
if len(rs) > 0:
return rs[0].owner_id
else:
warnings.warn("Coult not get account id.")
return 0
def get_instance_from_name_or_ip(conn, name_or_ip):
    """Return the id of the instance matching a DNS name or IP, else None."""
    for i in all_instances(conn):
        if name_or_ip in (i.private_dns_name, i.public_dns_name,
                          i.ip_address, i.private_ip_address):
            return i.id
    return None
def check_for_tag(instance, tag, desired_value):
    """Check for a particular tag and confirm that its value equals
    desired_value.
    :type instance: boto.ec2.instance.Instance
    :param instance: The instance whose tags will be checked.
    :type tag: str
    :param tag: The name of the tag to search for
    :type desired_value: str
    :param desired_value: The value the tag must hold for a True result.
    Returns: boolean
    """
instance.update()
if tag in instance.tags.keys():
if instance.tags[tag] == desired_value:
return True
return False
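# Illustrative usage (assumptions: `instance` is a boto.ec2.instance.Instance
# and the 'Environment'/'production' tag pair is only an example):
#
#     if check_for_tag(instance, 'Environment', 'production'):
#         print("tagged as production")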
def all_instances(conn):
"""returns a list of instances, no reservations required"""
all_instances = [ [inst for inst in res.instances] for res in conn.get_all_instances() ]
return list(itertools.chain(*all_instances)) # flat-pack
def get_key_pair_name(creds):
"""
Returns the default key pair name to use when launching instances with in the account
associated with the creds argument.
"""
ec2_conn = connect(creds)
account_id = get_account_id(ec2_conn)
return k.aws.util.get_key_pair_name(account_id)
class Instances(object):
"""Gathers data from a particular account, and makes it easier to
search for desired instance data
"""
def __init__(self, boto, account_info):
"""Initialize with a reference to the boto module (so adding this class
doesn't break existing code that already calls boto) and the account
info that will be connected to.
"""
ec2 = boto.connect_ec2(account_info[0], account_info[1])
self.reservations = ec2.get_all_instances()
self.groups = ec2.get_all_security_groups()
self.instances = all_instances(ec2)
self.by_instance_id = dict()
self.by_private_ip = dict()
self.by_private_dns_name = dict()
self.by_public_ip = dict()
self.by_public_dns_name = dict()
self._build_instance_id_map()
self._build_private_ip_map()
self._build_private_dns_name_map()
self._build_public_ip_map()
self._build_public_dns_name_map()
def flatten_instances(self):
instances = list()
for r in self.reservations:
instances.extend(r.instances)
return instances
def _build_instance_id_map(self):
for instance in self.instances:
self.by_instance_id[instance.id] = instance
def _build_private_ip_map(self):
for instance in self.instances:
self.by_private_ip[instance.private_ip_address] = instance
def _build_private_dns_name_map(self):
for instance in self.instances:
self.by_private_dns_name[instance.private_dns_name] = instance
def _build_public_ip_map(self):
for instance in self.instances:
self.by_public_ip[instance.ip_address] = instance
def _build_public_dns_name_map(self):
for instance in self.instances:
self.by_public_dns_name[instance.public_dns_name] = instance
def lookup(self, key):
"""Look in the various dicts we have for the name being
provided. If that fails, I can add a fallback lookup if we
need to be more general.
"""
for d in (self.by_instance_id, self.by_private_ip, self.by_public_ip, self.by_public_dns_name, self.by_private_dns_name):
if key in d.keys():
return d[key]
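# Usage sketch (illustrative; requires live AWS access). account_info is
# unpacked by __init__ as (access_key, secret_key); the key strings below
# are placeholders, not real credentials.
def _example_lookup():
    idx = Instances(boto, ('ACCESS_KEY', 'SECRET_KEY'))
    # lookup() accepts an instance id, a private/public IP, or a DNS name.
    return idx.lookup('i-0abc12345')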
class KInstances(Instances):
"""Gathers data from a particular account, and makes it easier to
search for desired instance data.
Uses k.aws-specific lingo since its superclass was written without
knowledge of k.aws.
"""
def __init__(self, ec2_conn):
self.reservations = ec2_conn.get_all_instances()
self.groups = ec2_conn.get_all_security_groups()
self.instances = all_instances(ec2_conn)
self.by_instance_id = dict()
self.by_private_ip = dict()
self.by_private_dns_name = dict()
self.by_public_ip = dict()
self.by_public_dns_name = dict()
self._build_instance_id_map()
self._build_private_ip_map()
self._build_private_dns_name_map()
self._build_public_ip_map()
self._build_public_dns_name_map()
# Local Variables:
# tab-width: 4
# indent-tabs-mode: t
# End:
|
|
import time
import os, os.path
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import operator
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import numpy.ma as ma
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import pylab
import pickle
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
print PyCodePath
from matplotlib.ticker import FuncFormatter
from matplotlib.ticker import ScalarFormatter
def myexpformat_4digs(x, pos):
return '%.3e' %x
# for ndigs in range(4):
# lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e')
# if eval(lab)==x:
# return lab
# return lab
ExpTickLabels=FuncFormatter(myexpformat_4digs)
RegTickLabels=matplotlib.ticker.ScalarFormatter()
def autotickformat(ax, x=False, y=False, ndec=3):
for bl, xax, lims in zip([x, y], [ax.xaxis, ax.yaxis], [ax.get_xlim(), ax.get_ylim()]):
if bl:
try:
doit=numpy.max(numpy.log10(numpy.abs(numpy.array(lims))))<(-ndec)
doit=doit or numpy.min(numpy.log10(numpy.abs(numpy.array(lims))))>ndec
except:
print 'error on axis formatter for lims ', lims
continue
if doit:
xax.set_major_formatter(ExpTickLabels)
else:
xax.set_major_formatter(RegTickLabels)
def autocolorbarformat(lims, ndec=3):
try:
doit=numpy.max(numpy.log10(numpy.abs(numpy.array(lims))))<(-ndec)
doit=doit or numpy.min(numpy.log10(numpy.abs(numpy.array(lims))))>ndec
except:
print 'error on axis formatter for lims ', lims
return
if doit:
return ExpTickLabels
else:
return RegTickLabels
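# Self-contained sketch (not in the original file) showing when
# autotickformat() switches an axis to exponential tick labels: the y-data
# here spans ~1e-6, beyond the default 10**-ndec threshold.
def _example_autotickformat():
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    ax.plot(numpy.arange(1, 10), numpy.arange(1, 10) * 1.0e-6)
    autotickformat(ax, x=False, y=True, ndec=3)
    return ax  # y ticks now use the '%.3e' FuncFormatter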
wd=os.getcwd()
sys.path.append(os.path.join(PyCodePath,'PythonCompositionPlots'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
from quaternary_FOM_stackedtern5 import *
from quaternary_FOM_stackedtern10 import *
from quaternary_FOM_stackedtern20 import *
from quaternary_FOM_stackedtern30 import *
from quaternary_FOM_stackedtern9of100 import *
#sys.path.append(os.path.join(PyCodePath,'JCAPPyDBComm'))
#from mysql_dbcommlib import *
#
#sys.path.append(os.path.join(PyCodePath, 'PythonCodeSecureFiles'))
#from paths import *
#if os.path.isdir(EchemSavePath):
# os.chdir(EchemSavePath)
try:
wd=os.path.split(os.path.split(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0])[0])[0]
os.chdir(wd)
except:
pass
class messageDialog(QDialog):
def __init__(self, parent=None, title=''):
super(messageDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
mainlayout.addWidget(self.buttonBox, 0, 0)
self.setLayout(mainlayout)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def ExitRoutine(self):
return
def mygetopenfile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getOpenFileName(xparent,''.join(['Select file to open:', markstr]),os.path.join(xpath, filename).replace('\\','/')))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getOpenFileName(parent,''.join(['Select file to open: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetopenfiles(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfns=QFileDialog.getOpenFileNames(xparent,''.join(['Select file to open:', markstr]),os.path.join(xpath, filename).replace('\\','/'))
xparent.destroy()
xapp.quit()
else:
returnfns=QFileDialog.getOpenFileNames(parent,''.join(['Select file to open: ', markstr]),os.path.join(xpath, filename).replace('\\','/'))
return [str(s) for s in returnfns]
def mygetsavefile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getSaveFileName(xparent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getSaveFileName(parent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetdir(parent=None, xpath="%s" % os.getcwd(),markstr='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getExistingDirectory(xparent,''.join(['Select directory:', markstr]), xpath))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getExistingDirectory(parent,''.join(['Select directory:', markstr]), xpath))
def userinputcaller(parent, inputs=[('testnumber', int)], title='Enter values', cancelallowed=True):
problem=True
while problem:
idialog=userinputDialog(parent, inputs, title)
idialog.exec_()
problem=idialog.problem
if not idialog.ok and cancelallowed:
return None
inputs=[(tup[0], tup[1], s) for tup, s in zip(inputs, idialog.inputstrlist)]
return idialog.ans
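# Usage sketch (illustrative; needs a running QApplication). The dialog is
# re-shown until every field converts cleanly; numeric fields go through
# myeval(), so plain literals like '1.5' are expected.
def _example_userinputcaller():
    vals = userinputcaller(None, inputs=[('x position', float), ('label', str)],
                           title='Enter scan parameters')
    return vals  # e.g. [1.5, 'sample A'], or None if the user cancelled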
class userinputDialog(QDialog):
def __init__(self, parent, inputs=[('testnumber', int, '')], title='Enter values'):
super(userinputDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.parent=parent
self.inputs=inputs
self.lelist=[]
for i, tup in enumerate(self.inputs):
lab=QLabel()
lab.setText(tup[0])
le=QLineEdit()
if len(tup)>2:
le.setText(tup[2])
self.lelist+=[le]
mainlayout.addWidget(lab, 0, i, 1, 1)
mainlayout.addWidget(le, 1, i, 1, 1)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox, 2, 0, len(inputs), 1)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
self.setLayout(mainlayout)
QMetaObject.connectSlotsByName(self)
self.problem=False
self.ok=False
def ExitRoutine(self):
self.ok=True
self.problem=False
self.ans=[]
self.inputstrlist=[str(le.text()).strip() for le in self.lelist]
for s, tup in zip(self.inputstrlist, self.inputs):
if tup[1]==str:
try:
self.ans+=[s]
except:
self.problem=True
break
else:
try:
n=myeval(s)
self.ans+=[tup[1](n)]
except:
self.problem=True
break
if self.problem:
idialog=messageDialog(self, 'problem with conversion of ' + tup[0])
idialog.exec_()
#class selectdbsessionsDialog(QDialog):
# def __init__(self, parent, ex_trange_techl, maxsessions=15, title='Select DB experiment sessions to analyze'):
# super(selectdbsessionsDialog, self).__init__(parent)
# self.setWindowTitle(title)
# mainlayout=QVBoxLayout()
#
# self.cblist=[]
# self.cbinds=[]
# for count, (ex, (t0, t1), techl) in enumerate(ex_trange_techl[:maxsessions]):
# cb=QCheckBox()
# cb.setText('exp %d: %s to %s, %s' %(ex, str(t0), str(t1), ','.join(techl)))
# cb.setChecked(False)
# mainlayout.addWidget(cb)
# self.cblist+=[cb]
# self.cbinds+=[[count]]
# if len(ex_trange_techl)>maxsessions:
# cb=QCheckBox()
# ex, (t0, t1), techl=ex_trange_techl[maxsessions]
# ex2, (t02, t12), techl2=ex_trange_techl[-1]
# techl=list(set(techl+techl2))
# cb.setText('exp %d-%d: %s to %s, %s' %(ex, ex2, str(t0), str(t12), ','.join(techl)))
# cb.setChecked(True)
# mainlayout.addWidget(cb)
# self.cblist+=[cb]
# self.cbinds+=[range(maxsessions, len(ex_trange_techl))]
# cb.setChecked(True)
#
# self.buttonBox = QDialogButtonBox(self)
# self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
# self.buttonBox.setOrientation(Qt.Horizontal)
# self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
# QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
# mainlayout.addWidget(self.buttonBox)
#
# QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
# self.setLayout(mainlayout)
# QMetaObject.connectSlotsByName(self)
# def ExitRoutine(self):
# self.selectinds=[]
# for cb, l in zip(self.cblist, self.cbinds):
# if cb.isChecked():
# self.selectinds+=l
#
class echem10axesWidget(QDialog):
def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
super(echem10axesWidget, self).__init__(parent)
mainlayout=QVBoxLayout()
self.plotw=plotwidget(self)
self.plotw.fig.clf()
self.axl, self.stpl=make10ternaxes(fig=self.plotw.fig, ellabels=ellabels)
#### mainlayout.addWidget(self.plotw)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox)
self.setLayout(mainlayout)
# def plot(self, d, cb=True):
# if 'fomlabel' in d.keys():
# cblabel=d['fomlabel']
# else:
# cblabel=''
# scatter_10axes(d['comps'], d['fom'], self.stpl, s=18, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class echem30axesWidget(QDialog):
def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
super(echem30axesWidget, self).__init__(parent)
mainlayout=QVBoxLayout()
self.plotw=plotwidget(self)
self.plotw.fig.clf()
self.axl, self.stpl=make30ternaxes(fig=self.plotw.fig, ellabels=ellabels)
mainlayout.addWidget(self.plotw)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox)
self.setLayout(mainlayout)
# def plot(self, d, cb=True):
# if 'fomlabel' in d.keys():
# cblabel=d['fomlabel']
# else:
# cblabel=''
# scatter_30axes(d['comps'], d['fom'], self.stpl, s=18, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class messageDialog(QDialog):
def __init__(self, parent=None, title=''):
super(messageDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
## mainlayout.addWidget(self.buttonBox, 0, 0)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def ExitRoutine(self):
return
class plotwidget(FigureCanvas):
def __init__(self, parent, width=12, height=6, dpi=72, projection3d=False):
# plotdata can be a 2D array for an image plot, a list of two 1D arrays for an x-y plot, or a list of lists of two 1D arrays
self.fig=Figure(figsize=(width, height), dpi=dpi)
if projection3d:
self.axes=self.fig.add_subplot(111, navigate=True, projection='3d')
else:
self.axes=self.fig.add_subplot(111, navigate=True)
self.axes.hold(True)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
#self.parent=parent
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
#NavigationToolbar(self, parent)
NavigationToolbar(self, self)
self.mpl_connect('button_press_event', self.myclick)
self.clicklist=[]
def myclick(self, event):
if not (event.xdata is None or event.ydata is None):
arrayxy=[event.xdata, event.ydata]
print 'clicked on image: array indices ', arrayxy, ' using button', event.button
self.clicklist+=[arrayxy]
self.emit(SIGNAL("genericclickonplot"), [event.xdata, event.ydata, event.button, event.inaxes])
def col_string(s):
s=s.strip()
if ('(' in s) and (')' in s):
try:
s=eval(s)
except:
return None
cc=colors.ColorConverter()
return cc.to_rgb(s)
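# Quick sketch (illustrative) of col_string() behaviour: it accepts either a
# matplotlib colour name or a string holding an RGB tuple, returning an
# (r, g, b) float tuple, or None when a tuple-looking string fails to eval.
def _example_col_string():
    assert col_string('red') == (1.0, 0.0, 0.0)
    assert col_string(' (0.2, 0.4, 0.6) ') == (0.2, 0.4, 0.6)
    assert col_string('(oops)') is None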
|
|
"""Migration to hybrid mode (TVShows and movies)
Revision ID: edec70effd74
Revises: b037baf33cfa
Create Date: 2021-06-13 19:03:10.107347
"""
# revision identifiers, used by Alembic.
revision = 'edec70effd74'
down_revision = 'b037baf33cfa'
from alembic import op
import sqlalchemy as sa
import cineapp.migration_types
import json
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# Create new show table
op.create_table('shows',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('original_name', sa.String(length=100), nullable=True),
sa.Column('release_date', sa.Date(), nullable=True),
sa.Column('type', sa.String(length=5), nullable=True),
sa.Column('url', sa.String(length=100), nullable=True),
sa.Column('origin', sa.String(length=5), nullable=True),
sa.Column('director', sa.String(length=500), nullable=True),
sa.Column('overview', sa.String(length=2000), nullable=True),
sa.Column('poster_path', sa.String(length=255), nullable=True),
sa.Column('added_when', sa.DateTime(), nullable=True),
sa.Column('added_by_user', sa.Integer(), nullable=True),
sa.Column('show_type', sa.String(length=50), nullable=True),
sa.ForeignKeyConstraint(['added_by_user'], ['users.id'], ),
sa.ForeignKeyConstraint(['origin'], ['origins.id'], ),
sa.ForeignKeyConstraint(['type'], ['types.id'], ),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8',
mysql_collate='utf8_general_ci'
)
op.create_index(op.f('ix_shows_director'), 'shows', ['director'], unique=False)
op.create_index(op.f('ix_shows_name'), 'shows', ['name'], unique=False)
op.create_index(op.f('ix_shows_origin'), 'shows', ['origin'], unique=False)
op.create_index(op.f('ix_shows_original_name'), 'shows', ['original_name'], unique=False)
op.create_index(op.f('ix_shows_release_date'), 'shows', ['release_date'], unique=False)
op.create_index(op.f('ix_shows_type'), 'shows', ['type'], unique=False)
op.create_index(op.f('ix_shows_url'), 'shows', ['url'], unique=False)
# Migrate data from movies table to show table
conn=op.get_bind()
conn.execute("INSERT INTO shows (`id`, `name`, `release_date`, `type`, `url`, `origin`, `director`, `overview`, `poster_path`, `added_when`, `added_by_user`, `original_name`, `show_type`) SELECT `id`, `name`, `release_date`, `type`, `url`, `origin`, `director`, `overview`, `poster_path`, `added_when`, `added_by_user`, `original_name`,\"movie\" FROM movies")
op.create_table('tvshows',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('nb_seasons', sa.Integer(), nullable=True),
sa.Column('tmvdb_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id'], ['shows.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('tmvdb_id'),
mysql_charset='utf8',
mysql_collate='utf8_general_ci'
)
# Rename movie_id column to show_id column and update associated foreign key
op.drop_constraint('marks_ibfk_2', 'marks', type_='foreignkey')
op.drop_index('movie_id', table_name='marks')
op.alter_column('marks','movie_id', new_column_name='show_id', server_default=None, existing_server_default=None, nullable=False, existing_nullable=False, type_=None, existing_type=sa.Integer())
op.create_foreign_key('marks_ibfk_2', 'marks', 'shows', ['show_id'], ['id'])
#op.create_index(op.f('show_id'), 'marks', ['show_id'], unique=False)
# Rename mark_movie_id column to mark_show_id and update associated foreign key
op.drop_constraint('mark_comment_ibfk_1', 'mark_comment', type_='foreignkey')
op.alter_column('mark_comment', 'mark_movie_id', new_column_name='mark_show_id', server_default=None, existing_server_default=None, nullable=False, existing_nullable=False, type_=None, existing_type=sa.Integer())
op.create_foreign_key('mark_comment_ibfk_1', 'mark_comment', 'marks', ['mark_user_id', 'mark_show_id'], ['user_id', 'show_id'])
# Rename table favorite_movies to favorite_shows
op.drop_constraint('favorite_movies_ibfk_1', 'favorite_movies', type_='foreignkey')
op.alter_column('favorite_movies', 'movie_id', new_column_name='show_id', server_default=None, existing_server_default=None, nullable=False, existing_nullable=False, type_=None, existing_type=sa.Integer())
op.rename_table('favorite_movies','favorite_shows')
op.create_foreign_key('favorite_shows_ibfk_1', 'favorite_shows', 'shows', ['show_id'], ['id'])
# Create movies table
op.create_table('movies_temp',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('tmvdb_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id'], ['shows.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('tmvdb_id'),
mysql_charset='utf8',
mysql_collate='utf8_general_ci'
)
# Fill the movie table with correct data in order to handle correctly inheritance
conn.execute("INSERT INTO movies_temp SELECT `id`, `duration`, `tmvdb_id` FROM movies")
# Drop the old movie table and rename the temp table
op.drop_table('movies')
op.rename_table('movies_temp','movies')
# Column upgrade
op.alter_column('mark_comment', 'mark_show_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=True)
# Update notification field with new notifications
conn=op.get_bind()
res = conn.execute("select id,notifications from users")
results = res.fetchall()
for cur_result in results:
temp_id=cur_result[0]
temp_dict=json.loads(cur_result[1])
temp_notif_value=temp_dict["notif_movie_add"]
del temp_dict["notif_movie_add"]
temp_dict["notif_show_add"]=temp_notif_value
final_dict=json.dumps(temp_dict)
# Update the notification field into the database
conn.execute("UPDATE users SET notifications='%s' WHERE id=%s" % (final_dict, temp_id))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
# Create movie table with a temporary name because the inherited movies table still exists
op.create_table('movies_temp',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('original_name', sa.String(length=100), nullable=True),
sa.Column('release_date', sa.Date(), nullable=True),
sa.Column('type', sa.String(length=5), nullable=True),
sa.Column('url', sa.String(length=100), nullable=True),
sa.Column('origin', sa.String(length=5), nullable=True),
sa.Column('director', sa.String(length=200), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('overview', sa.String(length=2000), nullable=True),
sa.Column('tmvdb_id', sa.Integer(), nullable=True),
sa.Column('poster_path', sa.String(length=255), nullable=True),
sa.Column('added_when', sa.DateTime(), nullable=True),
sa.Column('added_by_user', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['added_by_user'], ['users.id'], ),
sa.ForeignKeyConstraint(['origin'], ['origins.id'], ),
sa.ForeignKeyConstraint(['type'], ['types.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('tmvdb_id'),
mysql_charset='utf8',
mysql_collate='utf8_general_ci'
)
# Upgrade columns
op.alter_column('mark_comment', 'mark_show_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=False)
# Migrate data from show and movies table
conn=op.get_bind()
conn.execute("INSERT INTO movies_temp (`id`, `name`, `release_date`, `type`, `url`, `origin`, `director`, `overview`, `poster_path`, `added_when`, `added_by_user`, `original_name`) SELECT `id`, `name`, `release_date`, `type`, `url`, `origin`, `director`, `overview`, `poster_path`, `added_when`, `added_by_user`, `original_name` FROM shows")
conn.execute("UPDATE movies_temp mo SET mo.duration=(SELECT m.duration FROM movies m WHERE m.id=mo.id)")
# Rename the movies_temp table with the definitive name
# But remove constraints linked to that table before
op.drop_constraint('marks_ibfk_2', 'marks', type_='foreignkey')
op.drop_constraint('favorite_shows_ibfk_1', 'favorite_shows', type_='foreignkey')
op.drop_table('movies')
op.rename_table('movies_temp','movies')
# Rename show_id column to movie_id column and update associated foreign key
op.alter_column('marks','show_id', new_column_name='movie_id', server_default=None, existing_server_default=None, nullable=False, existing_nullable=False, type_=None, existing_type=sa.Integer())
op.create_foreign_key('marks_ibfk_2', 'marks', 'movies', ['movie_id'], ['id'])
# Rename mark_show_id column to mark_movie_id and update associated foreign key
op.drop_constraint('mark_comment_ibfk_1', 'mark_comment', type_='foreignkey')
op.alter_column('mark_comment', 'mark_show_id', new_column_name='mark_movie_id', server_default=None, existing_server_default=None, nullable=False, existing_nullable=False, type_=None, existing_type=sa.Integer())
op.create_foreign_key('mark_comment_ibfk_1', 'mark_comment', 'marks', ['mark_user_id', 'mark_movie_id'], ['user_id', 'movie_id'])
# Rename table favorite_shows to favorite_movies
op.alter_column('favorite_shows', 'show_id', new_column_name='movie_id', server_default=None, existing_server_default=None, nullable=False, existing_nullable=False, type_=None, existing_type=sa.Integer())
op.rename_table('favorite_shows','favorite_movies')
op.create_foreign_key('favorite_movies_ibfk_1', 'favorite_movies', 'movies', ['movie_id'], ['id'])
# Delete obsolete tables
op.drop_table('tvshows')
op.drop_table('shows')
# Update notification field with new notifications
conn=op.get_bind()
res = conn.execute("select id,notifications from users")
results = res.fetchall()
for cur_result in results:
temp_id=cur_result[0]
temp_dict=json.loads(cur_result[1])
temp_notif_value=temp_dict["notif_show_add"]
del temp_dict["notif_show_add"]
temp_dict["notif_movie_add"]=temp_notif_value
final_dict=json.dumps(temp_dict)
# Update the notification field into the database
conn.execute("UPDATE users SET notifications='%s' WHERE id=%s" % (final_dict, temp_id))
# Create missing index
op.create_index(op.f('movie_id'), 'marks', ['movie_id'], unique=False)
# ### end Alembic commands ###
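# Usage note (not part of the auto-generated migration): with this file on
# the Alembic version path, the hybrid-mode schema change is applied or
# reverted with the standard commands, e.g.
#
#   alembic upgrade edec70effd74
#   alembic downgrade b037baf33cfa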
|
|
'''
This is an overarching class for the config class, the variable class, and
the model class.
It should implement a bunch of the normal stuff that I want from all of
the models, which will allow me to transfer stuff between them nicely.
I'm going to implement a couple of my new paradigms, which should make things
smoother, especially the saving.
'''
import nnlibrary as nn
import pickle as pickle
import numpy as np
class ConfigParameters:
def __init__(self):
'''This is the set of parameters that will be saved by the
save command, in case we want to pull them up later.'''
# training parameters
self.lr = 1.0e-4 # the learning rate
self.lr_reduction = 1.5 # the reduction of the learning rate after epochs
self.max_epochs = None # the maximum number of training epochs
self.stop_after = None # the number of epochs to train after the best epoch
self.epsilon = 1.0e-8 # the Adam epsilon parameter
# hidden dimension sizes
self.r = 7
# number of gru layers
self.gru_depth = 1
self.bidirectional = True
# for the graph augmentation
self.augmentation = False
self.structure_data=False
self.structure_data_scaling=0.1
# regularization
self.regularization = None
self.dropout = None # dropout during training
# extra symbols
self.special_symbols = [
'END_OF_HYP', 'END_OF_SECTION', 'START_OUTPUT',
'TARGET_UC', 'UC']
# add various parameters for attention. These usually won't be used
self.attention = False
self.matrix_attention = True
self.full_state_attention = False
class DefaultConfig:
def __init__(self, language_model):
''' this is a default configuration thing, which should have
most of the normal parameters.'''
# add the normal parameters
self.p = ConfigParameters()
# add the language_model dependent stuff
self.lm = language_model
# and the various lookup tables
self.total_constructor_arity = language_model.total_constructor_arity
# self.constructor_arity_indices = language_model.constructor_arity_indices
self.label_to_arity = {label:len(language_model.constructor_arity_indices[label]) for label in language_model.constructor_arity_indices}
# self.num_constructors = len(self.constructor_arity_indices) # the number of propositions in the database
self.constructor_label_to_number = language_model.constructor_label_to_number
self.num_extra_variable_names = language_model.num_extra_variable_names
# self.max_unconstrained_arity = language_model.max_unconstrained_arity
# this builds a list of all the constructors
self.all_constructor_list = [None] * len(self.constructor_label_to_number)
for label in self.constructor_label_to_number:
number = self.constructor_label_to_number[label]
assert self.all_constructor_list[number] is None
self.all_constructor_list[number] = label
self.construct_dictionary()
def construct_dictionary(self):
# decode turns numbers into tokens
# encode turns tokens into numbers
self.decode = self.all_constructor_list
self.decode = self.decode + self.p.special_symbols
self.encode = {}
for i in range(len(self.decode)):
self.encode[self.decode[i]] = i
self.num_tokens = len(self.decode)
print('Config(): added '+str(self.num_tokens)+' tokens to dictionary')
# print self.num_tokens, self.decode
def save(self, file_path):
''' saves the variables to file_path '''
with open(file_path, 'wb') as handle:
pickle.dump(self.p, handle)
def load(self, file_path):
''' loads the variables and replaces the current values '''
with open(file_path, 'rb') as handle:
self.p = pickle.load(handle)
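# Illustrative sketch (not in the original file): save()/load() round-trip
# only the ConfigParameters block, not the language model, so a reloaded
# config must be rebuilt with the same language_model argument.
def _example_config_roundtrip(language_model, path='/tmp/config.pkl'):
    cfg = DefaultConfig(language_model)
    cfg.save(path)
    cfg2 = DefaultConfig(language_model)
    cfg2.load(path)
    assert cfg2.p.r == cfg.p.r  # hyperparameters restored from the pickle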
class AugmentationVariables:
def __init__(self, graph, r, name, bidirectional=False):
''' this defines a set of variables for the
augmentation portion of the graph. If graph
is not None, these variables get added to
graph. r is the dimension of the various
parameters. This includes variables for
both the backward and forward passes.
This creates the following variables:
no_parent, no_left_sibling, no_right_sibling
In theory I could add an extra set of the above
for each of the special symbols. I won't now,
though, although I may change that later.
'''
self.no_parent = nn.VariableNode([r], None, name=name+'no_parent')
self.no_left_sibling = nn.VariableNode([r], None, name=name+'no_left_sibling')
self.vs = [self.no_parent, self.no_left_sibling]
if bidirectional:
self.no_right_sibling = nn.VariableNode([r], None, name=name+'no_right_sibling')
self.vs.append(self.no_right_sibling)
self.rvs = []
return
class Container:
def __init__(self):
''' this is a placeholder class, which we can use for
whatever '''
pass
class DefaultVariables:
def __init__(self, config):
self.config = config
self.vs = []
self.rvs = []
r = self.config.p.r
num_tokens = self.config.num_tokens
# the embedding dictionary, which I think is the only shared variable
self.L = nn.VariableNode([num_tokens, self.config.p.r], None, name='L')
self.vs.append(self.L)
# add the attention matrix if needed
if self.config.p.attention and self.config.p.matrix_attention:
# add the attention matrix
left_size = (r * self.config.p.gru_depth) if self.config.p.full_state_attention else r
# assume bidirectional
right_size = 2*left_size if self.config.p.bidirectional else left_size
self.attention_B = nn.VariableNode([left_size, right_size], None, name='attention_B')
self.vs.append(self.attention_B)
# add_trainer needs to be added after initializing all
# of the variables
def add_trainer(self):
self.optimizer = nn.AdamOptimizer(self.vs, alpha=self.config.p.lr, beta1=0.9,
beta2=0.999, epsilon=self.config.p.epsilon)
def save(self, file_path):
''' saves the variables to file_path '''
with open(file_path, 'wb') as handle:
pickle.dump(self.vs, handle)
def load(self, file_path):
''' loads the variables and replaces the current values '''
with open(file_path, 'rb') as handle:
vs = pickle.load(handle)
# build a dictionary for the new variables
vs_dict = {v.name:v for v in vs}
warned = False
for v in self.vs:
if (v.name not in vs_dict) and (not warned):
print(set([v.name for v in self.vs]))
print(set(vs_dict.keys()))
print('in saved but not new')
print(set(vs_dict.keys()).difference([v.name for v in self.vs]))
print('in new but not saved')
print(set([v.name for v in self.vs]).difference(list(vs_dict.keys())))
print('missing', v.name)
print(v.name in vs_dict)
print(list(vs_dict.keys()))
raise Warning('Some variables not replaced.')
else:
v.load(vs_dict[v.name])
def add_GRUb_block(self, name, bidirectional=False, takes_attention=False):
''' this creates a set of parameters for a block of
GRUbs, based off of the current attentional model
and stuff
h_size is set to be r
x_size is h_size (*2 if bidirectional)
+(2r if forward and attention)
+(r if backwards and attention)
+(r if being fed attention)
'''
r = self.config.p.r
depth = self.config.p.gru_depth
vs = []
rvs = []
GRUbParameters_forward = []
outputs = Container()
outputs.forward = GRUbParameters_forward
# forward pass
for i in range(depth):
h_size = r
x_size = r if i==0 or not bidirectional else 2*r
if i==0 and self.config.p.structure_data:
x_size += 4 # depth, arity, parent_arity, leaf_position
if self.config.p.augmentation: x_size += 2*r
if takes_attention:
x_size += r
if self.config.p.bidirectional: # assume that bidirectional applies to the attention source
x_size += r
this_GRUb = nn.GRUbParameters(h_size, None, x_size=x_size, name=name + '_GRUb_forward_'+str(i))
vs += this_GRUb.vs
rvs += this_GRUb.rvs
GRUbParameters_forward.append(this_GRUb)
# backward pass
if bidirectional:
GRUbParameters_backward = []
outputs.backward = GRUbParameters_backward
for i in range(depth):
h_size = r
x_size = r if i==0 or not bidirectional else 2*r
if i==0 and self.config.p.structure_data:
x_size += 4 # depth, arity, parent_arity, leaf_position
if self.config.p.augmentation: x_size += r
if takes_attention: x_size += 2*r # assumes bidirectional input for attention
this_GRUb = nn.GRUbParameters(h_size, None, x_size=x_size, name=name + '_GRUb_backward_'+str(i))
vs += this_GRUb.vs
rvs += this_GRUb.rvs
GRUbParameters_backward.append(this_GRUb)
if self.config.p.augmentation:
augmentation_params = []
outputs.aug = augmentation_params
for i in range(depth):
this_aug = AugmentationVariables(None, r, name+'augmentation_'+str(i), bidirectional=bidirectional)
vs += this_aug.vs
rvs += this_aug.rvs
augmentation_params.append(this_aug)
# add to the variables
self.vs+=vs
self.rvs+=rvs
return outputs
class DefaultModel:
def __init__(self, config, variables, train=False):
self.config = config
self.v = variables
self.g = nn.ComputationalGraph(nodes = self.v.vs)
self.lm = config.lm
self.attention_has_been_set_up = False
self.dropout = self.config.p.dropout if train else None
self.train = train
# add in regularization if the regularization
# is not zero.
if self.config.p.regularization is not None:
reg_losses = [nn.L2Node(self.config.p.regularization, var, self.g)
for var in self.v.rvs]
self.loss = nn.AddNode(reg_losses, self.g)
else:
self.loss = nn.ConstantNode(0.0, graph=self.g)
# self.attention_memory should be a list of the
# intermediate states for the GRU block:
# self.attention_memory[i][j] is the ith input symbol
# at the jth layer
if self.config.p.attention:
self.attention_memory = []
def set_up_attention(self):
self.attention_has_been_set_up = True
if not self.config.p.attention: return
#print 'attention', len(self.attention_memory),len(self.attention_memory[0])
prestack = [nn.ConcatNode([layer[i] for layer in self.attention_memory], self.g) for i in range(len(self.attention_memory[0]))]
#print prestack
self.stacked_attention_memory = nn.StackNode(prestack, self.g)
#print 'stacked_memory.shape()', self.stacked_attention_memory.shape()
if self.config.p.full_state_attention:
prestack = [nn.ConcatNode([layer[i] for layer in self.attention_memory], self.g) for i in range(len(self.attention_memory[0]))]
self.to_alpha = nn.StackNode(prestack, self.g)
else:
prestack = self.attention_memory[0]
self.to_alpha = nn.StackNode(prestack, self.g)
#print len(self.attention_memory),len(self.attention_memory[0]), self.attention_memory[0][0].value.shape
#print 'to_alpha shape',self.to_alpha.value.shape
# transpose
self.to_alpha = nn.TransposeInPlaceNode(self.to_alpha, self.g)
# to_alpha is (length, r-ish)
if self.config.p.matrix_attention:
self.to_alpha = nn.DotNode(self.v.attention_B, self.to_alpha, self.g)
def attention(self, state_list):
assert self.config.p.attention
assert self.attention_has_been_set_up
if self.config.p.full_state_attention:
state = nn.ConcatNode(state_list, self.g)
else:
state = state_list[0]
alpha = nn.DotNode(state, self.to_alpha, self.g)
#print 'alpha shape', alpha.shape(), self.stacked_attention_memory
alpha = nn.SoftmaxNode(alpha, self.g)
newstates = nn.DotNode(alpha, self.stacked_attention_memory, self.g)
return nn.SplitNode(newstates, self.config.p.gru_depth, self.g)
def encode(self, token, structure_data=None):
index = self.config.encode[token]
out = nn.SingleIndexNode(index, self.v.L, self.g)
out = nn.DropoutNode(out, self.dropout, self.g)
if self.config.p.structure_data:
structure_data_node = nn.ConstantNode(self.config.p.structure_data_scaling*np.array(structure_data), self.g)
out = nn.ConcatNode([out,structure_data_node], self.g)
return out
# returns a list of input vectors, one per token in the string.
def encode_string(self, string, structure_datas = None):
if self.config.p.structure_data:
return [self.encode(token, structure_data=sd) for token, sd in zip(string, structure_datas)]
return [self.encode(token) for token in string]
def forward_vertical_slice(self, hs, parent, left, input_token, params, structure_data, takes_attention=True):
takes_attention = takes_attention and self.config.p.attention
# first construct the actual inputs, which is a bunch of stuff merged together
if takes_attention: attention_in = self.attention(hs)
x = self.encode(input_token, structure_data=structure_data)
out_hs = []
for i in range(self.config.p.gru_depth):
x = nn.DropoutNode(x, self.dropout, self.g)
if self.config.p.augmentation and takes_attention:
merged_x = nn.ConcatNode([x, parent[i], left[i], attention_in[i]], self.g)
elif self.config.p.augmentation and not takes_attention:
merged_x = nn.ConcatNode([x, parent[i], left[i]], self.g)
elif not self.config.p.augmentation and takes_attention:
merged_x = nn.ConcatNode([x, attention_in[i]], self.g)
elif not self.config.p.augmentation and not takes_attention:
merged_x = x
x = nn.GRUbCell(hs[i], merged_x, params[i], self.g, dropout=self.dropout)
out_hs.append(x)
return out_hs, x
def gru_block(self, hs, input_tokens, params, hs_backward=None, parents=None,
left_siblings=None, right_siblings=None, bidirectional=True,
feed_to_attention=False, structure_data=None):
# verify the parameters
feed_to_attention = self.config.p.attention and feed_to_attention
if self.config.p.augmentation:
assert left_siblings is not None
assert parents is not None
if bidirectional:
assert right_siblings is not None
# this does the forward and backwards parts of a gru_block
xs = self.encode_string(input_tokens, structure_datas=structure_data)
length = len(input_tokens)
# memory[layer][position] holds the hidden states (direction-concatenated when bidirectional)
memory = []
h_out_forward = []
h_out_backward = [] if bidirectional else None
# we proceed layer by layer
for i in range(self.config.p.gru_depth):
this_layer_forward = [None] * length
# forward pass
h = hs[i]
for pos in range(length):
this_params = params[pos]
this_x = xs[pos]
this_x = nn.DropoutNode(this_x, self.dropout, self.g)
if self.config.p.augmentation:
# no attention, forward pass
parent = parents[pos]
parent_x = this_params.aug[i].no_parent if parent==-1 else this_layer_forward[parent]
left_sibling = left_siblings[pos]
left_sibling_x = this_params.aug[i].no_left_sibling if left_sibling==-1 else this_layer_forward[left_sibling]
this_x = nn.ConcatNode([this_x, parent_x, left_sibling_x], self.g)
h = nn.GRUbCell(h, this_x, this_params.forward[i], self.g, dropout=self.dropout)
this_layer_forward[pos] = h
h_out_forward.append(h)
# backward pass
if bidirectional:
this_layer_backward = [None] * length
# backward pass
h = hs_backward[i]
for pos in range(length-1,-1,-1):
this_params = params[pos]
this_x = xs[pos]
this_x = nn.DropoutNode(this_x, self.dropout, self.g)
if self.config.p.augmentation:
# no attention, backward pass
right_sibling = right_siblings[pos]
right_sibling_x = this_params.aug[i].no_right_sibling if right_sibling==-1 else this_layer_backward[right_sibling]
this_x = nn.ConcatNode([this_x, right_sibling_x], self.g)
h = nn.GRUbCell(h, this_x, this_params.backward[i], self.g, dropout=self.dropout)
this_layer_backward[pos] = h
h_out_backward.append(h)
# now figure out the forward layer thingy
xs = [nn.ConcatNode(x, self.g) for x in zip(this_layer_forward, this_layer_backward)]
else:
xs = this_layer_forward
memory.append(xs)
if feed_to_attention:
self.attention_memory = memory
# h_out is the forward out or the concatenation of the forward and backward outs
h_out = [nn.ConcatNode(x, self.g) for x in zip(h_out_forward, h_out_backward)] if bidirectional else h_out_forward
return h_out # this is really all we need
''' These helpers return information about the graph structure of a
tree, in particular the augmentation information.
Once called, the resulting string still needs to be run through the decoder. '''
def merge_graph_structures(gs_list, params_list):
out_string = []
out_parents = []
out_left_siblings = []
out_right_siblings = []
out_params = []
out_depth = []
out_parent_arity = []
out_leaf_position = []
out_arity = []
for gs, param in zip(gs_list, params_list):
current_n = len(out_string)
length = len(gs.string)
out_params += [param] * length
out_string += gs.string
out_parents += [(-1 if x==-1 else x+current_n) for x in gs.parents]
out_left_siblings += [(-1 if x==-1 else x+current_n) for x in gs.left_sibling]
out_right_siblings += [(-1 if x==-1 else x+current_n) for x in gs.right_sibling]
out_depth += gs.depth
out_parent_arity += gs.parent_arity
out_leaf_position += gs.leaf_position
out_arity += gs.arity
return out_string, out_parents, out_left_siblings, out_right_siblings, \
out_params, out_depth, out_parent_arity, out_leaf_position, out_arity
# def get_graph_structure(trees, start_symbol=None, intermediate_symbol = None, end_symbol = None):
# ''' this returns a bunch of things from the annotated tree
# returns:
# string: the string corresponding to the labels
# parents: a list for each node containing the index of tha parent
# of that node. returns -1 if this is a root node, and
# -2 if this is a special symbol.
# left_sibling: returns the index of the sibling. -1 if it has no
# left sibling, -2 if this is a special symbol
# right_sibling:
# '''
# # print TreeInformation(trees, start_symbol=start_symbol,
# # intermediate_symbol=intermediate_symbol,
# # end_symbol=end_symbol).params()
# return TreeInformation(trees, start_symbol=start_symbol,
# intermediate_symbol=intermediate_symbol,
# end_symbol=end_symbol).params()
class TreeInformation:
def __init__(self, trees, start_symbol=None,
intermediate_symbol=None, end_symbol=None):
self.parents = []
self.left_sibling = []
self.right_sibling = []
self.string = []
self.depth = []
self.parent_arity = []
self.leaf_position = []
self.arity = []
self.n=0
if start_symbol is not None:
self.right_sibling.append(-1)
self.parents.append(-1)
self.left_sibling.append(-1)
self.string.append(start_symbol)
self.depth.append(-1)
self.parent_arity.append(-1)
self.leaf_position.append(-1)
self.arity.append(-1)
self.n+=1
for i, tree in enumerate(trees):
self.add_tree(tree)
self.add_tree_right_siblings(tree)
if i is not len(trees)-1 and intermediate_symbol is not None:
self.right_sibling.append(-1)
self.parents.append(-1)
self.left_sibling.append(-1)
self.string.append(intermediate_symbol)
self.depth.append(-1)
self.parent_arity.append(-1)
self.leaf_position.append(-1)
self.arity.append(-1)
self.n+=1
if end_symbol is not None:
self.right_sibling.append(-1)
self.parents.append(-1)
self.left_sibling.append(-1)
self.string.append(end_symbol)
self.depth.append(-1)
self.parent_arity.append(-1)
self.leaf_position.append(-1)
self.arity.append(-1)
self.n+=1
# verify some stuff
length = len(self.string)
assert len(self.right_sibling) == length
assert len(self.parents) == length
assert len(self.left_sibling) == length
assert len(self.depth) == length
assert len(self.parent_arity) == length
assert len(self.leaf_position) == length
assert len(self.arity) == length
def params(self):
return self.string, self.parents, self.left_sibling, self.right_sibling, \
self.depth, self.parent_arity, self.leaf_position, self.arity
def add_tree(self, tree, parent=-1, left_sibling=-1, depth=0, parent_arity=-1, leaf_position=-1):
degree = len(tree.leaves)
this_n = self.n
tree.ti_index = this_n
self.parents.append(parent)
self.left_sibling.append(left_sibling)
self.string.append(tree.value)
self.depth.append(depth)
self.parent_arity.append(parent_arity)
self.leaf_position.append(leaf_position)
arity = len(tree.leaves)
self.arity.append(arity)
self.n += 1
prev_n = -1
for i, c in enumerate(tree.leaves):
self.add_tree(c, parent=this_n, left_sibling=prev_n, depth=depth+1, parent_arity=arity, leaf_position=i)
prev_n=c.ti_index
def add_tree_right_siblings(self, tree, right_sibling = -1):
self.right_sibling.append(right_sibling)
degree = len(tree.leaves)
for i,c in enumerate(tree.leaves):
if i < degree-1:
next_right = tree.leaves[i+1].ti_index
else:
next_right = -1
self.add_tree_right_siblings(c, next_right)
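# Minimal sketch (illustrative) of the tree interface TreeInformation
# expects: nodes need a .value label and a .leaves list of children. _Node
# is a hypothetical stand-in, not a class from this codebase.
def _example_tree_information():
    class _Node(object):
        def __init__(self, value, leaves=None):
            self.value = value
            self.leaves = leaves if leaves is not None else []
    tree = _Node('f', [_Node('x'), _Node('y')])
    ti = TreeInformation([tree], start_symbol='START_OUTPUT',
                         end_symbol='END_OF_HYP')
    string = ti.params()[0]
    return string  # ['START_OUTPUT', 'f', 'x', 'y', 'END_OF_HYP']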
|
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Also if needed: retab
'''
Magnetic field line tracing
'''
# Standard python modules
from __future__ import (unicode_literals, absolute_import, \
print_function, division)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import numpy as np
import os
#import re
import scipy.integrate as spode
import scipy.interpolate as interpolate
import scipy.spatial
#import warnings
#import sys
# Local modules
import imas
try:
import pywed as pw
except ImportError as err:
print(err)
# Project modules
try:
from tofu.mag.mag_ripple.mag_ripple import mag_ripple
except Exception:
from mag.mag_ripple.mag_ripple import mag_ripple
#try:
# from equimap import interp_quantity
#except ImportError as err:
# print(err)
__all__ = ['MagFieldLines']
class MagFieldLines:
'''
Compute magnetic field lines
Parameters
----------
shot : int
Shot number
run : run number, optional (default=0)
occ : occurrence number, optional (default=0)
user : user name, optional (default=imas_public)
machine : machine name, optional (default=west)
Methods
-------
trace_mline(init_points_array, time_array, direction='FWD')
In other words:
trace_mline([[r_1, r_2, ...], [phi_1, phi_2, ...], [z_1, z_2, ...]], [t1, t2, ...], direction='FWD')
returns list of list of traces with shape (times, init_points)
plot_trace(trace[0][0])
2D plots of traced magnetic field line
plot_trace_3D(trace[0][0])
3D plot of traced magnetic field line
Examples
--------
For shot 54095
Compute linear interpolation B_r, B_tor, B_z at r=2, phi=0, z=0
and r, z, phi of magnetic line starting at r=2.7, phi=0, z=0
>>> test = MagFieldLines(54095)
Call method with specific time (t=34 seconds in this case)
>>> test.trace_mline([2.7, 0., 0.], 34)[0][0]['r'][10]
2.700041665466963
>>> test.trace_mline([2.7, 0., 0.], 34)[0][0]['p'][10]
-0.0037010956581712776
>>> test.trace_mline([2.7, 0., 0.], 34)[0][0]['z'][10]
0.000373472578674799
Call method for list of points (2 items and t=34 seconds)
>>> test.trace_mline([[2.7, 2.8], [0., 0.], [0., 0.1*np.pi]], 34)[0][0]['r'][10]
2.700041665466963
>>> test.trace_mline([[2.7, 2.8], [0., 0.], [0., 0.1*np.pi]], 34)[0][0]['p'][10]
-0.0037010956581712776
>>> test.trace_mline([[2.7, 2.8], [0., 0.], [0., 0.1*np.pi]], 34)[0][0]['z'][10]
0.000373472578674799
For interpolation of B magnetic field
>>> test.b_field_interp(2, 0, 0)
(0.006836488559673934, -4.547017391384852, -0.14305889263210517)
'''
def __init__(self, shot, run=0, occ=0, user='imas_public', machine='west'):
# Check if shot exists
run_number = '{:04d}'.format(run)
shot_file = os.path.expanduser('~' + user + '/public/imasdb/' + machine + \
'/3/0/' + 'ids_' + str(shot) + run_number + \
'.datafile')
if (not os.path.isfile(shot_file)):
raise FileNotFoundError('IMAS file does not exist')
# Parameters
self.wall_ck=False
# Get equilibrium
idd = imas.ids(shot, run)
idd.open_env(user, machine, '3')
idd.equilibrium.get()
self.equi = idd.equilibrium
# Check code.output_flag for data validity
if (np.any(np.isnan(self.equi.code.output_flag))):
self.mask = np.full(len(self.equi.time), True, dtype=bool)
else:
self.mask = np.asarray(self.equi.code.output_flag) >= 0
# Get Itor (current of toroidal coils, coils that produce the toroidal field)
self.itor, self.t_itor = pw.tsbase(shot, 'gmag_itor', nargout=2)
self.t_ignitron = pw.tsmat(shot, 'IGNITRON|1')
self.t_itor += self.t_ignitron[0]
self.equiDict = {}
# Declaration of arrays 2d from equilibrium IDS
equi_grid = idd.equilibrium.grids_ggd[0].grid[0]
NbrPoints = len(equi_grid.space[0].objects_per_dimension[0].object)
self.equiDict['r'] = np.full(NbrPoints, np.nan)
self.equiDict['z'] = np.full(NbrPoints, np.nan)
for ii in range(NbrPoints):
self.equiDict['r'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[0]
self.equiDict['z'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[1]
self.equiDict['b_field_r'] = np.full((len(self.equi.time), NbrPoints), np.nan)
self.equiDict['b_field_z'] = np.full((len(self.equi.time), NbrPoints), np.nan)
self.equiDict['b_field_tor'] = np.full((len(self.equi.time), NbrPoints), np.nan)
self.equiDict['boundary_r'] = [None]*len(self.equi.time)
self.equiDict['boundary_z'] = [None]*len(self.equi.time)
for ii in range(len(self.equi.time)):
equi_slice = self.equi.time_slice[ii]
self.equiDict['b_field_r'][ii] = equi_slice.ggd[0].b_field_r[0].values
self.equiDict['b_field_z'][ii] = equi_slice.ggd[0].b_field_z[0].values
self.equiDict['b_field_tor'][ii] = equi_slice.ggd[0].b_field_tor[0].values
self.equiDict['boundary_r'][ii] = equi_slice.boundary.outline.r
self.equiDict['boundary_z'][ii] = equi_slice.boundary.outline.z
self.points = np.vstack((self.equiDict['r'], self.equiDict['z'])).transpose()
self.delaunay = scipy.spatial.Delaunay(self.points)
# Time interpolation
self.f_intp_br = interpolate.interp1d(self.equi.time[self.mask], \
self.equiDict['b_field_r'][self.mask, :], axis=0, \
bounds_error=False)
self.f_intp_bt = interpolate.interp1d(self.equi.time[self.mask], \
self.equiDict['b_field_tor'][self.mask, :], axis=0, \
bounds_error=False)
self.f_intp_bz = interpolate.interp1d(self.equi.time[self.mask], \
self.equiDict['b_field_z'][self.mask, :], axis=0, \
bounds_error=False)
def trace_mline(self, init_state, time, direction='FWD', \
length_line=None, stp=None, ripple=True):
'''
Traces the field line given a starting point.
Integration step defined by stp and maximum length of the field line
defined by length_line.
Collision with the wall stops integration.
input:
- init_state [[r_1, r_2, ...], [phi_1, phi_2, ...], [z_1, z_2, ...]] :
the coordinates of the starting points (list or numpy array)
OR list of lists OR numpy 2D array
- time : array of times requested (list or numpy array)
- direction : direction of integration 'FWD' forward or 'REV' reverse
(string)
- ripple : take into account magnetic ripple or not (boolean)
output:
returns a dictionary containing:
- r : radial coordinate (np.array)
- p : toroidal coordinate (np.array)
- z : vertical coordinate (np.array)
- x : x coordinate (np.array)
- y : y coordinate (np.array)
- cp : collision point with the wall (list)
'''
# Step for the integration
if (stp is None):
stp = 0.001
# Length of the field line
if (length_line is None):
s = 100
else:
s = length_line
ds = np.linspace(0, s, int(s/stp))
init_state = np.squeeze(np.asarray(init_state))
if (init_state.ndim < 2):
init_state = init_state[:, np.newaxis]
self.ar_time = np.atleast_1d(np.squeeze(np.asarray([time])))
br_intp_t = np.atleast_2d(np.squeeze(self.f_intp_br(self.ar_time)))
bt_intp_t = np.atleast_2d(np.squeeze(self.f_intp_bt(self.ar_time)))
bz_intp_t = np.atleast_2d(np.squeeze(self.f_intp_bz(self.ar_time)))
# !!!!!!!!!!!!!!!!!!!!!
# HARD CODED CORRECTION
if (np.nanmean(bt_intp_t) > 0):
bt_intp_t *= -1
# !!!!!!!!!!!!!!!!!!!!!
# Interpolate current
itor_intp_t_vect = np.interp(self.ar_time, self.t_itor[:, 0], \
self.itor[:, 0])
b0_intp_t_vect = np.interp(self.ar_time, self.equi.time[self.mask], \
self.equi.vacuum_toroidal_field.b0[self.mask])
outMagLine = []
for ii in range(self.ar_time.size):
self.br_lin_intp = \
interpolate.LinearNDInterpolator(self.delaunay, br_intp_t[ii])
self.bt_lin_intp = \
interpolate.LinearNDInterpolator(self.delaunay, bt_intp_t[ii])
self.bz_lin_intp = \
interpolate.LinearNDInterpolator(self.delaunay, bz_intp_t[ii])
# Interpolated current and b0
self.itor_intp_t = itor_intp_t_vect[ii]
self.b0_intp_t = b0_intp_t_vect[ii]
out = []
for jj in range(init_state.shape[1]):
out_prime = self.integrate_solve_ivp(init_state[:, jj], \
direction, s, ds, ripple)
out_prime['init_point'] = init_state[:, jj]
out_prime['time'] = self.ar_time[ii]
# Return list of dict
out.append(out_prime)
outMagLine.append(out)
return outMagLine
def integrate_solve_ivp(self, init_state, direction, s, ds, ripple=True):
'''
Integration step defined by stp and maximum length of the field line
defined by s.
Collision with the wall stops integration.
input:
- init_state [r, phi, z] : the coordinates of the starting point (list)
- direction : direction of integration 'FWD' forward or 'REV' reverse
(string)
- s : field line maximum length
- ds : vector of steps
- ripple : boolean, take into account magnetic ripple or not
output:
returns a dictionary containing:
- r : radial coordinate (np.array)
- p : toroidal coordinate (np.array)
- z : vertical coordinate (np.array)
- x : x coordinate (np.array)
- y : y coordinate (np.array)
- cp : collision point with the wall (list)
'''
if (ripple):
if direction == 'FWD':
sol = spode.solve_ivp(self.mfld3dcylfwd, [0, s], init_state,
method='RK23', t_eval=ds,
events=self.hit_wall_circ)
elif direction == 'REV':
sol = spode.solve_ivp(self.mfld3dcylrev, [0, s], init_state,
method='RK23', t_eval=ds,
events=self.hit_wall_circ)
else:
if direction == 'FWD':
sol = spode.solve_ivp(self.mfld3dcylfwd_no_ripple,
[0, s], init_state,
method='RK23', t_eval=ds,
events=self.hit_wall_circ)
elif direction == 'REV':
sol = spode.solve_ivp(self.mfld3dcylrev_no_ripple,
[0, s], init_state,
method='RK23', t_eval=ds,
events=self.hit_wall_circ)
sgf = sol.t
rgf = sol.y[0]
pgf = sol.y[1]
xgf = rgf*np.cos(pgf)
ygf = rgf*np.sin(pgf)
zgf = sol.y[2]
if len(sgf)<len(ds):
colpt = [rgf[-1], pgf[-1], zgf[-1]]
else:
colpt = []
return {'s': sgf,
'r': rgf,
'p': pgf,
'z': zgf,
'x': xgf,
'y': ygf,
'cp': colpt}
def mfld3dcylfwd(self, s, state):
'''
Returns the right-hand side of the field line system of equations in
cylindrical (R, Phi, Z) coord.
This is the case for forward integration.
'''
R, P, Z = state
Br, Bt, Bz = self.b_field_interp(R, P, Z)
#Br=self.fBp_r(R,Z)[0]+self.fBt_r(P,self.ftheta(R,Z),R)
#Bt=self.fBt_t(P,self.ftheta(R,Z),R)
#Bz=self.fBp_z(R,Z)[0]
B = np.sqrt(Br*Br + Bz*Bz + Bt*Bt)
d_R = Br/B
d_P = Bt/B*1/R
d_Z = Bz/B
return [d_R, d_P, d_Z]
def mfld3dcylrev(self, s, state):
'''
Returns the right-hand side of the field line system of equations in
cylindrical (R, Phi, Z) coord.
This is the case for backward integration.
'''
R, P, Z = state
Br, Bt, Bz = self.b_field_interp(R, P, Z)
# Br=self.fBp_r(R,Z)[0]+self.fBt_r(P,self.ftheta(R,Z),R)
# Bt=self.fBt_t(P,self.ftheta(R,Z),R)
# Bz=self.fBp_z(R,Z)[0]
B = np.sqrt(Br*Br + Bz*Bz + Bt*Bt)
d_R = -Br/B
d_P = -Bt/B*1/R
d_Z = -Bz/B
return [d_R, d_P, d_Z]
def mfld3dcylfwd_no_ripple(self, s, state):
'''
Returns the right-hand side of the field line system of equations in
cylindrical (R, Phi, Z) coord.
This is the case for forward integration.
'''
R, P, Z = state
Br, Bt, Bz = self.b_field_interp_no_ripple(R, P, Z)
# Br=self.fBp_r(R,Z)[0]+self.fBt_r(P,self.ftheta(R,Z),R)
# Bt=self.fBt_t(P,self.ftheta(R,Z),R)
# Bz=self.fBp_z(R,Z)[0]
B = np.sqrt(Br*Br + Bz*Bz + Bt*Bt)
d_R = Br/B
d_P = Bt/B*1/R
d_Z = Bz/B
return [d_R, d_P, d_Z]
def mfld3dcylrev_no_ripple(self, s, state):
'''
Returns the right-hand side of the field line system of equations in
cylindrical (R, Phi, Z) coord.
This is the case for backward integration.
'''
R, P, Z = state
Br, Bt, Bz = self.b_field_interp_no_ripple(R, P, Z)
# Br=self.fBp_r(R,Z)[0]+self.fBt_r(P,self.ftheta(R,Z),R)
# Bt=self.fBt_t(P,self.ftheta(R,Z),R)
# Bz=self.fBp_z(R,Z)[0]
B = np.sqrt(Br*Br + Bz*Bz + Bt*Bt)
d_R = -Br/B
d_P = -Bt/B*1/R
d_Z = -Bz/B
return [d_R, d_P, d_Z]
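# Note added for clarity (not in the original file): the four mfld3dcyl*
# callbacks above all integrate the same arc-length-parameterised field line
# system,
#     dR/ds = B_R/|B|, dPhi/ds = B_phi/(R*|B|), dZ/ds = B_Z/|B|,
# with the sign flipped for 'REV' integration; only the B-field interpolator
# (with or without ripple) differs.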
def hit_wall_circ(self, s, state):
'''
Returns 0 when the field line hits the wall.
With wall_ck:
- False => The wall is a simple circular torus with minor radius rw
centered at (Rc,Zc), thus with major radius Rc.
- True => check collision with the wall boundary given in the Equilibrium
mat file
'''
R, P, Z = state
# if (np.abs(Z) <= 0.5 and R > 3.01
# and np.deg2rad(89.5) <= P <= np.deg2rad(90.5)):
# return 0
# if (np.abs(Z) <= 0.5 and R > 3.01
# and np.deg2rad(179.5) <= P <= np.deg2rad(180.5)):
# print('Got 2')
# return 0
# elif (np.abs(Z) <= 0.5 and R > 3.01
# and np.deg2rad(269.5) <= P <= np.deg2rad(270.5)):
# print('Got 3')
# return 0
if self.wall_ck==False:
Rc=2.460
Zc=0.0
rw=0.950
return (R-Rc)**2+(Z-Zc)**2-rw**2
elif self.wall_ck and Z>=0:
zwall=self.fwall_up(R)
return Z-zwall
elif self.wall_ck and Z<0:
zwall=self.fwall_dw(R)
return zwall-Z
hit_wall_circ.terminal=True
def b_field_interp(self, R, Phi, Z):
''' Linear interpolation of B vector components at R, Phi, Z positions
'''
interp_points = np.vstack((R, Z)).transpose()
br_intp = self.br_lin_intp(interp_points)
bt_intp = self.bt_lin_intp(interp_points)
bz_intp = self.bz_lin_intp(interp_points)
# Compute magnetic field for given Phi
br_ripple, bt_ripple, bz_ripple = mag_ripple(R, Phi, \
Z, self.itor_intp_t)
# Compute reference vacuum magnetic field
self.bt_vac = self.equi.vacuum_toroidal_field.r0*self.b0_intp_t / R
br_intp -= br_ripple[0]
bt_intp -= (bt_ripple[0] - np.abs(self.bt_vac))
bz_intp -= bz_ripple[0]
return br_intp[0], bt_intp[0], bz_intp[0]
def b_field_interp_no_ripple(self, R, Phi, Z):
''' Linear interpolation of B vector components at R, Phi, Z positions
'''
interp_points = np.vstack((R, Z)).transpose()
        br_intp = self.br_lin_intp(interp_points)
        bt_intp = self.bt_lin_intp(interp_points)
        bz_intp = self.bz_lin_intp(interp_points)
return br_intp[0], bt_intp[0], bz_intp[0]
def plot_trace(self, trace):
'''
Plots a summary of the magnetic field line trace.
        It contains graphs of the radial, vertical and toroidal coordinates,
        followed by the vertical projection (without and with the wall) and
        the toroidal projection.
A black dot represents the starting point of the field line.
The end of the trace is indicated by:
- a red dot in case of collision with the wall
- a black square otherwise
input:
        - trace : the magnetic field line data (dictionary)
'''
sgf = trace['s']
rgf = trace['r']
zgf = trace['z']
pgf = trace['p']
xgf = trace['x']
ygf = trace['y']
colpt = trace['cp']
ind_time = np.argmin(np.abs(self.equi.time - trace['time']))
plt.figure(figsize=[12,8])
plt.subplot(231)
plt.plot(sgf, rgf)
plt.grid()
plt.ylabel('R [m]'); plt.xlabel('s [m]'); plt.title('Radial coord.')
plt.subplot(232)
plt.plot(sgf, zgf)
plt.grid()
plt.ylabel('Z [m]'); plt.xlabel('s [m]'); plt.title('Vertical coord.')
plt.subplot(233)
plt.plot(sgf, np.rad2deg(pgf))
plt.grid()
        plt.ylabel(r'$\phi$ [deg]'); plt.xlabel('s [m]'); plt.title('Toroidal coord.')
plt.subplot(234)
plt.plot(rgf, zgf)
plt.plot(rgf[0], zgf[0], marker='o', markersize=3, color="black")
if not colpt:
plt.plot(rgf[-1], zgf[-1], marker='s', markersize=5, color="black")
else:
plt.plot(rgf[-1], zgf[-1], marker='o', markersize=5, color="red")
plt.axis('equal'); plt.xlabel('R [m]'); plt.ylabel('Z [m]')
plt.plot(self.equiDict['boundary_r'][ind_time], \
self.equiDict['boundary_z'][ind_time])
plt.title('Vertical projection')
plt.grid()
plt.subplot(235)
plt.plot(rgf, zgf)
#plt.plot(rwall,zwall)
plt.plot(rgf[0], zgf[0], marker='o', markersize=3, color="black")
if not colpt:
plt.plot(rgf[-1], zgf[-1], marker='s', markersize=5, color="black")
else:
plt.plot(rgf[-1], zgf[-1], marker='o', markersize=5, color="red")
plt.axis('equal'); plt.xlabel('R [m]'); plt.ylabel('Z [m]')
plt.title('Vertical projection')
plt.subplot(236)
plt.plot(xgf, ygf)
#plt.plot(ra*np.cos(pa),ra*np.sin(pa),'b:')
plt.plot(xgf[0], ygf[0], marker='o', markersize=3, color="black")
if not colpt:
plt.plot(xgf[-1], ygf[-1], marker='s', markersize=5, color="black")
else:
plt.plot(xgf[-1], ygf[-1], marker='o', markersize=5, color="red")
plt.axis('equal'); plt.xlabel('x [m]'); plt.ylabel('y [m]')
plt.title('Toroidal projection')
plt.tight_layout()
plt.show()
def plot_trace_3D(self, trace):
'''
Plots the magnetic field line trace in a 3D projection.
        The outline of the antenna is shown by black solid lines.
A black dot represents the starting point of the field line.
The end of the trace is indicated by:
- a red dot in case of collision with the wall
- a black square otherwise
input:
        - trace : the magnetic field line data (dictionary)
'''
sgf = trace['s']
rgf = trace['r']
zgf = trace['z']
pgf = trace['p']
xgf = trace['x']
ygf = trace['y']
colpt = trace['cp']
fig = plt.figure(figsize=[10, 10])
ax = Axes3D(fig)
#ax = fig.gca(projection='3d')
#ax.set_aspect('equal')
ax.plot(xgf, ygf, zgf)
#ax.plot(ra[0]*np.cos(pa),ra[0]*np.sin(pa),za[0],'k')
#ax.plot(ra*np.cos(pa[0]),ra*np.sin(pa[0]),za,'k')
#ax.plot(ra[-1]*np.cos(pa),ra[-1]*np.sin(pa),za[-1],'k')
#ax.plot(ra*np.cos(pa[-1]),ra*np.sin(pa[-1]),za,'k')
ax.plot(3.5*np.cos(np.linspace(0, 2*np.pi, 36)), \
3.5*np.sin(np.linspace(0, 2*np.pi, 36)), 0, 'k:')
# Used to create the fake bounding box
max_range = np.array([xgf.max()-xgf.min(), ygf.max()-ygf.min(), zgf.max()-zgf.min()]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(xgf.max()+xgf.min())
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(ygf.max()+ygf.min())
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(zgf.max()+zgf.min())
        # The invisible points below create a fake bounding box that forces
        # an (approximately) equal aspect ratio in the 3D plot:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
ax.plot([xgf[0]], [ygf[0]], [zgf[0]], marker='o', markersize=3, color="black")
if not colpt:
ax.plot([xgf[-1]], [ygf[-1]], [zgf[-1]], marker='s', markersize=5, color="black")
else:
ax.plot([xgf[-1]], [ygf[-1]], [zgf[-1]], marker='o', markersize=5, color="red")
ax.set_xlabel('x [m]'); ax.set_ylabel('y [m]'); ax.set_zlabel('z [m]')
plt.title('Magnetic field line trace')
plt.show()
def plot_poincare(self, trace_plt, tor_angle=0):
'''
        Plots the Poincare section at a given toroidal angle.
        input:
        - trace_plt : list of magnetic field line traces for several starting
                      points at one single time (list of dictionaries)
        - tor_angle : toroidal angle of the section plane [rad]
'''
fig, ax = plt.subplots()
for ii in range(len(trace_plt)):
            # Wrapped toroidal angle relative to the section plane; a plane
            # crossing shows up as a local minimum of the wrapped angle.
            loc_cross = (trace_plt[ii]['p'] - tor_angle) % (2*np.pi)
            mask_cross = (np.r_[False, loc_cross[1:] < loc_cross[:-1]]
                          & np.r_[loc_cross[:-1] < loc_cross[1:], False])
ax.plot(trace_plt[ii]['r'][mask_cross],
trace_plt[ii]['z'][mask_cross], 'o', markersize=3)
ax.set_aspect('equal')
ax.set_xlabel('R [m]')
ax.set_ylabel('Z [m]')
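

# --- Usage sketch (not part of the original module) -------------------------
# A minimal illustration, assuming `tracer` is an instance of the class above
# with its interpolators already set up, and that scipy is available. It shows
# how the right-hand-side methods and the terminal wall event plug into
# scipy.integrate.solve_ivp; the function name and defaults are assumptions.
def _trace_field_line_sketch(tracer, R0, P0, Z0, s_max=100.0):
    from scipy.integrate import solve_ivp
    # Integrate d(R, P, Z)/ds along the field direction; the integration
    # stops early if hit_wall_circ crosses zero (terminal event).
    sol = solve_ivp(tracer.mfld3dcylfwd, (0.0, s_max), [R0, P0, Z0],
                    events=tracer.hit_wall_circ, dense_output=True)
    return sol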
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TFGAN.
These methods come from https://arxiv.org/abs/1606.03498 and
https://arxiv.org/abs/1706.08500.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import tarfile
from six.moves import urllib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
__all__ = [
'get_graph_def_from_disk',
'get_graph_def_from_resource',
'get_graph_def_from_url_tarball',
'preprocess_image',
'run_image_classifier',
'run_inception',
'inception_score',
'classifier_score',
'frechet_inception_distance',
'frechet_classifier_distance',
]
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v3_2017_09_13.tar.gz'
INCEPTION_FROZEN_GRAPH = 'frozen_inception_v3.pb'
INCEPTION_V3_INPUT = 'input'
INCEPTION_V3_OUTPUT = 'InceptionV3/Logits/SpatialSqueeze:0'
INCEPTION_V3_FINAL_POOL = 'InceptionV3/Logits/AvgPool_1a_8x8/AvgPool:0'
_INCEPTION_V3_NUM_CLASSES = 1001
_INCEPTION_V3_FINAL_POOL_SIZE = 2048
INCEPTION_V3_DEFAULT_IMG_SIZE = 299
def _validate_images(images, image_size):
images = ops.convert_to_tensor(images)
images.shape.with_rank(4)
images.shape.assert_is_compatible_with(
[None, image_size, image_size, None])
return images
def _symmetric_matrix_square_root(mat, eps=1e-10):
"""Compute square root of a symmetric matrix.
Note that this is different from an elementwise square root. We want to
compute M' where M' = sqrt(mat) such that M' * M' = mat.
Also note that this method **only** works for symmetric matrices.
Args:
mat: Matrix to take the square root of.
eps: Small epsilon such that any element less than eps will not be square
rooted to guard against numerical instability.
Returns:
Matrix square root of mat.
"""
# Unlike numpy, tensorflow's return order is (s, u, v)
s, u, v = linalg_ops.svd(mat)
# sqrt is unstable around 0, just use 0 in such case
si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
# Note that the v returned by Tensorflow is v = V
# (when referencing the equation A = U S V^T)
# This is unlike Numpy which returns v = V^T
return math_ops.matmul(
math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
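# A plain-numpy analogue of the SVD-based square root above, for illustration
# only (not part of TFGAN). Note numpy's svd returns V^T directly, matching
# the comment about TensorFlow returning V.
def _np_symmetric_matrix_square_root_sketch(mat, eps=1e-10):
  import numpy as np
  u, s, vt = np.linalg.svd(mat)
  si = np.where(s < eps, s, np.sqrt(s))
  return u.dot(np.diag(si)).dot(vt)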
# Convenience preprocessing function, with fixed defaults.
# NOTE: Floating-point inputs are expected to be in [0, 1].
# Copied from /tensorflow_models/slim/preprocessing/inception_preprocessing.py.
def preprocess_image(
image, height=INCEPTION_V3_DEFAULT_IMG_SIZE,
width=INCEPTION_V3_DEFAULT_IMG_SIZE, central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with ops.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != dtypes.float32:
image = image_ops.convert_image_dtype(image, dtype=dtypes.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
image = image_ops.central_crop(image, central_fraction=central_fraction)
# Resize the image to the specified height and width.
image = array_ops.expand_dims(image, 0)
image = image_ops.resize_bilinear(image, [height, width],
align_corners=False)
image = array_ops.squeeze(image, [0])
image = (image - 0.5) * 2.0
return image
def _kl_divergence(p, p_logits, q):
"""Computes the Kullback-Liebler divergence between p and q.
This function uses p's logits in some places to improve numerical stability.
Specifically:
KL(p || q) = sum[ p * log(p / q) ]
= sum[ p * ( log(p) - log(q) ) ]
= sum[ p * ( log_softmax(p_logits) - log(q) ) ]
Args:
p: A 2-D floating-point Tensor p_ij, where `i` corresponds to the minibatch
example and `j` corresponds to the probability of being in class `j`.
p_logits: A 2-D floating-point Tensor corresponding to logits for `p`.
q: A 1-D floating-point Tensor, where q_j corresponds to the probability
of class `j`.
Returns:
KL divergence between two distributions. Output dimension is 1D, one entry
per distribution in `p`.
Raises:
ValueError: If any of the inputs aren't floating-point.
ValueError: If p or p_logits aren't 2D.
ValueError: If q isn't 1D.
"""
for tensor in [p, p_logits, q]:
if not tensor.dtype.is_floating:
      raise ValueError('Input %s must be floating type.' % tensor.name)
p.shape.assert_has_rank(2)
p_logits.shape.assert_has_rank(2)
q.shape.assert_has_rank(1)
return math_ops.reduce_sum(
p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)
def get_graph_def_from_disk(filename):
"""Get a GraphDef proto from a disk location."""
with gfile.FastGFile(filename, 'rb') as f:
return graph_pb2.GraphDef.FromString(f.read())
def get_graph_def_from_resource(filename):
"""Get a GraphDef proto from within a .par file."""
return graph_pb2.GraphDef.FromString(resource_loader.load_resource(filename))
def get_graph_def_from_url_tarball(url, filename):
"""Get a GraphDef proto from a tarball on the web."""
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
url, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
tar_filename, _ = urllib.request.urlretrieve(url, reporthook=_progress)
with tarfile.open(tar_filename, 'r:gz') as tar:
proto_str = tar.extractfile(filename).read()
return graph_pb2.GraphDef.FromString(proto_str)
def _default_graph_def_fn():
return get_graph_def_from_url_tarball(INCEPTION_URL, INCEPTION_FROZEN_GRAPH)
def run_inception(images,
graph_def=None,
default_graph_def_fn=_default_graph_def_fn,
image_size=INCEPTION_V3_DEFAULT_IMG_SIZE,
input_tensor=INCEPTION_V3_INPUT,
output_tensor=INCEPTION_V3_OUTPUT):
"""Run images through a pretrained Inception classifier.
Args:
images: Input tensors. Must be [batch, height, width, channels]. Input shape
and values must be in [-1, 1], which can be achieved using
`preprocess_image`.
graph_def: A GraphDef proto of a pretrained Inception graph. If `None`,
call `default_graph_def_fn` to get GraphDef.
default_graph_def_fn: A function that returns a GraphDef. Used if
      `graph_def` is `None`. By default, returns a pretrained InceptionV3 graph.
image_size: Required image width and height. See unit tests for the default
values.
input_tensor: Name of input Tensor.
output_tensor: Name of output Tensor. This function will compute activations
at the specified layer. Examples include INCEPTION_V3_OUTPUT and
INCEPTION_V3_FINAL_POOL which would result in this function computing
the final logits or the penultimate pooling layer.
Returns:
Logits.
Raises:
ValueError: If images are not the correct size.
ValueError: If neither `graph_def` nor `default_graph_def_fn` are provided.
"""
images = _validate_images(images, image_size)
if graph_def is None:
if default_graph_def_fn is None:
raise ValueError('If `graph_def` is `None`, must provide '
'`default_graph_def_fn`.')
graph_def = default_graph_def_fn()
activations = run_image_classifier(images, graph_def, input_tensor,
output_tensor)
if array_ops.rank(activations) != 2:
activations = layers.flatten(activations)
return activations
def run_image_classifier(tensor, graph_def, input_tensor,
output_tensor, scope='RunClassifier'):
"""Runs a network from a frozen graph.
Args:
tensor: An Input tensor.
graph_def: A GraphDef proto.
input_tensor: Name of input tensor in graph def.
output_tensor: Name of output tensor in graph def.
scope: Name scope for classifier.
Returns:
Classifier output. Shape depends on the classifier used, but is often
[batch, classes].
Raises:
    ValueError: If `image_size` is not `None`, and `tensor` is not the
      correct size.
"""
input_map = {input_tensor: tensor}
return_elements = [output_tensor]
classifier_output = importer.import_graph_def(
graph_def, input_map, return_elements, name=scope)[0]
return classifier_output
def classifier_score(images, classifier_fn, num_batches=1):
"""Classifier score for evaluating a conditional generative model.
This is based on the Inception Score, but for an arbitrary classifier.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
Args:
images: Images to calculate the classifier score for.
classifier_fn: A function that takes images and produces logits based on a
classifier.
    num_batches: Number of batches to split `images` into in order to
      efficiently run them through the classifier network.
Returns:
The classifier score. A floating-point scalar.
"""
generated_images_list = array_ops.split(
images, num_or_size_splits=num_batches)
# Compute the classifier splits using the memory-efficient `map_fn`.
logits = functional_ops.map_fn(
fn=classifier_fn,
elems=array_ops.stack(generated_images_list),
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
logits = array_ops.concat(array_ops.unstack(logits), 0)
logits.shape.assert_has_rank(2)
# Use maximum precision for best results.
logits_dtype = logits.dtype
if logits_dtype != dtypes.float64:
logits = math_ops.cast(logits, dtypes.float64)
p = nn_ops.softmax(logits)
q = math_ops.reduce_mean(p, axis=0)
kl = _kl_divergence(p, logits, q)
kl.shape.assert_has_rank(1)
log_score = math_ops.reduce_mean(kl)
final_score = math_ops.exp(log_score)
if logits_dtype != dtypes.float64:
    final_score = math_ops.cast(final_score, logits_dtype)  # restore original dtype
return final_score
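# A plain-numpy sketch of the same quantity, exp(E[KL(p(y|x) || p(y))]), for
# illustration only; `logits` is assumed to be an [N, num_classes] array.
def _np_classifier_score_sketch(logits):
  import numpy as np
  logits = logits - logits.max(axis=1, keepdims=True)   # stabilize softmax
  logp = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
  p = np.exp(logp)                                      # p(y|x) per example
  q = p.mean(axis=0)                                    # marginal p(y)
  kl = (p * (logp - np.log(q))).sum(axis=1)             # KL(p(y|x) || p(y))
  return np.exp(kl.mean())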
inception_score = functools.partial(
classifier_score,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_V3_OUTPUT))
def trace_sqrt_product(sigma, sigma_v):
"""Find the trace of the positive sqrt of product of covariance matrices.
'_symmetric_matrix_square_root' only works for symmetric matrices, so we
cannot just take _symmetric_matrix_square_root(sigma * sigma_v).
('sigma' and 'sigma_v' are symmetric, but their product is not necessarily).
Let sigma = A A so A = sqrt(sigma), and sigma_v = B B.
We want to find trace(sqrt(sigma sigma_v)) = trace(sqrt(A A B B))
Note the following properties:
(i) forall M1, M2: eigenvalues(M1 M2) = eigenvalues(M2 M1)
=> eigenvalues(A A B B) = eigenvalues (A B B A)
(ii) if M1 = sqrt(M2), then eigenvalues(M1) = sqrt(eigenvalues(M2))
=> eigenvalues(sqrt(sigma sigma_v)) = sqrt(eigenvalues(A B B A))
(iii) forall M: trace(M) = sum(eigenvalues(M))
=> trace(sqrt(sigma sigma_v)) = sum(eigenvalues(sqrt(sigma sigma_v)))
= sum(sqrt(eigenvalues(A B B A)))
= sum(eigenvalues(sqrt(A B B A)))
= trace(sqrt(A B B A))
= trace(sqrt(A sigma_v A))
A = sqrt(sigma). Both sigma and A sigma_v A are symmetric, so we **can**
use the _symmetric_matrix_square_root function to find the roots of these
matrices.
Args:
sigma: a square, symmetric, real, positive semi-definite covariance matrix
sigma_v: same as sigma
Returns:
The trace of the positive square root of sigma*sigma_v
"""
# Note sqrt_sigma is called "A" in the proof above
sqrt_sigma = _symmetric_matrix_square_root(sigma)
# This is sqrt(A sigma_v A) above
sqrt_a_sigmav_a = math_ops.matmul(
sqrt_sigma, math_ops.matmul(sigma_v, sqrt_sigma))
return math_ops.trace(_symmetric_matrix_square_root(sqrt_a_sigmav_a))
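# A quick numerical check of the identity used above,
# trace(sqrt(sigma sigma_v)) == trace(sqrt(A sigma_v A)) with A = sqrt(sigma),
# in plain numpy/scipy (illustrative only, not part of TFGAN):
def _np_trace_sqrt_product_check_sketch():
  import numpy as np
  from scipy.linalg import sqrtm
  rng = np.random.RandomState(0)
  a = rng.randn(4, 4)
  b = rng.randn(4, 4)
  sigma, sigma_v = a.dot(a.T), b.dot(b.T)   # random PSD matrices
  lhs = np.trace(sqrtm(sigma.dot(sigma_v))).real
  sqrt_sigma = sqrtm(sigma)
  rhs = np.trace(sqrtm(sqrt_sigma.dot(sigma_v).dot(sqrt_sigma))).real
  assert np.isclose(lhs, rhs)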
def frechet_classifier_distance(real_images,
generated_images,
classifier_fn,
num_batches=1):
"""Classifier distance for evaluating a generative model.
This is based on the Frechet Inception distance, but for an arbitrary
classifier.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
  Given two Gaussian distributions with means m and m_w and covariance matrices
  C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
  Frechet distance is biased; the bias is larger for small sample sizes (e.g.
  even if the two distributions are the same, the expected Frechet distance is
  large for a small sample size). It is therefore important to use the same
  sample size to compute the Frechet classifier distance when comparing two
  generative models.
Args:
real_images: Real images to use to compute Frechet Inception distance.
generated_images: Generated images to use to compute Frechet Inception
distance.
classifier_fn: A function that takes images and produces activations
based on a classifier.
num_batches: Number of batches to split images in to in order to
efficiently run them through the classifier network.
Returns:
The Frechet Inception distance. A floating-point scalar.
"""
real_images_list = array_ops.split(
real_images, num_or_size_splits=num_batches)
generated_images_list = array_ops.split(
generated_images, num_or_size_splits=num_batches)
imgs = array_ops.stack(real_images_list + generated_images_list)
# Compute the activations using the memory-efficient `map_fn`.
activations = functional_ops.map_fn(
fn=classifier_fn,
elems=imgs,
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
# Split the activations by the real and generated images.
real_a, gen_a = array_ops.split(activations, [num_batches, num_batches], 0)
# Ensure the activations have the right shapes.
real_a = array_ops.concat(array_ops.unstack(real_a), 0)
gen_a = array_ops.concat(array_ops.unstack(gen_a), 0)
real_a.shape.assert_has_rank(2)
gen_a.shape.assert_has_rank(2)
# Compute mean and covariance matrices of activations.
m = math_ops.reduce_mean(real_a, 0)
m_v = math_ops.reduce_mean(gen_a, 0)
num_examples = math_ops.to_float(array_ops.shape(real_a)[0])
# sigma = (1 / (n - 1)) * (X - mu) (X - mu)^T
sigma = math_ops.matmul(
real_a - m, real_a - m, transpose_a=True) / (num_examples - 1)
sigma_v = math_ops.matmul(
gen_a - m_v, gen_a - m_v, transpose_a=True) / (num_examples - 1)
# Find the Tr(sqrt(sigma sigma_v)) component of FID
sqrt_trace_component = trace_sqrt_product(sigma, sigma_v)
# Compute the two components of FID.
# First the covariance component.
# Here, note that trace(A + B) = trace(A) + trace(B)
trace = math_ops.trace(sigma + sigma_v) - 2.0 * sqrt_trace_component
# Next the distance between means.
mean = math_ops.square(linalg_ops.norm(m - m_v)) # This uses the L2 norm.
fid = trace + mean
return fid
frechet_inception_distance = functools.partial(
frechet_classifier_distance,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_V3_FINAL_POOL))
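# A plain-numpy sketch of the same distance,
#   |m - m_w|^2 + Tr(C + C_w - 2 (C C_w)^(1/2)),
# computed directly from two activation arrays (illustrative only; assumes
# scipy is available, and is not part of the TFGAN API):
def _np_frechet_distance_sketch(real_a, gen_a):
  import numpy as np
  from scipy.linalg import sqrtm
  m, m_v = real_a.mean(axis=0), gen_a.mean(axis=0)
  sigma = np.cov(real_a, rowvar=False)      # uses the same 1/(n-1) scaling
  sigma_v = np.cov(gen_a, rowvar=False)
  covmean = sqrtm(sigma.dot(sigma_v)).real
  return np.sum((m - m_v) ** 2) + np.trace(sigma + sigma_v - 2.0 * covmean)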
|
|
"""
Functional tests for Pydocweb: these go through real-use patterns with
the Django web client.
"""
import os, sys, re
from django.test import TestCase
from django.conf import settings
PASSWORD='asdfasd'
class AccessTests(TestCase):
"""
Simple tests that check that basic pages can be accessed
and they contain something sensible.
"""
fixtures = ['tests/users.json']
def test_docstring_index(self):
response = self.client.get('/docs/')
self.assertContains(response, 'All docstrings')
def test_wiki_stock_frontpage(self):
response = self.client.get('/Front Page/')
self.assertContains(response, '')
def test_wiki_new_page(self):
response = self.client.get('/Some New Page/')
self.assertContains(response, 'Create new')
def test_changes(self):
response = self.client.get('/changes/')
self.assertContains(response, 'Recent changes')
def test_search(self):
response = self.client.get('/search/')
self.assertContains(response, 'Fulltext')
def test_stats(self):
response = self.client.get('/stats/')
self.assertContains(response, 'Overview')
def test_patch(self):
response = self.client.get('/patch/')
self.assertContains(response, 'Generate patch')
def test_non_authenticated(self):
for url in ['/merge/', '/control/', '/accounts/password/',
'/Some%20New%20Page/edit/']:
response = self.client.get(url)
# It should contain a redirect to the login page
self.failUnless('Location: http://testserver/accounts/login/?next=%s' % url in str(response))
self.client.login(username='editor', password=PASSWORD)
response = self.client.get('/control/')
self.failUnless('Location: http://testserver/accounts/login/' in str(response))
def test_merge(self):
self.client.login(username='editor', password=PASSWORD)
response = self.client.get('/merge/')
self.assertContains(response, 'Nothing to merge')
def test_control(self):
self.client.login(username='admin', password=PASSWORD)
response = self.client.get('/control/')
self.assertContains(response, 'Pull from sources')
self.assertContains(response, 'Editor Editorer')
def test_admin(self):
self.client.login(username='editor', password=PASSWORD)
response = self.client.get('/admin/')
# django's own login form...
self.assertContains(response, '<input type="submit" value="Log in" />')
self.client.login(username='admin', password=PASSWORD)
response = self.client.get('/admin/')
self.assertContains(response, 'Site administration')
class LoginTests(TestCase):
fixtures = ['tests/users.json']
def test_login_ok(self):
response = self.client.post('/accounts/login/',
{'username': 'editor',
'password': PASSWORD})
response = _follow_redirect(response)
response = _follow_redirect(response)
self.assertContains(response, 'Editor Editorer')
def test_login_fail(self):
response = self.client.post('/accounts/login/',
{'username': 'editor',
'password': 'blashyrkh'})
self.assertContains(response, 'Authentication failed')
class WikiTests(TestCase):
fixtures = ['tests/users.json']
def setUp(self):
self.client.login(username='editor', password=PASSWORD)
def test_page_cycle(self):
# Go to a new page
response = self.client.get('/A New Page/')
self.assertContains(response, '"/A%20New%20Page/edit/"')
response = self.client.get('/A%20New%20Page/edit/')
self.assertContains(response, 'action="/A%20New%20Page/edit/"')
# Try out the preview
response = self.client.post('/A%20New%20Page/edit/',
{'button_preview': 'Preview',
'text': 'Test *text*',
'comment': 'Test comment'})
self.assertContains(response, 'Preview')
self.assertContains(response, '+Test *text*')
self.assertContains(response, '<p>Test <em>text</em></p>')
# Edit the page
response = self.client.post('/A%20New%20Page/edit/',
{'text': 'Test *text*',
'comment': 'Test remark'})
response = self.client.get('/A New Page/')
self.assertContains(response, '<p>Test <em>text</em></p>')
# Edit the page again
response = self.client.post('/A%20New%20Page/edit/',
{'text': 'Test *stuff*',
'comment': 'Test note'})
response = self.client.get('/A New Page/')
self.assertContains(response, '<p>Test <em>stuff</em></p>')
# Check log entries
response = self.client.get('/A New Page/log/')
self.assertContains(response, 'Test note')
self.assertContains(response, 'Test remark')
self.assertContains(response, 'Editor Editorer', count=3)
self.assertContains(response, 'href="/A%20New%20Page/?revision=2"')
# Check old revision
response = self.client.get('/A New Page/', {'revision': '1'})
self.assertContains(response, 'Revision 1')
self.assertContains(response, 'Test <em>text</em>')
# Check log diff redirect & diff
response = self.client.post('/A New Page/log/',
{'button_diff': 'Differences',
'rev1': '1', 'rev2': '2'})
response = _follow_redirect(response)
self.assertContains(response, '-Test *text*')
self.assertContains(response, '+Test *stuff*')
# Check that the edits appear on the changes page
response = self.client.get('/changes/')
self.assertContains(response, 'Test remark')
self.assertContains(response, 'Test note')
self.assertContains(response, 'A New Page', count=2)
self.assertContains(response, 'Editor Editorer', count=2+1)
class DocstringTests(TestCase):
fixtures = ['tests/users.json', 'tests/docstrings.json']
def test_docstring_index(self):
response = self.client.get('/docs/')
self.assertContains(response, 'sample_module')
self.assertContains(response, 'sample_module.sample1')
def test_docstring_page(self):
response = self.client.get('/docs/sample_module/')
self.assertContains(response, 'sample1')
self.assertContains(response, 'func1')
response = self.client.get('/docs/sample_module.sample1/')
self.assertContains(response, 'sample1 docstring')
self.assertContains(response, 'Functions')
self.assertContains(response, 'func1')
def test_docstring_cycle(self):
self.client.login(username='editor', password=PASSWORD)
page = '/docs/sample_module.sample1.func1/'
# Test preview
response = self.client.post(page + 'edit/',
{'text': 'New *text*',
'button_preview': 'Preview',
'comment': 'Comment 1'})
self.assertContains(response, 'New <em>text</em>')
self.assertContains(response, '+New *text*')
# Test edit
response = self.client.post(page + 'edit/',
{'text': 'New *text* + `sample_module.func1`_',
'comment': 'Comment 1'})
response = _follow_redirect(response)
self.assertContains(response, 'New <em>text</em>')
# check that LabelCache is updated properly (#29)
self.failIf('Unknown target name' in str(response))
# Another edit by another person
self.client.login(username='admin', password=PASSWORD)
response = self.client.post(page + 'edit/',
{'text': 'New *stuff*',
'comment': 'Comment 2'})
response = _follow_redirect(response)
self.assertContains(response, 'New <em>stuff</em>')
# Check log
self.client.login(username='editor', password=PASSWORD)
response = self.client.get(page + 'log/')
self.assertContains(response, 'Admin Adminer', count=1)
self.assertContains(response, 'Editor Editorer', count=1+1)
self.assertContains(response, 'Source', count=1)
self.assertContains(response, 'Initial source revision', count=1)
self.assertContains(response, 'Comment 1', count=1)
self.assertContains(response, 'Comment 2', count=1)
# Follow log url to diff
response = self.client.post(page + 'log/',
{'button_diff': 'Differences',
'rev1': '2', 'rev2': '3'})
response = _follow_redirect(response)
self.assertContains(response, '-New *text*')
self.assertContains(response, '+New *stuff*')
# Diff vs. previous
response = self.client.get(page + 'diff/3/')
self.assertContains(response, 'Differences between revisions 2 and 3')
self.assertContains(response, '-New *text*')
self.assertContains(response, '+New *stuff*')
# Diff vs. VCS
response = self.client.get(page + 'diff/vcs/2/')
self.assertContains(response, 'Differences between revisions VCS and 2')
self.failUnless('New *stuff*' not in response.content)
self.assertContains(response, '+New *text*')
# Look at a previous revision
response = self.client.get(page, {'revision': '1'})
self.failUnless('New *text*' not in response.content)
self.failUnless('New *stuff*' not in response.content)
class SphinxTests(TestCase):
fixtures = ['tests/users.json', 'tests/docstrings_sphinx.json',
'tests/docstrings.json']
def test_create_page(self):
self.client.login(username='admin', password=PASSWORD)
response = self.client.post('/control/',
{'update-docstrings': 'Pull'})
self.client.login(username='editor', password=PASSWORD)
response = self.client.get('/docs/docs/')
response = self.client.get('/docs/docs/')
self.assertContains(response, 'Create file')
# create a file sub-page
response = self.client.post('/docs/docs/new/',
{'name': 'about.rst',
'button_file': 'x'})
response = _follow_redirect(response)
self.assertContains(response, 'about.rst\n</h1>')
# check that it's shown in the page listing
response = self.client.get('/docs/docs/')
response = self.client.get('/docs/docs/')
self.assertContains(response, 'about.rst')
# edit it
response = self.client.post('/docs/docs/about.rst/edit/',
{'text': 'Initial *edit*',
'comment': 'Initial comment'})
response = _follow_redirect(response)
self.assertContains(response, 'Initial <em>edit</em>')
# check that it appears in the patch
response = self.client.post('/patch/',
{'docs/about.rst': 'checked'})
self.assertContains(response, '+++ sample_module/doc/about.rst')
# create a directory sub-page
response = self.client.post('/docs/docs/new/',
{'name': 'user_guide',
'button_dir': 'x'})
response = _follow_redirect(response)
self.assertContains(response, 'Subdirectories')
def test_delete_page(self):
self.client.login(username='editor', password=PASSWORD)
# Delete a page by pressing the delete button
response = self.client.get('/docs/docs/quux.rst/edit/')
self.assertContains(response, 'name="button_delete" value="Delete"')
response = self.client.post('/docs/docs/quux.rst/edit/',
{'text': 'foo',
'comment': 'some comment',
'button_delete': 'Delete'})
# The UI should now redirect to the parent directory page.
# The deleted file should not appear in the 'Files' list.
response = _follow_redirect(response)
self.assertContains(response, 'Files')
self.assertContains(response, 'index.rst')
self.failUnless('quux.rst' not in str(response))
class ReviewTests(TestCase):
fixtures = ['tests/users.json', 'tests/docstrings.json']
def test_docstring_review(self):
self.client.login(username='editor', password=PASSWORD)
# Initial status: needs editing
response = self.client.get('/docs/sample_module/')
self.assertContains(response,
'id="review-status" class="needs-editing"')
# OK status change, I'm an editor
response = self.client.post('/docs/sample_module/review/',
{'status': '2'})
response = _follow_redirect(response)
self.assertContains(response, 'id="review-status" class="needs-review"')
# Not OK status change, I'm only an editor
response = self.client.post('/docs/sample_module/review/',
{'status': '5'})
response = _follow_redirect(response)
self.assertContains(response, 'id="review-status" class="needs-review"')
def test_docstring_review_admin(self):
self.client.login(username='admin', password=PASSWORD)
# Initial status: needs editing
response = self.client.get('/docs/sample_module/')
self.assertContains(response,
'id="review-status" class="needs-editing"')
# OK status change, I'm an admin
response = self.client.post('/docs/sample_module/review/',
{'status': '5'})
response = _follow_redirect(response)
self.assertContains(response, 'id="review-status" class="reviewed"')
def test_ok_to_apply(self):
# Prepare initial status, ensure there's a revision
import docweb.models as models
doc = models.Docstring.get_non_obsolete().get(name='sample_module')
doc.edit('test text', 'editor', 'comment')
doc.ok_to_apply = False
doc.save()
# Initial status: not OK
response = self.client.get('/docs/sample_module/')
self.failUnless('<li>OK</li>' not in response.content)
# Not OK to change it as an editor
self.client.login(username='editor', password=PASSWORD)
response = self.client.post('/docs/sample_module/ok-to-apply/',
{'ok': 'ok'})
response = _follow_redirect(response)
self.failUnless('<li>OK</li>' not in response.content)
# OK to change it as an admin
self.client.login(username='admin', password=PASSWORD)
response = self.client.post('/docs/sample_module/ok-to-apply/',
{'ok': '1'})
response = _follow_redirect(response)
self.failUnless('id="submit-ok-to-change" value="Change to No"' in response.content)
# Change it back
response = self.client.post('/docs/sample_module/ok-to-apply/',
{'ok': '0'})
response = _follow_redirect(response)
self.failUnless('id="submit-ok-to-change" value="Change to Yes"' in response.content)
class CommentTests(TestCase):
fixtures = ['tests/users.json', 'tests/docstrings.json']
def test_comment_cycle(self):
page = '/docs/sample_module/'
self.client.login(username='editor', password=PASSWORD)
# Check that there's a link to comment page
response = self.client.get(page)
self.assertContains(response, 'action="%scomment/new/"' % page)
# Try preview
response = self.client.post(page + 'comment/new/',
{'button_preview': 'Preview',
'text': 'New *text*'})
self.assertContains(response, 'New <em>text</em>')
# Try submit
response = self.client.post(page + 'comment/new/',
{'button_edit': 'Save',
'text': 'New *text*'})
response = _follow_redirect(response)
self.assertContains(response, 'New <em>text</em>')
self.assertContains(response, 'action="%scomment/1/"' % page)
self.assertContains(response, 'action="%scomment/' % page, count=1+3*1)
# Submit second one
response = self.client.post(page + 'comment/new/',
{'button_edit': 'Save',
'text': '*Second comment*'})
response = _follow_redirect(response)
self.assertContains(response, '<em>Second comment</em>')
self.assertContains(response, 'action="%scomment/' % page, count=1+3*2)
# Re-edit comment
response = self.client.post(page + 'comment/1/',
{'button_edit': 'Save',
'text': 'New *stuff*'})
response = _follow_redirect(response)
self.failUnless('New <em>text</em>' not in response.content)
self.assertContains(response, 'New <em>stuff</em>')
self.assertContains(response, 'action="%scomment/' % page, count=1+3*2)
# Mark comment resolved
self.assertContains(response, '<div class="comment">')
response = self.client.post(page + 'comment/1/',
{'button_resolved': 'Resolved'})
response = _follow_redirect(response)
self.assertContains(response, '<div class="comment resolved">')
self.assertContains(response, 'action="%scomment/' % page, count=1+3*2)
# Check that comments appear in /changes/
response = self.client.get('/changes/')
self.assertContains(response, 'sample_module')
self.assertContains(response, 'New *stuff*')
self.assertContains(response, '*Second comment*')
# Delete comment
response = self.client.post(page + 'comment/1/',
{'button_delete': 'Resolved'})
response = _follow_redirect(response)
self.assertContains(response, 'action="%scomment/' % page, count=1+3*1)
self.failUnless('New <em>stuff</em>' not in response.content)
self.assertContains(response, '<em>Second comment</em>')
class PullMergeTests(TestCase):
fixtures = ['tests/users.json', 'tests/docstrings_changed.json']
def tearDown(self):
xmlfile = os.path.join(settings.MODULE_DIR, 'base-examplecom.xml')
if os.path.isfile(xmlfile):
os.unlink(xmlfile)
def test_pull_merge_cycle(self):
self.client.login(username='admin', password=PASSWORD)
# Run pull
response = self.client.post('/control/',
{'update-docstrings': 'Pull'})
# Check that it succeeded
response = self.client.get('/docs/sample_module.sample1.func_obsolete/')
self.assertContains(response, 'This docstring is obsolete')
response = self.client.get('/docs/sample_module.sample4/')
self.assertContains(response, 'sample4.')
# Check merge results
self.client.login(username='editor', password=PASSWORD)
response = self.client.get('/merge/')
# waiting for merge
self.assertContains(response, 'type="checkbox" name="sample_module.sample1.func2"')
# conflict
self.assertContains(response, '<li><a href="/docs/sample_module.sample1.func1/"')
# Check what's to be merged
response = self.client.get('/docs/sample_module.sample1.func2/')
self.assertContains(response, 'nop"> sample1.func2 docstring NEW PART')
self.assertContains(response, 'del">-MERGE TEST\\r')
self.assertContains(response, 'add">+\\r')
self.assertContains(response, 'name="sample_module.sample1.func2"')
# Accept merges
response = self.client.post('/merge/',
{'sample_module.sample1.func2': 'checked'})
self.assertContains(response, 'Nothing to merge')
# Check conflict
response = self.client.get('/docs/sample_module.sample1.func1/')
conflict_text = ('<<<<<<< new vcs version\n'
'sample1.func1 docstrings\n'
'=======\n'
'edited docstring\n'
'>>>>>>> web version')
self.assertContains(response, conflict_text)
self.assertContains(response, 'Merge conflict')
response = self.client.get('/docs/sample_module.sample1.func1/edit/')
self.assertContains(response, conflict_text)
# Check that conflict markers can't be committed in
        bad_text = '<<<<<<<\nA\n=======\nB\n>>>>>>>'
response = self.client.post('/docs/sample_module.sample1.func1/edit/',
{'text': bad_text,
'button_edit': 'Save',
'comment': 'Resolve'})
self.assertContains(response, '"button_edit"')
# Check that conflict status is reset on edit
good_text = 'some **new** text'
response = self.client.post('/docs/sample_module.sample1.func1/edit/',
{'text': good_text,
'button_edit': 'Save',
'comment': 'Resolve'})
response = _follow_redirect(response)
self.failUnless('=======' not in response.content)
self.failUnless('Merge conflict' not in response.content)
self.assertContains(response, 'some <strong>new</strong> text')
# Check that no conflicts or merges remain
response = self.client.get('/merge/')
self.assertContains(response, 'Nothing to merge')
self.assertContains(response, 'No conflicts')
# Check idempotency of pull
response = self.client.post('/control/',
{'update-docstrings': 'Pull'})
response = self.client.get('/merge/')
self.assertContains(response, 'Nothing to merge')
self.assertContains(response, 'No conflicts')
class PatchTests(TestCase):
fixtures = ['tests/users.json', 'tests/docstrings_changed.json']
def tearDown(self):
xmlfile = os.path.join(settings.MODULE_DIR, 'base-examplecom.xml')
if os.path.isfile(xmlfile):
os.unlink(xmlfile)
def test_patch_generation(self):
# Run pull
self.client.login(username='admin', password=PASSWORD)
response = self.client.post('/control/',
{'update-docstrings': 'Pull'})
self.client.logout()
# Edit a docstring
self.client.login(username='editor', password=PASSWORD)
response = self.client.post('/docs/sample_module.sample2.func4/edit/',
{'text': 'EDITED Quux docstring',
'button_edit': 'Save',
'comment': 'Edit a bit'})
response = _follow_redirect(response)
self.assertContains(response, 'EDITED Quux docstring')
self.client.logout()
# Check that it's listed
response = self.client.get('/patch/')
self.assertContains(response, 'href="/docs/sample_module.sample2.func4/diff/vcs/cur/"')
self.assertContains(response, 'type="checkbox" name="sample_module.sample2.func4"')
self.assertContains(response, 'action="/patch/"')
# Check patch generation
response = self.client.post('/patch/',
{'sample_module.sample2.func4': 'checked'})
self.assertContains(response, '--- sample_module/sample2.py.old')
self.assertContains(response, '+++ sample_module/sample2.py')
self.assertContains(response,
'-\t"Quux docstring"\n'
'+\t"""EDITED Quux docstring"""\n')
class SearchTests(TestCase):
fixtures = ['tests/docstrings.json', 'tests/wiki.json']
def test_search_page(self):
response = self.client.get('/search/')
self.assertContains(response, '<form action="/search/"')
# check searching from docstrings
response = self.client.post('/search/',
{'type_code': 'any',
'fulltext': 'sample_module'})
self.assertContains(response, '>sample_module</a>')
# check search filter
response = self.client.post('/search/',
{'type_code': 'wiki',
'fulltext': 'sample_module'})
        self.failUnless('>sample_module</a>' not in str(response))
# check wiki search
response = self.client.post('/search/',
{'type_code': 'wiki',
'fulltext': 'Help',
'button_search': 'Search',
})
self.assertContains(response, '>Help Edit Docstring</a>')
def _follow_redirect(response, data={}):
if response.status_code not in (301, 302):
raise AssertionError("Not a redirect")
url = re.match('http://testserver([^#]*)', response['Location']).group(1)
return response.client.get(url, data)
|
|
from easyprocess import Proc, extract_version
from pyavrutils.avrsize import AvrSize
from pyavrutils.util import tmpdir, tmpfile, separate_sources, CompileError
import tempfile
from path import Path
class AvrGccCompileError(CompileError):
pass
class AvrGcc(object):
minprog = 'int main(){};'
def __init__(self, mcu='atmega168'):
self.cc = 'avr-gcc'
self.proc = None
self.options_extra = []
self.use_only_extra_options = False
self.defines = []
self.includes = []
self.output = None
self.mcu = mcu
self._targets = None
# Hz
self.f_cpu = 4000000
self.std = 'gnu99'
# http://www.network-theory.co.uk/docs/gccintro/gccintro_49.html
# 0/1/2/3/s (s=for size)
self.optimization = 0
# Enables linker relaxations. This is a catch-all for optimisations
# which occur during the link stage,
# where the final code can be altered by the linker
# to produce better code according to preset patterns.
# It doesn't do much at the moment,
# but one thing it does do is replace JMP instructions
# with RJMP instructions where possible to save a byte or so.
self.relax = False
# "garbage collect" unused sections
# Used with the -ffunction-sections and -fdata-sections compiler flags.
# When set, the linker is free to discard unused sections
# from the resulting binary.
self.gc_sections = False
        # Force each function into its own section
self.ffunction_sections = False
# Same as above, but for RAM globals.
self.fdata_sections = False
# stop the compiler from inlining repeated calls to tiny functions
# which can blow up the total binary size
# --param inline-call-cost=2 ??
self.fno_inline_small_functions = False
self.optimize_for_size()
def optimize_for_size(self):
'''
http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=90752
http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=69813
'''
self.optimization = 's'
self.relax = True
self.gc_sections = True
self.ffunction_sections = True
self.fdata_sections = True
self.fno_inline_small_functions = True
def optimize_no(self):
        ''' reset all optimization options to their defaults
'''
self.optimization = 0
self.relax = False
self.gc_sections = False
self.ffunction_sections = False
self.fdata_sections = False
self.fno_inline_small_functions = False
@property
def ok(self):
if self.proc:
return self.proc.return_code == 0
@property
def targets(self):
if not self._targets:
# cc = AvrGcc()
# cc.optimize_no()
# cc.mcu = 'xxxx'
# try:
# cc.build(self.minprog)
# except AvrGccCompileError:
# pass
# lines = cc.error_text.splitlines()
# lines = [x for x in lines if '/' not in x]
# lines = [x for x in lines if ':' not in x]
# lines = [x for x in lines if 'xxxx' not in x]
# lines = [x for x in lines if '\\' not in x]
# lines = [x.strip() for x in lines]
# lines.sort()
# self._targets = lines
def filt1(lines):
for i, x in enumerate(lines):
if 'known mcu names' in x.lower():
return lines[i + 1:]
def filt2(lines):
for i, x in enumerate(lines):
if not x:
return lines[:i]
s = Proc([self.cc, '--target-help']).call().stdout
lines = s.splitlines()
lines = filt1(lines)
lines = filt2(lines)
mcus = ' '.join(lines).strip().split()
self._targets = mcus
return self._targets
@property
def error_text(self):
if self.proc:
return self.proc.stderr
def version(self):
'avr-gcc version'
return extract_version(Proc(self.cc + ' --version').call().stdout)
def options_generated(self):
return self.command_list([''], _opt=True)
def command_list(self, sources, _opt=False):
'''command line as list'''
def abspath(x):
x = Path(x).abspath()
if not x.exists():
raise ValueError('file not found! ' + x)
return x
self.f_cpu = int(self.f_cpu)
self.mcu = str(self.mcu)
# if not self.mcu in self.targets:
# raise ValueError('invalid mcu:' + self.mcu)
if not _opt:
sources = [abspath(x) for x in sources]
includes = [abspath(x) for x in self.includes]
if not self.output:
self.output = tempfile.NamedTemporaryFile(
prefix='pyavrutils_', suffix='.elf', delete=0).name
defines = self.defines + ['F_CPU=' + str(self.f_cpu)]
cmd = [self.cc]
if not self.use_only_extra_options:
if not _opt:
cmd += sources
cmd += ['-D' + x for x in defines]
cmd += ['-I' + x for x in includes]
if not _opt:
cmd += ['-o', self.output]
cmd += ['-mmcu=' + self.mcu]
cmd += ['--std=' + self.std]
if self.relax:
cmd += ['-Wl,--relax']
if self.gc_sections:
cmd += ['-Wl,--gc-sections']
if self.ffunction_sections:
cmd += ['-ffunction-sections']
if self.fdata_sections:
cmd += ['-fdata-sections']
if self.fno_inline_small_functions:
cmd += ['-fno-inline-small-functions']
if self.optimization != 0:
cmd += ['-O' + str(self.optimization)]
cmd += self.options_extra
return cmd
def build(self, sources=None, headers=None):
''' sources can be file name or code:
sources=['x.c','int main(){}']
or
sources='int main(){}'
'''
tempdir = None
strings, files = separate_sources(sources)
if len(strings) or headers:
# TODO: remove tempdir
tempdir = tmpdir()
temp_list = [tmpfile(x, tempdir, '.c') for x in strings]
if headers:
for n, s in headers.items():
(Path(tempdir) / n).write_text(s)
cmd = self.command_list(files + temp_list)
if tempdir:
cmd += ['-I' + tempdir]
self.proc = Proc(cmd).call()
# for x in temp_list:
# os.remove(x)
if not self.ok:
raise AvrGccCompileError(cmd, sources, self.error_text)
def size(self):
s = AvrSize()
s.run(self.output, self.mcu)
return s
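

# --- Usage sketch (not part of pyavrutils) ----------------------------------
# Illustrative only; requires an avr-gcc toolchain on PATH. Shows the intended
# flow: build from a code string, then inspect the output and its size.
def _avrgcc_usage_sketch():
    cc = AvrGcc(mcu='atmega168')        # size-optimized flags are the default
    cc.build('int main(){return 0;}')   # sources may be code strings or files
    return cc.output, cc.size()         # path to the .elf and avr-size report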
|
|
import networkx as nx
import numpy as np
import pytest
import pyquil.simulation.matrices as qmats
from pyquil import Program
from pyquil.api import QuantumComputer
from pyquil.device import NxDevice
from pyquil.experiment import ExperimentSetting, Experiment, zeros_state
from pyquil.gates import CNOT, H, I, MEASURE, PHASE, RX, RY, RZ, X
from pyquil.operator_estimation import measure_observables
from pyquil.paulis import sI, sX, sY, sZ
from pyquil.pyqvm import PyQVM
from pyquil.simulation._reference import ReferenceDensitySimulator, _is_valid_quantum_state
from pyquil.simulation.tools import lifted_gate_matrix
from pyquil.tests.utils import DummyCompiler
def test_qaoa_density():
wf_true = [
0.00167784 + 1.00210180e-05 * 1j,
0.50000000 - 4.99997185e-01 * 1j,
0.50000000 - 4.99997185e-01 * 1j,
0.00167784 + 1.00210180e-05 * 1j,
]
wf_true = np.reshape(np.array(wf_true), (4, 1))
rho_true = np.dot(wf_true, np.conj(wf_true).T)
prog = Program()
prog.inst(
[
RY(np.pi / 2, 0),
RX(np.pi, 0),
RY(np.pi / 2, 1),
RX(np.pi, 1),
CNOT(0, 1),
RX(-np.pi / 2, 1),
RY(4.71572463191, 1),
RX(np.pi / 2, 1),
CNOT(0, 1),
RX(-2 * 2.74973750579, 0),
RX(-2 * 2.74973750579, 1),
]
)
qam = PyQVM(n_qubits=2, quantum_simulator_type=ReferenceDensitySimulator).execute(prog)
rho = qam.wf_simulator.density
np.testing.assert_allclose(rho_true, rho, atol=1e-8)
def test_larger_qaoa_density():
prog = Program(
H(0),
H(1),
H(2),
H(3),
X(0),
PHASE(0.3928244130249029, 0),
X(0),
PHASE(0.3928244130249029, 0),
CNOT(0, 1),
RZ(0.78564882604980579, 1),
CNOT(0, 1),
X(0),
PHASE(0.3928244130249029, 0),
X(0),
PHASE(0.3928244130249029, 0),
CNOT(0, 3),
RZ(0.78564882604980579, 3),
CNOT(0, 3),
X(0),
PHASE(0.3928244130249029, 0),
X(0),
PHASE(0.3928244130249029, 0),
CNOT(1, 2),
RZ(0.78564882604980579, 2),
CNOT(1, 2),
X(0),
PHASE(0.3928244130249029, 0),
X(0),
PHASE(0.3928244130249029, 0),
CNOT(2, 3),
RZ(0.78564882604980579, 3),
CNOT(2, 3),
H(0),
RZ(-0.77868204192240842, 0),
H(0),
H(1),
RZ(-0.77868204192240842, 1),
H(1),
H(2),
RZ(-0.77868204192240842, 2),
H(2),
H(3),
RZ(-0.77868204192240842, 3),
H(3),
)
qam = PyQVM(n_qubits=4, quantum_simulator_type=ReferenceDensitySimulator).execute(prog)
rho_test = qam.wf_simulator.density
wf_true = np.array(
[
8.43771693e-05 - 0.1233845 * 1j,
-1.24927731e-01 + 0.00329533 * 1j,
-1.24927731e-01 + 0.00329533 * 1j,
-2.50040954e-01 + 0.12661547 * 1j,
-1.24927731e-01 + 0.00329533 * 1j,
-4.99915497e-01 - 0.12363516 * 1j,
-2.50040954e-01 + 0.12661547 * 1j,
-1.24927731e-01 + 0.00329533 * 1j,
-1.24927731e-01 + 0.00329533 * 1j,
-2.50040954e-01 + 0.12661547 * 1j,
-4.99915497e-01 - 0.12363516 * 1j,
-1.24927731e-01 + 0.00329533 * 1j,
-2.50040954e-01 + 0.12661547 * 1j,
-1.24927731e-01 + 0.00329533 * 1j,
-1.24927731e-01 + 0.00329533 * 1j,
8.43771693e-05 - 0.1233845 * 1j,
]
)
wf_true = np.reshape(wf_true, (2 ** 4, 1))
rho_true = np.dot(wf_true, np.conj(wf_true).T)
np.testing.assert_allclose(rho_true, rho_test, atol=1e-8)
def _random_1q_density():
    state = np.random.random(2) + 1j * np.random.random(2)
normalization = np.conj(state).T.dot(state)
state /= np.sqrt(normalization)
state = state.reshape((-1, 1))
rho = state.dot(np.conj(state).T)
assert np.isclose(np.trace(rho), 1.0)
assert np.allclose(rho, np.conj(rho).T)
return rho
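# The channel tests below all realize a single-qubit Kraus map of the form
#   rho' = sum_k K_k rho K_k^dagger
# A tiny illustrative helper for that map (not part of pyquil's API):
def _apply_kraus_sketch(kraus_ops, rho):
    return sum(k.dot(rho).dot(np.conj(k).T) for k in kraus_ops)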
def test_kraus_application_bitflip():
p = 0.372
qam = PyQVM(
n_qubits=1,
quantum_simulator_type=ReferenceDensitySimulator,
post_gate_noise_probabilities={"bit_flip": p},
)
initial_density = _random_1q_density()
qam.wf_simulator.density = initial_density
qam.execute(Program(I(0)))
final_density = (1 - p) * initial_density + p * qmats.X.dot(initial_density).dot(qmats.X)
np.testing.assert_allclose(final_density, qam.wf_simulator.density)
def test_kraus_application_phaseflip():
p = 0.372
qam = PyQVM(
n_qubits=1,
quantum_simulator_type=ReferenceDensitySimulator,
post_gate_noise_probabilities={"phase_flip": p},
)
initial_density = _random_1q_density()
qam.wf_simulator.density = initial_density
qam.execute(Program(I(0)))
final_density = (1 - p) * initial_density + p * qmats.Z.dot(initial_density).dot(qmats.Z)
np.testing.assert_allclose(final_density, qam.wf_simulator.density)
def test_kraus_application_bitphaseflip():
p = 0.372
qam = PyQVM(
n_qubits=1,
quantum_simulator_type=ReferenceDensitySimulator,
post_gate_noise_probabilities={"bitphase_flip": p},
)
initial_density = _random_1q_density()
qam.wf_simulator.density = initial_density
qam.execute(Program(I(0)))
final_density = (1 - p) * initial_density + p * qmats.Y.dot(initial_density).dot(qmats.Y)
np.testing.assert_allclose(final_density, qam.wf_simulator.density)
def test_kraus_application_relaxation():
p = 0.372
qam = PyQVM(
n_qubits=1,
quantum_simulator_type=ReferenceDensitySimulator,
post_gate_noise_probabilities={"relaxation": p},
)
rho = _random_1q_density()
qam.wf_simulator.density = rho
qam.execute(Program(I(0)))
final_density = np.array(
[
[rho[0, 0] + rho[1, 1] * p, np.sqrt(1 - p) * rho[0, 1]],
[np.sqrt(1 - p) * rho[1, 0], (1 - p) * rho[1, 1]],
]
)
np.testing.assert_allclose(final_density, qam.wf_simulator.density)
def test_kraus_application_dephasing():
p = 0.372
qam = PyQVM(
n_qubits=1,
quantum_simulator_type=ReferenceDensitySimulator,
post_gate_noise_probabilities={"dephasing": p},
)
rho = _random_1q_density()
qam.wf_simulator.density = rho
qam.execute(Program(I(0)))
final_density = np.array([[rho[0, 0], (1 - p) * rho[0, 1]], [(1 - p) * rho[1, 0], rho[1, 1]]])
np.testing.assert_allclose(final_density, qam.wf_simulator.density)
def test_kraus_application_depolarizing():
p = 0.372
qam = PyQVM(
n_qubits=1,
quantum_simulator_type=ReferenceDensitySimulator,
post_gate_noise_probabilities={"depolarizing": p},
)
rho = _random_1q_density()
qam.wf_simulator.density = rho
qam.execute(Program(I(0)))
final_density = (1 - p) * rho + (p / 3) * (
qmats.X.dot(rho).dot(qmats.X)
+ qmats.Y.dot(rho).dot(qmats.Y)
+ qmats.Z.dot(rho).dot(qmats.Z)
)
np.testing.assert_allclose(final_density, qam.wf_simulator.density)
def test_kraus_compound_T1T2_application():
p1 = 0.372
p2 = 0.45
qam = PyQVM(
n_qubits=1,
quantum_simulator_type=ReferenceDensitySimulator,
post_gate_noise_probabilities={"relaxation": p1, "dephasing": p2},
)
rho = _random_1q_density()
qam.wf_simulator.density = rho
qam.execute(Program(I(0)))
final_density = np.array(
[
[rho[0, 0] + rho[1, 1] * p1, (1 - p2) * np.sqrt(1 - p1) * rho[0, 1]],
[(1 - p2) * np.sqrt(1 - p1) * rho[1, 0], (1 - p1) * rho[1, 1]],
]
)
np.testing.assert_allclose(final_density, qam.wf_simulator.density)
@pytest.mark.xfail(reason="We don't support different noise parameters for 2q vs 1q gates!")
def test_multiqubit_decay_bellstate():
program = Program(RY(np.pi / 3, 0), CNOT(0, 1))
# commence manually dotting the above program
initial_density = np.zeros((4, 4), dtype=complex)
initial_density[0, 0] = 1.0
gate_time_1q = 50e-9
T1 = 30e-6
T2 = 15e-6
p1 = 1 - np.exp(-gate_time_1q / T1)
p2 = 1 - np.exp(-gate_time_1q / T2)
# RY
gate_1 = np.kron(np.eye(2), qmats.RY(np.pi / 3))
state = gate_1.dot(initial_density).dot(np.conj(gate_1).T)
for ii in range(2):
new_density = np.zeros_like(state)
for kop in qmats.relaxation_operators(p1):
operator = lifted_gate_matrix(matrix=kop, qubit_inds=[ii], n_qubits=2)
new_density += operator.dot(state).dot(np.conj(operator).T)
state = new_density
for ii in range(2):
new_density = np.zeros_like(state)
for kop in qmats.dephasing_operators(p2):
operator = lifted_gate_matrix(matrix=kop, qubit_inds=[ii], n_qubits=2)
new_density += operator.dot(state).dot(np.conj(operator).T)
state = new_density
# CNOT
# TODO: different 1q, 2q noise probabilities
cnot_01 = np.kron(qmats.I, qmats.P0) + np.kron(qmats.X, qmats.P1)
state = cnot_01.dot(state).dot(cnot_01.T)
gate_time_2q = 150e-9
p1 = 1 - np.exp(-gate_time_2q / T1)
p2 = 1 - np.exp(-gate_time_2q / T2)
for ii in range(2):
new_density = np.zeros_like(state)
for kop in qmats.relaxation_operators(p1):
operator = lifted_gate_matrix(matrix=kop, qubit_inds=[ii], n_qubits=2)
new_density += operator.dot(state).dot(np.conj(operator).T)
state = new_density
for ii in range(2):
new_density = np.zeros_like(state)
for kop in qmats.dephasing_operators(p2):
operator = lifted_gate_matrix(matrix=kop, qubit_inds=[ii], n_qubits=2)
new_density += operator.dot(state).dot(np.conj(operator).T)
state = new_density
qam = PyQVM(
n_qubits=2,
quantum_simulator_type=ReferenceDensitySimulator,
post_gate_noise_probabilities={"relaxation": p1, "dephasing": p2},
)
qam.execute(program)
assert np.allclose(qam.wf_simulator.density, state)
@pytest.mark.slow
def test_for_negative_probabilities():
# trivial program to do state tomography on
prog = Program(I(0))
# make an Experiment
expt_settings = [ExperimentSetting(zeros_state([0]), pt) for pt in [sI(0), sX(0), sY(0), sZ(0)]]
experiment_1q = Experiment(settings=expt_settings, program=prog)
# make a quantum computer object
device = NxDevice(nx.complete_graph(1))
qc_density = QuantumComputer(
name="testy!",
qam=PyQVM(n_qubits=1, quantum_simulator_type=ReferenceDensitySimulator),
device=device,
compiler=DummyCompiler(),
)
# initialize with a pure state
initial_density = np.array([[1.0, 0.0], [0.0, 0.0]])
qc_density.qam.wf_simulator.density = initial_density
try:
list(measure_observables(qc=qc_density, tomo_experiment=experiment_1q, n_shots=3000))
except ValueError as e:
# the error is from np.random.choice by way of self.rs.choice in ReferenceDensitySimulator
assert str(e) != "probabilities are not non-negative"
# initialize with a mixed state
initial_density = np.array([[0.9, 0.0], [0.0, 0.1]])
qc_density.qam.wf_simulator.density = initial_density
try:
list(measure_observables(qc=qc_density, tomo_experiment=experiment_1q, n_shots=3000))
except ValueError as e:
assert str(e) != "probabilities are not non-negative"
def test_set_initial_state():
    # Test that the state matrix assigned to ReferenceDensitySimulator
    # persists between rounds of run.
rho1 = np.array([[0.0, 0.0], [0.0, 1.0]])
# run prog
prog = Program(I(0))
ro = prog.declare("ro", "BIT", 1)
prog += MEASURE(0, ro[0])
# make a quantum computer object
device = NxDevice(nx.complete_graph(1))
qc_density = QuantumComputer(
name="testy!",
qam=PyQVM(n_qubits=1, quantum_simulator_type=ReferenceDensitySimulator),
device=device,
compiler=DummyCompiler(),
)
qc_density.qam.wf_simulator.set_initial_state(rho1).reset()
out = [qc_density.run(prog) for _ in range(0, 4)]
ans = [np.array([[1]]), np.array([[1]]), np.array([[1]]), np.array([[1]])]
assert all([np.allclose(x, y) for x, y in zip(out, ans)])
# Run and measure style
progRAM = Program(I(0))
results = qc_density.run_and_measure(progRAM, trials=10)
ans = {0: np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])}
assert np.allclose(results[0], ans[0])
# test reverting ReferenceDensitySimulator to the default state
rho0 = np.array([[1.0, 0.0], [0.0, 0.0]])
qc_density.qam.wf_simulator.set_initial_state(rho0).reset()
assert np.allclose(qc_density.qam.wf_simulator.density, rho0)
assert np.allclose(qc_density.qam.wf_simulator.initial_density, rho0)
def test_is_valid_quantum_state():
with pytest.raises(ValueError):
# is Hermitian and PSD but not trace one
_is_valid_quantum_state(np.array([[1, 0], [0, 1]]))
with pytest.raises(ValueError):
# negative eigenvalue
_is_valid_quantum_state(np.array([[1.01, 0], [0, -0.01]]))
with pytest.raises(ValueError):
# imaginary eigenvalue
_is_valid_quantum_state(np.array([[1, 0], [0, -0.0001j]]))
with pytest.raises(ValueError):
# not Hermitian
_is_valid_quantum_state(np.array([[0, 1], [1, 0]]))
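# For reference, the properties exercised above are the standard density-matrix
# validity conditions. A minimal sketch of such a check (not pyQuil's actual
# `_is_valid_quantum_state` implementation):
def _sketch_is_valid_quantum_state(state_matrix, atol=1e-8):
    if not np.allclose(state_matrix, np.conj(state_matrix).T, atol=atol):
        raise ValueError("The state matrix is not Hermitian.")
    if not np.isclose(np.trace(state_matrix).real, 1.0, atol=atol):
        raise ValueError("The state matrix does not have unit trace.")
    if np.any(np.linalg.eigvalsh(state_matrix) < -atol):
        raise ValueError("The state matrix is not positive semidefinite.")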
|
|
#!/usr/bin/env python
# CREATED:2013-03-08 15:25:18 by Brian McFee <brm2132@columbia.edu>
# unit tests for librosa core (__init__.py)
#
# Run me as follows:
# cd tests/
# nosetests -v --with-coverage --cover-package=librosa
#
from __future__ import print_function
# Disable cache
import os
try:
    os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
    pass
import librosa
import glob
import numpy as np
import scipy.io
import scipy.signal
import six
from nose.tools import eq_, raises, make_decorator
import warnings
warnings.resetwarnings()
warnings.simplefilter('always')
# -- utilities --#
def files(pattern):
test_files = glob.glob(pattern)
test_files.sort()
return test_files
def srand(seed=628318530):
    np.random.seed(seed)
def load(infile):
return scipy.io.loadmat(infile, chars_as_strings=True)
def test_load():
# Note: this does not test resampling.
# That is a separate unit test.
def __test(infile):
DATA = load(infile)
y, sr = librosa.load(DATA['wavfile'][0],
sr=None,
mono=DATA['mono'])
# Verify that the sample rate is correct
eq_(sr, DATA['sr'])
assert np.allclose(y, DATA['y'])
for infile in files('data/core-load-*.mat'):
yield (__test, infile)
def test_load_resample():
sr_target = 16000
offset = 10
duration = 5
def __test(res_type):
y_native, sr = librosa.load(librosa.util.example_audio_file(),
sr=None,
offset=offset,
duration=duration,
res_type=res_type)
y2 = librosa.resample(y_native, sr, sr_target, res_type=res_type)
y, _ = librosa.load(librosa.util.example_audio_file(),
sr=sr_target,
offset=offset,
duration=duration,
res_type=res_type)
assert np.allclose(y2, y)
for res_type in ['kaiser_fast', 'kaiser_best', 'scipy']:
yield __test, res_type
def test_segment_load():
sample_len = 2003
fs = 44100
test_file = 'data/test1_44100.wav'
y, sr = librosa.load(test_file, sr=None, mono=False,
offset=0., duration=sample_len/float(fs))
eq_(y.shape[-1], sample_len)
y2, sr = librosa.load(test_file, sr=None, mono=False)
assert np.allclose(y, y2[:, :sample_len])
sample_offset = 2048
y, sr = librosa.load(test_file, sr=None, mono=False,
offset=sample_offset/float(fs), duration=1.0)
eq_(y.shape[-1], fs)
y2, sr = librosa.load(test_file, sr=None, mono=False)
assert np.allclose(y, y2[:, sample_offset:sample_offset+fs])
def test_resample_mono():
def __test(y, sr_in, sr_out, res_type, fix):
y2 = librosa.resample(y, sr_in, sr_out,
res_type=res_type,
fix=fix)
# First, check that the audio is valid
librosa.util.valid_audio(y2, mono=True)
# If it's a no-op, make sure the signal is untouched
if sr_out == sr_in:
assert np.allclose(y, y2)
# Check buffer contiguity
assert y2.flags['C_CONTIGUOUS']
# Check that we're within one sample of the target length
target_length = y.shape[-1] * sr_out // sr_in
assert np.abs(y2.shape[-1] - target_length) <= 1
for infile in ['data/test1_44100.wav',
'data/test1_22050.wav',
'data/test2_8000.wav']:
y, sr_in = librosa.load(infile, sr=None, duration=5)
for sr_out in [8000, 22050]:
for res_type in ['kaiser_best', 'kaiser_fast', 'scipy']:
for fix in [False, True]:
yield (__test, y, sr_in, sr_out, res_type, fix)
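# Worked example of the length bound asserted above (illustrative numbers):
# resampling 5 seconds of 44100 Hz audio (220500 samples) to 8000 Hz targets
# 220500 * 8000 // 44100 == 40000 samples, give or take one sample.
assert 220500 * 8000 // 44100 == 40000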
def test_resample_stereo():
def __test(y, sr_in, sr_out, res_type, fix):
y2 = librosa.resample(y, sr_in, sr_out,
res_type=res_type,
fix=fix)
# First, check that the audio is valid
librosa.util.valid_audio(y2, mono=False)
eq_(y2.ndim, y.ndim)
# If it's a no-op, make sure the signal is untouched
if sr_out == sr_in:
assert np.allclose(y, y2)
# Check buffer contiguity
assert y2.flags['C_CONTIGUOUS']
# Check that we're within one sample of the target length
target_length = y.shape[-1] * sr_out // sr_in
assert np.abs(y2.shape[-1] - target_length) <= 1
y, sr_in = librosa.load('data/test1_44100.wav', mono=False, sr=None, duration=5)
for sr_out in [8000, 22050]:
for res_type in ['kaiser_fast', 'scipy']:
for fix in [False, True]:
yield __test, y, sr_in, sr_out, res_type, fix
def test_resample_scale():
def __test(sr_in, sr_out, res_type, y):
y2 = librosa.resample(y, sr_in, sr_out,
res_type=res_type,
scale=True)
# First, check that the audio is valid
librosa.util.valid_audio(y2, mono=True)
n_orig = np.sqrt(np.sum(np.abs(y)**2))
n_res = np.sqrt(np.sum(np.abs(y2)**2))
# If it's a no-op, make sure the signal is untouched
assert np.allclose(n_orig, n_res, atol=1e-2), (n_orig, n_res)
y, sr_in = librosa.load('data/test1_22050.wav', mono=True, sr=None, duration=3)
for res_type in ['scipy', 'kaiser_best', 'kaiser_fast']:
for sr_out in [11025, 22050, 44100]:
yield __test, sr_in, sr_out, res_type, y
yield __test, sr_out, sr_in, res_type, y
def test_stft():
def __test(infile):
DATA = load(infile)
# Load the file
(y, sr) = librosa.load(DATA['wavfile'][0], sr=None, mono=True)
if DATA['hann_w'][0, 0] == 0:
# Set window to ones, swap back to nfft
window = np.ones
win_length = None
else:
window = 'hann'
win_length = DATA['hann_w'][0, 0]
# Compute the STFT
D = librosa.stft(y,
n_fft=DATA['nfft'][0, 0].astype(int),
hop_length=DATA['hop_length'][0, 0].astype(int),
win_length=win_length,
window=window,
center=False)
assert np.allclose(D, DATA['D'])
for infile in files('data/core-stft-*.mat'):
yield (__test, infile)
def test_ifgram():
def __test(infile):
DATA = load(infile)
y, sr = librosa.load(DATA['wavfile'][0], sr=None, mono=True)
# Compute the IFgram
F, D = librosa.ifgram(y,
n_fft=DATA['nfft'][0, 0].astype(int),
hop_length=DATA['hop_length'][0, 0].astype(int),
win_length=DATA['hann_w'][0, 0].astype(int),
sr=DATA['sr'][0, 0].astype(int),
ref_power=0.0,
clip=False,
center=False)
# D fails to match here because of fftshift()
# assert np.allclose(D, DATA['D'])
assert np.allclose(F, DATA['F'], rtol=1e-1, atol=1e-1)
for infile in files('data/core-ifgram-*.mat'):
yield (__test, infile)
def test_ifgram_matches_stft():
y, sr = librosa.load('data/test1_22050.wav')
def __test(n_fft, hop_length, win_length, center, norm, dtype):
D_stft = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, center=center,
dtype=dtype)
_, D_ifgram = librosa.ifgram(y, sr, n_fft=n_fft,
hop_length=hop_length,
win_length=win_length, center=center,
norm=norm, dtype=dtype)
if norm:
# STFT doesn't do window normalization;
# let's just ignore the relative scale to make this easy
D_stft = librosa.util.normalize(D_stft, axis=0)
D_ifgram = librosa.util.normalize(D_ifgram, axis=0)
assert np.allclose(D_stft, D_ifgram)
for n_fft in [1024, 2048]:
for hop_length in [None, n_fft // 2, n_fft // 4]:
for win_length in [None, n_fft // 2, n_fft // 4]:
for center in [False, True]:
for norm in [False, True]:
for dtype in [np.complex64, np.complex128]:
yield (__test, n_fft, hop_length, win_length,
center, norm, dtype)
def test_ifgram_if():
y, sr = librosa.load('data/test1_22050.wav')
def __test(ref, clip):
F, D = librosa.ifgram(y, sr=sr, ref_power=ref, clip=clip)
if clip:
assert np.all(0 <= F) and np.all(F <= 0.5 * sr)
assert np.all(np.isfinite(F))
for ref in [-10, 0.0, 1e-6, np.max]:
for clip in [False, True]:
if six.callable(ref) or ref >= 0.0:
tf = __test
else:
tf = raises(librosa.ParameterError)(__test)
yield tf, ref, clip
def test_salience_basecase():
(y, sr) = librosa.load('data/test1_22050.wav')
S = np.abs(librosa.stft(y))
freqs = librosa.core.fft_frequencies(sr)
harms = [1]
weights = [1.0]
S_sal = librosa.core.salience(
S, freqs, harms, weights, filter_peaks=False, kind='quadratic'
)
assert np.allclose(S_sal, S)
def test_salience_basecase2():
(y, sr) = librosa.load('data/test1_22050.wav')
S = np.abs(librosa.stft(y))
freqs = librosa.core.fft_frequencies(sr)
harms = [1, 0.5, 2.0]
weights = [1.0, 0.0, 0.0]
S_sal = librosa.core.salience(
S, freqs, harms, weights, filter_peaks=False, kind='quadratic'
)
assert np.allclose(S_sal, S)
def test_salience_defaults():
S = np.array([
[0.1, 0.5, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.7, 0.3],
[1.3, 3.2, 0.8]
])
freqs = np.array([50.0, 100.0, 200.0, 400.0])
harms = [0.5, 1, 2]
actual = librosa.core.salience(
S, freqs, harms, kind='quadratic', fill_value=0.0
)
expected = np.array([
[0.0, 0.0, 0.0],
[0.3, 2.4, 1.5],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
]) / 3.0
assert np.allclose(expected, actual)
def test_salience_weights():
S = np.array([
[0.1, 0.5, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.7, 0.3],
[1.3, 3.2, 0.8]
])
freqs = np.array([50.0, 100.0, 200.0, 400.0])
harms = [0.5, 1, 2]
weights = [1.0, 1.0, 1.0]
actual = librosa.core.salience(
S, freqs, harms, weights, kind='quadratic', fill_value=0.0
)
expected = np.array([
[0.0, 0.0, 0.0],
[0.3, 2.4, 1.5],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
]) / 3.0
assert np.allclose(expected, actual)
def test_salience_no_peak_filter():
S = np.array([
[0.1, 0.5, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.7, 0.3],
[1.3, 3.2, 0.8]
])
freqs = np.array([50.0, 100.0, 200.0, 400.0])
harms = [0.5, 1, 2]
weights = [1.0, 1.0, 1.0]
actual = librosa.core.salience(
S, freqs, harms, weights, filter_peaks=False, kind='quadratic'
)
expected = np.array([
[0.3, 1.7, 1.2],
[0.3, 2.4, 1.5],
[1.5, 5.1, 2.3],
[1.3, 3.9, 1.1]
]) / 3.0
assert np.allclose(expected, actual)
def test_salience_aggregate():
S = np.array([
[0.1, 0.5, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.7, 0.3],
[1.3, 3.2, 0.8]
])
freqs = np.array([50.0, 100.0, 200.0, 400.0])
harms = [0.5, 1, 2]
weights = [1.0, 1.0, 1.0]
actual = librosa.core.salience(
S, freqs, harms, weights, aggregate=np.ma.max, kind='quadratic',
fill_value=0.0
)
expected = np.array([
[0.0, 0.0, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
])
assert np.allclose(expected, actual)
def test_magphase():
(y, sr) = librosa.load('data/test1_22050.wav')
D = librosa.stft(y)
S, P = librosa.magphase(D)
assert np.allclose(S * P, D)
def test_istft_reconstruction():
from scipy.signal import bartlett, hann, hamming, blackman, blackmanharris
def __test(x, n_fft, hop_length, window, atol, length):
S = librosa.core.stft(
x, n_fft=n_fft, hop_length=hop_length, window=window)
x_reconstructed = librosa.core.istft(
S, hop_length=hop_length, window=window, length=length)
if length is not None:
assert len(x_reconstructed) == length
L = min(len(x), len(x_reconstructed))
x = np.resize(x, L)
x_reconstructed = np.resize(x_reconstructed, L)
# NaN/Inf/-Inf should not happen
assert np.all(np.isfinite(x_reconstructed))
        # should be approximately reconstructed
assert np.allclose(x, x_reconstructed, atol=atol)
srand()
# White noise
x1 = np.random.randn(2 ** 15)
# Sin wave
x2 = np.sin(np.linspace(-np.pi, np.pi, 2 ** 15))
# Real music signal
x3, sr = librosa.load('data/test1_44100.wav', sr=None, mono=True)
assert sr == 44100
for x, atol in [(x1, 1.0e-6), (x2, 1.0e-7), (x3, 1.0e-7)]:
for window_func in [bartlett, hann, hamming, blackman, blackmanharris]:
for n_fft in [512, 1024, 2048, 4096]:
win = window_func(n_fft, sym=False)
symwin = window_func(n_fft, sym=True)
                # tests with pre-computed window functions
for hop_length_denom in six.moves.range(2, 9):
hop_length = n_fft // hop_length_denom
                    for length in [None, len(x) - 1000, len(x) + 1000]:
yield (__test, x, n_fft, hop_length, win, atol, length)
yield (__test, x, n_fft, hop_length, symwin, atol, length)
                # also tests with passing the window function itself
yield (__test, x, n_fft, n_fft // 9, window_func, atol, None)
    # test with default parameters
x_reconstructed = librosa.core.istft(librosa.core.stft(x))
L = min(len(x), len(x_reconstructed))
x = np.resize(x, L)
x_reconstructed = np.resize(x_reconstructed, L)
assert np.allclose(x, x_reconstructed, atol=atol)
def test_load_options():
filename = 'data/test1_22050.wav'
def __test(offset, duration, mono, dtype):
y, sr = librosa.load(filename, mono=mono, offset=offset,
duration=duration, dtype=dtype)
if duration is not None:
assert np.allclose(y.shape[-1], int(sr * duration))
if mono:
eq_(y.ndim, 1)
else:
# This test file is stereo, so y.ndim should be 2
eq_(y.ndim, 2)
# Check the dtype
assert np.issubdtype(y.dtype, dtype)
assert np.issubdtype(dtype, y.dtype)
for offset in [0, 1, 2]:
for duration in [None, 0, 0.5, 1, 2]:
for mono in [False, True]:
for dtype in [np.float32, np.float64]:
yield __test, offset, duration, mono, dtype
def test_get_duration_wav():
def __test_audio(filename, mono, sr, duration):
y, sr = librosa.load(filename, sr=sr, mono=mono, duration=duration)
duration_est = librosa.get_duration(y=y, sr=sr)
assert np.allclose(duration_est, duration, rtol=1e-3, atol=1e-5)
def __test_spec(filename, sr, duration, n_fft, hop_length, center):
y, sr = librosa.load(filename, sr=sr, duration=duration)
S = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, center=center)
duration_est = librosa.get_duration(S=S, sr=sr, n_fft=n_fft,
hop_length=hop_length,
center=center)
# We lose a little accuracy in framing without centering, so it's
# not as precise as time-domain duration
assert np.allclose(duration_est, duration, rtol=1e-1, atol=1e-2)
test_file = 'data/test1_22050.wav'
for sr in [8000, 11025, 22050]:
for duration in [1.0, 2.5]:
for mono in [False, True]:
yield __test_audio, test_file, mono, sr, duration
for n_fft in [256, 512, 1024]:
for hop_length in [n_fft // 8, n_fft // 4, n_fft // 2]:
for center in [False, True]:
yield (__test_spec, test_file, sr,
duration, n_fft, hop_length, center)
def test_get_duration_filename():
filename = 'data/test2_8000.wav'
true_duration = 30.197625
duration_fn = librosa.get_duration(filename=filename)
y, sr = librosa.load(filename, sr=None)
duration_y = librosa.get_duration(y=y, sr=sr)
assert np.allclose(duration_fn, true_duration)
assert np.allclose(duration_fn, duration_y)
def test_autocorrelate():
def __test(y, truth, max_size, axis):
ac = librosa.autocorrelate(y, max_size=max_size, axis=axis)
my_slice = [slice(None)] * truth.ndim
if max_size is not None and max_size <= y.shape[axis]:
my_slice[axis] = slice(min(max_size, y.shape[axis]))
if not np.iscomplexobj(y):
assert not np.iscomplexobj(ac)
        assert np.allclose(ac, truth[tuple(my_slice)])
srand()
# test with both real and complex signals
for y in [np.random.randn(256, 256), np.exp(1.j * np.random.randn(256, 256))]:
# Make ground-truth autocorrelations along each axis
truth = [np.asarray([scipy.signal.fftconvolve(yi, yi[::-1].conj(),
mode='full')[len(yi)-1:] for yi in y.T]).T,
np.asarray([scipy.signal.fftconvolve(yi, yi[::-1].conj(),
mode='full')[len(yi)-1:] for yi in y])]
for axis in [0, 1, -1]:
for max_size in [None, y.shape[axis]//2, y.shape[axis], 2 * y.shape[axis]]:
yield __test, y, truth[axis], max_size, axis
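# The ground truth above relies on the identity that the full autocorrelation
# of y equals fftconvolve(y, y[::-1].conj()), with index len(y)-1 onward
# giving the non-negative lags. A tiny concrete check of that identity:
_y_demo = np.array([1.0, 2.0, 3.0])
_ac_demo = scipy.signal.fftconvolve(_y_demo, _y_demo[::-1], mode='full')[len(_y_demo) - 1:]
assert np.allclose(_ac_demo, [14.0, 8.0, 3.0])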
def test_to_mono():
def __test(filename, mono):
y, sr = librosa.load(filename, mono=mono)
y_mono = librosa.to_mono(y)
eq_(y_mono.ndim, 1)
eq_(len(y_mono), y.shape[-1])
if mono:
assert np.allclose(y, y_mono)
filename = 'data/test1_22050.wav'
for mono in [False, True]:
yield __test, filename, mono
def test_zero_crossings():
def __test(data, threshold, ref_magnitude, pad, zp):
zc = librosa.zero_crossings(y=data,
threshold=threshold,
ref_magnitude=ref_magnitude,
pad=pad,
zero_pos=zp)
idx = np.flatnonzero(zc)
if pad:
idx = idx[1:]
for i in idx:
assert np.sign(data[i]) != np.sign(data[i-1])
srand()
data = np.random.randn(32)
for threshold in [None, 0, 1e-10]:
for ref_magnitude in [None, 0.1, np.max]:
for pad in [False, True]:
for zero_pos in [False, True]:
yield __test, data, threshold, ref_magnitude, pad, zero_pos
def test_pitch_tuning():
def __test(hz, resolution, bins_per_octave, tuning):
est_tuning = librosa.pitch_tuning(hz,
resolution=resolution,
bins_per_octave=bins_per_octave)
assert np.abs(tuning - est_tuning) <= resolution
for resolution in [1e-2, 1e-3]:
for bins_per_octave in [12]:
# Make up some frequencies
for tuning in [-0.5, -0.375, -0.25, 0.0, 0.25, 0.375]:
note_hz = librosa.midi_to_hz(tuning + np.arange(128))
yield __test, note_hz, resolution, bins_per_octave, tuning
def test_piptrack_properties():
def __test(S, n_fft, hop_length, fmin, fmax, threshold):
pitches, mags = librosa.core.piptrack(S=S,
n_fft=n_fft,
hop_length=hop_length,
fmin=fmin,
fmax=fmax,
threshold=threshold)
# Shape tests
eq_(S.shape, pitches.shape)
eq_(S.shape, mags.shape)
# Make sure all magnitudes are positive
assert np.all(mags >= 0)
# Check the frequency estimates for bins with non-zero magnitude
idx = (mags > 0)
assert np.all(pitches[idx] >= fmin)
assert np.all(pitches[idx] <= fmax)
# And everywhere else, pitch should be 0
assert np.all(pitches[~idx] == 0)
y, sr = librosa.load('data/test1_22050.wav')
for n_fft in [2048, 4096]:
for hop_length in [None, n_fft // 4, n_fft // 2]:
S = np.abs(librosa.stft(y, n_fft=n_fft, hop_length=hop_length))
for fmin in [0, 100]:
for fmax in [4000, 8000, sr // 2]:
for threshold in [0.1, 0.2, 0.5]:
yield __test, S, n_fft, hop_length, fmin, fmax, threshold
def test_piptrack_errors():
def __test(y, sr, S, n_fft, hop_length, fmin, fmax, threshold):
pitches, mags = librosa.piptrack(
y=y, sr=sr, S=S, n_fft=n_fft, hop_length=hop_length, fmin=fmin,
fmax=fmax, threshold=threshold)
S = np.asarray([[1, 0, 0]]).T
np.seterr(divide='raise')
yield __test, None, 22050, S, 4096, None, 150.0, 4000.0, 0.1
def test_piptrack():
def __test(S, freq):
pitches, mags = librosa.piptrack(S=S, fmin=100)
idx = (mags > 0)
assert len(idx) > 0
recovered_pitches = pitches[idx]
        # We should be within 12 cents (0.01 octaves) of the target
        assert np.all(np.abs(np.log2(recovered_pitches) - np.log2(freq)) <= 1e-2)
sr = 22050
duration = 3.0
for freq in [110, 220, 440, 880]:
# Generate a sine tone
        y = np.sin(2 * np.pi * freq * np.linspace(0, duration, num=int(duration * sr)))
for n_fft in [1024, 2048, 4096]:
# Using left-aligned frames eliminates reflection artifacts at the boundaries
S = np.abs(librosa.stft(y, n_fft=n_fft, center=False))
yield __test, S, freq
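# Unit bookkeeping for the tolerance above: a deviation of d in log2 frequency
# equals 1200 * d cents, so the 1e-2 threshold corresponds to 12 cents.
assert abs(1200 * 1e-2 - 12.0) < 1e-9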
def test_estimate_tuning():
def __test(target_hz, resolution, bins_per_octave, tuning):
y = np.sin(2 * np.pi * target_hz * t)
tuning_est = librosa.estimate_tuning(resolution=resolution,
bins_per_octave=bins_per_octave,
y=y,
sr=sr,
n_fft=2048,
fmin=librosa.note_to_hz('C4'),
fmax=librosa.note_to_hz('G#9'))
# Round to the proper number of decimals
deviation = np.around(np.abs(tuning - tuning_est),
int(-np.log10(resolution)))
# We'll accept an answer within three bins of the resolution
assert deviation <= 3 * resolution
for sr in [11025, 22050]:
duration = 5.0
        t = np.linspace(0, duration, int(duration * sr))
for resolution in [1e-2]:
for bins_per_octave in [12]:
# test a null-signal tuning estimate
yield (__test, 0.0, resolution, bins_per_octave, 0.0)
for center_note in [69, 84, 108]:
for tuning in np.linspace(-0.5, 0.5, 8, endpoint=False):
target_hz = librosa.midi_to_hz(center_note + tuning)
yield (__test, np.asscalar(target_hz), resolution,
bins_per_octave, tuning)
def test__spectrogram():
y, sr = librosa.load('data/test1_22050.wav')
def __test(n_fft, hop_length, power):
S = np.abs(librosa.stft(y, n_fft=n_fft, hop_length=hop_length))**power
S_, n_fft_ = librosa.core.spectrum._spectrogram(y=y, S=S, n_fft=n_fft,
hop_length=hop_length,
power=power)
# First check with all parameters
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
# Then check with only the audio
S_, n_fft_ = librosa.core.spectrum._spectrogram(y=y, n_fft=n_fft,
hop_length=hop_length,
power=power)
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
# And only the spectrogram
S_, n_fft_ = librosa.core.spectrum._spectrogram(S=S, n_fft=n_fft,
hop_length=hop_length,
power=power)
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
# And only the spectrogram with no shape parameters
S_, n_fft_ = librosa.core.spectrum._spectrogram(S=S, power=power)
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
# And only the spectrogram but with incorrect n_fft
S_, n_fft_ = librosa.core.spectrum._spectrogram(S=S, n_fft=2*n_fft,
power=power)
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
for n_fft in [1024, 2048]:
for hop_length in [None, 512]:
for power in [1, 2]:
yield __test, n_fft, hop_length, power
assert librosa.core.spectrum._spectrogram(y)
def test_logamplitude():
# Fake up some data
def __test(x, ref, amin, top_db):
y = librosa.logamplitude(x,
ref=ref,
amin=amin,
top_db=top_db)
assert np.isrealobj(y)
eq_(y.shape, x.shape)
if top_db is not None:
assert y.min() >= y.max()-top_db
for n in [1, 2, 10]:
x = np.linspace(0, 2e5, num=n)
phase = np.exp(1.j * x)
for ref in [1.0, np.max]:
for amin in [-1, 0, 1e-10, 1e3]:
for top_db in [None, -10, 0, 40, 80]:
tf = __test
if amin <= 0 or (top_db is not None and top_db < 0):
tf = raises(librosa.ParameterError)(__test)
yield tf, x, ref, amin, top_db
yield tf, x * phase, ref, amin, top_db
def test_power_to_db_logamp():
srand()
NOISE_FLOOR = 1e-6
# Make some noise
x = np.abs(np.random.randn(1000)) + NOISE_FLOOR
db1 = librosa.power_to_db(x**2, top_db=None)
db2 = librosa.logamplitude(x**2, top_db=None)
assert np.allclose(db1, db2)
def test_power_to_db():
def __test(y_true, x, rp):
y = librosa.power_to_db(x, ref=rp, top_db=None)
assert np.isclose(y, y_true)
for erp in range(-5, 6):
for k in range(-5, 6):
yield __test, (k-erp)*10, 10.0**k, 10.0**erp
def test_amplitude_to_db():
srand()
NOISE_FLOOR = 1e-6
# Make some noise
x = np.abs(np.random.randn(1000)) + NOISE_FLOOR
db1 = librosa.amplitude_to_db(x, top_db=None)
db2 = librosa.logamplitude(x**2, top_db=None)
assert np.allclose(db1, db2)
def test_db_to_power_inv():
srand()
NOISE_FLOOR = 1e-5
# Make some noise
xp = (np.abs(np.random.randn(1000)) + NOISE_FLOOR)**2
def __test(ref):
db = librosa.power_to_db(xp, ref=ref, top_db=None)
xp2 = librosa.db_to_power(db, ref=ref)
assert np.allclose(xp, xp2)
for ref_p in range(-3, 4):
yield __test, 10.0**ref_p
def test_db_to_power():
def __test(y, rp, x_true):
x = librosa.db_to_power(y, ref=rp)
assert np.isclose(x, x_true), (x, x_true, y, rp)
for erp in range(-5, 6):
for db in range(-100, 101, 10):
yield __test, db, 10.0**erp, 10.0**erp * 10.0**(0.1 * db)
def test_db_to_amplitude_inv():
srand()
NOISE_FLOOR = 1e-5
# Make some noise
xp = np.abs(np.random.randn(1000)) + NOISE_FLOOR
def __test(ref):
db = librosa.amplitude_to_db(xp, ref=ref, top_db=None)
xp2 = librosa.db_to_amplitude(db, ref=ref)
assert np.allclose(xp, xp2)
for ref_p in range(-3, 4):
yield __test, 10.0**ref_p
def test_db_to_amplitude():
srand()
NOISE_FLOOR = 1e-6
# Make some noise
x = np.abs(np.random.randn(1000)) + NOISE_FLOOR
db = librosa.amplitude_to_db(x, top_db=None)
x2 = librosa.db_to_amplitude(db)
assert np.allclose(x, x2)
def test_clicks():
def __test(times, frames, sr, hop_length, click_freq, click_duration, click, length):
y = librosa.clicks(times=times,
frames=frames,
sr=sr,
hop_length=hop_length,
click_freq=click_freq,
click_duration=click_duration,
click=click,
length=length)
if times is not None:
nmax = librosa.time_to_samples(times, sr=sr).max()
else:
nmax = librosa.frames_to_samples(frames, hop_length=hop_length).max()
if length is not None:
assert len(y) == length
elif click is not None:
assert len(y) == nmax + len(click)
test_times = np.linspace(0, 10.0, num=5)
# Bad cases
yield raises(librosa.ParameterError)(__test), None, None, 22050, 512, 1000, 0.1, None, None
yield raises(librosa.ParameterError)(__test), test_times, None, 22050, 512, 1000, 0.1, np.ones((2, 10)), None
yield raises(librosa.ParameterError)(__test), test_times, None, 22050, 512, 1000, 0.1, None, 0
yield raises(librosa.ParameterError)(__test), test_times, None, 22050, 512, 0, 0.1, None, None
yield raises(librosa.ParameterError)(__test), test_times, None, 22050, 512, 1000, 0, None, None
for sr in [11025, 22050]:
for hop_length in [512, 1024]:
test_frames = librosa.time_to_frames(test_times, sr=sr, hop_length=hop_length)
for click in [None, np.ones(sr // 10)]:
for length in [None, 5 * sr, 15 * sr]:
yield __test, test_times, None, sr, hop_length, 1000, 0.1, click, length
yield __test, None, test_frames, sr, hop_length, 1000, 0.1, click, length
def test_fmt_scale():
# This test constructs a single-cycle cosine wave, applies various axis scalings,
# and tests that the FMT is preserved
def __test(scale, n_fmt, over_sample, kind, y_orig, y_res, atol):
# Make sure our signals preserve energy
assert np.allclose(np.sum(y_orig**2), np.sum(y_res**2))
# Scale-transform the original
f_orig = librosa.fmt(y_orig,
t_min=0.5,
n_fmt=n_fmt,
over_sample=over_sample,
kind=kind)
# Force to the same length
n_fmt_res = 2 * len(f_orig) - 2
# Scale-transform the new signal to match
f_res = librosa.fmt(y_res,
t_min=scale * 0.5,
n_fmt=n_fmt_res,
over_sample=over_sample,
kind=kind)
# Due to sampling alignment, we'll get some phase deviation here
# The shape of the spectrum should be approximately preserved though.
assert np.allclose(np.abs(f_orig), np.abs(f_res), atol=atol, rtol=1e-7)
# Our test signal is a single-cycle sine wave
def f(x):
freq = 1
return np.sin(2 * np.pi * freq * x)
bounds = [0, 1.0]
num = 2**8
x = np.linspace(bounds[0], bounds[1], num=num, endpoint=False)
y_orig = f(x)
atol = {'slinear': 1e-4, 'quadratic': 1e-5, 'cubic': 1e-6}
for scale in [2, 3./2, 5./4, 9./8]:
# Scale the time axis
        x_res = np.linspace(bounds[0], bounds[1], num=int(scale * num), endpoint=False)
y_res = f(x_res)
# Re-normalize the energy to match that of y_orig
y_res /= np.sqrt(scale)
for kind in ['slinear', 'quadratic', 'cubic']:
for n_fmt in [None, 64, 128, 256, 512]:
for cur_os in [1, 2, 3]:
yield __test, scale, n_fmt, cur_os, kind, y_orig, y_res, atol[kind]
# Over-sampling with down-scaling gets dicey at the end-points
yield __test, 1./scale, n_fmt, 1, kind, y_res, y_orig, atol[kind]
def test_fmt_fail():
@raises(librosa.ParameterError)
def __test(t_min, n_fmt, over_sample, y):
librosa.fmt(y, t_min=t_min, n_fmt=n_fmt, over_sample=over_sample)
srand()
y = np.random.randn(256)
# Test for bad t_min
for t_min in [-1, 0]:
yield __test, t_min, None, 2, y
# Test for bad n_fmt
for n_fmt in [-1, 0, 1, 2]:
yield __test, 1, n_fmt, 2, y
# Test for bad over_sample
for over_sample in [-1, 0, 0.5]:
yield __test, 1, None, over_sample, y
# Test for bad input
y[len(y)//2:] = np.inf
yield __test, 1, None, 2, y
# Test for insufficient samples
yield __test, 1, None, 1, np.ones(2)
def test_fmt_axis():
srand()
y = np.random.randn(32, 32)
f1 = librosa.fmt(y, axis=-1)
f2 = librosa.fmt(y.T, axis=0).T
assert np.allclose(f1, f2)
def test_harmonics_1d():
x = np.arange(16)
y = np.linspace(-8, 8, num=len(x), endpoint=False)**2
h = [0.25, 0.5, 1, 2, 4]
yh = librosa.interp_harmonics(y, x, h)
eq_(yh.shape[1:], y.shape)
eq_(yh.shape[0], len(h))
for i in range(len(h)):
if h[i] <= 1:
# Check that subharmonics match
step = int(1./h[i])
vals = yh[i, ::step]
assert np.allclose(vals, y[:len(vals)])
else:
# Else check that harmonics match
step = h[i]
vals = y[::step]
assert np.allclose(vals, yh[i, :len(vals)])
def test_harmonics_2d():
x = np.arange(16)
y = np.linspace(-8, 8, num=len(x), endpoint=False)**2
y = np.tile(y, (5, 1)).T
h = [0.25, 0.5, 1, 2, 4]
yh = librosa.interp_harmonics(y, x, h, axis=0)
eq_(yh.shape[1:], y.shape)
eq_(yh.shape[0], len(h))
for i in range(len(h)):
if h[i] <= 1:
# Check that subharmonics match
step = int(1./h[i])
vals = yh[i, ::step]
assert np.allclose(vals, y[:len(vals)])
else:
# Else check that harmonics match
step = h[i]
vals = y[::step]
assert np.allclose(vals, yh[i, :len(vals)])
@raises(librosa.ParameterError)
def test_harmonics_badshape_1d():
freqs = np.zeros(100)
obs = np.zeros((5, 10))
librosa.interp_harmonics(obs, freqs, [1])
@raises(librosa.ParameterError)
def test_harmonics_badshape_2d():
freqs = np.zeros((5, 5))
obs = np.zeros((5, 10))
librosa.interp_harmonics(obs, freqs, [1])
def test_harmonics_2d_varying():
x = np.arange(16)
y = np.linspace(-8, 8, num=len(x), endpoint=False)**2
x = np.tile(x, (5, 1)).T
y = np.tile(y, (5, 1)).T
h = [0.25, 0.5, 1, 2, 4]
yh = librosa.interp_harmonics(y, x, h, axis=0)
eq_(yh.shape[1:], y.shape)
eq_(yh.shape[0], len(h))
for i in range(len(h)):
if h[i] <= 1:
# Check that subharmonics match
step = int(1./h[i])
vals = yh[i, ::step]
assert np.allclose(vals, y[:len(vals)])
else:
# Else check that harmonics match
step = h[i]
vals = y[::step]
assert np.allclose(vals, yh[i, :len(vals)])
def test_show_versions():
# Nothing to test here, except that everything passes.
librosa.show_versions()
def test_padding():
# A simple test to verify that pad_mode is used properly by giving
# different answers for different modes.
# Does not validate the correctness of each mode.
y, sr = librosa.load('data/test1_44100.wav', sr=None, mono=True,
duration=1)
def __test_stft(center, pad_mode):
D1 = librosa.stft(y, center=center, pad_mode='reflect')
D2 = librosa.stft(y, center=center, pad_mode=pad_mode)
assert D1.shape == D2.shape
if center and pad_mode != 'reflect':
assert not np.allclose(D1, D2)
else:
assert np.allclose(D1, D2)
def __test_ifgram(center, pad_mode):
D1, F1 = librosa.ifgram(y, center=center, pad_mode='reflect')
D2, F2 = librosa.ifgram(y, center=center, pad_mode=pad_mode)
assert D1.shape == D2.shape
if center and pad_mode != 'reflect':
assert not np.allclose(D1, D2)
else:
assert np.allclose(D1, D2)
assert np.allclose(F1, F2)
def __test_cqt(pad_mode):
D1 = librosa.cqt(y, pad_mode='reflect')
D2 = librosa.cqt(y, pad_mode=pad_mode)
assert D1.shape == D2.shape
if pad_mode != 'reflect':
assert not np.allclose(D1, D2)
else:
assert np.allclose(D1, D2)
def __test_hybrid_cqt(pad_mode):
D1 = librosa.hybrid_cqt(y, pad_mode='reflect')
D2 = librosa.hybrid_cqt(y, pad_mode=pad_mode)
assert D1.shape == D2.shape
if pad_mode != 'reflect':
assert not np.allclose(D1, D2)
else:
assert np.allclose(D1, D2)
def __test_pseudo_cqt(pad_mode):
D1 = librosa.pseudo_cqt(y, pad_mode='reflect')
D2 = librosa.pseudo_cqt(y, pad_mode=pad_mode)
assert D1.shape == D2.shape
if pad_mode != 'reflect':
assert not np.allclose(D1, D2)
else:
assert np.allclose(D1, D2)
for pad_mode in ['reflect', 'constant']:
yield __test_cqt, pad_mode
yield __test_hybrid_cqt, pad_mode
yield __test_pseudo_cqt, pad_mode
for center in [False, True]:
yield __test_stft, center, pad_mode
yield __test_ifgram, center, pad_mode
def test_iirt():
gt = scipy.io.loadmat(os.path.join('data', 'features-CT-cqt'), squeeze_me=True)['f_cqt']
y, sr = librosa.load(os.path.join('data', 'test1_44100.wav'))
mut = librosa.iirt(y, hop_length=2205, win_length=4410)
assert np.allclose(mut, gt[23:108, :mut.shape[1]], atol=1.8)
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import six
__all__ = (
'ComponentConfig',
'HelloReturn',
'Accept',
'Deny',
'Challenge',
'HelloDetails',
'SessionDetails',
'CloseDetails',
'SubscribeOptions',
'EventDetails',
'PublishOptions',
'RegisterOptions',
'CallDetails',
'CallOptions',
'CallResult',
)
class ComponentConfig(object):
"""
WAMP application component configuration. An instance of this class is
provided to the constructor of :class:`autobahn.wamp.protocol.ApplicationSession`.
"""
def __init__(self, realm=None, extra=None):
"""
:param realm: The realm the session should join.
:type realm: unicode
:param extra: Optional user-supplied object with extra
configuration. This can be any object you like, and is
accessible in your `ApplicationSession` subclass via
`self.config.extra`. `dict` is a good default choice.
"""
if six.PY2 and type(realm) == str:
realm = six.u(realm)
self.realm = realm
self.extra = extra
def __str__(self):
return "ComponentConfig(realm = {0}, extra = {1})".format(self.realm, self.extra)
class HelloReturn(object):
"""
Base class for ``HELLO`` return information.
"""
class Accept(HelloReturn):
"""
Information to accept a ``HELLO``.
"""
def __init__(self, authid=None, authrole=None, authmethod=None, authprovider=None):
"""
:param authid: The authentication ID the client is assigned, e.g. ``"joe"`` or ``"joe@example.com"``.
:type authid: unicode
:param authrole: The authentication role the client is assigned, e.g. ``"anonymous"``, ``"user"`` or ``"com.myapp.user"``.
:type authrole: unicode
:param authmethod: The authentication method that was used to authenticate the client, e.g. ``"cookie"`` or ``"wampcra"``.
:type authmethod: unicode
:param authprovider: The authentication provider that was used to authenticate the client, e.g. ``"mozilla-persona"``.
:type authprovider: unicode
"""
if six.PY2:
if type(authid) == str:
authid = six.u(authid)
if type(authrole) == str:
authrole = six.u(authrole)
if type(authmethod) == str:
authmethod = six.u(authmethod)
if type(authprovider) == str:
authprovider = six.u(authprovider)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
assert(authprovider is None or type(authprovider) == six.text_type)
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
def __str__(self):
return "Accept(authid = {0}, authrole = {1}, authmethod = {2}, authprovider = {3})".format(self.authid, self.authrole, self.authmethod, self.authprovider)
class Deny(HelloReturn):
"""
Information to deny a ``HELLO``.
"""
def __init__(self, reason=u"wamp.error.not_authorized", message=None):
"""
        :param reason: The reason for denying the authentication (a URI, e.g. ``wamp.error.not_authorized``)
:type reason: unicode
:param message: A human readable message (for logging purposes).
:type message: unicode
"""
if six.PY2:
if type(reason) == str:
reason = six.u(reason)
if type(message) == str:
message = six.u(message)
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
self.reason = reason
self.message = message
def __str__(self):
return "Deny(reason = {0}, message = '{1}')".format(self.reason, self.message)
class Challenge(HelloReturn):
"""
Information to challenge the client upon ``HELLO``.
"""
def __init__(self, method, extra=None):
"""
:param method: The authentication method for the challenge (e.g. ``"wampcra"``).
:type method: unicode
:param extra: Any extra information for the authentication challenge. This is
specific to the authentication method.
:type extra: dict
"""
if six.PY2:
if type(method) == str:
method = six.u(method)
self.method = method
self.extra = extra or {}
def __str__(self):
return "Challenge(method = {0}, extra = {1})".format(self.method, self.extra)
class HelloDetails(object):
"""
Provides details of a WAMP session while still attaching.
"""
def __init__(self, roles=None, authmethods=None, authid=None, pending_session=None):
"""
:param roles: The WAMP roles and features supported by the attaching client.
:type roles: dict
:param authmethods: The authentication methods the client is willing to perform.
:type authmethods: list
:param authid: The authentication ID the client wants to authenticate as. Required for WAMP-CRA.
:type authid: str
:param pending_session: The session ID the session will get once successfully attached.
:type pending_session: int
"""
self.roles = roles
self.authmethods = authmethods
self.authid = authid
self.pending_session = pending_session
def __str__(self):
return "HelloDetails(roles = {0}, authmethods = {1}, authid = {2}, pending_session = {3})".format(self.roles, self.authmethods, self.authid, self.pending_session)
class SessionDetails(object):
"""
Provides details for a WAMP session upon open.
.. seealso:: :func:`autobahn.wamp.interfaces.ISession.onJoin`
"""
def __init__(self, realm, session, authid=None, authrole=None, authmethod=None, authprovider=None):
"""
Ctor.
:param realm: The realm this WAMP session is attached to.
:type realm: unicode
:param session: WAMP session ID of this session.
:type session: int
"""
self.realm = realm
self.session = session
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
def __str__(self):
return "SessionDetails(realm = {0}, session = {1}, authid = {2}, authrole = {3}, authmethod = {4})".format(self.realm, self.session, self.authid, self.authrole, self.authmethod)
class CloseDetails(object):
"""
    Provides details for a WAMP session upon close.
.. seealso:: :func:`autobahn.wamp.interfaces.ISession.onLeave`
"""
REASON_DEFAULT = u"wamp.close.normal"
REASON_TRANSPORT_LOST = u"wamp.close.transport_lost"
def __init__(self, reason=None, message=None):
"""
        :param reason: The close reason (a URI, e.g. ``wamp.close.normal``)
:type reason: unicode
:param message: Closing log message.
:type message: unicode
"""
self.reason = reason
self.message = message
def __str__(self):
return "CloseDetails(reason = {0}, message = '{1}'')".format(self.reason, self.message)
class SubscribeOptions(object):
"""
Used to provide options for subscribing in
:func:`autobahn.wamp.interfaces.ISubscriber.subscribe`.
"""
def __init__(self, match=None, details_arg=None):
"""
:param match: The topic matching method to be used for the subscription.
:type match: unicode
:param details_arg: When invoking the handler, provide event details
in this keyword argument to the callable.
:type details_arg: str
"""
assert(match is None or (type(match) == six.text_type and match in [u'exact', u'prefix', u'wildcard']))
assert(details_arg is None or type(details_arg) == str)
self.match = match
self.details_arg = details_arg
def message_attr(self):
# options dict as sent within WAMP message
return {
'match': self.match
}
def __str__(self):
return "SubscribeOptions(match = {0}, details_arg = {1})".format(self.match, self.details_arg)
class EventDetails(object):
"""
Provides details on an event when calling an event handler
previously registered.
"""
def __init__(self, publication, publisher=None, topic=None):
"""
Ctor.
:param publication: The publication ID of the event (always present).
:type publication: int
:param publisher: The WAMP session ID of the original publisher of this event.
:type publisher: int
:param topic: For pattern-based subscriptions, the actual topic URI being published to.
        :type topic: unicode or None
"""
self.publication = publication
self.publisher = publisher
self.topic = topic
def __str__(self):
return "EventDetails(publication = {0}, publisher = {1}, topic = {2})".format(self.publication, self.publisher, self.topic)
class PublishOptions(object):
"""
    Used to provide options for publishing in
:func:`autobahn.wamp.interfaces.IPublisher.publish`.
"""
def __init__(self,
acknowledge=None,
exclude_me=None,
exclude=None,
eligible=None,
disclose_me=None):
"""
:param acknowledge: If ``True``, acknowledge the publication with a success or
error response.
:type acknowledge: bool
:param exclude_me: If ``True``, exclude the publisher from receiving the event, even
if he is subscribed (and eligible).
:type exclude_me: bool
:param exclude: List of WAMP session IDs to exclude from receiving this event.
:type exclude: list of int
:param eligible: List of WAMP session IDs eligible to receive this event.
:type eligible: list of int
:param disclose_me: If ``True``, request to disclose the publisher of this event
to subscribers.
:type disclose_me: bool
"""
# filter out None entries from exclude list, so it's easier for callers
if type(exclude) == list:
exclude = [x for x in exclude if x is not None]
assert(acknowledge is None or type(acknowledge) == bool)
assert(exclude_me is None or type(exclude_me) == bool)
assert(exclude is None or (type(exclude) == list and all(type(x) in six.integer_types for x in exclude)))
assert(eligible is None or (type(eligible) == list and all(type(x) in six.integer_types for x in eligible)))
assert(disclose_me is None or type(disclose_me) == bool)
self.acknowledge = acknowledge
self.exclude_me = exclude_me
self.exclude = exclude
self.eligible = eligible
self.disclose_me = disclose_me
def message_attr(self):
# options dict as sent within WAMP message
return {
u'acknowledge': self.acknowledge,
u'exclude_me': self.exclude_me,
u'exclude': self.exclude,
u'eligible': self.eligible,
u'disclose_me': self.disclose_me
}
def __str__(self):
return "PublishOptions(acknowledge = {0}, exclude_me = {1}, exclude = {2}, eligible = {3}, disclose_me = {4})".format(self.acknowledge, self.exclude_me, self.exclude, self.eligible, self.disclose_me)
class RegisterOptions(object):
"""
Used to provide options for registering in
:func:`autobahn.wamp.interfaces.ICallee.register`.
"""
def __init__(self, match=None, invoke=None, details_arg=None):
"""
:param details_arg: When invoking the endpoint, provide call details
in this keyword argument to the callable.
:type details_arg: str
"""
assert(match is None or (type(match) == six.text_type and match in [u'exact', u'prefix', u'wildcard']))
assert(invoke is None or (type(invoke) == six.text_type and invoke in [u'single', u'first', u'last', u'roundrobin', u'random']))
assert(details_arg is None or type(details_arg) == str)
self.match = match
self.invoke = invoke
self.details_arg = details_arg
def message_attr(self):
# options dict as sent within WAMP message
return {
u'match': self.match,
u'invoke': self.invoke
}
def __str__(self):
return "RegisterOptions(match = {0}, invoke = {1}, details_arg = {2})".format(self.match, self.invoke, self.details_arg)
class CallDetails(object):
"""
Provides details on a call when an endpoint previously
registered is being called and opted to receive call details.
"""
def __init__(self, progress=None, caller=None, procedure=None):
"""
Ctor.
:param progress: A callable that will receive progressive call results.
:type progress: callable
:param caller: The WAMP session ID of the caller, if the latter is disclosed.
:type caller: int
:param procedure: For pattern-based registrations, the actual procedure URI being called.
:type procedure: unicode or None
"""
self.progress = progress
self.caller = caller
self.procedure = procedure
def __str__(self):
return "CallDetails(progress = {0}, caller = {1}, procedure = {2})".format(self.progress, self.caller, self.procedure)
class CallOptions(object):
"""
Used to provide options for calling with :func:`autobahn.wamp.interfaces.ICaller.call`.
"""
def __init__(self,
on_progress=None,
timeout=None,
disclose_me=None):
"""
:param on_progress: A callback that will be called when the remote endpoint
called yields interim call progress results.
:type on_progress: callable
:param timeout: Time in seconds after which the call should be automatically canceled.
:type timeout: float
        :param disclose_me: Request to disclose the identity of the caller (its WAMP session ID)
           to Callees. Note that a Dealer, depending on Dealer configuration, might
           reject the request, or might disclose the caller's identity even without
           a request to do so.
:type disclose_me: bool
"""
assert(on_progress is None or callable(on_progress))
assert(timeout is None or (type(timeout) in list(six.integer_types) + [float] and timeout > 0))
assert(disclose_me is None or type(disclose_me) == bool)
self.on_progress = on_progress
self.timeout = timeout
self.disclose_me = disclose_me
def message_attr(self):
# options dict as sent within WAMP message
res = {
u'timeout': self.timeout,
u'disclose_me': self.disclose_me
}
if self.on_progress:
            res[u'receive_progress'] = True
return res
def __str__(self):
return "CallOptions(on_progress = {0}, timeout = {1}, disclose_me = {2})".format(self.on_progress, self.timeout, self.disclose_me)
class CallResult(object):
"""
Wrapper for remote procedure call results that contain multiple positional
return values or keyword return values.
"""
def __init__(self, *results, **kwresults):
"""
Constructor.
:param results: The positional result values.
:type results: list
:param kwresults: The keyword result values.
:type kwresults: dict
"""
self.results = results
self.kwresults = kwresults
def __str__(self):
return "CallResult(results = {0}, kwresults = {1})".format(self.results, self.kwresults)
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on explorations.
All functions here should be agnostic of how ExplorationModel objects are
stored in the database. In particular, the various query methods should
delegate to the Exploration model class. This will enable the exploration
storage model to be changed without affecting this module and others above it.
"""
__author__ = 'Sean Lip'
import copy
import datetime
import logging
import os
import StringIO
import zipfile
from core.domain import event_services
from core.domain import exp_domain
from core.domain import fs_domain
from core.domain import rights_manager
from core.platform import models
import feconf
memcache_services = models.Registry.import_memcache_services()
search_services = models.Registry.import_search_services()
taskqueue_services = models.Registry.import_taskqueue_services()
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
import utils
# The 'create_new' command takes additional 'title' and 'category' parameters.
CMD_CREATE_NEW = 'create_new'
# Name for the exploration search index.
SEARCH_INDEX_EXPLORATIONS = 'explorations'
def _migrate_states_schema(versioned_exploration_states):
"""Holds the responsibility of performing a step-by-step, sequential update
of an exploration states structure based on the schema version of the input
exploration dictionary. This is very similar to the YAML conversion process
found in exp_domain.py and, in fact, many of the conversion functions for
states are also used in the YAML conversion pipeline. If the current
exploration states schema version changes
(feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION), a new conversion
function must be added and some code appended to this function to account
for that new version.
Args:
versioned_exploration_states: A dict with two keys:
- states_schema_version: the states schema version for the
exploration.
- states: the dict of states comprising the exploration. The keys in
this dict are state names.
"""
exploration_states_schema_version = versioned_exploration_states[
'states_schema_version']
if (exploration_states_schema_version is None
or exploration_states_schema_version < 1):
exploration_states_schema_version = 0
if not (0 <= exploration_states_schema_version
<= feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d and unversioned exploration '
'state schemas at present.' %
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
# Check for conversion to v1.
if exploration_states_schema_version == 0:
exp_domain.Exploration.update_states_v0_to_v1_from_model(
versioned_exploration_states)
exploration_states_schema_version = 1
# Check for conversion to v2.
if exploration_states_schema_version == 1:
exp_domain.Exploration.update_states_v1_to_v2_from_model(
versioned_exploration_states)
exploration_states_schema_version = 2
# Check for conversion to v3.
if exploration_states_schema_version == 2:
exp_domain.Exploration.update_states_v2_to_v3_from_model(
versioned_exploration_states)
exploration_states_schema_version = 3
# Check for conversion to v4.
if exploration_states_schema_version == 3:
exp_domain.Exploration.update_states_v3_to_v4_from_model(
versioned_exploration_states)
exploration_states_schema_version = 4
# Check for conversion to v5.
if exploration_states_schema_version == 4:
exp_domain.Exploration.update_states_v4_to_v5_from_model(
versioned_exploration_states)
exploration_states_schema_version = 5
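# The cascade above generalizes to a loop over per-version conversion
# functions. A hypothetical equivalent sketch (assuming the same
# update_states_vN_to_vN+1_from_model classmethods exist for every step):
def _migrate_states_schema_by_loop(versioned_exploration_states, from_version):
    current_version = from_version
    while current_version < feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION:
        conversion_fn = getattr(
            exp_domain.Exploration,
            'update_states_v%d_to_v%d_from_model' % (
                current_version, current_version + 1))
        conversion_fn(versioned_exploration_states)
        current_version += 1
    return current_version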
# Repository GET methods.
def _get_exploration_memcache_key(exploration_id, version=None):
"""Returns a memcache key for an exploration."""
if version:
return 'exploration-version:%s:%s' % (exploration_id, version)
else:
return 'exploration:%s' % exploration_id
def get_exploration_from_model(exploration_model, run_conversion=True):
"""Returns an Exploration domain object given an exploration model loaded
from the datastore.
If run_conversion is True, then the exploration's states schema version
will be checked against the current states schema version. If they do not
match, the exploration will be automatically updated to the latest states
schema version.
IMPORTANT NOTE TO DEVELOPERS: In general, run_conversion should never be
False. This option is only used for testing that the states schema version
migration works correctly, and it should never be changed otherwise.
"""
# Ensure the original exploration model does not get altered.
versioned_exploration_states = {
'states_schema_version': exploration_model.states_schema_version,
'states': copy.deepcopy(exploration_model.states)
}
# If the exploration uses the latest states schema version, no conversion
# is necessary.
if (run_conversion and exploration_model.states_schema_version !=
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
_migrate_states_schema(versioned_exploration_states)
return exp_domain.Exploration(
exploration_model.id, exploration_model.title,
exploration_model.category, exploration_model.objective,
exploration_model.language_code, exploration_model.tags,
exploration_model.blurb, exploration_model.author_notes,
exploration_model.default_skin, exploration_model.skin_customizations,
versioned_exploration_states['states_schema_version'],
exploration_model.init_state_name,
versioned_exploration_states['states'],
exploration_model.param_specs, exploration_model.param_changes,
exploration_model.version, exploration_model.created_on,
exploration_model.last_updated)
def get_exploration_summary_from_model(exp_summary_model):
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.tags,
exp_summary_model.ratings, exp_summary_model.status,
exp_summary_model.community_owned, exp_summary_model.owner_ids,
exp_summary_model.editor_ids, exp_summary_model.viewer_ids,
exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated
)
def get_exploration_by_id(exploration_id, strict=True, version=None):
"""Returns a domain object representing an exploration."""
exploration_memcache_key = _get_exploration_memcache_key(
exploration_id, version=version)
memcached_exploration = memcache_services.get_multi(
[exploration_memcache_key]).get(exploration_memcache_key)
if memcached_exploration is not None:
return memcached_exploration
else:
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=strict, version=version)
if exploration_model:
exploration = get_exploration_from_model(exploration_model)
memcache_services.set_multi({
exploration_memcache_key: exploration})
return exploration
else:
return None
def get_exploration_summary_by_id(exploration_id):
"""Returns a domain object representing an exploration summary."""
# TODO(msl): Maybe use memcache similarly to get_exploration_by_id.
exp_summary_model = exp_models.ExpSummaryModel.get(
exploration_id)
if exp_summary_model:
exp_summary = get_exploration_summary_from_model(exp_summary_model)
return exp_summary
else:
return None
def get_multiple_explorations_by_id(exp_ids, strict=True):
"""Returns a dict of domain objects representing explorations with the
given ids as keys. If an exp_id is not present it is not included in the
return dict.
"""
exp_ids = set(exp_ids)
result = {}
uncached = []
memcache_keys = [_get_exploration_memcache_key(i) for i in exp_ids]
cache_result = memcache_services.get_multi(memcache_keys)
for exp_obj in cache_result.itervalues():
result[exp_obj.id] = exp_obj
for _id in exp_ids:
if _id not in result:
uncached.append(_id)
db_exp_models = exp_models.ExplorationModel.get_multi(uncached)
db_results_dict = {}
not_found = []
for i, eid in enumerate(uncached):
model = db_exp_models[i]
if model:
exploration = get_exploration_from_model(model)
db_results_dict[eid] = exploration
else:
logging.info('Tried to fetch exploration with id %s, but no such '
'exploration exists in the datastore' % eid)
not_found.append(eid)
if strict and not_found:
raise ValueError(
'Couldn\'t find explorations with the following ids:\n%s'
% '\n'.join(not_found))
cache_update = {
eid: db_results_dict[eid] for eid in db_results_dict.iterkeys()
if db_results_dict[eid] is not None
}
if cache_update:
memcache_services.set_multi(cache_update)
result.update(db_results_dict)
return result
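# Illustrative usage sketch (hypothetical ids): with strict=False, ids that do
# not exist are simply absent from the returned dict instead of raising.
def _example_get_multiple_explorations():
    explorations = get_multiple_explorations_by_id(
        ['exp_id_1', 'missing_id'], strict=False)
    return sorted(explorations.keys())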
def get_new_exploration_id():
"""Returns a new exploration id."""
return exp_models.ExplorationModel.get_new_id('')
def is_exp_summary_editable(exp_summary, user_id=None):
"""Checks if a given user may edit an exploration by checking
the given domain object."""
return user_id is not None and (
user_id in exp_summary.editor_ids
or user_id in exp_summary.owner_ids
or exp_summary.community_owned)
# Query methods.
def get_exploration_titles_and_categories(exp_ids):
"""Returns exploration titles and categories for the given ids.
The result is a dict with exploration ids as keys. The corresponding values
are dicts with the keys 'title' and 'category'.
Any invalid exp_ids will not be included in the return dict. No error will
be raised.
"""
explorations = [
(get_exploration_from_model(e) if e else None)
for e in exp_models.ExplorationModel.get_multi(exp_ids)]
result = {}
for ind, exploration in enumerate(explorations):
if exploration is None:
            logging.error(
                'Could not find exploration corresponding to id %s'
                % exp_ids[ind])
else:
result[exploration.id] = {
'title': exploration.title,
'category': exploration.category,
}
return result
def _get_exploration_summary_dicts_from_models(exp_summary_models):
"""Given an iterable of ExpSummaryModel instances, create a dict containing
corresponding exploration summary domain objects, keyed by id."""
exploration_summaries = [
get_exploration_summary_from_model(exp_summary_model)
for exp_summary_model in exp_summary_models]
result = {}
for exp_summary in exploration_summaries:
result[exp_summary.id] = exp_summary
return result
def get_exploration_summaries_matching_ids(exp_ids):
"""Given a list of exploration ids, return a list with the corresponding
summary domain objects (or None if the corresponding summary does not
exist).
"""
return [
(get_exploration_summary_from_model(model) if model else None)
for model in exp_models.ExpSummaryModel.get_multi(exp_ids)]
def get_exploration_summaries_matching_query(query_string, cursor=None):
"""Returns a list with all exploration summary domain objects matching the
given search query string, as well as a search cursor for future fetches.
This method returns exactly feconf.GALLERY_PAGE_SIZE results if there are
at least that many, otherwise it returns all remaining results. (If this
behaviour does not occur, an error will be logged.) The method also returns
a search cursor.
"""
MAX_ITERATIONS = 10
summary_models = []
search_cursor = cursor
for i in range(MAX_ITERATIONS):
remaining_to_fetch = feconf.GALLERY_PAGE_SIZE - len(summary_models)
exp_ids, search_cursor = search_explorations(
query_string, remaining_to_fetch, cursor=search_cursor)
invalid_exp_ids = []
for ind, model in enumerate(
exp_models.ExpSummaryModel.get_multi(exp_ids)):
if model is not None:
summary_models.append(model)
else:
invalid_exp_ids.append(exp_ids[ind])
if len(summary_models) == feconf.GALLERY_PAGE_SIZE or (
search_cursor is None):
break
else:
logging.error(
'Search index contains stale exploration ids: %s' %
', '.join(invalid_exp_ids))
if (len(summary_models) < feconf.GALLERY_PAGE_SIZE
and search_cursor is not None):
logging.error(
'Could not fulfill search request for query string %s; at least '
'%s retries were needed.' % (query_string, MAX_ITERATIONS))
return ([
get_exploration_summary_from_model(summary_model)
for summary_model in summary_models
], search_cursor)
def get_non_private_exploration_summaries():
"""Returns a dict with all non-private exploration summary domain objects,
keyed by their id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_non_private())
def get_all_exploration_summaries():
"""Returns a dict with all exploration summary domain objects,
keyed by their id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_all())
def get_private_at_least_viewable_exploration_summaries(user_id):
"""Returns a dict with all exploration summary domain objects that are
at least viewable by given user. The dict is keyed by exploration id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_private_at_least_viewable(
user_id=user_id))
def get_at_least_editable_exploration_summaries(user_id):
"""Returns a dict with all exploration summary domain objects that are
at least editable by given user. The dict is keyed by exploration id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_at_least_editable(
user_id=user_id))
def count_explorations():
"""Returns the total number of explorations."""
return exp_models.ExplorationModel.get_exploration_count()
# Methods for exporting states and explorations to other formats.
def export_to_zip_file(exploration_id, version=None):
"""Returns a ZIP archive of the exploration."""
exploration = get_exploration_by_id(exploration_id, version=version)
yaml_repr = exploration.to_yaml()
o = StringIO.StringIO()
with zipfile.ZipFile(o, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr('%s.yaml' % exploration.title, yaml_repr)
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
dir_list = fs.listdir('')
for filepath in dir_list:
# Currently, the version number of all files is 1, since they are
# not modifiable post-upload.
# TODO(sll): When allowing editing of files, implement versioning
# for them.
file_contents = fs.get(filepath, version=1)
str_filepath = 'assets/%s' % filepath
assert isinstance(str_filepath, str)
unicode_filepath = str_filepath.decode('utf-8')
zf.writestr(unicode_filepath, file_contents)
return o.getvalue()
def export_states_to_yaml(exploration_id, version=None, width=80):
"""Returns a python dictionary of the exploration, whose keys are state
names and values are yaml strings representing the state contents with
lines wrapped at 'width' characters."""
exploration = get_exploration_by_id(exploration_id, version=version)
exploration_dict = {}
for state in exploration.states:
exploration_dict[state] = utils.yaml_from_dict(
exploration.states[state].to_dict(), width=width)
return exploration_dict
# Repository SAVE and DELETE methods.
def apply_change_list(exploration_id, change_list):
"""Applies a changelist to a pristine exploration and returns the result.
Each entry in change_list is a dict that represents an ExplorationChange
object.
Returns:
the resulting exploration domain object.
"""
exploration = get_exploration_by_id(exploration_id)
try:
changes = [exp_domain.ExplorationChange(change_dict)
for change_dict in change_list]
for change in changes:
if change.cmd == exp_domain.CMD_ADD_STATE:
exploration.add_states([change.state_name])
elif change.cmd == exp_domain.CMD_RENAME_STATE:
exploration.rename_state(
change.old_state_name, change.new_state_name)
elif change.cmd == exp_domain.CMD_DELETE_STATE:
exploration.delete_state(change.state_name)
elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY:
state = exploration.states[change.state_name]
if (change.property_name ==
exp_domain.STATE_PROPERTY_PARAM_CHANGES):
state.update_param_changes(change.new_value)
elif change.property_name == exp_domain.STATE_PROPERTY_CONTENT:
state.update_content(change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_ID):
state.update_interaction_id(change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS):
state.update_interaction_customization_args(
change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_HANDLERS):
raise utils.InvalidInputException(
'Editing interaction handlers is no longer supported')
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS):
state.update_interaction_answer_groups(change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME):
state.update_interaction_default_outcome(change.new_value)
elif change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY:
if change.property_name == 'title':
exploration.update_title(change.new_value)
elif change.property_name == 'category':
exploration.update_category(change.new_value)
elif change.property_name == 'objective':
exploration.update_objective(change.new_value)
elif change.property_name == 'language_code':
exploration.update_language_code(change.new_value)
elif change.property_name == 'tags':
exploration.update_tags(change.new_value)
elif change.property_name == 'blurb':
exploration.update_blurb(change.new_value)
elif change.property_name == 'author_notes':
exploration.update_author_notes(change.new_value)
elif change.property_name == 'param_specs':
exploration.update_param_specs(change.new_value)
elif change.property_name == 'param_changes':
exploration.update_param_changes(change.new_value)
elif change.property_name == 'default_skin_id':
exploration.update_default_skin_id(change.new_value)
elif change.property_name == 'init_state_name':
exploration.update_init_state_name(change.new_value)
elif (change.cmd ==
exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION):
# Loading the exploration model from the datastore into an
# Exploration domain object automatically converts it to use the
# latest states schema version. As a result, simply resaving the
# exploration is sufficient to apply the states schema update.
continue
return exploration
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, exploration_id, change_list)
)
raise
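# A minimal sketch of a change_list accepted by apply_change_list(). The
# state names and exploration id are hypothetical; the dict keys follow
# the attributes read off ExplorationChange above:
#
#     change_list = [
#         {'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'New state'},
#         {'cmd': exp_domain.CMD_RENAME_STATE,
#          'old_state_name': 'New state', 'new_state_name': 'Intro'},
#     ]
#     updated_exploration = apply_change_list('exp_id_1', change_list)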
def get_summary_of_change_list(base_exploration, change_list):
"""Applies a changelist to a pristine exploration and returns a summary.
Each entry in change_list is a dict that represents an ExplorationChange
object.
Returns:
a dict with five keys:
exploration_property_changes: a dict, where each key is a property_name
of the exploration, and the corresponding values are dicts with keys
old_value and new_value.
state_property_changes: a dict, where each key is a state name, and the
corresponding values are dicts; the keys of these dicts represent
properties of the state, and the corresponding values are dicts with
keys old_value and new_value. If a state name is changed, this is
listed as a property name change under the old state name in the
outer dict.
changed_states: a list of state names. This indicates that the state
has changed but we do not know what the changes are. This can happen
for complicated operations like removing a state and later adding a
new state with the same name as the removed state.
added_states: a list of added state names.
deleted_states: a list of deleted state names.
"""
# TODO(sll): This really needs tests, especially the diff logic. Probably
# worth comparing with the actual changed exploration.
# Ensure that the original exploration does not get altered.
exploration = copy.deepcopy(base_exploration)
changes = [
exp_domain.ExplorationChange(change_dict)
for change_dict in change_list]
exploration_property_changes = {}
state_property_changes = {}
changed_states = []
added_states = []
deleted_states = []
original_state_names = {
state_name: state_name for state_name in exploration.states.keys()
}
for change in changes:
if change.cmd == exp_domain.CMD_ADD_STATE:
if change.state_name in changed_states:
continue
elif change.state_name in deleted_states:
changed_states.append(change.state_name)
del state_property_changes[change.state_name]
deleted_states.remove(change.state_name)
else:
added_states.append(change.state_name)
original_state_names[change.state_name] = change.state_name
elif change.cmd == exp_domain.CMD_RENAME_STATE:
orig_state_name = original_state_names[change.old_state_name]
original_state_names[change.new_state_name] = orig_state_name
if orig_state_name in changed_states:
continue
if orig_state_name not in state_property_changes:
state_property_changes[orig_state_name] = {}
if 'name' not in state_property_changes[orig_state_name]:
state_property_changes[orig_state_name]['name'] = {
'old_value': change.old_state_name
}
state_property_changes[orig_state_name]['name']['new_value'] = (
change.new_state_name)
elif change.cmd == exp_domain.CMD_DELETE_STATE:
orig_state_name = original_state_names[change.state_name]
if orig_state_name in changed_states:
continue
elif orig_state_name in added_states:
added_states.remove(orig_state_name)
else:
deleted_states.append(orig_state_name)
elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY:
orig_state_name = original_state_names[change.state_name]
if orig_state_name in changed_states:
continue
property_name = change.property_name
if orig_state_name not in state_property_changes:
state_property_changes[orig_state_name] = {}
if property_name not in state_property_changes[orig_state_name]:
state_property_changes[orig_state_name][property_name] = {
'old_value': change.old_value
}
state_property_changes[orig_state_name][property_name][
'new_value'] = change.new_value
elif change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY:
property_name = change.property_name
if property_name not in exploration_property_changes:
exploration_property_changes[property_name] = {
'old_value': change.old_value
}
exploration_property_changes[property_name]['new_value'] = (
change.new_value)
elif (change.cmd ==
exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION):
continue
unchanged_exploration_properties = []
for property_name in exploration_property_changes:
if (exploration_property_changes[property_name]['old_value'] ==
exploration_property_changes[property_name]['new_value']):
unchanged_exploration_properties.append(property_name)
for property_name in unchanged_exploration_properties:
del exploration_property_changes[property_name]
unchanged_state_names = []
for state_name in state_property_changes:
unchanged_state_properties = []
changes = state_property_changes[state_name]
for property_name in changes:
if (changes[property_name]['old_value'] ==
changes[property_name]['new_value']):
unchanged_state_properties.append(property_name)
for property_name in unchanged_state_properties:
del changes[property_name]
if len(changes) == 0:
unchanged_state_names.append(state_name)
for state_name in unchanged_state_names:
del state_property_changes[state_name]
return {
'exploration_property_changes': exploration_property_changes,
'state_property_changes': state_property_changes,
'changed_states': changed_states,
'added_states': added_states,
'deleted_states': deleted_states,
}
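# Worked example based on the logic above: for the two-entry change_list
# sketched after apply_change_list() (add 'New state', then rename it to
# 'Intro'), the returned summary would be:
#
#     {
#         'exploration_property_changes': {},
#         'state_property_changes': {
#             'New state': {'name': {'old_value': 'New state',
#                                    'new_value': 'Intro'}},
#         },
#         'changed_states': [],
#         'added_states': ['New state'],
#         'deleted_states': [],
#     }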
def _save_exploration(
committer_id, exploration, commit_message, change_list):
"""Validates an exploration and commits it to persistent storage.
If successful, increments the version number of the incoming exploration
domain object by 1.
"""
if change_list is None:
change_list = []
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
if exploration_rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
exploration_model = exp_models.ExplorationModel.get(
exploration.id, strict=False)
if exploration_model is None:
exploration_model = exp_models.ExplorationModel(id=exploration.id)
else:
if exploration.version > exploration_model.version:
raise Exception(
'Unexpected error: trying to update version %s of exploration '
'from version %s. Please reload the page and try again.'
% (exploration_model.version, exploration.version))
elif exploration.version < exploration_model.version:
raise Exception(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_model.version, exploration.version))
exploration_model.category = exploration.category
exploration_model.title = exploration.title
exploration_model.objective = exploration.objective
exploration_model.language_code = exploration.language_code
exploration_model.tags = exploration.tags
exploration_model.blurb = exploration.blurb
exploration_model.author_notes = exploration.author_notes
exploration_model.default_skin = exploration.default_skin
exploration_model.skin_customizations = (
exploration.skin_instance.to_dict()['skin_customizations'])
exploration_model.states_schema_version = exploration.states_schema_version
exploration_model.init_state_name = exploration.init_state_name
exploration_model.states = {
state_name: state.to_dict()
for (state_name, state) in exploration.states.iteritems()}
exploration_model.param_specs = exploration.param_specs_dict
exploration_model.param_changes = exploration.param_change_dicts
exploration_model.commit(
committer_id, commit_message, change_list)
memcache_services.delete(_get_exploration_memcache_key(exploration.id))
event_services.ExplorationContentChangeEventHandler.record(exploration.id)
index_explorations_given_ids([exploration.id])
exploration.version += 1
def _create_exploration(
committer_id, exploration, commit_message, commit_cmds):
"""Ensures that rights for a new exploration are saved first.
This is because _save_exploration() depends on the rights object being
present to tell it whether to do strict validation or not.
"""
# This line is needed because otherwise a rights object will be created,
# but the creation of an exploration object will fail.
exploration.validate()
rights_manager.create_new_exploration_rights(exploration.id, committer_id)
model = exp_models.ExplorationModel(
id=exploration.id,
category=exploration.category,
title=exploration.title,
objective=exploration.objective,
language_code=exploration.language_code,
tags=exploration.tags,
blurb=exploration.blurb,
author_notes=exploration.author_notes,
default_skin=exploration.default_skin,
skin_customizations=exploration.skin_instance.to_dict(
)['skin_customizations'],
states_schema_version=exploration.states_schema_version,
init_state_name=exploration.init_state_name,
states={
state_name: state.to_dict()
for (state_name, state) in exploration.states.iteritems()},
param_specs=exploration.param_specs_dict,
param_changes=exploration.param_change_dicts,
)
model.commit(committer_id, commit_message, commit_cmds)
event_services.ExplorationContentChangeEventHandler.record(exploration.id)
exploration.version += 1
create_exploration_summary(exploration.id)
def save_new_exploration(committer_id, exploration):
commit_message = (
'New exploration created with title \'%s\'.' % exploration.title)
_create_exploration(committer_id, exploration, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': exploration.title,
'category': exploration.category,
}])
def delete_exploration(committer_id, exploration_id, force_deletion=False):
"""Deletes the exploration with the given exploration_id.
IMPORTANT: Callers of this function should ensure that committer_id has
permissions to delete this exploration, prior to calling this function.
If force_deletion is True the exploration and its history are fully deleted
and are unrecoverable. Otherwise, the exploration and all its history are
marked as deleted, but the corresponding models are still retained in the
datastore. This last option is the preferred one.
"""
# TODO(sll): Delete the files too?
exploration_rights_model = exp_models.ExplorationRightsModel.get(
exploration_id)
exploration_rights_model.delete(
committer_id, '', force_deletion=force_deletion)
exploration_model = exp_models.ExplorationModel.get(exploration_id)
exploration_model.delete(
committer_id, feconf.COMMIT_MESSAGE_EXPLORATION_DELETED,
force_deletion=force_deletion)
# This must come after the exploration is retrieved. Otherwise the memcache
# key will be reinstated.
exploration_memcache_key = _get_exploration_memcache_key(exploration_id)
memcache_services.delete(exploration_memcache_key)
# Delete the exploration from the search index.
delete_documents_from_search_index([exploration_id])
# Delete the summary of the exploration.
delete_exploration_summary(exploration_id, force_deletion=force_deletion)
# Operations on exploration snapshots.
def _get_simple_changelist_summary(
exploration_id, version_number, change_list):
"""Returns an auto-generated changelist summary for the history logs."""
# TODO(sll): Get this from memcache where possible. It won't change, so we
# can keep it there indefinitely.
base_exploration = get_exploration_by_id(
exploration_id, version=version_number)
if (len(change_list) == 1 and change_list[0]['cmd'] in
['create_new', 'AUTO_revert_version_number']):
# An automatic summary is not needed here, because the original commit
# message is sufficiently descriptive.
return ''
else:
full_summary = get_summary_of_change_list(
base_exploration, change_list)
short_summary_fragments = []
if full_summary['added_states']:
short_summary_fragments.append(
'added \'%s\'' % '\', \''.join(full_summary['added_states']))
if full_summary['deleted_states']:
short_summary_fragments.append(
'deleted \'%s\'' % '\', \''.join(
full_summary['deleted_states']))
if (full_summary['changed_states'] or
full_summary['state_property_changes']):
affected_states = (
full_summary['changed_states'] +
full_summary['state_property_changes'].keys())
short_summary_fragments.append(
'edited \'%s\'' % '\', \''.join(affected_states))
if full_summary['exploration_property_changes']:
short_summary_fragments.append(
'edited exploration properties %s' % ', '.join(
full_summary['exploration_property_changes'].keys()))
return '; '.join(short_summary_fragments)
def get_exploration_snapshots_metadata(exploration_id):
"""Returns the snapshots for this exploration, as dicts.
Args:
exploration_id: str. The id of the exploration in question.
Returns:
list of dicts, each representing a recent snapshot. Each dict has the
following keys: committer_id, commit_message, commit_cmds, commit_type,
created_on_ms, version_number. The version numbers are consecutive and
in ascending order. There are exploration.version items in the
returned list.
"""
exploration = get_exploration_by_id(exploration_id)
current_version = exploration.version
version_nums = range(1, current_version + 1)
return exp_models.ExplorationModel.get_snapshots_metadata(
exploration_id, version_nums)
def update_exploration(
committer_id, exploration_id, change_list, commit_message):
"""Update an exploration. Commits changes.
Args:
- committer_id: str. The id of the user who is performing the update
action.
- exploration_id: str. The exploration id.
- change_list: list of dicts, each representing a _Change object. These
changes are applied in sequence to produce the resulting exploration.
- commit_message: str or None. A description of changes made to the state.
For published explorations, this must be present; for unpublished
explorations, it should be equal to None.
"""
is_public = rights_manager.is_exploration_public(exploration_id)
if is_public and not commit_message:
raise ValueError(
'Exploration is public so expected a commit message but '
'received none.')
exploration = apply_change_list(exploration_id, change_list)
_save_exploration(committer_id, exploration, commit_message, change_list)
# Update the summary of the changed exploration.
update_exploration_summary(exploration.id)
def create_exploration_summary(exploration_id):
"""Create summary of an exploration and store in datastore."""
exploration = get_exploration_by_id(exploration_id)
exp_summary = get_summary_of_exploration(exploration)
save_exploration_summary(exp_summary)
def update_exploration_summary(exploration_id):
"""Update the summary of an exploration."""
exploration = get_exploration_by_id(exploration_id)
exp_summary = get_summary_of_exploration(exploration)
save_exploration_summary(exp_summary)
def get_summary_of_exploration(exploration):
"""Create ExplorationSummary domain object for a given Exploration
domain object and return it.
"""
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exploration.id)
exp_summary_model = exp_models.ExpSummaryModel.get_by_id(exploration.id)
if exp_summary_model:
old_exp_summary = get_exploration_summary_from_model(exp_summary_model)
ratings = old_exp_summary.ratings or feconf.get_empty_ratings()
else:
ratings = feconf.get_empty_ratings()
exploration_model_last_updated = exploration.last_updated
exploration_model_created_on = exploration.created_on
exp_summary = exp_domain.ExplorationSummary(
exploration.id, exploration.title, exploration.category,
exploration.objective, exploration.language_code,
exploration.tags, ratings, exp_rights.status,
exp_rights.community_owned, exp_rights.owner_ids,
exp_rights.editor_ids, exp_rights.viewer_ids, exploration.version,
exploration_model_created_on, exploration_model_last_updated
)
return exp_summary
def save_exploration_summary(exp_summary):
"""Save exploration summary domain object as ExpSummaryModel
entity in datastore."""
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_summary.id,
title=exp_summary.title,
category=exp_summary.category,
objective=exp_summary.objective,
language_code=exp_summary.language_code,
tags=exp_summary.tags,
ratings=exp_summary.ratings,
status=exp_summary.status,
community_owned=exp_summary.community_owned,
owner_ids=exp_summary.owner_ids,
editor_ids=exp_summary.editor_ids,
viewer_ids=exp_summary.viewer_ids,
version=exp_summary.version,
exploration_model_last_updated=(
exp_summary.exploration_model_last_updated),
exploration_model_created_on=(
exp_summary.exploration_model_created_on)
)
exp_summary_model.put()
def delete_exploration_summary(exploration_id, force_deletion=False):
"""Delete an exploration summary model. Note that the model is deleted
outright; the force_deletion argument is currently unused."""
exp_models.ExpSummaryModel.get(exploration_id).delete()
def revert_exploration(
committer_id, exploration_id, current_version, revert_to_version):
"""Reverts an exploration to the given version number. Commits changes."""
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=False)
if current_version > exploration_model.version:
raise Exception(
'Unexpected error: trying to update version %s of exploration '
'from version %s. Please reload the page and try again.'
% (exploration_model.version, current_version))
elif current_version < exploration_model.version:
raise Exception(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_model.version, current_version))
# Validate the previous version of the exploration before committing the
# change.
exploration = get_exploration_by_id(
exploration_id, version=revert_to_version)
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
if exploration_rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
exp_models.ExplorationModel.revert(exploration_model,
committer_id, 'Reverted exploration to version %s' % revert_to_version,
revert_to_version)
memcache_services.delete(_get_exploration_memcache_key(exploration_id))
update_exploration_summary(exploration_id)
# Creation and deletion methods.
def get_demo_exploration_components(demo_path):
"""Gets the content of `demo_path` in the sample explorations folder.
Args:
demo_path: the file or folder path for the content of an exploration
in SAMPLE_EXPLORATIONS_DIR. E.g.: 'adventure.yaml' or 'tar/'.
Returns:
a 2-tuple, the first element of which is a yaml string, and the second
element of which is a list of (filepath, content) 2-tuples. The filepath
does not include the assets/ prefix.
"""
demo_filepath = os.path.join(feconf.SAMPLE_EXPLORATIONS_DIR, demo_path)
if demo_filepath.endswith('yaml'):
file_contents = utils.get_file_contents(demo_filepath)
return file_contents, []
elif os.path.isdir(demo_filepath):
return utils.get_exploration_components_from_dir(demo_filepath)
else:
raise Exception('Unrecognized file path: %s' % demo_path)
def save_new_exploration_from_yaml_and_assets(
committer_id, yaml_content, title, category, exploration_id,
assets_list):
if assets_list is None:
assets_list = []
exploration = exp_domain.Exploration.from_yaml(
exploration_id, title, category, yaml_content)
commit_message = (
'New exploration created from YAML file with title \'%s\'.'
% exploration.title)
_create_exploration(committer_id, exploration, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': exploration.title,
'category': exploration.category,
}])
for (asset_filename, asset_content) in assets_list:
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
fs.commit(committer_id, asset_filename, asset_content)
def delete_demo(exploration_id):
"""Deletes a single demo exploration."""
if not (0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS)):
raise Exception('Invalid demo exploration id %s' % exploration_id)
exploration = get_exploration_by_id(exploration_id, strict=False)
if not exploration:
logging.info('Exploration with id %s was not deleted, because it '
'does not exist.' % exploration_id)
else:
delete_exploration(
feconf.SYSTEM_COMMITTER_ID, exploration_id, force_deletion=True)
def load_demo(exploration_id):
"""Loads a demo exploration.
The resulting exploration will have version 2 (one for its initial
creation and one for its subsequent modification.)
"""
# TODO(sll): Speed this method up. It is too slow.
delete_demo(exploration_id)
if not (0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS)):
raise Exception('Invalid demo exploration id %s' % exploration_id)
exploration_info = feconf.DEMO_EXPLORATIONS[int(exploration_id)]
if len(exploration_info) == 3:
(exp_filename, title, category) = exploration_info
else:
raise Exception('Invalid demo exploration: %s' % exploration_info)
yaml_content, assets_list = get_demo_exploration_components(exp_filename)
save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, title, category,
exploration_id, assets_list)
rights_manager.publish_exploration(
feconf.SYSTEM_COMMITTER_ID, exploration_id)
# Release ownership of the exploration.
rights_manager.release_ownership(
feconf.SYSTEM_COMMITTER_ID, exploration_id)
index_explorations_given_ids([exploration_id])
logging.info('Exploration with id %s was loaded.' % exploration_id)
def get_next_page_of_all_commits(
page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None):
"""Returns a page of commits to all explorations in reverse time order.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
results, new_urlsafe_start_cursor, more = (
exp_models.ExplorationCommitLogEntryModel.get_all_commits(
page_size, urlsafe_start_cursor))
return ([exp_domain.ExplorationCommitLogEntry(
entry.created_on, entry.last_updated, entry.user_id, entry.username,
entry.exploration_id, entry.commit_type, entry.commit_message,
entry.commit_cmds, entry.version, entry.post_commit_status,
entry.post_commit_community_owned, entry.post_commit_is_private
) for entry in results], new_urlsafe_start_cursor, more)
def get_next_page_of_all_non_private_commits(
page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None,
max_age=None):
"""Returns a page of non-private commits in reverse time order. If max_age
is given, it should be a datetime.timedelta instance.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
if max_age is not None and not isinstance(max_age, datetime.timedelta):
raise ValueError(
'max_age must be a datetime.timedelta instance or None.')
results, new_urlsafe_start_cursor, more = (
exp_models.ExplorationCommitLogEntryModel.get_all_non_private_commits(
page_size, urlsafe_start_cursor, max_age=max_age))
return ([exp_domain.ExplorationCommitLogEntry(
entry.created_on, entry.last_updated, entry.user_id, entry.username,
entry.exploration_id, entry.commit_type, entry.commit_message,
entry.commit_cmds, entry.version, entry.post_commit_status,
entry.post_commit_community_owned, entry.post_commit_is_private
) for entry in results], new_urlsafe_start_cursor, more)
def _exp_rights_to_search_dict(rights):
# Allow searches like "is:featured".
doc = {}
if rights.status == rights_manager.EXPLORATION_STATUS_PUBLICIZED:
doc['is'] = 'featured'
return doc
def _should_index(exp):
rights = rights_manager.get_exploration_rights(exp.id)
return rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE
def _get_search_rank(exp_id):
"""Returns an integer determining the document's rank in search.
Featured explorations get a ranking bump, and so do explorations that
have been more recently updated. Good ratings will increase the ranking
and bad ones will lower it.
"""
# TODO(sll): Improve this calculation.
_STATUS_PUBLICIZED_BONUS = 30
# This is done to prevent the rank hitting 0 too easily. Note that
# negative ranks are disallowed in the Search API.
_DEFAULT_RANK = 20
exploration = get_exploration_by_id(exp_id)
rights = rights_manager.get_exploration_rights(exp_id)
summary = get_exploration_summary_by_id(exp_id)
rank = _DEFAULT_RANK + (
_STATUS_PUBLICIZED_BONUS
if rights.status == rights_manager.EXPLORATION_STATUS_PUBLICIZED
else 0)
if summary.ratings:
RATING_WEIGHTINGS = {'1': -5, '2': -2, '3': 2, '4': 5, '5': 10}
for rating_value in summary.ratings:
rank += (
summary.ratings[rating_value] *
RATING_WEIGHTINGS[rating_value])
# Iterate backwards through the exploration history metadata until we find
# the most recent snapshot that was committed by a human.
last_human_update_ms = 0
snapshots_metadata = get_exploration_snapshots_metadata(exp_id)
for snapshot_metadata in reversed(snapshots_metadata):
if snapshot_metadata['committer_id'] != feconf.MIGRATION_BOT_USER_ID:
last_human_update_ms = snapshot_metadata['created_on_ms']
break
_TIME_NOW_MS = utils.get_current_time_in_millisecs()
_MS_IN_ONE_DAY = 24 * 60 * 60 * 1000
time_delta_days = int(
(_TIME_NOW_MS - last_human_update_ms) / _MS_IN_ONE_DAY)
if time_delta_days == 0:
rank += 80
elif time_delta_days == 1:
rank += 50
elif 2 <= time_delta_days <= 7:
rank += 35
# Ranks must be non-negative.
return max(rank, 0)
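# Worked example of the calculation above, for a hypothetical publicized
# exploration with two 4-star and one 5-star rating, last edited by a
# human three days ago:
#
#     rank = 20 (default) + 30 (publicized bonus)
#            + 2 * 5 + 1 * 10 (ratings) + 35 (updated 2-7 days ago)
#          = 105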
def _exp_to_search_dict(exp):
rights = rights_manager.get_exploration_rights(exp.id)
doc = {
'id': exp.id,
'language_code': exp.language_code,
'title': exp.title,
'category': exp.category,
'tags': exp.tags,
'blurb': exp.blurb,
'objective': exp.objective,
'author_notes': exp.author_notes,
'rank': _get_search_rank(exp.id),
}
doc.update(_exp_rights_to_search_dict(rights))
return doc
def clear_search_index():
"""WARNING: This runs in-request, and may therefore fail if there are too
many entries in the index.
"""
search_services.clear_index(SEARCH_INDEX_EXPLORATIONS)
def index_explorations_given_ids(exp_ids):
# We pass 'strict=False' so as not to index deleted explorations.
exploration_models = get_multiple_explorations_by_id(exp_ids, strict=False)
search_services.add_documents_to_index([
_exp_to_search_dict(exp) for exp in exploration_models.values()
if _should_index(exp)
], SEARCH_INDEX_EXPLORATIONS)
def patch_exploration_search_document(exp_id, update):
"""Patches an exploration's current search document, with the values
from the 'update' dictionary."""
doc = search_services.get_document_from_index(
exp_id, SEARCH_INDEX_EXPLORATIONS)
doc.update(update)
search_services.add_documents_to_index([doc], SEARCH_INDEX_EXPLORATIONS)
def update_exploration_status_in_search(exp_id):
rights = rights_manager.get_exploration_rights(exp_id)
if rights.status == rights_manager.EXPLORATION_STATUS_PRIVATE:
delete_documents_from_search_index([exp_id])
else:
patch_exploration_search_document(
rights.id, _exp_rights_to_search_dict(rights))
def delete_documents_from_search_index(exploration_ids):
search_services.delete_documents_from_index(
exploration_ids, SEARCH_INDEX_EXPLORATIONS)
def search_explorations(query, limit, sort=None, cursor=None):
"""Searches through the available explorations.
args:
- query: the query string to search for.
- limit: the maximum number of results to return.
- sort: a string indicating how to sort results. This should be a string
of space separated values. Each value should start with a '+' or a
'-' character indicating whether to sort in ascending or descending
order respectively. This character should be followed by a field name
to sort on. When this is None, results are based on 'rank'. See
_get_search_rank to see how rank is determined.
- cursor: a cursor, used to get the next page of results. If there are
more documents that match the query than 'limit', this function will
return a cursor to get the next page.
returns: a tuple:
- a list of exploration ids that match the query.
- a cursor if there are more matching explorations to fetch, None
otherwise. If a cursor is returned, it will be a web-safe string that
can be used in URLs.
"""
return search_services.search(
query, SEARCH_INDEX_EXPLORATIONS, cursor, limit, sort, ids_only=True)
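# Minimal pagination sketch for the function above (the query string and
# page size are hypothetical):
#
#     exp_ids, cursor = search_explorations('fractions', 20)
#     while cursor is not None:
#         more_ids, cursor = search_explorations(
#             'fractions', 20, cursor=cursor)
#         exp_ids += more_ids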
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
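# Worked example of the function above: for indices = [0, 2, 0, 1] and
# values = [a, b, c, d], array_ops.unique yields unique_indices = [0, 2, 1]
# with positions [0, 1, 0, 2], so summed_values = [a + c, b, d].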
@six.add_metaclass(abc.ABCMeta)
@keras_export("keras.optimizers.Optimizer")
class OptimizerV2(trackable.Trackable):
"""Updated base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
# In graph mode, returns op that minimizes the loss by updating the listed
# variables.
opt_op = opt.minimize(loss, var_list=[var1, var2])
opt_op.run()
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
```
### Custom training loop with Keras models
In Keras models, variables are sometimes created when the model is first
called, instead of at construction time. Examples include 1) sequential
models without a pre-defined input shape, or 2) subclassed models. Pass
var_list as a callable in these cases.
Example:
```python
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
loss_fn = lambda: tf.keras.losses.mse(model(input), output)
var_list_fn = lambda: model.trainable_weights
for input, output in data:
opt.minimize(loss_fn, var_list_fn)
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# Compute the gradients for a list of variables.
with tf.GradientTape() as tape:
loss = <call_loss_function>
vars = <list_of_variables>
grads = tape.gradient(loss, vars)
# Process the gradients, for example cap them, etc.
# capped_grads = [MyCapper(g) for g in grads]
processed_grads = [process_gradient(g) for g in grads]
# Ask the optimizer to apply the processed gradients.
opt.apply_gradients(zip(processed_grads, var_list))
```
### Use with `tf.distribute.Strategy`.
This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To average gradients,
you divide your loss by the global batch size, which is done
automatically if you use `tf.keras` built-in training or evaluation loops.
See the `reduction` argument of your loss, which should be set to
`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
`tf.keras.losses.Reduction.SUM` for summing.
If you are not using these and you want to average gradients, you should use
`tf.math.reduce_sum` to add up your per-example losses and then divide by the
global batch size. Note that when using `tf.distribute.Strategy`, the first
component of a tensor's shape is the *replica-local* batch size, which is off
by a factor equal to the number of replicas being used to compute a single
step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
resulting in gradients that can be many times too big.
### Variable Constraint
All Keras optimizers respect variable constraints. If a constraint
function is passed to any variable, the constraint will be applied to the
variable after the gradient has been applied to it.
Important: If the gradient is a sparse tensor, variable constraints are
not supported.
### Thread Compatibility
The entire optimizer is currently thread compatible, not thread-safe. The user
needs to perform synchronization if necessary.
### Slots
Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage
additional variables associated with the variables to train. These are called
<i>Slots</i>. Slots have names and you can ask the optimizer for the names of
the slots that it uses. Once you have a slot name you can ask the optimizer
for the variable it created to hold the slot value.
This can be useful if you want to log or debug a training algorithm,
report stats about the slots, etc.
### Hyper parameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
Hyper parameters can be overwritten through user code:
Example:
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Write a customized optimizer.
If you intend to create your own optimization algorithm, simply inherit from
this class and override the following methods:
- _resource_apply_dense (update a variable given a dense gradient tensor)
- _resource_apply_sparse (update a variable given a sparse gradient tensor)
- _create_slots (if your optimizer algorithm requires additional variables)
- get_config (serialization of the optimizer; include all hyper parameters)
"""
def __init__(self, name, **kwargs):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Note that Optimizer instances should not bind to a single graph,
and so shouldn't keep Tensors as member variables. Generally
you should be able to use the _set_hyper()/_get_hyper()
facility instead.
This class is stateful and thread-compatible.
Args:
name: A non-empty string. The name to use for accumulators created
for the optimizer.
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` clips gradients by norm; `clipvalue` clips gradients
by value; `decay` is included for backward compatibility to allow time
inverse decay of the learning rate; `lr` is also included for backward
compatibility, but it is recommended to use `learning_rate` instead.
Raises:
ValueError: If name is malformed.
RuntimeError: If _create_slots has been overridden instead of
_create_vars.
"""
allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay"}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError("Unexpected keyword argument "
"passed to optimizer: " + str(k))
# Check that the keyword argument value is non-negative.
if kwargs[k] < 0:
raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k]))
self._use_locking = True
self._init_set_name(name)
self._hyper = {}
# dict: {variable name : {slot name : variable}}
self._slots = {}
self._slot_names = []
self._weights = []
self._iterations = None
# For implementing Trackable. Stores information about how to restore
# slot variables which have not yet been created
# (trackable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
decay = kwargs.pop("decay", 0.0)
if decay < 0.:
raise ValueError("decay cannot be less than 0: {}".format(decay))
self._initial_decay = decay
if "clipnorm" in kwargs:
self.clipnorm = kwargs.pop("clipnorm")
if "clipvalue" in kwargs:
self.clipvalue = kwargs.pop("clipvalue")
self._hypers_created = False
def minimize(self, loss, var_list, grad_loss=None, name=None):
"""Minimize `loss` by updating `var_list`.
This method simply computes gradient using `tf.GradientTape` and calls
`apply_gradients()`. If you want to process the gradient before applying
then call `tf.GradientTape` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use a callable when the variable list would otherwise be incomplete
before `minimize` is called, since the variables are created the first
time `loss` is called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
name: Optional name for the returned operation.
Returns:
An `Operation` that updates the variables in `var_list`. The `iterations`
will be automatically increased by 1.
Raises:
ValueError: If some of the variables are not `Variable` objects.
"""
grads_and_vars = self._compute_gradients(
loss, var_list=var_list, grad_loss=grad_loss)
return self.apply_gradients(grads_and_vars, name=name)
def _compute_gradients(self, loss, var_list, grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use a callable when the variable list would otherwise be incomplete
before `minimize` is called, since the variables are created the first
time `loss` is called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid, or var_list is None.
"""
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
with backprop.GradientTape() as tape:
if not callable(var_list):
tape.watch(var_list)
loss_value = loss()
if callable(var_list):
var_list = var_list()
var_list = nest.flatten(var_list)
with backend.name_scope(self._name + "/gradients"):
grads = tape.gradient(loss_value, var_list, grad_loss)
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return grads_and_vars
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Arguments:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
params = nest.flatten(params)
with backend.get_graph().as_default(), backend.name_scope(self._name +
"/gradients"):
grads = gradients.gradients(loss, params)
for grad, param in zip(grads, params):
if grad is None:
raise ValueError("Variable {} has `None` for gradient. "
"Please make sure that all of your ops have a "
"gradient defined (i.e. are differentiable). "
"Common ops without gradient: "
"K.argmax, K.round, K.eval.".format(param))
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def apply_gradients(self, grads_and_vars, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. The `iterations`
will be automatically increased by 1.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
grads_and_vars = _filter_grads(grads_and_vars)
var_list = [v for (_, v) in grads_and_vars]
with backend.name_scope(self._name):
# Create iteration if necessary.
with ops.init_scope():
_ = self.iterations
self._create_hypers()
self._create_slots(var_list)
if not grads_and_vars:
# Distribution strategy does not support reducing an empty list of
# gradients
return control_flow_ops.no_op()
apply_state = self._prepare(var_list)
return distribute_ctx.get_replica_context().merge_call(
functools.partial(self._distributed_apply, apply_state=apply_state),
args=(grads_and_vars,),
kwargs={"name": name})
def _distributed_apply(self, distribution, grads_and_vars, name, apply_state):
"""`apply_gradients` using a `DistributionStrategy`."""
reduced_grads = distribution.extended.batch_reduce_to(
ds_reduce_util.ReduceOp.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
def apply_grad_to_update_var(var, grad):
"""Apply gradient to variable."""
if isinstance(var, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", var)
apply_kwargs = {}
if isinstance(grad, ops.IndexedSlices):
if var.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
if "apply_state" in self._sparse_apply_args:
apply_kwargs["apply_state"] = apply_state
return self._resource_apply_sparse_duplicate_indices(
grad.values, var, grad.indices, **apply_kwargs)
if "apply_state" in self._dense_apply_args:
apply_kwargs["apply_state"] = apply_state
update_op = self._resource_apply_dense(grad, var, **apply_kwargs)
if var.constraint is not None:
with ops.control_dependencies([update_op]):
return var.assign(var.constraint(var))
else:
return update_op
update_ops = []
with backend.name_scope(name or self._name):
for grad, var in grads_and_vars:
scope_name = ("update" if ops.executing_eagerly_outside_functions() else
"update_" + var.op.name)
# Colocate the update with variables to avoid unnecessary communication
# delays. See b/136304694.
with backend.name_scope(
scope_name), distribution.extended.colocate_vars_with(var):
update_ops.extend(
distribution.extended.update(
var, apply_grad_to_update_var, args=(grad,), group=False))
any_symbolic = any(isinstance(i, ops.Operation) or
tf_utils.is_symbolic_tensor(i) for i in update_ops)
if not context.executing_eagerly() or any_symbolic:
# If the current context is graph mode or any of the update ops are
# symbolic then the step update should be carried out under a graph
# context. (eager updates execute immediately)
with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access
with ops.control_dependencies(update_ops):
return self._iterations.assign_add(1).op
return self._iterations.assign_add(1)
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
grads_and_vars = list(zip(grads, params))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return [self.apply_gradients(grads_and_vars)]
def _set_hyper(self, name, value):
"""set hyper `name` to value. value can be callable, tensor, numeric."""
if isinstance(value, trackable.Trackable):
self._track_trackable(value, name, overwrite=True)
if name not in self._hyper:
self._hyper[name] = value
else:
prev_value = self._hyper[name]
if (callable(prev_value)
or isinstance(prev_value,
(ops.Tensor, int, float,
learning_rate_schedule.LearningRateSchedule))
or isinstance(value, learning_rate_schedule.LearningRateSchedule)):
self._hyper[name] = value
else:
backend.set_value(self._hyper[name], value)
def _get_hyper(self, name, dtype=None):
if not self._hypers_created:
self._create_hypers()
value = self._hyper[name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return value
if callable(value):
value = value()
if dtype:
return math_ops.cast(value, dtype)
else:
return value
def __getattribute__(self, name):
"""Overridden to support hyperparameter access."""
try:
return super(OptimizerV2, self).__getattribute__(name)
except AttributeError as e:
# Needed to avoid infinite recursion with __setattr__.
if name == "_hyper":
raise e
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if name in self._hyper:
return self._get_hyper(name)
raise e
def __setattr__(self, name, value):
"""Override setattr to support dynamic hyperparameter setting."""
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if hasattr(self, "_hyper") and name in self._hyper:
self._set_hyper(name, value)
else:
super(OptimizerV2, self).__setattr__(name, value)
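# Backwards-compatibility sketch: with the overrides above, both spellings
# below reach the same "learning_rate" hyperparameter:
#
#     opt.lr = 0.05       # routed through _set_hyper("learning_rate", 0.05)
#     current = opt.lr    # routed through _get_hyper("learning_rate")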
def get_slot_names(self):
"""A list of names for this optimizer's slots."""
return self._slot_names
def add_slot(self, var, slot_name, initializer="zeros"):
"""Add a new slot variable for `var`."""
if slot_name not in self._slot_names:
self._slot_names.append(slot_name)
var_key = _var_key(var)
slot_dict = self._slots.setdefault(var_key, {})
weight = slot_dict.get(slot_name, None)
if weight is None:
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
initial_value = functools.partial(
initializer, shape=var.shape, dtype=var.dtype)
else:
initial_value = initializer
strategy = distribute_ctx.get_strategy()
if not strategy.extended.variable_created_in_scope(var):
raise ValueError(
"Trying to create optimizer slot variable under the scope for "
"tf.distribute.Strategy ({}), which is different from the scope "
"used for the original variable ({}). Make sure the slot "
"variables are created under the same strategy scope. This may "
"happen if you're restoring from a checkpoint outside the scope"
.format(strategy, var))
with strategy.extended.colocate_vars_with(var):
weight = tf_variables.Variable(
name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access
dtype=var.dtype,
trainable=False,
initial_value=initial_value)
backend.track_variable(weight)
slot_dict[slot_name] = weight
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=weight)
self._weights.append(weight)
return weight
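# A minimal sketch of how a subclass would typically call add_slot from
# its _create_slots override (the slot name "momentum" is illustrative):
#
#     def _create_slots(self, var_list):
#         for var in var_list:
#             self.add_slot(var, "momentum")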
def get_slot(self, var, slot_name):
var_key = _var_key(var)
slot_dict = self._slots[var_key]
return slot_dict[slot_name]
def _prepare(self, var_list):
keys = set()
for var in var_list:
var_devices = (getattr(var, "devices", None) or # Distributed
[var.device]) # Regular
var_dtype = var.dtype.base_dtype
for var_device in var_devices:
keys.add((var_device, var_dtype))
apply_state = {}
for var_device, var_dtype in keys:
apply_state[(var_device, var_dtype)] = {}
with ops.device(var_device):
self._prepare_local(var_device, var_dtype, apply_state)
return apply_state
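# The apply_state computed above is keyed by (device, dtype) pairs. After
# _prepare_local (below) runs, each entry holds precomputed per-device
# constants, e.g. (sketch):
#
#     {('/job:localhost/replica:0/task:0/device:GPU:0', tf.float32):
#          {'lr_t': <decayed learning-rate tensor>}}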
def _prepare_local(self, var_device, var_dtype, apply_state):
if "learning_rate" in self._hyper:
lr_t = array_ops.identity(self._decayed_lr(var_dtype))
apply_state[(var_device, var_dtype)]["lr_t"] = lr_t
def _fallback_apply_state(self, var_device, var_dtype):
"""Compatibility for subclasses that don't pass apply_state through."""
apply_state = {(var_device, var_dtype): {}}
self._prepare_local(var_device, var_dtype, apply_state)
return apply_state[(var_device, var_dtype)]
def _create_hypers(self):
if self._hypers_created:
return
# Iterate hyper values deterministically.
for name, value in sorted(self._hyper.items()):
if isinstance(
value, (ops.Tensor, tf_variables.Variable)) or callable(value):
continue
else:
self._hyper[name] = self.add_weight(
name,
shape=[],
trainable=False,
initializer=value,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._hypers_created = True
@property
def iterations(self):
"""Variable. The number of training steps this Optimizer has run."""
if self._iterations is None:
self._iterations = self.add_weight(
"iter",
shape=[],
dtype=dtypes.int64,
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._iterations)
return self._iterations
@iterations.setter
def iterations(self, variable):
if self._iterations is not None:
raise RuntimeError("Cannot set `iterations` to a new Variable after "
"the Optimizer weights have been created")
self._iterations = variable
self._weights.append(self._iterations)
def _decayed_lr(self, var_dtype):
"""Get decayed learning rate as a Tensor with dtype=var_dtype."""
lr_t = self._get_hyper("learning_rate", var_dtype)
if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):
local_step = math_ops.cast(self.iterations, var_dtype)
lr_t = math_ops.cast(lr_t(local_step), var_dtype)
if self._initial_decay > 0.:
local_step = math_ops.cast(self.iterations, var_dtype)
decay_t = self._get_hyper("decay", var_dtype)
lr_t = lr_t / (1. + decay_t * local_step)
return lr_t
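  # Worked example: with learning_rate=0.1, decay=0.01 and iterations=100,
  # the inverse-time decay above gives lr_t = 0.1 / (1. + 0.01 * 100) = 0.05.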
@abc.abstractmethod
def get_config(self):
"""Returns the config of the optimimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
config = {"name": self._name}
if hasattr(self, "clipnorm"):
config["clipnorm"] = self.clipnorm
if hasattr(self, "clipvalue"):
config["clipvalue"] = self.clipvalue
return config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Arguments:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
if "lr" in config:
config["learning_rate"] = config.pop("lr")
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"], custom_objects=custom_objects)
return cls(**config)
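  # Hedged round-trip sketch (SGD is used only as an example subclass): the
  # config carries hyperparameters but no slot state.
  #
  #   opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
  #   config = opt.get_config()
  #   restored = tf.keras.optimizers.SGD.from_config(config)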
def _serialize_hyperparameter(self, hyperparameter_name):
"""Serialize a hyperparameter that can be a float, callable, or Tensor."""
value = self._hyper[hyperparameter_name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return learning_rate_schedule.serialize(value)
if callable(value):
return value()
if tensor_util.is_tensor(value):
return backend.get_value(value)
return value
def variables(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
@property
def weights(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
def get_weights(self):
params = self.weights
return backend.batch_get_value(params)
# TODO(tanzheny): Maybe share this logic with base_layer.
def set_weights(self, weights):
params = self.weights
if len(params) != len(weights):
raise ValueError(
"You called `set_weights(weights)` on optimizer " + self._name +
" with a weight list of length " + str(len(weights)) +
", but the optimizer was expecting " + str(len(params)) +
" weights. Provided weights: " + str(weights)[:50] + "...")
if not params:
return
weight_value_tuples = []
param_values = backend.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError("Optimizer weight shape " + str(pv.shape) +
" not compatible with "
"provided weight shape " + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
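  # Hedged sketch: get_weights/set_weights can transfer optimizer state
  # between two optimizers whose weights line up pairwise (opt_a and opt_b
  # are hypothetical instances):
  #
  #   values = opt_a.get_weights()  # list of numpy arrays, creation order
  #   opt_b.set_weights(values)     # shapes must match element-wise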
def add_weight(self,
name,
shape,
dtype=None,
initializer="zeros",
trainable=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE):
if dtype is None:
dtype = dtypes.float32
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
getter=base_layer_utils.make_variable,
overwrite=True,
initializer=initializer,
dtype=dtype,
trainable=trainable,
use_resource=True,
synchronization=synchronization,
aggregation=aggregation)
backend.track_variable(variable)
return variable
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
self._name = name
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError("Invalid type %r for %s, expected: %s." %
(dtype, t.name, [v for v in valid_dtypes]))
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
def _resource_apply_dense(self, grad, handle, apply_state):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
apply_state: A dict which is used across multiple apply calls.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,
**kwargs):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices may be repeated.
**kwargs: May optionally contain `apply_state`
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices,
**kwargs)
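  # Worked example: for indices [0, 0, 2] with gradient rows g0, g1, g2,
  # the deduplication above yields unique indices [0, 2] with summed rows
  # [g0 + g1, g2] before _resource_apply_sparse is invoked.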
def _resource_apply_sparse(self, grad, handle, indices, apply_state):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices are unique.
apply_state: A dict which is used across multiple apply calls.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_scatter_update(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_update(x.handle, i, v)]):
return x.value()
@property
@tracking.cached_per_instance
def _dense_apply_args(self):
return tf_inspect.getfullargspec(self._resource_apply_dense).args
@property
@tracking.cached_per_instance
def _sparse_apply_args(self):
return tf_inspect.getfullargspec(self._resource_apply_sparse).args
# ---------------
# For implementing the trackable interface
# ---------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `trackable._CheckpointPosition` object
indicating the slot variable `Trackable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
variable_key = _var_key(variable)
slot_dict = self._slots.get(variable_key, {})
slot_variable = slot_dict.get(slot_name, None)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = trackable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self.add_slot(
var=variable,
initializer=initializer,
slot_name=slot_name)
# Slot variables are not owned by any one object (because we don't want to
# save the slot variable if the optimizer is saved without the non-slot
# variable, or if the non-slot variable is saved without the optimizer;
# it's a dependency hypergraph with edges of the form (optimizer, non-slot
# variable, variable)). So we don't _track_ slot variables anywhere, and
# instead special-case this dependency and otherwise pretend it's a normal
# graph.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
def _filter_grads(grads_and_vars):
"""Filter out iterable with grad equal to None."""
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([v.name for _, v in grads_and_vars],))
if vars_with_empty_grads:
logging.warning(
("Gradients do not exist for variables %s when minimizing the loss."),
([v.name for v in vars_with_empty_grads]))
return filtered
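def _demo_filter_grads():
  """Hedged sketch of `_filter_grads` on plain Python stand-ins; `FakeVar`
  is a hypothetical helper used only to illustrate the filtering."""
  class FakeVar(object):
    def __init__(self, name):
      self.name = name
  pairs = [(None, FakeVar("a")), (1.0, FakeVar("b"))]
  filtered = _filter_grads(pairs)  # logs a warning for "a", keeps "b"
  assert len(filtered) == 1 and filtered[0][1].name == "b"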
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if hasattr(var, "_distributed_container"):
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
def _get_slot_key_from_var(var, slot_name):
"""Get the slot key for the variable: var_name/slot_name."""
name = _var_key(var)
return name + "/" + slot_name
class RestoredOptimizer(OptimizerV2):
"""A non-functional Optimizer implementation for checkpoint compatibility.
Holds slot variables and hyperparameters when an optimizer is restored from a
SavedModel. These variables may be referenced in functions along with ops
created by the original optimizer, but currently we do not support using the
  optimizer object itself (e.g. through `apply_gradients`).
"""
# TODO(allenl): Make the restored optimizer functional by tracing its apply
# methods.
def __init__(self):
super(RestoredOptimizer, self).__init__("RestoredOptimizer")
self._hypers_created = True
def get_config(self):
# TODO(allenl): Save and restore the Optimizer's config
raise NotImplementedError(
"Restoring functional Optimzers from SavedModels is not currently "
"supported. Please file a feature request if this limitation bothers "
"you.")
revived_types.register_revived_type(
"optimizer",
lambda obj: isinstance(obj, OptimizerV2),
versions=[revived_types.VersionedTypeRegistration(
object_factory=lambda proto: RestoredOptimizer(),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=RestoredOptimizer._set_hyper # pylint: disable=protected-access
)])
|
|
"""
This is a modified version of input_data.py by Google,
for generalization.
"""
import tensorflow as tf
import numpy
import pandas as pd
import jpandas as jpd
import numpy as np
import random
def placeholder_inputs(batch_size, mnist_IMAGE_PIXELS):
"""Generate placeholder variables to represent the the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
    batch_size: The batch size will be baked into both placeholders.
    mnist_IMAGE_PIXELS: The number of pixels in each flattened input image.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32,
shape=(batch_size, mnist_IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
return images_placeholder, labels_placeholder
def XY_split( X, Y, rate = 0):
assert rate >= 0
total_N = X.shape[0]
train_N = int( total_N * (1.0 - rate))
X1, X2 = np.split( X, [train_N])
Y1, Y2 = np.split( Y, [train_N])
return X1, Y1, X2, Y2
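def _demo_XY_split():
    # Hedged sketch: with rate = 0.25 the first 75% of the rows form the
    # first split and the remaining 25% the held-out split.
    X = np.arange(8).reshape(8, 1)
    Y = np.arange(8)
    X1, Y1, X2, Y2 = XY_split(X, Y, rate=0.25)
    assert X1.shape[0] == 6 and X2.shape[0] == 2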
def read_data_sets_csv( fname, validation_rate = 0, test_rate = 0, disp = False):
class DataSets(object):
pass
data_sets = DataSets()
pdr = pd.read_csv( fname)
xM = jpd.pd_get_xM( pdr)
yV = jpd.pd_get_yV( pdr, y_id = 'exp')
X, Y = list(map( np.array, [xM, yV]))
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
data_sets.train = DataSet_CSV( X, Y, disp = disp)
return data_sets
def read_data_sets_mol_md( fname, validation_rate = 0, test_rate = 0, disp = False):
class DataSets(object):
pass
data_sets = DataSets()
pdr = pd.read_csv( fname)
xM_fp = jpd.pd_get_xM( pdr)
xM_key = jpd.pd_get_xM_MACCSkeys( pdr)
xM_molw = jpd.pd_get_xM_molw( pdr)
xM_molw = np.divide( xM_molw, np.std( xM_molw, axis = 0))
xM_lasa = jpd.pd_get_xM_lasa( pdr)
xM_lasa = np.divide( xM_lasa, np.std( xM_lasa, axis = 0))
xM = np.concatenate( [xM_fp, xM_key, xM_molw, xM_lasa], axis = 1)
yV = jpd.pd_get_yV( pdr, y_id = 'exp').A1
    yV = [1 if y > 0 else 0 for y in yV] # binarize the labels for classification
X, Y = list(map( np.array, [xM, yV]))
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
data_sets.train = DataSet_CSV( X, Y, disp = disp)
data_sets.IMAGE_PIXELS = xM.shape[1]
return data_sets
def read_data_sets_mol_sd( fname, validation_rate = 0, test_rate = 0, disp = False):
class DataSets(object):
pass
data_sets = DataSets()
pdr = pd.read_csv( fname)
xM_fp = jpd.pd_get_xM( pdr)
#xM_key = jpd.pd_get_xM_MACCSkeys( pdr)
#xM_molw = jpd.pd_get_xM_molw( pdr)
#xM_lasa = jpd.pd_get_xM_lasa( pdr)
#xM = np.concatenate( [xM_fp, xM_key, xM_molw, xM_lasa], axis = 1)
xM = xM_fp
yV = jpd.pd_get_yV( pdr, y_id = 'exp').A1
    yV = [1 if y > 0 else 0 for y in yV] # binarize the labels for classification
X, Y = list(map( np.array, [xM, yV]))
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
data_sets.train = DataSet_CSV( X, Y, disp = disp)
data_sets.IMAGE_PIXELS = xM.shape[1]
return data_sets
def read_data_sets_mol_sd_molw( fname, validation_rate = 0, test_rate = 0, disp = False):
class DataSets(object):
pass
data_sets = DataSets()
pdr = pd.read_csv( fname)
#xM_fp = jpd.pd_get_xM( pdr)
#xM_key = jpd.pd_get_xM_MACCSkeys( pdr)
xM_molw = jpd.pd_get_xM_molw( pdr)
#xM_lasa = jpd.pd_get_xM_lasa( pdr)
#xM = np.concatenate( [xM_fp, xM_key, xM_molw, xM_lasa], axis = 1)
#xM = xM_molw
xM = np.divide( xM_molw, np.std( xM_molw, axis = 0))
yV = jpd.pd_get_yV( pdr, y_id = 'exp').A1
    yV = [1 if y > 0 else 0 for y in yV] # binarize the labels for classification
X, Y = list(map( np.array, [xM, yV]))
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
data_sets.train = DataSet_CSV( X, Y, disp = disp)
data_sets.IMAGE_PIXELS = xM.shape[1]
return data_sets
def read_data_sets_mol_sd_key( fname, validation_rate = 0, test_rate = 0, disp = False):
class DataSets(object):
pass
data_sets = DataSets()
pdr = pd.read_csv( fname)
#xM_fp = jpd.pd_get_xM( pdr)
xM_key = jpd.pd_get_xM_MACCSkeys( pdr)
#xM_molw = jpd.pd_get_xM_molw( pdr)
#xM_lasa = jpd.pd_get_xM_lasa( pdr)
#xM = np.concatenate( [xM_fp, xM_key, xM_molw, xM_lasa], axis = 1)
xM = xM_key
#xM = np.divide( xM_molw, np.std( xM_molw, axis = 0))
yV = jpd.pd_get_yV( pdr, y_id = 'exp').A1
    yV = [1 if y > 0 else 0 for y in yV] # binarize the labels for classification
X, Y = list(map( np.array, [xM, yV]))
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
data_sets.train = DataSet_CSV( X, Y, disp = disp)
data_sets.IMAGE_PIXELS = xM.shape[1]
return data_sets
def read_data_sets_mol( fname, validation_rate = 0, test_rate = 0, disp = False):
class DataSets(object):
pass
data_sets = DataSets()
pdr = pd.read_csv( fname)
xM_fp = jpd.pd_get_xM( pdr)
xM_key = jpd.pd_get_xM_MACCSkeys( pdr)
xM_molw = jpd.pd_get_xM_molw( pdr)
xM_lasa = jpd.pd_get_xM_lasa( pdr)
xM = np.concatenate( [xM_fp, xM_key, xM_molw, xM_lasa], axis = 1)
yV = jpd.pd_get_yV( pdr, y_id = 'exp').A1
    yV = [1 if y > 0 else 0 for y in yV] # binarize the labels for classification
X, Y = list(map( np.array, [xM, yV]))
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
data_sets.train = DataSet_CSV( X, Y, disp = disp)
data_sets.IMAGE_PIXELS = xM.shape[1]
return data_sets
def read_data_sets_mol_molw( fname, validation_rate = 0, test_rate = 0, disp = False):
class DataSets(object):
pass
data_sets = DataSets()
pdr = pd.read_csv( fname)
#xM_fp = jpd.pd_get_xM( pdr)
#xM_key = jpd.pd_get_xM_MACCSkeys( pdr)
xM_molw = jpd.pd_get_xM_molw( pdr)
#xM_lasa = jpd.pd_get_xM_lasa( pdr)
#xM = np.concatenate( [xM_fp, xM_key, xM_molw, xM_lasa], axis = 1)
"Normalize xM so as to be a set of unit norm random values"
xM = np.divide( xM_molw, np.std( xM_molw, axis = 0))
yV = jpd.pd_get_yV( pdr, y_id = 'exp').A1
X, Y = list(map( np.array, [xM, yV]))
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
data_sets.train = DataSet_CSV( X, Y, disp = disp)
    # The length of the descriptor vector is fed back.
data_sets.IMAGE_PIXELS = xM.shape[1]
return data_sets
################################
### Stability
################################
def read_data_sets_sd_logK( fname, y_id = "log_K_hyd", th_l = ['<', 2.81], shuffle = True,
validation_rate = 0, test_rate = 0, disp = False):
    # Define the closure class and helper functions.
class DataSets(object):
pass
def get_xMyV( fname, y_id):
pdr = pd.read_csv( fname)
xM_fp = jpd.pd_get_xM( pdr)
xM = xM_fp
yV = jpd.pd_get_yV( pdr, y_id = y_id)
return xM, yV
def do_shuffle( xM, yV):
idx_l = list(range(xM.shape[0]))
random.shuffle( idx_l) # inplace command
xM_sf = xM[ idx_l, :]
yV_sf = yV[ idx_l]
return xM_sf, yV_sf
def gen_bin_vec( yV, th_l):
if th_l[0] == '>':
            yV_bin = [1 if y > th_l[1] else 0 for y in yV] # binarize the labels for classification
else:
            yV_bin = [1 if y < th_l[1] else 0 for y in yV] # binarize the labels for classification
return yV_bin
#===================================================================
    # main code starts here
data_sets = DataSets()
xM, yV = get_xMyV( fname, y_id)
yv = yV.A1
if shuffle:
xM, yv = do_shuffle( xM, yv)
yv_bin = gen_bin_vec( yv, th_l)
X, Y = list(map( np.array, [xM, yv_bin]))
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
data_sets.train = DataSet_CSV( X, Y, disp = disp)
data_sets.IMAGE_PIXELS = xM.shape[1]
return data_sets
def read_data_sets_mol_gen( N_shape, sig = 0.1, validation_rate = 0, test_rate = 0, disp = False):
"""
    Here, new data are generated using numpy's randn().
    When disp is set, the type of each dataset is printed before its shape
    is shown.
"""
class DataSets(object):
pass
data_sets = DataSets()
    if isinstance(N_shape, int):
N_shape = ( N_shape, 1)
X = np.random.randn( *N_shape)
n = np.random.randn( N_shape[0], 1)
w = np.random.randn( N_shape[1], 1)
print("Weight vector is {}.".format( w))
# sig = 0.1 # this value can be updated later on.
Y = np.dot( X, w) + sig * n
#X, Y = map( np.array, [xM, yV])
assert X.shape[0] == Y.shape[0]
if test_rate > 0:
X, Y, X_test, Y_test = XY_split( X, Y, test_rate)
if disp:
print("Testing Dataset:")
data_sets.test = DataSet_CSV( X_test, Y_test, disp = disp)
if validation_rate > 0:
X, Y, X_val, Y_val = XY_split( X, Y, validation_rate)
if disp:
print("Validation Dataset:")
data_sets.validation = DataSet_CSV( X_val, Y_val, disp = disp)
# If test_rate and validation_rate are both zero,
# all data is allocated to train dataset.
if disp:
print("Training Dataset:")
data_sets.train = DataSet_CSV( X, Y, disp = disp)
return data_sets
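def _demo_read_data_sets_mol_gen():
    # Hedged usage sketch: build a synthetic regression set of 100 samples
    # with 5 features each, holding out 20% of the rows for testing.
    data_sets = read_data_sets_mol_gen((100, 5), sig=0.1, test_rate=0.2)
    xs, ys = data_sets.train.next_batch(10)
    assert xs.shape == (10, 5)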
"""
ALL_DATA_PICKLE = 'minst.pkl'
data_list = already_pikle( ALL_DATA_PICKLE, train_dir)
if not data_list:
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 5000
local_file = maybe_download(TRAIN_IMAGES, train_dir)
train_images = extract_images(local_file)
local_file = maybe_download(TRAIN_LABELS, train_dir)
train_labels = extract_labels(local_file, one_hot=one_hot)
local_file = maybe_download(TEST_IMAGES, train_dir)
test_images = extract_images(local_file)
local_file = maybe_download(TEST_LABELS, train_dir)
test_labels = extract_labels(local_file, one_hot=one_hot)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
    if fastload: # This works only when the fastload flag is turned on.
data_list = [train_images, train_labels,
validation_images, validation_labels,
test_images, test_labels]
save_pickle( ALL_DATA_PICKLE, train_dir, data_list)
else:
[train_images, train_labels,
validation_images, validation_labels,
test_images, test_labels] = data_list
data_sets.train = DataSet(train_images, train_labels)
data_sets.validation = DataSet(validation_images, validation_labels)
data_sets.test = DataSet(test_images, test_labels)
return data_sets
"""
class DataSet_CSV(object):
def __init__(self, images, labels, disp = False):
"""
        Construct a DataSet. The fake_data and one_hot args are kept only for
        interface compatibility; neither fake mode nor one-hot labels are
        supported by this class.
"""
assert images.shape[0] == labels.shape[0]
self._data_size = images.shape[0]
self._num_examples = images.shape[0]
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
# images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def data_size(self):
return self._data_size
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""
Return the next `batch_size` examples from this data set.
        The fake-data code path has been removed; the fake_data arg is kept
        only for interface compatibility.
"""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._data_size:
"""
        If the end of the data is reached, the remaining samples of the
        current pass are dropped, the data is shuffled, and reading restarts
        from the beginning of the shuffled dataset.
"""
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._data_size)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._data_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
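def _demo_next_batch():
    # Hedged sketch: after a full pass, next_batch drops the partial
    # remainder, reshuffles the data, and restarts from the beginning.
    ds = DataSet_CSV(np.zeros((10, 3), dtype=np.float32), np.zeros(10))
    for _ in range(3):  # consumes 9 of the 10 samples
        ds.next_batch(3)
    ds.next_batch(3)  # crosses the epoch boundary: reshuffle and restart
    assert ds.epochs_completed == 1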
class DataSet(object):
def __init__(self, images, labels, fake_data=False, one_hot=False):
"""Construct a DataSet. one_hot arg is used only if fake_data is true."""
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape,
labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1] * 784
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in range(batch_size)], [
fake_label for _ in range(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
|
|
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = '3.8'
__tabversion__ = '3.8'
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self, message, s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def critical(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re, findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = 'INITIAL' # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexstateeoff = {} # Dictionary of eof functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lexeoff = None # EOF rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = '' # Ignored characters
self.lexliterals = '' # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = False # Optimized mode
def clone(self, object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = {}
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object, f[0].__name__), f[1]))
newre.append((cre, newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = {}
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object, ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self, lextab, outputdir=''):
if isinstance(lextab, types.ModuleType):
raise IOError("Won't overwrite existing lextab module")
basetabmodule = lextab.split('.')[-1]
filename = os.path.join(outputdir, basetabmodule) + '.py'
with open(filename, 'w') as tf:
tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
tf.write('_tabversion = %s\n' % repr(__tabversion__))
tf.write('_lextokens = %s\n' % repr(self.lextokens))
tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
# Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
titem.append((retext, _funcs_to_names(func, renames)))
tabre[statename] = titem
tf.write('_lexstatere = %s\n' % repr(tabre))
tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
taberr = {}
for statename, ef in self.lexstateerrorf.items():
taberr[statename] = ef.__name__ if ef else None
tf.write('_lexstateerrorf = %s\n' % repr(taberr))
tabeof = {}
for statename, ef in self.lexstateeoff.items():
tabeof[statename] = ef.__name__ if ef else None
tf.write('_lexstateeoff = %s\n' % repr(tabeof))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self, tabfile, fdict):
if isinstance(tabfile, types.ModuleType):
lextab = tabfile
else:
exec('import %s' % tabfile)
lextab = sys.modules[tabfile]
if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
raise ImportError('Inconsistent PLY version')
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lextokens_all = self.lextokens | set(self.lexliterals)
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = {}
self.lexstateretext = {}
for statename, lre in lextab._lexstatere.items():
titem = []
txtitem = []
for pat, func_name in lre:
titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
self.lexstatere[statename] = titem
self.lexstateretext[statename] = txtitem
self.lexstateerrorf = {}
for statename, ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[statename] = fdict[ef]
self.lexstateeoff = {}
for statename, ef in lextab._lexstateeoff.items():
self.lexstateeoff[statename] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c, StringTypes):
raise ValueError('Expected a string')
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
if state not in self.lexstatere:
raise ValueError('Undefined state')
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state, '')
self.lexerrorf = self.lexstateerrorf.get(state, None)
self.lexeoff = self.lexstateeoff.get(state, None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
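    # Hedged sketch: push_state/pop_state are typically driven from token
    # rules to enter and leave a sub-language, e.g.
    #
    #     def t_lbrace(t):
    #         r'\{'
    #         t.lexer.push_state('ccode')  # enter an exclusive 'ccode' state
    #
    #     def t_ccode_rbrace(t):
    #         r'\}'
    #         t.lexer.pop_state()          # back to whatever state was active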
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
self.lexpos += n
# ------------------------------------------------------------
    # token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre, lexindexfunc in self.lexre:
m = lexre.match(lexdata, lexpos)
if not m:
continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func, tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
                # Every function must return a token; if it returns nothing, we just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if newtok.type not in self.lextokens_all:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.__code__.co_filename, func.__code__.co_firstlineno,
func.__name__, newtok.type), lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = 'error'
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok:
continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
if self.lexeoff:
tok = LexToken()
tok.type = 'eof'
tok.value = ''
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
self.lexpos = lexpos
newtok = self.lexeoff(tok)
return newtok
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError('No input string given with input()')
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
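# Hedged usage sketch of the engine above. Token rules are normally defined
# at module scope; they also work as locals here because lex() (defined
# below) merges the caller's locals into its rule dictionary.
def _demo_lexer():
    tokens = ('NUMBER', 'PLUS')
    t_PLUS = r'\+'
    t_ignore = ' \t'
    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t
    def t_error(t):
        t.lexer.skip(1)
    demo = lex()
    demo.input('1 + 22')
    for tok in demo:
        print(tok.type, tok.value)  # NUMBER 1, PLUS '+', NUMBER 22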
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
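# Hedged sketch: the two equivalent ways a rule's regex can be supplied
# (the @TOKEN decorator is provided later in the full module):
#
#   digit = r'\d+'
#
#   @TOKEN(digit)
#   def t_NUMBER(t):     # regex attached as the .regex attribute
#       ...
#
#   def t_NUMBER(t):
#       r'\d+'           # regex taken from the docstring
#       ...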
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
nonstate = 1
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
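# Worked example: with states 'foo' and 'bar' declared,
#   _statetoken('t_foo_bar_SPAM', {'foo': 1, 'bar': 1})
# returns (('foo', 'bar'), 'SPAM'), while
#   _statetoken('t_NUMBER', {'foo': 1, 'bar': 1})
# returns (('INITIAL',), 'NUMBER').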
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self, ldict, log=None, reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = {'INITIAL': 'inclusive'}
self.modules = set()
self.error = False
self.log = PlyLogger(sys.stderr) if log is None else log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'", n)
self.error = True
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get('literals', '')
if not self.literals:
self.literals = ''
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c, StringTypes) or len(c) > 1:
self.log.error('Invalid literal %s. Must be a single character', repr(c))
self.error = True
except TypeError:
self.log.error('Invalid literals specification. literals must be a sequence of characters')
self.error = True
def get_states(self):
self.states = self.ldict.get('states', None)
# Build statemap
if self.states:
if not isinstance(self.states, (tuple, list)):
self.log.error('states must be defined as a tuple or list')
self.error = True
else:
for s in self.states:
if not isinstance(s, tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
self.error = True
continue
name, statetype = s
if not isinstance(name, StringTypes):
self.log.error('State name %s must be a string', repr(name))
self.error = True
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
self.error = True
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined", name)
self.error = True
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_']
# Now build up a list of functions and a list of strings
self.toknames = {} # Mapping of symbols to token names
self.funcsym = {} # Symbols defined as functions
self.strsym = {} # Symbols defined as strings
self.ignore = {} # Ignore strings by state
self.errorf = {} # Error functions by state
self.eoff = {} # EOF functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error('No rules of the form t_rulename are defined')
self.error = True
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f, self.stateinfo)
self.toknames[f] = tokname
if hasattr(t, '__call__'):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'eof':
for s in states:
self.eoff[s] = t
elif tokname == 'ignore':
line = t.__code__.co_firstlineno
file = t.__code__.co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
self.error = True
else:
for s in states:
self.funcsym[s].append((f, t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if '\\' in t:
self.log.warning("%s contains a literal backslash '\\'", f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = True
else:
for s in states:
self.strsym[s].append((f, t))
else:
self.log.error('%s not defined as a function or string', f)
self.error = True
# Sort the functions by line number
for f in self.funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
s.sort(key=lambda x: len(x[1]), reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
continue
if not _get_regex(f):
self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
self.error = True
except re.error as e:
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
if '#' in _get_regex(f):
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
self.error = True
# Validate all rules defined by strings
for name, r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = True
continue
if tokname not in self.tokens and tokname.find('ignore_') < 0:
self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
if (c.match('')):
self.log.error("Regular expression for rule '%s' matches empty string", name)
self.error = True
except re.error as e:
self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
self.error = True
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'", state)
self.error = True
# Validate the error function
efunc = self.errorf.get(state, None)
if efunc:
f = efunc
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
for module in self.modules:
self.validate_module(module)
# -----------------------------------------------------------------------------
# validate_module()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the lexer input module. This is done using a simple regular expression
# match on each line in the source code of the given module.
# -----------------------------------------------------------------------------
def validate_module(self, module):
lines, linen = inspect.getsourcelines(module)
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = {}
linen += 1
for line in lines:
m = fre.match(line)
if not m:
m = sre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
self.error = True
linen += 1
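# For example, a module containing both of the following lines would be
# reported, since the function and the string define the same rule name:
#
#     def t_NUMBER(t): ...
#     t_NUMBER = r'\d+'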
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
if lextab is None:
lextab = 'lextab'
global lexer
ldict = None
stateinfo = {'INITIAL': 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token, input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object:
module = object
# Get the module dictionary used for the lexer
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in ldict:
ldict['__file__'] = sys.modules[ldict['__module__']].__file__
else:
ldict = get_caller_module_dict(2)
# Determine if the module is part of a package.
# If so, fix the lextab setting so that tables load correctly
pkg = ldict.get('__package__')
if pkg and isinstance(lextab, str):
if '.' not in lextab:
lextab = pkg + '.' + lextab
# Collect lexer information from the dictionary
linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab, ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info('lex: tokens = %r', linfo.tokens)
debuglog.info('lex: literals = %r', linfo.literals)
debuglog.info('lex: states = %r', linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = set()
for n in linfo.tokens:
lexobj.lextokens.add(n)
# Get literals specification
if isinstance(linfo.literals, (list, tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = {}
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
# Now add all of the simple rules
for name, r in linfo.strsym[state]:
regex_list.append('(?P<%s>%s)' % (name, r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i, text in enumerate(re_text):
debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state, stype in stateinfo.items():
if state != 'INITIAL' and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere['INITIAL']
lexobj.lexretext = lexobj.lexstateretext['INITIAL']
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
if not lexobj.lexerrorf:
errorlog.warning('No t_error rule is defined')
# Set up eof functions
lexobj.lexstateeoff = linfo.eoff
lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == 'exclusive':
if s not in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if s not in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if s not in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
if s not in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If lextab specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(lextab, types.ModuleType):
srcfile = lextab.__file__
else:
if '.' not in lextab:
srcfile = ldict['__file__']
else:
parts = lextab.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
try:
lexobj.writetab(lextab, outputdir)
except IOError as e:
errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write('Reading from standard input (type EOF to end):\n')
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator can be used to attach a regular expression to a rule
# function when supplying it via the docstring is impractical (for example,
# when the pattern is computed at runtime)
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_regex(f):
if hasattr(r, '__call__'):
f.regex = _get_regex(r)
else:
f.regex = r
return f
return set_regex
# Alternative spelling of the TOKEN decorator
Token = TOKEN
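# A minimal usage sketch (hypothetical token name and pattern, kept as a
# comment so it does not execute as part of this module): @TOKEN attaches
# the regular expression to a rule function in place of a docstring.
#
#     digit = r'[0-9]'
#     @TOKEN(digit + r'+')
#     def t_NUMBER(t):
#         t.value = int(t.value)
#         return t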
|
|
import pytest
from configyaml.config.nodes import StringNode, BoolNode
from .test_loader import DummyLoader, DummyComplexLoader
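# The tests below exercise variable substitution in StringNode/BoolNode:
# '$name' is resolved case-sensitively from the variables dict, '\$name'
# escapes to a literal '$name', and passing a non-dict for variables raises
# TypeError.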
def test_variable_match():
n = StringNode(value='$var', variables={'var': 'testing'})
assert n.is_valid()
assert n._value == 'testing'
assert n._as_dict() == {'value': 'testing'}
assert n._as_dict(redact=True) == {'value': '[REDACTED]', 'redacted': True}
def test_variable_no_match():
n = StringNode(value='$$var', variables={'var': 'testing'})
assert not n.is_valid()
assert n._value == '$$var'
assert n._errors[0].title == 'Variable not found'
assert n._as_dict() == {
'errors': [
{
'description': "'$var' was not found in ['var']",
'end_column': None,
'end_line': None,
'start_column': None,
'start_line': None,
'title': 'Variable not found',
}
],
'value': '$$var',
}
assert n._as_dict(redact=True) == {
'errors': [
{
'description': "'$var' was not found in ['var']",
'end_column': None,
'end_line': None,
'start_column': None,
'start_line': None,
'title': 'Variable not found',
}
],
'value': '$$var',
}
def test_variable_escaped():
n = StringNode(value=r'\$var', variables={'var': 'testing'})
assert n.is_valid()
assert n._value == '$var'
assert n._as_dict() == {'value': '$var'}
assert n._as_dict(redact=True) == {'value': '$var'}
def test_variable_case_sensitive():
n = StringNode(value='$VAR', variables={'var': 'testing'})
assert not n.is_valid()
assert n._value == '$VAR'
assert n._errors[0].title == 'Variable not found'
assert n._as_dict() == {
'errors': [
{
'description': "'VAR' was not found in ['var']",
'end_column': None,
'end_line': None,
'start_column': None,
'start_line': None,
'title': 'Variable not found',
}
],
'value': '$VAR',
}
assert n._as_dict(redact=True) == {
'errors': [
{
'description': "'VAR' was not found in ['var']",
'end_column': None,
'end_line': None,
'start_column': None,
'start_line': None,
'title': 'Variable not found',
}
],
'value': '$VAR',
}
n = StringNode(value='$vaR', variables={'vaR': 'testing'})
assert n.is_valid()
assert n._value == 'testing'
assert n._as_dict() == {'value': 'testing'}
assert n._as_dict(redact=True) == {'value': '[REDACTED]', 'redacted': True}
def test_no_variables():
n = StringNode(value='$var')
assert not n.is_valid()
assert n._value == '$var'
assert n._as_dict() == {
'errors': [
{
'description': "'var' was not found in []",
'end_column': None,
'end_line': None,
'start_column': None,
'start_line': None,
'title': 'Variable not found',
}
],
'value': '$var',
}
assert n._as_dict(redact=True) == {
'errors': [
{
'description': "'var' was not found in []",
'end_column': None,
'end_line': None,
'start_column': None,
'start_line': None,
'title': 'Variable not found',
}
],
'value': '$var',
}
def test_variables_type_error():
with pytest.raises(TypeError):
StringNode(value='$var', variables='testing')
def test_bool_node_variable():
n = BoolNode(value='$test', variables={'test': True})
assert n.is_valid()
assert n._value is True
assert n._as_dict() == {'value': True}
assert n._as_dict(redact=True) == {'value': '[REDACTED]', 'redacted': True}
def test_bool_node_variable_bad_type():
n = BoolNode(value='$test', variables={'test': 'ok'})
assert not n.is_valid()
assert n._value == 'ok'
assert n._as_dict() == {
'errors': [
{
'description': "boolnode must be a bool",
'end_column': None,
'end_line': None,
'start_column': None,
'start_line': None,
'title': 'boolnode has an invalid type',
}
],
'value': 'ok',
}
assert n._as_dict(redact=True) == {
'errors': [
{
'description': "boolnode must be a bool",
'end_column': None,
'end_line': None,
'start_column': None,
'start_line': None,
'title': 'boolnode has an invalid type',
}
],
'value': '[REDACTED]',
'redacted': True,
}
def test_loader_with_variables():
value = "{ 'foo': '$bar'}"
loader = DummyLoader(value, variables={'bar': 'testing'})
assert loader.is_valid()
assert loader.as_text() == "{ 'foo': '$bar'}"
assert loader.as_dict() == {
'config': {'foo': {'value': 'testing'}}, 'config_text': "{ 'foo': '$bar'}"
}
assert loader.as_dict(redact=True) == {
'config': {'foo': {'redacted': True, 'value': '[REDACTED]'}},
'config_text': "{ 'foo': '$bar'}",
}
def test_loader_with_missing_variable():
value = "{ 'foo': '$bar'}"
loader = DummyLoader(value, variables={'boo': 'testing'})
assert not loader.is_valid()
assert loader.errors[0].title == 'Variable not found'
assert loader.as_text() == """{ 'foo': '$bar'}
# { 'foo': '$bar'}
# ^^^^^^
# --------
# Variable not found
# - 'bar' was not found in ['boo']
# --------"""
assert loader.as_dict() == {
'config': {
'foo': {
'errors': [
{
'description': "'bar' was not found in ['boo']",
'end_column': 15,
'end_line': 0,
'start_column': 9,
'start_line': 0,
'title': 'Variable not found',
}
],
'value': '$bar',
}
},
'config_text': "{ 'foo': '$bar'}",
'errors': [
{
'description': "'bar' was not found in ['boo']",
'end_column': 15,
'end_line': 0,
'start_column': 9,
'start_line': 0,
'title': 'Variable not found',
}
],
}
assert loader.as_dict(redact=True) == {
'config': {
'foo': {
'errors': [
{
'description': "'bar' was not found in ['boo']",
'end_column': 15,
'end_line': 0,
'start_column': 9,
'start_line': 0,
'title': 'Variable not found',
}
],
'value': '$bar',
}
},
'config_text': "{ 'foo': '$bar'}",
'errors': [
{
'description': "'bar' was not found in ['boo']",
'end_column': 15,
'end_line': 0,
'start_column': 9,
'start_line': 0,
'title': 'Variable not found',
}
],
}
def test_loader_with_nested_variables():
value = 'foo: $list_var\nbar: yay'
variables = {'list_var': ['one', 'two', '$three_var'], 'three_var': 'THREE!'}
loader = DummyComplexLoader(value, variables=variables)
assert loader.is_valid()
assert loader.as_text() == 'foo: $list_var\nbar: yay'
assert loader.as_dict() == {
'config': {
'bar': {'value': 'yay'},
'foo': {'items': [{'value': 'one'}, {'value': 'two'}, {'value': 'THREE!'}]},
},
'config_text': 'foo: $list_var\nbar: yay',
}
assert loader.as_dict(redact=True) == {
'config': {
'bar': {'value': 'yay'}, 'foo': {'redacted': True, 'value': '[REDACTED]'}
},
'config_text': 'foo: $list_var\nbar: yay',
}
def test_loader_with_nested_list_variable_missing():
value = 'foo: $list_var\nbar: yay'
variables = {
'list_var': ['one', 'two', '$three_var'], 'threeve': 'THREE!' # one key
}
loader = DummyComplexLoader(value, variables=variables)
assert not loader.is_valid()
text = loader.as_text()
assert text == """foo: $list_var
# foo: $list_var
# ^^^^^^^^^
# --------
# Variable not found
# - 'three_var' was not found in ['list_var', 'threeve']
# --------
bar: yay"""
# the variable content should not be visible, only the variable keys
assert '$three_var' not in text
assert loader.as_dict() == {
'config': {
'bar': {'value': 'yay'},
'foo': {
'items': [
{'value': 'one'},
{'value': 'two'},
{
'errors': [
{
'description': "'three_var' was not "
'found in '
"['list_var', "
"'threeve']",
'end_column': 14,
'end_line': 0,
'start_column': 5,
'start_line': 0,
'title': 'Variable not found',
}
],
'value': '$three_var',
},
]
},
},
'config_text': 'foo: $list_var\nbar: yay',
'errors': [
{
'description': "'three_var' was not found in ['list_var', "
"'threeve']",
'end_column': 14,
'end_line': 0,
'start_column': 5,
'start_line': 0,
'title': 'Variable not found',
}
],
}
assert loader.as_dict(redact=True) == {
'config': {
'bar': {'value': 'yay'}, 'foo': {'redacted': True, 'value': '[REDACTED]'}
},
'config_text': 'foo: $list_var\nbar: yay',
'errors': [
{
'description': "'three_var' was not found in ['list_var', "
"'threeve']",
'end_column': 14,
'end_line': 0,
'start_column': 5,
'start_line': 0,
'title': 'Variable not found',
}
],
}
|
|
"""
Tool for handling UnTRIM data specific to the SUNTANS model
M.Rayson
Stanford University
March 2014
"""
from sunpy import Grid, Spatial
import numpy as np
import matplotlib.pyplot as plt
import pdb
# Dictionary containing the suntans-untrim equivalent grid variables
untrim_gridvars = {\
'xp':'Mesh2_node_x',\
'yp':'Mesh2_node_y',\
'xv':'Mesh2_face_x',\
'yv':'Mesh2_face_y',\
'xe':'Mesh2_edge_x',\
'ye':'Mesh2_edge_y',\
'mark':'Mesh2_edge_bc',\
'edges':'Mesh2_edge_nodes',\
'grad':'Mesh2_edge_faces',\
'cells':'Mesh2_face_nodes',\
'face':'Mesh2_face_edges',\
'dv':'Mesh2_face_depth',\
'z_r':'Mesh2_layer_3d',\
'time':'Mesh2_data_time'\
}
# Dictionary containing the suntans-untrim equivalent grid dimensions
untrim_griddims = {\
'Np':'nMesh2_node',\
'Ne':'nMesh2_edge',\
'Nc':'nMesh2_face',\
'Nkmax':'nMesh2_layer_3d',\
'Nk':'nMesh2_layer_3d',\
'numsides':'nMaxMesh2_face_nodes',\
'Two':'Two'\
}
######
# The untrim netcdf data format
######
untrim_ugrid={}
fillval=999999.0
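# Each variable below follows the same pattern: define vname, dimensions,
# attributes and dtype, then register them in untrim_ugrid via update().
# Entries written with zlib=True also carry a compression level and the
# fill value defined above.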
vname = 'Mesh2'
dimensions = ()
attributes = {
'dimension': 2,\
'cf_role': "mesh_topology" ,\
'long_name': "Topology data of 2D untrim mesh" ,\
'node_coordinates': "Mesh2_node_x Mesh2_node_y",\
'edge_coordinates': "Mesh2_edge_x Mesh2_edge_y",\
'face_coordinates': "Mesh2_face_x Mesh2_face_y",\
'edge_node_connectivity': "Mesh2_edge_nodes",\
'edge_face_connectivity': "Mesh2_edge_faces",\
'face_node_connectivity': "Mesh2_face_nodes",\
'face_edge_connectivity': "Mesh2_face_edges" \
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_node_x'
dimensions = ('nMesh2_node',)
attributes = {
'long_name': "x-Coordinate of untrim grid node" ,\
'units':'m'
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_node_y'
dimensions = ('nMesh2_node',)
attributes = {
'long_name': "y-Coordinate of untrim grid node" ,\
'units':'m'
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_face_x'
dimensions = ('nMesh2_face',)
attributes = {
'long_name': "x-Coordinate of untrim grid face" ,\
'units':'m'
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_face_y'
dimensions = ('nMesh2_face',)
attributes = {
'long_name': "y-Coordinate of untrim grid face" ,\
'units':'m'
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_edge_x'
dimensions = ('nMesh2_edge',)
attributes = {
'long_name': "x-Coordinate of untrim polygon edge" ,\
'units':'m'
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_edge_y'
dimensions = ('nMesh2_edge',)
attributes = {
'long_name': "y-Coordinate of untrim polygon edge" ,\
'units':'m'
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_edge_bc'
dimensions = ('nMesh2_edge',)
attributes = {
'long_name': "untrim polygon edge boundary condition" ,\
'flags':'none closed dirichlet'
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_face_bc'
dimensions = ('nMesh2_face',)
attributes = {
'long_name': "untrim polygon boundary condition" ,\
'flags':'none water_level'
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_edge_faces'
dimensions = ('nMesh2_edge','Two',)
attributes = {
'long_name': "Maps every edge to its bounding faces" ,\
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_edge_nodes'
dimensions = ('nMesh2_edge','Two',)
attributes = {
'long_name': "Maps every edge to its end nodes " ,\
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_face_nodes'
dimensions = ('nMesh2_face','nMaxMesh2_face_nodes',)
attributes = {
'long_name': "Maps every polygon face its corner nodes" ,\
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_face_edges'
dimensions = ('nMesh2_face','nMaxMesh2_face_nodes',)
attributes = {
'long_name': "Maps every polygon face its edges" ,\
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_edge_depth'
dimensions = ('nMesh2_edge',)
attributes = {
'long_name': "Maximum depth of edge" ,\
'units':'m',\
'positive':'down',\
'coordinates': "Mesh2_edge_x Mesh2_edge_y" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_face_depth'
dimensions = ('nMesh2_face',)
attributes = {
'long_name': "Maximum depth of face" ,\
'units':'m',\
'positive':'down',\
'coordinates': "Mesh2_face_x Mesh2_face_y" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_layer_3d'
dimensions = ('nMesh2_layer_3d',)
attributes = {
'long_name': "elevation of layer" ,\
'units':'m',\
'positive':'up',\
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_data_time'
dimensions = ('nMesh2_data_time',)
attributes = {
'units': "days since 1899-12-31 00:00:00.0" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_data_time_string'
dimensions = ('date_string_length','nMesh2_data_time')
attributes = {
'long_name':'data time string',\
'units': "yyyy-mm-dd HH:MM:SS" \
}
dtype = 'c'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'h_flow_avg'
dimensions = ('nMesh2_edge','nMesh2_layer_3d','nMesh2_data_time')
attributes = {
'long_name': "Horizontal volume flux averaged over integration interval" ,\
'units':'m3 s-1',\
'coordinates': "Mesh2_edge_x Mesh2_edge_y Mesh2_edge_z_3d" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'v_flow_avg'
dimensions = ('nMesh2_face','nMesh2_layer_3d','nMesh2_data_time')
attributes = {
'long_name': "Vertical volume flux averaged over integration interval" ,\
'units':'m3 s-1',\
'coordinates': "Mesh2_face_x Mesh2_face_y Mesh2_edge_z_3d" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_edge_wet_area'
dimensions = ('nMesh2_edge','nMesh2_layer_3d','nMesh2_data_time')
attributes = {
'long_name': "sea area" ,\
'units':'m2',\
'coordinates': "Mesh2_edge_x Mesh2_edge_y Mesh2_edge_z_3d" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_face_wet_area'
dimensions = ('nMesh2_face','nMesh2_layer_3d','nMesh2_data_time')
attributes = {
'long_name': "sea area" ,\
'units':'m2',\
'coordinates': "Mesh2_face_x Mesh2_face_y Mesh2_edge_z_3d" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_edge_bottom_layer'
dimensions = ('nMesh2_edge','nMesh2_data_time')
attributes = {
'long_name': "bottom most active layer (from either side of edge)" ,\
'units':'',\
'coordinates': "Mesh2_edge_x Mesh2_edge_y" \
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_edge_top_layer'
dimensions = ('nMesh2_edge','nMesh2_data_time')
attributes = {
'long_name': "top most active layer (from either side of edge)" ,\
'units':'',\
'coordinates': "Mesh2_edge_x Mesh2_edge_y" \
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_face_bottom_layer'
dimensions = ('nMesh2_face','nMesh2_data_time')
attributes = {
'long_name': "bottom most active layer (from either side of edge)" ,\
'units':'',\
'coordinates': "Mesh2_face_x Mesh2_face_y" \
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_face_top_layer'
dimensions = ('nMesh2_face','nMesh2_data_time')
attributes = {
'long_name': "top most active layer (from either side of edge)" ,\
'units':'',\
'coordinates': "Mesh2_face_x Mesh2_face_y" \
}
dtype = 'i4'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':False}})
vname = 'Mesh2_face_water_volume'
dimensions = ('nMesh2_face','nMesh2_layer_3d','nMesh2_data_time')
attributes = {
'long_name': "Water prism volume" ,\
'units':'m3',\
'coordinates': "Mesh2_face_x Mesh2_face_y Mesh2_edge_z_3d" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_salinity_3d'
dimensions = ('nMesh2_face','nMesh2_layer_3d','nMesh2_data_time')
attributes = {
'long_name': "salinity" ,\
'units':'psu',\
'coordinates': "Mesh2_face_x Mesh2_face_y Mesh2_edge_z_3d" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_vertical_diffusivity_3d'
dimensions = ('nMesh2_face','nMesh2_layer_3d','nMesh2_data_time')
attributes = {
'long_name': "vertical diffusivity" ,\
'units':'m2 s-1',\
'coordinates': "Mesh2_face_x Mesh2_face_y Mesh2_edge_z_3d" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
vname = 'Mesh2_sea_surface_elevation'
dimensions = ('nMesh2_face','nMesh2_data_time')
attributes = {
'long_name': "sea surface elevation" ,\
'units':'m',\
'coordinates': "Mesh2_face_x Mesh2_face_y" \
}
dtype = 'f8'
untrim_ugrid.update({vname:{'dimensions':dimensions,'attributes':attributes,'dtype':dtype,'zlib':True,'complevel':1,'fill_value':fillval}})
class UNTRIMGrid(Grid):
"""
UnTRIM grid class for loading from a netcdf file
"""
def __init__(self,ncfile):
self.ncfile=ncfile
Grid.__init__(self,ncfile,gridvars=untrim_gridvars,griddims=untrim_griddims)
class UNTRIMSpatial(Spatial):
"""
Class for handling UnTRIM netcdf hydrodynamic output
"""
_FillValue = -9999
def __init__(self,ncfile,**kwargs):
Spatial.__init__(self,ncfile,gridvars=untrim_gridvars,griddims=untrim_griddims,**kwargs)
# Make sure the number of faces array is correct
self.nfaces = np.sum(~self.cells.mask, axis=1)
self.xy = self.cellxy()
def loadDataRaw(self,variable=None):
"""
Overloaded method - Untrim variables use a different axis order:
Untrim: [Nc,Nk,Nt] or [Nc,Nt]
SUNTANS: [Nt,Nk,Nc] or [Nt,Nc]
Load the specified suntans variable data as a vector
"""
if variable is None:
variable=self.variable
if self.hasDim(variable,self.griddims['Ne']) and self.j is None:
j=range(self.Ne)
elif self.hasDim(variable,self.griddims['Nc']) and self.j is None:
j=range(self.Nc)
else:
j = self.j
nc = self.nc
try:
self.long_name = nc.variables[variable].long_name
except AttributeError:
self.long_name = ''
self.units= nc.variables[variable].units
# ndims = len(nc.variables[variable].dimensions)
ndim = nc.variables[variable].ndim
if ndim==1:
self.data=nc.variables[variable][j]
elif ndim==2:
#print self.j
data=nc.variables[variable][j,self.tstep]
self.data = data.swapaxes(0,1)
else:
if self.klayer[0]==-1: # grab the seabed values
raise Exception('Seabed extraction not implemented for UnTRIM')
#klayer = np.arange(0,self.Nkmax)
##if type(self.tstep)==int:
#if isinstance(self.tstep,(int,long)):
# data=nc.variables[variable][self.tstep,klayer,j]
# self.data = data[self.Nk[j],j]
#else: # need to extract timestep by timestep for animations to save memory
# self.data=np.zeros((len(self.tstep),len(j)))
# i=-1
# for t in self.tstep:
# i+=1
# data=nc.variables[variable][t,klayer,j]
# self.data[i,:] = data[self.Nk[j],j]
elif self.klayer[0]==-99: # Grab all layers
klayer = np.arange(0,self.Nkmax)
#self.data=nc.variables[variable][self.tstep,klayer,j]
data=nc.variables[variable][j,klayer,self.tstep]
self.data = data.swapaxes(0,2)
else:
klayer = self.klayer
data=nc.variables[variable][j,klayer,self.tstep]
if data.ndim==3:
self.data = data.swapaxes(0,2)
else:
self.data=data
# Mask the data
self.mask = self.data==self._FillValue
self.data[self.mask]=0.
self.data = self.data.squeeze()
return self.data
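# A minimal numpy sketch of the axis reordering used in loadDataRaw above
# (shapes chosen purely for illustration): UnTRIM stores (Nc, Nk, Nt), and
# swapping axes 0 and 2 gives the SUNTANS-style (Nt, Nk, Nc) ordering.
#
#     >>> import numpy as np
#     >>> a = np.zeros((10, 4, 3))   # (Nc, Nk, Nt)
#     >>> a.swapaxes(0, 2).shape     # -> (3, 4, 10), i.e. (Nt, Nk, Nc)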
|
|
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epoll', 'poll', 'poll-cv']
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
# Config: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None, environ={}, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
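# A minimal sketch of how a config is used (names are illustrative; note that
# job_spec also reads the module-level 'args' parsed from the command line
# below, so it is only callable after argument parsing):
#
#     cfg = Config('opt', timeout_multiplier=2)
#     spec = cfg.job_spec(['bins/opt/foo_test'], shortname='foo_test')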
def get_c_tests(travis, test_lang):
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
if tgt['language'] == test_lang and
platform_string() in tgt[platforms_str] and
not (travis and tgt['flaky'])]
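# An illustrative (hypothetical) tests.json entry consumed above; only the
# fields this script reads are shown:
#
#     {"name": "alarm_test", "language": "c", "args": [],
#      "platforms": ["linux", "mac", "posix"], "ci_platforms": ["linux"],
#      "flaky": false, "cpu_cost": 1.0, "exclude_configs": [],
#      "uses_polling": true}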
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple(
'_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
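# For example (illustrative): on Windows with major='2', minor='7', bits='64'
# this yields '/c/Python27/python.exe'; on other platforms it yields
# 'python2.7'.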
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(self.args.arch)]
else:
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV '
try:
cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True)
else ['all'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/lib/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY': polling_strategy,
'GRPC_VERBOSITY': 'DEBUG'}
shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
timeout_scaling = 1
if polling_strategy == 'poll-cv':
timeout_scaling *= 5
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
binary = 'vsprojects/%s%s/%s.exe' % (
'x64/' if self.args.arch == 'x64' else '',
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
# complete list of the tests contained in a binary
# for each test, we then add a job to run, filtering for just that
# test
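# An illustrative --gtest_list_tests output (suite lines end with '.',
# indented lines are individual tests, and anything after '#' is stripped):
#
#     FooTest.
#       DoesBar
#       DoesBaz  # GetParam() = ...
#
# which yields the filters 'FooTest.DoesBar' and 'FooTest.DoesBaz'.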
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output([binary, '--gtest_list_tests'],
stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary] + ['--gtest_filter=%s' % test]
out.append(self.config.job_spec(cmdline,
shortname='%s --gtest_filter=%s %s' % (binary, test, shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
out.append(self.config.job_spec(cmdline,
shortname=' '.join(
pipes.quote(arg)
for arg in cmdline) +
shortname_ext,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target]
def make_options(self):
return self._make_options
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
return ['CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix,
'LDXX=clang++%s' % version_suffix]
def _gcc_make_options(self, version_suffix):
return ['CC=gcc%s' % version_suffix,
'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix,
'LDXX=g++%s' % version_suffix]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.4':
return ('wheezy', self._gcc_make_options(version_suffix='-4.4'))
elif compiler == 'gcc4.6':
return ('wheezy', self._gcc_make_options(version_suffix='-4.6'))
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
class NodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6',
'node7', 'electron1.3'])
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '4'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
if self.platform == 'windows':
return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
else:
run_script = 'run_node'
if self.runtime == 'electron':
run_script += '_electron'
return [self.config.job_spec(['tools/run_tests/helper_scripts/{}.sh'.format(run_script),
self.node_version],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
build_script = 'pre_build_node'
if self.runtime == 'electron':
build_script += '_electron'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script), self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_node.bat']]
else:
build_script = 'build_node'
if self.runtime == 'electron':
build_script += '_electron'
# building for electron requires a patch version
self.node_version += '.0'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script), self.node_version]]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(collections.namedtuple('PythonConfig', [
'name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
return [self.config.job_spec(
config.run,
timeout_seconds=5*60,
environ=dict(list(environment.items()) +
[('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json
for config in self.pythons]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
def python_manager_name(self):
return 'pyenv' if self.args.compiler in ['python3.5', 'python3.6'] else 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(name='py27', major='2',
minor='7', bits=bits,
config_vars=config_vars)
python34_config = _python_config_generator(name='py34', major='3',
minor='4', bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(name='py35', major='3',
minor='5', bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(name='py36', major='3',
minor='6', bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(name='pypy', major='2',
config_vars=config_vars)
pypy32_config = _pypy_config_generator(name='pypy3', major='3',
config_vars=config_vars)
if args.compiler == 'default':
if os.name == 'nt':
return (python27_config,)
else:
return (python27_config, python34_config,)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
# Explicitly choosing between x86 and x64 arch doesn't work yet
_check_arch(self.args.arch, ['default'])
# CoreCLR uses the 64bit runtime by default.
arch_option = 'x64' if self.args.compiler == 'coreclr' else self.args.arch
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(arch_option)]
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
if self.platform == 'linux' and self.args.compiler == 'coreclr':
self._docker_distro = 'coreclr'
else:
self._docker_distro = 'jessie'
if self.platform == 'mac':
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
self._make_options = ['EMBED_OPENSSL=true']
if self.args.compiler != 'coreclr':
# On Mac, official distribution of mono is 32bit.
self._make_options += ['CFLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp1.0'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
nunit_args += ['--noresult', '--workers=1']
if self.platform == 'windows':
runtime_cmd = []
else:
runtime_cmd = ['mono']
specs = []
for assembly in tests_by_assembly.iterkeys():
assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
assembly_subdir,
assembly,
assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
specs.append(self.config.job_spec(cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file,
'-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*',
'-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(self.config.job_spec(cmdline,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
return self._make_options
def build_steps(self):
if self.args.compiler == 'coreclr':
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp_coreclr.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp_coreclr.sh']]
else:
if self.platform == 'windows':
return [[_windows_build_bat(self.args.compiler),
'src/csharp/Grpc.sln',
'/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [
self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
timeout_seconds=60*60,
shortname='objc-tests',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
timeout_seconds=30*60,
shortname='objc-examples-build',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['interop_server']
def make_options(self):
return []
def build_steps(self):
return [['src/objective-c/tests/build_tests.sh']]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
environ={'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [self.config.job_spec(cmd['script'].split(),
timeout_seconds=30*60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
class NodeExpressLanguage(object):
"""Dummy Node express test target to enable running express performance
benchmarks"""
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6'])
if self.args.compiler == 'default':
self.node_version = '4'
else:
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
return []
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node_express'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'node': NodeLanguage(),
'node_express': NodeExpressLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc' : ObjCLanguage(),
'sanity': Sanity()
}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print('Architecture %s does not match current runtime architecture.' % arch)
sys.exit(1)
else:
if args.arch != 'default':
print('Architecture %s not supported on current platform.' % args.arch)
sys.exit(1)
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
elif compiler == 'vs2010':
return 'vsprojects\\build_vs2010.bat'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
elif compiler == 'vs2010':
return '/p:PlatformToolset=v100'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxilary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
except:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
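# Illustrative behaviour of the parser above (a sketch, not executed here):
#   runs_per_test_type('inf') -> 0   (interpreted as an infinite number of runs)
#   runs_per_test_type('3')   -> 3
#   runs_per_test_type('0') or runs_per_test_type('abc') raises
#   argparse.ArgumentTypeError.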
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c', '--config',
choices=sorted(_CONFIGS.keys()),
default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-f', '--forever',
default=False,
action='store_const',
const=True)
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--newline_on_success',
default=False,
action='store_const',
const=True)
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES.keys()),
nargs='+',
default=['all'])
argp.add_argument('-S', '--stop_on_failure',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
choices=['default', 'x86', 'x64'],
default='default',
help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
choices=['default',
'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2010', 'vs2013', 'vs2015',
'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3',
'node0.12', 'node4', 'node5', 'node6', 'node7',
'electron1.3',
'coreclr'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--iomgr_platform',
choices=['native', 'uv'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
                  help="Perform all the build steps but don't run any tests.")
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
help='Update some submodules before building. If any are updated, also run generate_projects. ' +
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument('--quiet_success',
default=False,
action='store_const',
const=True,
                  help="Don't print anything when a test passes. Passing tests also will not be reported in XML report. " +
'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                  help="Don't try to iterate over many polling strategies when they exist")
args = argp.parse_args()
if args.force_default_poller:
_POLLING_STRATEGIES = {}
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
subprocess.check_call('git %s' % cmd, cwd=cwd, shell=True)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
subprocess.check_call('tools/buildgen/generate_projects.sh', shell=True)
else:
print('WARNING: may need to regenerate projects, but since we are not on')
print(' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
lang_list = _LANGUAGES.keys()
else:
lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
for bad in ['objc', 'sanity']:
if bad in lang_list:
lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
l.configure(run_config, args)
language_make_options=[]
if any(language.make_options() for language in languages):
if not 'gcov' in args.config and len(languages) != 1:
print('languages with custom make options cannot be built simultaneously with other languages')
sys.exit(1)
else:
language_make_options = next(iter(languages)).make_options()
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
print ('Using multilang_jessie_x64 docker image for code coverage for '
'all languages.')
else:
print ('Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call(['tools/run_tests/dockerize/build_docker_and_run_tests.sh'],
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
extra_args = []
    # Run parallel compilation: empirically /m:2 gives the best
    # performance/price ratio and should prevent overloading the
    # Windows workers.
extra_args.extend(['/m:2'])
# disable PDB generation: it's broken, and we don't need it during CI
extra_args.extend(['/p:Jenkins=true'])
return [
jobset.JobSpec([_windows_build_bat(args.compiler),
'vsprojects\\%s.sln' % target,
'/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
extra_args +
language_make_options,
shell=True, timeout_seconds=None)
for target in targets]
else:
if targets:
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-f', makefile,
'-j', '%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
'CONFIG=%s' % cfg] +
language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) +
targets,
timeout_seconds=None)]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
build_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
for l in languages
for cmdline in l.build_steps()))
post_tests_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages
for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
try:
version = int(urllib.request.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen(
'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _start_port_server(port_server_port):
# check if a compatible port server is running
# if incompatible (version mismatch) ==> start a new one
# if not running ==> start a new one
# otherwise, leave it up
try:
version = int(urllib.request.urlopen(
'http://localhost:%d/version_number' % port_server_port,
timeout=10).read())
print('detected port server running version %d' % version)
running = True
except Exception as e:
print('failed to detect port server: %s' % sys.exc_info()[0])
print(e.strerror)
running = False
if running:
current_version = int(subprocess.check_output(
[sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'dump_version']))
print('my port server is version %d' % current_version)
running = (version >= current_version)
if not running:
print('port_server version mismatch: killing the old one')
urllib.request.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read()
time.sleep(1)
if not running:
fd, logfile = tempfile.mkstemp()
os.close(fd)
print('starting port_server, with log file %s' % logfile)
args = [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'-p', '%d' % port_server_port, '-l', logfile]
env = dict(os.environ)
env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
if platform_string() == 'windows':
# Working directory of port server needs to be outside of Jenkins
# workspace to prevent file lock issues.
tempdir = tempfile.mkdtemp()
port_server = subprocess.Popen(
args,
env=env,
cwd=tempdir,
creationflags = 0x00000008, # detached process
close_fds=True)
else:
port_server = subprocess.Popen(
args,
env=env,
preexec_fn=os.setsid,
close_fds=True)
time.sleep(1)
# ensure port server is up
waits = 0
while True:
if waits > 10:
print('killing port server due to excessive start up waits')
port_server.kill()
if port_server.poll() is not None:
print('port_server failed to start')
# try one final time: maybe another build managed to start one
time.sleep(1)
try:
urllib.request.urlopen('http://localhost:%d/get' % port_server_port,
timeout=1).read()
print('last ditch attempt to contact port server succeeded')
break
except:
traceback.print_exc()
          with open(logfile, 'r') as f:
            port_log = f.read()
print(port_log)
sys.exit(1)
try:
urllib.request.urlopen('http://localhost:%d/get' % port_server_port,
timeout=1).read()
print('port server is up and ready')
break
except socket.timeout:
print('waiting for port_server: timeout')
        traceback.print_exc()
time.sleep(1)
waits += 1
except urllib.error.URLError:
print('waiting for port_server: urlerror')
        traceback.print_exc()
time.sleep(1)
waits += 1
except:
traceback.print_exc()
port_server.kill()
raise
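# For reference, the tiny HTTP interface of the port server that the function
# above relies on (inferred from the calls it makes; a sketch, not a spec):
#   GET /version_number -> integer version of the running server
#   GET /get            -> reserves and returns a free port
#   GET /quitquitquit   -> asks the running server to shut down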
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
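# A quick worked example for the helper above (illustrative only): a JobResult
# with retries=2 and num_failures=1 counts as 1 + 2 = 3 runs and 1 failure, so
# a list of three such results yields (num_runs, num_failures) == (9, 3).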
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
check_cancelled, newline_on_success, xml_report=None, build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
return []
# start antagonists
antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
port_server_port = 32766
_start_port_server(port_server_port)
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(
spec
for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
    # When running on travis, we want our test runs to be as similar as possible
    # for reproducibility purposes.
if args.travis:
massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(one_run) # random.shuffle needs an indexable seq.
random.shuffle(massaged_one_run) # which it modifies in-place.
if infinite_runs:
assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port},
quiet_success=args.quiet_success)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message(
'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if xml_report and resultset:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
number_failures, _ = jobset.run(
post_tests_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
    previous_success = success
    errors = _build_and_run(check_cancelled=have_files_changed,
                            newline_on_success=False,
                            build_only=args.build_only)
    success = not errors
    if not previous_success and success:
jobset.message('SUCCESS',
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
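# The exit code assembled above is a bitmask: bit 0 signals a build failure,
# bit 1 a test failure and bit 2 a post-test failure. A hypothetical CI
# wrapper could decode it along these lines (sketch only, not part of this
# script):
#
#   rc = subprocess.call(['python', 'tools/run_tests/run_tests.py', '-l', 'c'])
#   if rc & 1:
#       print('build step failed')
#   if rc & 2:
#       print('at least one test failed')
#   if rc & 4:
#       print('a post-test step failed')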
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pacemaker_cluster
short_description: Manage a pacemaker cluster
version_added: "2.3"
author: "Mathieu Bultel (matbu)"
description:
- This module can manage a pacemaker cluster and nodes from Ansible using
the pacemaker cli.
options:
state:
description:
- Indicate desired state of the cluster
choices: ['online', 'offline', 'restart', 'cleanup']
required: true
node:
description:
- Specify which node of the cluster you want to manage. None == the
cluster status itself, 'all' == check the status of all nodes.
required: false
default: None
timeout:
description:
      - Timeout, in seconds, after which the module considers that the action has failed
required: false
default: 300
force:
description:
- Force the change of the cluster state
required: false
default: true
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
---
- name: Set cluster Online
hosts: localhost
gather_facts: no
tasks:
- name: get cluster state
pacemaker_cluster: state=online
'''
RETURN = '''
changed:
description: True if the cluster state has changed
type: bool
returned: always
out:
  description: The output of the current state of the cluster. It returns a
    list of the node states.
type: string
sample: 'out: [[" overcloud-controller-0", " Online"]]}'
returned: always
rc:
description: exit code of the module
  type: int
returned: always
'''
import time
from ansible.module_utils.basic import AnsibleModule
_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
def get_cluster_status(module):
cmd = "pcs cluster status"
rc, out, err = module.run_command(cmd)
    if _PCS_CLUSTER_DOWN in out:
return 'offline'
else:
return 'online'
def get_node_status(module, node='all'):
    if node == 'all':
        cmd = "pcs cluster pcsd-status"
    else:
        cmd = "pcs cluster pcsd-status %s" % node
rc, out, err = module.run_command(cmd)
    if rc != 0:
module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
status = []
for o in out.splitlines():
status.append(o.split(':'))
return status
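# For illustration: a `pcs cluster pcsd-status` output line such as
#   "node1: Online"
# is split on ':' above, yielding ['node1', ' Online']; callers therefore
# compare node[1].strip().lower() against the requested state.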
def clean_cluster(module, timeout):
cmd = "pcs resource cleanup"
rc, out, err = module.run_command(cmd)
    if rc != 0:
module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
def set_cluster(module, state, timeout, force):
if state == 'online':
cmd = "pcs cluster start"
if state == 'offline':
cmd = "pcs cluster stop"
if force:
cmd = "%s --force" % cmd
rc, out, err = module.run_command(cmd)
    if rc != 0:
module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
    t = time.time()
    ready = False
    while time.time() < t + timeout:
        cluster_state = get_cluster_status(module)
        if cluster_state == state:
            ready = True
            break
        time.sleep(1)
if not ready:
module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
def set_node(module, state, timeout, force, node='all'):
# map states
if state == 'online':
cmd = "pcs cluster start"
if state == 'offline':
cmd = "pcs cluster stop"
if force:
cmd = "%s --force" % cmd
nodes_state = get_node_status(module, node)
for node in nodes_state:
if node[1].strip().lower() != state:
cmd = "%s %s" % (cmd, node[0].strip())
rc, out, err = module.run_command(cmd)
    if rc != 0:
module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
    t = time.time()
    ready = False
    while time.time() < t + timeout:
        nodes_state = get_node_status(module)
        for node in nodes_state:
            if node[1].strip().lower() == state:
                ready = True
                break
        if ready:
            break
        time.sleep(1)
if not ready:
module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
def main():
    argument_spec = dict(
        state=dict(choices=['online', 'offline', 'restart', 'cleanup'],
                   required=True),
        node=dict(default=None),
        timeout=dict(default=300, type='int'),
        force=dict(default=True, type='bool'),
    )
module = AnsibleModule(argument_spec,
supports_check_mode=True,
)
changed = False
state = module.params['state']
node = module.params['node']
force = module.params['force']
timeout = module.params['timeout']
if state in ['online', 'offline']:
# Get cluster status
if node is None:
cluster_state = get_cluster_status(module)
if cluster_state == state:
module.exit_json(changed=changed,
out=cluster_state)
else:
set_cluster(module, state, timeout, force)
cluster_state = get_cluster_status(module)
if cluster_state == state:
module.exit_json(changed=True,
out=cluster_state)
else:
module.fail_json(msg="Fail to bring the cluster %s" % state)
else:
cluster_state = get_node_status(module, node)
# Check cluster state
for node_state in cluster_state:
if node_state[1].strip().lower() == state:
module.exit_json(changed=changed,
out=cluster_state)
else:
                    # Set node status if needed
                    set_node(module, state, timeout, force, node)
cluster_state = get_node_status(module, node)
module.exit_json(changed=True,
out=cluster_state)
if state in ['restart']:
set_cluster(module, 'offline', timeout, force)
cluster_state = get_cluster_status(module)
if cluster_state == 'offline':
set_cluster(module, 'online', timeout, force)
cluster_state = get_cluster_status(module)
if cluster_state == 'online':
module.exit_json(changed=True,
out=cluster_state)
else:
module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
else:
module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
if state in ['cleanup']:
clean_cluster(module, timeout)
cluster_state = get_cluster_status(module)
module.exit_json(changed=True,
out=cluster_state)
if __name__ == '__main__':
main()
|
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib import constants
from neutron_lib.services.qos import constants as qos_consts
from neutronclient.common import exceptions
from oslo_utils import uuidutils
import testscenarios
from neutron.agent.common import ovs_lib
from neutron.agent.linux import tc_lib
from neutron.common import utils
from neutron.tests import base as tests_base
from neutron.tests.common.agents import l2_extensions
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.tests.fullstack import utils as fullstack_utils
from neutron.tests.unit import testlib_api
from neutron.conf.plugins.ml2.drivers import linuxbridge as \
linuxbridge_agent_config
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as linuxbridge_agent
from neutron.services.qos.drivers.openvswitch import driver as ovs_drv
load_tests = testlib_api.module_load_tests
BANDWIDTH_BURST = 100
BANDWIDTH_LIMIT = 500
DSCP_MARK = 16
class BaseQoSRuleTestCase(object):
of_interface = None
number_of_hosts = 1
@property
def reverse_direction(self):
if self.direction == constants.INGRESS_DIRECTION:
return constants.EGRESS_DIRECTION
elif self.direction == constants.EGRESS_DIRECTION:
return constants.INGRESS_DIRECTION
def setUp(self):
host_desc = [
environment.HostDescription(
l3_agent=False,
of_interface=self.of_interface,
l2_agent_type=self.l2_agent_type
) for _ in range(self.number_of_hosts)]
env_desc = environment.EnvironmentDescription(
qos=True)
env = environment.Environment(env_desc, host_desc)
super(BaseQoSRuleTestCase, self).setUp(env)
self.tenant_id = uuidutils.generate_uuid()
self.network = self.safe_client.create_network(self.tenant_id,
'network-test')
self.subnet = self.safe_client.create_subnet(
self.tenant_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=False)
def _create_qos_policy(self):
return self.safe_client.create_qos_policy(
            self.tenant_id, 'fs_policy', 'Fullstack testing policy',
            shared=False, is_default=False)
def _prepare_vm_with_qos_policy(self, rule_add_functions):
qos_policy = self._create_qos_policy()
qos_policy_id = qos_policy['id']
port = self.safe_client.create_port(
self.tenant_id, self.network['id'],
self.environment.hosts[0].hostname,
qos_policy_id)
for rule_add in rule_add_functions:
rule_add(qos_policy)
vm = self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[0],
self.network['id'],
self.tenant_id,
self.safe_client,
neutron_port=port))
return vm, qos_policy
class _TestBwLimitQoS(BaseQoSRuleTestCase):
number_of_hosts = 1
@staticmethod
def _get_expected_egress_burst_value(limit):
return int(
limit * qos_consts.DEFAULT_BURST_RATE
)
def _wait_for_bw_rule_removed(self, vm, direction):
# No values are provided when port doesn't have qos policy
self._wait_for_bw_rule_applied(vm, None, None, direction)
def _add_bw_limit_rule(self, limit, burst, direction, qos_policy):
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, limit, burst, direction)
# Make it consistent with GET reply
rule['type'] = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
rule['qos_policy_id'] = qos_policy_id
qos_policy['rules'].append(rule)
def test_bw_limit_qos_policy_rule_lifecycle(self):
new_limit = BANDWIDTH_LIMIT + 100
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(
self._add_bw_limit_rule,
BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)])
bw_rule = qos_policy['rules'][0]
self._wait_for_bw_rule_applied(
vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)
qos_policy_id = qos_policy['id']
self.client.delete_bandwidth_limit_rule(bw_rule['id'], qos_policy_id)
self._wait_for_bw_rule_removed(vm, self.direction)
# Create new rule with no given burst value, in such case ovs and lb
# agent should apply burst value as
# bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
new_expected_burst = self._get_expected_burst_value(new_limit,
self.direction)
new_rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, new_limit, direction=self.direction)
self._wait_for_bw_rule_applied(
vm, new_limit, new_expected_burst, self.direction)
# Update qos policy rule id
self.client.update_bandwidth_limit_rule(
new_rule['id'], qos_policy_id,
body={'bandwidth_limit_rule': {'max_kbps': BANDWIDTH_LIMIT,
'max_burst_kbps': BANDWIDTH_BURST}})
self._wait_for_bw_rule_applied(
vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)
# Remove qos policy from port
self.client.update_port(
vm.neutron_port['id'],
body={'port': {'qos_policy_id': None}})
self._wait_for_bw_rule_removed(vm, self.direction)
def test_bw_limit_direction_change(self):
# Create port with qos policy attached, with rule self.direction
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(
self._add_bw_limit_rule,
BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)])
bw_rule = qos_policy['rules'][0]
self._wait_for_bw_rule_applied(
vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)
        # Update the rule, changing its direction to the opposite of what it was before
self.client.update_bandwidth_limit_rule(
bw_rule['id'], qos_policy['id'],
body={'bandwidth_limit_rule': {
'direction': self.reverse_direction}})
self._wait_for_bw_rule_removed(vm, self.direction)
self._wait_for_bw_rule_applied(
vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.reverse_direction)
class TestBwLimitQoSOvs(_TestBwLimitQoS, base.BaseFullStackTestCase):
l2_agent_type = constants.AGENT_TYPE_OVS
direction_scenarios = [
('ingress', {'direction': constants.INGRESS_DIRECTION}),
('egress', {'direction': constants.EGRESS_DIRECTION})
]
scenarios = testscenarios.multiply_scenarios(
direction_scenarios, fullstack_utils.get_ovs_interface_scenarios())
@staticmethod
def _get_expected_burst_value(limit, direction):
# For egress bandwidth limit this value should be calculated as
# bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
if direction == constants.EGRESS_DIRECTION:
return TestBwLimitQoSOvs._get_expected_egress_burst_value(limit)
else:
return 0
def _wait_for_bw_rule_applied(self, vm, limit, burst, direction):
if direction == constants.EGRESS_DIRECTION:
utils.wait_until_true(
lambda: vm.bridge.get_egress_bw_limit_for_port(
vm.port.name) == (limit, burst))
elif direction == constants.INGRESS_DIRECTION:
utils.wait_until_true(
lambda: vm.bridge.get_ingress_bw_limit_for_port(
vm.port.name) == (limit, burst))
def test_bw_limit_qos_port_removed(self):
"""Test if rate limit config is properly removed when whole port is
removed.
"""
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(
self._add_bw_limit_rule,
BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)])
self._wait_for_bw_rule_applied(
vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)
# Delete port with qos policy attached
vm.destroy()
self._wait_for_bw_rule_removed(vm, self.direction)
self.assertIsNone(vm.bridge.find_qos(vm.port.name))
self.assertIsNone(vm.bridge.find_queue(vm.port.name,
ovs_lib.QOS_DEFAULT_QUEUE))
class TestBwLimitQoSLinuxbridge(_TestBwLimitQoS, base.BaseFullStackTestCase):
l2_agent_type = constants.AGENT_TYPE_LINUXBRIDGE
scenarios = [
('egress', {'direction': constants.EGRESS_DIRECTION}),
('ingress', {'direction': constants.INGRESS_DIRECTION}),
]
@staticmethod
def _get_expected_burst_value(limit, direction):
# For egress bandwidth limit this value should be calculated as
# bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
if direction == constants.EGRESS_DIRECTION:
return TestBwLimitQoSLinuxbridge._get_expected_egress_burst_value(
limit)
else:
return TestBwLimitQoSLinuxbridge._get_expected_ingress_burst_value(
limit)
@staticmethod
def _get_expected_ingress_burst_value(limit):
        # Calculate the expected burst the same way tc_lib does. The
        # configured burst value is 0, so the result is always derived from
        # the kernel's HZ value. As tc_lib.bits_to_kilobits rounds up (even
        # 1 bit yields 1 kbit), the same 1 is added here to the expected
        # burst value.
return int(
float(limit) /
float(linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE) + 1)
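    # A worked example of the formula above, assuming the default kernel HZ
    # value of 250 (see linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE):
    # a limit of 1000 kbps yields int(1000 / 250.0 + 1) == 5 kbit of burst.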
def _wait_for_bw_rule_applied(self, vm, limit, burst, direction):
port_name = linuxbridge_agent.LinuxBridgeManager.get_tap_device_name(
vm.neutron_port['id'])
tc = tc_lib.TcCommand(
port_name,
linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE,
namespace=vm.host.host_namespace
)
if direction == constants.EGRESS_DIRECTION:
utils.wait_until_true(
lambda: tc.get_filters_bw_limits() == (limit, burst))
elif direction == constants.INGRESS_DIRECTION:
utils.wait_until_true(
lambda: tc.get_tbf_bw_limits() == (limit, burst))
class _TestDscpMarkingQoS(BaseQoSRuleTestCase):
number_of_hosts = 2
def _wait_for_dscp_marking_rule_removed(self, vm):
self._wait_for_dscp_marking_rule_applied(vm, None)
def _add_dscp_rule(self, dscp_mark, qos_policy):
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_dscp_marking_rule(
self.tenant_id, qos_policy_id, dscp_mark)
# Make it consistent with GET reply
rule['type'] = qos_consts.RULE_TYPE_DSCP_MARKING
rule['qos_policy_id'] = qos_policy_id
qos_policy['rules'].append(rule)
def test_dscp_qos_policy_rule_lifecycle(self):
new_dscp_mark = DSCP_MARK + 8
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(self._add_dscp_rule, DSCP_MARK)])
dscp_rule = qos_policy['rules'][0]
self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK)
qos_policy_id = qos_policy['id']
self.client.delete_dscp_marking_rule(dscp_rule['id'], qos_policy_id)
self._wait_for_dscp_marking_rule_removed(vm)
# Create new rule
new_rule = self.safe_client.create_dscp_marking_rule(
self.tenant_id, qos_policy_id, new_dscp_mark)
self._wait_for_dscp_marking_rule_applied(vm, new_dscp_mark)
# Update qos policy rule id
self.client.update_dscp_marking_rule(
new_rule['id'], qos_policy_id,
body={'dscp_marking_rule': {'dscp_mark': DSCP_MARK}})
self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK)
# Remove qos policy from port
self.client.update_port(
vm.neutron_port['id'],
body={'port': {'qos_policy_id': None}})
self._wait_for_dscp_marking_rule_removed(vm)
@tests_base.unstable_test("bug 1733649")
def test_dscp_marking_packets(self):
        # Create port (vm) which will be used to receive and test packets
receiver_port = self.safe_client.create_port(
self.tenant_id, self.network['id'],
self.environment.hosts[1].hostname)
receiver = self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[1],
self.network['id'],
self.tenant_id,
self.safe_client,
neutron_port=receiver_port))
# Create port with qos policy attached
sender, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(self._add_dscp_rule, DSCP_MARK)])
sender.block_until_boot()
receiver.block_until_boot()
self._wait_for_dscp_marking_rule_applied(sender, DSCP_MARK)
l2_extensions.wait_for_dscp_marked_packet(
sender, receiver, DSCP_MARK)
class TestDscpMarkingQoSOvs(_TestDscpMarkingQoS, base.BaseFullStackTestCase):
scenarios = fullstack_utils.get_ovs_interface_scenarios()
l2_agent_type = constants.AGENT_TYPE_OVS
def _wait_for_dscp_marking_rule_applied(self, vm, dscp_mark):
l2_extensions.wait_until_dscp_marking_rule_applied_ovs(
vm.bridge, vm.port.name, dscp_mark)
class TestDscpMarkingQoSLinuxbridge(_TestDscpMarkingQoS,
base.BaseFullStackTestCase):
l2_agent_type = constants.AGENT_TYPE_LINUXBRIDGE
def _wait_for_dscp_marking_rule_applied(self, vm, dscp_mark):
l2_extensions.wait_until_dscp_marking_rule_applied_linuxbridge(
vm.host.host_namespace, vm.port.name, dscp_mark)
class TestQoSWithL2Population(base.BaseFullStackTestCase):
def setUp(self):
host_desc = [] # No need to register agents for this test case
env_desc = environment.EnvironmentDescription(qos=True, l2_pop=True)
env = environment.Environment(env_desc, host_desc)
super(TestQoSWithL2Population, self).setUp(env)
def test_supported_qos_rule_types(self):
res = self.client.list_qos_rule_types()
rule_types = {t['type'] for t in res['rule_types']}
expected_rules = set(ovs_drv.SUPPORTED_RULES)
self.assertEqual(expected_rules, rule_types)
class TestQoSPolicyIsDefault(base.BaseFullStackTestCase):
NAME = 'fs_policy'
DESCRIPTION = 'Fullstack testing policy'
SHARED = True
def setUp(self):
host_desc = [] # No need to register agents for this test case
env_desc = environment.EnvironmentDescription(qos=True)
env = environment.Environment(env_desc, host_desc)
super(TestQoSPolicyIsDefault, self).setUp(env)
def _create_qos_policy(self, project_id, is_default):
return self.safe_client.create_qos_policy(
project_id, self.NAME, self.DESCRIPTION, shared=self.SHARED,
is_default=is_default)
def _update_qos_policy(self, qos_policy_id, is_default):
return self.client.update_qos_policy(
qos_policy_id, body={'policy': {'is_default': is_default}})
def test_create_one_default_qos_policy_per_project(self):
project_ids = [uuidutils.generate_uuid(), uuidutils.generate_uuid()]
for project_id in project_ids:
qos_policy = self._create_qos_policy(project_id, True)
self.assertTrue(qos_policy['is_default'])
self.assertEqual(project_id, qos_policy['project_id'])
qos_policy = self._create_qos_policy(project_id, False)
self.assertFalse(qos_policy['is_default'])
self.assertEqual(project_id, qos_policy['project_id'])
def test_create_two_default_qos_policies_per_project(self):
project_id = uuidutils.generate_uuid()
qos_policy = self._create_qos_policy(project_id, True)
self.assertTrue(qos_policy['is_default'])
self.assertEqual(project_id, qos_policy['project_id'])
self.assertRaises(exceptions.Conflict,
self._create_qos_policy, project_id, True)
def test_update_default_status(self):
project_ids = [uuidutils.generate_uuid(), uuidutils.generate_uuid()]
for project_id in project_ids:
qos_policy = self._create_qos_policy(project_id, True)
self.assertTrue(qos_policy['is_default'])
qos_policy = self._update_qos_policy(qos_policy['id'], False)
self.assertFalse(qos_policy['policy']['is_default'])
def test_update_default_status_conflict(self):
project_id = uuidutils.generate_uuid()
qos_policy_1 = self._create_qos_policy(project_id, True)
self.assertTrue(qos_policy_1['is_default'])
qos_policy_2 = self._create_qos_policy(project_id, False)
self.assertFalse(qos_policy_2['is_default'])
self.assertRaises(exceptions.Conflict,
self._update_qos_policy, qos_policy_2['id'], True)
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a management cli application.
import logging
import paramiko
import sys
from copy import copy
import os.path
CONF = {
"ssh_port": 4990,
"ssh_host": "localhost",
"ssh_hostkey": None,
"ssh_username": "ryu",
"ssh_password": "ryu",
}
from ryu.lib import hub
from ryu import version
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse
from ryu.services.protocols.bgp.operator.commands.root import RootCmd
from ryu.services.protocols.bgp.operator.internal_api import InternalApi
from ryu.services.protocols.bgp.operator.command import STATUS_OK
from ryu.services.protocols.bgp.base import Activity
LOG = logging.getLogger('bgpspeaker.cli')
class SshServer(paramiko.ServerInterface):
TERM = "ansi"
PROMPT = "bgpd> "
WELCOME = """
Hello, this is Ryu BGP speaker (version %s).
""" % version
class HelpCmd(Command):
help_msg = 'show this help'
command = 'help'
def action(self, params):
return self.parent_cmd.question_mark()[0]
class QuitCmd(Command):
help_msg = 'exit this session'
command = 'quit'
def action(self, params):
self.api.sshserver.end_session()
return CommandsResponse(STATUS_OK, True)
def __init__(self, sock, addr):
super(SshServer, self).__init__()
# tweak InternalApi and RootCmd for non-bgp related commands
self.api = InternalApi(log_handler=logging.StreamHandler(sys.stderr))
setattr(self.api, 'sshserver', self)
self.root = RootCmd(self.api)
self.root.subcommands['help'] = self.HelpCmd
self.root.subcommands['quit'] = self.QuitCmd
transport = paramiko.Transport(sock)
transport.load_server_moduli()
host_key = self._find_ssh_server_key()
transport.add_server_key(host_key)
self.transport = transport
transport.start_server(server=self)
def _find_ssh_server_key(self):
if CONF["ssh_hostkey"]:
return paramiko.RSAKey.from_private_key_file(ssh_hostkey)
elif os.path.exists("/etc/ssh_host_rsa_key"):
# OSX
return paramiko.RSAKey.from_private_key_file(
"/etc/ssh_host_rsa_key")
elif os.path.exists("/etc/ssh/ssh_host_rsa_key"):
# Linux
return paramiko.RSAKey.from_private_key_file(
"/etc/ssh/ssh_host_rsa_key")
else:
return paramiko.RSAKey.generate(1024)
def check_auth_none(self, username):
return paramiko.AUTH_SUCCESSFUL
def check_auth_password(self, username, password):
if username == CONF["ssh_username"] and \
password == CONF["ssh_password"]:
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_channel_request(self, kind, chanid):
if kind == 'session':
return paramiko.OPEN_SUCCEEDED
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_channel_shell_request(self, chan):
hub.spawn(self._handle_shell_request)
return True
def check_channel_pty_request(self, chan, term, width, height,
pixelwidth, pixelheight, modes):
LOG.debug("termtype: %s" % (term, ))
self.TERM = term
return True
def check_channel_window_change_request(self, chan, width, height, pwidth,
pheight):
LOG.info("channel window change")
return True
def _is_echoable(self, c):
return not (c < chr(0x20) or c == chr(0x7F))
def _is_enter(self, c):
return c == chr(0x0d)
def _is_eof(self, c):
return c == chr(0x03)
def _is_esc(self, c):
return c == chr(0x1b)
def _is_hist(self, c):
return c == chr(0x10) or c == chr(0x0e)
def _is_del(self, c):
return c == chr(0x04) or c == chr(0x08) or c == chr(0x15) \
or c == chr(0x17) or c == chr(0x0c) or c == chr(0x7f)
def _is_curmov(self, c):
return c == chr(0x01) or c == chr(0x02) or c == chr(0x05) \
or c == chr(0x06)
def _is_cmpl(self, c):
return c == chr(0x09)
def _handle_csi_seq(self):
c = self.chan.recv(1)
if c == 'A':
self._lookup_hist_up()
elif c == 'B':
self._lookup_hist_down()
elif c == 'C':
self._movcursor(self.curpos + 1)
elif c == 'D':
self._movcursor(self.curpos - 1)
else:
LOG.error("unknown CSI sequence. do nothing: %c" % c)
def _handle_esc_seq(self):
c = self.chan.recv(1)
if c == '[':
self._handle_csi_seq()
else:
LOG.error("non CSI sequence. do nothing")
def _send_csi_seq(self, cmd):
self.chan.send('\x1b[' + cmd)
def _movcursor(self, curpos):
if self.prompted and curpos < len(self.PROMPT):
self.curpos = len(self.PROMPT)
elif self.prompted and curpos > (len(self.PROMPT) + len(self.buf)):
self.curpos = len(self.PROMPT) + len(self.buf)
else:
self._send_csi_seq('%dG' % (curpos + 1))
self.curpos = curpos
def _clearscreen(self, prompt=None):
if not prompt and self.prompted:
prompt = self.PROMPT
# clear screen
self._send_csi_seq('2J')
# move cursor to the top
self._send_csi_seq('d')
# redraw prompt and buf
self._refreshline(prompt=prompt)
def _clearline(self, prompt=None):
if not prompt and self.prompted:
prompt = self.PROMPT
self.prompted = False
self._movcursor(0)
self._send_csi_seq('2K')
if prompt:
self.prompted = True
self.chan.send(prompt)
self._movcursor(len(prompt))
self.buf = []
def _refreshline(self, prompt=None):
if not prompt and self.prompted:
prompt = self.PROMPT
buf = copy(self.buf)
curpos = copy(self.curpos)
self._clearline(prompt=prompt)
self.chan.send(''.join(buf))
self.buf = buf
self.curpos = curpos
self._movcursor(curpos)
def _refreshnewline(self, prompt=None):
if not prompt and self.prompted:
prompt = self.PROMPT
buf = copy(self.buf)
curpos = copy(self.curpos)
self._startnewline(prompt)
self.chan.send(''.join(buf))
self.buf = buf
self.curpos = curpos
self._movcursor(curpos)
def _startnewline(self, prompt=None, buf=''):
if not prompt and self.prompted:
prompt = self.PROMPT
        if isinstance(buf, str):
buf = list(buf)
if self.chan:
self.buf = buf
if prompt:
self.chan.send('\n\r' + prompt + ''.join(buf))
self.curpos = len(prompt) + len(buf)
self.prompted = True
else:
self.chan.send('\n\r' + ''.join(buf))
self.curpos = len(buf)
self.prompted = False
def _lookup_hist_up(self):
if len(self.history) == 0:
return
self.buf = self.history[self.histindex]
self.curpos = self.promptlen + len(self.buf)
self._refreshline()
if self.histindex + 1 < len(self.history):
self.histindex += 1
def _lookup_hist_down(self):
if self.histindex > 0:
self.histindex -= 1
self.buf = self.history[self.histindex]
self.curpos = self.promptlen + len(self.buf)
self._refreshline()
else:
self._clearline()
def _do_cmpl(self, buf, is_exec=False):
cmpleter = self.root
is_spaced = buf[-1] == ' ' if len(buf) > 0 else False
cmds = [tkn.strip() for tkn in ''.join(buf).split()]
ret = []
for i, cmd in enumerate(cmds):
subcmds = cmpleter.subcommands
matches = [x for x in subcmds.keys() if x.startswith(cmd)]
if len(matches) == 1:
cmpled_cmd = matches[0]
cmpleter = subcmds[cmpled_cmd](self.api)
if is_exec:
ret.append(cmpled_cmd)
continue
if (i + 1) == len(cmds):
if is_spaced:
result, cmd = cmpleter('?')
result = result.value.replace('\n', '\n\r').rstrip()
self.prompted = False
buf = copy(buf)
self._startnewline(buf=result)
self.prompted = True
self._startnewline(buf=buf)
else:
self.buf = buf[:(-1 * len(cmd))] + \
list(cmpled_cmd + ' ')
self.curpos += len(cmpled_cmd) - len(cmd) + 1
self._refreshline()
else:
self.prompted = False
buf = copy(self.buf)
if len(matches) == 0:
if cmpleter.param_help_msg:
self.prompted = True
ret.append(cmd)
continue
else:
self._startnewline(buf='Error: Not implemented')
else:
if (i + 1) < len(cmds):
self._startnewline(buf='Error: Ambiguous command')
else:
self._startnewline(buf=', '.join(matches))
ret = False
self.prompted = True
if not is_exec:
self._startnewline(buf=buf)
break
return ret
def _execute_cmd(self, cmds):
result, cmd = self.root(cmds)
LOG.debug("result: %s" % str(result))
self.prompted = False
self._startnewline()
output = result.value.replace('\n', '\n\r').rstrip()
self.chan.send(output)
self.prompted = True
return result.status
def end_session(self):
self._startnewline(prompt=False, buf='bye.\n\r')
self.chan.close()
def _handle_shell_request(self):
LOG.info("session start")
chan = self.transport.accept(20)
if not chan:
LOG.info("transport.accept timed out")
return
self.chan = chan
self.buf = []
self.curpos = 0
self.history = []
self.histindex = 0
self.prompted = True
self.chan.send(self.WELCOME)
self._startnewline()
while True:
c = self.chan.recv(1)
if len(c) == 0:
break
LOG.debug("ord:%d, hex:0x%x" % (ord(c), ord(c)))
self.promptlen = len(self.PROMPT) if self.prompted else 0
if c == '?':
cmpleter = self.root
cmds = [tkn.strip() for tkn in ''.join(self.buf).split()]
for i, cmd in enumerate(cmds):
subcmds = cmpleter.subcommands
matches = [x for x in subcmds.keys() if x.startswith(cmd)]
if len(matches) == 1:
cmpled_cmd = matches[0]
cmpleter = subcmds[cmpled_cmd](self.api)
result, cmd = cmpleter('?')
result = result.value.replace('\n', '\n\r').rstrip()
self.prompted = False
buf = copy(self.buf)
self._startnewline(buf=result)
self.prompted = True
self._startnewline(buf=buf)
elif self._is_echoable(c):
self.buf.insert(self.curpos - self.promptlen, c)
self.curpos += 1
self._refreshline()
elif self._is_esc(c):
self._handle_esc_seq()
elif self._is_eof(c):
self.end_session()
elif self._is_curmov(c):
# <C-a>
if c == chr(0x01):
self._movcursor(self.promptlen)
# <C-b>
elif c == chr(0x02):
self._movcursor(self.curpos - 1)
# <C-e>
elif c == chr(0x05):
self._movcursor(self.promptlen + len(self.buf))
# <C-f>
elif c == chr(0x06):
self._movcursor(self.curpos + 1)
else:
LOG.error("unknown cursor move cmd.")
continue
elif self._is_hist(c):
# <C-p>
if c == chr(0x10):
self._lookup_hist_up()
# <C-n>
elif c == chr(0x0e):
self._lookup_hist_down()
elif self._is_del(c):
# <C-d>
if c == chr(0x04):
if self.curpos < (self.promptlen + len(self.buf)):
self.buf.pop(self.curpos - self.promptlen)
self._refreshline()
# <C-h> or delete
elif c == chr(0x08) or c == chr(0x7f):
if self.curpos > self.promptlen:
self.buf.pop(self.curpos - self.promptlen - 1)
self.curpos -= 1
self._refreshline()
# <C-u>
elif c == chr(0x15):
self._clearline()
# <C-w>
elif c == chr(0x17):
pos = self.curpos - self.promptlen
i = pos
flag = False
for c in reversed(self.buf[:pos]):
if flag and c == ' ':
break
if c != ' ':
flag = True
i -= 1
del self.buf[i:pos]
self.curpos = self.promptlen + i
self._refreshline()
# <C-l>
elif c == chr(0x0c):
self._clearscreen()
elif self._is_cmpl(c):
self._do_cmpl(self.buf)
elif self._is_enter(c):
if len(''.join(self.buf).strip()) != 0:
# cmd line interpretation
cmds = self._do_cmpl(self.buf, is_exec=True)
if cmds:
self.history.insert(0, self.buf)
self.histindex = 0
self._execute_cmd(cmds)
else:
LOG.debug("blank buf. just start a new line.")
self._startnewline()
LOG.debug("curpos: %d, buf: %s, prompted: %s" % (self.curpos,
self.buf,
self.prompted))
LOG.info("session end")
class SshServerFactory(object):
def __init__(self, *args, **kwargs):
super(SshServerFactory, self).__init__(*args, **kwargs)
def streamserver_handle(self, sock, addr):
SshServer(sock, addr)
class Cli(Activity):
def __init__(self):
super(Cli, self).__init__()
def _run(self, *args, **kwargs):
for k, v in kwargs.items():
if k in CONF:
CONF[k] = v
LOG.info("starting ssh server at %s:%d", CONF["ssh_host"],
CONF["ssh_port"])
factory = SshServerFactory()
server = hub.StreamServer((CONF["ssh_host"], CONF["ssh_port"]),
factory.streamserver_handle)
server.serve_forever()
SSH_CLI_CONTROLLER = Cli()
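# Illustrative only: keys in kwargs that match CONF entries override the
# defaults above when the activity is started (assuming the Activity base
# class forwards start() kwargs to _run()), e.g.
#   SSH_CLI_CONTROLLER.start(ssh_port=4990, ssh_username='ryu')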
|
|
"""
Oracle database backend for Django.
Requires cx_Oracle: https://oracle.github.io/python-cx_Oracle/
"""
import datetime
import decimal
import os
import platform
from contextlib import contextmanager
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils.encoding import force_bytes, force_str
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.AL32UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import Oracle_datetime # NOQA isort:skip
from .validation import DatabaseValidation # NOQA isort:skip
@contextmanager
def wrap_oracle_errors():
try:
yield
except Database.DatabaseError as e:
# cx_Oracle raises a cx_Oracle.DatabaseError exception with the
# following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# Convert that case to Django's IntegrityError exception.
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') and x.code == 2091 and 'ORA-02291' in x.message:
raise utils.IntegrityError(*tuple(e.args))
raise
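# A minimal usage sketch for the context manager above (illustrative): the
# backend wraps calls such as commit so that the ORA-02091/ORA-02291
# combination surfaces as Django's IntegrityError:
#
#   with wrap_oracle_errors():
#       connection.commit()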
class _UninitializedOperatorsDescriptor:
def __get__(self, instance, cls=None):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
display_name = 'Oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'BigAutoField': 'NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
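    # For illustration: a CharField(max_length=30) maps through data_types to
    # 'NVARCHAR2(%(max_length)s)', which is interpolated against the field's
    # __dict__ to produce the column type 'NVARCHAR2(30)'.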
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '%(qn_column)s IN (0,1)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
# Oracle doesn't support a database index on these columns.
_limited_data_types = ('clob', 'nclob', 'blob')
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = {
**_standard_operators,
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, %, _)
# should be escaped on the database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
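    # For example (illustrative), _standard_pattern_ops['startswith'] expands
    # to:
    #   LIKE TRANSLATE( {} || '%%' USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)
    # where {} is later filled with the (escaped) right-hand side expression.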
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_columns_from_insert = use_returning_into
def _dsn(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT']:
return Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), settings_dict['NAME'])
return settings_dict['NAME']
def _connect_string(self):
return '%s/"%s"@%s' % (self.settings_dict['USER'], self.settings_dict['PASSWORD'], self._dsn())
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
return Database.connect(
user=self.settings_dict['USER'],
password=self.settings_dict['PASSWORD'],
dsn=self._dsn(),
**conn_params,
)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in a single statement, it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" +
(" TIME_ZONE = 'UTC'" if settings.USE_TZ else '')
)
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except Database.DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
self.connection.stmtcachesize = 20
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
def create_cursor(self, name=None):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
with wrap_oracle_errors():
return self.connection.commit()
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_version(self):
with self.temporary_connection():
return tuple(int(x) for x in self.connection.version.split('.'))
class OracleParam:
"""
Wrapper object for formatting parameters for Oracle. If the string
    representation of the value is large enough (greater than 4000 bytes),
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
param = Oracle_datetime.from_datetime(param)
string_size = 0
# Oracle doesn't recognize True and False correctly.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, (Database.Binary, datetime.timedelta)):
self.force_bytes = param
else:
            # To transmit to the database, we need Unicode if supported.
            # To get the size right, we must consider bytes.
self.force_bytes = force_str(param, cursor.charset, strings_only)
if isinstance(self.force_bytes, str):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
            # Mark any string param longer than 4000 bytes as a CLOB.
self.input_size = Database.CLOB
elif isinstance(param, datetime.datetime):
self.input_size = Database.TIMESTAMP
else:
self.input_size = None
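
# A minimal sketch of the sizing rule implemented above (illustrative only;
# the string return values stand in for the real Database.CLOB and
# Database.TIMESTAMP constants, and `input_size`-bearing params, which take
# precedence in the real code, are ignored here):
def _demo_input_size(param):
    import datetime
    if isinstance(param, str) and len(param.encode('utf-8')) > 4000:
        return 'CLOB'       # OracleParam sets Database.CLOB here
    if isinstance(param, datetime.datetime):
        return 'TIMESTAMP'  # OracleParam sets Database.TIMESTAMP here
    return None             # no explicit input size

# _demo_input_size('x' * 4001) -> 'CLOB'
# _demo_input_size(datetime.datetime(2020, 1, 1)) -> 'TIMESTAMP'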
class VariableWrapper:
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor:
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
self.cursor.outputtypehandler = self._output_type_handler
@staticmethod
def _output_number_converter(value):
return decimal.Decimal(value) if '.' in value else int(value)
@staticmethod
def _get_decimal_converter(precision, scale):
if scale == 0:
return int
context = decimal.Context(prec=precision)
quantize_value = decimal.Decimal(1).scaleb(-scale)
return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context)
@staticmethod
def _output_type_handler(cursor, name, defaultType, length, precision, scale):
"""
Called for each db column fetched from cursors. Return numbers as the
appropriate Python type.
"""
if defaultType == Database.NUMBER:
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point.
# This will normally be an integer from a sequence,
# but it could be a decimal value.
outconverter = FormatStylePlaceholderCursor._output_number_converter
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
outconverter = float
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntegerField and DecimalField columns.
outconverter = FormatStylePlaceholderCursor._get_decimal_converter(precision, scale)
else:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
outconverter = FormatStylePlaceholderCursor._output_number_converter
return cursor.var(
Database.STRING,
size=255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
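    # Worked example of the dispatch above (illustrative): a NUMBER(5, 2)
    # column yields _get_decimal_converter(5, 2), so '123.456' comes back as
    # Decimal('123.46'); NUMBER(5, 0) yields plain int; and a bare NUMBER
    # (precision 0, scale -127) falls back to _output_number_converter,
    # which returns Decimal only when the string contains a '.'.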
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
if sizes:
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
if sizes:
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params, unify_by_values=False):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params}
query = query % args
elif unify_by_values and params:
# Handle params as a dict with unified query parameters by their
# values. It can be used only in single query execute() because
# executemany() shares the formatted query with each of the params
# list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75]
# params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'}
# args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0']
# params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'}
params_dict = {param: ':arg%d' % i for i, param in enumerate(set(params))}
args = [params_dict[param] for param in params]
params = {value: key for key, value in params_dict.items()}
query = query % tuple(args)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = query % tuple(args)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
with wrap_oracle_errors():
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
with wrap_oracle_errors():
return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
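
# Standalone sketch (not part of the backend) of the core rewrite that
# _fix_for_params performs for sequence params: '%s' placeholders become
# Oracle ':argN' bind names.
def _demo_placeholder_rewrite(query, params):
    args = [':arg%d' % i for i in range(len(params))]
    return query % tuple(args)

# _demo_placeholder_rewrite('SELECT * FROM t WHERE a = %s AND b = %s', [1, 2])
#     -> 'SELECT * FROM t WHERE a = :arg0 AND b = :arg1'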
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for openal
Generated with:
../tools/wraptypes/wrap.py /usr/include/AL/al.h -lopenal -olib_openal.py
.. Hacked to remove non-existent library functions.
TODO add alGetError check.
.. alListener3i and alListeneriv are present in my OS X 10.4 but not another
10.4 user's installation. They've also been removed for compatibility.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: lib_openal.py 2541 2009-12-31 04:31:11Z benjamin.coder.smith@gmail.com $'
import ctypes
from ctypes import *
import sys
import pyglet.lib
_lib = pyglet.lib.load_library('openal', win32='openal32',
framework='/System/Library/Frameworks/OpenAL.framework')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
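# The loop above picks whichever signed integer type matches
# sizeof(c_size_t) (the pointer size on common platforms); if none matched,
# c_ptrdiff_t would simply be left undefined.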
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
AL_API = 0 # /usr/include/AL/al.h:39
ALAPI = 0 # /usr/include/AL/al.h:59
AL_INVALID = -1 # /usr/include/AL/al.h:61
AL_ILLEGAL_ENUM = 0 # /usr/include/AL/al.h:62
AL_ILLEGAL_COMMAND = 0 # /usr/include/AL/al.h:63
ALboolean = c_int # Better return type than c_char, as generated
ALchar = c_char # /usr/include/AL/al.h:73
ALbyte = c_char # /usr/include/AL/al.h:76
ALubyte = c_ubyte # /usr/include/AL/al.h:79
ALshort = c_short # /usr/include/AL/al.h:82
ALushort = c_ushort # /usr/include/AL/al.h:85
ALint = c_int # /usr/include/AL/al.h:88
ALuint = c_uint # /usr/include/AL/al.h:91
ALsizei = c_int # /usr/include/AL/al.h:94
ALenum = c_int # /usr/include/AL/al.h:97
ALfloat = c_float # /usr/include/AL/al.h:100
ALdouble = c_double # /usr/include/AL/al.h:103
ALvoid = None # /usr/include/AL/al.h:106
AL_NONE = 0 # /usr/include/AL/al.h:112
AL_FALSE = 0 # /usr/include/AL/al.h:115
AL_TRUE = 1 # /usr/include/AL/al.h:118
AL_SOURCE_RELATIVE = 514 # /usr/include/AL/al.h:121
AL_CONE_INNER_ANGLE = 4097 # /usr/include/AL/al.h:130
AL_CONE_OUTER_ANGLE = 4098 # /usr/include/AL/al.h:137
AL_PITCH = 4099 # /usr/include/AL/al.h:145
AL_POSITION = 4100 # /usr/include/AL/al.h:157
AL_DIRECTION = 4101 # /usr/include/AL/al.h:160
AL_VELOCITY = 4102 # /usr/include/AL/al.h:163
AL_LOOPING = 4103 # /usr/include/AL/al.h:171
AL_BUFFER = 4105 # /usr/include/AL/al.h:178
AL_GAIN = 4106 # /usr/include/AL/al.h:191
AL_MIN_GAIN = 4109 # /usr/include/AL/al.h:200
AL_MAX_GAIN = 4110 # /usr/include/AL/al.h:209
AL_ORIENTATION = 4111 # /usr/include/AL/al.h:216
AL_SOURCE_STATE = 4112 # /usr/include/AL/al.h:221
AL_INITIAL = 4113 # /usr/include/AL/al.h:222
AL_PLAYING = 4114 # /usr/include/AL/al.h:223
AL_PAUSED = 4115 # /usr/include/AL/al.h:224
AL_STOPPED = 4116 # /usr/include/AL/al.h:225
AL_BUFFERS_QUEUED = 4117 # /usr/include/AL/al.h:230
AL_BUFFERS_PROCESSED = 4118 # /usr/include/AL/al.h:231
AL_SEC_OFFSET = 4132 # /usr/include/AL/al.h:236
AL_SAMPLE_OFFSET = 4133 # /usr/include/AL/al.h:237
AL_BYTE_OFFSET = 4134 # /usr/include/AL/al.h:238
AL_SOURCE_TYPE = 4135 # /usr/include/AL/al.h:246
AL_STATIC = 4136 # /usr/include/AL/al.h:247
AL_STREAMING = 4137 # /usr/include/AL/al.h:248
AL_UNDETERMINED = 4144 # /usr/include/AL/al.h:249
AL_FORMAT_MONO8 = 4352 # /usr/include/AL/al.h:252
AL_FORMAT_MONO16 = 4353 # /usr/include/AL/al.h:253
AL_FORMAT_STEREO8 = 4354 # /usr/include/AL/al.h:254
AL_FORMAT_STEREO16 = 4355 # /usr/include/AL/al.h:255
AL_REFERENCE_DISTANCE = 4128 # /usr/include/AL/al.h:265
AL_ROLLOFF_FACTOR = 4129 # /usr/include/AL/al.h:273
AL_CONE_OUTER_GAIN = 4130 # /usr/include/AL/al.h:282
AL_MAX_DISTANCE = 4131 # /usr/include/AL/al.h:292
AL_FREQUENCY = 8193 # /usr/include/AL/al.h:300
AL_BITS = 8194 # /usr/include/AL/al.h:301
AL_CHANNELS = 8195 # /usr/include/AL/al.h:302
AL_SIZE = 8196 # /usr/include/AL/al.h:303
AL_UNUSED = 8208 # /usr/include/AL/al.h:310
AL_PENDING = 8209 # /usr/include/AL/al.h:311
AL_PROCESSED = 8210 # /usr/include/AL/al.h:312
AL_NO_ERROR = 0 # /usr/include/AL/al.h:316
AL_INVALID_NAME = 40961 # /usr/include/AL/al.h:321
AL_INVALID_ENUM = 40962 # /usr/include/AL/al.h:326
AL_INVALID_VALUE = 40963 # /usr/include/AL/al.h:331
AL_INVALID_OPERATION = 40964 # /usr/include/AL/al.h:336
AL_OUT_OF_MEMORY = 40965 # /usr/include/AL/al.h:342
AL_VENDOR = 45057 # /usr/include/AL/al.h:346
AL_VERSION = 45058 # /usr/include/AL/al.h:347
AL_RENDERER = 45059 # /usr/include/AL/al.h:348
AL_EXTENSIONS = 45060 # /usr/include/AL/al.h:349
AL_DOPPLER_FACTOR = 49152 # /usr/include/AL/al.h:356
AL_DOPPLER_VELOCITY = 49153 # /usr/include/AL/al.h:361
AL_SPEED_OF_SOUND = 49155 # /usr/include/AL/al.h:366
AL_DISTANCE_MODEL = 53248 # /usr/include/AL/al.h:375
AL_INVERSE_DISTANCE = 53249 # /usr/include/AL/al.h:376
AL_INVERSE_DISTANCE_CLAMPED = 53250 # /usr/include/AL/al.h:377
AL_LINEAR_DISTANCE = 53251 # /usr/include/AL/al.h:378
AL_LINEAR_DISTANCE_CLAMPED = 53252 # /usr/include/AL/al.h:379
AL_EXPONENT_DISTANCE = 53253 # /usr/include/AL/al.h:380
AL_EXPONENT_DISTANCE_CLAMPED = 53254 # /usr/include/AL/al.h:381
# /usr/include/AL/al.h:386
alEnable = _lib.alEnable
alEnable.restype = None
alEnable.argtypes = [ALenum]
# /usr/include/AL/al.h:388
alDisable = _lib.alDisable
alDisable.restype = None
alDisable.argtypes = [ALenum]
# /usr/include/AL/al.h:390
alIsEnabled = _lib.alIsEnabled
alIsEnabled.restype = ALboolean
alIsEnabled.argtypes = [ALenum]
# /usr/include/AL/al.h:396
alGetString = _lib.alGetString
alGetString.restype = POINTER(ALchar)
alGetString.argtypes = [ALenum]
# /usr/include/AL/al.h:398
alGetBooleanv = _lib.alGetBooleanv
alGetBooleanv.restype = None
alGetBooleanv.argtypes = [ALenum, POINTER(ALboolean)]
# /usr/include/AL/al.h:400
alGetIntegerv = _lib.alGetIntegerv
alGetIntegerv.restype = None
alGetIntegerv.argtypes = [ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:402
alGetFloatv = _lib.alGetFloatv
alGetFloatv.restype = None
alGetFloatv.argtypes = [ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:404
alGetDoublev = _lib.alGetDoublev
alGetDoublev.restype = None
alGetDoublev.argtypes = [ALenum, POINTER(ALdouble)]
# /usr/include/AL/al.h:406
alGetBoolean = _lib.alGetBoolean
alGetBoolean.restype = ALboolean
alGetBoolean.argtypes = [ALenum]
# /usr/include/AL/al.h:408
alGetInteger = _lib.alGetInteger
alGetInteger.restype = ALint
alGetInteger.argtypes = [ALenum]
# /usr/include/AL/al.h:410
alGetFloat = _lib.alGetFloat
alGetFloat.restype = ALfloat
alGetFloat.argtypes = [ALenum]
# /usr/include/AL/al.h:412
alGetDouble = _lib.alGetDouble
alGetDouble.restype = ALdouble
alGetDouble.argtypes = [ALenum]
# /usr/include/AL/al.h:419
alGetError = _lib.alGetError
alGetError.restype = ALenum
alGetError.argtypes = []
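
# The docstring above leaves "add alGetError check" as a TODO. A minimal
# helper in that spirit might look like this (illustrative sketch only,
# using the AL_* error constants defined earlier):
def _check_al_error():
    error = alGetError()
    if error != AL_NO_ERROR:
        names = {AL_INVALID_NAME: 'AL_INVALID_NAME',
                 AL_INVALID_ENUM: 'AL_INVALID_ENUM',
                 AL_INVALID_VALUE: 'AL_INVALID_VALUE',
                 AL_INVALID_OPERATION: 'AL_INVALID_OPERATION',
                 AL_OUT_OF_MEMORY: 'AL_OUT_OF_MEMORY'}
        raise RuntimeError('OpenAL error: %s' % names.get(error, hex(error)))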
# /usr/include/AL/al.h:427
alIsExtensionPresent = _lib.alIsExtensionPresent
alIsExtensionPresent.restype = ALboolean
alIsExtensionPresent.argtypes = [POINTER(ALchar)]
# /usr/include/AL/al.h:429
alGetProcAddress = _lib.alGetProcAddress
alGetProcAddress.restype = POINTER(c_void)
alGetProcAddress.argtypes = [POINTER(ALchar)]
# /usr/include/AL/al.h:431
alGetEnumValue = _lib.alGetEnumValue
alGetEnumValue.restype = ALenum
alGetEnumValue.argtypes = [POINTER(ALchar)]
# /usr/include/AL/al.h:450
alListenerf = _lib.alListenerf
alListenerf.restype = None
alListenerf.argtypes = [ALenum, ALfloat]
# /usr/include/AL/al.h:452
alListener3f = _lib.alListener3f
alListener3f.restype = None
alListener3f.argtypes = [ALenum, ALfloat, ALfloat, ALfloat]
# /usr/include/AL/al.h:454
alListenerfv = _lib.alListenerfv
alListenerfv.restype = None
alListenerfv.argtypes = [ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:456
alListeneri = _lib.alListeneri
alListeneri.restype = None
alListeneri.argtypes = [ALenum, ALint]
# /usr/include/AL/al.h:458
#alListener3i = _lib.alListener3i
#alListener3i.restype = None
#alListener3i.argtypes = [ALenum, ALint, ALint, ALint]
# /usr/include/AL/al.h:460
#alListeneriv = _lib.alListeneriv
#alListeneriv.restype = None
#alListeneriv.argtypes = [ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:465
alGetListenerf = _lib.alGetListenerf
alGetListenerf.restype = None
alGetListenerf.argtypes = [ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:467
alGetListener3f = _lib.alGetListener3f
alGetListener3f.restype = None
alGetListener3f.argtypes = [ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)]
# /usr/include/AL/al.h:469
alGetListenerfv = _lib.alGetListenerfv
alGetListenerfv.restype = None
alGetListenerfv.argtypes = [ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:471
alGetListeneri = _lib.alGetListeneri
alGetListeneri.restype = None
alGetListeneri.argtypes = [ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:473
alGetListener3i = _lib.alGetListener3i
alGetListener3i.restype = None
alGetListener3i.argtypes = [ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)]
# /usr/include/AL/al.h:475
alGetListeneriv = _lib.alGetListeneriv
alGetListeneriv.restype = None
alGetListeneriv.argtypes = [ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:512
alGenSources = _lib.alGenSources
alGenSources.restype = None
alGenSources.argtypes = [ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:515
alDeleteSources = _lib.alDeleteSources
alDeleteSources.restype = None
alDeleteSources.argtypes = [ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:518
alIsSource = _lib.alIsSource
alIsSource.restype = ALboolean
alIsSource.argtypes = [ALuint]
# /usr/include/AL/al.h:523
alSourcef = _lib.alSourcef
alSourcef.restype = None
alSourcef.argtypes = [ALuint, ALenum, ALfloat]
# /usr/include/AL/al.h:525
alSource3f = _lib.alSource3f
alSource3f.restype = None
alSource3f.argtypes = [ALuint, ALenum, ALfloat, ALfloat, ALfloat]
# /usr/include/AL/al.h:527
alSourcefv = _lib.alSourcefv
alSourcefv.restype = None
alSourcefv.argtypes = [ALuint, ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:529
alSourcei = _lib.alSourcei
alSourcei.restype = None
alSourcei.argtypes = [ALuint, ALenum, ALint]
# /usr/include/AL/al.h:531
#alSource3i = _lib.alSource3i
#alSource3i.restype = None
#alSource3i.argtypes = [ALuint, ALenum, ALint, ALint, ALint]
# /usr/include/AL/al.h:533
#alSourceiv = _lib.alSourceiv
#alSourceiv.restype = None
#alSourceiv.argtypes = [ALuint, ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:538
alGetSourcef = _lib.alGetSourcef
alGetSourcef.restype = None
alGetSourcef.argtypes = [ALuint, ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:540
alGetSource3f = _lib.alGetSource3f
alGetSource3f.restype = None
alGetSource3f.argtypes = [ALuint, ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)]
# /usr/include/AL/al.h:542
alGetSourcefv = _lib.alGetSourcefv
alGetSourcefv.restype = None
alGetSourcefv.argtypes = [ALuint, ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:544
alGetSourcei = _lib.alGetSourcei
alGetSourcei.restype = None
alGetSourcei.argtypes = [ALuint, ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:546
#alGetSource3i = _lib.alGetSource3i
#alGetSource3i.restype = None
#alGetSource3i.argtypes = [ALuint, ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)]
# /usr/include/AL/al.h:548
alGetSourceiv = _lib.alGetSourceiv
alGetSourceiv.restype = None
alGetSourceiv.argtypes = [ALuint, ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:556
alSourcePlayv = _lib.alSourcePlayv
alSourcePlayv.restype = None
alSourcePlayv.argtypes = [ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:559
alSourceStopv = _lib.alSourceStopv
alSourceStopv.restype = None
alSourceStopv.argtypes = [ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:562
alSourceRewindv = _lib.alSourceRewindv
alSourceRewindv.restype = None
alSourceRewindv.argtypes = [ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:565
alSourcePausev = _lib.alSourcePausev
alSourcePausev.restype = None
alSourcePausev.argtypes = [ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:572
alSourcePlay = _lib.alSourcePlay
alSourcePlay.restype = None
alSourcePlay.argtypes = [ALuint]
# /usr/include/AL/al.h:575
alSourceStop = _lib.alSourceStop
alSourceStop.restype = None
alSourceStop.argtypes = [ALuint]
# /usr/include/AL/al.h:578
alSourceRewind = _lib.alSourceRewind
alSourceRewind.restype = None
alSourceRewind.argtypes = [ALuint]
# /usr/include/AL/al.h:581
alSourcePause = _lib.alSourcePause
alSourcePause.restype = None
alSourcePause.argtypes = [ALuint]
# /usr/include/AL/al.h:586
alSourceQueueBuffers = _lib.alSourceQueueBuffers
alSourceQueueBuffers.restype = None
alSourceQueueBuffers.argtypes = [ALuint, ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:588
alSourceUnqueueBuffers = _lib.alSourceUnqueueBuffers
alSourceUnqueueBuffers.restype = None
alSourceUnqueueBuffers.argtypes = [ALuint, ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:606
alGenBuffers = _lib.alGenBuffers
alGenBuffers.restype = None
alGenBuffers.argtypes = [ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:609
alDeleteBuffers = _lib.alDeleteBuffers
alDeleteBuffers.restype = None
alDeleteBuffers.argtypes = [ALsizei, POINTER(ALuint)]
# /usr/include/AL/al.h:612
alIsBuffer = _lib.alIsBuffer
alIsBuffer.restype = ALboolean
alIsBuffer.argtypes = [ALuint]
# /usr/include/AL/al.h:615
alBufferData = _lib.alBufferData
alBufferData.restype = None
alBufferData.argtypes = [ALuint, ALenum, POINTER(ALvoid), ALsizei, ALsizei]
# /usr/include/AL/al.h:620
alBufferf = _lib.alBufferf
alBufferf.restype = None
alBufferf.argtypes = [ALuint, ALenum, ALfloat]
# /usr/include/AL/al.h:622
alBuffer3f = _lib.alBuffer3f
alBuffer3f.restype = None
alBuffer3f.argtypes = [ALuint, ALenum, ALfloat, ALfloat, ALfloat]
# /usr/include/AL/al.h:624
alBufferfv = _lib.alBufferfv
alBufferfv.restype = None
alBufferfv.argtypes = [ALuint, ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:626
alBufferi = _lib.alBufferi
alBufferi.restype = None
alBufferi.argtypes = [ALuint, ALenum, ALint]
# /usr/include/AL/al.h:628
alBuffer3i = _lib.alBuffer3i
alBuffer3i.restype = None
alBuffer3i.argtypes = [ALuint, ALenum, ALint, ALint, ALint]
# /usr/include/AL/al.h:630
alBufferiv = _lib.alBufferiv
alBufferiv.restype = None
alBufferiv.argtypes = [ALuint, ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:635
alGetBufferf = _lib.alGetBufferf
alGetBufferf.restype = None
alGetBufferf.argtypes = [ALuint, ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:637
alGetBuffer3f = _lib.alGetBuffer3f
alGetBuffer3f.restype = None
alGetBuffer3f.argtypes = [ALuint, ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)]
# /usr/include/AL/al.h:639
alGetBufferfv = _lib.alGetBufferfv
alGetBufferfv.restype = None
alGetBufferfv.argtypes = [ALuint, ALenum, POINTER(ALfloat)]
# /usr/include/AL/al.h:641
alGetBufferi = _lib.alGetBufferi
alGetBufferi.restype = None
alGetBufferi.argtypes = [ALuint, ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:643
alGetBuffer3i = _lib.alGetBuffer3i
alGetBuffer3i.restype = None
alGetBuffer3i.argtypes = [ALuint, ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)]
# /usr/include/AL/al.h:645
alGetBufferiv = _lib.alGetBufferiv
alGetBufferiv.restype = None
alGetBufferiv.argtypes = [ALuint, ALenum, POINTER(ALint)]
# /usr/include/AL/al.h:651
alDopplerFactor = _lib.alDopplerFactor
alDopplerFactor.restype = None
alDopplerFactor.argtypes = [ALfloat]
# /usr/include/AL/al.h:653
alDopplerVelocity = _lib.alDopplerVelocity
alDopplerVelocity.restype = None
alDopplerVelocity.argtypes = [ALfloat]
# /usr/include/AL/al.h:655
alSpeedOfSound = _lib.alSpeedOfSound
alSpeedOfSound.restype = None
alSpeedOfSound.argtypes = [ALfloat]
# /usr/include/AL/al.h:657
alDistanceModel = _lib.alDistanceModel
alDistanceModel.restype = None
alDistanceModel.argtypes = [ALenum]
LPALENABLE = CFUNCTYPE(None, ALenum) # /usr/include/AL/al.h:662
LPALDISABLE = CFUNCTYPE(None, ALenum) # /usr/include/AL/al.h:663
LPALISENABLED = CFUNCTYPE(ALboolean, ALenum) # /usr/include/AL/al.h:664
LPALGETSTRING = CFUNCTYPE(POINTER(ALchar), ALenum) # /usr/include/AL/al.h:665
LPALGETBOOLEANV = CFUNCTYPE(None, ALenum, POINTER(ALboolean)) # /usr/include/AL/al.h:666
LPALGETINTEGERV = CFUNCTYPE(None, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:667
LPALGETFLOATV = CFUNCTYPE(None, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:668
LPALGETDOUBLEV = CFUNCTYPE(None, ALenum, POINTER(ALdouble)) # /usr/include/AL/al.h:669
LPALGETBOOLEAN = CFUNCTYPE(ALboolean, ALenum) # /usr/include/AL/al.h:670
LPALGETINTEGER = CFUNCTYPE(ALint, ALenum) # /usr/include/AL/al.h:671
LPALGETFLOAT = CFUNCTYPE(ALfloat, ALenum) # /usr/include/AL/al.h:672
LPALGETDOUBLE = CFUNCTYPE(ALdouble, ALenum) # /usr/include/AL/al.h:673
LPALGETERROR = CFUNCTYPE(ALenum) # /usr/include/AL/al.h:674
LPALISEXTENSIONPRESENT = CFUNCTYPE(ALboolean, POINTER(ALchar)) # /usr/include/AL/al.h:675
LPALGETPROCADDRESS = CFUNCTYPE(POINTER(c_void), POINTER(ALchar)) # /usr/include/AL/al.h:676
LPALGETENUMVALUE = CFUNCTYPE(ALenum, POINTER(ALchar)) # /usr/include/AL/al.h:677
LPALLISTENERF = CFUNCTYPE(None, ALenum, ALfloat) # /usr/include/AL/al.h:678
LPALLISTENER3F = CFUNCTYPE(None, ALenum, ALfloat, ALfloat, ALfloat) # /usr/include/AL/al.h:679
LPALLISTENERFV = CFUNCTYPE(None, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:680
LPALLISTENERI = CFUNCTYPE(None, ALenum, ALint) # /usr/include/AL/al.h:681
LPALLISTENER3I = CFUNCTYPE(None, ALenum, ALint, ALint, ALint) # /usr/include/AL/al.h:682
LPALLISTENERIV = CFUNCTYPE(None, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:683
LPALGETLISTENERF = CFUNCTYPE(None, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:684
LPALGETLISTENER3F = CFUNCTYPE(None, ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)) # /usr/include/AL/al.h:685
LPALGETLISTENERFV = CFUNCTYPE(None, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:686
LPALGETLISTENERI = CFUNCTYPE(None, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:687
LPALGETLISTENER3I = CFUNCTYPE(None, ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)) # /usr/include/AL/al.h:688
LPALGETLISTENERIV = CFUNCTYPE(None, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:689
LPALGENSOURCES = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:690
LPALDELETESOURCES = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:691
LPALISSOURCE = CFUNCTYPE(ALboolean, ALuint) # /usr/include/AL/al.h:692
LPALSOURCEF = CFUNCTYPE(None, ALuint, ALenum, ALfloat) # /usr/include/AL/al.h:693
LPALSOURCE3F = CFUNCTYPE(None, ALuint, ALenum, ALfloat, ALfloat, ALfloat) # /usr/include/AL/al.h:694
LPALSOURCEFV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:695
LPALSOURCEI = CFUNCTYPE(None, ALuint, ALenum, ALint) # /usr/include/AL/al.h:696
LPALSOURCE3I = CFUNCTYPE(None, ALuint, ALenum, ALint, ALint, ALint) # /usr/include/AL/al.h:697
LPALSOURCEIV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:698
LPALGETSOURCEF = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:699
LPALGETSOURCE3F = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)) # /usr/include/AL/al.h:700
LPALGETSOURCEFV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:701
LPALGETSOURCEI = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:702
LPALGETSOURCE3I = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)) # /usr/include/AL/al.h:703
LPALGETSOURCEIV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:704
LPALSOURCEPLAYV = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:705
LPALSOURCESTOPV = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:706
LPALSOURCEREWINDV = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:707
LPALSOURCEPAUSEV = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:708
LPALSOURCEPLAY = CFUNCTYPE(None, ALuint) # /usr/include/AL/al.h:709
LPALSOURCESTOP = CFUNCTYPE(None, ALuint) # /usr/include/AL/al.h:710
LPALSOURCEREWIND = CFUNCTYPE(None, ALuint) # /usr/include/AL/al.h:711
LPALSOURCEPAUSE = CFUNCTYPE(None, ALuint) # /usr/include/AL/al.h:712
LPALSOURCEQUEUEBUFFERS = CFUNCTYPE(None, ALuint, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:713
LPALSOURCEUNQUEUEBUFFERS = CFUNCTYPE(None, ALuint, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:714
LPALGENBUFFERS = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:715
LPALDELETEBUFFERS = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:716
LPALISBUFFER = CFUNCTYPE(ALboolean, ALuint) # /usr/include/AL/al.h:717
LPALBUFFERDATA = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALvoid), ALsizei, ALsizei) # /usr/include/AL/al.h:718
LPALBUFFERF = CFUNCTYPE(None, ALuint, ALenum, ALfloat) # /usr/include/AL/al.h:719
LPALBUFFER3F = CFUNCTYPE(None, ALuint, ALenum, ALfloat, ALfloat, ALfloat) # /usr/include/AL/al.h:720
LPALBUFFERFV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:721
LPALBUFFERI = CFUNCTYPE(None, ALuint, ALenum, ALint) # /usr/include/AL/al.h:722
LPALBUFFER3I = CFUNCTYPE(None, ALuint, ALenum, ALint, ALint, ALint) # /usr/include/AL/al.h:723
LPALBUFFERIV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:724
LPALGETBUFFERF = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:725
LPALGETBUFFER3F = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)) # /usr/include/AL/al.h:726
LPALGETBUFFERFV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:727
LPALGETBUFFERI = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:728
LPALGETBUFFER3I = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)) # /usr/include/AL/al.h:729
LPALGETBUFFERIV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:730
LPALDOPPLERFACTOR = CFUNCTYPE(None, ALfloat) # /usr/include/AL/al.h:731
LPALDOPPLERVELOCITY = CFUNCTYPE(None, ALfloat) # /usr/include/AL/al.h:732
LPALSPEEDOFSOUND = CFUNCTYPE(None, ALfloat) # /usr/include/AL/al.h:733
LPALDISTANCEMODEL = CFUNCTYPE(None, ALenum) # /usr/include/AL/al.h:734
__all__ = ['AL_API', 'ALAPI', 'AL_INVALID', 'AL_ILLEGAL_ENUM',
'AL_ILLEGAL_COMMAND', 'ALboolean', 'ALchar', 'ALbyte', 'ALubyte', 'ALshort',
'ALushort', 'ALint', 'ALuint', 'ALsizei', 'ALenum', 'ALfloat', 'ALdouble',
'ALvoid', 'AL_NONE', 'AL_FALSE', 'AL_TRUE', 'AL_SOURCE_RELATIVE',
'AL_CONE_INNER_ANGLE', 'AL_CONE_OUTER_ANGLE', 'AL_PITCH', 'AL_POSITION',
'AL_DIRECTION', 'AL_VELOCITY', 'AL_LOOPING', 'AL_BUFFER', 'AL_GAIN',
'AL_MIN_GAIN', 'AL_MAX_GAIN', 'AL_ORIENTATION', 'AL_SOURCE_STATE',
'AL_INITIAL', 'AL_PLAYING', 'AL_PAUSED', 'AL_STOPPED', 'AL_BUFFERS_QUEUED',
'AL_BUFFERS_PROCESSED', 'AL_SEC_OFFSET', 'AL_SAMPLE_OFFSET', 'AL_BYTE_OFFSET',
'AL_SOURCE_TYPE', 'AL_STATIC', 'AL_STREAMING', 'AL_UNDETERMINED',
'AL_FORMAT_MONO8', 'AL_FORMAT_MONO16', 'AL_FORMAT_STEREO8',
'AL_FORMAT_STEREO16', 'AL_REFERENCE_DISTANCE', 'AL_ROLLOFF_FACTOR',
'AL_CONE_OUTER_GAIN', 'AL_MAX_DISTANCE', 'AL_FREQUENCY', 'AL_BITS',
'AL_CHANNELS', 'AL_SIZE', 'AL_UNUSED', 'AL_PENDING', 'AL_PROCESSED',
'AL_NO_ERROR', 'AL_INVALID_NAME', 'AL_INVALID_ENUM', 'AL_INVALID_VALUE',
'AL_INVALID_OPERATION', 'AL_OUT_OF_MEMORY', 'AL_VENDOR', 'AL_VERSION',
'AL_RENDERER', 'AL_EXTENSIONS', 'AL_DOPPLER_FACTOR', 'AL_DOPPLER_VELOCITY',
'AL_SPEED_OF_SOUND', 'AL_DISTANCE_MODEL', 'AL_INVERSE_DISTANCE',
'AL_INVERSE_DISTANCE_CLAMPED', 'AL_LINEAR_DISTANCE',
'AL_LINEAR_DISTANCE_CLAMPED', 'AL_EXPONENT_DISTANCE',
'AL_EXPONENT_DISTANCE_CLAMPED', 'alEnable', 'alDisable', 'alIsEnabled',
'alGetString', 'alGetBooleanv', 'alGetIntegerv', 'alGetFloatv',
'alGetDoublev', 'alGetBoolean', 'alGetInteger', 'alGetFloat', 'alGetDouble',
'alGetError', 'alIsExtensionPresent', 'alGetProcAddress', 'alGetEnumValue',
'alListenerf', 'alListener3f', 'alListenerfv', 'alListeneri', 'alListener3i',
'alListeneriv', 'alGetListenerf', 'alGetListener3f', 'alGetListenerfv',
'alGetListeneri', 'alGetListener3i', 'alGetListeneriv', 'alGenSources',
'alDeleteSources', 'alIsSource', 'alSourcef', 'alSource3f', 'alSourcefv',
'alSourcei', 'alSource3i', 'alSourceiv', 'alGetSourcef', 'alGetSource3f',
'alGetSourcefv', 'alGetSourcei', 'alGetSource3i', 'alGetSourceiv',
'alSourcePlayv', 'alSourceStopv', 'alSourceRewindv', 'alSourcePausev',
'alSourcePlay', 'alSourceStop', 'alSourceRewind', 'alSourcePause',
'alSourceQueueBuffers', 'alSourceUnqueueBuffers', 'alGenBuffers',
'alDeleteBuffers', 'alIsBuffer', 'alBufferData', 'alBufferf', 'alBuffer3f',
'alBufferfv', 'alBufferi', 'alBuffer3i', 'alBufferiv', 'alGetBufferf',
'alGetBuffer3f', 'alGetBufferfv', 'alGetBufferi', 'alGetBuffer3i',
'alGetBufferiv', 'alDopplerFactor', 'alDopplerVelocity', 'alSpeedOfSound',
'alDistanceModel', 'LPALENABLE', 'LPALDISABLE', 'LPALISENABLED',
'LPALGETSTRING', 'LPALGETBOOLEANV', 'LPALGETINTEGERV', 'LPALGETFLOATV',
'LPALGETDOUBLEV', 'LPALGETBOOLEAN', 'LPALGETINTEGER', 'LPALGETFLOAT',
'LPALGETDOUBLE', 'LPALGETERROR', 'LPALISEXTENSIONPRESENT',
'LPALGETPROCADDRESS', 'LPALGETENUMVALUE', 'LPALLISTENERF', 'LPALLISTENER3F',
'LPALLISTENERFV', 'LPALLISTENERI', 'LPALLISTENER3I', 'LPALLISTENERIV',
'LPALGETLISTENERF', 'LPALGETLISTENER3F', 'LPALGETLISTENERFV',
'LPALGETLISTENERI', 'LPALGETLISTENER3I', 'LPALGETLISTENERIV',
'LPALGENSOURCES', 'LPALDELETESOURCES', 'LPALISSOURCE', 'LPALSOURCEF',
'LPALSOURCE3F', 'LPALSOURCEFV', 'LPALSOURCEI', 'LPALSOURCE3I', 'LPALSOURCEIV',
'LPALGETSOURCEF', 'LPALGETSOURCE3F', 'LPALGETSOURCEFV', 'LPALGETSOURCEI',
'LPALGETSOURCE3I', 'LPALGETSOURCEIV', 'LPALSOURCEPLAYV', 'LPALSOURCESTOPV',
'LPALSOURCEREWINDV', 'LPALSOURCEPAUSEV', 'LPALSOURCEPLAY', 'LPALSOURCESTOP',
'LPALSOURCEREWIND', 'LPALSOURCEPAUSE', 'LPALSOURCEQUEUEBUFFERS',
'LPALSOURCEUNQUEUEBUFFERS', 'LPALGENBUFFERS', 'LPALDELETEBUFFERS',
'LPALISBUFFER', 'LPALBUFFERDATA', 'LPALBUFFERF', 'LPALBUFFER3F',
'LPALBUFFERFV', 'LPALBUFFERI', 'LPALBUFFER3I', 'LPALBUFFERIV',
'LPALGETBUFFERF', 'LPALGETBUFFER3F', 'LPALGETBUFFERFV', 'LPALGETBUFFERI',
'LPALGETBUFFER3I', 'LPALGETBUFFERIV', 'LPALDOPPLERFACTOR',
'LPALDOPPLERVELOCITY', 'LPALSPEEDOFSOUND', 'LPALDISTANCEMODEL']
|
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import cloud
import salt.utils.cloud
cloud.__salt__ = {}
cloud.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class CloudTestCase(TestCase):
'''
Test cases for salt.states.cloud
'''
# 'present' function tests: 1
def test_present(self):
'''
Test to spin up a single instance on a cloud provider, using salt-cloud.
'''
name = 'mycloud'
cloud_provider = 'my_cloud_provider'
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[True, False])
mock_bool = MagicMock(side_effect=[True, False, False])
mock_dict = MagicMock(return_value={'cloud': 'saltcloud'})
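        # Note: each side_effect list is consumed one element per call, so
        # the ordering of the values above mirrors the sequence of
        # assertions below.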
with patch.dict(cloud.__salt__, {'cmd.retcode': mock,
'cloud.has_instance': mock_bool,
'cloud.create': mock_dict}):
comt = ('onlyif execution failed')
ret.update({'comment': comt})
self.assertDictEqual(cloud.present(name, cloud_provider,
onlyif=False), ret)
self.assertDictEqual(cloud.present(name, cloud_provider, onlyif=''),
ret)
comt = ('unless execution succeeded')
ret.update({'comment': comt})
self.assertDictEqual(cloud.present(name, cloud_provider,
unless=True), ret)
self.assertDictEqual(cloud.present(name, cloud_provider, unless=''),
ret)
comt = ('Already present instance {0}'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(cloud.present(name, cloud_provider), ret)
with patch.dict(cloud.__opts__, {'test': True}):
comt = ('Instance {0} needs to be created'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(cloud.present(name, cloud_provider), ret)
with patch.dict(cloud.__opts__, {'test': False}):
comt = ('Created instance mycloud using provider '
'my_cloud_provider and the following options: {}')
ret.update({'comment': comt, 'result': True,
'changes': {'cloud': 'saltcloud'}})
self.assertDictEqual(cloud.present(name, cloud_provider), ret)
# 'absent' function tests: 1
def test_absent(self):
'''
Test to ensure that no instances with the specified names exist.
'''
name = 'mycloud'
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[True, False])
mock_bool = MagicMock(side_effect=[False, True, True])
mock_dict = MagicMock(return_value={'cloud': 'saltcloud'})
with patch.dict(cloud.__salt__, {'cmd.retcode': mock,
'cloud.has_instance': mock_bool,
'cloud.destroy': mock_dict}):
comt = ('onlyif execution failed')
ret.update({'comment': comt})
self.assertDictEqual(cloud.absent(name, onlyif=False), ret)
self.assertDictEqual(cloud.absent(name, onlyif=''), ret)
comt = ('unless execution succeeded')
ret.update({'comment': comt})
self.assertDictEqual(cloud.absent(name, unless=True), ret)
self.assertDictEqual(cloud.absent(name, unless=''), ret)
comt = ('Already absent instance {0}'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(cloud.absent(name), ret)
with patch.dict(cloud.__opts__, {'test': True}):
comt = ('Instance {0} needs to be destroyed'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(cloud.absent(name), ret)
with patch.dict(cloud.__opts__, {'test': False}):
comt = (('Destroyed instance {0}').format(name))
ret.update({'comment': comt, 'result': True,
'changes': {'cloud': 'saltcloud'}})
self.assertDictEqual(cloud.absent(name), ret)
# 'profile' function tests: 1
def test_profile(self):
'''
Test to create a single instance on a cloud provider,
using a salt-cloud profile.
'''
name = 'mycloud'
profile = 'myprofile'
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[True, False])
mock_dict = MagicMock(side_effect=[{'cloud': 'saltcloud'},
{'Not Actioned': True},
{'Not Actioned': True},
{
'Not Found': True,
'Not Actioned/Not Running': True
}])
mock_d = MagicMock(return_value={})
with patch.dict(cloud.__salt__, {'cmd.retcode': mock,
'cloud.profile': mock_d,
'cloud.action': mock_dict}):
comt = ('onlyif execution failed')
ret.update({'comment': comt})
self.assertDictEqual(cloud.profile(name, profile, onlyif=False),
ret)
self.assertDictEqual(cloud.profile(name, profile, onlyif=''), ret)
comt = ('unless execution succeeded')
ret.update({'comment': comt})
self.assertDictEqual(cloud.profile(name, profile, unless=True), ret)
self.assertDictEqual(cloud.profile(name, profile, unless=''), ret)
comt = ('Already present instance {0}'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(cloud.profile(name, profile), ret)
with patch.dict(cloud.__opts__, {'test': True}):
comt = ('Instance {0} needs to be created'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(cloud.profile(name, profile), ret)
with patch.dict(cloud.__opts__, {'test': False}):
comt = (('Failed to create instance {0}'
'using profile {1}').format(name, profile))
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(cloud.profile(name, profile), ret)
with patch.dict(cloud.__opts__, {'test': False}):
comt = (('Failed to create instance {0}'
'using profile {1}').format(name, profile))
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(cloud.profile(name, profile), ret)
# 'volume_present' function tests: 1
def test_volume_present(self):
'''
Test to check that a block volume exists.
'''
name = 'mycloud'
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
mock = MagicMock(return_value=name)
mock_lst = MagicMock(side_effect=[[name], [], []])
with patch.dict(cloud.__salt__, {'cloud.volume_list': mock_lst,
'cloud.volume_create': mock}):
with patch.object(salt.utils.cloud, 'check_name',
MagicMock(return_value=True)):
comt = ('Invalid characters in name.')
ret.update({'comment': comt})
self.assertDictEqual(cloud.volume_present(name), ret)
comt = ('Volume exists: {0}'.format(name))
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(cloud.volume_present(name), ret)
with patch.dict(cloud.__opts__, {'test': True}):
comt = ('Volume {0} will be created.'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(cloud.volume_present(name), ret)
with patch.dict(cloud.__opts__, {'test': False}):
comt = ('Volume {0} was created'.format(name))
ret.update({'comment': comt, 'result': True,
'changes': {'old': None, 'new': name}})
self.assertDictEqual(cloud.volume_present(name), ret)
# 'volume_absent' function tests: 1
def test_volume_absent(self):
'''
Test to check that a block volume exists.
'''
name = 'mycloud'
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
mock = MagicMock(return_value=False)
mock_lst = MagicMock(side_effect=[[], [name], [name]])
with patch.dict(cloud.__salt__, {'cloud.volume_list': mock_lst,
'cloud.volume_delete': mock}):
with patch.object(salt.utils.cloud, 'check_name',
MagicMock(return_value=True)):
comt = ('Invalid characters in name.')
ret.update({'comment': comt})
self.assertDictEqual(cloud.volume_absent(name), ret)
comt = ('Volume is absent.')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(cloud.volume_absent(name), ret)
with patch.dict(cloud.__opts__, {'test': True}):
comt = ('Volume {0} will be deleted.'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(cloud.volume_absent(name), ret)
with patch.dict(cloud.__opts__, {'test': False}):
comt = ('Volume {0} failed to delete.'.format(name))
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(cloud.volume_absent(name), ret)
# 'volume_attached' function tests: 1
def test_volume_attached(self):
'''
Test to check if a block volume is attached.
'''
name = 'mycloud'
server_name = 'mycloud_server'
disk_name = 'trogdor'
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
mock = MagicMock(return_value=False)
mock_dict = MagicMock(side_effect=[{name: {'name': disk_name, 'attachments': True}}, {},
{name: {'name': disk_name, 'attachments': False}},
{name: {'name': disk_name, 'attachments': False}},
{name: {'name': disk_name, 'attachments': False}}])
with patch.dict(cloud.__salt__, {'cloud.volume_list': mock_dict,
'cloud.action': mock}):
with patch.object(salt.utils.cloud, 'check_name',
MagicMock(side_effect=[True, False, True])):
comt = ('Invalid characters in name.')
ret.update({'comment': comt})
self.assertDictEqual(cloud.volume_attached(name, server_name),
ret)
ret.update({'name': server_name})
self.assertDictEqual(cloud.volume_attached(name, server_name),
ret)
comt = ('Volume {0} is already attached: True'.format(disk_name))
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(cloud.volume_attached(name, server_name), ret)
comt = ('Volume {0} does not exist'.format(name))
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(cloud.volume_attached(name, server_name), ret)
comt = ('Server {0} does not exist'.format(server_name))
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(cloud.volume_attached(name, server_name), ret)
mock = MagicMock(return_value=True)
with patch.dict(cloud.__salt__, {'cloud.action': mock,
'cloud.volume_attach': mock}):
with patch.dict(cloud.__opts__, {'test': True}):
comt = ('Volume {0} will be will be attached.'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(cloud.volume_attached(name,
server_name),
ret)
with patch.dict(cloud.__opts__, {'test': False}):
comt = ('Volume {0} was created'.format(name))
ret.update({'comment': comt, 'result': True,
'changes': {'new': True,
'old': {'name': disk_name,
'attachments': False}}})
self.assertDictEqual(cloud.volume_attached(name,
server_name),
ret)
# 'volume_detached' function tests: 1
def test_volume_detached(self):
'''
Test to check if a block volume is detached.
'''
name = 'mycloud'
server_name = 'mycloud_server'
disk_name = 'trogdor'
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
mock = MagicMock(return_value=False)
mock_dict = MagicMock(side_effect=[{name: {'name': disk_name, 'attachments': False}}, {},
{name: {'name': disk_name, 'attachments': True}},
{name: {'name': disk_name, 'attachments': True}},
{name: {'name': disk_name, 'attachments': True}}])
with patch.dict(cloud.__salt__, {'cloud.volume_list': mock_dict,
'cloud.action': mock}):
with patch.object(salt.utils.cloud, 'check_name',
MagicMock(side_effect=[True, False, True])):
comt = ('Invalid characters in name.')
ret.update({'comment': comt})
self.assertDictEqual(cloud.volume_detached(name, server_name),
ret)
ret.update({'name': server_name})
self.assertDictEqual(cloud.volume_detached(name, server_name),
ret)
comt = ('Volume {0} is not currently attached to anything.'.format(disk_name))
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(cloud.volume_detached(name, server_name), ret)
comt = ('Volume {0} does not exist'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(cloud.volume_detached(name, server_name), ret)
comt = ('Server {0} does not exist'.format(server_name))
ret.update({'comment': comt})
self.assertDictEqual(cloud.volume_detached(name, server_name), ret)
mock = MagicMock(return_value=True)
with patch.dict(cloud.__salt__, {'cloud.action': mock,
'cloud.volume_detach': mock}):
with patch.dict(cloud.__opts__, {'test': True}):
comt = ('Volume {0} will be will be detached.'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(cloud.volume_detached(name,
server_name),
ret)
with patch.dict(cloud.__opts__, {'test': False}):
comt = ('Volume {0} was created'.format(name))
ret.update({'comment': comt, 'result': True,
'changes': {'new': True,
'old': {'name': disk_name,
'attachments': True}}})
self.assertDictEqual(cloud.volume_detached(name,
server_name),
ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(CloudTestCase, needs_daemon=False)
|
|
import calendar
import time
import datetime
import yaml
from flask import (current_app, request, render_template, Blueprint, abort,
jsonify, g, session, Response)
from flask.ext.babel import gettext
from lever import get_joined
from .models import (OneMinuteShare, Block, OneMinuteType, FiveMinuteType,
FiveMinuteShare, OneHourShare, Status, DonationPercent,
FiveMinuteHashrate, OneMinuteHashrate, OneHourHashrate, OneMinuteTemperature,
FiveMinuteTemperature, OneHourTemperature, OneHourType)
from . import db, root, cache, babel
from .utils import (compress_typ, get_typ, verify_message, get_pool_acc_rej,
get_pool_eff, last_10_shares, collect_user_stats, get_adj_round_shares,
get_pool_hashrate, get_network_hashrate, last_block_time, get_alerts,
last_block_found, last_blockheight, resort_recent_visit,
collect_acct_items, all_blocks, get_block_stats, get_pool_workers)
main = Blueprint('main', __name__)
@babel.localeselector
def get_locale():
new_language = request.args.get('lang')
if new_language:
session['lang'] = new_language
    elif 'lang' not in session:
session['lang'] = request.accept_languages.best_match(current_app.config['accept_locales'].keys())
return session['lang']
@main.before_request
def before_request():
g.locale = get_locale()
@main.route("/")
def home():
news = yaml.load(open(root + '/static/yaml/news.yaml'))
return render_template('home.html', news=news)
@main.route("/news")
def news():
news = yaml.load(open(root + '/static/yaml/news.yaml'))
return render_template('news.html', news=news)
@main.route("/blocks")
def blocks():
blocks = all_blocks()
return render_template('blocks.html', blocks=blocks)
@main.route("/<address>/account")
def account(address):
return render_template('account.html',
acct_items=collect_acct_items(address, None))
@main.route("/pool_stats")
def pool_stats():
current_block = {'reward': cache.get('reward') or 0,
'difficulty': cache.get('difficulty') or 0,
'height': cache.get('blockheight') or 0}
blocks = db.session.query(Block).order_by(Block.height.desc()).limit(10)
pool_luck, effective_return, orphan_perc = get_block_stats(g.average_difficulty)
reject_total, accept_total = get_pool_acc_rej()
efficiency = get_pool_eff()
return render_template('pool_stats.html',
blocks=blocks,
current_block=current_block,
efficiency=efficiency,
accept_total=accept_total,
reject_total=reject_total,
pool_luck=pool_luck,
effective_return=effective_return,
orphan_perc=orphan_perc)
@main.route("/network_stats")
def network_stats():
network_block_time = current_app.config['block_time']
network_difficulty = cache.get('difficulty') or 0
network_avg_difficulty = g.average_difficulty or 0
network_blockheight = cache.get('blockheight') or 0
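    # A difficulty-1 target is hit once every 2**32 hashes on average, so
    # difficulty * 2**32 is the expected hashes per block; dividing by the
    # block time yields the network hashrate.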
network_hashrate = (network_difficulty * (2**32)) / network_block_time
return render_template('network_stats.html',
network_difficulty=network_difficulty,
network_avg_difficulty=network_avg_difficulty,
network_blockheight=network_blockheight,
network_hashrate=network_hashrate,
network_block_time=network_block_time)
@main.route("/network_stats/<graph_type>/<window>")
def network_graph_data(graph_type=None, window="hour"):
if not graph_type:
return None
type_map = {'hour': OneMinuteType,
'month': OneHourType,
'day': FiveMinuteType}
typ = type_map[window]
types = {}
compress = None
if window == "day":
compress = OneMinuteType
elif window == "month":
compress = FiveMinuteType
if compress:
for slc in get_typ(compress, q_typ=graph_type):
slice_dt = compress.floor_time(slc.time)
stamp = calendar.timegm(slice_dt.utctimetuple())
types.setdefault(slc.typ, {})
types[slc.typ].setdefault(stamp, 0)
types[slc.typ][stamp] += slc.value
for m in get_typ(typ, q_typ=graph_type):
stamp = calendar.timegm(m.time.utctimetuple())
types.setdefault(m.typ, {})
types[m.typ].setdefault(stamp, 0)
types[m.typ][stamp] += m.value
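    # Clamp the reporting window to whole slices, dropping the two most
    # recent slices (presumably because they may still be accumulating).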
step = typ.slice_seconds
end = ((int(time.time()) // step) * step) - (step * 2)
start = end - typ.window.total_seconds() + (step * 2)
return jsonify(start=start, end=end, step=step, workers=types)
@main.before_request
def add_pool_stats():
    try:
        if len(session['recent_users'][0]) != 2:
            session['recent_users'] = []
    except (KeyError, IndexError):
        pass
g.completed_block_shares = get_adj_round_shares()
g.round_duration = (datetime.datetime.utcnow() - last_block_time()).total_seconds()
g.hashrate = get_pool_hashrate()
g.net_hashrate = get_network_hashrate()
g.worker_count = get_pool_workers() or 0
g.average_difficulty = cache.get('difficulty_avg') or 1
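    # One share corresponds to 2**16 (65536) hashes in this pool's
    # accounting (see the hashrate math in summary_page), so the expected
    # difficulty * 2**32 hashes per block works out to difficulty * 2**16
    # shares.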
g.shares_to_solve = g.average_difficulty * (2 ** 16)
g.total_round_shares = g.shares_to_solve * current_app.config['last_n']
g.alerts = get_alerts()
@main.route("/close/<int:id>")
def close_alert(id):
dismissed_alerts = session.get('dismissed_alerts', [])
dismissed_alerts.append(id)
session['dismissed_alerts'] = dismissed_alerts
return Response('success')
@main.route("/api/pool_stats")
def pool_stats_api():
ret = {}
ret['hashrate'] = get_pool_hashrate()
ret['net_hashrate'] = get_network_hashrate()
ret['workers'] = g.worker_count
ret['completed_shares'] = g.completed_block_shares
ret['total_round_shares'] = g.total_round_shares
ret['round_duration'] = g.round_duration
sps = float(g.completed_block_shares) / g.round_duration
ret['shares_per_sec'] = sps
ret['last_block_found'] = last_blockheight()
ret['shares_to_solve'] = g.shares_to_solve
if sps > 0:
ret['est_sec_remaining'] = (float(g.shares_to_solve) - g.completed_block_shares) / sps
else:
ret['est_sec_remaining'] = 'infinite'
ret['pool_luck'], ret['effective_return'], ret['orphan_perc'] = get_block_stats(g.average_difficulty)
return jsonify(**ret)
@main.route("/api/last_block")
def last_block_api():
b = last_block_found()
if not b:
return jsonify()
return jsonify(difficulty=b.difficulty,
duration=str(b.duration),
shares_to_solve=b.shares_to_solve,
found_by=b.user,
luck=b.luck,
height=b.height,
hash=b.hash)
@main.route("/index.php")
def mpos_pool_stats_api():
ret = {}
action = request.args.get('action', 'none')
api_key = request.args.get('api_key', 'none')
    if action == 'getpoolstatus' and api_key in current_app.config['mpos_api_keys']:
sps = float(g.completed_block_shares) / g.round_duration
difficulty = cache.get('difficulty') or 0
blockheight = cache.get('blockheight') or 0
data = {"pool_name": current_app.config['site_url'],
"hashrate": round(get_pool_hashrate(), 0),
"efficiency": round(get_pool_eff(), 2),
"workers": g.worker_count,
"currentnetworkblock": blockheight,
"nextnetworkblock": blockheight+1,
"lastblock": last_blockheight(),
"networkdiff": difficulty,
"esttime": round((float(g.shares_to_solve) - g.completed_block_shares) / sps, 0),
"estshares": round(g.shares_to_solve, 0),
"timesincelast": round(g.round_duration, 0),
# possible deprecation
# "nethashrate": get_network_hashrate()
"nethashrate": round((difficulty * 2**32) / current_app.config['block_time'], 0)
}
ret['getpoolstatus'] = {"version": "0.6.3", "runtime": 0, "data": data}
return jsonify(**ret)
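# Worked example of the MPOS "nethashrate" estimate above: a difficulty D
# implies roughly D * 2**32 hashes per block, so dividing by the target
# block time yields hashes per second (numbers here are illustrative).
def _demo_nethashrate():
    difficulty, block_time = 1.0, 60.0
    est = (difficulty * 2 ** 32) / block_time  # ~71.6 MH/s at difficulty 1
    assert int(est) == 71582788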
@main.route("/stats")
def user_stats():
return render_template('stats.html')
@main.route("/round_summary")
def summary_page():
user_shares = cache.get('pplns_user_shares')
cached_time = cache.get('pplns_cache_time')
cached_donation = cache.get('user_donations')
    def user_match(user):
        # fall back to the configured default when there is no cached
        # donation entry for this user
        if cached_donation is not None and user in cached_donation:
            return cached_donation[user]
        return current_app.config['default_perc']
if cached_time is not None:
cached_time = cached_time.replace(second=0, microsecond=0).strftime("%Y-%m-%d %H:%M")
redacted = set(current_app.config.get('redacted_addresses', set()))
user_list = []
total_hashrate = 0.0
if user_shares is not None:
for user, shares in user_shares.iteritems():
user = user[6:]
hashrate = (65536 * last_10_shares(user) / 600)
total_hashrate += hashrate
dat = {'hashrate': hashrate,
'shares': shares,
'user': user if user not in redacted else None,
'donation_perc': user_match(user)}
user_list.append(dat)
user_list = sorted(user_list, key=lambda x: x['shares'], reverse=True)
return render_template('round_summary.html',
users=user_list,
blockheight=cache.get('blockheight') or 0,
cached_time=cached_time,
total_hashrate=total_hashrate)
@main.route("/exc_test")
def exception():
current_app.logger.warn("Exception test!")
raise Exception()
return ""
@main.route("/<address>/<worker>/details/<int:gpu>")
@main.route("/<address>/details/<int:gpu>", defaults={'worker': ''})
@main.route("/<address>//details/<int:gpu>", defaults={'worker': ''})
def gpu_detail(address, worker, gpu):
status = Status.query.filter_by(user=address, worker=worker).first()
if status:
output = status.pretty_json(gpu)
else:
output = gettext("Not available")
return jsonify(output=output)
@main.route("/<address>/<worker>")
def worker_detail(address, worker):
status = Status.query.filter_by(user=address, worker=worker).first()
return render_template('worker_detail.html',
status=status,
username=address,
worker=worker)
@main.route("/<address>/<worker>/<stat_type>/<window>")
def worker_stats(address=None, worker=None, stat_type=None, window="hour"):
if not address or not worker or not stat_type:
return None
type_lut = {'hash': {'hour': OneMinuteHashrate,
'day': FiveMinuteHashrate,
'day_compressed': OneMinuteHashrate,
'month': OneHourHashrate,
'month_compressed': FiveMinuteHashrate},
'temp': {'hour': OneMinuteTemperature,
'day': FiveMinuteTemperature,
'day_compressed': OneMinuteTemperature,
'month': OneHourTemperature,
'month_compressed': FiveMinuteTemperature}}
    # store all the raw data that we've grabbed
workers = {}
typ = type_lut[stat_type][window]
if window == "day":
compress_typ(type_lut[stat_type]['day_compressed'], workers, address, worker=worker)
elif window == "month":
compress_typ(type_lut[stat_type]['month_compressed'], workers, address, worker=worker)
for m in get_typ(typ, address, worker=worker):
stamp = calendar.timegm(m.time.utctimetuple())
        # a specific worker was requested, so break the data down per device
        if worker and worker != 'undefined':
workers.setdefault(m.device, {})
workers[m.device].setdefault(stamp, 0)
workers[m.device][stamp] += m.value
else:
workers.setdefault(m.worker, {})
workers[m.worker].setdefault(stamp, 0)
workers[m.worker][stamp] += m.value
step = typ.slice_seconds
end = ((int(time.time()) // step) * step) - (step * 2)
start = end - typ.window.total_seconds() + (step * 2)
return jsonify(start=start, end=end, step=step, workers=workers)
@main.route("/<address>")
def user_dashboard(address=None):
if len(address) != 34:
abort(404)
stats = collect_user_stats(address)
# reorganize/create the recently viewed
recent = session.get('recent_user_counts', {})
recent.setdefault(address, 0)
recent[address] += 1
session['recent_user_counts'] = recent
resort_recent_visit(recent)
return render_template('user_stats.html', username=address, **stats)
@main.route("/api/<address>")
def address_api(address):
if len(address) != 34:
abort(404)
stats = collect_user_stats(address)
stats['acct_items'] = get_joined(stats['acct_items'])
stats['total_earned'] = float(stats['total_earned'])
if stats['pplns_cached_time']:
stats['pplns_cached_time'] = calendar.timegm(stats['pplns_cached_time'].utctimetuple())
day_shares = stats['last_10_shares'] * 6 * 24
daily_percentage = float(day_shares) / g.shares_to_solve
donation_perc = (1 - (stats['donation_perc'] / 100.0))
rrwd = current_app.config['reward']
stats['daily_est'] = daily_percentage * rrwd * donation_perc
stats['est_round_payout'] = (float(stats['round_shares']) / g.total_round_shares) * donation_perc * rrwd
return jsonify(**stats)
@main.route("/<address>/clear")
def address_clear(address=None):
# remove address from the recently viewed
    recent = session.get('recent_user_counts', {})
try:
del recent[address]
except KeyError:
pass
resort_recent_visit(recent)
return jsonify(recent=session['recent_users'])
@main.route("/<address>/stats")
@main.route("/<address>/stats/<window>")
def address_stats(address=None, window="hour"):
    # store all the raw data that we've grabbed
workers = {}
if window == "hour":
typ = OneMinuteShare
elif window == "day":
compress_typ(OneMinuteShare, workers, address)
typ = FiveMinuteShare
elif window == "month":
compress_typ(FiveMinuteShare, workers, address)
typ = OneHourShare
for m in get_typ(typ, address):
stamp = calendar.timegm(m.time.utctimetuple())
workers.setdefault(m.worker, {})
workers[m.worker].setdefault(stamp, 0)
workers[m.worker][stamp] += m.value
step = typ.slice_seconds
end = ((int(time.time()) // step) * step) - (step * 2)
start = end - typ.window.total_seconds() + (step * 2)
if address == "pool" and '' in workers:
workers['Entire Pool'] = workers['']
del workers['']
return jsonify(start=start, end=end, step=step, workers=workers)
@main.errorhandler(Exception)
def handle_error(error):
current_app.logger.exception(error)
return render_template("500.html")
@main.route("/guides")
@main.route("/guides/")
def guides_index():
return render_template("guides/index.html")
@main.route("/guides/<guide>")
def guides(guide):
return render_template("guides/" + guide + ".html")
@main.route("/faq")
def faq():
return render_template("faq.html")
@main.route("/set_donation/<address>", methods=['POST', 'GET'])
def set_donation(address):
vals = request.form
result = ""
if request.method == "POST":
try:
verify_message(address, vals['message'], vals['signature'])
except Exception as e:
current_app.logger.info("Failed to validate!", exc_info=True)
result = gettext("An error occurred: ") + str(e)
else:
result = gettext("Successfully changed!")
perc = DonationPercent.query.filter_by(user=address).first()
if not perc:
perc = current_app.config.get('default_perc', 0)
else:
perc = perc.perc
return render_template("set_donation.html", username=address, result=result,
perc=perc)
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import collections
from blazar import context
from blazar.db import api as db_api
from blazar import policy
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BasePlugin(object, metaclass=abc.ABCMeta):
resource_type = 'none'
title = None
description = None
monitor = None
query_options = None
def get_plugin_opts(self):
"""Plugin can expose some options that should be specified in conf file
For example:
def get_plugin_opts(self):
return [
cfg.StrOpt('mandatory-conf', required=True),
cfg.StrOpt('optional_conf', default="42"),
]
"""
return []
def setup(self, conf):
"""Plugin initialization
:param conf: plugin-specific configurations
"""
pass
def to_dict(self):
return {
'resource_type': self.resource_type,
'title': self.title,
'description': self.description,
}
@abc.abstractmethod
def get(self, resource_id):
"""Get resource by id"""
pass
@abc.abstractmethod
def reserve_resource(self, reservation_id, values):
"""Reserve resource."""
pass
@abc.abstractmethod
def list_allocations(self, query, detail=False):
"""List resource allocations."""
pass
@abc.abstractmethod
def query_allocations(self, resource_id_list, lease_id=None,
reservation_id=None):
"""List resource allocations."""
pass
@abc.abstractmethod
def allocation_candidates(self, lease_values):
"""Get candidates for reservation allocation."""
pass
@abc.abstractmethod
def update_reservation(self, reservation_id, values):
"""Update reservation."""
pass
@abc.abstractmethod
def on_end(self, resource_id):
"""Delete resource."""
pass
@abc.abstractmethod
def on_start(self, resource_id):
"""Wake up resource."""
pass
def list_resource_properties(self, query):
detail = False if not query else query.get('detail', False)
all_properties = False if not query else query.get('all', False)
resource_properties = collections.defaultdict(list)
include_private = all_properties and policy.enforce(
context.current(), 'admin', {}, do_raise=False)
for name, private, value in db_api.resource_properties_list(
self.resource_type):
if include_private or not private:
resource_properties[name].append(value)
if detail:
return [
dict(property=k, private=False, values=v)
for k, v in resource_properties.items()]
else:
            return [dict(property=k) for k in resource_properties]
def update_resource_property(self, property_name, values):
return db_api.resource_property_update(
self.resource_type, property_name, values)
def before_end(self, resource_id):
"""Take actions before the end of a lease"""
pass
def heal_reservations(self, failed_resources, interval_begin,
interval_end):
"""Heal reservations which suffer from resource failures.
:param failed_resources: failed resources
:param interval_begin: start date of the period to heal.
:param interval_end: end date of the period to heal.
:return: a dictionary of {reservation id: flags to update}
e.g. {'de27786d-bd96-46bb-8363-19c13b2c6657':
{'missing_resources': True}}
"""
raise NotImplementedError
def get_query_options(self, params, index_type):
options = {k: params[k] for k in params
if k in self.query_options[index_type]}
unsupported = set(params) - set(options)
if unsupported:
LOG.debug('Unsupported query key is specified in API request: %s',
unsupported)
return options
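# A minimal illustrative BasePlugin subclass (a hypothetical "dummy"
# resource type, not a real Blazar plugin), showing which abstract methods
# a concrete plugin must provide:
class _DummyPlugin(BasePlugin):
    resource_type = 'dummy'
    title = 'Dummy plugin'
    description = 'Illustrative sketch only'
    query_options = {'list': ['lease_id', 'reservation_id']}
    def get(self, resource_id):
        return {'id': resource_id}
    def reserve_resource(self, reservation_id, values):
        return reservation_id
    def list_allocations(self, query, detail=False):
        return []
    def query_allocations(self, resource_id_list, lease_id=None,
                          reservation_id=None):
        return []
    def allocation_candidates(self, lease_values):
        return []
    def update_reservation(self, reservation_id, values):
        return None
    def on_start(self, resource_id):
        pass
    def on_end(self, resource_id):
        pass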
class BaseMonitorPlugin(metaclass=abc.ABCMeta):
"""Base class of monitor plugin."""
@abc.abstractmethod
def is_notification_enabled(self):
"""Check if the notification monitor is enabled."""
pass
@abc.abstractmethod
def get_notification_event_types(self):
"""Get a list of event types of messages to handle."""
pass
@abc.abstractmethod
def get_notification_topics(self):
"""Get a list of topics of notification to subscribe to."""
pass
@abc.abstractmethod
def notification_callback(self, event_type, payload):
"""Handle a notification message.
It is used as a callback of a notification based resource monitor.
:param event_type: an event type of a notification.
:param payload: a payload of a notification.
:return: a dictionary of {reservation id: flags to update}
e.g. {'de27786d-bd96-46bb-8363-19c13b2c6657':
{'missing_resources': True}}
"""
pass
@abc.abstractmethod
def is_polling_enabled(self):
"""Check if the polling monitor is enabled."""
pass
@abc.abstractmethod
def get_polling_interval(self):
"""Get an interval of polling in seconds."""
pass
@abc.abstractmethod
def poll(self):
"""Check health of resources.
:return: a dictionary of {reservation id: flags to update}
e.g. {'de27786d-bd96-46bb-8363-19c13b2c6657':
{'missing_resources': True}}
"""
pass
@abc.abstractmethod
def get_healing_interval(self):
"""Get interval of reservation healing in minutes."""
pass
@abc.abstractmethod
def heal(self):
"""Heal suffering reservations.
:return: a dictionary of {reservation id: flags to update}
"""
|
|
# -*- coding: utf-8 -*-
#
# Cipher/PKCS1_OAEP.py : PKCS#1 OAEP
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RSA encryption protocol according to PKCS#1 OAEP
See RFC3447__ or the `original RSA Labs specification`__ .
This scheme is more properly called ``RSAES-OAEP``.
As an example, a sender may encrypt a message in this way:
>>> from Crypto.Cipher import PKCS1_OAEP
>>> from Crypto.PublicKey import RSA
>>>
>>> message = 'To be encrypted'
>>> key = RSA.importKey(open('pubkey.der').read())
>>> cipher = PKCS1_OAEP.new(key)
>>> ciphertext = cipher.encrypt(message)
At the receiver side, decryption can be done using the private part of
the RSA key:
>>> key = RSA.importKey(open('privkey.der').read())
>>> cipher = PKCS1_OAP.new(key)
>>> message = cipher.decrypt(ciphertext)
:undocumented: __revision__, __package__
.. __: http://www.ietf.org/rfc/rfc3447.txt
.. __: http://www.rsa.com/rsalabs/node.asp?id=2125
"""
from __future__ import nested_scopes
__revision__ = "$Id$"
__all__ = [ 'new', 'PKCS1OAEP_Cipher' ]
import Crypto.Signature.PKCS1_PSS
import Crypto.Hash.SHA
from Crypto.Util.py3compat import *
import Crypto.Util.number
from Crypto.Util.number import ceil_div
from Crypto.Util.strxor import strxor
class PKCS1OAEP_Cipher:
"""This cipher can perform PKCS#1 v1.5 OAEP encryption or decryption."""
def __init__(self, key, hashAlgo, mgfunc, label):
"""Initialize this PKCS#1 OAEP cipher object.
:Parameters:
key : an RSA key object
If a private half is given, both encryption and decryption are possible.
If a public half is given, only encryption is possible.
hashAlgo : hash object
The hash function to use. This can be a module under `Crypto.Hash`
or an existing hash object created from any of such modules. If not specified,
`Crypto.Hash.SHA` (that is, SHA-1) is used.
mgfunc : callable
A mask generation function that accepts two parameters: a string to
                use as seed, and the length of the mask to generate, in bytes.
If not specified, the standard MGF1 is used (a safe choice).
label : string
A label to apply to this particular encryption. If not specified,
an empty string is used. Specifying a label does not improve
security.
:attention: Modify the mask generation function only if you know what you are doing.
Sender and receiver must use the same one.
"""
self._key = key
if hashAlgo:
self._hashObj = hashAlgo
else:
self._hashObj = Crypto.Hash.SHA
if mgfunc:
self._mgf = mgfunc
else:
self._mgf = lambda x,y: Crypto.Signature.PKCS1_PSS.MGF1(x,y,self._hashObj)
self._label = label
def can_encrypt(self):
"""Return True/1 if this cipher object can be used for encryption."""
return self._key.can_encrypt()
def can_decrypt(self):
"""Return True/1 if this cipher object can be used for decryption."""
return self._key.can_decrypt()
def encrypt(self, message):
"""Produce the PKCS#1 OAEP encryption of a message.
This function is named ``RSAES-OAEP-ENCRYPT``, and is specified in
section 7.1.1 of RFC3447.
:Parameters:
message : string
The message to encrypt, also known as plaintext. It can be of
variable length, but not longer than the RSA modulus (in bytes)
minus 2, minus twice the hash output size.
:Return: A string, the ciphertext in which the message is encrypted.
It is as long as the RSA modulus (in bytes).
:Raise ValueError:
If the RSA key length is not sufficiently long to deal with the given
message.
"""
# TODO: Verify the key is RSA
randFunc = self._key._randfunc
# See 7.1.1 in RFC3447
modBits = Crypto.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
hLen = self._hashObj.digest_size
mLen = len(message)
# Step 1b
ps_len = k-mLen-2*hLen-2
if ps_len<0:
raise ValueError("Plaintext is too long.")
# Step 2a
lHash = self._hashObj.new(self._label).digest()
# Step 2b
ps = bchr(0x00)*ps_len
# Step 2c
db = lHash + ps + bchr(0x01) + message
# Step 2d
ros = randFunc(hLen)
# Step 2e
dbMask = self._mgf(ros, k-hLen-1)
# Step 2f
maskedDB = strxor(db, dbMask)
# Step 2g
seedMask = self._mgf(maskedDB, hLen)
# Step 2h
maskedSeed = strxor(ros, seedMask)
# Step 2i
em = bchr(0x00) + maskedSeed + maskedDB
# Step 3a (OS2IP), step 3b (RSAEP), part of step 3c (I2OSP)
m = self._key.encrypt(em, 0)[0]
# Complete step 3c (I2OSP)
c = bchr(0x00)*(k-len(m)) + m
return c
def decrypt(self, ct):
"""Decrypt a PKCS#1 OAEP ciphertext.
This function is named ``RSAES-OAEP-DECRYPT``, and is specified in
section 7.1.2 of RFC3447.
:Parameters:
ct : string
The ciphertext that contains the message to recover.
:Return: A string, the original message.
:Raise ValueError:
If the ciphertext length is incorrect, or if the decryption does not
succeed.
:Raise TypeError:
If the RSA key has no private half.
"""
# TODO: Verify the key is RSA
# See 7.1.2 in RFC3447
modBits = Crypto.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
hLen = self._hashObj.digest_size
# Step 1b and 1c
if len(ct) != k or k<hLen+2:
raise ValueError("Ciphertext with incorrect length.")
# Step 2a (O2SIP), 2b (RSADP), and part of 2c (I2OSP)
m = self._key.decrypt(ct)
# Complete step 2c (I2OSP)
em = bchr(0x00)*(k-len(m)) + m
# Step 3a
lHash = self._hashObj.new(self._label).digest()
# Step 3b
y = em[0]
# y must be 0, but we MUST NOT check it here in order not to
# allow attacks like Manger's (http://dl.acm.org/citation.cfm?id=704143)
maskedSeed = em[1:hLen+1]
maskedDB = em[hLen+1:]
# Step 3c
seedMask = self._mgf(maskedDB, hLen)
# Step 3d
seed = strxor(maskedSeed, seedMask)
# Step 3e
dbMask = self._mgf(seed, k-hLen-1)
# Step 3f
db = strxor(maskedDB, dbMask)
# Step 3g
valid = 1
one = db[hLen:].find(bchr(0x01))
lHash1 = db[:hLen]
if lHash1!=lHash:
valid = 0
if one<0:
valid = 0
if bord(y)!=0:
valid = 0
if not valid:
raise ValueError("Incorrect decryption.")
# Step 4
return db[hLen+one+1:]
def new(key, hashAlgo=None, mgfunc=None, label=b('')):
"""Return a cipher object `PKCS1OAEP_Cipher` that can be used to perform PKCS#1 OAEP encryption or decryption.
:Parameters:
key : RSA key object
The key to use to encrypt or decrypt the message. This is a `Crypto.PublicKey.RSA` object.
Decryption is only possible if *key* is a private RSA key.
hashAlgo : hash object
The hash function to use. This can be a module under `Crypto.Hash`
or an existing hash object created from any of such modules. If not specified,
`Crypto.Hash.SHA` (that is, SHA-1) is used.
mgfunc : callable
A mask generation function that accepts two parameters: a string to
            use as seed, and the length of the mask to generate, in bytes.
If not specified, the standard MGF1 is used (a safe choice).
label : string
A label to apply to this particular encryption. If not specified,
an empty string is used. Specifying a label does not improve
security.
:attention: Modify the mask generation function only if you know what you are doing.
Sender and receiver must use the same one.
"""
return PKCS1OAEP_Cipher(key, hashAlgo, mgfunc, label)
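# A hedged round-trip sketch (assumes a PyCrypto installation; RSA.generate
# and SHA.digest_size are standard PyCrypto APIs, everything else below is
# purely illustrative):
def _demo_oaep_roundtrip():
    from Crypto.PublicKey import RSA
    from Crypto.Hash import SHA
    key = RSA.generate(2048)
    cipher = new(key)
    k = 2048 // 8                   # modulus size in bytes: 256
    h_len = SHA.digest_size         # 20 for SHA-1
    max_len = k - 2 * h_len - 2     # 214-byte plaintext limit (step 1b)
    msg = b'To be encrypted'
    assert len(msg) <= max_len
    ciphertext = cipher.encrypt(msg)
    assert len(ciphertext) == k     # ciphertext is as long as the modulus
    assert cipher.decrypt(ciphertext) == msg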
|
|
from rpython.flowspace.model import (Constant, Variable, SpaceOperation,
mkentrymap)
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.translator.unsimplify import insert_empty_block, split_block
def fold_op_list(operations, constants, exit_early=False, exc_catch=False):
newops = []
folded_count = 0
for spaceop in operations:
vargsmodif = False
vargs = []
args = []
for v in spaceop.args:
if isinstance(v, Constant):
args.append(v.value)
elif v in constants:
v = constants[v]
vargsmodif = True
args.append(v.value)
vargs.append(v)
try:
op = getattr(llop, spaceop.opname)
except AttributeError:
pass
else:
if not op.sideeffects and len(args) == len(vargs):
RESTYPE = spaceop.result.concretetype
try:
result = op(RESTYPE, *args)
except TypeError:
pass
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass # turn off reporting these as warnings: useless
#log.WARNING('constant-folding %r:' % (spaceop,))
#log.WARNING(' %s: %s' % (e.__class__.__name__, e))
else:
# success in folding this space operation
if spaceop.opname in fixup_op_result:
result = fixup_op_result[spaceop.opname](result)
constants[spaceop.result] = Constant(result, RESTYPE)
folded_count += 1
continue
# failed to fold an operation, exit early if requested
if exit_early:
return folded_count
else:
if vargsmodif:
if (spaceop.opname == 'indirect_call'
and isinstance(vargs[0], Constant)):
spaceop = SpaceOperation('direct_call', vargs[:-1],
spaceop.result)
else:
spaceop = SpaceOperation(spaceop.opname, vargs,
spaceop.result)
newops.append(spaceop)
# end
if exit_early:
return folded_count
else:
return newops
def constant_fold_block(block):
constants = {}
block.operations = fold_op_list(block.operations, constants,
exc_catch=block.canraise)
if constants:
if block.exitswitch in constants:
switch = constants[block.exitswitch].value
remaining_exits = [link for link in block.exits
if link.llexitcase == switch]
if not remaining_exits:
assert block.exits[-1].exitcase == 'default'
remaining_exits = [block.exits[-1]]
assert len(remaining_exits) == 1
remaining_exits[0].exitcase = None
remaining_exits[0].llexitcase = None
block.exitswitch = None
block.recloseblock(*remaining_exits)
for link in block.exits:
link.args = [constants.get(v, v) for v in link.args]
def fixup_solid(p):
# Operations returning pointers to inlined parts of a constant object
# have to be tweaked so that the inlined part keeps the whole object alive.
# XXX This is done with a hack. (See test_keepalive_const_*())
container = p._obj
assert isinstance(container, lltype._parentable)
container._keepparent = container._parentstructure()
# Instead of 'p', return a solid pointer, to keep the inlined part
# itself alive.
return container._as_ptr()
fixup_op_result = {
"getsubstruct": fixup_solid,
"getarraysubstruct": fixup_solid,
"direct_fieldptr": fixup_solid,
"direct_arrayitems": fixup_solid,
}
def complete_constants(link, constants):
# 'constants' maps some Variables of 'block' to Constants.
# Some input args of 'block' may be absent from 'constants'
# and must be fixed in the link to be passed directly from
# 'link.prevblock' instead of via 'block'.
for v1, v2 in zip(link.args, link.target.inputargs):
if v2 in constants:
assert constants[v2] is v1
else:
constants[v2] = v1
def rewire_link_for_known_exitswitch(link1, llexitvalue):
# For the case where link1.target contains only a switch, rewire link1
# to go directly to the correct exit based on a constant switch value.
# This is a situation that occurs typically after inlining; see
# test_fold_exitswitch_along_one_path.
block = link1.target
if block.exits[-1].exitcase == "default":
defaultexit = block.exits[-1]
nondefaultexits = block.exits[:-1]
else:
defaultexit = None
nondefaultexits = block.exits
for nextlink in nondefaultexits:
if nextlink.llexitcase == llexitvalue:
break # found -- the result is in 'nextlink'
else:
if defaultexit is None:
return # exit case not found! just ignore the problem here
nextlink = defaultexit
blockmapping = dict(zip(block.inputargs, link1.args))
newargs = []
for v in nextlink.args:
if isinstance(v, Variable):
v = blockmapping[v]
newargs.append(v)
link1.target = nextlink.target
link1.args = newargs
def prepare_constant_fold_link(link, constants, splitblocks):
block = link.target
if not block.operations:
# when the target block has no operation, there is nothing we can do
# except trying to fold an exitswitch
if block.exitswitch is not None and block.exitswitch in constants:
llexitvalue = constants[block.exitswitch].value
rewire_link_for_known_exitswitch(link, llexitvalue)
return
folded_count = fold_op_list(block.operations, constants, exit_early=True)
n = len(block.operations)
if block.canraise:
n -= 1
# is the next, non-folded operation an indirect_call?
if folded_count < n:
nextop = block.operations[folded_count]
if nextop.opname == 'indirect_call' and nextop.args[0] in constants:
# indirect_call -> direct_call
callargs = [constants[nextop.args[0]]]
constants1 = constants.copy()
complete_constants(link, constants1)
for v in nextop.args[1:-1]:
callargs.append(constants1.get(v, v))
v_result = Variable(nextop.result)
v_result.concretetype = nextop.result.concretetype
constants[nextop.result] = v_result
callop = SpaceOperation('direct_call', callargs, v_result)
newblock = insert_empty_block(link, [callop])
[link] = newblock.exits
assert link.target is block
folded_count += 1
if folded_count > 0:
splits = splitblocks.setdefault(block, [])
splits.append((folded_count, link, constants))
def rewire_links(splitblocks, graph):
for block, splits in splitblocks.items():
# A splitting position is given by how many operations were
# folded with the knowledge of an incoming link's constant.
# Various incoming links may cause various splitting positions.
# We split the block gradually, starting from the end.
splits.sort()
splits.reverse()
for position, link, constants in splits:
assert link.target is block
if position == len(block.operations) and block.exitswitch is None:
# a split here would leave nothing in the 2nd part, so
# directly rewire the links
assert len(block.exits) == 1
splitlink = block.exits[0]
else:
# split the block at the given position
splitlink = split_block(block, position)
assert list(block.exits) == [splitlink]
assert link.target is block
assert splitlink.prevblock is block
complete_constants(link, constants)
args = [constants.get(v, v) for v in splitlink.args]
link.args = args
link.target = splitlink.target
def constant_diffuse(graph):
count = 0
# after 'exitswitch vexit', replace 'vexit' with the corresponding constant
# if it also appears on the outgoing links
for block in graph.iterblocks():
vexit = block.exitswitch
if isinstance(vexit, Variable):
for link in block.exits:
if vexit in link.args and link.exitcase != 'default':
remap = {vexit: Constant(link.llexitcase,
vexit.concretetype)}
link.args = [remap.get(v, v) for v in link.args]
count += 1
# if the same constants appear at the same positions in all links
# into a block remove them from the links, remove the corresponding
# input variables and introduce equivalent same_as at the beginning
# of the block then try to fold the block further
for block, links in mkentrymap(graph).iteritems():
if block is graph.startblock:
continue
if block.exits == ():
continue
firstlink = links[0]
rest = links[1:]
diffuse = []
for i, c in enumerate(firstlink.args):
if not isinstance(c, Constant):
continue
for lnk in rest:
if lnk.args[i] != c:
break
else:
diffuse.append((i, c))
diffuse.reverse()
same_as = []
for i, c in diffuse:
for lnk in links:
del lnk.args[i]
v = block.inputargs.pop(i)
same_as.append(SpaceOperation('same_as', [c], v))
count += 1
block.operations = same_as + block.operations
if same_as:
constant_fold_block(block)
return count
def constant_fold_graph(graph):
# first fold inside the blocks
for block in graph.iterblocks():
if block.operations:
constant_fold_block(block)
# then fold along the links - a fixpoint process, because new links
# with new constants show up, even though we can probably prove that
# a single iteration is enough under some conditions, like the graph
# is in a join_blocks() form.
while 1:
diffused = constant_diffuse(graph)
splitblocks = {}
for link in list(graph.iterlinks()):
constants = {}
for v1, v2 in zip(link.args, link.target.inputargs):
if isinstance(v1, Constant):
constants[v2] = v1
if constants:
prepare_constant_fold_link(link, constants, splitblocks)
if splitblocks:
rewire_links(splitblocks, graph)
if not diffused and not splitblocks:
break # finished
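# The loop above is a generic fixpoint pattern. An illustrative sketch of
# the same idea on plain data (not RPython flow graphs): keep applying
# passes until a full sweep reports no change.
def _fixpoint(passes, state):
    while True:
        changed = False
        for one_pass in passes:
            changed = one_pass(state) or changed
        if not changed:
            return state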
def replace_symbolic(graph, symbolic, value):
result = False
for block in graph.iterblocks():
for op in block.operations:
for i, arg in enumerate(op.args):
if isinstance(arg, Constant) and arg.value is symbolic:
op.args[i] = value
result = True
if block.exitswitch is symbolic:
block.exitswitch = value
result = True
return result
def replace_we_are_jitted(graph):
from rpython.rlib import jit
replacement = Constant(0)
replacement.concretetype = lltype.Signed
did_replacement = replace_symbolic(graph, jit._we_are_jitted, replacement)
if did_replacement:
constant_fold_graph(graph)
return did_replacement
|
|
import os
from collections import OrderedDict
from conans.client import tools
from conans.client.build.compiler_flags import architecture_flag, parallel_compiler_cl_flag
from conans.client.build.cppstd_flags import cppstd_flag, cppstd_from_settings
from conans.client.tools import cross_building
from conans.client.tools.oss import get_cross_building_settings
from conans.errors import ConanException
from conans.model.build_info import DEFAULT_BIN, DEFAULT_INCLUDE, DEFAULT_LIB, DEFAULT_SHARE
from conans.model.version import Version
from conans.util.env_reader import get_env
from conans.util.log import logger
verbose_definition_name = "CMAKE_VERBOSE_MAKEFILE"
cmake_install_prefix_var_name = "CMAKE_INSTALL_PREFIX"
runtime_definition_var_name = "CONAN_LINK_RUNTIME"
cmake_in_local_cache_var_name = "CONAN_IN_LOCAL_CACHE"
def get_toolset(settings):
if settings.get_safe("compiler") == "Visual Studio":
subs_toolset = settings.get_safe("compiler.toolset")
if subs_toolset:
return subs_toolset
return None
def get_generator(settings):
if "CONAN_CMAKE_GENERATOR" in os.environ:
return os.environ["CONAN_CMAKE_GENERATOR"]
compiler = settings.get_safe("compiler")
arch = settings.get_safe("arch")
compiler_version = settings.get_safe("compiler.version")
os_build, _, _, _ = get_cross_building_settings(settings)
os_host = settings.get_safe("os")
if not compiler or not compiler_version or not arch:
if os_build == "Windows":
logger.warning("CMake generator could not be deduced from settings")
return None
return "Unix Makefiles"
if compiler == "Visual Studio":
_visuals = {'8': '8 2005',
'9': '9 2008',
'10': '10 2010',
'11': '11 2012',
'12': '12 2013',
'14': '14 2015',
'15': '15 2017',
'16': '16 2019'}
base = "Visual Studio %s" % _visuals.get(compiler_version,
"UnknownVersion %s" % compiler_version)
if os_host != "WindowsCE" and Version(compiler_version) < "16":
if arch == "x86_64":
base += " Win64"
elif "arm" in arch:
base += " ARM"
return base
# The generator depends on the build machine, not the target
if os_build == "Windows" and compiler != "qcc":
return "MinGW Makefiles" # it is valid only under Windows
return "Unix Makefiles"
def get_generator_platform(settings, generator):
if "CONAN_CMAKE_GENERATOR_PLATFORM" in os.environ:
return os.environ["CONAN_CMAKE_GENERATOR_PLATFORM"]
compiler = settings.get_safe("compiler")
arch = settings.get_safe("arch")
compiler_version = settings.get_safe("compiler.version")
if settings.get_safe("os") == "WindowsCE":
return settings.get_safe("os.platform")
if compiler == "Visual Studio" and Version(compiler_version) >= "16" \
and "Visual" in generator:
return {"x86": "Win32",
"x86_64": "x64",
"armv7": "ARM",
"armv8": "ARM64"}.get(arch)
return None
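# Illustrative use of get_generator_platform with a hypothetical minimal
# settings stand-in; only get_safe() is consulted by the function above
# (and this assumes CONAN_CMAKE_GENERATOR_PLATFORM is unset in the
# environment):
def _demo_generator_platform():
    class _FakeSettings(object):
        def __init__(self, values):
            self._values = values
        def get_safe(self, name):
            return self._values.get(name)
    settings = _FakeSettings({"compiler": "Visual Studio",
                              "compiler.version": "16",
                              "arch": "x86_64",
                              "os": "Windows"})
    assert get_generator_platform(settings, "Visual Studio 16 2019") == "x64"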
def is_multi_configuration(generator):
if not generator:
return False
return "Visual" in generator or "Xcode" in generator
def is_toolset_supported(generator):
# https://cmake.org/cmake/help/v3.14/variable/CMAKE_GENERATOR_TOOLSET.html
if not generator:
return False
return "Visual" in generator or "Xcode" in generator or "Green Hills MULTI" in generator
def is_generator_platform_supported(generator):
# https://cmake.org/cmake/help/v3.14/variable/CMAKE_GENERATOR_PLATFORM.html
if not generator:
return False
return "Visual" in generator or "Green Hills MULTI" in generator
def verbose_definition(value):
return {verbose_definition_name: "ON" if value else "OFF"}
def in_local_cache_definition(value):
return {cmake_in_local_cache_var_name: "ON" if value else "OFF"}
def runtime_definition(runtime):
return {runtime_definition_var_name: "/%s" % runtime} if runtime else {}
def build_type_definition(build_type, generator):
if build_type and not is_multi_configuration(generator):
return {"CMAKE_BUILD_TYPE": build_type}
return {}
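# Quick illustration of the rule above: single-configuration generators
# receive CMAKE_BUILD_TYPE, multi-configuration generators do not.
def _demo_build_type_definition():
    single = build_type_definition("Release", "Unix Makefiles")
    multi = build_type_definition("Release", "Visual Studio 15 2017")
    assert single == {"CMAKE_BUILD_TYPE": "Release"}
    assert multi == {}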
class CMakeDefinitionsBuilder(object):
def __init__(self, conanfile, cmake_system_name=True, make_program=None,
parallel=True, generator=None, set_cmake_flags=False,
forced_build_type=None, output=None):
self._conanfile = conanfile
self._forced_cmake_system_name = cmake_system_name
self._make_program = make_program
self._parallel = parallel
self._generator = generator
self._set_cmake_flags = set_cmake_flags
self._forced_build_type = forced_build_type
self._output = output
def _ss(self, setname):
"""safe setting"""
return self._conanfile.settings.get_safe(setname)
def _get_cpp_standard_vars(self):
cppstd = cppstd_from_settings(self._conanfile.settings)
compiler = self._ss("compiler")
compiler_version = self._ss("compiler.version")
if not cppstd:
return {}
ret = {}
if cppstd.startswith("gnu"):
ret["CONAN_CMAKE_CXX_STANDARD"] = cppstd[3:]
ret["CONAN_CMAKE_CXX_EXTENSIONS"] = "ON"
else:
ret["CONAN_CMAKE_CXX_STANDARD"] = cppstd
ret["CONAN_CMAKE_CXX_EXTENSIONS"] = "OFF"
ret["CONAN_STD_CXX_FLAG"] = cppstd_flag(compiler, compiler_version, cppstd)
return ret
def _cmake_cross_build_defines(self):
os_ = self._ss("os")
arch = self._ss("arch")
os_ver_str = "os.api_level" if os_ == "Android" else "os.version"
op_system_version = self._ss(os_ver_str)
env_sn = get_env("CONAN_CMAKE_SYSTEM_NAME", "")
env_sn = {"False": False, "True": True, "": None}.get(env_sn, env_sn)
cmake_system_name = env_sn or self._forced_cmake_system_name
os_build, _, _, _ = get_cross_building_settings(self._conanfile.settings)
compiler = self._ss("compiler")
libcxx = self._ss("compiler.libcxx")
ret = OrderedDict()
os_ver = get_env("CONAN_CMAKE_SYSTEM_VERSION", op_system_version)
toolchain_file = get_env("CONAN_CMAKE_TOOLCHAIN_FILE", "")
if toolchain_file != "":
logger.info("Setting Cross build toolchain file: %s" % toolchain_file)
ret["CMAKE_TOOLCHAIN_FILE"] = toolchain_file
return ret
if cmake_system_name is False:
return ret
# System name and system version
if cmake_system_name is not True: # String not empty
ret["CMAKE_SYSTEM_NAME"] = cmake_system_name
else: # detect if we are cross building and the system name and version
if cross_building(self._conanfile.settings): # We are cross building
if os_ != os_build:
if os_: # the_os is the host (regular setting)
ret["CMAKE_SYSTEM_NAME"] = {"iOS": "Darwin",
"tvOS": "Darwin",
"watchOS": "Darwin",
"Neutrino": "QNX"}.get(os_, os_)
else:
ret["CMAKE_SYSTEM_NAME"] = "Generic"
if os_ver:
ret["CMAKE_SYSTEM_VERSION"] = os_ver
if str(os_) == "Macos":
ret["CMAKE_OSX_DEPLOYMENT_TARGET"] = os_ver
# system processor
cmake_system_processor = os.getenv("CONAN_CMAKE_SYSTEM_PROCESSOR")
if cmake_system_processor:
ret["CMAKE_SYSTEM_PROCESSOR"] = cmake_system_processor
if ret: # If enabled cross compile
for env_var in ["CONAN_CMAKE_FIND_ROOT_PATH",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE"]:
value = os.getenv(env_var)
if value:
ret[env_var] = value
if self._conanfile and self._conanfile.deps_cpp_info.sysroot:
sysroot_path = self._conanfile.deps_cpp_info.sysroot
else:
sysroot_path = os.getenv("CONAN_CMAKE_FIND_ROOT_PATH", None)
if sysroot_path:
# Needs to be set here, can't be managed in the cmake generator, CMake needs
# to know about the sysroot before any other thing
ret["CMAKE_SYSROOT"] = sysroot_path.replace("\\", "/")
# Adjust Android stuff
if str(os_) == "Android" and ret["CMAKE_SYSTEM_NAME"] == "Android":
arch_abi_settings = tools.to_android_abi(arch)
if arch_abi_settings:
ret["CMAKE_ANDROID_ARCH_ABI"] = arch_abi_settings
ret["ANDROID_ABI"] = arch_abi_settings
conan_cmake_android_ndk = os.getenv("CONAN_CMAKE_ANDROID_NDK")
if conan_cmake_android_ndk:
ret["ANDROID_NDK"] = conan_cmake_android_ndk
ret["ANDROID_PLATFORM"] = "android-%s" % op_system_version
ret["ANDROID_TOOLCHAIN"] = compiler
# More details about supported stdc++ libraries here:
# https://developer.android.com/ndk/guides/cpp-support.html
if libcxx:
ret["ANDROID_STL"] = libcxx
else:
ret["ANDROID_STL"] = 'none'
logger.info("Setting Cross build flags: %s"
% ", ".join(["%s=%s" % (k, v) for k, v in ret.items()]))
return ret
def _get_make_program_definition(self):
make_program = os.getenv("CONAN_MAKE_PROGRAM") or self._make_program
if make_program:
if not tools.which(make_program):
self._output.warn("The specified make program '%s' cannot be found and will be "
"ignored" % make_program)
else:
self._output.info("Using '%s' as CMAKE_MAKE_PROGRAM" % make_program)
return {"CMAKE_MAKE_PROGRAM": make_program}
return {}
def get_definitions(self):
compiler = self._ss("compiler")
compiler_version = self._ss("compiler.version")
arch = self._ss("arch")
os_ = self._ss("os")
libcxx = self._ss("compiler.libcxx")
runtime = self._ss("compiler.runtime")
build_type = self._ss("build_type")
ret = OrderedDict()
ret.update(runtime_definition(runtime))
if self._forced_build_type and self._forced_build_type != build_type:
self._output.warn("Forced CMake build type ('%s') different from the settings build "
"type ('%s')" % (self._forced_build_type, build_type))
build_type = self._forced_build_type
ret.update(build_type_definition(build_type, self._generator))
if str(os_) == "Macos":
if arch == "x86":
ret["CMAKE_OSX_ARCHITECTURES"] = "i386"
ret.update(self._cmake_cross_build_defines())
ret.update(self._get_cpp_standard_vars())
ret["CONAN_EXPORTED"] = "1"
ret.update(in_local_cache_definition(self._conanfile.in_local_cache))
if compiler:
ret["CONAN_COMPILER"] = compiler
if compiler_version:
ret["CONAN_COMPILER_VERSION"] = str(compiler_version)
# C, CXX, LINK FLAGS
if compiler == "Visual Studio":
if self._parallel:
flag = parallel_compiler_cl_flag(output=self._output)
ret['CONAN_CXX_FLAGS'] = flag
ret['CONAN_C_FLAGS'] = flag
else: # arch_flag is only set for non Visual Studio
arch_flag = architecture_flag(compiler=compiler, os=os_, arch=arch)
if arch_flag:
ret['CONAN_CXX_FLAGS'] = arch_flag
ret['CONAN_SHARED_LINKER_FLAGS'] = arch_flag
ret['CONAN_C_FLAGS'] = arch_flag
if self._set_cmake_flags:
ret['CMAKE_CXX_FLAGS'] = arch_flag
ret['CMAKE_SHARED_LINKER_FLAGS'] = arch_flag
ret['CMAKE_C_FLAGS'] = arch_flag
if libcxx:
ret["CONAN_LIBCXX"] = libcxx
# Shared library
try:
ret["BUILD_SHARED_LIBS"] = "ON" if self._conanfile.options.shared else "OFF"
except ConanException:
pass
# Install to package folder
try:
if self._conanfile.package_folder:
ret["CMAKE_INSTALL_PREFIX"] = self._conanfile.package_folder
ret["CMAKE_INSTALL_BINDIR"] = DEFAULT_BIN
ret["CMAKE_INSTALL_SBINDIR"] = DEFAULT_BIN
ret["CMAKE_INSTALL_LIBEXECDIR"] = DEFAULT_BIN
ret["CMAKE_INSTALL_LIBDIR"] = DEFAULT_LIB
ret["CMAKE_INSTALL_INCLUDEDIR"] = DEFAULT_INCLUDE
ret["CMAKE_INSTALL_OLDINCLUDEDIR"] = DEFAULT_INCLUDE
ret["CMAKE_INSTALL_DATAROOTDIR"] = DEFAULT_SHARE
except AttributeError:
pass
# fpic
if not str(os_).startswith("Windows"):
fpic = self._conanfile.options.get_safe("fPIC")
if fpic is not None:
shared = self._conanfile.options.get_safe("shared")
ret["CONAN_CMAKE_POSITION_INDEPENDENT_CODE"] = "ON" if (fpic or shared) else "OFF"
# Adjust automatically the module path in case the conanfile is using the
# cmake_find_package or cmake_find_package_multi
install_folder = self._conanfile.install_folder.replace("\\", "/")
if "cmake_find_package" in self._conanfile.generators:
ret["CMAKE_MODULE_PATH"] = install_folder
if "cmake_find_package_multi" in self._conanfile.generators:
# The cmake_find_package_multi only works with targets and generates XXXConfig.cmake
# that require the prefix path and the module path
ret["CMAKE_PREFIX_PATH"] = install_folder
ret["CMAKE_MODULE_PATH"] = install_folder
ret.update(self._get_make_program_definition())
# Disable CMake export registry #3070 (CMake installing modules in user home's)
ret["CMAKE_EXPORT_NO_PACKAGE_REGISTRY"] = "ON"
return ret
|
|
import os
import datetime
import requests as req
import json
import jmespath
import operator
from functools import reduce
from features.src.support import constants
from features.src.support.loginusers_oauth2 import LoginUsersOauth2, user_tokens
from subprocess import check_output, CalledProcessError, STDOUT
count = 1
spaceID = ''
spaceName = ''
githubRepo = ''
def printToJson(titleText, r):
try:
print("******************* {} ********************".format(titleText))
parsed = json.loads(r.text)
print(json.dumps(parsed, indent=4, sort_keys=True))
except Exception as e:
print('Unexpected printToJson exception found: {}'.format(e))
print('Raw text of request/response: [{}]'.format(r.text))
def login_user(username="", password=""):
loginUser = LoginUsersOauth2(username, password)
loginUser.login_users()
def is_user_logged_in():
return len(user_tokens) > 0
def get_user_tokens(index=0):
return user_tokens[index]
def is_github_linked():
    if is_user_logged_in():
authUrl = os.getenv("AUTH_API")
authToken = get_user_tokens().split(";")[0]
authHeader = 'Bearer {}'.format(authToken)
getTokenUrl = '{}/api/token?force_pull=true&for=https://github.com'.format(authUrl)
headers = {'Accept': 'application/json',
'Authorization': authHeader,
'X-App': 'osio',
'X-Git-Provider': 'GitHub'}
r = req.get(
getTokenUrl,
headers=headers
)
if r.status_code == 200:
return True
else:
printToJson("GitHub token response", r)
return False
def create_space_name(template="BDD"):
now = datetime.datetime.now()
space = "{}-{}-{}".format(
os.getenv("OSIO_USERNAME"),
template,
"{:02d}{:02d}-{:02d}{:02d}".format(now.month, now.day, now.hour, now.minute)
)
space = space.replace('@', '-')
space = space.replace(':', '-')
space = space.replace('.', '-')
print("The spacename is: {}".format(space))
global spaceName
spaceName = space
return space
def getSpaceID():
global spaceID
return spaceID
def setSpaceID(theID):
global spaceID
spaceID = theID
def getWorkspaceID():
global workspaceID
return workspaceID
def setWorkspaceID(theSpaceID):
global workspaceID
workspaceID = theSpaceID
def getStackReportKey():
global stackReportKey
return stackReportKey
def setStackReportKey(theKey):
global stackReportKey
stackReportKey = theKey
def getSpaceName():
global spaceName
return spaceName
def setSpaceName(theName):
global spaceName
spaceName = theName
def getGithubRepo():
global githubRepo
return githubRepo
def setGithubRepo(theRepo):
global githubRepo
githubRepo = theRepo
def find_in_obj(obj, condition, path=None):
if path is None:
path = []
# In case this is a list
if isinstance(obj, list):
for index, value in enumerate(obj):
new_path = list(path)
new_path.append(index)
for result in find_in_obj(value, condition, path=new_path):
yield result
# In case this is a dictionary
if isinstance(obj, dict):
for key, value in obj.items():
new_path = list(path)
new_path.append(key)
for result in find_in_obj(value, condition, path=new_path):
yield result
if condition == value:
new_path = list(path)
new_path.append(key)
yield new_path
def getFromDict(dataDict, mapList):
return reduce(operator.getitem, mapList, dataDict)
def setInDict(dataDict, mapList, value):
getFromDict(dataDict, mapList[:-1])[mapList[-1]] = value
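# Illustrative use of the three path helpers above on plain data: locate a
# placeholder, read it back, then overwrite it in place.
def _demo_path_helpers():
    doc = {"a": [{"b": "$placeholder"}, {"b": "other"}]}
    path = next(find_in_obj(doc, "$placeholder"))   # ['a', 0, 'b']
    assert getFromDict(doc, path) == "$placeholder"
    setInDict(doc, path, "replaced")
    assert doc["a"][0]["b"] == "replaced"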
def read_post_data_file(file_name=None, replace=None, json_dir='planner_jsons'):
# Default json_dir is set to planner_jsons directory
if file_name is None:
print("No file name provided. No JSON to read!!")
return None
else:
try:
curr_dir = os.path.dirname(__file__)
filepath = os.path.join(curr_dir, json_dir, file_name)
with open(filepath, 'rb') as f:
json_data = json.load(f)
if replace is not None:
json_data = replace_values(json_data, replace)
return json_data
except Exception as e:
print("Exception reading file for JSON data")
print(e)
return None
def extract_value(extract_path=None, json_response=None):
if None in [json_response, extract_path]:
print("Either JSON response or the extractor path are None")
return None
else:
try:
return jmespath.search(extract_path, json_response.json())
except Exception:
print("Exception extracting value from the response body")
return None
def extract_header(extract_key=None, json_response=None):
if None in [json_response, extract_key]:
print("Either JSON response or the extractor path are None")
return None
else:
try:
return json_response.headers[extract_key]
except Exception:
print("Exception extracting header value from the response")
return None
def replace_values(orig_dict=None, strs_to_replace_dict=None):
if None not in [orig_dict, strs_to_replace_dict]:
paths = {}
for key_rep in strs_to_replace_dict:
            # keep the key as a str: find_in_obj compares with ==, and bytes
            # would never match the str values produced by json.load
            val_to_replace = key_rep
# print("Key to replace:"), val_to_replace
temp_list = []
temp_dict = {}
try:
for path in find_in_obj(orig_dict, val_to_replace):
temp_list.append(path)
temp_dict = {val_to_replace: temp_list}
paths.update(temp_dict)
# print("interim paths:"), paths
except KeyError:
print("All paths found")
except Exception:
print("Key not found in json blob")
# print("final paths:"), paths
for path in paths:
for i in range(len(paths[path])):
setInDict(orig_dict, paths[path][i], strs_to_replace_dict[path])
return orig_dict # Final updated JSON
else:
print("None value supplied for replacements")
def generate_entity_names(static_string=None, no_of_names=1, reverse=False, reset_counter=False):
"""Returns a list like this if called like: generate_entity_names('Area', 5)
['Area 1', 'Area 2', 'Area 3', 'Area 4', 'Area 5']
"""
global count
if reset_counter:
count = 1
mylist = []
total_entities = count + no_of_names
for i in range(count, total_entities):
if static_string is not None:
mylist.append("{}_{}".format(static_string, str(i)))
else:
mylist.append(i)
# Updating global counter
count = total_entities
if reverse:
mylist = list(reversed(mylist))
return mylist
def create_workitem_SDD(title=None, spaceid=None, witype=None, iterationid=None):
if None in [title, spaceid, witype]:
print("None value supplied for either SpaceID / WI-Title / WI-Type")
return None
# Create workitems in Iterations context
elif iterationid is not None:
api = "api/spaces/{}/workitems".format(spaceid)
url = constants.launch_detail.create_url(api)
f = read_post_data_file('create_wi_in_iter.json', replace={
'$wi_nos_generated': title, '$witype': witype,
'$iteration_id': iterationid})
r = req.post(url, headers=constants.request_detail.headers_default, json=f)
constants.dynamic_vars.wi_names_to_ids[title] = extract_value("data.id", r)
constants.dynamic_vars.wi_names_to_links[title] = extract_value("data.links.self", r)
return r
# Create workitems in backlog view
else:
api = "api/spaces/{}/workitems".format(spaceid)
url = constants.launch_detail.create_url(api)
f = read_post_data_file('create_wi_in_backlog.json', replace={
'$wi_nos_generated': title, '$witype': witype})
r = req.post(url, headers=constants.request_detail.headers_default, json=f)
constants.dynamic_vars.wi_names_to_ids[title] = extract_value("data.id", r)
constants.dynamic_vars.wi_names_to_links[title] = extract_value("data.links.self", r)
return r
def create_workitem_SCRUM(title=None, spaceid=None, witype=None, iterationid=None):
if None in [title, spaceid, witype]:
print("None value supplied for either SpaceID / WI-Title / WI-Type")
return None
# Create workitems in Iterations context
elif iterationid is not None:
api = "api/spaces/{}/workitems".format(spaceid)
url = constants.launch_detail.create_url(api)
if witype == constants.workitem_constants.witypetask1:
f = read_post_data_file('create_wi_in_iter_scrum.json', replace={
'$wi_nos_generated': title, '$witype': witype,
'$state': 'To Do', '$iteration_id': iterationid})
else:
f = read_post_data_file('create_wi_in_iter_scrum.json', replace={
'$wi_nos_generated': title, '$witype': witype,
'$state': 'New', '$iteration_id': iterationid})
r = req.post(url, headers=constants.request_detail.headers_default, json=f)
constants.dynamic_vars.wi_names_to_ids[title] = extract_value("data.id", r)
constants.dynamic_vars.wi_names_to_links[title] = extract_value("data.links.self", r)
return r
# Create workitems in backlog view
else:
api = "api/spaces/{}/workitems".format(spaceid)
url = constants.launch_detail.create_url(api)
if witype == constants.workitem_constants.witypetask1:
f = read_post_data_file('create_wi_in_backlog_scrum.json', replace={
'$wi_nos_generated': title, '$witype': witype,
'$state': 'To Do'})
else:
f = read_post_data_file('create_wi_in_backlog_scrum.json', replace={
'$wi_nos_generated': title, '$witype': witype,
'$state': 'New'})
r = req.post(url, headers=constants.request_detail.headers_default, json=f)
constants.dynamic_vars.wi_names_to_ids[title] = extract_value("data.id", r)
constants.dynamic_vars.wi_names_to_links[title] = extract_value("data.links.self", r)
return r
def add_workitem_comment(workitem_link=None, comment_text=None):
if None in [workitem_link, comment_text]:
print("Please specify a valid Workitem-Link and a CommentText")
return None
else:
# Add a comment to the workitem
wi_comment_api = "{}/comments".format(workitem_link)
f = read_post_data_file('add_wi_comment.json', replace={'$comment_text': comment_text})
return req.post(wi_comment_api, headers=constants.request_detail.headers_default, json=f)
def create_new_label(label_text=None):
if label_text is None:
print("Please specify a valid LabelText")
return None
else:
# Add a Label to the space
create_label_api = "api/spaces/{}/labels".format(constants.dynamic_vars.spaceid)
url = constants.launch_detail.create_url(create_label_api)
f = read_post_data_file('create_label.json', replace={'$label_name': label_text})
return req.post(url, headers=constants.request_detail.headers_default, json=f)
def add_workitem_label(workitem_link=None, label_text=None, label_id=None):
if None in [workitem_link]:
print("Please specify a valid Workitem-Link")
return None
else:
if label_id is None:
# Create a new label to the space
r = create_new_label(label_text)
if r is not None:
label_id = extract_value("data.id", r)
if label_id is not None:
# Add a label to the workitem
wi_id = workitem_link.rsplit('/', 1)[1]
wi_patch_api = workitem_link
            if isinstance(label_id, list):
f = read_post_data_file('add_wi_labels.json', replace={
'$wi_id': wi_id, '$wi_link': workitem_link,
'$label_1_id': label_id[0],
'$label_2_id': label_id[1], '$label_3_id': label_id[2]})
r = req.patch(wi_patch_api, headers=constants.request_detail.headers_default,
json=f)
else:
f = read_post_data_file('add_wi_label.json', replace={
'$wi_id': wi_id, '$wi_link': workitem_link, '$wi_ver': 0,
'$label_id': label_id})
r = req.patch(wi_patch_api, headers=constants.request_detail.headers_default,
json=f)
return r, label_id
def add_workitem_parent_link(wi_parent_title=None, wi_child_title=None):
if None in [wi_parent_title, wi_child_title]:
print("Please specify two valid Workitem Titles")
return None
else:
# Design the URL
api = "api/workitemlinks"
url = constants.launch_detail.create_url(api)
f = read_post_data_file(
'create_wi_hierarchy.json',
replace={'$wilinktype_parent': constants.workitem_constants.wilinktype_parent,
'$wi_parent_id': constants.dynamic_vars.wi_names_to_ids[wi_parent_title],
'$wi_child_id': constants.dynamic_vars.wi_names_to_ids[wi_child_title]})
# Make the request
r = req.post(url, headers=constants.request_detail.headers_default, json=f)
return r
def delete_space(spaceid=None):
if spaceid is None:
print("Please specify a valid space ID")
return None
else:
# Delete a space
api = "api/spaces/{}".format(spaceid)
url = constants.launch_detail.create_url(api)
r = req.delete(url, headers=constants.request_detail.headers_default)
return r
def report_dir():
return os.getenv("REPORT_DIR")
def gather_pod_logs(_context, project):
"""Gather project logs."""
if is_user_logged_in():
try:
print("Gathering project logs.")
output = check_output(["./oc-get-project-logs.sh",
_context.username,
_context.password,
project,
get_user_tokens().split(";")[0]
],
stderr=STDOUT)
save_output_to_file(output.decode("utf-8"),
"{}/project-logs-{}.log".format(report_dir(), project))
except CalledProcessError as e:
print("Error executing: {}".format(e))
def save_output_to_file(output, fileName):
    # use a context manager so the file handle is always closed
    with open(fileName, "w") as f:
        f.write(output)
|
|
import numpy as np
from scipy.stats import kde
from .stats import *
__all__ = ['traceplot', 'kdeplot', 'kde2plot', 'forestplot', 'autocorrplot']
def traceplot(trace, vars=None, figsize=None,
lines=None, combined=False, grid=True):
"""Plot samples histograms and values
Parameters
----------
trace : result of MCMC run
vars : list of variable names
Variables to be plotted, if None all variable are plotted
figsize : figure size tuple
If None, size is (12, num of variables * 2) inch
lines : dict
Dictionary of variable name / value to be overplotted as vertical
lines to the posteriors and horizontal lines on sample values
e.g. mean of posteriors, true values of a simulation
combined : bool
Flag for combining multiple chains into a single chain. If False
(default), chains will be plotted separately.
grid : bool
Flag for adding gridlines to histogram. Defaults to True.
Returns
-------
fig : figure object
"""
import matplotlib.pyplot as plt
if vars is None:
vars = trace.varnames
n = len(vars)
if figsize is None:
figsize = (12, n*2)
fig, ax = plt.subplots(n, 2, squeeze=False, figsize=figsize)
for i, v in enumerate(vars):
for d in trace.get_values(v, combine=combined, squeeze=False):
d = np.squeeze(d)
d = make_2d(d)
if d.dtype.kind == 'i':
histplot_op(ax[i, 0], d)
else:
kdeplot_op(ax[i, 0], d)
ax[i, 0].set_title(str(v))
ax[i, 0].grid(grid)
ax[i, 1].set_title(str(v))
ax[i, 1].plot(d, alpha=.35)
ax[i, 0].set_ylabel("Frequency")
ax[i, 1].set_ylabel("Sample value")
if lines:
try:
ax[i, 0].axvline(x=lines[v], color="r", lw=1.5)
ax[i, 1].axhline(y=lines[v], color="r", lw=1.5, alpha=.35)
except KeyError:
pass
plt.tight_layout()
return fig
def histplot_op(ax, data):
for i in range(data.shape[1]):
d = data[:, i]
mind = np.min(d)
maxd = np.max(d)
ax.hist(d, bins=range(mind, maxd + 2), align='left')
ax.set_xlim(mind - .5, maxd + .5)
def kdeplot_op(ax, data):
for i in range(data.shape[1]):
d = data[:, i]
density = kde.gaussian_kde(d)
l = np.min(d)
u = np.max(d)
x = np.linspace(0, 1, 100) * (u - l) + l
ax.plot(x, density(x))
def make_2d(a):
"""Ravel the dimensions after the first.
"""
a = np.atleast_2d(a.T).T
#flatten out dimensions beyond the first
n = a.shape[0]
    newshape = np.prod(a.shape[1:]).astype(int)
a = a.reshape((n, newshape), order='F')
return a
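# Quick illustration of make_2d: dimensions after the first are flattened,
# so each row stays one draw while the columns enumerate the element values.
def _demo_make_2d():
    a = np.zeros((100, 2, 3))
    assert make_2d(a).shape == (100, 6)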
def kde2plot_op(ax, x, y, grid=200):
    import matplotlib.pyplot as plt  # needed below for plt.cm
    xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
grid = grid * 1j
X, Y = np.mgrid[xmin:xmax:grid, ymin:ymax:grid]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = kde.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
extent=[xmin, xmax, ymin, ymax])
def kdeplot(data):
    import matplotlib.pyplot as plt
    f, ax = plt.subplots(1, 1, squeeze=True)
    kdeplot_op(ax, data)
    return f
def kde2plot(x, y, grid=200):
    import matplotlib.pyplot as plt
    f, ax = plt.subplots(1, 1, squeeze=True)
    kde2plot_op(ax, x, y, grid)
    return f
def autocorrplot(trace, vars=None, fontmap=None, max_lag=100):
"""Bar plot of the autocorrelation function for a trace"""
import matplotlib.pyplot as plt
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
if vars is None:
vars = trace.varnames
else:
vars = [str(var) for var in vars]
chains = trace.nchains
f, ax = plt.subplots(len(vars), chains, squeeze=False)
max_lag = min(len(trace) - 1, max_lag)
for i, v in enumerate(vars):
for j in range(chains):
d = np.squeeze(trace.get_values(v, chains=[j]))
ax[i, j].acorr(d, detrend=plt.mlab.detrend_mean, maxlags=max_lag)
if not j:
ax[i, j].set_ylabel("correlation")
ax[i, j].set_xlabel("lag")
if chains > 1:
ax[i, j].set_title("chain {0}".format(j+1))
# Smaller tick labels
tlabels = plt.gca().get_xticklabels()
plt.setp(tlabels, 'fontsize', fontmap[1])
tlabels = plt.gca().get_yticklabels()
plt.setp(tlabels, 'fontsize', fontmap[1])
def var_str(name, shape):
"""Return a sequence of strings naming the element of the tallyable object.
This is a support function for forestplot.
:Example:
>>> var_str('theta', (4,))
['theta[1]', 'theta[2]', 'theta[3]', 'theta[4]']
"""
size = np.prod(shape)
ind = (np.indices(shape) + 1).reshape(-1, size)
names = ['[' + ','.join(map(str, i)) + ']' for i in zip(*ind)]
# if len(name)>12:
# name = '\n'.join(name.split('_'))
# name += '\n'
names[0] = '%s %s' % (name, names[0])
return names
def forestplot(trace_obj, vars=None, alpha=0.05, quartiles=True, rhat=True,
main=None, xtitle=None, xrange=None, ylabels=None,
chain_spacing=0.05, vline=0):
""" Forest plot (model summary plot)
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either
the set of variables in a given model, or a specified set of nodes.
:Arguments:
trace_obj: NpTrace or MultiTrace object
Trace(s) from an MCMC sample.
vars: list
List of variables to plot (defaults to None, which results in all
variables plotted).
alpha (optional): float
Alpha value for (1-alpha)*100% credible intervals (defaults to
0.05).
quartiles (optional): bool
Flag for plotting the interquartile range, in addition to the
(1-alpha)*100% intervals (defaults to True).
rhat (optional): bool
Flag for plotting Gelman-Rubin statistics. Requires 2 or more
chains (defaults to True).
main (optional): string
Title for main plot. Passing False results in titles being
suppressed; passing None (default) results in default titles.
xtitle (optional): string
Label for x-axis. Defaults to no label
xrange (optional): list or tuple
Range for x-axis. Defaults to matplotlib's best guess.
ylabels (optional): list
User-defined labels for each variable. If not provided, the node
__name__ attributes are used.
chain_spacing (optional): float
Plot spacing between chains (defaults to 0.05).
vline (optional): numeric
Location of vertical reference line (defaults to 0).
"""
import matplotlib.pyplot as plt
try:
import matplotlib.gridspec as gridspec
except ImportError:
gridspec = None
if not gridspec:
print_('\nYour installation of matplotlib is not recent enough to ' +
'support summary_plot; this function is disabled until ' +
'matplotlib is updated.')
return
# Quantiles to be calculated
qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
if quartiles:
qlist = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]
# Range for x-axis
plotrange = None
# Number of chains
chains = None
# Gridspec
gs = None
# Subplots
interval_plot = None
rhat_plot = None
nchains = trace_obj.nchains
if nchains > 1:
from .diagnostics import gelman_rubin
R = gelman_rubin(trace_obj)
if vars is not None:
R = {v: R[v] for v in vars}
else:
# Can't calculate Gelman-Rubin with a single trace
rhat = False
if vars is None:
vars = trace_obj.varnames
# Empty list for y-axis labels
labels = []
if gs is None:
# Initialize plot
if rhat and nchains > 1:
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
else:
gs = gridspec.GridSpec(1, 1)
# Subplot for confidence intervals
interval_plot = plt.subplot(gs[0])
trace_quantiles = quantiles(trace_obj, qlist, squeeze=False)
hpd_intervals = hpd(trace_obj, alpha, squeeze=False)
for j, chain in enumerate(trace_obj.chains):
# Counter for current variable
var = 1
for varname in vars:
var_quantiles = trace_quantiles[chain][varname]
quants = [var_quantiles[v] for v in qlist]
var_hpd = hpd_intervals[chain][varname].T
# Substitute HPD interval for quantile
quants[0] = var_hpd[0].T
quants[-1] = var_hpd[1].T
# Ensure x-axis contains range of current interval
if plotrange:
plotrange = [min(
plotrange[0],
np.min(quants)),
max(plotrange[1],
np.max(quants))]
else:
plotrange = [np.min(quants), np.max(quants)]
# Number of elements in current variable
value = trace_obj.get_values(varname, chains=[chain])[0]
k = np.size(value)
# Append variable name(s) to list
if not j:
if k > 1:
names = var_str(varname, np.shape(value))
labels += names
else:
labels.append(varname)
# labels.append('\n'.join(varname.split('_')))
# Add spacing for each chain, if more than one
e = [0] + [(chain_spacing * ((i + 2) / 2)) *
(-1) ** i for i in range(nchains - 1)]
# Deal with multivariate nodes
if k > 1:
for i, q in enumerate(np.transpose(quants).squeeze()):
# Y coordinate with jitter
y = -(var + i) + e[j]
if quartiles:
# Plot median
plt.plot(q[2], y, 'bo', markersize=4)
# Plot quartile interval
plt.errorbar(
x=(q[1],
q[3]),
y=(y,
y),
linewidth=2,
color='b')
else:
# Plot median
plt.plot(q[1], y, 'bo', markersize=4)
# Plot outer interval
plt.errorbar(
x=(q[0],
q[-1]),
y=(y,
y),
linewidth=1,
color='b')
else:
# Y coordinate with jitter
y = -var + e[j]
if quartiles:
# Plot median
plt.plot(quants[2], y, 'bo', markersize=4)
# Plot quartile interval
plt.errorbar(
x=(quants[1],
quants[3]),
y=(y,
y),
linewidth=2,
color='b')
else:
# Plot median
plt.plot(quants[1], y, 'bo', markersize=4)
# Plot outer interval
plt.errorbar(
x=(quants[0],
quants[-1]),
y=(y,
y),
linewidth=1,
color='b')
# Increment index
var += k
labels = ylabels or labels
# Update margins
left_margin = np.max([len(x) for x in labels]) * 0.015
gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)
# Define range of y-axis
plt.ylim(-var + 0.5, -0.5)
datarange = plotrange[1] - plotrange[0]
plt.xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)
# Add variable labels
plt.yticks([-(l + 1) for l in range(len(labels))], labels)
# Add title
if main is not False:
plot_title = main or str(int((
1 - alpha) * 100)) + "% Credible Intervals"
plt.title(plot_title)
# Add x-axis label
if xtitle is not None:
plt.xlabel(xtitle)
# Constrain to specified range
if xrange is not None:
plt.xlim(*xrange)
# Remove ticklines on y-axes
for ticks in interval_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in interval_plot.spines.items():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
# Reference line
plt.axvline(vline, color='k', linestyle='--')
    # Generate Gelman-Rubin plot
if rhat and nchains > 1:
# If there are multiple chains, calculate R-hat
rhat_plot = plt.subplot(gs[1])
if main is not False:
plt.title("R-hat")
# Set x range
plt.xlim(0.9, 2.1)
# X axis labels
plt.xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
plt.yticks([-(l + 1) for l in range(len(labels))], "")
i = 1
for varname in vars:
chain = trace_obj.chains[0]
value = trace_obj.get_values(varname, chains=[chain])[0]
k = np.size(value)
if k > 1:
plt.plot([min(r, 2) for r in R[varname]], [-(j + i)
for j in range(k)], 'bo', markersize=4)
else:
plt.plot(min(R[varname], 2), -i, 'bo', markersize=4)
i += k
# Define range of y-axis
plt.ylim(-i + 0.5, -0.5)
# Remove ticklines on y-axes
for ticks in rhat_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in rhat_plot.spines.items():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
return gs
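# Usage sketch for forestplot (hypothetical multi-chain trace, as in the
# sketches above, so that the Gelman-Rubin panel is drawn):
#
#     gs = forestplot(trace, alpha=0.05, quartiles=True, rhat=True,
#                     main='Posterior summary', vline=0)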
|
|
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2018 Preferred Infrastructure, Inc.
# Copyright (c) 2018 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
# \changed to roi_align by Elaine Bao
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer.functions.pooling.roi_average_align_2d \
import _GET_BILINEAR_INTERP_KERNEL
from chainer.functions.pooling.roi_average_align_2d \
import _get_bilinear_interp_params
from chainer.functions.pooling.roi_average_align_2d import _get_bounds
from chainer import utils
from chainer.utils import type_check
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
class ROIMaxAlign2D(function.Function):
"""ROI max align over a set of 2d planes."""
def __init__(self, outsize, spatial_scale, sampling_ratio=None):
outh, outw = _pair(outsize)
if not (isinstance(outh, int) and outh > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(outh), outh))
if not (isinstance(outw, int) and outw > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(outw), outw))
if isinstance(spatial_scale, int):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, float) and spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, int) and s >= 1) or s is None
for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 4,
roi_type.dtype == numpy.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == numpy.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0],
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = numpy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
self.argmax_data = numpy.empty(top_data.shape, numpy.int32)
pooled_width, pooled_height = self.outw, self.outh
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_width = max(roi_end_w - roi_start_w, 1.)
roi_height = max(roi_end_h - roi_start_h, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(numpy.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(numpy.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
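            # e.g. an RoI of width 14.0 pooled to pooled_width = 7 gives
            # bin_size_w = 2.0 and roi_bin_grid_w = ceil(14 / 7) = 2, i.e.
            # two bilinear samples per output bin along the x axis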
max_val = - numpy.inf
max_index = -1
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
tmp_val = 0.
if w1 > 0 and y_low >= 0 and x_low >= 0:
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
tmp_val += w1 * v1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
tmp_val += w2 * v2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
tmp_val += w3 * v3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
tmp_val += w4 * v4
tmp_index = iy * roi_bin_grid_w + ix
if tmp_val > max_val:
max_val = tmp_val
max_index = tmp_index
# }}
top_data[n, c, ph, pw] = max_val
self.argmax_data[n, c, ph, pw] = max_index
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cuda.cupy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
self.argmax_data = cuda.cupy.empty(top_data.shape, numpy.int32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, T spatial_scale, int32 channels,
int32 height, int32 width, int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'T top_data, int32 argmax_data',
'''
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int bottom_data_offset =
(roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
T max_val = - (T) (1.0 / 0.0);
int max_index = -1;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
T tmp_val = 0.;
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T v1 = bottom_data[
bottom_data_offset + y_low * width + x_low];
tmp_val += w1 * v1;
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T v2 = bottom_data[
bottom_data_offset + y_low * width + x_high];
tmp_val += w2 * v2;
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T v3 = bottom_data[
bottom_data_offset + y_high * width + x_low];
tmp_val += w3 * v3;
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T v4 = bottom_data[
bottom_data_offset + y_high * width + x_high];
tmp_val += w4 * v4;
}
int tmp_index = iy * roi_bin_grid_w + ix;
if (tmp_val > max_val) {
max_val = tmp_val;
max_index = tmp_index;
}
// }}
}
}
top_data = max_val;
argmax_data = max_index;
''',
'roi_max_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, self.spatial_scale, channels, height, width,
self.outh, self.outw, sampling_ratio_h, sampling_ratio_w,
bottom_rois, bottom_roi_indices, top_data, self.argmax_data)
return top_data,
def backward_cpu(self, inputs, gy):
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = numpy.zeros(self._bottom_data_shape, gy[0].dtype)
spatial_scale = self.spatial_scale
pooled_height = self.outh
pooled_width = self.outw
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_width = max(roi_end_w - roi_start_w, 1.)
roi_height = max(roi_end_h - roi_start_h, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
top_diff_this_bin = top_diff[n, c, ph, pw]
max_index = self.argmax_data[n, c, ph, pw]
if max_index != -1:
if self.sampling_ratio[0] is None:
roi_bin_grid_h = numpy.ceil(roi_height / pooled_height)
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = numpy.ceil(roi_width / pooled_width)
else:
roi_bin_grid_w = self.sampling_ratio[1]
iy = int(max_index / roi_bin_grid_w)
ix = max_index % roi_bin_grid_w
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
# bilinear_interpolation_gradient {{
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
utils.nondeterministic('atomicAdd')
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, gy[0].dtype)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff, T spatial_scale,
int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'raw T bottom_diff, raw int32 argmax_data',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
        // Do not use rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) /
static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) /
static_cast<T>(pooled_width);
int bottom_diff_offset =
(roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
int max_index = argmax_data[top_offset + ph * pooled_width + pw];
if (max_index != -1) {
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
int iy = max_index / roi_bin_grid_w;
int ix = max_index % roi_bin_grid_w;
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// bilinear_interpolation_gradient {{
int y_low, x_low, y_high, x_high;
T w1, w2, w3, w4;
bool y_ret = get_bounds(y, height, y_low, y_high);
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret || !y_ret) continue;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T g1 = top_diff_this_bin * w1;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_low], g1);
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T g2 = top_diff_this_bin * w2;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_high], g2);
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T g3 = top_diff_this_bin * w3;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_low], g3);
}
if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {
T g4 = top_diff_this_bin * w4;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_high], g4);
}
}
// }}
''',
'roi_max_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], self.spatial_scale, channels, height, width,
self.outh, self.outw, sampling_ratio_h, sampling_ratio_w,
bottom_rois, bottom_roi_indices, bottom_diff, self.argmax_data,
size=gy[0].size)
return bottom_diff, None, None
def roi_max_align_2d(
x, rois, roi_indices, outsize, spatial_scale, sampling_ratio=None
):
"""Spatial Region of Interest (ROI) max align function.
This function acts similarly to
:func:`~chainer.functions.roi_max_pooling_2d`, but it computes maximum
of input spatial patch with bilinear interpolation for each channel with
the region of interest.
Args:
        x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: ``(n: batch, c: channel, h: height, w: width)``.
rois (~chainer.Variable): Input roi variable. The shape is expected to
be ``(n: data size, 4)``, and each datum is set as below:
``(y_min, x_min, y_max, x_max)``.
roi_indices (~chainer.Variable): Input roi variable. The shape is
expected to be ``(n: data size, )``.
        outsize ((int, int) or int): Expected output size after pooling
            (height, width). ``outsize=o`` and ``outsize=(o, o)``
            are equivalent.
        spatial_scale (float): Scale applied to the RoI coordinates to map
            them onto the input feature map.
        sampling_ratio ((int, int) or int): Sampling step for the alignment.
            It must be an integer of at least :math:`1` or :obj:`None`; the
            value is decided automatically when :obj:`None` is passed.
            Different ratios for the height and width axes are supported by
            passing a tuple of ints as ``(sampling_ratio_h, sampling_ratio_w)``.
            ``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return ROIMaxAlign2D(outsize, spatial_scale, sampling_ratio)(
x, rois, roi_indices)
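# A minimal usage sketch (shapes are illustrative: a single 3-channel 32x32
# feature map with one RoI covering its top-left quarter):
#
#     import numpy as np
#     x = np.random.randn(1, 3, 32, 32).astype(np.float32)
#     rois = np.array([[0., 0., 16., 16.]], dtype=np.float32)
#     roi_indices = np.array([0], dtype=np.int32)
#     y = roi_max_align_2d(x, rois, roi_indices, outsize=7,
#                          spatial_scale=1.0, sampling_ratio=2)
#     assert y.shape == (1, 3, 7, 7)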
|
|
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import Int8Dtype, UInt32Dtype
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
    # TODO(#22346): preserve Int64 dtype
    # for ops where this is possible (mean would actually work here,
    # but its return value is generally a float)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
if op in {"sum", "prod", "min", "max"}:
assert isinstance(result, np.int64)
else:
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = integer_array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy():
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_astype_dt64():
# GH#32435
arr = pd.array([1, 2, 3, pd.NA]) * 10 ** 9
result = arr.astype("datetime64[ns]")
expected = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_construct_cast_invalid(dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str():
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype="<U21")
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean():
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
|
|
from OpenGL.GL import *
from OpenGL.GLUT import *
import weakref
from glcommon import GLMultiViewportProgram
keymap = {GLUT_KEY_F1:'f1',
GLUT_KEY_F2:'f2',
GLUT_KEY_F3:'f3',
GLUT_KEY_F4:'f4',
GLUT_KEY_F5:'f5',
GLUT_KEY_F6:'f6',
GLUT_KEY_F7:'f7',
GLUT_KEY_F8:'f8',
GLUT_KEY_F9:'f9',
GLUT_KEY_F10:'f10',
GLUT_KEY_F11:'f11',
GLUT_KEY_F12:'f12',
GLUT_KEY_LEFT:'left',
GLUT_KEY_UP:'up',
GLUT_KEY_RIGHT:'right',
GLUT_KEY_DOWN:'down',
GLUT_KEY_PAGE_UP:'page up',
GLUT_KEY_PAGE_DOWN:'page down',
GLUT_KEY_HOME:'home',
GLUT_KEY_END:'end',
GLUT_KEY_INSERT:'insert'
}
class GLUTWindow:
"""A GLUT window. Should not be used directly.
Attributes:
- name: title of the window (only has an effect before calling
run())
- index: the GLUT index
- width, height: width/height of the window (only has an effect
before calling run(), and these are updated when the user resizes
the window.
- clearColor: the RGBA floating point values of the background color.
- glutInitialized: true if GLUT has been initialized
"""
def __init__(self,name):
"""Note: must be called after GLUT is initialized."""
self.name = name
self.program = None
self.width = 640
self.height = 480
self.clearColor = [1.0,1.0,1.0,0.0]
self.lastx = 0
self.lasty = 0
self.initialized = False
self.glutWindowID = None
self.modifierList = []
def initialize(self):
assert self.program != None, "program member needs to be set"
assert not self.initialized,"initialized twice?"
program = self.program
#glutInitWindowPosition (0,0);
glutInitWindowSize (self.width, self.height);
self.glutWindowID = glutCreateWindow (self.name)
program.view.x = 0
program.view.y = 0
def glutsafe(func,update_modifiers=False):
def safefunc(*args):
if update_modifiers:
self._updateModifiers()
try:
return func(*args)
except Exception, e:
import traceback
traceback.print_exc()
glutLeaveMainLoop()
return
return safefunc
# set window callbacks
glutReshapeFunc (glutsafe(self._reshapefunc))
glutKeyboardFunc (glutsafe(program.keyboardfunc,update_modifiers=True))
glutKeyboardUpFunc (glutsafe(program.keyboardupfunc,update_modifiers=True))
glutSpecialFunc (glutsafe(self._specialfunc,update_modifiers=True))
glutSpecialUpFunc (glutsafe(self._specialupfunc,update_modifiers=True))
glutMotionFunc (glutsafe(self._motionfunc,update_modifiers=True))
glutPassiveMotionFunc (glutsafe(self._motionfunc,update_modifiers=True))
glutMouseFunc (glutsafe(self._mousefunc,update_modifiers=True))
glutDisplayFunc (glutsafe(self._displayfunc))
glutIdleFunc(glutsafe(program.idlefunc))
glutCloseFunc(glutsafe(self._closefunc))
#init function
self.program.initialize()
glEnable(GL_MULTISAMPLE)
glutPostRedisplay()
self.initialized = True
print "Initialized"
def add_action(self,*args):
pass
def setProgram(self,program):
from glprogram import GLProgram
assert isinstance(program,GLProgram)
if hasattr(program,'name'):
self.name = program.name
if self.initialized:
glutSetWindowTitle(program.name)
self.program = program
program.window = self
if self.initialized:
program.initialize()
program.reshapefunc(self.width,self.height)
self.idlesleep(0)
else:
self.reshape(program.view.w,program.view.h)
def modifiers(self):
"""Call this to retrieve modifiers. Called by frontend."""
return self.modifierList
def refresh(self):
"""Call this to redraw the screen on the next event loop. Called by frontend."""
glutPostRedisplay()
    def idlesleep(self,duration=float('inf')):
        """Sleeps the idle callback for `duration` seconds. If no duration
        is provided, the idle callback is slept forever. Called by frontend."""
        if duration == 0:
            glutIdleFunc(self.program.idlefunc)
        else:
            glutIdleFunc(None)
            if duration != float('inf'):
                glutTimerFunc(int(duration*1000),lambda x:glutIdleFunc(self.program.idlefunc),0)
def reshape(self,w,h):
"""Resizes the GL window. Called by frontend."""
print "reshaping",w,h
self.width,self.height = w,h
if self.initialized:
glutReshapeWindow(self.width,self.height)
def draw_text(self,point,text,size=12,color=None):
"""If called in the display_screen method, renders text at the given point (may be 2d
or 3d).
If size is given, it renders a font in the given size. If color is given, then it
is an RGB or RGBA color value. Called by frontend."""
import ctypes
if size <= 10:
font = GLUT_BITMAP_HELVETICA_10
elif size <= 12:
font = GLUT_BITMAP_HELVETICA_12
elif size <= 13:
font = GLUT_BITMAP_8_BY_13
elif size <= 16:
font = GLUT_BITMAP_9_BY_15
elif size <= 21:
font = GLUT_BITMAP_HELVETICA_12
else:
font = GLUT_TIMES_NEW_ROMAN_24
if color is None:
glColor3f(0,0,0)
elif len(color)==3:
glColor3f(color[0],color[1],color[2])
else:
glColor4f(color[0],color[1],color[2],color[3])
if len(point)==3:
glRasterPos3f(*point)
else:
glRasterPos2f(*point)
for c in text:
glutBitmapCharacter(font, ctypes.c_int( ord(c) ))
    def close(self):
        if self.glutWindowID != None:
            glutDestroyWindow(self.glutWindowID)
            self.glutWindowID = None
        self._closefunc()
    def _updateModifiers(self):
        m = []
        modifiers = glutGetModifiers()
        # the modifier state is a bitmask; test each bit independently so
        # that combinations like ctrl+shift are all recorded
        if modifiers & GLUT_ACTIVE_CTRL:
            m.append('ctrl')
        if modifiers & GLUT_ACTIVE_SHIFT:
            m.append('shift')
        if modifiers & GLUT_ACTIVE_ALT:
            m.append('alt')
        self.modifierList = m
def _reshapefunc(self,w,h):
"""Internal use"""
self.width = w
self.height = h
self.program.reshapefunc(w,h)
glutPostRedisplay()
def _motionfunc(self,x,y):
"""Internal use"""
dx = x - self.lastx
dy = y - self.lasty
self.program.motionfunc(x,y,dx,dy)
self.lastx = x
self.lasty = y
def _mousefunc(self,button,state,x,y):
"""Internal use"""
self.program.mousefunc(button,state,x,y)
self.lastx = x
self.lasty = y
def _specialfunc(self,c,x,y):
if c in keymap:
self.program.keyboardfunc(keymap[c],x,y)
def _specialupfunc(self,c,x,y):
if c in keymap:
self.program.keyboardupfunc(keymap[c],x,y)
def _displayfunc(self):
"""Internal use."""
if self.width == 0 or self.height == 0:
#hidden?
print "GLProgram.displayfunc called on hidden window?"
return
self.program.displayfunc()
glutSwapBuffers ()
def _closefunc(self):
self.program.closefunc()
self.program.window = None
class GLUTBackend:
"""A basic OpenGL program using GLUT. Set up your GLProgramInterface class,
call addPlugin(plugin), then call run() to start the GLUT main loop.
NOTE: the run() call may not return depending on your GLUT system.
For more control over windowing, you can use the createWindow function to
construct new windows and addPlugin to add plugins to those windows.
    IMPORTANT NOTE: only one window may be created for a given world. If you
    want to use multiple windows, a new world should be loaded for each window.
"""
def __init__(self):
self.glutInitialized = False
self.windows = []
def initialize(self,program_name):
        if not self.glutInitialized:
glutInit ([])
if bool(glutSetOption):
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE,GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glutInitDisplayMode (GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH | GLUT_MULTISAMPLE)
self.glutInitialized = True
def createWindow(self,name):
self.initialize(name)
w = GLUTWindow(name)
self.windows.append(w)
return w
def run(self):
"""Starts the main loop. NOTE: if freeglut is not installed, this
will not return."""
# Initialize Glut
assert len(self.windows) >= 1,"Need to define at least one GL interface"
for w in self.windows:
w.initialize()
glutMainLoop ()
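# A minimal usage sketch (`MyProgram` is a hypothetical GLProgram subclass
# from glprogram; windows must be created before calling run()):
#
#     backend = GLUTBackend()
#     win = backend.createWindow("demo")
#     win.setProgram(MyProgram())
#     backend.run()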
|
|
import datetime
import json
from nose.tools import ok_, eq_, assert_raises
import mock
from django.conf import settings
from django.utils import timezone
from django.core.exceptions import ImproperlyConfigured
from funfactory.urlresolvers import reverse
from airmozilla.main.models import Event, EventHitStats, Approval
from airmozilla.manage import autocompeter
from airmozilla.base.tests.testbase import DjangoTestCase
class Response(object):
def __init__(self, content, status_code=200, headers=None):
self.content = self.text = content
self.status_code = status_code
self.headers = headers or {}
def json(self):
return json.loads(self.content)
class TestAutocompeter(DjangoTestCase):
fixtures = ['airmozilla/manage/tests/main_testdata.json']
def test_update_without_key(self):
with self.settings(AUTOCOMPETER_KEY=None):
# This would simply fail if autocompeter.py wasn't smart
# enough to notice that the AUTOCOMPETER_KEY was not set.
autocompeter.update()
@mock.patch('requests.post')
def test_basic_update(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
autocompeter.update()
# nothing should happen because there are no recently modified events
ok_(not posts)
event = Event.objects.get(title='Test event')
event.save()
autocompeter.update()
eq_(len(posts), 1)
# In the posted data should be a thing called 'documents'
# which is a list of every document.
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['url'], reverse('main:event', args=(event.slug,)))
eq_(document['title'], event.title)
eq_(document['group'], '')
eq_(document['popularity'], 0)
@mock.patch('requests.post')
def test_basic_update_with_popularity(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
event = Event.objects.get(title='Test event')
# also change to a non-public privacy setting
event.privacy = Event.PRIVACY_CONTRIBUTORS
event.save()
EventHitStats.objects.create(
event=event,
total_hits=100
)
autocompeter.update()
document = posts[0]['documents'][0]
eq_(document['popularity'], 100)
eq_(document['group'], Event.PRIVACY_CONTRIBUTORS)
@mock.patch('requests.post')
def test_basic_update_with_repeated_titles(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
event = Event.objects.get(title='Test event')
# also change to a non-public privacy setting
event.privacy = Event.PRIVACY_CONTRIBUTORS
event.save()
EventHitStats.objects.create(
event=event,
total_hits=100
)
event2 = Event.objects.create(
slug='something-else',
title=event.title,
status=event.status,
privacy=event.privacy,
start_time=event.start_time + datetime.timedelta(days=40),
description=event.description,
placeholder_img=event.placeholder_img,
archive_time=event.archive_time,
)
EventHitStats.objects.create(
event=event2,
total_hits=100
)
assert Event.objects.approved().count() == 2
autocompeter.update()
documents = posts[0]['documents']
titles = [x['title'] for x in documents]
titles.sort()
title1 = event.start_time.strftime('Test event %d %b %Y')
title2 = event2.start_time.strftime('Test event %d %b %Y')
eq_(titles, [title1, title2])
@mock.patch('requests.post')
def test_basic_update_upcoming_event(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
event = Event.objects.get(title='Test event')
EventHitStats.objects.create(
event=event,
total_hits=100
)
autocompeter.update()
future = timezone.now() + datetime.timedelta(days=1)
Event.objects.create(
slug='aaa',
title='Other',
start_time=future,
status=event.status,
)
assert Event.objects.approved().count() == 2
autocompeter.update()
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['title'], 'Other')
# picks this up from the median
eq_(document['popularity'], 100)
@mock.patch('requests.post')
def test_basic_update_all(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
autocompeter.update(all=True)
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['popularity'], 0)
@mock.patch('requests.post')
def test_basic_update_all_with_popularity(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
EventHitStats.objects.create(
event=Event.objects.get(title='Test event'),
total_hits=200
)
autocompeter.update(all=True)
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['popularity'], 200)
@mock.patch('requests.post')
def test_basic_update_all_with_unapproved(self, rpost):
posts = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
event = Event.objects.get(title='Test event')
EventHitStats.objects.create(
event=event,
total_hits=200
)
autocompeter.update(all=True)
assert len(posts[0]['documents']) == 1
document = posts[0]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['group'], '')
app = Approval.objects.create(event=event)
autocompeter.update(all=True)
document = posts[1]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['group'], 'contributors')
app.approved = True
app.save()
autocompeter.update(all=True)
document = posts[2]['documents'][0]
eq_(document['title'], 'Test event')
eq_(document['group'], '')
@mock.patch('requests.delete')
@mock.patch('requests.post')
def test_basic_update_all_with_flush(self, rpost, rdelete):
posts = []
deletes = []
def mocked_post(url, **options):
assert settings.AUTOCOMPETER_URL in url
data = json.loads(options['data'])
posts.append(data)
return Response(
'OK',
201
)
rpost.side_effect = mocked_post
def mocked_delete(url, **options):
assert settings.AUTOCOMPETER_URL in url
deletes.append(url)
return Response(
'OK',
204
)
rdelete.side_effect = mocked_delete
autocompeter.update(all=True, flush_first=True)
ok_(deletes)
ok_(posts)
@mock.patch('requests.get')
def test_stats(self, rget):
def mocked_get(url, **options):
return Response(
json.dumps({'documents': 1}),
200,
headers={
'content-type': 'application/json'
}
)
rget.side_effect = mocked_get
result = autocompeter.stats()
eq_(result, {'documents': 1})
def test_stats_no_key(self):
with self.settings(AUTOCOMPETER_KEY=''):
assert_raises(
ImproperlyConfigured,
autocompeter.stats
)
@mock.patch('requests.get')
def test_test(self, rget):
def mocked_get(url, **options):
return Response(
json.dumps({
'terms': ['foo'],
'results': [
['/url', 'Page'],
]
}),
200,
headers={
'content-type': 'application/json'
}
)
rget.side_effect = mocked_get
result = autocompeter.test('foo')
eq_(result['terms'], ['foo'])
eq_(result['results'], [['/url', 'Page']])
|
|
import types
C_TO_PY_CAST = {
'b' : 'char',
'i' : 'int',
'H' : 'uint16',
'h' : 'int16',
'B' : 'uchar',
}
# --------------------------------------------------------------------------------------------
class gen_fmt(object):
def __init__(self, fields, tp = None, bv = None, cast=None, cmt = None):
self.fields = fields
self.tp = tp
# Format to be passed to Py_BuildValue
if not bv:
self.bv = "XXX"
else:
if bv == "K":
self.bv = "PY_FMT64"
else:
self.bv = '"%s"' % bv
if not cast:
if bv == "K":
cast = "pyul_t"
elif bv in C_TO_PY_CAST:
cast = C_TO_PY_CAST[bv]
self.cast = "" if not cast else "(%s)" % cast
self.cmt = cmt
if bv == "K":
self.setcvt = "uint64 v(0); PyGetNumber(value, &v);"
elif bv == 'i':
self.setcvt = "int v = PyInt_AsLong(value);"
else:
self.setcvt = "uint64 v = %sPyInt_AsLong(value);" % self.cast
# --------------------------------------------------------------------------------------------
switch_info_ex_t_gen = [
gen_fmt('regdtyp', bv = 'b', cmt = 'size of the switch expression register as dtyp'),
gen_fmt('flags2', bv = 'i'),
gen_fmt('jcases', bv = 'i', cmt = 'number of entries in the jump table (SWI2_INDIRECT)'),
gen_fmt('regnum', bv = 'i', cmt = 'the switch expression as a register number'),
    gen_fmt('flags', bv = 'H', cmt = 'switch information flags'),
gen_fmt('ncases', bv = 'H', cmt = 'number of cases (excluding default)'),
gen_fmt('defjump', bv = 'K', cmt = 'default jump address'),
gen_fmt('jumps', bv = 'K', cmt = 'jump table address'),
gen_fmt('elbase', bv = 'K', cmt = 'element base'),
gen_fmt('startea', bv = 'K', cmt = 'start of switch idiom'),
gen_fmt('custom', bv = 'K', cmt = 'information for custom tables (filled and used by modules)'),
gen_fmt('ind_lowcase', bv = 'K'),
gen_fmt(['values', 'lowcase'], bv = 'K'),
]
op_t_gen = [
gen_fmt('n', bv = 'b'),
gen_fmt('type', bv = 'B'),
gen_fmt('offb', bv = 'b'),
gen_fmt('offo', bv = 'b'),
gen_fmt('flags', bv = 'B'),
gen_fmt('dtyp', bv = 'b'),
gen_fmt(['reg', 'phrase'], bv = 'H'),
gen_fmt('value', bv = 'K'),
gen_fmt('addr', bv = 'K'),
gen_fmt('specval', bv = 'K'),
gen_fmt('specflag1', bv = 'b'),
gen_fmt('specflag2', bv = 'b'),
gen_fmt('specflag3', bv = 'b'),
gen_fmt('specflag4', bv = 'b')
]
insn_t_gen = [
gen_fmt('cs', bv = 'K'),
gen_fmt('ip', bv = 'K'),
gen_fmt('ea', bv = 'K'),
gen_fmt('itype', bv = 'H'),
gen_fmt('size', bv = 'H'),
gen_fmt('auxpref', bv = 'H'),
gen_fmt('segpref', bv = 'b'),
gen_fmt('insnpref', bv = 'b'),
gen_fmt('Op1', tp = 'op_t'),
gen_fmt('Op2', tp = 'op_t'),
gen_fmt('Op3', tp = 'op_t'),
gen_fmt('Op4', tp = 'op_t'),
gen_fmt('Op5', tp = 'op_t'),
gen_fmt('Op6', tp = 'op_t'),
gen_fmt('flags', bv = 'b')
]
regval_t_gen = [
gen_fmt('rvtype', bv = 'i'),
gen_fmt('ival', bv = 'K'),
gen_fmt('fval', bv = 'd'),
gen_fmt('bytes', bv = 's'),
]
# --------------------------------------------------------------------------------------------
S_LINK_ATTR = 'S_CLINK_NAME' # If the value is a string literal, include the double quotes in it
S_CMOD_NAME = '_idaapi'
# --------------------------------------------------------------------------------------------
def gen_stub(gen, name, cname = None, tabs=4, gen_py_file = False, gen_c_file = False):
# Assume C type name same as python type name
if not cname:
cname = name
# Python property lines
prop_body = []
# Python get/set bodies
getset_body = []
# C get/set bodies
cgetset_body = []
# some spacing constants
spc = ' ' * tabs
spc2 = spc * 2
nspc = '\n' + spc
nspc2 = '\n' + spc2
cget_link = '%s_get_clink' % cname
#
# Process fields
#
for g in gen:
# a union will be represented by a list
if type(g.fields) != types.ListType:
fields = [g.fields]
else:
fields = g.fields
# join all field names (in case of a union)
flds_name = '_'.join(fields)
# form the method and variable names
set_method = '__set_%s__' % flds_name
get_method = '__get_%s__' % flds_name
cset_method = '%s_set_%s' % (name, flds_name)
cget_method = '%s_get_%s' % (name, flds_name)
fld_name = '__%s__' % flds_name
basic_type = not g.tp
vars = {
'get': get_method,
'set': set_method,
'l': S_LINK_ATTR,
'fld' : fld_name,
'cmod' : S_CMOD_NAME,
'cget': cget_method,
'cset': cset_method,
'csetcvt': g.setcvt,
'cname': cname,
'cgetlink': cget_link,
'cfield1': fields[0],
'bv': g.bv,
'bvcast': g.cast
}
#
# Python code
#
# basic type?
# For basic types we need to create property and get/set methods
if basic_type:
for fld in fields:
prop_body.append('%s = property(%s, %s)' % (fld, get_method, set_method))
if g.cmt:
prop_body.append('"""%s"""' % g.cmt)
#
code = '\n'.join([
# get method
'def %(get)s(self):',
spc2 + 'return %(cmod)s.%(cget)s(self)',
# set method
spc + 'def %(set)s(self, v):',
spc2 + '%(cmod)s.%(cset)s(self, v)',
]) % vars
getset_body.append(code)
#
# C code
#
if basic_type:
code = '\n'.join([
"""static PyObject *%(cget)s(PyObject *self)
{
%(cname)s *link = %(cgetlink)s(self);
if ( link == NULL )
Py_RETURN_NONE;
return Py_BuildValue(%(bv)s, %(bvcast)slink->%(cfield1)s);
}
static void %(cset)s(PyObject *self, PyObject *value)
{
%(cname)s *link = %(cgetlink)s(self);
if ( link == NULL )
return;
%(csetcvt)s
link->%(cfield1)s = %(bvcast)sv;
}
"""
]) % vars
cgetset_body.append(code)
# print 'prop_body->\n\t', '\n\t'.join(prop_body), '\n<'
# print 'getset_body->\n', '\n'.join(getset_body), '\n<'
# print 'cgetset_body->\n', '\n'.join(cgetset_body), '\n<'
vars = {
'name': name,
'cname': cname,
'getlink': cget_link,
'l': S_LINK_ATTR,
'cmod' : S_CMOD_NAME
}
#
# Form the complete Python code
#
py = '\n'.join([
'class %(name)s(py_clinked_object_t):',
# init() code
spc + 'def __init__(self, lnk = None):',
spc2 + 'py_clinked_object_t.__init__(self, lnk)',
'',
spc + 'def _create_clink(self):',
spc2 + 'return _idaapi.%(name)s_create()',
'',
spc + 'def _del_clink(self, lnk):',
spc2 + 'return _idaapi.%(name)s_destroy(lnk)',
'',
spc + 'def assign(self, other):',
spc2 + 'return _idaapi.%(name)s_assign(self, other)',
'',
'',
spc + '#',
spc + '# Autogenerated',
spc + '#',
# get/set code
spc + nspc.join(getset_body),
# props code
spc + nspc.join(prop_body),
]) % vars
#
# Form the Python to C conversion function
#
#
# Form the complete C code
#
ccode = '\n'.join([
# Form the C get link code
"""%(cname)s *%(getlink)s(PyObject *self)
{
if ( !PyObject_HasAttrString(self, %(l)s) )
return NULL;
%(cname)s *r;
PyObject *attr = PyObject_GetAttrString(self, %(l)s);
if ( PyCObject_Check(attr) )
r = (%(cname)s *) PyCObject_AsVoidPtr(attr);
else
r = NULL;
Py_DECREF(attr);
return r;
}
static PyObject *%(cname)s_create()
{
%(cname)s *inst = new %(cname)s();
return PyCObject_FromVoidPtr(inst, NULL);
}
static bool %(cname)s_destroy(PyObject *py_obj)
{
if ( !PyCObject_Check(py_obj) )
return false;
%(cname)s *inst = (%(cname)s *) PyCObject_AsVoidPtr(py_obj);
delete inst;
return true;
}
static bool %(cname)s_assign(PyObject *self, PyObject *other)
{
%(cname)s *lhs = %(cname)s_get_clink(self);
%(cname)s *rhs = %(cname)s_get_clink(other);
if (lhs == NULL || rhs == NULL)
return false;
*lhs = *rhs;
return true;
}
//-------------------------------------------------------------------------
// Auto generated - begin
//
""",
# Form C get/set functions
''.join(cgetset_body),
"""//
// Auto generated - end
//
//-------------------------------------------------------------------------"""
]) % vars
# write the Python file
if gen_py_file:
f = open(name + '.py', 'w')
f.write(py)
f.close()
# write C file
if gen_c_file:
f = open(name + '.cpp', 'w')
f.write(ccode)
f.close()
# --------------------------------------------------------------------------------------------
def main():
files = [
('switch_info_ex_t', switch_info_ex_t_gen),
]
for (n, g) in files:
gen_stub(g, n, gen_py_file = True, gen_c_file = True)
main()
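# For reference, a sketch of the Python code gen_stub() emits for one basic
# field such as 'defjump' (bv='K') of switch_info_ex_t, derived from the
# templates above:
#
#     def __get_defjump__(self):
#         return _idaapi.switch_info_ex_t_get_defjump(self)
#     def __set_defjump__(self, v):
#         _idaapi.switch_info_ex_t_set_defjump(self, v)
#     defjump = property(__get_defjump__, __set_defjump__)
#     """default jump address"""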
|
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Widgets to display/edit configuration objects."""
from __future__ import division
import os
import os.path
from PyQt4 import QtGui, QtCore
from vistrails.core.configuration import ConfigurationObject, \
ConfigFieldParent, ConfigPath, \
get_vistrails_configuration, find_simpledoc
from vistrails.core.thumbnails import ThumbnailCache
from vistrails.gui.common_widgets import QSearchTreeWindow, QSearchTreeWidget, \
QDirectoryChooserToolButton
from vistrails.gui.utils import YES_BUTTON, NO_BUTTON, show_question, show_warning
from vistrails.core import system
##############################################################################
def bool_conv(st):
if st == 'True':
return True
elif st == 'False':
return False
else:
raise TypeError('Bogus value for bool_conv ' + str(st))
class QConfigurationTreeWidgetItem(QtGui.QTreeWidgetItem):
def __init__(self, parent, obj, parent_obj, name, temp_obj, temp_parent_obj):
lst = [name]
t = type(obj)
if t == bool:
self._obj_type = bool_conv
else:
self._obj_type = t
self._parent_obj = parent_obj
self._temp_parent_obj = temp_parent_obj
self._name = name
if t == ConfigurationObject:
lst.extend(['', ''])
QtGui.QTreeWidgetItem.__init__(self, parent, lst)
self.setFlags(self.flags() & ~(QtCore.Qt.ItemIsDragEnabled |
QtCore.Qt.ItemIsSelectable ))
elif t == tuple and obj[0] is None and isinstance(obj[1], type):
self._obj_type = obj[1]
lst.extend(['', obj[1].__name__])
QtGui.QTreeWidgetItem.__init__(self, parent, lst)
self.setFlags((self.flags() & ~QtCore.Qt.ItemIsDragEnabled) |
QtCore.Qt.ItemIsEditable)
else:
lst.extend([str(obj), type(obj).__name__])
QtGui.QTreeWidgetItem.__init__(self, parent, lst)
self.setFlags((self.flags() & ~QtCore.Qt.ItemIsDragEnabled) |
QtCore.Qt.ItemIsEditable)
def change_value(self, new_value):
# if this is a parent ConfigurationObject, do nothing
if self._parent_obj and not self._obj_type == ConfigurationObject:
setattr(self._parent_obj, self._name, self._obj_type(new_value))
setattr(self._temp_parent_obj, self._name, self._obj_type(new_value))
def _get_name(self):
return self._name
name = property(_get_name)
class QConfigurationTreeWidgetItemDelegate(QtGui.QItemDelegate):
"""
QConfigurationTreeWidgetItemDelegate allows a custom editor for
each column of the QConfigurationTreeWidget
"""
def createEditor(self, parent, option, index):
""" createEditor(parent: QWidget,
option: QStyleOptionViewItem,
index: QModelIndex) -> QWidget
        Return the editing widget depending on the column
"""
# We only allow users to edit the second column
if index.column()==1:
dataType = str(index.sibling(index.row(), 2).data())
# Create the editor based on dataType
if dataType=='int':
editor = QtGui.QLineEdit(parent)
editor.setValidator(QtGui.QIntValidator(parent))
elif dataType=='bool':
editor = QtGui.QComboBox(parent)
editor.addItem('True')
editor.addItem('False')
else:
editor = QtGui.QItemDelegate.createEditor(self, parent,
option, index)
return editor
return None
def setEditorData(self, editor, index):
""" setEditorData(editor: QWidget, index: QModelIndex) -> None
        Set the editor to reflect the data at index
"""
if isinstance(editor, QtGui.QComboBox):
editor.setCurrentIndex(editor.findText(index.data()))
else:
QtGui.QItemDelegate.setEditorData(self, editor, index)
def setModelData(self, editor, model, index):
""" setModelData(editor: QStringEdit,
model: QAbstractItemModel,
index: QModelIndex) -> None
Set the text of the editor back to the item model
"""
if isinstance(editor, QtGui.QComboBox):
model.setData(index, editor.currentText())
elif isinstance(editor, QtGui.QLineEdit):
model.setData(index, editor.text())
else:
# Should never get here
assert False
class QConfigurationTreeWidget(QSearchTreeWidget):
def __init__(self, parent, persistent_config, temp_config):
QSearchTreeWidget.__init__(self, parent)
self.setMatchedFlags(QtCore.Qt.ItemIsEditable)
self.setColumnCount(3)
lst = ['Name', 'Value', 'Type']
self.setHeaderLabels(lst)
self.create_tree(persistent_config, temp_config)
def create_tree(self, persistent_config, temp_config):
def create_item(parent, obj, parent_obj, name, temp_obj, temp_parent_obj):
item = QConfigurationTreeWidgetItem(parent, obj, parent_obj,
name, temp_obj, temp_parent_obj)
if isinstance(obj, ConfigurationObject):
for key in sorted(obj.keys()):
create_item(item, getattr(obj, key), obj, key,
getattr(temp_obj, key), temp_obj)
# disconnect() and clear() are here because create_tree might
# also be called when an entirely new configuration object is set.
self.disconnect(self, QtCore.SIGNAL('itemChanged(QTreeWidgetItem *, int)'),
self.change_configuration)
self.clear()
self._configuration = persistent_config
self._temp_configuration = temp_config
create_item(self, self._configuration, None, 'configuration',
self._temp_configuration, None)
self.expandAll()
self.resizeColumnToContents(0)
self.connect(self,
QtCore.SIGNAL('itemChanged(QTreeWidgetItem *, int)'),
self.change_configuration)
def change_configuration(self, item, col):
if item.flags() & QtCore.Qt.ItemIsEditable:
new_value = self.indexFromItem(item, col).data()
item.change_value(new_value)
# option-specific code
if item._name == 'dbDefault':
# Update the state of the icons if changing between db and
# file support
dbState = getattr(get_vistrails_configuration(), 'dbDefault')
if new_value != dbState:
from vistrails.gui.vistrails_window import _app
_app.setDBDefault(dbState)
self.emit(QtCore.SIGNAL('configuration_changed'),
item, new_value)
class QConfigurationTreeWindow(QSearchTreeWindow):
def __init__(self, parent, persistent_config, temp_config):
self._configuration_object = persistent_config
self._temp_configuration = temp_config
QSearchTreeWindow.__init__(self, parent)
def createTreeWidget(self):
self.setWindowTitle('Configuration')
treeWidget = QConfigurationTreeWidget(self, self._configuration_object,
self._temp_configuration)
        # The delegate has to be kept around (self._delegate) to work,
        # otherwise the instance would be garbage-collected by Python...
self._delegate = QConfigurationTreeWidgetItemDelegate()
treeWidget.setItemDelegate(self._delegate)
return treeWidget
class QConfigurationWidget(QtGui.QWidget):
def __init__(self, parent, persistent_config, temp_config):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout(self)
self.setLayout(layout)
self._tree = QConfigurationTreeWindow(self, persistent_config,
temp_config)
lbl = QtGui.QLabel("Set configuration variables for VisTrails here.", self)
layout.addWidget(lbl)
layout.addWidget(self._tree)
def configuration_changed(self, persistent_config, temp_config):
self._tree.treeWidget.create_tree(persistent_config, temp_config)
class QConfigurationWidgetItem(object):
def __init__(self, key, field, callback_f):
self.key = key
self.field = field
self.change_callback_f = callback_f
self._desc = None
def get_desc(self):
if self._desc is not None:
return self._desc
options = self.get_widget_options()
if "label" in options:
return options["label"]
return ""
def set_desc(self, desc=None):
self._desc = desc
def get_label_text(self):
return self.get_desc()
def set_value(self, value, signal=True):
raise NotImplementedError("Subclass needs to implement this method")
def value_changed(self, value):
self.change_callback_f(self, self.key, self.field, value)
def get_widget_options(self):
options = {}
if self.field.widget_options is not None:
options = self.field.widget_options
return options
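# A hedged sketch of the widget_options dict consumed above; "label" is read
# by get_desc and "allowed_values"/"remap" by QConfigurationComboBox below.
# The concrete values here are hypothetical:
#     field.widget_options = {
#         "label": "Thumbnail cache size",
#         "allowed_values": ["small", "large"],
#         "remap": {"small": "Small cache", "large": "Large cache"},
#     }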
class QConfigurationCheckBox(QtGui.QCheckBox, QConfigurationWidgetItem):
def __init__(self, key, field, callback_f, parent=None):
QtGui.QCheckBox.__init__(self, parent)
QConfigurationWidgetItem.__init__(self, key, field, callback_f)
self.setText(self.get_desc())
self.toggled.connect(self.value_changed)
def set_value(self, value, signal=True):
if not signal:
self.toggled.disconnect(self.value_changed)
self.setChecked(value)
if not signal:
self.toggled.connect(self.value_changed)
def get_label_text(self):
return ""
class QConfigurationLineEdit(QtGui.QLineEdit, QConfigurationWidgetItem):
def __init__(self, key, field, callback_f, parent=None):
QtGui.QLineEdit.__init__(self, parent)
QConfigurationWidgetItem.__init__(self, key, field, callback_f)
self.setMinimumWidth(200)
self.editingFinished.connect(self.value_changed)
def value_changed(self):
QConfigurationWidgetItem.value_changed(self, self.text())
def set_value(self, value, signal=True):
if value is None:
value = ""
if not signal:
self.editingFinished.disconnect(self.value_changed)
self.setText(unicode(value))
if not signal:
self.editingFinished.connect(self.value_changed)
class QConfigurationLineEditButton(QtGui.QWidget, QConfigurationWidgetItem):
def __init__(self, key, field, callback_f, button, parent=None):
QtGui.QWidget.__init__(self, parent)
QConfigurationWidgetItem.__init__(self, key, field, callback_f)
layout = QtGui.QHBoxLayout()
layout.setMargin(0)
layout.setSpacing(5)
self.line_edit = QtGui.QLineEdit()
self.line_edit.setMinimumWidth(200)
layout.addWidget(self.line_edit)
if button is not None:
layout.addWidget(button)
self.setLayout(layout)
self.line_edit.editingFinished.connect(self.value_changed)
def add_button(self, button):
self.layout().addWidget(button)
def value_changed(self):
QConfigurationWidgetItem.value_changed(self, self.line_edit.text())
def set_value(self, value, signal=True):
if value is None:
value = ""
if not signal:
self.line_edit.editingFinished.disconnect(self.value_changed)
self.line_edit.setText(unicode(value))
if not signal:
self.line_edit.editingFinished.connect(self.value_changed)
class QConfigurationPathEdit(QConfigurationLineEditButton):
def __init__(self, key, field, callback_f,
button_cls=QDirectoryChooserToolButton, parent=None):
QConfigurationLineEditButton.__init__(self, key, field, callback_f,
None, parent)
button = button_cls(self, self.line_edit)
self.add_button(button)
class QConfigurationThumbnailCache(QConfigurationLineEditButton):
def __init__(self, key, field, callback_f, parent=None):
button = QtGui.QPushButton("Clear...")
button.setAutoDefault(False)
button.clicked.connect(self.clear_clicked)
QConfigurationLineEditButton.__init__(self, key, field, callback_f,
button, parent)
def clear_clicked(self, checked=False):
thumbnail_dir = system.get_vistrails_directory("thumbs.cacheDir")
res = show_question('VisTrails',
("All files in %s will be removed. "
"Are you sure? " % thumbnail_dir),
buttons = [YES_BUTTON,NO_BUTTON],
default = NO_BUTTON)
if res == YES_BUTTON:
ThumbnailCache.getInstance().clear()
class QConfigurationLabelButton(QtGui.QWidget, QConfigurationWidgetItem):
def __init__(self, key, field, callback_f, label=None, button=None,
parent=None):
QtGui.QWidget.__init__(self, parent)
QConfigurationWidgetItem.__init__(self, key, field, callback_f)
layout = QtGui.QHBoxLayout()
layout.setMargin(0)
layout.setSpacing(5)
if label is not None:
self.label = label
layout.addWidget(self.label)
if button is not None:
self.button = button
layout.addWidget(self.button)
self.setLayout(layout)
def add_button(self, button):
self.button = button
self.layout().addWidget(self.button)
def add_label(self, label):
self.label = label
self.layout().insertWidget(0, self.label)
def set_value(self, value, signal=True):
# nothing to do here
pass
class QConfigurationLinuxHandler(QConfigurationLabelButton):
def __init__(self, key, field, callback_f, parent=None):
from vistrails.gui.application import linux_default_application_set
if linux_default_application_set():
label = QtGui.QLabel(".vt, .vtl handlers installed")
button = None
else:
label = QtGui.QLabel(".vt, .vtl handlers not installed")
button = QtGui.QPushButton("Install...")
button.setAutoDefault(False)
button.clicked.connect(self.install_clicked)
QConfigurationLabelButton.__init__(self, key, field, callback_f,
label, button, parent)
def install_clicked(self, checked=False):
from vistrails.core.application import get_vistrails_application
app = get_vistrails_application()
if app.ask_update_default_application(False):
self.label.setText(".vt, .vtl handlers installed")
class QConfigurationComboBox(QtGui.QComboBox, QConfigurationWidgetItem):
def __init__(self, key, field, callback_f, parent=None):
QtGui.QComboBox.__init__(self, parent)
QConfigurationWidgetItem.__init__(self, key, field, callback_f)
inv_remap = None
options = self.get_widget_options()
if "allowed_values" in options:
values = options["allowed_values"]
if "remap" in options:
remap = options["remap"]
inv_remap = dict((v, k) for (k, v) in remap.iteritems())
entries = [remap[v] for v in values]
else:
entries = values
for entry in entries:
self.addItem(entry)
self.currentIndexChanged[int].connect(self.value_changed)
def set_value(self, value, signal=True):
options = self.get_widget_options()
if not signal:
self.currentIndexChanged[int].disconnect(self.value_changed)
if value is not None and "allowed_values" in options:
if "remap" in options:
remap = options["remap"]
cur_text = remap[value]
else:
cur_text = value
self.setCurrentIndex(self.findText(cur_text))
else:
self.setCurrentIndex(-1)
if not signal:
self.currentIndexChanged[int].connect(self.value_changed)
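# Hedged example of the remap behavior above: stored configuration values are
# shown through options["remap"], so with the hypothetical options
#     {"allowed_values": ["never", "always"],
#      "remap": {"never": "Never", "always": "Always"}}
# set_value("always") selects the combo entry displayed as "Always".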
class QConfigurationPane(QtGui.QWidget):
def __init__(self, parent, persistent_config, temp_config, cat_fields):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QFormLayout()
layout.setMargin(10)
layout.setSpacing(4)
self.setLayout(layout)
self._configuration = persistent_config
self._temp_configuration = temp_config
self._fields = {}
self._field_layouts = {}
for category, fields in cat_fields:
self.process_fields(layout, fields, category)
spacer_widget = QtGui.QWidget()
spacer_layout = QtGui.QVBoxLayout()
spacer_layout.setMargin(0)
spacer_layout.addSpacing(15)
spacer_widget.setLayout(spacer_layout)
layout.addRow("", spacer_widget)
    def process_fields(self, layout, fields, category, parent_fields=None,
                       prev_field=None, prefix=""):
        # Use None instead of a mutable default list, which would be shared
        # across calls.
        if parent_fields is None:
            parent_fields = []
for field in fields:
if isinstance(field, ConfigFieldParent):
self.process_fields(layout, field.sub_fields, category,
parent_fields, prev_field,
prefix="%s%s." % (prefix, field.name))
else:
if field.depends_on is not None:
if field.depends_on not in parent_fields:
if field.depends_on == prev_field:
parent_fields.append(prev_field)
else:
raise Exception("Dependent field %s should "
"follow parent." % field.name)
parent_idx = parent_fields.index(field.depends_on)
parent_fields = parent_fields[:parent_idx+1]
indent = 4 * len(parent_fields)
else:
parent_fields = []
indent = 0
self.add_field(layout, field, category, prefix=prefix,
indent=indent)
prev_field = field.name
category = ""
def add_field(self, base_layout, field, category="", startup_only=False,
prefix="", indent=0):
label_widget = QtGui.QWidget()
label_layout = QtGui.QHBoxLayout()
label_layout.setMargin(0)
label_layout.setSpacing(5)
label_widget.setLayout(label_layout)
config_key = "%s%s" % (prefix, field.name)
if self._temp_configuration.is_unset(config_key):
config_val = None
else:
config_val = self._temp_configuration.get_deep_value(config_key)
if self._configuration.is_unset(config_key):
perm_config_val = None
else:
perm_config_val = self._configuration.get_deep_value(config_key)
icon = self.style().standardIcon(QtGui.QStyle.SP_MessageBoxWarning)
label = QtGui.QLabel()
label.setPixmap(icon.pixmap(14,14))
label.setToolTip("This option has been changed for this session")
label_layout.addWidget(label, 0, QtCore.Qt.AlignCenter)
space = 0
if not startup_only and config_val == perm_config_val:
space = (label.sizeHint().width() +
label_layout.spacing() * (indent + 1))
label.hide()
elif indent > 0:
space = label_layout.spacing() * indent
if space > 0:
spacer = QtGui.QSpacerItem(space, label.sizeHint().height())
label_layout.insertSpacerItem(0, spacer)
config_desc = find_simpledoc(config_key)
widget_type = field.widget_type
if widget_type is None:
if field.val_type == bool:
widget_type = "checkbox"
elif field.val_type == ConfigPath:
widget_type = "pathedit"
else:
widget_type = "lineedit"
if widget_type == "combo":
widget = QConfigurationComboBox(config_key, field,
self.field_changed)
elif widget_type == "lineedit":
widget = QConfigurationLineEdit(config_key, field,
self.field_changed)
elif widget_type == "pathedit":
widget = QConfigurationPathEdit(config_key, field,
self.field_changed)
elif widget_type == "thumbnailcache":
widget = QConfigurationThumbnailCache(config_key, field,
self.field_changed)
elif widget_type == "linuxext":
widget = QConfigurationLinuxHandler(config_key, field,
self.field_changed)
else:
config_val = bool(config_val)
widget = QConfigurationCheckBox(config_key, field,
self.field_changed)
widget.set_value(config_val, False)
label_text = widget.get_label_text()
if not label_text and category:
label_text = category
if label_text:
label = QtGui.QLabel(label_text + ":")
label_layout.addWidget(label)
base_layout.addRow(label_widget, widget)
self._field_layouts[config_key] = (base_layout, base_layout.rowCount())
def field_changed(self, widget, config_key, field, val):
config_val = self._configuration.get_deep_value(config_key)
if config_val != self._temp_configuration.get_deep_value(config_key):
retval = QtGui.QMessageBox.question(
self,
"Change Setting",
"This configuration value has been temporarily changed. "
"If you change it, it will be changed permanently. Do you "
"want to continue?",
QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok,
QtGui.QMessageBox.Ok)
if retval != QtGui.QMessageBox.Ok:
# revert widget's value
widget.set_value(self._temp_configuration.get_deep_value(
config_key))
return
        # need to update the label's hbox layout to reflect the change...
form_layout, row = self._field_layouts[config_key]
label_layout = form_layout.itemAt(row, QtGui.QFormLayout.LabelRole).widget().layout()
leading_item = label_layout.itemAt(0)
if isinstance(leading_item.widget(), QtGui.QLabel):
label = leading_item.widget()
spacer = QtGui.QSpacerItem(label.sizeHint().width() + \
label_layout.spacing(),
label.sizeHint().height())
label_layout.insertSpacerItem(0, spacer)
else:
spacer = leading_item
label = label_layout.itemAt(1).widget()
spacer.changeSize((spacer.sizeHint().width() +
label.sizeHint().width() +
label_layout.spacing()),
label.sizeHint().height())
label.hide()
# FIXME
if False:
QtGui.QMessageBox.information(
self, "Change Setting",
"You must restart VisTrails for this setting to take effect.")
setattr(self._temp_configuration, config_key, val)
setattr(self._configuration, config_key, val)
# TODO: Make sure this functionality (Move and Clear Cache) is preserved
class QThumbnailConfiguration(QtGui.QWidget):
def thumbs_cache_directory_changed(self):
""" thumbs_cache_changed(v: int) -> None
"""
value = str(self._thumbs_cache_directory_edt.text())
old_folder = self._configuration.thumbs.cacheDirectory
if os.path.exists(value):
self._configuration.thumbs.cacheDirectory = value
self._temp_configuration.thumbs.cacheDirectory = value
self.emit(QtCore.SIGNAL('configuration_changed'),
None, value)
self._cache.move_cache_directory(old_folder,value)
else:
show_warning('VisTrails', 'The directory specified does not exist.')
self._thumbs_cache_directory_edt.setText(old_folder)
|
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer import Variable
def _identity_grid(in_shape):
mesh = numpy.meshgrid(
numpy.linspace(-1., 1., num=in_shape[2]),
numpy.linspace(-1., 1., num=in_shape[3]))
grid = numpy.concatenate([mesh[0][None], mesh[1][None]], axis=0)
grid = numpy.repeat(grid[None], in_shape[0], axis=0).astype(numpy.float32)
return grid
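# Illustrative sketch: for a hypothetical in_shape of (1, C, 2, 2) the
# identity grid pairs each output pixel with its own location in normalized
# [-1, 1] coordinates, channel 0 holding x and channel 1 holding y:
#     grid[0, 0] == [[-1., 1.], [-1., 1.]]   # x-coordinates
#     grid[0, 1] == [[-1., -1.], [1., 1.]]   # y-coordinates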
def _rotate_grid(in_shape):
mesh = numpy.meshgrid(
numpy.linspace(-1., 1., num=in_shape[2]),
numpy.linspace(-1., 1., num=in_shape[3]))
mesh = [numpy.rot90(mesh[0]), numpy.rot90(mesh[1])]
grid = numpy.concatenate([mesh[0][None], mesh[1][None]], axis=0)
grid = numpy.repeat(grid[None], in_shape[0], axis=0).astype(numpy.float32)
return grid
def _rotate_BCHW(x):
rotated_xs = []
for i in range(x.shape[0]):
x_i = x[i].transpose(1, 2, 0)
x_i = numpy.rot90(x_i)
rotated_xs.append(x_i.transpose(2, 0, 1))
rotated_xs = numpy.concatenate([r_x[None] for r_x in rotated_xs], axis=0)
return rotated_xs
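# Note on the helper above: each (C, H, W) image is transposed to (H, W, C)
# so that numpy.rot90 rotates the spatial plane of all channels at once,
# then transposed back and re-stacked along the batch axis.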
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'never'],
}))
class TestSpatialTransformerSampler(unittest.TestCase):
in_shape = (2, 2, 4, 4)
out_shape = (2, 2, 3, 3)
grid_shape = (2, 2, 3, 3)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
self.grid = numpy.random.uniform(
low=-2., high=2., size=self.grid_shape).astype(numpy.float32)
self.grads = numpy.random.uniform(
size=self.out_shape).astype(numpy.float32)
def check_forward(self, x, grid):
y = functions.spatial_transformer_sampler(x, grid)
self.assertEqual(y.shape, self.out_shape)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.grid)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.grid))
def check_backward(self, x, grid, grads):
gradient_check.check_backward(
functions.SpatialTransformerSampler(),
(x, grid), (grads,), dtype='d', atol=1e-2, rtol=1e-2, eps=1e-5)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.grid, self.grads)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(cuda.to_gpu(self.x),
cuda.to_gpu(self.grid),
cuda.to_gpu(self.grads))
class TestSpatialTransformerSamplerConsistencyWithCuDNN(unittest.TestCase):
in_shape = (2, 2, 4, 4)
out_shape = (2, 2, 3, 3)
grid_shape = (2, 2, 3, 3)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
self.grid = numpy.random.uniform(
low=-2, high=2, size=self.grid_shape).astype(numpy.float32)
self.grads = numpy.random.uniform(
size=self.out_shape).astype(numpy.float32)
def _apply_backward(self, x, grid, grads):
x = Variable(x)
grid = Variable(grid)
y = functions.spatial_transformer_sampler(x, grid)
x.cleargrad()
grid.cleargrad()
y.grad = grads
y.backward()
return x, grid, y
@attr.gpu
@attr.cudnn
def test_consistency_with_cudnn_cpu(self):
with chainer.using_config('use_cudnn', 'never'):
x_cpu, grid_cpu, y_cpu = self._apply_backward(
self.x, self.grid, self.grads)
with chainer.using_config('use_cudnn', 'always'):
x_cudnn, grid_cudnn, y_cudnn = self._apply_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
cuda.to_gpu(self.grads))
testing.assert_allclose(y_cpu.data, y_cudnn.data)
testing.assert_allclose(x_cpu.grad, x_cudnn.grad)
testing.assert_allclose(grid_cpu.grad, grid_cudnn.grad)
@attr.gpu
@attr.cudnn
def test_consistency_with_cudnn_gpu(self):
with chainer.using_config('use_cudnn', 'never'):
x_gpu, grid_gpu, y_gpu = self._apply_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
cuda.to_gpu(self.grads))
with chainer.using_config('use_cudnn', 'always'):
x_cudnn, grid_cudnn, y_cudnn = self._apply_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
cuda.to_gpu(self.grads))
testing.assert_allclose(y_gpu.data, y_cudnn.data)
testing.assert_allclose(x_gpu.grad, x_cudnn.grad)
testing.assert_allclose(grid_gpu.grad, grid_cudnn.grad)
@testing.parameterize(
    {'grid_creator': _identity_grid, 'operator': lambda x: x,
'use_cudnn': 'always'},
    {'grid_creator': _identity_grid, 'operator': lambda x: x,
'use_cudnn': 'never'},
{'grid_creator': _rotate_grid, 'operator': _rotate_BCHW,
'use_cudnn': 'always'},
{'grid_creator': _rotate_grid, 'operator': _rotate_BCHW,
'use_cudnn': 'never'},
)
class TestSpatialTransformerSamplerForwardToyCases(unittest.TestCase):
in_shape = (2, 2, 4, 4)
grid_shape = (2, 2, 3, 3)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
self.grid = self.grid_creator(self.in_shape)
def check_forward(self, x, grid):
y = functions.spatial_transformer_sampler(x, grid)
testing.assert_allclose(y.data, self.operator(self.x))
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.grid)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.grid))
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'never'],
}))
class TestSpatialTransformerSamplerForwardPaddedImage(unittest.TestCase):
in_shape = (1, 2, 4, 4)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
p1 = [[-0.5], [-0.5]]
p2 = [[3.5], [3.5]]
p3 = [[2], [3.5]]
p4 = [[-0.5], [2]]
self.grid = numpy.concatenate((p1, p2, p3, p4), axis=1)
self.grid = self.grid.reshape(1, 2, 4, 1).astype(numpy.float32)
        # Scale the coordinates so that the pixels inside the input image
        # lie in the range [-1, 1].
self.grid[:, 0] =\
((self.grid[:, 0] / (self.in_shape[3] - 1)) - 0.5) * 2
self.grid[:, 1] =\
((self.grid[:, 1] / (self.in_shape[2] - 1)) - 0.5) * 2
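        # Worked example of the scaling above (W = H = 4, so W - 1 = 3):
        # x = -0.5 maps to ((-0.5 / 3) - 0.5) * 2 = -4/3, just outside
        # [-1, 1], while x = 0 maps to -1 and x = 3 maps to 1, so pixels
        # inside the image land exactly in [-1, 1].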
exp_p1 = self.x[0, :, 0, 0] / 4
exp_p2 = self.x[0, :, 3, 3] / 4
exp_p3 = self.x[0, :, 3, 2] / 2
exp_p4 = self.x[0, :, 2, 0] / 2
self.expected = numpy.concatenate(
(exp_p1[:, None],
exp_p2[:, None],
exp_p3[:, None],
exp_p4[:, None]), axis=1)
self.expected = self.expected.reshape(1, 2, 4, 1).astype(numpy.float32)
def check_forward(self, x, grid, expected):
y = functions.spatial_transformer_sampler(x, grid)
testing.assert_allclose(y.data, expected)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.grid, self.expected)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
cuda.to_gpu(self.expected))
testing.run_module(__name__, __file__)
|
|
from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.name = name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
if admin_class is not ModelAdmin and settings.DEBUG:
system_check_errors.extend(admin_class.check(model))
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that admin and contenttypes apps are
installed, as well as the auth context processor.
"""
if not apps.is_installed('django.contrib.admin'):
raise ImproperlyConfigured(
"Put 'django.contrib.admin' in your INSTALLED_APPS "
"setting in order to use the admin application.")
if not apps.is_installed('django.contrib.contenttypes'):
raise ImproperlyConfigured(
"Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
"setting in order to use the admin application.")
try:
default_template_engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors):
raise ImproperlyConfigured(
"Enable 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATES setting in order to use the admin "
"application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import url
urls = super(MyAdminSite, self).get_urls()
urls += [
url(r'^my_view/$', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout', current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
request.get_full_path(),
reverse('admin:login', current_app=self.name)
)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import url, include
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.contenttypes.views imports ContentType.
from django.contrib.contenttypes import views as contenttype_views
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
wrapper.admin_site = self
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = [
url(r'^$', wrap(self.index), name='index'),
url(r'^login/$', self.login, name='login'),
url(r'^logout/$', wrap(self.logout), name='logout'),
url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
name='view_on_site'),
]
# Add in each model's views, and create a list of valid URLS for the
# app_index
valid_app_labels = []
for model, model_admin in self._registry.items():
urlpatterns += [
url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
]
if model._meta.app_label not in valid_app_labels:
valid_app_labels.append(model._meta.app_label)
        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view.
if valid_app_labels:
regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
urlpatterns += [
url(regex, wrap(self.app_index), name='app_list'),
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), 'admin', self.name
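    # Hedged usage sketch (belongs in a project URLconf, not this module):
    # the 3-tuple returned above is what include() expects, e.g.
    #     from django.conf.urls import include, url
    #     from django.contrib import admin
    #     urlpatterns = [url(r'^admin/', include(admin.site.urls))]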
def each_context(self, request):
"""
Returns a dictionary of variables to put in the template context for
*every* page in the admin site.
"""
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': self.site_url,
'has_permission': self.has_permission(request),
'available_apps': self.get_app_list(request),
}
def password_change(self, request, extra_context=None):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.admin.forms import AdminPasswordChangeForm
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'password_change_form': AdminPasswordChangeForm,
'post_change_redirect': url,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
if request.method == 'GET' and self.has_permission(request):
# Already logged-in, redirect to admin index
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
from django.contrib.auth.views import login
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.admin.forms eventually imports User.
from django.contrib.admin.forms import AdminAuthenticationForm
context = dict(self.each_context(request),
title=_('Log in'),
app_path=request.get_full_path(),
)
if (REDIRECT_FIELD_NAME not in request.GET and
REDIRECT_FIELD_NAME not in request.POST):
context[REDIRECT_FIELD_NAME] = request.get_full_path()
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
def _build_app_dict(self, request, label=None):
"""
        Builds the app dictionary. Takes an optional label parameter to filter
models of a specific app.
"""
app_dict = {}
if label:
models = {
m: m_a for m, m_a in self._registry.items()
if m._meta.app_label == label
}
else:
models = self._registry
for model, model_admin in models.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
if label:
raise PermissionDenied
continue
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True not in perms.values():
continue
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=self.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if label:
return app_dict.get(label)
return app_dict
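    # Sketch of the structure built above for one app (all values here are
    # hypothetical):
    #     {'polls': {'name': 'Polls', 'app_label': 'polls',
    #                'app_url': '/admin/polls/', 'has_module_perms': True,
    #                'models': [{'name': 'Questions',
    #                            'object_name': 'Question',
    #                            'perms': {'add': True, 'change': True},
    #                            'admin_url': '/admin/polls/question/',
    #                            'add_url': '/admin/polls/question/add/'}]}}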
def get_app_list(self, request):
"""
Returns a sorted list of all the installed apps that have been
registered in this site.
"""
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
return app_list
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_list = self.get_app_list(request)
context = dict(
self.each_context(request),
title=self.index_title,
app_list=app_list,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.index_template or
'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
app_dict = self._build_app_dict(request, app_label)
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
app_name = apps.get_app_config(app_label).verbose_name
context = dict(self.each_context(request),
title=_('%(app)s administration') % {'app': app_name},
app_list=[app_dict],
app_label=app_label,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# It's reasonable for unittests to be messing with protected members.
# pylint: disable=protected-access
from __future__ import print_function
import json
import os
import sys
import unittest
import tempfile
if sys.version_info[0] == 2:
import mock
else:
import unittest.mock as mock
import six
import gpu_project_config
import run_gpu_integration_test
from gpu_tests import context_lost_integration_test
from gpu_tests import gpu_helper
from gpu_tests import gpu_integration_test
from gpu_tests import path_util
from gpu_tests import webgl_conformance_integration_test
from py_utils import tempfile_ext
from telemetry.internal.util import binary_manager
from telemetry.internal.platform import system_info
from telemetry.testing import browser_test_runner
from telemetry.testing import fakes
from telemetry.testing import run_browser_tests
path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'tools', 'perf')
from chrome_telemetry_build import chromium_config
# Unittest test cases are defined as public methods, so ignore complaints about
# having too many.
# pylint: disable=too-many-public-methods
VENDOR_NVIDIA = 0x10DE
VENDOR_AMD = 0x1002
VENDOR_INTEL = 0x8086
VENDOR_STRING_IMAGINATION = 'Imagination Technologies'
DEVICE_STRING_SGX = 'PowerVR SGX 554'
def _GetSystemInfo( # pylint: disable=too-many-arguments
gpu='',
device='',
vendor_string='',
device_string='',
passthrough=False,
gl_renderer=''):
sys_info = {
'model_name': '',
'gpu': {
'devices': [
{
'vendor_id': gpu,
'device_id': device,
'vendor_string': vendor_string,
'device_string': device_string
},
],
'aux_attributes': {
'passthrough_cmd_decoder': passthrough
}
}
}
if gl_renderer:
sys_info['gpu']['aux_attributes']['gl_renderer'] = gl_renderer
return system_info.SystemInfo.FromDict(sys_info)
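# Illustrative call, mirroring the usage further down in this file: build a
# fake NVIDIA system-info object for the tag-generation tests.
#     sys_info = _GetSystemInfo(gpu=VENDOR_NVIDIA, device=0x1cb3,
#                               gl_renderer='ANGLE Direct3D9')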
def _GetTagsToTest(browser, test_class=None):
test_class = test_class or gpu_integration_test.GpuIntegrationTest
tags = None
with mock.patch.object(
test_class, 'ExpectationsFiles', return_value=['exp.txt']):
tags = set(test_class.GetPlatformTags(browser))
return tags
def _GenerateNvidiaExampleTagsForTestClassAndArgs(test_class, args):
tags = None
with mock.patch.object(
test_class, 'ExpectationsFiles', return_value=['exp.txt']):
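    # Drain the generator; only its side effects on the test class appear to
    # matter here, as the yielded tests are discarded.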
_ = [_ for _ in test_class.GenerateGpuTests(args)]
platform = fakes.FakePlatform('win', 'win10')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
gpu=VENDOR_NVIDIA, device=0x1cb3, gl_renderer='ANGLE Direct3D9')
tags = _GetTagsToTest(browser, test_class)
return tags
class _IntegrationTestArgs(object):
"""Struct-like object for defining an integration test."""
def __init__(self, test_name):
self.test_name = test_name
self.failures = []
self.successes = []
self.skips = []
self.additional_args = []
class GpuIntegrationTestUnittest(unittest.TestCase):
def setUp(self):
self._test_state = {}
self._test_result = {}
def _RunGpuIntegrationTests(self, test_name, extra_args=None):
extra_args = extra_args or []
unittest_config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
])
with binary_manager.TemporarilyReplaceBinaryManager(None), \
mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
      # TODO(crbug.com/1103792): Using NamedTemporaryFile() as a context
      # manager is causing Windows bots to fail. When the issue is fixed
      # with tempfile_ext.NamedTemporaryFile(), add it to the context
      # managers opening this with block, and remove the try/finally
      # statement below.
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
try:
test_argv = [
test_name,
'--write-full-results-to=%s' % temp_file.name,
# We don't want the underlying typ-based tests to report their
# results to ResultDB.
'--disable-resultsink',
] + extra_args
processed_args = run_gpu_integration_test.ProcessArgs(test_argv)
telemetry_args = browser_test_runner.ProcessConfig(
unittest_config, processed_args)
run_browser_tests.RunTests(telemetry_args)
with open(temp_file.name) as f:
self._test_result = json.load(f)
finally:
temp_file.close()
def testOverrideDefaultRetryArgumentsinRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('run_tests_with_expectations_files',
['--retry-limit=1'])
self.assertEqual(
self._test_result['tests']['a']['b']['unexpected-fail.html']['actual'],
'FAIL FAIL')
def testDefaultRetryArgumentsinRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('run_tests_with_expectations_files')
self.assertEqual(
self._test_result['tests']['a']['b']['expected-flaky.html']['actual'],
'FAIL FAIL FAIL')
def testTestNamePrefixGenerationInRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('simple_integration_unittest')
self.assertIn('expected_failure', self._test_result['tests'])
def _TestTagGenerationForMockPlatform(self, test_class, args):
tag_set = _GenerateNvidiaExampleTagsForTestClassAndArgs(test_class, args)
self.assertTrue(
set([
'win', 'win10', 'angle-d3d9', 'release', 'nvidia', 'nvidia-0x1cb3',
'no-passthrough'
]).issubset(tag_set))
return tag_set
def testGenerateContextLostExampleTagsForAsan(self):
args = gpu_helper.GetMockArgs(is_asan=True)
tag_set = self._TestTagGenerationForMockPlatform(
context_lost_integration_test.ContextLostIntegrationTest, args)
self.assertIn('asan', tag_set)
self.assertNotIn('no-asan', tag_set)
def testGenerateContextLostExampleTagsForNoAsan(self):
args = gpu_helper.GetMockArgs()
tag_set = self._TestTagGenerationForMockPlatform(
context_lost_integration_test.ContextLostIntegrationTest, args)
self.assertIn('no-asan', tag_set)
self.assertNotIn('asan', tag_set)
def testGenerateWebglConformanceExampleTagsForWebglVersion1andAsan(self):
args = gpu_helper.GetMockArgs(is_asan=True, webgl_version='1.0.0')
tag_set = self._TestTagGenerationForMockPlatform(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(set(['asan', 'webgl-version-1']).issubset(tag_set))
self.assertFalse(set(['no-asan', 'webgl-version-2']) & tag_set)
def testGenerateWebglConformanceExampleTagsForWebglVersion2andNoAsan(self):
args = gpu_helper.GetMockArgs(is_asan=False, webgl_version='2.0.0')
tag_set = self._TestTagGenerationForMockPlatform(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(set(['no-asan', 'webgl-version-2']).issubset(tag_set))
self.assertFalse(set(['asan', 'webgl-version-1']) & tag_set)
@mock.patch('sys.platform', 'win32')
def testGenerateNvidiaExampleTags(self):
platform = fakes.FakePlatform('win', 'win10')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
gpu=VENDOR_NVIDIA, device=0x1cb3, gl_renderer='ANGLE Direct3D9')
self.assertEqual(
_GetTagsToTest(browser),
set([
'win', 'win10', 'release', 'nvidia', 'nvidia-0x1cb3', 'angle-d3d9',
'no-passthrough', 'no-swiftshader-gl', 'skia-renderer-disabled',
'no-oop-c'
]))
@mock.patch('sys.platform', 'darwin')
def testGenerateVendorTagUsingVendorString(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
vendor_string=VENDOR_STRING_IMAGINATION,
device_string=DEVICE_STRING_SGX,
passthrough=True,
gl_renderer='ANGLE OpenGL ES')
self.assertEqual(
_GetTagsToTest(browser),
set([
'mac', 'mojave', 'release', 'imagination',
'imagination-PowerVR-SGX-554', 'angle-opengles', 'passthrough',
'no-swiftshader-gl', 'skia-renderer-disabled', 'no-oop-c'
]))
@mock.patch('sys.platform', 'darwin')
def testGenerateVendorTagUsingDeviceString(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
vendor_string='illegal vendor string',
device_string='ANGLE (Imagination, Triangle Monster 3000, 1.0)')
self.assertEqual(
_GetTagsToTest(browser),
set([
'mac', 'mojave', 'release', 'imagination',
'imagination-Triangle-Monster-3000', 'angle-disabled',
'no-passthrough', 'no-swiftshader-gl', 'skia-renderer-disabled',
'no-oop-c'
]))
@mock.patch.dict(os.environ, clear=True)
def testGenerateDisplayServer(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
with mock.patch('sys.platform', 'darwin'):
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
for t in tags:
self.assertFalse(t.startswith('display-server'))
# Python 2's return value.
with mock.patch('sys.platform', 'linux2'):
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
self.assertIn('display-server-x', tags)
os.environ['WAYLAND_DISPLAY'] = 'wayland-0'
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
self.assertIn('display-server-wayland', tags)
# Python 3's return value.
with mock.patch('sys.platform', 'linux'):
del os.environ['WAYLAND_DISPLAY']
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
self.assertIn('display-server-x', tags)
os.environ['WAYLAND_DISPLAY'] = 'wayland-0'
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
self.assertIn('display-server-wayland', tags)
def testSimpleIntegrationTest(self):
test_args = _IntegrationTestArgs('simple_integration_unittest')
test_args.failures = [
'unexpected_error',
'unexpected_failure',
]
test_args.successes = [
'expected_flaky',
'expected_failure',
]
test_args.skips = ['expected_skip']
test_args.additional_args = [
'--retry-only-retry-on-failure',
'--retry-limit=3',
'--test-name-prefix=unittest_data.integration_tests.SimpleTest.',
]
self._RunIntegrationTest(test_args)
    # The number of browser starts includes the one call to StartBrowser at
    # the beginning of the test suite run plus one RestartBrowser call after
    # every failure.
    self.assertEqual(self._test_state['num_browser_starts'], 6)
  def testIntegrationTestWithBrowserFailure(self):
test_args = _IntegrationTestArgs(
'browser_start_failure_integration_unittest')
test_args.successes = [
'unittest_data.integration_tests.BrowserStartFailureTest.restart'
]
self._RunIntegrationTest(test_args)
    self.assertEqual(self._test_state['num_browser_crashes'], 2)
    self.assertEqual(self._test_state['num_browser_starts'], 3)
def testIntegrationTestWithBrowserCrashUponStart(self):
test_args = _IntegrationTestArgs(
'browser_crash_after_start_integration_unittest')
test_args.successes = [
'unittest_data.integration_tests.BrowserCrashAfterStartTest.restart'
]
self._RunIntegrationTest(test_args)
    self.assertEqual(self._test_state['num_browser_crashes'], 2)
    self.assertEqual(self._test_state['num_browser_starts'], 3)
def testRetryLimit(self):
test_args = _IntegrationTestArgs('test_retry_limit')
test_args.failures = [
'unittest_data.integration_tests.TestRetryLimit.unexpected_failure'
]
test_args.additional_args = ['--retry-limit=2']
self._RunIntegrationTest(test_args)
# The number of attempted runs is 1 + the retry limit.
    self.assertEqual(self._test_state['num_test_runs'], 3)
def _RunTestsWithExpectationsFiles(self):
test_args = _IntegrationTestArgs('run_tests_with_expectations_files')
test_args.failures = ['a/b/unexpected-fail.html']
test_args.successes = [
'a/b/expected-fail.html',
'a/b/expected-flaky.html',
]
test_args.skips = ['should_skip']
test_args.additional_args = [
'--retry-limit=3',
'--retry-only-retry-on-failure-tests',
('--test-name-prefix=unittest_data.integration_tests.'
'RunTestsWithExpectationsFiles.'),
]
self._RunIntegrationTest(test_args)
def testTestFilterCommandLineArg(self):
test_args = _IntegrationTestArgs('run_tests_with_expectations_files')
test_args.failures = ['a/b/unexpected-fail.html']
test_args.successes = ['a/b/expected-fail.html']
test_args.skips = ['should_skip']
test_args.additional_args = [
'--retry-limit=3',
'--retry-only-retry-on-failure-tests',
('--test-filter=a/b/unexpected-fail.html::a/b/expected-fail.html::'
'should_skip'),
('--test-name-prefix=unittest_data.integration_tests.'
'RunTestsWithExpectationsFiles.'),
]
self._RunIntegrationTest(test_args)
def testUseTestExpectationsFileToHandleExpectedSkip(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['should_skip']
self.assertEqual(results['expected'], 'SKIP')
self.assertEqual(results['actual'], 'SKIP')
self.assertNotIn('is_regression', results)
def testUseTestExpectationsFileToHandleUnexpectedTestFailure(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['unexpected-fail.html']
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL')
self.assertIn('is_regression', results)
def testUseTestExpectationsFileToHandleExpectedFailure(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['expected-fail.html']
self.assertEqual(results['expected'], 'FAIL')
self.assertEqual(results['actual'], 'FAIL')
self.assertNotIn('is_regression', results)
def testUseTestExpectationsFileToHandleExpectedFlakyTest(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['expected-flaky.html']
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL FAIL FAIL PASS')
self.assertNotIn('is_regression', results)
def testRepeat(self):
test_args = _IntegrationTestArgs('test_repeat')
test_args.successes = ['unittest_data.integration_tests.TestRepeat.success']
test_args.additional_args = ['--repeat=3']
self._RunIntegrationTest(test_args)
    self.assertEqual(self._test_state['num_test_runs'], 3)
def testAlsoRunDisabledTests(self):
test_args = _IntegrationTestArgs('test_also_run_disabled_tests')
test_args.failures = [
'skip',
'flaky',
]
# Tests that are expected to fail and do fail are treated as test passes
test_args.successes = ['expected_failure']
test_args.additional_args = [
'--all',
'--test-name-prefix',
'unittest_data.integration_tests.TestAlsoRunDisabledTests.',
'--retry-limit=3',
'--retry-only-retry-on-failure',
]
self._RunIntegrationTest(test_args)
    self.assertEqual(self._test_state['num_flaky_test_runs'], 4)
    self.assertEqual(self._test_state['num_test_runs'], 6)
def testStartBrowser_Retries(self):
class TestException(Exception):
pass
def SetBrowserAndRaiseTestException():
gpu_integration_test.GpuIntegrationTest.browser = (mock.MagicMock())
raise TestException
gpu_integration_test.GpuIntegrationTest.browser = None
gpu_integration_test.GpuIntegrationTest.platform = None
with mock.patch.object(
gpu_integration_test.serially_executed_browser_test_case.\
SeriallyExecutedBrowserTestCase,
'StartBrowser',
side_effect=SetBrowserAndRaiseTestException) as mock_start_browser:
with mock.patch.object(gpu_integration_test.GpuIntegrationTest,
'StopBrowser') as mock_stop_browser:
with self.assertRaises(TestException):
gpu_integration_test.GpuIntegrationTest.StartBrowser()
self.assertEqual(mock_start_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
self.assertEqual(mock_stop_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
def _RunIntegrationTest(self, test_args):
"""Runs an integration and asserts fail/success/skip expectations.
Args:
test_args: A _IntegrationTestArgs instance to use.
"""
config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
])
with binary_manager.TemporarilyReplaceBinaryManager(None), \
tempfile_ext.NamedTemporaryDirectory() as temp_dir:
test_results_path = os.path.join(temp_dir, 'test_results.json')
test_state_path = os.path.join(temp_dir, 'test_state.json')
# We are processing ChromiumConfig instance and getting the argument
# list. Then we pass it directly to run_browser_tests.RunTests. If
# we called browser_test_runner.Run, then it would spawn another
# subprocess which is less efficient.
args = browser_test_runner.ProcessConfig(
config,
[
test_args.test_name,
'--write-full-results-to=%s' % test_results_path,
'--test-state-json-path=%s' % test_state_path,
# We don't want the underlying typ-based tests to report their
# results to ResultDB.
'--disable-resultsink',
] + test_args.additional_args)
run_browser_tests.RunTests(args)
with open(test_results_path) as f:
self._test_result = json.load(f)
with open(test_state_path) as f:
self._test_state = json.load(f)
actual_successes, actual_failures, actual_skips = (_ExtractTestResults(
self._test_result))
      self.assertEqual(set(actual_failures), set(test_args.failures))
      self.assertEqual(set(actual_successes), set(test_args.successes))
      self.assertEqual(set(actual_skips), set(test_args.skips))
def _ExtractTestResults(test_result):
delimiter = test_result['path_delimiter']
failures = []
successes = []
skips = []
def _IsLeafNode(node):
test_dict = node[1]
return ('expected' in test_dict
and isinstance(test_dict['expected'], six.string_types))
node_queues = []
for t in test_result['tests']:
node_queues.append((t, test_result['tests'][t]))
while node_queues:
node = node_queues.pop()
full_test_name, test_dict = node
if _IsLeafNode(node):
if all(res not in test_dict['expected'].split()
for res in test_dict['actual'].split()):
failures.append(full_test_name)
elif test_dict['expected'] == test_dict['actual'] == 'SKIP':
skips.append(full_test_name)
else:
successes.append(full_test_name)
else:
for k in test_dict:
node_queues.append(
('%s%s%s' % (full_test_name, delimiter, k), test_dict[k]))
return successes, failures, skips
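# Illustrative sketch of how _ExtractTestResults walks the typ results trie.
# The sample dict below is hypothetical, not taken from a real run:
#
#   result = {'path_delimiter': '.',
#             'tests': {'suite': {
#                 't_pass': {'expected': 'PASS', 'actual': 'PASS'},
#                 't_fail': {'expected': 'PASS', 'actual': 'FAIL'},
#                 't_skip': {'expected': 'SKIP', 'actual': 'SKIP'}}}}
#   successes, failures, skips = _ExtractTestResults(result)
#   # successes == ['suite.t_pass'], failures == ['suite.t_fail'],
#   # skips == ['suite.t_skip']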
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
import xarray as xr
import numpy as np
import pytest
from xarrayutils.vertical_coordinates import (
conservative_remap,
linear_interpolation_remap,
linear_interpolation_regrid,
_strip_dim,
_coord_interp,
_regular_interp,
)
def random_broadcast(da):
    # add random noise with additional dimensions to the input array
raw_noise = np.random.rand(2, 6, 12)
noise = xr.DataArray(raw_noise, dims=["test_a", "test_b", "test_c"])
return ((noise) * 4) + da
def test_strip_dim():
data = np.array([0, 3, 4])
a = xr.DataArray(data, coords=[("x", data)])
b = xr.DataArray(data, dims="x")
xr.testing.assert_identical(_strip_dim(a, "x"), b)
# TODO: make an explicit test for 1D case that compares values
@pytest.mark.parametrize("mask", [True, False])
@pytest.mark.parametrize("multi_dim", [True, False])
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize("coords", [True, False])
@pytest.mark.parametrize(
"dat, src, tar",
[
(
# first example, going from low res to high res
# NOTE: the target bounds always need to cover all values of the
# source, otherwise properties are not conserved
np.array([30, 12.3, 5]),
np.array([4.5, 9, 23, 45.6]),
np.array([0, 2, 4, 10, 11, 13.4, 23, 55.6, 80, 100]),
),
(
# second example, going from high res to low res
# NOTE: the target bounds always need to cover all values of the
# source, otherwise properties are not conserved
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([4.5, 9, 23, 45.6, 46, 70, 90]),
np.array([0, 10, 100]),
),
(
# third example, using negative depth values sorted
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([-90, -70, -46, -45.6, -23, -9, -4.5]),
np.array([-100, -10, -0]),
),
# (
        # # fourth example, using negative depth values unsorted
# # this is not supported atm, because I need to find a way to sort a
# # multidim depth array consistently (would be easy with 1d.)
# np.array([30, 12.3, 5, 2, -1, 4]),
# np.array([-4.5, -9, -23, -45.6, -46, -70, -90]),
# np.array([-0, -10, -100]),
# ),
(
# fifth example, using negative AND positive depth values sorted
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([-90, -70, -45.6, -23, 0, 1, 3]),
np.array([-100, -10, 0, 10]),
),
],
)
def test_conservative_remap(mask, multi_dim, dask, dat, src, tar, coords):
# create test datasets
if coords:
z_raw = 0.5 * (src[1:] + src[0:-1])
data = xr.DataArray(np.array(dat), dims=["z"], coords=[("z", z_raw)])
z_bounds_source = xr.DataArray(
src, dims=["z_bounds"], coords=[("z_bounds", src)]
)
z_bounds_target = xr.DataArray(
tar, dims=["z_bounds"], coords=[("z_bounds", tar)]
)
else:
data = xr.DataArray(np.array(dat), dims=["z"])
z_bounds_source = xr.DataArray(src, dims=["z_bounds"])
z_bounds_target = xr.DataArray(tar, dims=["z_bounds"])
if multi_dim:
        # Add more dimensions and shift the values of each depth profile by a random amount
data = random_broadcast(data)
z_bounds_source = random_broadcast(z_bounds_source)
z_bounds_target = random_broadcast(z_bounds_target)
if dask:
if multi_dim:
chunks = {"test_c": 1}
else:
chunks = {}
data = data.chunk(chunks)
z_bounds_source = z_bounds_source.chunk(chunks)
z_bounds_target = z_bounds_target.chunk(chunks)
data_new = conservative_remap(
data, z_bounds_source, z_bounds_target, mask=mask, debug=False
)
# Calculate cell thickness and rename
# in case the bounds had coordinate values, these need to be stripped
# to align properly
dz_source = (
_strip_dim(z_bounds_source, "z_bounds")
.diff("z_bounds")
.rename({"z_bounds": "z"})
)
dz_target = (
_strip_dim(z_bounds_target, "z_bounds")
.diff("z_bounds")
.rename({"z_bounds": "remapped"})
)
raw = (data * dz_source).sum("z")
remapped = (data_new * dz_target).sum("remapped")
print(raw)
print(remapped)
xr.testing.assert_allclose(raw, remapped)
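    # Worked example of the property checked above (hypothetical numbers):
    # with source bounds [0, 1, 3] and data [2, 4], the column total is
    # 2*(1-0) + 4*(3-1) = 10, and a conservative remap onto any target
    # bounds covering [0, 3] must reproduce that same total, which is what
    # the assert_allclose above verifies for the multi-dimensional inputs.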
def test_regular_interp():
    x = np.arange(5, dtype=float)
y = np.linspace(3, 7, 5)
    target = np.array([0.5, 3.0, np.nan, 7.0])
interpolated = _regular_interp(x, y, target)
expected = np.array([3.5, 6.0, np.nan, np.nan])
np.testing.assert_allclose(interpolated, expected)
    # so when I specify the literal boundary value it returns nan...
    # not sure if this is causing any problems. I'll leave it here commented out:
# target = np.array([5.0])
# interpolated = _regular_interp(x, y, target)
# expected = np.array([7.0])
@pytest.mark.parametrize("multi_dim", [True, False])
@pytest.mark.parametrize("coords", [True, False])
@pytest.mark.parametrize("z_dim", ["z", "blob"])
@pytest.mark.parametrize("z_regridded_dim", ["z_new", "blab"])
@pytest.mark.parametrize("output_dim", ["remapped", "boink"])
@pytest.mark.parametrize(
"dat, src, tar",
[
(
# first example, going from low res to high res
# NOTE: the target bounds always need to cover all values of the
# source, otherwise properties are not conserved
np.array([30, 12.3, 5]),
np.array([4.5, 9, 23]),
np.array([0, 2, 4, 10, 11, 13.4, 23, 55.6, 80, 100]),
),
(
# second example, going from high res to low res
# NOTE: the target bounds always need to cover all values of the
# source, otherwise properties are not conserved
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([4.5, 9, 23, 45.6, 46, 70]),
np.array([0, 10, 100]),
),
(
# third example, using negative depth values sorted
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([-90, -70, -46, -45.6, -23, -9]),
np.array([-100, -10, -0]),
),
(
            # fourth example, using negative depth values unsorted
# this is not supported atm, because I need to find a way to sort a
# multidim depth array consistently (would be easy with 1d.)
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([-4.5, -9, -23, -45.6, -46, -70]),
np.array([-0, -10, -100]),
),
(
# fifth example, using negative AND positive depth values sorted
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([-90, -70, -45.6, -23, 0, 1]),
np.array([-100, -10, 0, 10]),
),
],
)
def test_linear_interpolation_remap(
dat, src, tar, coords, multi_dim, z_dim, z_regridded_dim, output_dim
):
# create test datasets
if coords:
data = xr.DataArray(np.array(dat), dims=[z_dim], coords=[(z_dim, src)])
z_source = xr.DataArray(src, dims=[z_dim], coords=[(z_dim, src)])
z_target = xr.DataArray(
tar, dims=[z_regridded_dim], coords=[(z_regridded_dim, tar)]
)
else:
data = xr.DataArray(np.array(dat), dims=[z_dim])
z_source = xr.DataArray(src, dims=[z_dim])
z_target = xr.DataArray(tar, dims=[z_regridded_dim])
# the target and data should always have other dimensions
data = random_broadcast(data)
z_target = random_broadcast(z_target)
if multi_dim:
z_source = random_broadcast(z_source)
remapped = linear_interpolation_remap(
z_source,
data,
z_target,
z_dim=z_dim,
z_regridded_dim=z_regridded_dim,
output_dim=output_dim,
)
    # select a random sample 3 times
for iteration in range(3):
sample = {
"test_a": np.random.randint(0, len(remapped.test_a)),
"test_b": np.random.randint(0, len(remapped.test_b)),
"test_c": np.random.randint(0, len(remapped.test_c)),
}
profile = remapped.isel(**sample).load().data
if multi_dim:
x = z_source.isel(**sample)
else:
x = z_source
target = z_target.isel(**sample)
y = data.isel(**sample)
expected_profile = _regular_interp(x.data, y.data, target.data)
np.testing.assert_allclose(profile, expected_profile)
# check output coords
np.testing.assert_allclose(
remapped.coords[output_dim].data, z_target.coords[z_regridded_dim].data
)
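# _coord_interp, exercised below, performs inverse interpolation: given a
# coordinate z and data values along it, it returns the z at which the data
# reaches a target value (e.g. the depth of an isotherm). Targets outside
# the data range map to nan, or to pad_left/pad_right when pads are given.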
def test_coord_interp():
# super simple test
z = np.array([0, 2, 6, 20, 400])
data = np.array([30, 20, 10, 5, 4])
z_new = _coord_interp(z, data, 15)
assert z_new == 4.0
# test multiple targets
z = np.array([0, 2, 6, 20, 400])
data = np.array([30, 20, 10, 5, 4])
z_new = _coord_interp(z, data, [15, 4.5])
np.testing.assert_allclose(z_new, np.array([4.0, 210.0]))
# test with nans
z = np.array([0, 2, 6, np.nan, 400])
data = np.array([30, 20, 10, 5, 4])
z_new = _coord_interp(z, data, 15)
assert z_new == 4.0
z = np.array([0, 2, 6, 20, 400])
data = np.array([30, 20, 10, 5, 4])
for zz in [34, 2]:
z_new = _coord_interp(z, data, zz)
assert np.isnan(z_new)
# test out of range with padding
z = np.array([0, 2, 6, 20, 400])
data = np.array([30, 20, 10, 5, 4])
for pad_left in [-10, -5]:
for pad_right in [400, 800]:
z_new = _coord_interp(z, data, [40, 15, 4.5, 2], pad_left, pad_right)
np.testing.assert_allclose(
z_new, np.array([pad_left, 4.0, 210.0, pad_right])
)
z_new = _coord_interp(z, data, [2, 4.5, 15, 40], pad_left, pad_right)
np.testing.assert_allclose(
z_new, np.array([pad_right, 210.0, 4.0, pad_left])
)
    # test out of range again (descending data, same values as above)
z = np.array([0, 2, 6, 20, 400])
data = np.array([30, 20, 10, 5, 4])
for zz in [34, 2]:
z_new = _coord_interp(z, data, zz)
assert np.isnan(z_new)
# test out of range with padding
z = np.array([0, 2, 6, 20, 400])
data = np.array([4, 5, 10, 20, 30])
for pad_left in [-10, -5]:
for pad_right in [400, 800]:
z_new = _coord_interp(z, data, [40, 15, 4.5, 2], pad_left, pad_right)
np.testing.assert_allclose(
z_new, np.array([pad_right, 13.0, 1.0, pad_left])
)
z_new = _coord_interp(z, data, [2, 4.5, 15, 40], pad_left, pad_right)
np.testing.assert_allclose(
z_new, np.array([pad_left, 1.0, 13.0, pad_right])
)
# test super short array
z = np.array([2, 4])
data = np.array([20, 30])
z_new = _coord_interp(z, data, 25)
assert z_new == 3.0
# test boundary value
z = np.array([2, 4])
data = np.array([20, 30])
z_new = _coord_interp(z, data, 20)
assert z_new == 2 # this is the default interpolation behaviour
z_new = _coord_interp(z, data, 20, -10, 200)
assert z_new == -10
z_new = _coord_interp(z, data, 30, -10, 200)
assert z_new == 200
    # test too short to interpolate
z = np.array([2, np.nan, np.nan])
data = np.array([30, np.nan, np.nan])
z_new = _coord_interp(z, data, 30)
assert np.isnan(z_new)
@pytest.mark.parametrize("multi_dim", [True, False])
@pytest.mark.parametrize("coords", [True, False])
@pytest.mark.parametrize("z_dim", ["z", "blob"])
@pytest.mark.parametrize("target_value_dim", ["temp", "blab"])
@pytest.mark.parametrize("output_dim", ["remapped", "boink"])
@pytest.mark.parametrize("z_bounds", [False, True])
@pytest.mark.parametrize("z_bounds_dim", ["z_bounds", "bawoosh"])
@pytest.mark.parametrize(
"dat, src, tar",
[
(
# first example, going from low res to high res
# NOTE: the target bounds always need to cover all values of the
# source, otherwise properties are not conserved
np.array([30, 12.3, 5]),
np.array([4.5, 9, 23, 40]),
np.array([0, 2, 4, 10, 11, 13.4, 23, 55.6, 80, 100]),
),
(
# second example, going from high res to low res
# NOTE: the target bounds always need to cover all values of the
# source, otherwise properties are not conserved
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([4.5, 9, 23, 45.6, 46, 70, 90]),
np.array([0, 10, 30, 100]),
),
(
# third example, using negative depth values sorted
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([-90, -70, -46, -45.6, -23, -9, -2]),
np.array([-100, -10, -4, -0]),
),
(
            # fourth example, using negative depth values unsorted
# this is not supported atm, because I need to find a way to sort a
# multidim depth array consistently (would be easy with 1d.)
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([-4.5, -9, -23, -45.6, -46, -70, -80]),
np.array([-0, -10, -40, -100]),
),
(
# fifth example, using negative AND positive depth values sorted
np.array([30, 12.3, 5, 2, -1, 4]),
np.array([-90, -70, -45.6, -23, 0, 1, 2]),
np.array([-100, -10, 0, 10]),
),
],
)
def test_linear_interpolation_regrid(
dat,
src,
tar,
coords,
multi_dim,
z_dim,
target_value_dim,
output_dim,
z_bounds,
z_bounds_dim,
):
    # This is a lot of setup, but the test itself is simple: create some random
    # input arrays of different shapes and check that, for random samples, the
    # profile data matches what we would expect from the utility function. All
    # other issues should be addressed within that function.
# create test datasets
src = np.array(src)
# reconstruct the source depth center from cell bounds
z_raw = 0.5 * (src[1:] + src[0:-1])
if coords:
data = xr.DataArray(dat, coords=[(z_dim, z_raw)])
z_source_bnds = xr.DataArray(src, coords=[(z_bounds_dim, src)])
z_target = xr.DataArray(tar, coords=[(target_value_dim, tar)])
z_source = xr.DataArray(z_raw, coords=[(z_dim, z_raw)])
else:
data = xr.DataArray(np.array(dat), dims=[z_dim])
z_source_bnds = xr.DataArray(src, dims=[z_bounds_dim])
z_target = xr.DataArray(tar, dims=[target_value_dim])
z_source = xr.DataArray(z_raw, dims=[z_dim])
# the target and data should always have other dimensions
data = random_broadcast(data)
z_target = random_broadcast(z_target)
if multi_dim:
z_source_bnds = random_broadcast(z_source_bnds)
z_source = random_broadcast(z_source)
if z_bounds:
bnds = z_source_bnds
else:
bnds = None
regridded = linear_interpolation_regrid(
z_source,
data,
z_target,
z_dim=z_dim,
target_value_dim=target_value_dim,
output_dim=output_dim,
z_bounds=bnds,
z_bounds_dim=z_bounds_dim,
)
    # select a random sample 2 times
for iteration in range(2):
sample = {
"test_a": np.random.randint(0, len(regridded.test_a)),
"test_b": np.random.randint(0, len(regridded.test_b)),
"test_c": np.random.randint(0, len(regridded.test_c)),
}
profile = regridded.isel(**sample).load().data
if multi_dim:
z = z_source.isel(**sample)
else:
z = z_source
target = z_target.isel(**sample)
d = data.isel(**sample)
if z_bounds:
pad_left = bnds[{z_bounds_dim: 0}]
pad_right = bnds[{z_bounds_dim: -1}]
if multi_dim:
pad_left = pad_left.isel(**sample)
pad_right = pad_right.isel(**sample)
pad_left = pad_left.data
pad_right = pad_right.data
else:
pad_left = pad_right = None
expected_profile = _coord_interp(
z.data, d.data, target.data, pad_left=pad_left, pad_right=pad_right
)
np.testing.assert_allclose(profile, expected_profile)
# check output coords
np.testing.assert_allclose(
regridded.coords[output_dim].data, z_target.coords[target_value_dim].data
)
|
|
#!/usr/bin/env python
import sys
import getopt
import re
import struct
import socket
import stat
import os
import traceback
debug = 0
QEMUCMDTEMPLATE = """#!/bin/bash
set -u
ARCHEND=%(ARCHEND)s
IID=%(IID)i
if [[ -e ./configure.sh ]]; then
. ./configure.sh
elif [[ -e ../configure.sh ]]; then
. ../configure.sh
elif [[ -e ../../configure.sh ]]; then
. ../../configure.sh
elif [[ -e ../../../configure.sh ]]; then
. ../../../configure.sh
else
echo "Error: Could not find 'configure.sh'!"
exit 1
fi
IMAGE=`get_fs ${IID}`
KERNEL=`get_kernel ${ARCHEND}`
QEMU=`get_qemu ${ARCHEND}`
QEMU_MACHINE=`get_qemu_machine ${ARCHEND}`
QEMU_ROOTFS=`get_qemu_disk ${ARCHEND}`
WORK_DIR=`get_vm ${IID}`
%(START_NET)s
function cleanup {
pkill -P $$
%(STOP_NET)s
}
trap cleanup EXIT
echo "Starting firmware emulation... use Ctrl-a + x to exit"
%(QEMU_ENV_VARS)s ${QEMU} -m 256 -M ${QEMU_MACHINE} -kernel ${KERNEL} \\
%(QEMU_DISK)s -append "root=${QEMU_ROOTFS} console=ttyS0 nandsim.parts=64,64,64,64,64,64,64,64,64,64 rdinit=/firmadyne/preInit.sh rw debug ignore_loglevel print-fatal-signals=1 user_debug=31 firmadyne.syscall=0" \\
-nographic \\
%(QEMU_NETWORK)s | tee ${WORK_DIR}/qemu.final.serial.log
echo "Done!"
"""
def stripTimestamps(data):
lines = data.split("\n")
lines = [re.sub(r"^\[[^\]]*\] firmadyne: ", "", l) for l in lines]
return lines
def findMacChanges(data, endianness):
lines = stripTimestamps(data)
candidates = filter(lambda l: l.startswith("ioctl_SIOCSIFHWADDR"), lines)
if debug:
print("Mac Changes %r" % candidates)
result = []
if endianness == "eb":
fmt = ">I"
elif endianness == "el":
fmt = "<I"
for c in candidates:
g = re.match(r"^ioctl_SIOCSIFHWADDR\[[^\]]+\]: dev:([^ ]+) mac:0x([0-9a-f]+) 0x([0-9a-f]+)", c)
if g:
(iface, mac0, mac1) = g.groups()
m0 = struct.pack(fmt, int(mac0, 16))[2:]
m1 = struct.pack(fmt, int(mac1, 16))
mac = "%02x:%02x:%02x:%02x:%02x:%02x" % struct.unpack("BBBBBB", m0+m1)
result.append((iface, mac))
return result
# Get the network interfaces in the router, except 127.0.0.1
def findNonLoInterfaces(data, endianness):
lines = stripTimestamps(data)
candidates = filter(lambda l: l.startswith("__inet_insert_ifa"), lines) # logs for the inconfig process
if debug:
print("Candidate ifaces: %r" % candidates)
result = []
if endianness == "eb":
fmt = ">I"
elif endianness == "el":
fmt = "<I"
for c in candidates:
g = re.match(r"^__inet_insert_ifa\[[^\]]+\]: device:([^ ]+) ifa:0x([0-9a-f]+)", c)
if g:
(iface, addr) = g.groups()
addr = socket.inet_ntoa(struct.pack(fmt, int(addr, 16)))
if addr != "127.0.0.1" and addr != "0.0.0.0":
result.append((iface, addr))
return result
def findIfacesForBridge(data, brif):
lines = stripTimestamps(data)
result = []
candidates = filter(lambda l: l.startswith("br_dev_ioctl") or l.startswith("br_add_if"), lines)
for c in candidates:
for p in [r"^br_dev_ioctl\[[^\]]+\]: br:%s dev:(.*)", r"^br_add_if\[[^\]]+\]: br:%s dev:(.*)"]:
pat = p % brif
g = re.match(pat, c)
if g:
iface = g.group(1)
#we only add it if the interface is not the bridge itself
#there are images that call brctl addif br0 br0 (e.g., 5152)
if iface != brif:
result.append(iface.strip())
return result
def findVlanInfoForDev(data, dev):
lines = stripTimestamps(data)
results = []
candidates = filter(lambda l: l.startswith("register_vlan_dev"), lines)
for c in candidates:
g = re.match(r"register_vlan_dev\[[^\]]+\]: dev:%s vlan_id:([0-9]+)" % dev, c)
if g:
results.append(int(g.group(1)))
return results
def ifaceNo(dev):
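    # Extract the trailing interface number, e.g. "eth1" -> 1 and "br0" -> 0;
    # names without a numeric suffix (e.g. "lo") yield -1.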
g = re.match(r"[^0-9]+([0-9]+)", dev)
return int(g.group(1)) if g else -1
def qemuArchNetworkConfig(i, arch, n):
if not n:
if arch == "arm":
return "-device virtio-net-device,netdev=net%(I)i -netdev socket,id=net%(I)i,listen=:200%(I)i" % {'I': i}
else:
return "-net nic,vlan=%(VLAN)i -net socket,vlan=%(VLAN)i,listen=:200%(I)i" % {'I': i, 'VLAN' : i}
else:
(ip, dev, vlan, mac) = n
# newer kernels use virtio only
if arch == "arm":
return "-device virtio-net-device,netdev=net%(I)i -netdev tap,id=net%(I)i,ifname=${TAPDEV_%(I)i},script=no" % {'I': i}
else:
vlan_id = vlan if vlan else i
mac_str = "" if not mac else ",macaddr=%s" % mac
return "-net nic,vlan=%(VLAN)i%(MAC)s -net tap,vlan=%(VLAN)i,id=net%(I)i,ifname=${TAPDEV_%(I)i},script=no" % { 'I' : i, 'MAC' : mac_str, 'VLAN' : vlan_id}
def qemuNetworkConfig(arch, network):
output = []
assigned = []
for i in range(0, 4):
for j, n in enumerate(network):
# need to connect the jth emulated network interface to the corresponding host interface
if i == ifaceNo(n[1]):
output.append(qemuArchNetworkConfig(j, arch, n))
assigned.append(n)
break
# otherwise, put placeholder socket connection
if len(output) <= i:
output.append(qemuArchNetworkConfig(i, arch, None))
# find unassigned interfaces
for j, n in enumerate(network):
if n not in assigned:
# guess assignment
print("Warning: Unmatched interface: %s" % (n,))
output[j] = qemuArchNetworkConfig(j, arch, n)
assigned.append(n)
return ' '.join(output)
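# Illustrative example (hypothetical values): for a single inferred config on
# guest interface "eth1", slot ifaceNo("eth1") == 1 receives a tap-backed NIC
# and the remaining slots fall back to placeholder socket listeners, so the
# guest always sees four network interfaces.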
def buildConfig(brif, iface, vlans, macs):
#there should be only one ip
ip = brif[1]
br = brif[0]
#strip vlanid from interface name (e.g., eth2.2 -> eth2)
dev = iface.split(".")[0]
#check whether there is a different mac set
mac = None
d = dict(macs)
if br in d:
mac = d[br]
elif dev in d:
mac = d[dev]
vlan_id = None
if len(vlans):
vlan_id = vlans[0]
return (ip, dev, vlan_id, mac)
def getIP(ip):
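    """Guess a host-side IP adjacent to the guest IP: .1 becomes .2, otherwise
    the last octet is decremented (e.g. 192.168.0.50 -> 192.168.0.49)."""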
tups = [int(x) for x in ip.split(".")]
if tups[3] != 1:
tups[3] -= 1
else:
tups[3] = 2
return ".".join([str(x) for x in tups])
def startNetwork(network):
template_1 = """
TAPDEV_%(I)i=tap${IID}_%(I)i
HOSTNETDEV_%(I)i=${TAPDEV_%(I)i}
echo "Creating TAP device ${TAPDEV_%(I)i}..."
sudo tunctl -t ${TAPDEV_%(I)i} -u ${USER}
"""
template_vlan = """
echo "Initializing VLAN..."
HOSTNETDEV_%(I)i=${TAPDEV_%(I)i}.%(VLANID)i
sudo ip link add link ${TAPDEV_%(I)i} name ${HOSTNETDEV_%(I)i} type vlan id %(VLANID)i
sudo ip link set ${HOSTNETDEV_%(I)i} up
"""
template_2 = """
echo "Bringing up TAP device..."
sudo ip link set ${HOSTNETDEV_%(I)i} up
sudo ip addr add %(HOSTIP)s/24 dev ${HOSTNETDEV_%(I)i}
echo "Adding route to %(GUESTIP)s..."
sudo ip route add %(GUESTIP)s via %(GUESTIP)s dev ${HOSTNETDEV_%(I)i}
"""
output = []
for i, (ip, dev, vlan, mac) in enumerate(network):
output.append(template_1 % {'I' : i})
if vlan:
output.append(template_vlan % {'I' : i, 'VLANID' : vlan})
output.append(template_2 % {'I' : i, 'HOSTIP' : getIP(ip), 'GUESTIP': ip})
return '\n'.join(output)
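# Sketch of the generated shell for a hypothetical single-interface network
# [("192.168.0.1", "eth0", None, None)]: template_1 creates tap${IID}_0,
# template_2 brings it up with host address 192.168.0.2/24 (see getIP) and
# routes the guest IP 192.168.0.1 through it; the VLAN template is skipped
# because no vlan id is set.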
def stopNetwork(network):
template_1 = """
echo "Deleting route..."
sudo ip route flush dev ${HOSTNETDEV_%(I)i}
echo "Bringing down TAP device..."
sudo ip link set ${TAPDEV_%(I)i} down
"""
template_vlan = """
echo "Removing VLAN..."
sudo ip link delete ${HOSTNETDEV_%(I)i}
"""
template_2 = """
echo "Deleting TAP device ${TAPDEV_%(I)i}..."
sudo tunctl -d ${TAPDEV_%(I)i}
"""
output = []
for i, (ip, dev, vlan, mac) in enumerate(network):
output.append(template_1 % {'I' : i})
if vlan:
output.append(template_vlan % {'I' : i})
output.append(template_2 % {'I' : i})
return '\n'.join(output)
def insert_ip(iid, ip):
    import psycopg2
    db = psycopg2.connect(dbname="firmware",
                          user="firmadyne",
                          password="firmadyne",
                          host="127.0.0.1")
    cur = None
    try:
        cur = db.cursor()
        # use a parameterized query instead of string concatenation to avoid
        # quoting problems and SQL injection
        cur.execute("UPDATE image SET ip=%s WHERE id=%s", (ip, iid))
        db.commit()
    except BaseException:
        traceback.print_exc()
        db.rollback()
finally:
if cur:
cur.close()
def qemuCmd(iid, network, arch, endianness):
if arch == "mips":
qemuEnvVars = ""
qemuDisk = "-drive if=ide,format=raw,file=${IMAGE}"
if endianness != "eb" and endianness != "el":
raise Exception("You didn't specify a valid endianness")
elif arch == "arm":
qemuDisk = "-drive if=none,file=${IMAGE},format=raw,id=rootfs -device virtio-blk-device,drive=rootfs"
if endianness == "el":
qemuEnvVars = "QEMU_AUDIO_DRV=none"
elif endianness == "eb":
raise Exception("armeb currently not supported")
else:
raise Exception("You didn't specify a valid endianness")
else:
raise Exception("Unsupported architecture")
# insert ip (GUEST_IP) into the database
if network:
        insert_ip(str(iid), network[0][0])
return QEMUCMDTEMPLATE % {'IID': iid,
'ARCHEND' : arch + endianness,
'START_NET' : startNetwork(network),
'STOP_NET' : stopNetwork(network),
'QEMU_DISK' : qemuDisk,
'QEMU_NETWORK' : qemuNetworkConfig(arch, network),
'QEMU_ENV_VARS' : qemuEnvVars}
def process(infile, iid, arch, endianness=None, makeQemuCmd=False, outfile=None):
brifs = []
vlans = []
data = open(infile).read()
network = set()
success = False
#find interfaces with non loopback ip addresses
ifacesWithIps = findNonLoInterfaces(data, endianness)
#find changes of mac addresses for devices
macChanges = findMacChanges(data, endianness)
print("Interfaces: %r" % ifacesWithIps)
deviceHasBridge = False
for iwi in ifacesWithIps:
#find all interfaces that are bridged with that interface
brifs = findIfacesForBridge(data, iwi[0])
if debug:
print("brifs for %s %r" % (iwi[0], brifs))
for dev in brifs:
#find vlan_ids for all interfaces in the bridge
vlans = findVlanInfoForDev(data, dev)
#create a config for each tuple
network.add((buildConfig(iwi, dev, vlans, macChanges)))
deviceHasBridge = True
#if there is no bridge just add the interface
if not brifs and not deviceHasBridge:
vlans = findVlanInfoForDev(data, iwi[0])
network.add((buildConfig(iwi, iwi[0], vlans, macChanges)))
ips = set()
pruned_network = []
for n in network:
if n[0] not in ips:
ips.add(n[0])
pruned_network.append(n)
else:
if debug:
print("duplicate ip address for interface: ", n)
if makeQemuCmd:
qemuCommandLine = qemuCmd(iid, pruned_network, arch, endianness)
if qemuCommandLine:
success = True
if outfile:
with open(outfile, "w") as out:
out.write(qemuCommandLine)
os.chmod(outfile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
else:
print(qemuCommandLine)
return success
def archEnd(value):
arch = None
end = None
tmp = value.lower()
if tmp.startswith("mips"):
arch = "mips"
elif tmp.startswith("arm"):
arch = "arm"
if tmp.endswith("el"):
end = "el"
elif tmp.endswith("eb"):
end = "eb"
return (arch, end)
def main():
infile = None
makeQemuCmd = False
iid = None
outfile = None
arch = None
endianness = None
(opts, argv) = getopt.getopt(sys.argv[1:], 'f:i:S:a:oqd')
for (k, v) in opts:
if k == '-f':
infile = v
if k == '-d':
global debug
debug += 1
if k == '-q':
makeQemuCmd = True
if k == '-i':
iid = int(v)
if k == '-S':
VMDIR = v
if k == '-o':
outfile = True
if k == '-a':
(arch, endianness) = archEnd(v)
if not arch or not endianness:
raise Exception("Either arch or endianness not found try mipsel/mipseb/armel/armeb")
if not infile and iid:
infile = "%s/%i/qemu.initial.serial.log" % (VMDIR, iid)
if outfile and iid:
outfile = """%s/%i/run.sh""" % (VMDIR, iid)
if debug:
print("processing %i" % iid)
if infile:
process(infile, iid, arch, endianness, makeQemuCmd, outfile)
if __name__ == "__main__":
main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Error rewriting logic.
Contains the functions responsible for rewriting tracebacks of errors raised
in AutoGraph (AG) code to refer to user written code, so that errors only refer
to the original user code.
When 'user code' is used in comments it refers to the original source code that
the user wrote and is converting using AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import logging
import sys
import traceback
from tensorflow.contrib.autograph.pyct.origin_info import CodeLocation
from tensorflow.python.framework import errors_impl
from tensorflow.python.util import tf_inspect
class GraphConstructionError(Exception):
"""Error for graph construction errors from AutoGraph generated code."""
def __init__(self, original_error, custom_traceback):
self.original_error = original_error
self.custom_traceback = custom_traceback
super(GraphConstructionError, self).__init__()
def __str__(self):
traceback_str = ''.join(traceback.format_list(self.custom_traceback))
return ('Traceback (most recent call last):\n' + traceback_str + '\n' + str(
self.original_error) + '\n')
class TfRuntimeError(Exception):
"""Error wrapper for runtime errors raised by AutoGraph generated code."""
def __init__(self, op_name, op_message, custom_traceback):
self.op_name = op_name
self.op_message = op_message
self.custom_traceback = custom_traceback
super(TfRuntimeError, self).__init__()
def __str__(self):
message = '%s\n\nCaused by op %r, defined at:\n' % (self.op_message,
self.op_name)
return message + ''.join(traceback.format_list(self.custom_traceback))
def _rewrite_frame(source_map, cleaned_traceback, stack_frame_indices):
"""Rewrites the stack frames at the given indices using the given source map.
Args:
source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
AG generated code.
cleaned_traceback: List[Tuple[text, text, text, text]], the current
traceback.
stack_frame_indices: Iterable[Int], frame indices to possibly rewrite if
there are matching source mapping keys.
Returns:
None
"""
for frame_index in stack_frame_indices:
# (file_path, line number, function name, code)
file_path, line_number, _, _ = cleaned_traceback[frame_index]
source_map_key = CodeLocation(file_path=file_path, line_number=line_number)
found_mapping = source_map_key in source_map
if found_mapping:
cleaned_traceback[frame_index] = source_map[source_map_key].as_frame()
# TODO(znado): Make more robust to name changes in the rewriting logic.
def _remove_rewrite_frames(tb):
"""Remove stack frames containing the error rewriting logic."""
cleaned_tb = []
for f in tb:
if 'ag__.rewrite_graph_construction_error' not in f[3]:
cleaned_tb.append(f)
return cleaned_tb
def rewrite_graph_construction_error(source_map):
"""Rewrites errors raised by non-AG APIs inside AG generated code.
Meant to be called from the try/except block inside each AutoGraph generated
function. Only rewrites the traceback frames corresponding to the function
that this is called from. When we raise a GraphConstructionError at the end
it is then caught by calling functions, where they can be responsible for
rewriting their own frames.
Args:
source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
AG generated code.
Raises:
GraphConstructionError: The rewritten underlying error.
Exception: The underlying error, if it could not be rewritten.
"""
error_info = sys.exc_info()
_, original_error, e_traceback = error_info
assert original_error is not None
try:
_, _, _, func_name, _, _ = tf_inspect.stack()[1]
# The latest function call is added to the beginning of a traceback, but
# when rewriting the traceback of multiple function calls the previous
# functions' except blocks may have already rewritten their own frames so
# we want to copy over all of the previous frames. We may have rewritten
# previous frames only if the error is a GraphConstructionError.
if isinstance(original_error, GraphConstructionError):
cleaned_traceback = traceback.extract_tb(e_traceback)
previous_traceback = original_error.custom_traceback
cleaned_traceback = [cleaned_traceback[0]] + previous_traceback
else:
cleaned_traceback = traceback.extract_tb(e_traceback)
cleaned_traceback = _remove_rewrite_frames(cleaned_traceback)
current_frame_indices = []
# This code is meant to be called from the try/except block that wraps a
# function body. Here we look for all frames that came from the function
# that this wraps, look for any matching line numbers in the source
# mapping, and then rewrite them if matches are found.
for fi, frame in enumerate(cleaned_traceback):
_, _, frame_func_name, _ = frame
if frame_func_name == func_name:
current_frame_indices.append(fi)
break
if current_frame_indices:
_rewrite_frame(source_map, cleaned_traceback, current_frame_indices)
if isinstance(original_error, GraphConstructionError):
original_error.custom_traceback = cleaned_traceback
new_error = original_error
else:
new_error = GraphConstructionError(original_error, cleaned_traceback)
except Exception:
logging.exception('Error while rewriting AutoGraph error:')
raise original_error
else:
raise new_error
finally:
# Addresses warning https://docs.python.org/2/library/sys.html#sys.exc_info.
del e_traceback
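# Sketch of the call pattern described in the docstring above (hypothetical
# generated code; `ag__` is the name the generated code uses for this module,
# and the source-map argument name here is made up):
#
#   def converted_fn(...):
#     try:
#       ...  # graph construction
#     except Exception:
#       ag__.rewrite_graph_construction_error(source_map)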
def rewrite_tf_runtime_error(error, source_map):
"""Rewrites TensorFlow runtime errors raised by ops created in AG code.
Args:
    error: errors_impl.OpError, a TensorFlow error that will have its
      traceback rewritten.
source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
AG generated code.
Returns:
A TfRuntimeError with a traceback rewritten according to the given
source mapping.
"""
# Check for cases where we leave a user method and re-enter it in the
# traceback. This is done by looking at the function names when the
# filenames are from any files the user code is in. If we find a case where
# we return to a user method after leaving it then we cut out the frames in
# between because we assume this means these in between frames are from
# internal AutoGraph code that shouldn't be included.
#
# An example of this is:
#
# File "file1.py", line 57, in my_func
# ...
# File "control_flow_ops.py", line 231, in cond
# ...
# File "control_flow_ops.py", line 1039, in inner_cond
# ...
# File "file1.py", line 68, in my_func
# ...
#
# Where we would remove the control_flow_ops.py frames because we re-enter
# my_func in file1.py.
#
# The source map keys are (file_path, line_number) so get the set of all user
# file_paths.
try:
all_user_files = set(k.file_path for k in source_map)
cleaned_traceback = []
last_user_frame_index = None
last_user_user_file_path = None
last_user_user_fn_name = None
for fi, frame in enumerate(error.op.traceback):
frame_file_path, frame_line_number, _, _ = frame
src_map_key = CodeLocation(
file_path=frame_file_path, line_number=frame_line_number)
if frame_file_path in all_user_files:
if src_map_key in source_map:
original_fn_name = source_map[src_map_key].function_name
if (last_user_frame_index is not None and
last_user_user_file_path == frame_file_path):
if last_user_user_fn_name == original_fn_name:
cleaned_traceback = cleaned_traceback[:last_user_frame_index]
else:
cleaned_traceback = cleaned_traceback[:last_user_frame_index + 1]
last_user_user_fn_name = original_fn_name
else:
last_user_user_fn_name = None
last_user_frame_index = fi
last_user_user_file_path = frame_file_path
cleaned_traceback.append(frame)
for fi in range(len(cleaned_traceback)):
_rewrite_frame(source_map, cleaned_traceback, [fi])
op_name = error.op.name
op_message = error.message
rewritten_error = TfRuntimeError(op_name, op_message, cleaned_traceback)
return rewritten_error
except Exception: # pylint: disable=broad-except
logging.exception('Error while rewriting AutoGraph error:')
return error
# TODO(znado): Add arg to enable different levels of error rewriting.
@contextlib.contextmanager
def improved_errors(converted_function):
"""Context manager that rewrites runtime errors.
This context manager will rewrite runtime errors so that their traceback
is relative to the original code before conversion.
Use with the output of to_graph, and wrap the execution of respective ops.
Example:
converted_my_func = ag.to_graph(my_func)
ops = converted_my_func(...)
with ag.improved_errors(converted_my_func):
sess.run(ops)
Args:
converted_function: Callable[..., Any], the output of a to_graph call
Yields:
None
Raises:
TfRuntimeError: if any OpError originates in the converted code, it will
be wrapped into a TfRuntimeError
ValueError: If converted_function is not generated by AutoGraph
"""
if (getattr(converted_function, 'ag_source_map', None) is None or
not converted_function.ag_source_map):
raise ValueError(
'converted_function must be the result of an autograph.to_graph call')
try:
yield
except errors_impl.OpError as e:
raise rewrite_tf_runtime_error(e, converted_function.ag_source_map)
|
|
"""Utility functions for making arrangments / piles of objects.
Main functions are :func:`make_object_arrangement` and
:func:`make_object_pile`.
"""
from klampt import *
from klampt.math import vectorops,so3,se3
from klampt.model import collide
import random
import math
from typing import Union,List,Tuple,Sequence,Callable,Any
def _get_bound(objects):
"""Obtains the tight outer bounds of the object(s) at their current transforms, in world coordinates."""
if hasattr(objects,'__iter__'):
bbs = [_get_bound(o) for o in objects]
if len(bbs) == 1:
return bbs[0]
return collide.bb_union(*bbs)
else:
return objects.geometry().getBBTight()
def xy_randomize(obj,bmin,bmax):
"""Randomizes the xy position and z orientation of an object inside of a bounding box bmin->bmax.
Assumes the bounding box is large enough to hold the object in any orientation.
"""
R,t = obj.getTransform()
R = so3.mul(so3.rotation([0,0,1],random.uniform(0,math.pi*2)),R)
t[0] = 0
t[1] = 0
obj.setTransform(R,t)
obmin,obmax = obj.geometry().getBB()
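    # obmin/obmax bound the object about the zeroed origin, so sampling the
    # translation in [bmin - obmin, bmax - obmax] keeps the whole bounding
    # box inside [bmin, bmax].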
t[0] = random.uniform(bmin[0]-obmin[0],bmax[0]-obmax[0])
t[1] = random.uniform(bmin[1]-obmin[1],bmax[1]-obmax[1])
obj.setTransform(R,t)
def xy_jiggle(world,objects,fixed_objects,bmin,bmax,iters,randomize=True,
verbose=0):
"""Jiggles the objects' x-y positions within the range bmin - bmax, and randomizes orientation about the z
axis until the objects are collision free. A list of fixed objects (fixed_objects) may be given as well.
    The indices of objects for which no collision-free resolution is found are returned.
"""
if randomize:
for obj in objects:
xy_randomize(obj,bmin,bmax)
object_geometries = [o.geometry() for o in objects]
fixed_geometries = [o.geometry() for o in fixed_objects]
inner_iters = 10
while iters > 0:
numConflicts = [0]*len(objects)
for (i,j) in collide.self_collision_iter(object_geometries):
numConflicts[i] += 1
numConflicts[j] += 1
for (i,j) in collide.group_collision_iter(object_geometries,fixed_geometries):
numConflicts[i] += 1
amax = max((c,i) for (i,c) in enumerate(numConflicts))[1]
cmax = numConflicts[amax]
if cmax == 0:
#conflict free
return []
if verbose:
print(cmax,"conflicts with object",objects[amax].getName())
other_geoms = [o.geometry() for o in objects[:amax]+objects[amax+1:]+fixed_objects]
nc = 0
for it in range(inner_iters):
xy_randomize(objects[amax],bmin,bmax)
nc = sum([1 for p in collide.group_collision_iter([objects[amax].geometry()],other_geoms)])
if nc < cmax:
break
iters-=1
if verbose:
print("Now",nc,"conflicts with object",objects[amax].getName())
numConflicts = [0]*len(objects)
for (i,j) in collide.self_collision_iter(object_geometries):
numConflicts[i] += 1
numConflicts[j] += 1
for (i,j) in collide.group_collision_iter(object_geometries,fixed_geometries):
numConflicts[i] += 1
removed = []
while max(numConflicts) > 0:
amax = max((c,i) for (i,c) in enumerate(numConflicts))[1]
cmax = numConflicts[amax]
if verbose:
print("Unable to find conflict-free configuration for object",objects[amax].getName(),"with",cmax,"conflicts")
removed.append(amax)
#revise # of conflicts -- this could be faster, but whatever...
numConflicts = [0]*len(objects)
for (i,j) in collide.self_collision_iter(object_geometries):
if i in removed or j in removed:
continue
numConflicts[i] += 1
numConflicts[j] += 1
for (i,j) in collide.group_collision_iter(object_geometries,fixed_geometries):
if i in removed:
continue
numConflicts[i] += 1
return removed
def make_object_arrangement(world : WorldModel, container : Union[RigidObjectModel,TerrainModel],
objects : Sequence[RigidObjectModel], container_wall_thickness=0.01,max_iterations=100,
remove_failures=False) -> WorldModel:
"""For a given container and a list of objects in the world, places the objects inside the container with randomized x-y locations
and z orientations so that they are initially collision free and on the bottom of the container.
Args:
world (WorldModel): the world containing the objects and obstacles
container: the container RigidObjectModel / TerrainModel in world into
which objects should be spawned. Assumed axis-aligned.
objects (list of RigidObjectModel): a list of RigidObjects in the world,
at arbitrary locations. They are placed in order.
container_wall_thickness (float, optional): a margin subtracted from
the container's outer dimensions into which the objects are spawned.
max_iterations (int, optional): the maximum number of iterations used
for sampling object initial poses
remove_failures (bool): if True, then instead of returning None on
failure, the objects that fail placement are removed from the world.
Returns:
WorldModel: if successful, the positions of objects in world are
modified and world is returned. On failure, None is returned.
.. note::
Since world is modified in-place, if you wish to make multiple worlds with
piles of the same objects, you should use world.copy() to store the
configuration of the objects. You may also wish to randomize the object
ordering using random.shuffle(objects) between instances.
"""
container_outer_bb = _get_bound(container)
container_inner_bb = (vectorops.add(container_outer_bb[0],[container_wall_thickness]*3),vectorops.sub(container_outer_bb[1],[container_wall_thickness]*3))
collision_margin = 0.0025
for object in objects:
#make sure the bottom of the object touches the bottom of the container
obb = _get_bound(object)
zmin = obb[0][2]
R,t = object.getTransform()
t[2] = container_inner_bb[0][2] - zmin + collision_margin
object.setTransform(R,t)
failures = xy_jiggle(world,objects,[container],container_inner_bb[0],container_inner_bb[1],max_iterations,verbose=1)
if len(failures) > 0:
if remove_failures:
removeIDs = [objects[i].index for i in failures]
for i in sorted(removeIDs)[::-1]:
world.remove(world.rigidObject(i))
else:
return None
return world
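# A minimal usage sketch (hypothetical file and object names; assumes the
# world file defines a box-like terrain "container" plus some rigid objects):
#
#   world = WorldModel()
#   world.readFile("my_world.xml")
#   container = world.terrain("container")
#   objects = [world.rigidObject(i) for i in range(world.numRigidObjects())]
#   if make_object_arrangement(world, container, objects) is None:
#       print("could not place all objects in the container")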
def make_object_pile(world : WorldModel, container : Union[RigidObjectModel,TerrainModel],
objects : Sequence[RigidObjectModel],container_wall_thickness=0.01,randomize_orientation=True,
visualize=False,verbose=0) -> Tuple[WorldModel,Simulator]:
"""For a given container and a list of objects in the world, drops the
objects inside the container and simulates until stable.
Args:
world (WorldModel): the world containing the objects and obstacles
container: the container RigidObjectModel / TerrainModel in world into
which objects should be spawned. Assumed axis-aligned.
objects (list of RigidObjectModel): a list of RigidObjectModels in the
world, at arbitrary locations. They are placed in order.
container_wall_thickness (float, optional): a margin subtracted from
the container's outer dimensions into which the objects are spawned.
        randomize_orientation (bool or str, optional): if True, the
            orientations of the objects are completely randomized. If 'z',
            only the z orientation is randomized. If False or None, the
            orientations are unchanged.
visualize (bool, optional): if True, pops up a visualization window to
show the progress of the pile
verbose (int, optional): if > 0, prints progress of the pile.
Returns:
A pair (world,sim), containing
- world (WorldModel): the original world
        - sim (Simulator): the Simulator instance at the state used to obtain
          the stable placement of the objects (note: the current code settles
          objects via klampt.sim.settle and returns None for sim).
.. note::
If you wish to make multiple worlds with piles of the same objects, you
may wish to randomize the object ordering using
``random.shuffle(objects)`` between instances.
"""
container_outer_bb = _get_bound(container)
container_inner_bb = (vectorops.add(container_outer_bb[0],[container_wall_thickness]*3),vectorops.sub(container_outer_bb[1],[container_wall_thickness]*3))
spawn_area = (container_inner_bb[0][:],container_inner_bb[1][:])
collision_margin = 0.0025
"""
sim = Simulator(world)
sim.setSetting("maxContacts","20")
sim.setSetting("adaptiveTimeStepping","0")
Tfar = (so3.identity(),[0,0,-100000])
for object in objects:
R,t = object.getTransform()
object.setTransform(R,Tfar[1])
sim.body(object).setTransform(*Tfar)
sim.body(object).enable(False)
if verbose:
print("Spawn area",spawn_area)
"""
"""
if visualize:
from klampt import vis
from klampt.model import config
import time
oldwindow = vis.getWindow()
if oldwindow == None:
vis.createWindow()
oldwindow = vis.getWindow()
newwindow = vis.createWindow("make_object_pile dynamic visualization")
vis.setWindow(newwindow)
visworld = world.copy()
vis.add("world",visworld)
vis.addText("time","Time: 0",position=(20,20))
config.setConfig(visworld,config.getConfig(world))
vis.show()
"""
for index in range(len(objects)):
#always spawn above the current height of the pile
if index > 0:
objects_bound = _get_bound(objects[:index])
if verbose:
print("Existing objects bound:",objects_bound)
zshift = max(0.0,objects_bound[1][2] - spawn_area[0][2])
spawn_area[0][2] += zshift
spawn_area[1][2] += zshift
object = objects[index]
R0,t0 = object.getTransform()
object.setTransform(R0,[0,0,0])
obb = _get_bound(object)
zmin = obb[0][2]
feasible = False
for sample in range(1000):
R,t = R0[:],t0[:]
if randomize_orientation == True:
R = so3.sample()
t[2] = spawn_area[1][2] - zmin + collision_margin + 0.2
object.setTransform(R,t)
xy_randomize(object,spawn_area[0],spawn_area[1])
if verbose:
print("Sampled position of",object.getName(),object.getTransform()[1])
if not randomize_orientation:
_,t = object.getTransform()
object.setTransform(R,t)
#object spawned, now settle
feasible = True
break
"""
sobject = sim.body(object)
sobject.enable(True)
sobject.setTransform(*object.getTransform())
res = sim.checkObjectOverlap()
if len(res[0]) == 0:
feasible = True
#get it low as possible without overlapping
R,t = object.getTransform()
for lower in range(100):
sobject.setTransform(R,vectorops.add(t,[0,0,-(lower+1)*0.01]))
res = sim.checkObjectOverlap()
if len(res[0]) != 0:
if verbose:
print("Terminated lowering at",lower,"cm lower")
sobject.setTransform(R,vectorops.add(t,[0,0,-lower*0.01]))
res = sim.checkObjectOverlap()
break
sim.updateWorld()
break
"""
if not feasible:
if verbose:
print("Failed to place object",object.getName())
return None
"""
if visualize:
vis.lock()
config.setConfig(visworld,config.getConfig(world))
vis.unlock()
time.sleep(0.1)
"""
if verbose:
print("Beginning to simulate")
from klampt.sim import settle
for i,obj in enumerate(objects):
if i > 0:
objects_bound = _get_bound(objects[:i])
zothers = objects_bound[1][2]
else:
zothers = container_inner_bb[0][2]
R,t = obj.getTransform()
obj.setTransform(R,[0,0,0])
obb = _get_bound(obj)
zmin = obb[0][2]
t[2] = zothers - zmin + collision_margin
obj.setTransform(R,t)
print("Simulating object",obj.getName())
xform,touched = settle.settle(world,obj,debug=visualize)
obj.setTransform(*xform)
return (world,None)
"""
#start letting everything fall
for firstfall in range(10):
sim.simulate(0.01)
if visualize:
vis.lock()
config.setConfig(visworld,config.getConfig(world))
vis.unlock()
time.sleep(0.01)
maxT = 5.0
dt = 0.01
t = 0.0
wdamping = -0.01
vdamping = -0.1
while t < maxT:
settled = True
maxw = 0
maxv = 0
for object in objects:
sobject = sim.body(object)
if not collide.bb_contains((container_outer_bb[0][:2],container_outer_bb[1][:2]),object.getTransform()[1][:2]):
if verbose:
print("Object",object.getName(),"fell out of container area")
continue
if object.getTransform()[1][2] + 1 < container_outer_bb[0][2]:
if verbose:
print("Object",object.getName(),"fell out of container area")
continue
w,v = sobject.getVelocity()
maxw = max(maxw,vectorops.norm(w))
maxv = max(maxv,vectorops.norm(v))
sobject.applyWrench(vectorops.mul(v,vdamping),vectorops.mul(w,wdamping))
if vectorops.norm(w) + vectorops.norm(v) > 1e-4:
#settled
settled=False
break
if settled:
break
if visualize:
t0 = time.time()
sim.simulate(dt)
if visualize:
vis.lock()
vis.addText("time","Time: %.3f"%(t,),position=(20,20))
vis.addText("velocities","Ang vel %.3f, vel %.3f"%(maxw,maxv),position=(20,35))
config.setConfig(visworld,config.getConfig(world))
vis.unlock()
time.sleep(max(0.0,dt-(time.time()-t0)))
t += dt
if visualize:
vis.show(False)
vis.setWindow(oldwindow)
return (world,sim)
"""
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 13:16:19 2017
@author: Ashiv Dhondea
"""
import AstroFunctions as AstFn
import GeometryFunctions as GF
import TimeHandlingFunctions as THF
import math
import numpy as np
import datetime as dt
import pytz
import aniso8601
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
params = {'text.latex.preamble': r'\usepackage{amsmath} \usepackage{amssymb}'}
plt.rcParams.update(params)
from matplotlib.offsetbox import AnchoredText
import matplotlib as mpl
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
# --------------------------------------------------------------------------- #
with open('main_meerkat_radar_parameters_doreen.txt') as fp:
for line in fp:
if 'HPBW Tx' in line:
good_index = line.index('=')
beamwidth_tx = float(line[good_index+1:-1]);
fp.close();
# --------------------------------------------------------------------------- #
print('Loading data')
timevec = np.load('main_057_iss_05_timevec.npy'); # timevector
y_sph_tx = np.load('main_057_iss_05_y_sph_tx.npy'); # spherical measurement vectors in Tx frame
# discretization step length/PRF
delta_t = timevec[2]-timevec[1];
# time stamps
experiment_timestamps = [None]*len(timevec)
index=0;
with open('main_057_iss_05_experiment_timestamps.txt') as fp:
for line in fp:
modified_timestring = line[:-8];
experiment_timestamps[index] = aniso8601.parse_datetime(modified_timestring);
index+=1;
fp.close();
norad_id = '25544'
# --------------------------------------------------------------------------- #
# Bistatic Radar characteristics
# beamwidth of transmitter and receiver
beamwidth_tx = math.radians(beamwidth_tx );
# --------------------------------------------------------------------------- #
#az_tx_nice = GF.fnSmoothe_AngleSeries(y_sph_tx[2,:],2*math.pi);
time_index_rx = np.load('main_057_iss_06_time_index_rx.npy');
beam_centre_candidates = np.load('main_057_iss_07_beam_centre_candidates.npy');
tx_bw_bounds = np.load('main_057_iss_07_tx_bw_bounds.npy');
tx_bins_length = np.load('main_057_iss_07_tx_bins_length.npy');
tx_beam_indices = np.load('main_057_iss_07_tx_beam_indices.npy');
tx_beam_indices_best = np.load('main_057_iss_07_tx_beam_indices_best.npy');
overall_bound = np.load('main_057_iss_07_overall_bound.npy');
# --------------------------------------------------------------------------- #
# sort out a few variables
tx_bw_time_max = tx_beam_indices_best[1];
tx_beam_index_down = tx_beam_indices_best[0];
tx_beam_index_up = tx_beam_indices_best[2];
tx_bw_start_index = tx_bw_bounds[0];
tx_bw_end_index = tx_bw_bounds[1];
overall_bound_lower = overall_bound[0]
overall_bound_upper = overall_bound[1]
print('tx beam index down, max and up')
print(tx_beam_index_down)
print(tx_bw_time_max)
print(tx_beam_index_up)
beam_centre = np.degrees(beam_centre_candidates[:,tx_bw_time_max]);
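# Flag which candidate boresight points fall inside the circle of radius half
# the 3 dB beamwidth around the chosen beam centre; the dwell time printed
# below is the span between the earliest and latest such points times delta_t.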
withincircle = np.empty([len(timevec)],dtype=bool);
for i in range(tx_beam_index_down,tx_beam_index_up+1):
testpt = np.degrees(beam_centre_candidates[:,i]);
withincircle[i] = GF.fnCheck_IsInCircle(beam_centre,0.5*math.degrees(beamwidth_tx),testpt);
early = np.where(withincircle[tx_beam_index_down:tx_beam_index_up] == True);
earliest_pt = early[0][0] + tx_beam_index_down;
latest_pt = early[0][-1] + tx_beam_index_down
print('earliest and latest points for Tx beam')
print(earliest_pt)
print(latest_pt)
print('actual dwell time')
print((latest_pt - earliest_pt)*delta_t)
#el_tx_argmax = np.argmax(y_sph_tx[1,time_index_rx[0]:time_index_rx[1]+1]) + time_index_rx[0];
tx_beam_circ_index = np.array([earliest_pt,tx_bw_time_max,latest_pt],dtype=np.int64);
np.save('main_057_iss_08_tx_beam_circ_index.npy',tx_beam_circ_index)
title_string1 = str(experiment_timestamps[tx_bw_start_index].isoformat())+'/'+str(experiment_timestamps[tx_bw_end_index].isoformat());
# --------------------------------------------------------------------------- #
# Find the epoch of the relevant data points
plot_lim = 1.0;
plt_start_index = tx_beam_index_down - int(plot_lim/delta_t)
plt_end_index = tx_beam_index_up+1 + int(plot_lim/delta_t)
start_epoch_test = THF.fnCalculate_DatetimeEpoch(timevec,plt_start_index,experiment_timestamps[0]);
end_epoch_test = THF.fnCalculate_DatetimeEpoch(timevec,plt_end_index,experiment_timestamps[0]);
tx_beam_index_down_epoch = THF.fnCalculate_DatetimeEpoch(timevec,tx_beam_index_down,experiment_timestamps[0]);
tx_beam_index_up_epoch = THF.fnCalculate_DatetimeEpoch(timevec,tx_beam_index_up,experiment_timestamps[0]);
tx_bw_time_max_epoch = THF.fnCalculate_DatetimeEpoch(timevec,tx_bw_time_max,experiment_timestamps[0]);
end_epoch_test = end_epoch_test.replace(tzinfo=None);
start_epoch_test = start_epoch_test.replace(tzinfo=None)
title_string = str(start_epoch_test.isoformat())+'Z/'+str(end_epoch_test.isoformat())+'Z';
tx_beam_index_down_epoch = tx_beam_index_down_epoch.replace(tzinfo=None);
tx_beam_index_up_epoch = tx_beam_index_up_epoch.replace(tzinfo=None)
tx_bw_time_max_epoch = tx_bw_time_max_epoch.replace(tzinfo=None)
earliest_pt_epoch = THF.fnCalculate_DatetimeEpoch(timevec,earliest_pt,experiment_timestamps[0]);
latest_pt_epoch = THF.fnCalculate_DatetimeEpoch(timevec,latest_pt,experiment_timestamps[0]);
earliest_pt_epoch= earliest_pt_epoch.replace(tzinfo=None)
latest_pt_epoch= latest_pt_epoch.replace(tzinfo=None)
# --------------------------------------------------------------------------- #
beam_centre = np.degrees(np.array([y_sph_tx[2,tx_bw_time_max],y_sph_tx[1,tx_bw_time_max]],dtype=np.float64));
beam_xx = np.array([beam_centre[0] - 0.5*math.degrees(beamwidth_tx),beam_centre[0] + 0.5*math.degrees(beamwidth_tx)]);
beam_yy = np.array([beam_centre[1] - 0.5*math.degrees(beamwidth_tx),beam_centre[1] + 0.5*math.degrees(beamwidth_tx)]);
xx = np.array([beam_xx[0],beam_xx[1]],dtype=np.float64)
yy_below = np.array([beam_yy[0],beam_yy[0]],dtype=np.float64);
yy_above = np.array([beam_yy[1],beam_yy[1]],dtype=np.float64);
label_down = np.arange(np.degrees(y_sph_tx[2,tx_bw_time_max]), xx[1] + 0.1 + 0.05 , 0.05);
label_down_y = yy_below[1]*np.ones(len(label_down),dtype=np.float64);
label_up_y = yy_above[0]*np.ones(len(label_down),dtype=np.float64);
xx_below = np.array([beam_xx[0],beam_xx[0]],dtype=np.float64);
xx_above = np.array([beam_xx[1],beam_xx[1]],dtype=np.float64);
label_up = np.arange(beam_yy[0]-0.05,np.degrees(y_sph_tx[1,tx_bw_time_max])+0.04, 0.04);
label_up_x = xx_below[1]*np.ones(len(label_up),dtype=np.float64);
label_down_x = xx_above[0]*np.ones(len(label_up),dtype=np.float64);
numpts=360
circpts = GF.fnCalculate_CircumferencePoints(beam_centre,0.5*math.degrees(beamwidth_tx),numpts)
# --------------------------------------------------------------------------- #
#time_index = np.load('main_057_iss_05_time_index.npy')
ground_station='Tx';
#fig = plt.figure(6);
#ax = fig.gca()
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
#fig.suptitle(r"\textbf{Elevation angle to object %s from %s }" %(norad_id,ground_station),fontsize=12);
#plt.plot(timevec[time_index_rx[0]:time_index_rx[1]+1],np.rad2deg(y_sph_tx[1,time_index_rx[0]:time_index_rx[1]+1]))
#plt.axvspan(timevec[overall_bound[0]],timevec[overall_bound[1]],facecolor='green',alpha=0.2);
#plt.scatter(timevec[tx_beam_index_down],math.degrees(y_sph_tx[1,tx_beam_index_down]),s=50,marker=r"$\Box$",facecolors='none', edgecolors='crimson',label=r"%s" %str(tx_beam_index_down_epoch.isoformat()+'Z'));
#plt.scatter(timevec[tx_bw_time_max],math.degrees(y_sph_tx[1,tx_bw_time_max]),s=50,marker=r"$\oplus$",facecolors='none', edgecolors='purple',label=r"%s" %str(tx_bw_time_max_epoch.isoformat()+'Z'));
#plt.scatter(timevec[tx_beam_index_up],math.degrees(y_sph_tx[1,tx_beam_index_up]),s=50,marker=r"$\circledcirc$",facecolors='none', edgecolors='darkgreen',label=r"%s" %str(tx_beam_index_up_epoch.isoformat()+'Z'));
#
#plt.legend(loc='center left',title=r"\textbf{Timestamps}",bbox_to_anchor=(1, 0.5),
# fancybox=True, shadow=True)
#
#ax.set_ylabel(r"Elevation angle $\theta~[\mathrm{^\circ}]$")
#ax.set_xlabel(r'Time $t~[\mathrm{s}]$');
#at = AnchoredText(r"$\Delta_t = %f ~\mathrm{s}$" %delta_t,prop=dict(size=6), frameon=True,loc=4)
#at.patch.set_boxstyle("round,pad=0.05,rounding_size=0.2")
#ax.add_artist(at)
#plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black')
#fig.savefig('main_057_iss_08_el.pdf',bbox_inches='tight',pad_inches=0.11,dpi=20)
print(math.degrees(beamwidth_tx))
diff = math.degrees(y_sph_tx[1,tx_beam_index_up] - y_sph_tx[1,tx_beam_index_down])
print('diff')
print(diff)
diff_up = math.degrees(y_sph_tx[1,tx_beam_index_up] - y_sph_tx[1,tx_bw_time_max])
print('diff up')
print(diff_up)
diff_down = math.degrees(y_sph_tx[1,tx_bw_time_max] - y_sph_tx[1,tx_beam_index_down])
print('diff down')
print(diff_down)
diff_az = math.degrees(y_sph_tx[2,tx_beam_index_up] - y_sph_tx[2,tx_beam_index_down])
print('diff az')
print(diff_az)
diff_up_az = math.degrees(y_sph_tx[2,tx_beam_index_up] - y_sph_tx[2,tx_bw_time_max])
print('diff up az')
print(diff_up_az)
diff_down_az = math.degrees(y_sph_tx[2,tx_bw_time_max] - y_sph_tx[2,tx_beam_index_down])
print('diff down az')
print(diff_down_az)
# --------------------------------------------------------------------------- #
fig = plt.figure(1);
ax = fig.gca()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.axis('equal')
fig.suptitle(r"\textbf{Tx beam placement for object %s trajectory during %s}" %(norad_id,title_string),fontsize=12);
plt.plot(np.degrees(y_sph_tx[2,plt_start_index :tx_beam_index_down]),np.degrees(y_sph_tx[1,plt_start_index :tx_beam_index_down]),color='blue',linestyle='dashed')
plt.plot(np.degrees(y_sph_tx[2,tx_beam_index_up+1:plt_end_index]),np.degrees(y_sph_tx[1,tx_beam_index_up+1:plt_end_index]),color='blue',linestyle='dashed')
plt.plot(np.degrees(y_sph_tx[2,tx_beam_index_down:tx_beam_index_up]),np.degrees(y_sph_tx[1,tx_beam_index_down:tx_beam_index_up]),color='blue')
plt.scatter(np.degrees(y_sph_tx[2,tx_beam_index_down]),np.degrees(y_sph_tx[1,tx_beam_index_down]),s=50,marker=r"$\diamond$",facecolors='none',edgecolors='red',label=r"%s" %str(tx_beam_index_down_epoch.isoformat())+'Z');
plt.scatter(np.degrees(y_sph_tx[2,tx_bw_time_max]),np.degrees(y_sph_tx[1,tx_bw_time_max]),s=50,marker=r"$\otimes$",facecolors='none',edgecolors='green',label=r"%s" %str(tx_bw_time_max_epoch.isoformat())+'Z')
plt.scatter(np.degrees(y_sph_tx[2,tx_beam_index_up]),np.degrees(y_sph_tx[1,tx_beam_index_up]),s=50,marker=r"$\boxplus$",facecolors='none', edgecolors='magenta',label=r"%s" %str(tx_beam_index_up_epoch.isoformat())+'Z')
ax.fill_between(xx,yy_below,yy_above,facecolor='gray',alpha=0.2);
#plt.annotate(
# '', xy=(xx[1]+0.1, yy_below[1] ), xycoords='data',
# xytext=(xx[1]+0.1,yy_above[0]), textcoords='data',
# arrowprops={'arrowstyle': '<->'})
#
#plt.text(xx[1]+0.13,yy_below[1]+math.degrees(beamwidth_tx*0.5), r"$\Theta_{3~\mathrm{dB}}^{\text{Tx}} =%.3f \mathrm{^\circ}$" %math.degrees(beamwidth_tx))
plt.annotate(
'', xy=(xx[0],yy_below[0]-0.05), xycoords='data',
xytext=(xx[1],yy_below[0]-0.05), textcoords='data',
arrowprops={'arrowstyle': '<->'})
plt.text(xx[0]+0.5*(xx[1]-xx[0]), yy_below[0]-0.16,r"$\Theta_{3~\mathrm{dB}}^{\text{Tx}} =%.3f \mathrm{^\circ}$" %math.degrees(beamwidth_tx))
plt.plot(label_down,label_down_y,color='darkslategray',linestyle='dotted')
plt.plot(label_down,label_up_y,color='darkslategray',linestyle='dotted')
ax.set_xlabel(r'Azimuth angle $\psi_{\text{Tx}}~[\mathrm{^\circ}]$')
ax.set_ylabel(r'Elevation angle $ \theta_{\text{Tx}}~[\mathrm{^\circ}]$');
plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black')
#plt.minorticks_on()
at = AnchoredText(r"$\Delta_t = %f ~\mathrm{s}$" %delta_t,prop=dict(size=6), frameon=True,loc=4)
at.patch.set_boxstyle("round,pad=0.05,rounding_size=0.2")
ax.add_artist(at)
plt.legend(loc='center left',title=r"\textbf{Timestamps}",bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
fig.savefig('main_057_iss_08_txbeam_square0.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10)
# --------------------#
fig = plt.figure(2);
ax = fig.gca()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.axis('equal')
fig.suptitle(r"\textbf{Tx beam placement for object %s trajectory during %s}" %(norad_id,title_string),fontsize=12);
plt.plot(np.degrees(y_sph_tx[2,plt_start_index:tx_beam_index_down]),np.degrees(y_sph_tx[1,plt_start_index:tx_beam_index_down]),color='blue',linestyle='dashed')
plt.plot(np.degrees(y_sph_tx[2,tx_beam_index_up+1:plt_end_index]),np.degrees(y_sph_tx[1,tx_beam_index_up+1:plt_end_index]),color='blue',linestyle='dashed')
plt.plot(np.degrees(y_sph_tx[2,tx_beam_index_down:tx_beam_index_up]),np.degrees(y_sph_tx[1,tx_beam_index_down:tx_beam_index_up]),color='blue')
plt.scatter(np.degrees(y_sph_tx[2,tx_beam_index_down]),np.degrees(y_sph_tx[1,tx_beam_index_down]),s=50,marker=r"$\diamond$",facecolors='none',edgecolors='red',label=r"%s" %str(tx_beam_index_down_epoch.isoformat())+'Z');
plt.scatter(np.degrees(y_sph_tx[2,tx_bw_time_max]),np.degrees(y_sph_tx[1,tx_bw_time_max]),s=50,marker=r"$\otimes$",facecolors='none',edgecolors='green',label=r"%s" %str(tx_bw_time_max_epoch.isoformat())+'Z')
plt.scatter(np.degrees(y_sph_tx[2,tx_beam_index_up]),np.degrees(y_sph_tx[1,tx_beam_index_up]),s=50,marker=r"$\boxplus$",facecolors='none', edgecolors='magenta',label=r"%s" %str(tx_beam_index_up_epoch.isoformat())+'Z')
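# Overlay the circular 3 dB beam contour (circpts is computed earlier in the
# script; every second point is plotted).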
plt.plot(circpts[0,0:-1:2],circpts[1,0:-1:2],color='teal')
ax.fill_between(xx,yy_below,yy_above,facecolor='gray',alpha=0.2);
#plt.annotate(
# '', xy=(xx[1]+0.1, yy_below[1] ), xycoords='data',
# xytext=(xx[1]+0.1,yy_above[0]), textcoords='data',
# arrowprops={'arrowstyle': '<->'})
#plt.text(xx[1]+0.15,yy_below[1]+math.degrees(beamwidth_tx*0.5), r"$\Theta_{3~\mathrm{dB}}^{\text{Tx}} =%.3f \mathrm{^\circ}$" %math.degrees(beamwidth_tx))
plt.annotate(
'', xy=(xx[0],yy_below[0]-0.05), xycoords='data',
xytext=(xx[1],yy_below[0]-0.05), textcoords='data',
arrowprops={'arrowstyle': '<->'})
plt.text(xx[0]+0.5*(xx[1]-xx[0]), yy_below[0]-0.16,r"$\Theta_{3~\mathrm{dB}}^{\text{Tx}} =%.3f \mathrm{^\circ}$" %math.degrees(beamwidth_tx))
plt.plot(label_up_x,label_up,color='darkslategray',linestyle='dotted')
plt.plot(label_down_x,label_up,color='darkslategray',linestyle='dotted')
ax.set_xlabel(r'Azimuth angle $\psi_{\text{Tx}}~[\mathrm{^\circ}]$')
ax.set_ylabel(r'Elevation angle $ \theta_{\text{Tx}}~[\mathrm{^\circ}]$');
plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black')
#plt.minorticks_on()
at = AnchoredText(r"$\Delta_t = %f ~\mathrm{s}$" %delta_t,prop=dict(size=6), frameon=True,loc=4)
at.patch.set_boxstyle("round,pad=0.05,rounding_size=0.2")
ax.add_artist(at);
plt.legend(loc='center left',title=r"\textbf{Timestamps}",bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
plt.ylim(11.3,13.1)
fig.savefig('main_057_iss_08_txbeam_squarecirc0.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10)
# ---------------------------- #
fig = plt.figure(3);
ax = fig.gca()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.axis('equal')
fig.suptitle(r"\textbf{Tx beam placement for object %s trajectory during %s}" %(norad_id,title_string),fontsize=10);
plt.plot(np.degrees(y_sph_tx[2,plt_start_index:tx_beam_index_down+100:100]),np.degrees(y_sph_tx[1,plt_start_index :tx_beam_index_down+100:100]),color='blue',linestyle='dashed')
plt.plot(np.degrees(y_sph_tx[2,tx_beam_index_up+1:plt_end_index+100:100]),np.degrees(y_sph_tx[1,tx_beam_index_up+1:plt_end_index+100:100]),color='blue',linestyle='dashed')
plt.plot(np.degrees(y_sph_tx[2,tx_beam_index_down:tx_beam_index_up+100:100]),np.degrees(y_sph_tx[1,tx_beam_index_down:tx_beam_index_up+100:100]),color='blue')
plt.scatter(np.degrees(y_sph_tx[2,earliest_pt]),np.degrees(y_sph_tx[1,earliest_pt]),s=50,marker=r"$\square$",facecolors='none', edgecolors='red',label=r"%s" %str(earliest_pt_epoch.isoformat())+'Z')
plt.scatter(np.degrees(y_sph_tx[2,tx_bw_time_max]),np.degrees(y_sph_tx[1,tx_bw_time_max]),s=50,marker=r"$\otimes$",facecolors='none',edgecolors='green',label=r"%s" %str(tx_bw_time_max_epoch.isoformat())+'Z')
plt.scatter(np.degrees(y_sph_tx[2,latest_pt]),np.degrees(y_sph_tx[1,latest_pt]),s=50,marker=r"$\circledast$",facecolors='none', edgecolors='magenta',label=r"%s" %str(latest_pt_epoch.isoformat())+'Z')
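# Draw the 3 dB Tx beam footprint: a circle of radius half the beamwidth,
# centred on the maximum-dwell-time sample.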
for p in [
patches.Circle(
(np.degrees(y_sph_tx[2,tx_bw_time_max]),np.degrees(y_sph_tx[1,tx_bw_time_max])),0.5*math.degrees(beamwidth_tx),
color = 'gray',
alpha=0.3
),
]:
ax.add_patch(p)
#plt.annotate(
# '', xy=(xx[1]+0.1, yy_below[1] ), xycoords='data',
# xytext=(xx[1]+0.1,yy_above[0]), textcoords='data',
# arrowprops={'arrowstyle': '<->'})
#plt.text(xx[1]+0.15,yy_below[1]+math.degrees(beamwidth_tx*0.5), r"$\Theta_{3~\mathrm{dB}}^{\text{Tx}} =%.3f \mathrm{^\circ}$" %math.degrees(beamwidth_tx))
plt.annotate(
'', xy=(xx[0],yy_below[0]-0.05), xycoords='data',
xytext=(xx[1],yy_below[0]-0.05), textcoords='data',
arrowprops={'arrowstyle': '<->'})
plt.text(xx[0]+0.5*(xx[1]-xx[0]), yy_below[0]-0.16,r"$\Theta_{3~\mathrm{dB}}^{\text{Tx}} =%.3f \mathrm{^\circ}$" %math.degrees(beamwidth_tx))
plt.plot(label_up_x,label_up,color='darkslategray',linestyle='dotted')
plt.plot(label_down_x,label_up,color='darkslategray',linestyle='dotted')
plt.text(178.5,12.7,r"\begin{tabular}{|c | c |} \hline $k_{\text{max}}$ & $%d$ \\ \hline $\max \{ T_{i,Tx} \}$ & $%.6f~\mathrm{s}$ \\ \hline $\theta_\text{\gls{Tx}} \lbrack k_{\text{max}}\rbrack$ & $%.6f ^\circ$ \\ \hline $\psi_\text{\gls{Tx}} \lbrack k_{\text{max}}\rbrack$ & $%.6f ^\circ$ \\ \hline \end{tabular}" %(tx_bw_time_max,(latest_pt - earliest_pt)*delta_t,math.degrees(y_sph_tx[1,tx_bw_time_max]),math.degrees(y_sph_tx[2,tx_bw_time_max])),size=12)
ax.set_xlabel(r'Azimuth angle $\psi_{\text{Tx}}~[\mathrm{^\circ}]$')
ax.set_ylabel(r'Elevation angle $ \theta_{\text{Tx}}~[\mathrm{^\circ}]$');
plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black')
at = AnchoredText(r"$\Delta_t = %f ~\mathrm{s}$" %delta_t,prop=dict(size=6), frameon=True,loc=4)
at.patch.set_boxstyle("round,pad=0.05,rounding_size=0.2")
ax.add_artist(at)
plt.legend(loc='center left',title=r"\textbf{Timestamps}",bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
plt.ylim(11.3,13.1)
plt.xlim(176.7,179.5)
fig.savefig('main_057_iss_08_txbeam_circ0.pdf',bbox_inches='tight',pad_inches=0.09,dpi=10)
# ------------------------------------------------------------------------------------------------------------------- #
"""
fig = plt.figure(4);
ax = fig.gca()
plt.rc('text', usetex=True)
plt.rc('font', family='serif');
fig.suptitle(r"\textbf{Dwell-time duration in Tx beam for the object %s trajectory on %s}" %(norad_id,title_string1),fontsize=12);
plt.plot(timevec[tx_bw_start_index:tx_bw_end_index],tx_bins_length[tx_bw_start_index:tx_bw_end_index]*delta_t);
plt.scatter(timevec[tx_bw_time_max],tx_bins_length[tx_bw_time_max]*delta_t,s=100,marker=r"$\square$",facecolors='none', edgecolors='red',label=r"%s" %str(tx_bw_time_max_epoch.isoformat())+'Z')
plt.legend(loc='best')
plt.xlim(timevec[tx_bw_start_index],timevec[tx_bw_end_index]);
ax.set_xlabel(r'Time $t~[\mathrm{s}]$')
ax.set_ylabel(r'Illumination Time $ T_{i,Tx}~[\mathrm{s}]$');
at = AnchoredText(r"$\Delta_t = %f ~\mathrm{s}$" %delta_t,prop=dict(size=6), frameon=True,loc=4)
at.patch.set_boxstyle("round,pad=0.05,rounding_size=0.2")
ax.add_artist(at)
plt.ylim(0,11)
plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black')
fig.savefig('main_057_iss_08_dwelltime_tx.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10)
"""
# ----------------------------------------------------------------------------------------------------------------------- #
print 'cool cool cool'
|
|
__source__ = 'https://leetcode.com/problems/reverse-nodes-in-k-group/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/reverse-nodes-in-k-group.py
# Time: O(n)
# Space: O(1)
# LinkedList
#
# Description: Leetcode # 25. Reverse Nodes in k-Group
#
# Given a linked list, reverse its nodes k at a time and return the modified list.
#
# If the number of nodes is not a multiple of k, then the left-out nodes at the end should remain as they are.
#
# You may not alter the values in the nodes; only the nodes themselves may be changed.
#
# Only constant memory is allowed.
#
# For example,
# Given this linked list: 1->2->3->4->5
#
# For k = 2, you should return: 2->1->4->3->5
#
# For k = 3, you should return: 3->2->1->4->5
#
# Companies
# Microsoft Facebook
# Related Topics
# Linked List
# Similar Questions
# Swap Nodes in Pairs
import unittest
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reverseKGroup(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
dummy = ListNode(0)
dummy.next = head
cur = dummy
        while cur:
            nextHead = cur.next  # first node of the next k-group
            nextTail = cur
            for i in xrange(k):  # advance to the k-th node of the group
                nextTail = nextTail.next
                if not nextTail:  # fewer than k nodes remain; leave them as-is
                    return dummy.next
            nextNextHead = nextTail.next  # first node after this k-group
            nextTail.next = None  # detach the k-group
            cur.next = self.reverse(nextHead)  # reversed group: old tail becomes its head
            nextHead.next = nextNextHead  # old head is now the tail; reattach the remainder
            cur = nextHead
return dummy.next
def reverse(self, head):
prev = None
while head:
next = head.next
head.next = prev
prev = head
head = next
return prev
# http://www.cnblogs.com/zuoyuan/p/3785555.html
class SolutionOther:
# @param head, a ListNode
# @param k, an integer
# @return a ListNode
def reverse(self, start, end):
newhead = ListNode(0)
newhead.next = start
while newhead.next != end:
tmp = start.next
start.next = tmp.next
tmp.next = newhead.next
newhead.next = tmp
return (end, start)
def reverseKGroup(self, head, k):
if head == None:
return head
nhead = ListNode(0)
nhead.next = head
start = nhead
while start.next:
end = start
for i in range(k-1):
end = end.next
if end.next == None:
return nhead.next
res = self.reverse(start.next, end.next)
start.next = res[0]
start = res[1]
return nhead.next
#test
l1 = ListNode(1)
l2 = ListNode(2)
l3 = ListNode(3)
l4 = ListNode(4)
l5 = ListNode(5)
l6 = ListNode(6)
l7 = ListNode(7)
l8 = ListNode(8)
l1.next = l2; l2.next = l3 ; l3.next = l4 ; l4.next = l5 ; l5.next = l6 ; l6.next = l7 ; l7.next = l8
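def listToArray(node):
    # Illustrative helper (not part of the original solutions): collect the
    # node values into a plain Python list so results print readably.
    vals = []
    while node:
        vals.append(node.val)
        node = node.next
    return vals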
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
        print listToArray(Solution().reverseKGroup(head, 4))  # expect [4, 3, 2, 1, 5]
        #print listToArray(Solution().reverseKGroup(head, 1))
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
/**
* Definition for singly-linked list.
* public class ListNode {
* int val;
* ListNode next;
* ListNode(int x) { val = x; }
* }
*/
# 5ms 44.94%
class Solution {
public ListNode reverseKGroup(ListNode head, int k) {
if (k < 2) {
return head;
}
ListNode fakeHead = new ListNode(0);
fakeHead.next = head;
ListNode cur = fakeHead;
while (cur != null) {
ListNode nextHead = cur.next;
ListNode nextTail = cur;
for (int i = 0; i < k; i++) {
nextTail = nextTail.next;
if (nextTail == null) {
return fakeHead.next;
}
}
ListNode nextNextHead = nextTail.next;
cur.next = null;
nextTail.next = null;
cur.next = reverse(nextHead);
nextHead.next = nextNextHead;
cur = nextHead;
}
return fakeHead.next;
}
private ListNode reverse(ListNode head) {
ListNode prev = null;
while (head != null) {
ListNode next = head.next;
head.next = prev;
prev = head;
head = next;
}
return prev;
}
}
# 10ms 4.78%
class Solution {
public ListNode reverseKGroup(ListNode head, int k) {
ListNode curr = head;
int count = 0;
while (curr != null && count != k) { // find the k+1 node
curr = curr.next;
count++;
}
if (count == k) { // if k+1 node is found
curr = reverseKGroup(curr, k); // reverse list with k+1 node as head
// head - head-pointer to direct part,
// curr - head-pointer to reversed part;
while (count-- > 0) { // reverse current k-group:
ListNode tmp = head.next; // tmp - next head in direct part
head.next = curr; // preappending "direct" head to the reversed list
curr = head; // move head of reversed part to a new node
head = tmp; // move "direct" head to the next node in direct part
}
head = curr;
}
return head;
}
}
'''
|
|
from tapiriik.settings import WEB_ROOT, STRAVA_CLIENT_SECRET, STRAVA_CLIENT_ID, STRAVA_RATE_LIMITS
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.service_record import ServiceRecord
from tapiriik.database import cachedb
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Location, Lap
from tapiriik.services.api import APIException, UserException, UserExceptionType, APIExcludeActivity
from tapiriik.services.fit import FITIO
from django.core.urlresolvers import reverse
from datetime import datetime, timedelta
from urllib.parse import urlencode
import calendar
import requests
import os
import logging
import pytz
import re
import time
import json
logger = logging.getLogger(__name__)
class StravaService(ServiceBase):
ID = "strava"
DisplayName = "Strava"
DisplayAbbreviation = "STV"
AuthenticationType = ServiceAuthenticationType.OAuth
UserProfileURL = "http://www.strava.com/athletes/{0}"
UserActivityURL = "http://app.strava.com/activities/{1}"
AuthenticationNoFrame = True # They don't prevent the iframe, it just looks really ugly.
PartialSyncRequiresTrigger = True
LastUpload = None
SupportsHR = SupportsCadence = SupportsTemp = SupportsPower = True
SupportsActivityDeletion = True
# For mapping common->Strava; no ambiguity in Strava activity type
_activityTypeMappings = {
ActivityType.Cycling: "Ride",
ActivityType.MountainBiking: "Ride",
ActivityType.Hiking: "Hike",
ActivityType.Running: "Run",
ActivityType.Walking: "Walk",
ActivityType.Snowboarding: "Snowboard",
ActivityType.Skating: "IceSkate",
ActivityType.CrossCountrySkiing: "NordicSki",
ActivityType.DownhillSkiing: "AlpineSki",
ActivityType.Swimming: "Swim",
ActivityType.Gym: "Workout",
ActivityType.Rowing: "Rowing",
ActivityType.Elliptical: "Elliptical",
ActivityType.RollerSkiing: "RollerSki",
ActivityType.StrengthTraining: "WeightTraining",
ActivityType.Climbing: "RockClimbing",
}
# For mapping Strava->common
_reverseActivityTypeMappings = {
"Ride": ActivityType.Cycling,
"VirtualRide": ActivityType.Cycling,
"EBikeRide": ActivityType.Cycling,
"MountainBiking": ActivityType.MountainBiking,
"Run": ActivityType.Running,
"Hike": ActivityType.Hiking,
"Walk": ActivityType.Walking,
"AlpineSki": ActivityType.DownhillSkiing,
"CrossCountrySkiing": ActivityType.CrossCountrySkiing,
"NordicSki": ActivityType.CrossCountrySkiing,
"BackcountrySki": ActivityType.DownhillSkiing,
"Snowboard": ActivityType.Snowboarding,
"Swim": ActivityType.Swimming,
"IceSkate": ActivityType.Skating,
"Workout": ActivityType.Gym,
"Rowing": ActivityType.Rowing,
"Kayaking": ActivityType.Rowing,
"Canoeing": ActivityType.Rowing,
"StandUpPaddling": ActivityType.Rowing,
"Elliptical": ActivityType.Elliptical,
"RollerSki": ActivityType.RollerSkiing,
"WeightTraining": ActivityType.StrengthTraining,
"RockClimbing" : ActivityType.Climbing,
}
SupportedActivities = list(_activityTypeMappings.keys())
GlobalRateLimits = STRAVA_RATE_LIMITS
def UserUploadedActivityURL(self, uploadId):
return "https://www.strava.com/activities/%d" % uploadId
def WebInit(self):
params = {'scope':'write,view_private',
'client_id':STRAVA_CLIENT_ID,
'response_type':'code',
'redirect_uri':WEB_ROOT + reverse("oauth_return", kwargs={"service": "strava"})}
self.UserAuthorizationURL = \
"https://www.strava.com/oauth/authorize?" + urlencode(params)
def _apiHeaders(self, serviceRecord):
return {"Authorization": "access_token " + serviceRecord.Authorization["OAuthToken"]}
def RetrieveAuthorizationToken(self, req, level):
code = req.GET.get("code")
params = {"grant_type": "authorization_code", "code": code, "client_id": STRAVA_CLIENT_ID, "client_secret": STRAVA_CLIENT_SECRET, "redirect_uri": WEB_ROOT + reverse("oauth_return", kwargs={"service": "strava"})}
response = requests.post("https://www.strava.com/oauth/token", data=params)
if response.status_code != 200:
raise APIException("Invalid code")
data = response.json()
authorizationData = {"OAuthToken": data["access_token"]}
# Retrieve the user ID, meh.
id_resp = requests.get("https://www.strava.com/api/v3/athlete", headers=self._apiHeaders(ServiceRecord({"Authorization": authorizationData})))
return (id_resp.json()["id"], authorizationData)
def RevokeAuthorization(self, serviceRecord):
resp = requests.post("https://www.strava.com/oauth/deauthorize", headers=self._apiHeaders(serviceRecord))
if resp.status_code != 204 and resp.status_code != 200:
raise APIException("Unable to deauthorize Strava auth token, status " + str(resp.status_code) + " resp " + resp.text)
def DownloadActivityList(self, svcRecord, exhaustive=False):
activities = []
exclusions = []
before = earliestDate = None
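        # Strava's listing API pages backwards in time: each request asks for
        # activities strictly before the earliest start time seen so far.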
while True:
if before is not None and before < 0:
break # Caused by activities that "happened" before the epoch. We generally don't care about those activities...
logger.debug("Req with before=" + str(before) + "/" + str(earliestDate))
resp = requests.get("https://www.strava.com/api/v3/athletes/" + str(svcRecord.ExternalID) + "/activities", headers=self._apiHeaders(svcRecord), params={"before": before})
if resp.status_code == 401:
raise APIException("No authorization to retrieve activity list", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
earliestDate = None
try:
reqdata = resp.json()
except ValueError:
raise APIException("Failed parsing strava list response %s - %s" % (resp.status_code, resp.text))
if not len(reqdata):
break # No more activities to see
for ride in reqdata:
activity = UploadedActivity()
                activity.TZ = pytz.timezone(re.sub(r"^\([^\)]+\)\s*", "", ride["timezone"])) # Comes back as "(GMT -13:37) The Stuff/We Want"
activity.StartTime = pytz.utc.localize(datetime.strptime(ride["start_date"], "%Y-%m-%dT%H:%M:%SZ"))
logger.debug("\tActivity s/t %s: %s" % (activity.StartTime, ride["name"]))
if not earliestDate or activity.StartTime < earliestDate:
earliestDate = activity.StartTime
before = calendar.timegm(activity.StartTime.astimezone(pytz.utc).timetuple())
activity.EndTime = activity.StartTime + timedelta(0, ride["elapsed_time"])
activity.ServiceData = {"ActivityID": ride["id"], "Manual": ride["manual"]}
if ride["type"] not in self._reverseActivityTypeMappings:
exclusions.append(APIExcludeActivity("Unsupported activity type %s" % ride["type"], activity_id=ride["id"], user_exception=UserException(UserExceptionType.Other)))
logger.debug("\t\tUnknown activity")
continue
activity.Type = self._reverseActivityTypeMappings[ride["type"]]
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=ride["distance"])
if "max_speed" in ride or "average_speed" in ride:
activity.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, avg=ride["average_speed"] if "average_speed" in ride else None, max=ride["max_speed"] if "max_speed" in ride else None)
activity.Stats.MovingTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=ride["moving_time"] if "moving_time" in ride and ride["moving_time"] > 0 else None) # They don't let you manually enter this, and I think it returns 0 for those activities.
# Strava doesn't handle "timer time" to the best of my knowledge - although they say they do look at the FIT total_timer_time field, so...?
if "average_watts" in ride:
activity.Stats.Power = ActivityStatistic(ActivityStatisticUnit.Watts, avg=ride["average_watts"])
if "average_heartrate" in ride:
activity.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=ride["average_heartrate"]))
if "max_heartrate" in ride:
activity.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, max=ride["max_heartrate"]))
if "average_cadence" in ride:
activity.Stats.Cadence.update(ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, avg=ride["average_cadence"]))
if "average_temp" in ride:
activity.Stats.Temperature.update(ActivityStatistic(ActivityStatisticUnit.DegreesCelcius, avg=ride["average_temp"]))
if "calories" in ride:
activity.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, value=ride["calories"])
activity.Name = ride["name"]
activity.Private = ride["private"]
activity.Stationary = ride["manual"]
activity.GPS = ("start_latlng" in ride) and (ride["start_latlng"] is not None)
activity.AdjustTZ()
activity.CalculateUID()
activities.append(activity)
if not exhaustive or not earliestDate:
break
return activities, exclusions
def SubscribeToPartialSyncTrigger(self, serviceRecord):
# There is no per-user webhook subscription with Strava.
serviceRecord.SetPartialSyncTriggerSubscriptionState(True)
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
# As above.
serviceRecord.SetPartialSyncTriggerSubscriptionState(False)
def ExternalIDsForPartialSyncTrigger(self, req):
data = json.loads(req.body.decode("UTF-8"))
return [data["owner_id"]]
def PartialSyncTriggerGET(self, req):
# Strava requires this endpoint to echo back a challenge.
# Only happens once during manual endpoint setup?
from django.http import HttpResponse
return HttpResponse(json.dumps({
"hub.challenge": req.GET["hub.challenge"]
}))
def DownloadActivity(self, svcRecord, activity):
if activity.ServiceData["Manual"]: # I should really add a param to DownloadActivity for this value as opposed to constantly doing this
# We've got as much information as we're going to get - we need to copy it into a Lap though.
activity.Laps = [Lap(startTime=activity.StartTime, endTime=activity.EndTime, stats=activity.Stats)]
return activity
activityID = activity.ServiceData["ActivityID"]
streamdata = requests.get("https://www.strava.com/api/v3/activities/" + str(activityID) + "/streams/time,altitude,heartrate,cadence,watts,temp,moving,latlng,distance,velocity_smooth", headers=self._apiHeaders(svcRecord))
if streamdata.status_code == 401:
raise APIException("No authorization to download activity", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
try:
streamdata = streamdata.json()
        except ValueError:
raise APIException("Stream data returned is not JSON")
if "message" in streamdata and streamdata["message"] == "Record Not Found":
raise APIException("Could not find activity")
ridedata = {}
for stream in streamdata:
ridedata[stream["type"]] = stream["data"]
lap = Lap(stats=activity.Stats, startTime=activity.StartTime, endTime=activity.EndTime) # Strava doesn't support laps, but we need somewhere to put the waypoints.
activity.Laps = [lap]
lap.Waypoints = []
hasHR = "heartrate" in ridedata and len(ridedata["heartrate"]) > 0
hasCadence = "cadence" in ridedata and len(ridedata["cadence"]) > 0
hasTemp = "temp" in ridedata and len(ridedata["temp"]) > 0
hasPower = ("watts" in ridedata and len(ridedata["watts"]) > 0)
hasAltitude = "altitude" in ridedata and len(ridedata["altitude"]) > 0
hasDistance = "distance" in ridedata and len(ridedata["distance"]) > 0
hasVelocity = "velocity_smooth" in ridedata and len(ridedata["velocity_smooth"]) > 0
if "error" in ridedata:
raise APIException("Strava error " + ridedata["error"])
inPause = False
waypointCt = len(ridedata["time"])
for idx in range(0, waypointCt - 1):
waypoint = Waypoint(activity.StartTime + timedelta(0, ridedata["time"][idx]))
if "latlng" in ridedata:
latlng = ridedata["latlng"][idx]
waypoint.Location = Location(latlng[0], latlng[1], None)
if waypoint.Location.Longitude == 0 and waypoint.Location.Latitude == 0:
waypoint.Location.Longitude = None
waypoint.Location.Latitude = None
if hasAltitude:
if not waypoint.Location:
waypoint.Location = Location(None, None, None)
waypoint.Location.Altitude = float(ridedata["altitude"][idx])
# When pausing, Strava sends this format:
# idx = 100 ; time = 1000; moving = true
# idx = 101 ; time = 1001; moving = true => convert to Pause
# idx = 102 ; time = 2001; moving = false => convert to Resume: (2001-1001) seconds pause
# idx = 103 ; time = 2002; moving = true
if idx == 0:
waypoint.Type = WaypointType.Start
elif idx == waypointCt - 2:
waypoint.Type = WaypointType.End
elif idx < waypointCt - 2 and ridedata["moving"][idx+1] and inPause:
waypoint.Type = WaypointType.Resume
inPause = False
elif idx < waypointCt - 2 and not ridedata["moving"][idx+1] and not inPause:
waypoint.Type = WaypointType.Pause
inPause = True
if hasHR:
waypoint.HR = ridedata["heartrate"][idx]
if hasCadence:
waypoint.Cadence = ridedata["cadence"][idx]
if hasTemp:
waypoint.Temp = ridedata["temp"][idx]
if hasPower:
waypoint.Power = ridedata["watts"][idx]
if hasVelocity:
waypoint.Speed = ridedata["velocity_smooth"][idx]
if hasDistance:
waypoint.Distance = ridedata["distance"][idx]
lap.Waypoints.append(waypoint)
return activity
def UploadActivity(self, serviceRecord, activity):
logger.info("Activity tz " + str(activity.TZ) + " dt tz " + str(activity.StartTime.tzinfo) + " starttime " + str(activity.StartTime))
if self.LastUpload is not None:
while (datetime.now() - self.LastUpload).total_seconds() < 5:
time.sleep(1)
logger.debug("Inter-upload cooldown")
source_svc = None
if hasattr(activity, "ServiceDataCollection"):
source_svc = str(list(activity.ServiceDataCollection.keys())[0])
upload_id = None
if activity.CountTotalWaypoints():
req = {
"data_type": "fit",
"activity_name": activity.Name,
"description": activity.Notes, # Paul Mach said so.
"activity_type": self._activityTypeMappings[activity.Type],
"private": 1 if activity.Private else 0}
if "fit" in activity.PrerenderedFormats:
logger.debug("Using prerendered FIT")
fitData = activity.PrerenderedFormats["fit"]
else:
# TODO: put the fit back into PrerenderedFormats once there's more RAM to go around and there's a possibility of it actually being used.
fitData = FITIO.Dump(activity, drop_pauses=True)
files = {"file":("tap-sync-" + activity.UID + "-" + str(os.getpid()) + ("-" + source_svc if source_svc else "") + ".fit", fitData)}
response = requests.post("https://www.strava.com/api/v3/uploads", data=req, files=files, headers=self._apiHeaders(serviceRecord))
if response.status_code != 201:
if response.status_code == 401:
raise APIException("No authorization to upload activity " + activity.UID + " response " + response.text + " status " + str(response.status_code), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
if "duplicate of activity" in response.text:
logger.debug("Duplicate")
self.LastUpload = datetime.now()
return # Fine by me. The majority of these cases were caused by a dumb optimization that meant existing activities on services were never flagged as such if tapiriik didn't have to synchronize them elsewhere.
raise APIException("Unable to upload activity " + activity.UID + " response " + response.text + " status " + str(response.status_code))
upload_id = response.json()["id"]
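            # Strava processes uploads asynchronously: poll the upload status
            # until an activity_id appears or an error is reported.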
upload_poll_wait = 8 # The mode of processing times
while not response.json()["activity_id"]:
time.sleep(upload_poll_wait)
response = requests.get("https://www.strava.com/api/v3/uploads/%s" % upload_id, headers=self._apiHeaders(serviceRecord))
logger.debug("Waiting for upload - status %s id %s" % (response.json()["status"], response.json()["activity_id"]))
if response.json()["error"]:
error = response.json()["error"]
if "duplicate of activity" in error:
self.LastUpload = datetime.now()
logger.debug("Duplicate")
return # I guess we're done here?
raise APIException("Strava failed while processing activity - last status %s" % response.text)
upload_id = response.json()["activity_id"]
else:
localUploadTS = activity.StartTime.strftime("%Y-%m-%d %H:%M:%S")
req = {
"name": activity.Name if activity.Name else activity.StartTime.strftime("%d/%m/%Y"), # This is required
"description": activity.Notes,
"type": self._activityTypeMappings[activity.Type],
"private": 1 if activity.Private else 0,
"start_date_local": localUploadTS,
"distance": activity.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value,
"elapsed_time": round((activity.EndTime - activity.StartTime).total_seconds())
}
headers = self._apiHeaders(serviceRecord)
response = requests.post("https://www.strava.com/api/v3/activities", data=req, headers=headers)
# FFR this method returns the same dict as the activity listing, as REST services are wont to do.
if response.status_code != 201:
if response.status_code == 401:
raise APIException("No authorization to upload activity " + activity.UID + " response " + response.text + " status " + str(response.status_code), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to upload stationary activity " + activity.UID + " response " + response.text + " status " + str(response.status_code))
upload_id = response.json()["id"]
self.LastUpload = datetime.now()
return upload_id
def DeleteCachedData(self, serviceRecord):
cachedb.strava_cache.remove({"Owner": serviceRecord.ExternalID})
cachedb.strava_activity_cache.remove({"Owner": serviceRecord.ExternalID})
def DeleteActivity(self, serviceRecord, uploadId):
headers = self._apiHeaders(serviceRecord)
del_res = requests.delete("https://www.strava.com/api/v3/activities/%d" % uploadId, headers=headers)
del_res.raise_for_status()
|
|
#! /usr/bin/env python
import sys, os,time, shutil, copy
sys.path.append("../../")
import numpy as np
from PyQt4 import QtCore, QtGui,QtNetwork
from grid_layout import Ui_MainWindow
from ticker_audio import Audio
from utils import Utils
from ticker_widgets import InstructionsDisplay,SentenceDisplay
from grid_config import ChannelConfig
class GridGui(QtGui.QMainWindow, Ui_MainWindow):
##################################### Init
def __init__(self):
t=time.time()
QtGui.QMainWindow.__init__(self)
self.setupUi(self)
self.utils = Utils()
#######################################################################
#The grid settings
#######################################################################
self.main_timer_delay = 10 #Timer delay in ms
self.n_prog_status = 2 #Number of iterations before reading program status
self.n_undo_last = 4 #Number iterations before undo last action
self.delete_char = '$' #The user writes this character to delete the previous character
scan_delay = self.getScanDelay()
#######################################################################
#Widget instantiation
#######################################################################
self.cur_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
self.config_dir = self.cur_dir + "grid_config/"
self.audio = Audio(i_root_dir=self.config_dir)
self.instructions_display = InstructionsDisplay(self.label_instructions, self.centralwidget, i_title=None)
self.sentence_display = SentenceDisplay(self.selected_words_disp, self.centralwidget, i_title=None)
self.channel_config = ChannelConfig(scan_delay,self.config_dir)
        #Main timer periodically calls the audio update
self.main_timer = QtCore.QTimer()
#Best timer: after the alphabet has been played to the user, this time interval will pass before starting again
self.best_letters_timer = QtCore.QTimer()
self.best_letters_timer.setInterval(0) #Give some time before continuing to the next letter
self.best_letters_timer.setSingleShot(True)
#######################################################################
#Complete initialisation of separate components, and connects all signals
#######################################################################
self.setSliderLabel(scan_delay*10.0)
self.audio.setTicks(1) #The number tick sounds before playing the alphabet
#Pause/play
self.restart = True
self.hideRecordingWidgets()
#Load initial settings from file
#self.initSettings(i_settings_dir, i_settings_file)
self.initDisplayForNewWord()
self.setInstructLetterSelectStr()
self.__connectSignals()
    #Reset everything - click distribution etc.
def reset(self):
self.audio.setAlphabetDir(self.channel_config.alphabet_dir)
self.audio.setConfigDir(self.channel_config.config_dir)
self.audio.setChannels(1)
def initDisplayForNewWord(self):
self.repeat_count = 0
self.letter_idx = 0
self.delete_cnt = 0
self.nclicks = 0
self.selected_word = None
self.click_times = []
self.scan_delay = self.getScanDelay()
self.tutorial = self.action_tutorial.isChecked()
self.undo_last_action_cnt = 0
self.delete_cnt = 0
self.nscans = []
self.setInstructLetterSelectStr()
self.setRowScan()
def __connectSignals(self):
#Menubar actions
QtCore.QObject.connect( self.action_close, QtCore.SIGNAL("triggered(bool)"), self.actionCloseApplication)
QtCore.QObject.connect( self.action_clear_sentence, QtCore.SIGNAL("triggered(bool)"), self.actionClear)
QtCore.QObject.connect( self.action_tutorial, QtCore.SIGNAL("toggled(bool)"), self.setTutorial)
#Speed scrollbar
QtCore.QObject.connect( self.scrollbar_letter_speed, QtCore.SIGNAL("sliderReleased()"), self.setScanDelay )
QtCore.QObject.connect( self.scrollbar_letter_speed, QtCore.SIGNAL("sliderMoved(int)"), self.setSliderLabel )
#Start/stop/pause
QtCore.QObject.connect( self.clear_button, QtCore.SIGNAL("clicked(bool)"), self.startSoundFalse )
#Pause/unpause
QtCore.QObject.connect( self.button_pause, QtCore.SIGNAL("clicked(bool)"), self.pauseSlot )
#Timers
QtCore.QObject.connect( self.main_timer, QtCore.SIGNAL("timeout()"), self.update)
        QtCore.QObject.connect( self.best_letters_timer, QtCore.SIGNAL("timeout()"), self.processAlphabetRepetitions)
##################################### Main functions
def setRowScan(self):
self.channel_config.setRowScan()
self.startNewScan()
def setColScan(self):
self.stopTimers()
sound_idx = self.audio.getSoundIndex(self.channel_config)
id = self.channel_config.alphabet.getAlphabet(i_with_spaces=False, i_group=False)[sound_idx]
self.waitAudioReady([id])
self.channel_config.setColScan(id)
self.startNewScan()
def startNewScan(self):
self.reset()
if self.button_pause.isChecked() and (not self.main_timer.isActive()):
self.main_timer.start()
self.audio.clear()
self.nscans.append(0)
print "NSCANS INIT: ", self.nscans
def waitAudioReady(self, i_commands=None):
if i_commands is not None:
self.audio.playInstructions(i_commands)
while self.audio.isPlayingInstructions() or (not self.audio.isReady()):
self.audio.update(self.channel_config)
#Call this function if the click has to be processed
def processClick(self):
self.stopTimers()
click_time = self.audio.getTime(self.channel_config)
sound_idx = self.audio.getSoundIndex(self.channel_config)
#self.waitAudioReady(['click'])
self.nclicks += 1
self.selected_word = None
self.repeat_count = 0
if self.audio.isPlayingInstructions():
return
print "ADDING : TO NSCANS FROM CLICK: ", self.audio.sound_index+1, " NSCANS BEFORE = ", self.nscans
self.nscans[-1] += (self.audio.sound_index+1)
print "NSCANS NOW ", self.nscans
print " Click time in processClick: ", self.audio.getTime(self.channel_config)
self.click_times.append(click_time)
if self.channel_config.row_mode:
self.setColScan()
return
alphabet = self.channel_config.alphabet.getAlphabet(i_with_spaces=False, i_group=False)
letter = alphabet[sound_idx]
print "selected letter " , letter
if letter == self.delete_char:
self.deleteLastLetter()
self.delete_cnt += 1
elif (letter == "_") or (letter == "."):
self.selected_word = self.processWord(letter)
else:
self.updateNextLetter(letter)
self.setRowScan()
def processWord(self, i_letter, i_play_new_word=True):
selected_word = ""
self.sentence_display.update(i_letter, i_adjust_stop=False, i_add_space=False)
if i_letter == ".":
selected_word = "."
else:
if self.letter_idx > 0:
selected_word = self.sentence_display.lastWord()
self.waitAudioReady(['written', "_"])
print "SELECTED WORD = ", selected_word
if not selected_word == "":
self.newWord(selected_word, i_is_word_selected=True, i_play_new_word=i_play_new_word)
else:
self.newWord(i_is_word_selected=False, i_play_new_word=i_play_new_word)
self.waitAudioReady()
return selected_word
def deleteLastLetter(self):
self.letter_idx -= 1
self.repeat_count = 0
is_delete = True
if self.letter_idx < 0:
is_delete = False
self.letter_idx = 0
else:
self.sentence_display.deleteLastLetter()
self.newLetter(['written', self.delete_char])
return is_delete
def updateNextLetter(self, i_letter):
self.letter_idx += 1
self.sentence_display.update(i_letter, i_adjust_stop=False, i_add_space=False)
self.newLetter(['written', i_letter, "next"])
def newLetter(self, i_cmd_str):
self.waitAudioReady(i_cmd_str)
#cmd_str = self.currentLetterIndexStr()
#self.waitAudioReady(cmd_str)
self.repeat_count=0
self.setInstructLetterSelectStr()
def playCurrentLetterIndex(self, i_cmd_str=[]):
cmd_str = self.currentLetterIndexStr(i_cmd_str)
self.audio.playInstructions(cmd_str)
def currentLetterIndexStr(self, i_cmd_str=[]):
cmd_str = list(i_cmd_str)
letter_idx = self.instructions_display.letter_dict[self.letter_idx + 1]
cmd_str.extend(letter_idx.split(" "))
cmd_str.append("letter")
return cmd_str
    def processAlphabetRepetitions(self):
#Process the number of times the alphabet sequence has been repeated when no clicks were received
self.repeat_count += 1
self.nscans[-1] += (self.channel_config.getSoundTimes().shape[0])
self.audio.clear()
is_undo_last = self.repeat_count % self.n_undo_last
is_prog_status = self.repeat_count % self.n_prog_status
        if (is_undo_last == 0) and (not self.channel_config.row_mode):
self.undo_last_action_cnt += 1
self.newLetter(['undo'])
self.setRowScan()
        elif is_prog_status == 0:
self.playCurrentLetterIndex()
def newWord(self, i_extra_command=None, i_is_word_selected=True, i_play_new_word=True):
self.initDisplayForNewWord()
if i_extra_command is not None:
print "i_extra_command = ", i_extra_command
if i_is_word_selected:
self.audio.synthesiseWord(i_extra_command)
else:
self.audio.synthesise(i_extra_command)
if i_play_new_word:
print "PLAY NEW WORD"
self.audio.playInstructions(self.newWordStr())
#These functions are update functions synchronised with the GUI timer
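    # update() drives audio playback on each timer tick; once a full alphabet
    # pass has been read out, the single-shot best_letters_timer fires and
    # processAlphabetRepetitions() handles the repetition (its interval sets
    # the gap before the next pass starts).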
def update(self):
if self.best_letters_timer.isActive():
(is_read_next, is_update_time, is_first_letter) = self.audio.update(self.channel_config, i_loop=False)
return (is_read_next, is_update_time, is_first_letter)
(is_read_next, is_update_time, is_first_letter) = self.audio.update(self.channel_config)
if is_read_next and (not self.best_letters_timer.isActive()):
self.audio.readTime(self.channel_config)
self.best_letters_timer.start()
return (is_read_next, is_update_time, is_first_letter)
self.best_letters_timer.stop()
return (is_read_next, is_update_time, is_first_letter)
#################################### Start/Stop/Pause/Close
def pauseSlot(self, i_checked):
if i_checked:
self.pauseTrue()
if self.restart:
self.restart = False
self.startSoundTrue()
self.newWord()
self.main_timer.start(self.main_timer_delay)
else:
self.pauseFalse()
def pauseTrue(self, i_play_cur_letter_idx=True):
self.startSoundTrue()
self.setRowScan()
if i_play_cur_letter_idx:
self.playCurrentLetterIndex()
self.button_pause.setChecked(True)
self.button_pause.setText("Pause")
def pauseFalse(self, i_undo_last=True):
self.button_pause.setText("Play")
self.button_pause.setChecked(False)
self.stopTimers()
self.audio.stop()
def startSoundTrue(self):
self.audio.clear()
self.audio.restart()
def startSoundFalse(self):
self.audio.clear()
self.audio.stop()
self.pauseFalse(i_undo_last=False)
self.restart = True
def closeEvent(self, event):
self.startSoundFalse()
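        # Busy-wait until the audio backend reports ready so playback has
        # stopped cleanly before the window closes.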
while not self.audio.isReady():
continue
QtGui.QMainWindow.close(self)
def stopTimers(self):
self.main_timer.stop()
#################################### Switch Events
def keyPressEvent(self, event):
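        # The space bar is the single-switch input: it resumes scanning when
        # paused, and registers a selection click.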
if (event.key() == QtCore.Qt.Key_Space) and (not self.button_pause.isChecked()):
self.pauseSlot(True)
if event.key() == QtCore.Qt.Key_Space:
self.processClick()
##################################### Feedback Phrases
def setInstructLetterSelectStr(self):
disp_str = str(self.instructions_display.getInstructSentence(self.letter_idx+1))
self.instructions_display.update(disp_str)
def newWordStr(self):
instruct_str = ["start"]
return instruct_str
##################################### Set functions
def setSliderLabel(self, i_value):
scan_delay = "%.2f" % (i_value/10.0)
self.label_letter_speed.setText(QtCore.QString("Scan delay (seconds): %s" % scan_delay))
def setScanDelay(self):
self.setNewSetting()
scan_delay = self.__setScanDelay()
print "Scan delay after set scan delay = ", scan_delay
self.channel_config.setScanDelay(scan_delay)
def __setScanDelay(self):
scan_delay = self.getScanDelay()
self.setSliderLabel(10.0 * scan_delay)
return scan_delay
def setTutorial(self, i_checked):
self.setNewSetting()
def setNewSetting(self):
self.startSoundFalse()
self.setRowScan()
self.initDisplayForNewWord()
##################################### Get functions
def getScanDelay(self):
val = 0.1*float(self.scrollbar_letter_speed.value())
val = float(str( "%.2f" % val))
return val
##################################### Actions
def actionClear(self):
self.sentence_display.clear()
self.setNewSetting()
def actionCloseApplication(self):
self.close()
#################################### Show/Hide recordings widgets
def hideRecordingWidgets(self):
self.phrase_disp.hide()
self.label_phrases.hide()
self.action_inc_phrases.setVisible(False)
def showRecordingWidgets(self):
self.phrase_disp.show()
self.label_phrases.show()
self.action_inc_phrases.setVisible(True)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
gui = GridGui()
gui.show()
# gui.showRecordingWidgets()
sys.exit( app.exec_())
|
|
##
# Copyright (c) 2008-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from zope.interface import implements
from twisted.internet.interfaces import IConnector, IReactorTCP
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.address import IPv4Address
from twistedcaldav.test.util import InMemoryMemcacheProtocol
from twistedcaldav.memcachepool import PooledMemCacheProtocol
from twistedcaldav.memcachepool import MemCacheClientFactory
from twistedcaldav.memcachepool import MemCachePool
from twistedcaldav.test.util import TestCase
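# Address the pool under test would connect to; 11211 is the default memcached port.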
MC_ADDRESS = IPv4Address('TCP', '127.0.0.1', 11211)
class StubConnectionPool(object):
"""
    A stub client connection pool that records its calls in the form of a list
    of (status, client) tuples, where status is C{'free'}, C{'busy'} or C{'gone'}.
@ivar calls: A C{list} of C{tuple}s of the form C{(status, client)} where
status is C{'free'}, C{'busy'} or C{'gone'} and client is the protocol
instance that made the call.
"""
def __init__(self):
self.calls = []
self.shutdown_deferred = None
self.shutdown_requested = False
def clientFree(self, client):
"""
Record a C{'free'} call for C{client}.
"""
self.calls.append(('free', client))
def clientBusy(self, client):
"""
Record a C{'busy'} call for C{client}.
"""
self.calls.append(('busy', client))
def clientGone(self, client):
"""
Record a C{'gone'} call for C{client}
"""
self.calls.append(('gone', client))
class StubConnector(object):
"""
A stub L{IConnector} that can be used for testing.
"""
implements(IConnector)
def connect(self):
"""
A L{IConnector.connect} implementation that doesn't do anything.
"""
def stopConnecting(self):
"""
A L{IConnector.stopConnecting} that doesn't do anything.
"""
class StubReactor(object):
"""
A stub L{IReactorTCP} that records the calls to connectTCP.
@ivar calls: A C{list} of tuples (args, kwargs) sent to connectTCP.
"""
implements(IReactorTCP)
def __init__(self):
self.calls = []
def connectTCP(self, *args, **kwargs):
self.calls.append((args, kwargs))
return StubConnector()
def addSystemEventTrigger(self, *args, **kwds):
pass
class PooledMemCacheProtocolTests(TestCase):
"""
Tests for the L{PooledMemCacheProtocol}
"""
def test_connectionMadeFiresDeferred(self):
"""
Test that L{PooledMemCacheProtocol.connectionMade} fires the factory's
deferred.
"""
p = PooledMemCacheProtocol()
p.factory = MemCacheClientFactory()
p.connectionPool = StubConnectionPool()
d = p.factory.deferred
d.addCallback(self.assertEquals, p)
p.connectionMade()
return d
class MemCacheClientFactoryTests(TestCase):
"""
Tests for the L{MemCacheClientFactory}
@ivar factory: A L{MemCacheClientFactory} instance with a
L{StubConnectionPool}.
@ivar protocol: A L{PooledMemCacheProtocol} that was built by
L{MemCacheClientFactory.buildProtocol}.
@ivar pool: The L{StubConnectionPool} attached to C{self.factory} and
C{self.protocol}.
"""
def setUp(self):
"""
        Create a L{MemCacheClientFactory} instance and give it a
L{StubConnectionPool} instance.
"""
super(MemCacheClientFactoryTests, self).setUp()
self.pool = StubConnectionPool()
self.factory = MemCacheClientFactory()
self.factory.connectionPool = self.pool
self.protocol = self.factory.buildProtocol(None)
def test_clientConnectionFailedNotifiesPool(self):
"""
        Test that L{MemCacheClientFactory.clientConnectionFailed} notifies
        its connectionPool that it is busy.
"""
self.factory.clientConnectionFailed(StubConnector(), None)
self.assertEquals(self.factory.connectionPool.calls,
[('busy', self.protocol)])
def test_clientConnectionLostNotifiesPool(self):
"""
        Test that L{MemCacheClientFactory.clientConnectionLost} notifies
        its connectionPool that it is busy.
"""
self.factory.clientConnectionLost(StubConnector(), None)
self.assertEquals(self.factory.connectionPool.calls,
[('busy', self.protocol)])
def test_buildProtocolRemovesExistingClient(self):
"""
Test that L{MemCacheClientFactory.buildProtocol} notifies
the connectionPool when an old protocol instance is going away.
This will happen when we get reconnected. We'll remove the old protocol
and add a new one.
"""
self.factory.buildProtocol(None)
self.assertEquals(self.factory.connectionPool.calls,
[('gone', self.protocol)])
def tearDown(self):
"""
Make sure the L{MemCacheClientFactory} isn't trying to reconnect
anymore.
"""
self.factory.stopTrying()
class MemCachePoolTests(TestCase):
"""
Tests for L{MemCachePool}.
@ivar reactor: A L{StubReactor} instance.
@ivar pool: A L{MemCachePool} for testing.
"""
def setUp(self):
"""
Create a L{MemCachePool}.
"""
TestCase.setUp(self)
self.reactor = StubReactor()
self.pool = MemCachePool(
TCP4ClientEndpoint(self.reactor, MC_ADDRESS.host, MC_ADDRESS.port),
maxClients=5, reactor=self.reactor
)
realClientFactory = self.pool.clientFactory
self.clientFactories = []
def capturingClientFactory(*a, **k):
cf = realClientFactory(*a, **k)
self.clientFactories.append(cf)
return cf
self.pool.clientFactory = capturingClientFactory
def test_clientFreeAddsNewClient(self):
"""
Test that a client not in the busy set gets added to the free set.
"""
p = MemCacheClientFactory().buildProtocol(None)
self.pool.clientFree(p)
self.assertEquals(self.pool._freeClients, set([p]))
def test_clientFreeAddsBusyClient(self):
"""
Test that a client in the busy set gets moved to the free set.
"""
p = MemCacheClientFactory().buildProtocol(None)
self.pool.clientBusy(p)
self.pool.clientFree(p)
self.assertEquals(self.pool._freeClients, set([p]))
self.assertEquals(self.pool._busyClients, set([]))
def test_clientBusyAddsNewClient(self):
"""
Test that a client not in the free set gets added to the busy set.
"""
p = MemCacheClientFactory().buildProtocol(None)
self.pool.clientBusy(p)
self.assertEquals(self.pool._busyClients, set([p]))
def test_clientBusyAddsFreeClient(self):
"""
Test that a client in the free set gets moved to the busy set.
"""
p = MemCacheClientFactory().buildProtocol(None)
self.pool.clientFree(p)
self.pool.clientBusy(p)
self.assertEquals(self.pool._busyClients, set([p]))
self.assertEquals(self.pool._freeClients, set([]))
def test_clientGoneRemovesFreeClient(self):
"""
Test that a client in the free set gets removed when
L{MemCachePool.clientGone} is called.
"""
p = MemCacheClientFactory().buildProtocol(None)
self.pool.clientFree(p)
self.assertEquals(self.pool._freeClients, set([p]))
self.assertEquals(self.pool._busyClients, set([]))
self.pool.clientGone(p)
self.assertEquals(self.pool._freeClients, set([]))
def test_clientGoneRemovesBusyClient(self):
"""
Test that a client in the busy set gets removed when
L{MemCachePool.clientGone} is called.
"""
p = MemCacheClientFactory().buildProtocol(None)
self.pool.clientBusy(p)
self.assertEquals(self.pool._busyClients, set([p]))
self.assertEquals(self.pool._freeClients, set([]))
self.pool.clientGone(p)
self.assertEquals(self.pool._busyClients, set([]))
def test_performRequestCreatesConnection(self):
"""
Test that L{MemCachePool.performRequest} on a fresh instance causes
a new connection to be created.
"""
results = []
p = InMemoryMemcacheProtocol()
p.set('foo', 'bar')
d = self.pool.performRequest('get', 'foo')
d.addCallback(results.append)
args, _ignore_kwargs = self.reactor.calls.pop()
self.assertEquals(args[:2], (MC_ADDRESS.host, MC_ADDRESS.port))
self.clientFactories[-1].deferred.callback(p)
self.assertEquals(results, [(0, 'bar')])
def test_performRequestUsesFreeConnection(self):
"""
        Test that L{MemCachePool.performRequest} doesn't create a new
        connection if a free connection is available.
"""
def _checkResult(result):
self.assertEquals(result, (0, 'bar'))
self.assertEquals(self.reactor.calls, [])
p = InMemoryMemcacheProtocol()
p.set('foo', 'bar')
self.pool.clientFree(p)
d = self.pool.performRequest('get', 'foo')
d.addCallback(_checkResult)
return d
def test_performRequestMaxBusyQueuesRequest(self):
"""
Test that L{MemCachePool.performRequest} queues the request if
all clients are busy.
"""
def _checkResult(result):
self.assertEquals(result, (0, 'bar'))
self.assertEquals(self.reactor.calls, [])
p = InMemoryMemcacheProtocol()
p.set('foo', 'bar')
p1 = InMemoryMemcacheProtocol()
p1.set('foo', 'baz')
self.pool.suggestMaxClients(2)
self.pool.clientBusy(p)
self.pool.clientBusy(p1)
d = self.pool.performRequest('get', 'foo')
d.addCallback(_checkResult)
self.pool.clientFree(p)
return d
def test_performRequestCreatesConnectionsUntilMaxBusy(self):
"""
Test that L{MemCachePool.performRequest} will create new connections
until it reaches the maximum number of busy clients.
"""
def _checkResult(result):
self.assertEquals(result, (0, 'baz'))
self.pool.suggestMaxClients(2)
p = InMemoryMemcacheProtocol()
p.set('foo', 'bar')
p1 = InMemoryMemcacheProtocol()
p1.set('foo', 'baz')
self.pool.clientBusy(p)
self.pool.performRequest('get', 'foo')
args, _ignore_kwargs = self.reactor.calls.pop()
self.assertEquals(args[:2], (MC_ADDRESS.host, MC_ADDRESS.port))
def test_pendingConnectionsCountAgainstMaxClients(self):
"""
Test that L{MemCachePool.performRequest} will not initiate a new
connection if there are pending connections that count towards max
clients.
"""
self.pool.suggestMaxClients(1)
self.pool.performRequest('get', 'foo')
args, _ignore_kwargs = self.reactor.calls.pop()
self.assertEquals(args[:2], (MC_ADDRESS.host, MC_ADDRESS.port))
self.pool.performRequest('get', 'bar')
self.assertEquals(self.reactor.calls, [])
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from preprocessing import preprocessing_factory
from optimizer.yellowfin import YFOptimizer
slim = tf.contrib.slim
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', '/tmp/tfmodel/',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
    'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'rmsprop',
    'The name of the optimizer, one of "adadelta", "adagrad", "adam", '
    '"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
    'The number of gradients to collect before updating the parameters.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
    'The decay to use for the moving average. '
    'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
tf.app.flags.DEFINE_float('width_multiplier', 1.0,
'Width Multiplier, for MobileNet only.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
    'Comma-separated list of scopes to filter the set of variables to train. '
    'By default (None), all variables are trained.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint would ignore missing variables.')
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
    ValueError: if FLAGS.learning_rate_decay_type is not recognized.
  """
decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
    raise ValueError('learning_rate_decay_type [%s] was not recognized' %
                     FLAGS.learning_rate_decay_type)
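# Worked example (added for clarity; the sample count is an assumption, using
# the usual ImageNet train-split size): with the defaults above
# (learning_rate=0.01, learning_rate_decay_factor=0.94, batch_size=32,
# num_epochs_per_decay=2.0) and num_samples_per_epoch=1281167,
# decay_steps = int(1281167 / 32 * 2.0) = 80072, so the staircase schedule is
# lr(step) = 0.01 * 0.94 ** (step // 80072). The tiny helper below mirrors
# that in pure Python, for sanity-checking a schedule before training.
def _example_staircase_lr(step, base_lr=0.01, decay_factor=0.94,
                          decay_steps=80072):
  """Illustrative pure-Python mirror of the staircase exponential decay."""
  return base_lr * decay_factor ** (step // decay_steps)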
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
elif FLAGS.optimizer == 'yellowfin':
optimizer = YFOptimizer(lr=1.0, mu=0.0)
else:
    raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
return optimizer
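# Usage note (illustrative, not part of the upstream script): the optimizer is
# selected purely via flags, e.g.
#   --optimizer=momentum --momentum=0.9 --learning_rate=0.01
#   --optimizer=rmsprop --rmsprop_decay=0.9 --rmsprop_momentum=0.9
# Note that the 'yellowfin' branch above deliberately ignores the configured
# learning rate: YFOptimizer starts from lr=1.0, mu=0.0 and is designed to
# tune the learning rate and momentum itself as training progresses.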
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
summaries.append(tf.summary.histogram(variable.op.name, variable))
summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
return summaries
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
  # Warn the user if a checkpoint already exists in train_dir; when one does,
  # --checkpoint_path is ignored and training resumes from train_dir instead.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
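# Illustrative fine-tuning invocation (the scope names are assumptions based
# on common TF-slim checkpoints, not taken from this script): to warm-start
# inception_v3 on a new dataset while reinitializing the classifier, exclude
# the logits scopes when restoring, e.g.
#   --checkpoint_path=/tmp/inception_v3.ckpt \
#   --checkpoint_exclude_scopes=InceptionV3/Logits,InceptionV3/AuxLogits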
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
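# Companion example (scope names again assumed): to train only the freshly
# initialized classifier while keeping the restored backbone frozen, pair the
# flags above with
#   --trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits
# Leaving --trainable_scopes unset trains everything in
# tf.trainable_variables().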
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
#######################
# Config model_deploy #
#######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# Create global_step
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
######################
# Select the network #
######################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=True,
width_multiplier=FLAGS.width_multiplier)
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=True)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
train_image_size = FLAGS.train_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, train_image_size, train_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(
labels, dataset.num_classes - FLAGS.labels_offset)
batch_queue = slim.prefetch_queue.prefetch_queue(
[images, labels], capacity=2 * deploy_config.num_clones)
####################
# Define the model #
####################
def clone_fn(batch_queue):
"""Allows data parallelism by creating multiple clones of network_fn."""
images, labels = batch_queue.dequeue()
logits, end_points = network_fn(images)
#############################
# Specify the loss function #
#############################
if 'AuxLogits' in end_points:
tf.losses.softmax_cross_entropy(
logits=end_points['AuxLogits'], onehot_labels=labels,
label_smoothing=FLAGS.label_smoothing, weights=0.4, scope='aux_loss')
tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels,
label_smoothing=FLAGS.label_smoothing, weights=1.0)
return end_points
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by network_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# Add summaries for end_points.
end_points = clones[0].outputs
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point,
tf.nn.zero_fraction(x)))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
#########################################
# Configure the optimization procedure. #
#########################################
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.sync_replicas:
# If sync_replicas is enabled, the averaging will be done in the chief
# queue runner.
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=FLAGS.replicas_to_aggregate,
variable_averages=variable_averages,
variables_to_average=moving_average_variables,
replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
total_num_replicas=FLAGS.worker_replicas)
elif FLAGS.moving_average_decay:
# Update ops executed locally by trainer.
update_ops.append(variable_averages.apply(moving_average_variables))
# Variables to train.
variables_to_train = _get_variables_to_train()
    # Compute the total loss and one set of gradients across all clones.
total_loss, clones_gradients = model_deploy.optimize_clones(
clones,
optimizer,
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
train_tensor = control_flow_ops.with_dependencies([update_op], total_loss,
name='train_op')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
slim.learning.train(
train_tensor,
logdir=FLAGS.train_dir,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
init_fn=_get_init_fn(),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
sync_optimizer=optimizer if FLAGS.sync_replicas else None)
if __name__ == '__main__':
tf.app.run()
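# End-to-end usage sketch (hedged: the script filename, dataset directory and
# step budget below are assumptions for illustration only):
#   python train_image_classifier.py \
#     --train_dir=/tmp/tfmodel \
#     --dataset_name=imagenet --dataset_split_name=train \
#     --dataset_dir=/data/imagenet-tfrecords \
#     --model_name=inception_v3 --optimizer=rmsprop \
#     --batch_size=32 --max_number_of_steps=100000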
#! /usr/bin/env python3
"""
Sets up an Ubuntu 14.04 x64 server to be a Counterblock Federated Node.
NOTE: The system should be properly secured before running this script.
This is admittedly a bit of a hack; in the future it will most likely be
replaced by a cleaner, Docker-based approach.
"""
import os
import sys
import re
import time
import getopt
import logging
import shutil
import socket
import urllib
import zipfile
import platform
import collections
import tempfile
import tarfile
import random
import string
import subprocess
try: #ignore import errors on windows
import pwd
import grp
except ImportError:
pass
REPO_NAME = "federatednode_build"
REPO_URL = "https://github.com/CounterpartyXCP/federatednode_build.git"
USERNAME = "xcp"
DAEMON_USERNAME = "xcpd"
USER_HOMEDIR = "/home/xcp"
PYTHON2_VER = "2.7" #Ubuntu 14.04 uses python2.7
PYTHON3_VER = "3.4" #Ubuntu 14.04 uses python3.4
paths = None
questions = None
###
### UTILITY METHODS
###
def pass_generator(size=14, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
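#Note (added for clarity): with the default 62-character alphabet and size=14,
# pass_generator() draws from 62**14 possibilities, i.e. about
# 14 * log2(62) ~= 83 bits of entropy -- ample for the machine-generated RPC
# passwords used below.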
def runcmd(command, abort_on_failure=True):
logging.debug("RUNNING COMMAND: %s" % command)
ret = os.system(command)
if abort_on_failure and ret != 0:
logging.error("Command failed: '%s'" % command)
sys.exit(1)
def modify_config(param_re, content_to_add, filenames, replace_if_exists=True, dotall=False):
if not isinstance(filenames, (list, tuple)):
filenames = [filenames,]
re_flags = re.MULTILINE | re.DOTALL if dotall else re.MULTILINE
for filename in filenames:
f = open(filename, 'r')
content = f.read()
f.close()
if not re.search(param_re, content, re_flags): #missing; add to config
            if not content.endswith('\n'): content += '\n'
content += content_to_add
elif replace_if_exists: #replace in config
content = re.sub(param_re, content_to_add, content, flags=re_flags)
f = open(filename, 'w')
f.write(content)
f.close()
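#Illustrative use of modify_config() (the path and values are assumptions):
# this would replace the rpc-host line of a counterparty server.conf in place,
# or append it if no such line exists yet:
#   modify_config(r'^rpc-host=.*?$', 'rpc-host=127.0.0.1',
#                 '/home/xcp/.config/counterparty/server.conf')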
def modify_cp_config(param_re, content_to_add, config, net, replace_if_exists=True):
assert net in ('mainnet', 'testnet')
net_path_part = '.testnet' if net == 'testnet' else ''
cfg_filebase = {
'counterparty': {'path': "counterparty", 'file': "server%s.conf" % net_path_part},
'counterblock': {'path': "counterblock", 'file': "server%s.conf" % net_path_part},
'counterblock-modules': {'path': "counterblock", 'file': "modules%s.conf" % net_path_part},
'counterblock-counterwallet': {'path': "counterblock", 'file': "counterwallet%s.conf" % net_path_part}
}
assert config in cfg_filebase.keys()
cfg_filename = os.path.join(paths['config_path.template'] % cfg_filebase[config]['path'], cfg_filebase[config]['file'])
modify_config(param_re, content_to_add, cfg_filename, replace_if_exists=replace_if_exists)
def ask_question(question, options, default_option):
assert isinstance(options, (list, tuple))
assert default_option in options
answer = None
while True:
answer = input(question + ": ")
answer = answer.lower()
if answer and answer not in options:
logging.error("Please enter one of: " + ', '.join(options))
else:
if answer == '': answer = default_option
break
return answer
def git_repo_clone(repo_name, repo_url, repo_dest_dir, branch="AUTO", for_user="xcp", for_group="xcp", hash=None):
if branch == 'AUTO':
try:
branch = subprocess.check_output("cd %s && git rev-parse --abbrev-ref HEAD"
% repo_dest_dir, shell=True).strip().decode('utf-8')
except:
branch = "master" #branch doesn't exist, default to master
logging.info("Checking out/updating %s:%s from git..." % (repo_name, branch))
if os.path.exists(repo_dest_dir):
runcmd("cd %s && git pull origin %s" % (repo_dest_dir, branch))
else:
runcmd("git clone -b %s %s %s" % (branch, repo_url, repo_dest_dir))
if hash:
runcmd("cd %s && git reset --hard %s" % (repo_dest_dir, hash))
runcmd("cd %s && git config core.sharedRepository group && find %s -type d -print0 | xargs -0 chmod g+s" % (
repo_dest_dir, repo_dest_dir)) #to allow for group git actions
runcmd("chown -R %s:%s %s" % (for_user, for_group, repo_dest_dir))
runcmd("chmod -R u+rw,g+rw,o+r,o-w %s" % (repo_dest_dir,)) #just in case
def config_runit_for_service(dist_path, service_name, enabled=True, manual_control=True):
assert os.path.exists("%s/linux/runit/%s" % (dist_path, service_name))
#stop old upstart service and remove old upstart init scripts (if present)
if os.path.exists("/etc/init/%s.conf" % service_name):
runcmd("service %s stop" % service_name, abort_on_failure=False)
runcmd("rm -f /etc/init/%s.conf /etc/init/%s.conf.override" % (service_name, service_name))
runcmd("cp -dRf --preserve=mode %s/linux/runit/%s /etc/sv/" % (dist_path, service_name))
if manual_control:
runcmd("touch /etc/sv/%s/down" % service_name)
else:
runcmd("rm -f /etc/sv/%s/down" % service_name)
if enabled:
runcmd("ln -sf /etc/sv/%s /etc/service/" % service_name)
#runcmd("sv stop %s" % service_name) #prevent service from starting automatically
else:
runcmd("rm -f /etc/service/%s" % service_name) #service will automatically be stopped within 5 seconds
def config_runit_disable_manual_control(service_name):
runcmd("rm -f /etc/service/%s/down" % service_name)
def remove_runit(service_name):
runcmd("rm -f /etc/service/%s" % service_name)
runcmd("rm -rf /etc/sv/%s" % service_name)
###
### PRIMARY FUNCTIONS
###
def do_federated_node_prerun_checks(require_sudo=True):
#make sure this is running on a supported OS
if os.name != "posix" or platform.dist()[0] != "Ubuntu" or platform.architecture()[0] != '64bit':
logging.error("Only 64bit Ubuntu Linux is supported at this time")
sys.exit(1)
ubuntu_release = platform.linux_distribution()[1]
if ubuntu_release != "14.04":
logging.error("Only Ubuntu 14.04 supported for Counterblock Federated Node install.")
sys.exit(1)
#script must be run as root
if os.geteuid() != 0:
logging.error("This script must be run as root (use 'sudo' to run)")
sys.exit(1)
assert os.name == "posix"
if require_sudo and "SUDO_USER" not in os.environ:
logging.error("Please use `sudo` to run this script.")
sys.exit(1)
def get_base_paths():
paths = {}
paths['sys_python_path'] = os.path.dirname(sys.executable)
paths['base_path'] = os.path.join(USER_HOMEDIR, REPO_NAME)
#^ the dir of where counterparty source was downloaded to
#find the location of the virtualenv command and make sure it exists
paths['virtualenv_path'] = "/usr/bin/virtualenv"
paths['virtualenv_args'] = "--python=python%s" % PYTHON3_VER
#compose the rest of the paths...
paths['dist_path'] = os.path.join(paths['base_path'], "dist")
paths['env_path'] = os.path.join(paths['base_path'], "env") # home for the virtual environment
#the pip executable that we'll be using does not exist yet, but it will, once we've created the virtualenv
paths['pip_path'] = os.path.join(paths['env_path'], "bin", "pip")
paths['python_path'] = os.path.join(paths['env_path'], "bin", "python3")
#for now, counterblockd currently uses Python 2.7 due to gevent-socketio's lack of support for Python 3
#because of this, it needs its own virtual environment
paths['virtualenv_args.counterblock'] = "--system-site-packages --python=python2.7"
paths['env_path.counterblock'] = os.path.join(paths['base_path'], "env.counterblock") # home for the virtual environment
paths['pip_path.counterblock'] = os.path.join(paths['env_path.counterblock'], "bin", "pip")
paths['python_path.counterblock'] = os.path.join(paths['env_path.counterblock'], "bin", "python")
#user paths
paths['log_path.template'] = os.path.join(USER_HOMEDIR, ".cache", "%s", "log")
paths['config_path.template'] = os.path.join(USER_HOMEDIR, ".config", "%s")
paths['data_path.template'] = os.path.join(USER_HOMEDIR, ".local", "share", "%s")
return paths
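#For reference (derived from the constants above): with USERNAME="xcp" the
# templates resolve, for e.g. counterparty, to:
#   config: /home/xcp/.config/counterparty
#   data:   /home/xcp/.local/share/counterparty
#   logs:   /home/xcp/.cache/counterparty/log
# and the build itself lives under /home/xcp/federatednode_build, with its
# python3 virtualenv at /home/xcp/federatednode_build/env.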
def remove_old():
"remove old things from the previous counterpartyd_build system"
#program links/executiables
runcmd("rm -f /usr/local/bin/counterpartyd /usr/local/bin/counterblockd /usr/local/bin/armory_utxsvr")
#remove any insight stuff...
runcmd("rm -rf /etc/sv/insight /etc/sv/insight-testnet /etc/service/insight /etc/service/insight-testnet")
#runit entries
for service_name in ("bitcoind", "bitcoind-testnet",
"counterpartyd", "counterpartyd-testnet", "counterblockd", "counterblockd-testnet"):
remove_runit(service_name)
def do_base_setup(run_as_user):
"""This creates the xcp and xcpd users and checks out the federatednode_build system from git"""
#change time to UTC
runcmd("ln -sf /usr/share/zoneinfo/UTC /etc/localtime")
#install some necessary base deps
runcmd("apt-key update && apt-get update")
runcmd("apt-get -y install git-core software-properties-common python-software-properties build-essential ssl-cert ntp runit curl libjpeg8-dev libgmp-dev")
#install node-js
#node-gyp building has ...issues out of the box on Ubuntu... use Chris Lea's nodejs build instead, which is newer
runcmd("apt-get -y remove nodejs npm gyp")
runcmd("add-apt-repository -y ppa:chris-lea/node.js")
runcmd("apt-get update")
runcmd("apt-get -y install nodejs") #includes npm
gypdir = None
try:
import gyp
gypdir = os.path.dirname(gyp.__file__)
except:
pass
else:
runcmd("mv %s %s_bkup" % (gypdir, gypdir))
#^ fix for https://github.com/TooTallNate/node-gyp/issues/363
#Create xcp user, under which the files will be stored, and who will own the files, etc
try:
pwd.getpwnam(USERNAME)
except:
logging.info("Creating user '%s' ..." % USERNAME)
runcmd("adduser --system --disabled-password --shell /bin/false --group %s" % USERNAME)
#Create xcpd user (to run counterparty, counterblock, bitcoind, nginx) if not already made
try:
pwd.getpwnam(DAEMON_USERNAME)
except:
logging.info("Creating user '%s' ..." % DAEMON_USERNAME)
runcmd("adduser --system --disabled-password --shell /bin/false --ingroup nogroup --home %s %s"
% (USER_HOMEDIR, DAEMON_USERNAME))
#add the run_as_user to the xcp group
runcmd("adduser %s %s" % (run_as_user, USERNAME))
#Check out federatednode_build repo under this user's home dir and use that for the build
git_repo_clone(REPO_NAME, REPO_URL, paths['base_path'], questions.branch, for_user=run_as_user)
#enhance fd limits for the xcpd user
runcmd("cp -af %s/linux/other/xcpd_security_limits.conf /etc/security/limits.d/" % paths['dist_path'])
def do_backend_rpc_setup():
"""Installs and configures bitcoind"""
def install_from_source(): #TODO: FIX
        #Install bitcoind (btcdrak's 0.12 addrindex branch)
BITCOIND_VERSION="0.12-0"
BITCOIND_DEB_VERSION="0.12.0"
#Install deps (see https://help.ubuntu.com/community/bitcoin)
runcmd("apt-get -y install build-essential libtool autotools-dev autoconf pkg-config libssl-dev libboost-dev libboost-all-dev software-properties-common checkinstall")
runcmd("add-apt-repository -y ppa:bitcoin/bitcoin")
runcmd("apt-get update")
runcmd("apt-get -y install libdb4.8-dev libdb4.8++-dev")
runcmd("apt-get -y remove bitcoin.addrindex", abort_on_failure=False) #remove old version if it exists
runcmd("rm -rf /tmp/bitcoin-addrindex-%s" % BITCOIND_VERSION)
runcmd("wget -O /tmp/bitcoin-addrindex-%s.tar.gz https://github.com/btcdrak/bitcoin/archive/addrindex-%s.tar.gz"
% (BITCOIND_VERSION, BITCOIND_VERSION))
runcmd("cd /tmp && tar -zxvf /tmp/bitcoin-addrindex-%s.tar.gz" % BITCOIND_VERSION)
runcmd("cd /tmp/bitcoin-addrindex-%s && ./autogen.sh && ./configure --without-gui && make && checkinstall -y -D --install --pkgversion=%s"
% (BITCOIND_VERSION, BITCOIND_DEB_VERSION))
runcmd("rm -rf /tmp/bitcoin-addrindex-%s" % BITCOIND_VERSION)
runcmd("ln -sf /usr/local/bin/bitcoind /usr/bin/bitcoind && ln -sf /usr/local/bin/bitcoin-cli /usr/bin/bitcoin-cli")
def install_binaries():
BITCOIND_URL="https://github.com/btcdrak/bitcoin/releases/download/v0.12.0-addrindex/bitcoin-0.12.0-addrindex-linux64.tar.gz"
BITCOIND_FILENAME="bitcoin-0.12.0-addrindex-linux64.tar.gz"
BITCOIND_DIRNAME="bitcoin-0.12.0"
BITCOIND_SHA256_HASH="1c4a337ba20d2ea61aac0b595af22276bd552568525f456f9372209d893ae925"
runcmd("apt-get -y remove bitcoin.addrindex bitcoin-addrindex-0.10", abort_on_failure=False) #remove old versions
if not os.path.exists("/tmp/%s" % BITCOIND_DIRNAME):
runcmd("wget -O /tmp/%s %s" % (BITCOIND_FILENAME, BITCOIND_URL))
runcmd('bash -c "echo \"%s /tmp/%s\" | sha256sum -c"' % (BITCOIND_SHA256_HASH, BITCOIND_FILENAME))
runcmd("tar -C /tmp --no-overwrite-dir -zxvf /tmp/%s" % BITCOIND_FILENAME)
#dont install libbitcoinconsensus.so for now on the system...not needed
runcmd("install -C --backup=off -m 755 -o root -g root /tmp/%s/bin/* /usr/local/bin/" % BITCOIND_DIRNAME)
runcmd("ln -sf /usr/local/bin/bitcoind /usr/bin/bitcoind && ln -sf /usr/local/bin/bitcoin-cli /usr/bin/bitcoin-cli")
DEFAULT_CONFIG = "rpcuser=rpc\nrpcpassword=%s\nserver=1\ndaemon=1\nrpcthreads=1000\nrpctimeout=300\ntxindex=1\naddrindex=1\nminrelaytxfee=0.00005\nlimitfreerelay=0"
DEFAULT_CONFIG_TESTNET = DEFAULT_CONFIG + "\ntestnet=1"
backend_rpc_password = pass_generator()
backend_rpc_password_testnet = pass_generator()
    #We would prefer to compile from source, but the OpenSSL version shipped with
    # Ubuntu 14.04 causes problems with bitcoind; using prebuilt binaries is more
    # deterministic for now...
install_binaries()
    #Do basic initial bitcoin config (for both testnet and mainnet)
runcmd("mkdir -p ~%s/.bitcoin" % (USERNAME,))
if not os.path.exists(os.path.join(USER_HOMEDIR, '.bitcoin', 'bitcoin.conf')):
runcmd(r"""bash -c 'echo -e "%s" > ~%s/.bitcoin/bitcoin.conf'""" % (
DEFAULT_CONFIG % backend_rpc_password, USERNAME))
else: #grab the existing RPC password
backend_rpc_password = subprocess.check_output(
r"""bash -c "cat ~%s/.bitcoin/bitcoin.conf | sed -n 's/.*rpcpassword=\([^ \n]*\).*/\1/p'" """ % USERNAME, shell=True).strip().decode('utf-8')
if not os.path.exists(os.path.join(USER_HOMEDIR, '.bitcoin', 'bitcoin.testnet.conf')):
runcmd(r"""bash -c 'echo -e "%s" > ~%s/.bitcoin/bitcoin.testnet.conf'""" % (
DEFAULT_CONFIG_TESTNET % backend_rpc_password_testnet, USERNAME))
else:
backend_rpc_password_testnet = subprocess.check_output(
r"""bash -c "cat ~%s/.bitcoin/bitcoin.testnet.conf | sed -n 's/.*rpcpassword=\([^ \n]*\).*/\1/p'" """
% USERNAME, shell=True).strip().decode('utf-8')
#set permissions
runcmd("chown -R %s:%s ~%s/.bitcoin" % (DAEMON_USERNAME, USERNAME, USERNAME,))
#install logrotate file
runcmd("cp -dRf --preserve=mode %s/linux/logrotate/bitcoind /etc/logrotate.d/bitcoind" % paths['dist_path'])
#set up runit startup scripts
config_runit_for_service(paths['dist_path'], "bitcoin", enabled=questions.with_mainnet)
config_runit_for_service(paths['dist_path'], "bitcoin-testnet", enabled=questions.with_testnet)
return backend_rpc_password, backend_rpc_password_testnet
def do_counterparty_setup(run_as_user, backend_rpc_password, backend_rpc_password_testnet):
"""Installs and configures counterparty-server and counterblock"""
username_uid = pwd.getpwnam(USERNAME).pw_uid
username_gid = grp.getgrnam(USERNAME).gr_gid
daemon_username_uid = pwd.getpwnam(DAEMON_USERNAME).pw_uid
def install_dependencies():
runcmd("apt-get -y update")
runcmd("apt-get -y install runit software-properties-common python-software-properties git-core wget \
python3 python3-setuptools python3-dev python3-pip build-essential python3-sphinx python-virtualenv libsqlite3-dev python3-apsw python3-zmq")
if questions.with_counterblock:
#counterblockd currently uses Python 2.7 due to gevent-socketio's lack of support for Python 3
runcmd("apt-get -y install python python-dev python-setuptools python-pip python-sphinx python-zmq libzmq3 libzmq3-dev libxml2-dev libxslt-dev zlib1g-dev libimage-exiftool-perl libevent-dev cython")
#install mongodb
MONGO_VERSION = "3.2.3"
runcmd("apt-get -y remove mongodb mongodb-server") #remove ubuntu stock packages, if installed
runcmd("apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10")
runcmd("/bin/bash -c \"echo 'deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.0 multiverse' | sudo tee /etc/apt/sources.list.d/mongodb.list\"")
runcmd("apt-get update")
runcmd("apt-get -y --force-yes install mongodb-org=%s mongodb-org-server=%s mongodb-org-shell=%s mongodb-org-mongos=%s mongodb-org-tools=%s" % (
MONGO_VERSION, MONGO_VERSION, MONGO_VERSION, MONGO_VERSION, MONGO_VERSION))
for p in ('mongodb-org', 'mongodb-org-server', 'mongodb-org-shell', 'mongodb-org-mongos', 'mongodb-org-tools'):
runcmd("echo \"%s hold\" | sudo dpkg --set-selections" % p)
#replace use of mongo init script with our runit version
if os.path.exists("/etc/init.d/mongod"):
runcmd("""bash -c "echo 'manual' > /etc/init/mongod.override" """)
runcmd("service mongod stop", abort_on_failure=False)
config_runit_for_service(paths['dist_path'], "mongod", manual_control=False)
#also install redis
runcmd("apt-get -y install redis-server")
#install sqlite utilities (not technically required, but nice to have)
runcmd("apt-get -y install sqlite sqlite3 libleveldb-dev")
def create_virtualenv():
def create_venv(env_path, pip_path, python_path, virtualenv_args, delete_if_exists=True):
if paths['virtualenv_path'] is None or not os.path.exists(paths['virtualenv_path']):
logging.debug("ERROR: virtualenv missing (%s)" % (paths['virtualenv_path'],))
sys.exit(1)
if delete_if_exists and os.path.exists(env_path):
logging.warning("Deleting existing virtualenv...")
shutil.rmtree(env_path)
assert not os.path.exists(os.path.join(env_path, 'bin'))
logging.info("Creating virtualenv at '%s' ..." % env_path)
runcmd("%s %s %s" % (paths['virtualenv_path'], virtualenv_args, env_path))
#group should be xcp and have write permissions
runcmd("chown -R %s:%s %s && chmod -R g+w %s" % (run_as_user, USERNAME, env_path, env_path))
#pip should now exist
if not os.path.exists(pip_path):
logging.error("pip does not exist at path '%s'" % pip_path)
sys.exit(1)
create_venv(paths['env_path'], paths['pip_path'], paths['python_path'], paths['virtualenv_args'])
if questions.with_counterblock:
#as counterblockd uses python 2.x, it needs its own virtualenv
runcmd("rm -rf %s && mkdir -p %s" % (paths['env_path.counterblock'], paths['env_path.counterblock']))
create_venv(paths['env_path.counterblock'], paths['pip_path.counterblock'], paths['python_path.counterblock'],
paths['virtualenv_args.counterblock'], delete_if_exists=False)
def create_default_dirs():
for dir_base in ["counterparty", "counterblock"]:
for dir in [paths['log_path.template'] % dir_base, paths['config_path.template'] % dir_base, paths['data_path.template'] % dir_base]:
if not os.path.exists(dir):
os.makedirs(dir)
os.chown(dir, daemon_username_uid, username_gid)
os.chmod(dir, 0o775)
def create_default_config():
DEFAULT_CONFIG = "[Default]\nbackend-user=rpc\nbackend-password=1234\nrpc-password=xcppw1234\n"
DEFAULT_CONFIG_TESTNET = DEFAULT_CONFIG + "\ntestnet=1\n"
DEFAULT_CONFIG_COUNTERBLOCK = "[Default]\nbackend-user=rpc\nbackend-password=1234\ncounterparty-password=xcppw1234\nrpc-host=0.0.0.0\nsocketio-host=0.0.0.0\nsocketio-chat-host=0.0.0.0\nredis-enable-apicache=0\n"
DEFAULT_CONFIG_COUNTERBLOCK_TESTNET = DEFAULT_CONFIG_COUNTERBLOCK + "\ntestnet=1\n"
        #default modules installed for non-counterwallet build
DEFAULT_CONFIG_COUNTERBLOCK_NONCW_MODULES = """[LoadModule]
lib/modules/assets = True
lib/modules/dex = True
lib/modules/transaction_stats = True"""
DEFAULT_CONFIG_COUNTERBLOCK_NONCW_MODULES_TESTNET = DEFAULT_CONFIG_COUNTERBLOCK_NONCW_MODULES
#modules installed for counterwallet build
DEFAULT_CONFIG_COUNTERBLOCK_CW_MODULES = """[LoadModule]
lib/modules/assets = True
lib/modules/counterwallet = True
lib/modules/counterwallet_iofeeds = True
lib/modules/dex = True
lib/modules/transaction_stats = True
lib/modules/betting = True"""
DEFAULT_CONFIG_COUNTERBLOCK_CW_MODULES_TESTNET = DEFAULT_CONFIG_COUNTERBLOCK_CW_MODULES
DEFAULT_CONFIG_COUNTERBLOCK_CW_CONF = "[Default]\nsupport-email="
DEFAULT_CONFIG_COUNTERBLOCK_CW_CONF_TESTNET = DEFAULT_CONFIG_COUNTERBLOCK_CW_CONF
def create_config(dir_base, cfg_name, default_config):
created = False
cfg_path = os.path.join(paths['config_path.template'] % dir_base, cfg_name)
cfg_missing = not os.path.exists(cfg_path)
if not os.path.exists(cfg_path):
created = True
logging.info("Creating new configuration file at: %s" % cfg_path)
#create a default config file
cfg = open(cfg_path, 'w')
cfg.write(default_config)
cfg.close()
logging.info("%s config file has been created at '%s'" % (dir_base, cfg_path))
else:
logging.info("%s config file already exists at: '%s'" % (dir_base, cfg_path))
#set/reset proper file ownership and mode
os.chown(cfg_path, daemon_username_uid, username_gid)
os.chmod(cfg_path, 0o600)
return created
#base counterparty and counterblock config
create_config('counterparty', 'server.conf', DEFAULT_CONFIG)
if questions.with_testnet:
create_config('counterparty', 'server.testnet.conf', DEFAULT_CONFIG_TESTNET)
if questions.with_counterblock:
create_config('counterblock', 'server.conf', DEFAULT_CONFIG_COUNTERBLOCK)
if questions.with_testnet:
create_config('counterblock', 'server.testnet.conf', DEFAULT_CONFIG_COUNTERBLOCK_TESTNET)
#modules config
create_config('counterblock', 'modules.conf',
DEFAULT_CONFIG_COUNTERBLOCK_CW_MODULES if questions.role == 'counterwallet' else DEFAULT_CONFIG_COUNTERBLOCK_NONCW_MODULES)
if questions.with_testnet:
create_config('counterblock', 'modules.testnet.conf',
DEFAULT_CONFIG_COUNTERBLOCK_CW_MODULES_TESTNET if questions.role == 'counterwallet' else DEFAULT_CONFIG_COUNTERBLOCK_NONCW_MODULES_TESTNET)
#counterwallet conf file
if questions.role == 'counterwallet':
create_config('counterblock', 'counterwallet.conf', DEFAULT_CONFIG_COUNTERBLOCK_CW_CONF)
if questions.with_testnet:
create_config('counterblock', 'counterwallet.testnet.conf', DEFAULT_CONFIG_COUNTERBLOCK_CW_CONF_TESTNET)
def alter_config():
        #modify our configuration values as necessary
counterparty_rpc_password = '1234' if questions.role in ['counterparty-server_only', 'counterblock_basic'] \
and questions.server_public == 'y' else pass_generator()
counterparty_rpc_password_testnet = '1234' if questions.role in ['counterparty-server_only', 'counterblock_basic'] \
and questions.server_public == 'y' else pass_generator()
for net, backend_password, cp_password in (
('mainnet', backend_rpc_password, counterparty_rpc_password),
('testnet', backend_rpc_password_testnet, counterparty_rpc_password_testnet)):
# Check if the config was chosen to be replaced. Prevents accessing non-existing files.
if (net == 'testnet' and not questions.with_testnet) \
or (net == 'mainnet' and not questions.with_mainnet):
continue
#modify the default stored bitcoind passwords in counterparty conf
modify_cp_config(r'^backend-password=.*?$', 'backend-password=%s' % backend_password,
config='counterparty', net=net)
#modify the counterparty API rpc password in counterparty conf
modify_cp_config(r'^rpc\-password=.*?$', 'rpc-password=%s' % cp_password,
config='counterparty', net=net)
#backend for counterpartyd should be addrindex
modify_cp_config(r'^backend\-name=.*?$', 'backend-name=addrindex',
config='counterparty', net=net)
if questions.role in ['counterparty-server_only', 'counterblock_basic'] and questions.server_public == 'y':
modify_cp_config(r'^rpc\-host=.*?$', 'rpc-host=0.0.0.0', config='counterparty', net=net)
if questions.with_counterblock:
modify_cp_config(r'^(backend\-rpc|backend)\-password=.*?$',
'backend-password=%s' % backend_password, config='counterblock', net=net)
#modify the counterparty API rpc password in counterblockd.conf
modify_cp_config(r'^counterparty\-password=.*?$',
'counterparty-password=%s' % cp_password, config='counterblock', net=net)
#role-specific counterblockd.conf values
if questions.role == 'counterwallet':
modify_cp_config(r'^support\-email=.*?$',
'support-email=%s' % questions.counterwallet_support_email, config='counterblock-counterwallet', net=net) #may be blank string
if questions.role == 'counterblock_basic' and questions.server_public == 'y':
modify_cp_config(r'^rpc\-host=.*?$', 'rpc-host=0.0.0.0', config='counterblock', net=net)
def configure_startup():
#link over counterparty and counterblock script
runcmd("ln -sf %s/bin/counterparty-server /usr/local/bin/counterparty-server" % paths['env_path'])
runcmd("ln -sf %s/bin/counterparty-client /usr/local/bin/counterparty-client" % paths['env_path'])
if questions.with_counterblock:
runcmd("ln -sf %s/bin/counterblock /usr/local/bin/counterblock" % paths['env_path.counterblock'])
if questions.with_mainnet:
config_runit_for_service(paths['dist_path'], "counterparty", manual_control=not questions.start_on_boot)
else:
runcmd("rm -f /etc/service/counterparty")
if questions.with_testnet:
config_runit_for_service(paths['dist_path'], "counterparty-testnet",
enabled=questions.with_testnet, manual_control=not questions.start_on_boot)
else:
runcmd("rm -f /etc/service/counterparty-testnet")
if questions.with_counterblock and questions.with_mainnet:
config_runit_for_service(paths['dist_path'], "counterblock", enabled=questions.with_counterblock,
manual_control=not questions.start_on_boot)
else:
runcmd("rm -f /etc/service/counterblock")
if questions.with_counterblock and questions.with_testnet:
config_runit_for_service(paths['dist_path'], "counterblock-testnet",
enabled=questions.with_counterblock and questions.with_testnet, manual_control=not questions.start_on_boot)
else:
runcmd("rm -f /etc/service/counterblock-testnet")
install_dependencies()
create_virtualenv()
create_default_dirs()
create_default_config()
alter_config()
install_base_via_pip(questions.branch)
configure_startup()
def install_base_via_pip(branch="AUTO"):
COUNTERPARTY_LIB_DIST_PATH = os.path.join(paths['dist_path'], "counterparty-lib")
COUNTERPARTY_CLI_DIST_PATH = os.path.join(paths['dist_path'], "counterparty-cli")
COUNTERBLOCK_DIST_PATH = os.path.join(paths['dist_path'], "counterblock")
BRANCH_SETTINGS_PATH = os.path.join(paths['base_path'], ".base_branch")
found_counterblock = os.path.exists(paths['env_path.counterblock'])
if branch == "AUTO": #used with updates
assert os.path.exists(BRANCH_SETTINGS_PATH)
f = open(BRANCH_SETTINGS_PATH, 'r')
branch = f.read()
f.close()
assert branch != "AUTO"
PIP_COUNTERPARTY_LIB = "https://github.com/CounterpartyXCP/counterpartyd/archive/%s.zip#egg=counterpartylib" % branch
PIP_COUNTERPARTY_CLI = "https://github.com/CounterpartyXCP/counterparty-cli/archive/%s.zip#egg=counterpartycli" % branch
PIP_COUNTERBLOCK = "https://github.com/CounterpartyXCP/counterblock/archive/%s.zip#egg=counterblock" % branch
#pip install counterparty-cli, counterparty-lib and (optionally) counterblock for the chosen branch
#only do this if there's not a directory there (this allows people to check out the repo and put it at that path)
do_bootstrap = not os.path.exists(paths['data_path.template'] % "counterparty")
if not os.path.exists(COUNTERPARTY_LIB_DIST_PATH) or os.path.islink(COUNTERPARTY_LIB_DIST_PATH):
runcmd("sudo su -s /bin/bash -c '%s install --upgrade %s' %s"
% (paths['pip_path'], PIP_COUNTERPARTY_LIB, USERNAME))
if not os.path.islink(COUNTERPARTY_LIB_DIST_PATH) or not os.path.exists(os.readlink(COUNTERPARTY_LIB_DIST_PATH)):
runcmd("ln -sf %s %s" % ( #create symlink
os.path.join(paths['env_path'], "lib/python" + PYTHON3_VER, "site-packages/counterpartylib"),
COUNTERPARTY_LIB_DIST_PATH))
else:
assert os.path.exists(os.path.join(COUNTERPARTY_LIB_DIST_PATH, "setup.py"))
runcmd("%s %s install" % (paths['python_path'], os.path.join(COUNTERPARTY_LIB_DIST_PATH, "setup.py")))
if not os.path.exists(COUNTERPARTY_CLI_DIST_PATH) or os.path.islink(COUNTERPARTY_CLI_DIST_PATH):
runcmd("sudo su -s /bin/bash -c '%s install --upgrade %s' %s" % (paths['pip_path'], PIP_COUNTERPARTY_CLI, USERNAME))
if not os.path.islink(COUNTERPARTY_CLI_DIST_PATH) or not os.path.exists(os.readlink(COUNTERPARTY_CLI_DIST_PATH)):
runcmd("ln -sf %s %s" % ( #create symlink
os.path.join(paths['env_path'], "lib/python" + PYTHON3_VER, "site-packages/counterpartycli"),
COUNTERPARTY_CLI_DIST_PATH))
else:
assert os.path.exists(os.path.join(COUNTERPARTY_CLI_DIST_PATH, "setup.py"))
runcmd("%s %s install" % (paths['python_path'], os.path.join(COUNTERPARTY_CLI_DIST_PATH, "setup.py")))
#install bootstrap
if do_bootstrap:
runcmd("bash -c 'XDG_DATA_HOME=%s %s bootstrap' %s"
% (xcp_user_data_dir, os.path.join(paths['env_path'], "bin", "counterparty-server"), DAEMON_USERNAME))
runcmd("chown -R %s:%s %s" % (DAEMON_USERNAME, USERNAME, paths['data_path.template'] % "counterparty"))
if found_counterblock:
if not os.path.exists(COUNTERBLOCK_DIST_PATH) or os.path.islink(COUNTERBLOCK_DIST_PATH):
runcmd("sudo su -s /bin/bash -c '%s install --upgrade %s' %s"
% (paths['pip_path.counterblock'], PIP_COUNTERBLOCK, USERNAME))
if not os.path.islink(COUNTERBLOCK_DIST_PATH) or not os.path.exists(os.readlink(COUNTERBLOCK_DIST_PATH)):
runcmd("ln -sf %s %s" % (
os.path.join(paths['env_path.counterblock'], "lib/python" + PYTHON2_VER, "site-packages/counterblock"),
COUNTERBLOCK_DIST_PATH))
else:
assert os.path.exists(os.path.join(COUNTERBLOCK_DIST_PATH, "setup.py"))
runcmd("%s %s install" % (paths['python_path.counterblock'], os.path.join(COUNTERBLOCK_DIST_PATH, "setup.py")))
if branch != "AUTO":
f = open(BRANCH_SETTINGS_PATH, 'w')
f.write(branch)
f.close()
def do_nginx_setup(run_as_user, enable=True):
if not enable:
runcmd("apt-get -y remove nginx-openresty", abort_on_failure=False)
remove_runit("nginx")
return
#Build and install nginx (openresty) on Ubuntu
#Most of these build commands from http://brian.akins.org/blog/2013/03/19/building-openresty-on-ubuntu/
OPENRESTY_VER = "1.9.7.3"
#uninstall nginx if already present
runcmd("apt-get -y remove nginx")
#install deps
runcmd("apt-get -y install make ruby1.9.1 ruby1.9.1-dev git-core libpcre3-dev libxslt1-dev libgd2-xpm-dev libgeoip-dev unzip zip build-essential libssl-dev")
runcmd("gem install fpm")
#grab openresty and compile
runcmd("rm -rf /tmp/openresty /tmp/ngx_openresty-* /tmp/nginx-openresty.tar.gz /tmp/nginx-openresty*.deb")
runcmd('''wget -O /tmp/nginx-openresty.tar.gz http://openresty.org/download/ngx_openresty-%s.tar.gz''' % OPENRESTY_VER)
runcmd("tar -C /tmp -zxvf /tmp/nginx-openresty.tar.gz")
runcmd('''cd /tmp/ngx_openresty-%s && ./configure \
--with-luajit \
--sbin-path=/usr/sbin/nginx \
--conf-path=/etc/nginx/nginx.conf \
--error-log-path=/var/log/nginx/error.log \
--http-client-body-temp-path=/var/lib/nginx/body \
--http-fastcgi-temp-path=/var/lib/nginx/fastcgi \
--http-log-path=/var/log/nginx/access.log \
--http-proxy-temp-path=/var/lib/nginx/proxy \
--http-scgi-temp-path=/var/lib/nginx/scgi \
--http-uwsgi-temp-path=/var/lib/nginx/uwsgi \
--lock-path=/var/lock/nginx.lock \
--pid-path=/var/run/nginx.pid \
--with-http_geoip_module \
--with-http_gzip_static_module \
--with-http_realip_module \
--with-http_ssl_module \
--with-http_sub_module \
--with-http_xslt_module \
--with-ipv6 \
--with-sha1=/usr/include/openssl \
--with-md5=/usr/include/openssl \
--with-http_stub_status_module \
--with-http_secure_link_module \
--with-http_sub_module && make''' % OPENRESTY_VER)
#set up the build environment
runcmd('''cd /tmp/ngx_openresty-%s && make install DESTDIR=/tmp/openresty \
&& mkdir -p /tmp/openresty/var/lib/nginx \
&& install -m 0755 -D %s/linux/runit/nginx/run /tmp/openresty/etc/sv/nginx/run \
&& install -m 0755 -D %s/linux/nginx/nginx.conf /tmp/openresty/etc/nginx/nginx.conf \
&& install -m 0755 -D %s/linux/nginx/counterblock.conf /tmp/openresty/etc/nginx/sites-enabled/counterblock.conf \
&& install -m 0755 -D %s/linux/nginx/counterblock_api.inc /tmp/openresty/etc/nginx/sites-enabled/counterblock_api.inc \
&& install -m 0755 -D %s/linux/nginx/counterblock_api_cache.inc /tmp/openresty/etc/nginx/sites-enabled/counterblock_api_cache.inc \
&& install -m 0755 -D %s/linux/nginx/counterblock_socketio.inc /tmp/openresty/etc/nginx/sites-enabled/counterblock_socketio.inc \
&& install -m 0755 -D %s/linux/logrotate/nginx /tmp/openresty/etc/logrotate.d/nginx''' % (
OPENRESTY_VER, paths['dist_path'], paths['dist_path'], paths['dist_path'], paths['dist_path'], paths['dist_path'], paths['dist_path'], paths['dist_path']))
#package it up using fpm
runcmd('''cd /tmp && fpm -s dir -t deb -n nginx-openresty -v %s --iteration 1 -C /tmp/openresty \
--description "openresty %s" \
--conflicts nginx \
--conflicts nginx-common \
-d libxslt1.1 \
-d libgeoip1 \
-d geoip-database \
-d libpcre3 \
--config-files /etc/nginx/nginx.conf \
--config-files /etc/nginx/sites-enabled/counterblock.conf \
--config-files /etc/nginx/fastcgi.conf.default \
--config-files /etc/nginx/win-utf \
--config-files /etc/nginx/fastcgi_params \
--config-files /etc/nginx/nginx.conf \
--config-files /etc/nginx/koi-win \
--config-files /etc/nginx/nginx.conf.default \
--config-files /etc/nginx/mime.types.default \
--config-files /etc/nginx/koi-utf \
--config-files /etc/nginx/uwsgi_params \
--config-files /etc/nginx/uwsgi_params.default \
--config-files /etc/nginx/fastcgi_params.default \
--config-files /etc/nginx/mime.types \
--config-files /etc/nginx/scgi_params.default \
--config-files /etc/nginx/scgi_params \
--config-files /etc/nginx/fastcgi.conf \
etc usr var''' % (OPENRESTY_VER, OPENRESTY_VER))
#now install the .deb package that was created (along with its deps)
runcmd("apt-get -y install libxslt1.1 libgeoip1 geoip-database libpcre3")
runcmd("dpkg -i /tmp/nginx-openresty_%s-1_amd64.deb" % OPENRESTY_VER)
    #remove any .dpkg-old or .dpkg-dist files that may have been left in the nginx config dir
runcmd("rm -f /etc/nginx/sites-enabled/*.dpkg-old /etc/nginx/sites-enabled/*.dpkg-dist")
#clean up after ourselves
runcmd("rm -rf /tmp/openresty /tmp/ngx_openresty-* /tmp/nginx-openresty.tar.gz /tmp/nginx-openresty*.deb")
#set up init
runcmd("ln -sf /etc/sv/nginx /etc/service/")
def do_armory_utxsvr_setup(run_as_user, enable=True):
if not enable:
remove_runit("armory_utxsvr")
remove_runit("armory_utxsvr-testnet")
runcmd("apt-get -y remove armory", abort_on_failure=False)
runcmd("rm -f /usr/local/bin/armory_utxsvr")
return
runcmd("apt-get -y install xvfb python-qt4 python-twisted python-psutil xdg-utils hicolor-icon-theme")
ARMORY_VERSION = "0.93.3_ubuntu-64bit"
if not os.path.exists("/tmp/armory_%s.deb" % ARMORY_VERSION):
runcmd("wget -O /tmp/armory_%s.deb https://s3.amazonaws.com/bitcoinarmory-releases/armory_%s.deb"
% (ARMORY_VERSION, ARMORY_VERSION))
runcmd("mkdir -p /usr/share/desktop-directories/") #bug fix (see http://askubuntu.com/a/406015)
runcmd("dpkg -i /tmp/armory_%s.deb" % ARMORY_VERSION)
runcmd("mkdir -p ~%s/.armory ~%s/.armory/log ~%s/.armory/log-testnet" % (USERNAME, USERNAME, USERNAME))
runcmd("chown -R %s:%s ~%s/.armory" % (DAEMON_USERNAME, USERNAME, USERNAME))
#create armory_utxsvr script
f = open("/usr/local/bin/armory_utxsvr", 'w')
f.write("#!/bin/sh\nDISPLAY=localhost:1.0 xvfb-run --auto-servernum %s/bin/armory_utxsvr \"$@\""
% paths['env_path.counterblock'])
f.close()
runcmd("chmod +x /usr/local/bin/armory_utxsvr")
    #Set up runit scripts (these will be disabled from autostarting on boot later, if necessary)
config_runit_for_service(paths['dist_path'], "armory_utxsvr", enabled=questions.with_mainnet)
config_runit_for_service(paths['dist_path'], "armory_utxsvr-testnet", enabled=questions.with_testnet)
def do_counterwallet_setup(run_as_user, branch, updateOnly=False):
#check out counterwallet from git
git_repo_clone("counterwallet", "https://github.com/CounterpartyXCP/counterwallet.git",
os.path.join(USER_HOMEDIR, "counterwallet"), branch, for_user=run_as_user)
if not updateOnly:
runcmd("npm install -g grunt-cli bower")
runcmd("cd ~%s/counterwallet/src && bower --allow-root --config.interactive=false install" % USERNAME)
runcmd("cd ~%s/counterwallet && npm install" % USERNAME)
runcmd("cd ~%s/counterwallet && grunt build --force" % USERNAME) #will generate the minified site
runcmd("chown -R %s:%s ~%s/counterwallet" % (USERNAME, USERNAME, USERNAME)) #just in case
runcmd("chmod -R u+rw,g+rw,o+r,o-w ~%s/counterwallet" % USERNAME) #just in case
#copy over the default config to the initial config if it doesn't exist
if not os.path.exists(os.path.join(USER_HOMEDIR, "counterwallet", "counterwallet.conf.json")):
runcmd("cp -a ~%s/counterwallet/counterwallet.conf.json.example ~%s/counterwallet/counterwallet.conf.json" % (USERNAME, USERNAME))
def do_security_setup(run_as_user, branch):
"""Some helpful security-related tasks, to tighten up the box"""
#modify host.conf
modify_config(r'^nospoof on$', 'nospoof on', '/etc/host.conf')
#enable automatic security updates
runcmd("apt-get -y install unattended-upgrades")
runcmd('''bash -c "echo -e 'APT::Periodic::Update-Package-Lists "1";\nAPT::Periodic::Unattended-Upgrade "1";' > /etc/apt/apt.conf.d/20auto-upgrades" ''')
runcmd("dpkg-reconfigure -fnoninteractive -plow unattended-upgrades")
#sysctl
runcmd("install -m 0644 -o root -g root -D %s/linux/other/sysctl_rules.conf /etc/sysctl.d/60-tweaks.conf" % paths['dist_path'])
#set up fail2ban
runcmd("apt-get -y install fail2ban")
runcmd("install -m 0644 -o root -g root -D %s/linux/other/fail2ban.jail.conf /etc/fail2ban/jail.d/counterblock.conf" % paths['dist_path'])
runcmd("service fail2ban restart")
#set up psad (this will install postfix, which will prompt the user)
runcmd("apt-get -y install psad")
modify_config(r'^ENABLE_AUTO_IDS\s+?N;$', 'ENABLE_AUTO_IDS\tY;', '/etc/psad/psad.conf')
modify_config(r'^ENABLE_AUTO_IDS_EMAILS\s+?Y;$', 'ENABLE_AUTO_IDS_EMAILS\tN;', '/etc/psad/psad.conf')
for f in ['/etc/ufw/before.rules', '/etc/ufw/before6.rules']:
modify_config(r'^# End required lines.*?# allow all on loopback$',
'# End required lines\n\n#CUSTOM: for psad\n-A INPUT -j LOG\n-A FORWARD -j LOG\n\n# allow all on loopback',
f, dotall=True)
runcmd("psad -R && psad --sig-update")
runcmd("service ufw restart")
runcmd("service psad restart")
#set up chkrootkit, rkhunter
runcmd("apt-get -y install rkhunter chkrootkit")
runcmd('bash -c "rkhunter --update; exit 0"')
runcmd("rkhunter --propupd")
runcmd('bash -c "rkhunter --check --sk; exit 0"')
runcmd("rkhunter --propupd")
#logwatch
runcmd("apt-get -y install logwatch libdate-manip-perl")
#apparmor
runcmd("apt-get -y install apparmor apparmor-profiles")
#auditd
#note that auditd will need a reboot to fully apply the rules, due to it operating in "immutable mode" by default
runcmd("apt-get -y install auditd audispd-plugins")
runcmd("install -m 0640 -o root -g root -D %s/linux/other/audit.rules /etc/audit/rules.d/counterblock.rules" % paths['dist_path'])
modify_config(r'^USE_AUGENRULES=.*?$', 'USE_AUGENRULES="yes"', '/etc/default/auditd')
runcmd("service auditd restart")
#iwatch
runcmd("apt-get -y install iwatch")
modify_config(r'^START_DAEMON=.*?$', 'START_DAEMON=true', '/etc/default/iwatch')
runcmd("install -m 0644 -o root -g root -D %s/linux/other/iwatch.xml /etc/iwatch/iwatch.xml" % paths['dist_path'])
modify_config(r'guard email="root@localhost"', 'guard email="noreply@%s"' % socket.gethostname(), '/etc/iwatch/iwatch.xml')
runcmd("service iwatch restart")
def find_configured_services():
services = ["bitcoin", "bitcoin-testnet", "counterparty", "counterparty-testnet",
"counterblock", "counterblock-testnet", "armory_utxsvr", "armory_utxsvr-testnet"]
configured_services = []
for s in services:
if os.path.exists("/etc/service/%s" % s):
configured_services.append(s)
return configured_services
def command_services(command, prompt=False):
assert command in ("stop", "restart")
if prompt:
confirmation = ask_question("%s services? (y/N)" % command.capitalize(), ('y', 'n',), 'n')
if confirmation == 'n':
return False
logging.warn("STOPPING SERVICES" if command == 'stop' else "RESTARTING SERVICES")
if os.path.exists("/etc/init.d/iwatch"):
runcmd("service iwatch %s" % command, abort_on_failure=False)
configured_services = find_configured_services()
for s in configured_services:
runcmd("sv %s %s" % (command, s), abort_on_failure=False)
return True
class BuildQuestions:
    VALID = collections.OrderedDict([
        ("op", ('u', 'r', 'restart', 'stop')),
        ("role", ('counterwallet', 'counterparty-server_only', 'counterblock_basic')),
        ("branch", ('master', 'develop')),
        ("net", ('t', 'm', 'b')),
        ("security_hardening", ('y', 'n')),
        ("server_public", ('y', 'n')),
        ("counterwallet_support_email", None),
        ("autostart_services", ('y', 'n')),
    ])
def __init__(self):
self.op = None
self.role = None
self.branch = None
self.net = None
self.security_hardening = None
self.server_public = None
self.counterwallet_support_email = None
self.autostart_services = None
def gather(self, noninteractive):
        if not self.role and noninteractive:
            self.role = 'counterwallet' #option 1 (Counterwallet server) is the default
elif not self.role:
role = ask_question("Enter the number for the role you want to build:\n"
+ "\t1: Counterwallet server\n\t2: counterparty-server only\n\t3: counterblock basic (no Counterwallet)\n"
+ "Your choice",
('1', '2', '3'), '1')
if role == '1':
role = 'counterwallet'
role_desc = "Counterwallet server"
elif role == '2':
role = 'counterparty-server_only'
role_desc = "counterparty-server only"
elif role == '3':
role = 'counterblock_basic'
role_desc = "Basic counterblock server"
print("\tBuilding a %s" % role_desc)
self.role = role
assert self.role in self.VALID['role']
if not self.branch and noninteractive:
self.branch = 'master'
elif not self.branch:
branch = ask_question("Build from branch (M)aster or (d)evelop? (M/d)", ('m', 'd'), 'm')
if branch == 'm': branch = 'master'
elif branch == 'd': branch = 'develop'
print("\tWorking with branch: %s" % branch)
self.branch = branch
assert self.branch in self.VALID['branch']
if not self.net and noninteractive:
self.net = 'b'
elif not self.net:
self.net = ask_question(
"Run as (t)estnet node, (m)ainnet node, or (B)oth? (t/m/B)", ('t', 'm', 'b'), 'b')
print("\tSetting up to run on %s" % ('testnet' if self.net.lower() == 't'
else ('mainnet' if self.net.lower() == 'm' else 'testnet and mainnet')))
assert self.net in self.VALID['net']
if self.role in ['counterparty-server_only', 'counterblock_basic']:
if not self.server_public and noninteractive:
self.server_public = 'y'
elif not self.server_public:
self.server_public = ask_question(
"Enable public setup (listen on all network interfaces) (Y/n)", ('y', 'n'), 'y')
else:
self.server_public = getattr(self, 'server_public', 'n') #does not apply
assert self.server_public in self.VALID['server_public']
else: self.server_public = None
if self.role == 'counterwallet':
counterwallet_support_email = None
if not self.counterwallet_support_email and noninteractive:
self.counterwallet_support_email = ''
elif not self.counterwallet_support_email:
while True:
counterwallet_support_email = input("Email address where support cases should go (blank to disable): ")
                    counterwallet_support_email = counterwallet_support_email.strip()
if counterwallet_support_email:
counterwallet_support_email_confirm = ask_question(
"You entered '%s', is that right? (Y/n): " % counterwallet_support_email, ('y', 'n'), 'y')
if counterwallet_support_email_confirm == 'y': break
else: break
self.counterwallet_support_email = counterwallet_support_email
else:
self.counterwallet_support_email = self.counterwallet_support_email.strip()
else: self.counterwallet_support_email = None
if not self.security_hardening and noninteractive:
self.security_hardening = 'y'
elif not self.security_hardening:
self.security_hardening = ask_question("Set up security hardening? (Y/n)", ('y', 'n'), 'y')
assert self.security_hardening in self.VALID['security_hardening']
if not self.autostart_services and noninteractive:
self.autostart_services = 'n'
elif not self.autostart_services:
self.autostart_services = ask_question("Autostart services (including on boot)? (y/N)", ('y', 'n'), 'n')
assert self.autostart_services in self.VALID['autostart_services']
def _with_mainnet(self):
return self.net in ['m', 'b']
with_mainnet = property(_with_mainnet)
def _with_testnet(self):
return self.net in ['t', 'b']
with_testnet = property(_with_testnet)
def _with_counterblock(self):
return self.role != 'counterparty-server_only'
with_counterblock = property(_with_counterblock)
def _start_on_boot(self):
return self.autostart_services == 'y'
start_on_boot = property(_start_on_boot)
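# A hedged illustration of the derived properties above (the attribute values
# below are made up for the example):
#
#   q = BuildQuestions()
#   q.net, q.role, q.autostart_services = 'b', 'counterwallet', 'y'
#   (q.with_mainnet, q.with_testnet, q.with_counterblock, q.start_on_boot)
#   # -> (True, True, True, True)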
def usage():
print("SYNTAX: %s [-h] [--noninteractive] %s" % (
sys.argv[0], ' '.join([('[--%s=%s]' % (q, '|'.join(v) if v else '')) for q, v in BuildQuestions.VALID.items()])))
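# A usage sketch based on the flags parsed in main() below (the script name
# and flag values are illustrative, not prescriptive). Each --<flag> maps onto
# a BuildQuestions.VALID key, so gather() can run without prompting:
#
#   sudo ./setup.py --noninteractive --op=r --role=counterwallet \
#       --branch=master --net=b --security_hardening=y --autostart_services=n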
def main():
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s|%(levelname)s: %(message)s')
do_federated_node_prerun_checks()
run_as_user = os.environ["SUDO_USER"]
assert run_as_user
global paths
paths = get_base_paths()
global questions
questions = BuildQuestions() #don't gather yet
#parse any command line objects
noninteractive = False
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "noninteractive"] + ['%s=' % q for q in BuildQuestions.VALID.keys()])
except getopt.GetoptError as err:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o == "--noninteractive":
noninteractive = True
elif o in ['--%s' % q for q in BuildQuestions.VALID.keys()]: #process flags for non-interactivity
setattr(questions, o.lstrip('-'), a)
else:
assert False, "Unhandled or unimplemented switch or option"
#Detect if we should ask the user if they just want to update the source and not do a rebuild
try:
pwd.getpwnam(USERNAME) #hacky check ...as this user is created by the script
except KeyError: #pwd.getpwnam raises KeyError when the user does not exist
questions.op = 'r' #do a build
else: #setup has already been run at least once
if questions.op == 'restart':
command_services("restart", prompt=False)
sys.exit(0)
if questions.op == 'stop':
command_services("stop", prompt=False)
sys.exit(0)
if questions.op not in BuildQuestions.VALID['op']:
questions.op = ask_question(
"It appears this setup has been run already or has another instance of counterpartyd or Federated Node. (r)ebuild node, or just (U)pdate from git? (r/U)",
('r', 'u'), 'u')
assert questions.op in BuildQuestions.VALID['op']
if os.path.exists("/etc/init.d/iwatch"):
runcmd("service iwatch stop", abort_on_failure=False)
if questions.op == 'u': #just refresh counterpartyd, counterblockd, and counterwallet, etc. from github
#refresh this repo
git_repo_clone(REPO_NAME, REPO_URL, paths['base_path'], for_user=USERNAME)
#refresh counterparty-server, counterparty-cli and counterblock (if available)
install_base_via_pip()
#refresh counterwallet (if available)
if os.path.exists(os.path.expanduser("~%s/counterwallet" % USERNAME)):
do_counterwallet_setup(run_as_user, "AUTO", updateOnly=True)
#offer to restart services
restarted = command_services("restart", prompt=not noninteractive)
if not restarted and os.path.exists("/etc/init.d/iwatch"):
runcmd("service iwatch start", abort_on_failure=False)
else:
assert questions.op == 'r'
#If here, a) federated node has not been set up yet or b) the user wants a rebuild
questions.gather(noninteractive)
command_services("stop")
remove_old()
do_base_setup(run_as_user)
backend_rpc_password, backend_rpc_password_testnet = do_backend_rpc_setup()
do_counterparty_setup(run_as_user, backend_rpc_password, backend_rpc_password_testnet)
do_nginx_setup(run_as_user,
enable=questions.role not in ["counterparty-server_only", "counterblock_basic"])
do_armory_utxsvr_setup(run_as_user, enable=questions.role == 'counterwallet')
if questions.role == 'counterwallet':
do_counterwallet_setup(run_as_user, questions.branch)
if questions.security_hardening == 'y':
do_security_setup(run_as_user, questions.branch)
logging.info("Counterblock Federated Node Build Complete (whew).")
if questions.start_on_boot:
configured_services = find_configured_services()
for s in configured_services:
config_runit_disable_manual_control(s)
if os.path.exists("/etc/init.d/iwatch"):
runcmd("service iwatch start", abort_on_failure=False)
if __name__ == "__main__":
main()
# -*- coding: utf-8 -*-
"""
Sessions module for the Tornado framework.
Milan Cermak <milan.cermak@gmail.com>
This module implements sessions for Tornado. It can store
session data in files, a MySQL database, Memcached, Redis
or MongoDB.
USAGE:
======
Every session object can be handled as a dictionary:
self.session[key] = value
var = self.session[key]
The session data is saved automatically for you when the request
handler finishes.
Two utility functions, invalidate() and refresh() are available to
every session object. Read their documentation to learn more.
The application provider is responsible for removing stale, expired
sessions from the storage. However, they can use the delete_expired()
function provided with every storage class except Memcached, which
knows when a session expires and removes it automatically.
SETTINGS:
=========
The session module introduces new settings available to the
application:
session_age: how long the session should be valid (applies also to cookies);
the value can be an integer, long, string or datetime.timedelta;
integer, long and string are meant to represent seconds,
default is 900 seconds (15 mins);
check out _expires_at for additional info
session_regeneration_interval: period in seconds, after which the session_id should be
regenerated; when the session creation time + period
exceeds the current time, a new session is stored
server-side (the session data remains unchanged) and
the client cookie is refreshed; the old session
is no longer valid
session regeneration is used to strengthen security
and prevent session hijacking; default interval
is 4 minutes
the setting accepts integer, string or timedelta values,
read _next_regeneration_at() documentation for more info
session_cookie_name: the name of the cookie, which stores the session_id;
default is 'session_id'
session_cookie_path: path attribute for the session cookie;
default is '/'
session_cookie_domain: domain attribute for the session cookie;
default is None
session_storage: a string specifying the session storage;
available storage engines are: file-based sessions (all sessions
are stored in a single file), directory-based sessions (every
session is stored in a single file, all in one directory),
MySQL-based sessions (sessions are stored in a MySQL database),
Redis-based sessions (using Redis to store them, obviously),
MongoDB-based sessions (each session stored as a document
in MongoDB)
if you want to store session data in a single file, set
this to a url of the following format:
'file:///path/to/session_storage_file'
another choice is to store sessions in a directory, where
each session is stored in a separate, single file; to
enable this behaviour, set this setting to:
dir://[/path/to/session/storage/directory]
if you omit the directory path, Tornado will create
a temporary directory for you
each session will be mapped to a file following the
<session_id>.session format, saved in this directory
be sure the Tornado process has read & write access to
this path, whether it's a file or a directory
if you want to use MySQL, set it in this format:
'mysql://username:password[@hostname[:port]]/database'
to enable Redis as a storage engine, set this setting
to 'redis://' with optional password, host, port and database
elements (e.g. 'redis://secret@127.0.0.1:8888/1'; if using
password with default host, you have to append an @-sign:
'redis://secret@/2'); if not complete, defaults are used
(localhost:6379, no auth, db 1)
remember that you have to have the redis python library
available on your system to enable Redis-based sessions
to use MongoDB as session storage, set this to a string
following the format:
'mongodb://[host[:port]]/db'
If no host or port is specified, defaults are used (localhost,
27017)
if you don't specify any storage, the default behaviour is
to create a new temporary file according to your OS'
conventions (on Unix-like systems in the /tmp directory);
the file will have 'tornado_sessions_' as name prefix
session_security_model: not implemented yet;
the plan for future versions is to provide some basic
mechanisms to prevent session hijacking, based on the
user's IP address, User-Agent, GeoIP or whatever
other data; suggestions welcome
"""
import base64
import csv
import collections
import datetime
import os
import cPickle as pickle
import re
import tempfile
import time
import types
class BaseSession(collections.MutableMapping):
"""The base class for the session object. Work with the session object
is really simple, just treat is as any other dictionary:
class Handler(tornado.web.RequestHandler):
def get(self):
var = self.session['key']
self.session['another_key'] = 'value'
Session is automatically saved on handler finish. Session expiration
is updated with every request. If configured, session ID is
regenerated periodically.
The session_id attribute stores a unique, random, 64-character string
serving as an identifier.
To create a new storage system for the sessions, subclass BaseSession
and define save(), load() and delete(). For inspiration, check out any
of the already available classes and the documentation of the aforementioned functions."""
def __init__(self, session_id=None, data=None, security_model=[], expires=None,
duration=None, ip_address=None, user_agent=None,
regeneration_interval=None, next_regeneration=None, **kwargs):
# if session_id is set, we're loading a previously initialized session
if session_id:
self.session_id = session_id
self.data = data
self.duration = duration
self.expires = expires
self.dirty = False
else:
self.session_id = self._generate_session_id()
self.data = {}
self.duration = duration
self.expires = self._expires_at()
self.dirty = True
self.ip_address = ip_address
self.user_agent = user_agent
self.security_model = security_model
self.regeneration_interval = regeneration_interval
self.next_regeneration = next_regeneration or self._next_regeneration_at()
self._delete_cookie = False
def __repr__(self):
return '<session id: %s data: %s>' % (self.session_id, self.data)
def __str__(self):
return self.session_id
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
self.dirty = True
def __delitem__(self, key):
del self.data[key]
self.dirty = True
def keys(self):
return self.data.keys()
def __iter__(self):
return self.data.__iter__()
def __len__(self):
return len(self.data.keys())
@classmethod
def _generate_session_id(cls):
return os.urandom(32).encode('hex') # 256 bits of entropy
def _is_expired(self):
"""Check if the session has expired."""
if self.expires is None: # never expire
return False
return datetime.datetime.utcnow() > self.expires
def _expires_at(self):
"""Find out the expiration time. Returns datetime.datetime."""
v = self.duration
if v is None: # never expire
return None
elif isinstance(v, datetime.timedelta):
pass
elif isinstance(v, (int, long)):
self.duration = datetime.timedelta(seconds=v)
elif isinstance(v, basestring):
self.duration = datetime.timedelta(seconds=int(v))
else:
self.duration = datetime.timedelta(seconds=900) # 15 mins
return datetime.datetime.utcnow() + self.duration
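# For illustration, all of the following produce the same 15-minute expiry
# window, per the conversion rules in _expires_at above:
#
#   BaseSession(duration=900)
#   BaseSession(duration='900')
#   BaseSession(duration=datetime.timedelta(seconds=900))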
def _serialize_expires(self):
""" Determines what value of expires is stored to DB during save()."""
if self.expires is None:
return None
else:
return int(time.mktime(self.expires.timetuple()))
def _should_regenerate(self):
"""Determine if the session_id should be regenerated."""
if self.regeneration_interval is None: # never regenerate
return False
return datetime.datetime.utcnow() > self.next_regeneration
def _next_regeneration_at(self):
"""Return a datetime object when the next session id regeneration
should occur."""
# convert whatever value to a timedelta (period in seconds)
# store it in self.regeneration_interval to prevent
# converting in later calls and return the datetime
# of next planned regeneration
v = self.regeneration_interval
if v is None: # never regenerate
return None
elif isinstance(v, datetime.timedelta):
pass
elif isinstance(v, (int, long)):
self.regeneration_interval = datetime.timedelta(seconds=v)
elif isinstance(v, basestring):
self.regeneration_interval = datetime.timedelta(seconds=int(v))
else:
self.regeneration_interval = datetime.timedelta(seconds=240) # 4 mins
return datetime.datetime.utcnow() + self.regeneration_interval
def invalidate(self):
"""Destorys the session, both server-side and client-side.
As a best practice, it should be used when the user logs out of
the application."""
self.delete() # remove server-side
self._delete_cookie = True # remove client-side
def refresh(self, duration=None, new_session_id=False): # the opposite of invalidate
"""Prolongs the session validity. You can specify for how long passing a
value in the duration argument (the same rules as for session_age apply).
Be aware that henceforward this particular session may have a different
expiry date, not respecting the global setting.
If new_session_id is True, a new session identifier will be generated.
This should be used e.g. on user authentication for security reasons."""
if duration:
self.duration = duration
self.expires = self._expires_at()
else:
self.expires = self._expires_at()
if new_session_id:
self.delete()
self.session_id = self._generate_session_id()
self.next_regeneration = self._next_regeneration_at()
self.dirty = True # force save
self.save()
def save(self):
"""Save the session data and metadata to the backend storage
if necessary (self.dirty == True). On successful save set
dirty to False."""
pass
@staticmethod
def load(session_id, location):
"""Load the stored session from storage backend or return
None if the session was not found, in case of stale cookie."""
pass
def delete(self):
"""Remove all data representing the session from backend storage."""
pass
@staticmethod
def delete_expired(file_path):
"""Deletes sessions with timestamps in the past form storage."""
pass
def serialize(self):
dump = {'session_id': self.session_id,
'data': self.data,
'duration': self.duration,
'expires': self.expires,
'ip_address': self.ip_address,
'user_agent': self.user_agent,
'security_model': self.security_model,
'regeneration_interval': self.regeneration_interval,
'next_regeneration': self.next_regeneration}
return base64.encodestring(pickle.dumps(dump))
@staticmethod
def deserialize(datastring):
return pickle.loads(base64.decodestring(datastring))
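# A minimal sketch of the subclassing contract described in the BaseSession
# docstring: an in-memory backend that overrides save(), load() and delete().
# `_DICT_STORE` and `DictSession` are illustrative names, not part of the
# original module; the store is per-process only, so this is a demonstration
# rather than a production storage engine.
_DICT_STORE = {}

class DictSession(BaseSession):
    def save(self):
        if not self.dirty:
            return
        _DICT_STORE[self.session_id] = self.serialize()
        self.dirty = False

    @staticmethod
    def load(session_id, location=None):
        data = _DICT_STORE.get(session_id)
        if data is None:
            return None
        return DictSession(**DictSession.deserialize(data))

    def delete(self):
        _DICT_STORE.pop(self.session_id, None)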
class FileSession(BaseSession):
"""File based session storage. Sessions are stored in CSV format. The file
is either specified in the session_storage setting (be sure it is writable
to the Tornado process) or a new tempfile with 'tornado_sessions_' prefix
is created in the OS' standard location.
Be aware that file-based sessions can get really slow with many stored
sessions, as any action (save, load, delete) has to cycle through the
whole file. """
def __init__(self, file_path, **kwargs):
super(FileSession, self).__init__(**kwargs)
self.file_path = file_path
if not kwargs.has_key('session_id'):
self.save() # save only if it is a newly created session, not if loaded from storage
def save(self):
"""Save the session. To prevent data loss, we read from the original
file and write the updated data to a temporary file. When all data is
written, we rename the temporary file to the original. """
if not self.dirty:
return
found = False
reader_file = open(self.file_path, 'rb')
reader = csv.DictReader(reader_file,
fieldnames=['session_id', 'data', 'expires', 'ip_address', 'user-agent'])
writer_temp = tempfile.mkstemp()[1]
writer_temp_file = open(writer_temp, 'w+b')
writer = csv.DictWriter(writer_temp_file,
['session_id', 'data', 'expires', 'ip_address', 'user-agent'])
for line in reader:
if line['session_id'] == self.session_id:
writer.writerow({'session_id': self.session_id,
'data': self.serialize(),
'expires': self._serialize_expires(),
'ip_address': self.ip_address,
'user-agent': self.user_agent})
found = True
else:
writer.writerow(line)
if not found: # not previously stored session
# column data will contain the whole object, not just the
# data attribute
writer.writerow({'session_id': self.session_id,
'data': self.serialize(),
'expires': self._serialize_expires(),
'ip_address': self.ip_address,
'user-agent': self.user_agent})
reader_file.close()
writer_temp_file.close()
os.rename(writer_temp, self.file_path)
self.dirty = False
@staticmethod
def load(session_id, path):
"""Loads a session from the specified file."""
try:
reader_file = open(path, 'rb')
reader = csv.DictReader(reader_file,
fieldnames=['session_id', 'data', 'expires', 'ip_address', 'user-agent'])
for line in reader:
if line['session_id'] == session_id:
reader_file.close()
kwargs = FileSession.deserialize(line['data'])
return FileSession(path, **kwargs)
reader_file.close()
return None
except:
return None
def delete(self):
"""Remove the session from the storage file. File manipulation is
done the same way as in save()."""
reader_file = open(self.file_path, 'rb')
reader = csv.DictReader(reader_file,
fieldnames=['session_id', 'data', 'expires', 'ip_address', 'user-agent'])
writer_temp = tempfile.mkstemp()[1]
writer_temp_file = open(writer_temp, 'w+b')
writer = csv.DictWriter(writer_temp_file,
['session_id', 'data', 'expires', 'ip_address', 'user-agent'])
for line in reader:
if line['session_id'] != self.session_id:
writer.writerow(line)
reader_file.close()
writer_temp_file.close()
os.rename(writer_temp, self.file_path) # rename the temporary holder to the session file
@staticmethod
def delete_expired(file_path):
reader_file = open(file_path, 'rb')
reader = csv.DictReader(reader_file,
fieldnames=['session_id', 'data', 'expires', 'ip_address', 'user-agent'])
writer_temp = tempfile.mkstemp()[1]
writer_temp_file = open(writer_temp, 'w+b')
writer = csv.DictWriter(writer_temp_file,
['session_id', 'data', 'expires', 'ip_address', 'user-agent'])
for line in reader:
if int(line['expires']) > int(time.time()):
writer.writerow(line)
reader_file.close()
writer_temp_file.close()
os.rename(writer_temp, file_path)
class DirSession(BaseSession):
"""A "directory" based session storage. Every session is stored in a
separate file, so one file represents one session. The files are
named as the session_id plus '.session' suffix. Data is stored in
CSV format. Make sure the directory where the files are stored is
readable and writable to the Tornado process."""
def __init__(self, dir_path, **kwargs):
super(DirSession, self).__init__(**kwargs)
self.dir_path = dir_path
if not kwargs.has_key('session_id'):
self.save()
def save(self):
"""Save the session to a file. The algorithm first writes to a temp
file created in the sessions directory. When all data is written,
it renames it to the correct name (<session_id>.session)."""
if not self.dirty:
return
session_file = os.path.join(self.dir_path, self.session_id+'.session')
# write to temp file and then rename
temp_fd, temp_name = tempfile.mkstemp(dir=self.dir_path)
temp_file = os.fdopen(temp_fd, 'w+b')
writer = csv.writer(temp_file)
writer.writerow([self.session_id,
self.serialize(),
self._serialize_expires(),
self.ip_address,
self.user_agent])
temp_file.close()
os.rename(temp_name, session_file)
self.dirty = False
@staticmethod
def load(session_id, directory):
"""Load session from file storage."""
try:
session_file_name = os.path.join(directory, session_id+'.session')
if os.path.isfile(session_file_name):
session_file = open(session_file_name, 'rb')
reader = csv.reader(session_file)
l = reader.next()
kwargs = DirSession.deserialize(l[1])
return DirSession(directory, **kwargs)
return None
except:
return None
def delete(self):
"""Deletes the session file."""
session_file = os.path.join(self.dir_path, self.session_id+'.session')
if os.path.isfile(session_file):
os.remove(session_file)
@staticmethod
def delete_expired(dir_path):
assert os.path.isdir(dir_path)
all_files = os.listdir(dir_path)
session_files = filter(lambda x: x.endswith('.session'), all_files)
for s in session_files:
name = os.path.join(dir_path, s)
session_file = open(name, 'rb')
reader = csv.reader(session_file)
data = reader.next()
session_file.close()
if int(data[2]) < int(time.time()):
os.remove(name)
class MySQLSession(BaseSession):
"""Enables MySQL to act as a session storage engine. It uses Tornado's
MySQL wrapper from database.py.
The connection details are specified in the session_storage settings
as string mysql://username:password[@hostname[:port]]/database. It
stores session data in the table tornado_sessions. If hostname or
port aren't specified, localhost:3306 is used as the default. """
def __init__(self, connection, max_ua_len=756, **kwargs):
super(MySQLSession, self).__init__(**kwargs)
# Trim UA if it's over limit
if self.user_agent and len(self.user_agent) > max_ua_len:
self.user_agent = self.user_agent[:max_ua_len]
self.connection = connection
if not kwargs.has_key('session_id'):
self.save()
@staticmethod
def _parse_connection_details(details):
# mysql://username:password[@hostname[:port]]/db
if details.find('@') != -1:
match = re.match('mysql://(\w+):(.*?)@([\w|\.]+)(?::(\d+))?/(\S+)', details)
username = match.group(1)
password = match.group(2)
hostname = match.group(3)
port = match.group(4) or '3306'
database = match.group(5)
host_port = hostname + ':' + port
else: # hostname and port not specified
host_port = 'localhost:3306'
match = re.match('mysql://(\w+):(.*?)/(\S+)', details)
username = match.group(1)
password = match.group(2)
database = match.group(3)
return username, password, host_port, database
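# For illustration, the parser above splits a full URL like this (the
# credentials and host are made up):
#
#   _parse_connection_details('mysql://app:s3cret@db.example.com:3307/site')
#   # -> ('app', 's3cret', 'db.example.com:3307', 'site')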
def save(self):
"""Store the session data to database. Session is saved only if it
is necessary. If the table 'tornado_sessions' does not exist yet,
create it. It uses MySQL's "non-standard insert ... on duplicate key
"update query."""
if not self.dirty:
return
if not self.connection.get("""show tables like 'tornado_sessions'"""):
self.connection.execute( # create table if it doesn't exist
"""create table tornado_sessions (
session_id varchar(64) not null primary key,
data longtext,
expires integer,
ip_address varchar(46),
user_agent varchar(768)
);""")
self.connection.execute( # MySQL's upsert
"""insert into tornado_sessions
(session_id, data, expires, ip_address, user_agent) values
(%s, %s, %s, %s, %s)
on duplicate key update
session_id=values(session_id), data=values(data), expires=values(expires),
ip_address=values(ip_address), user_agent=values(user_agent);""",
self.session_id, self.serialize(), self._serialize_expires(),
self.ip_address, self.user_agent)
self.dirty = False
@staticmethod
def load(session_id, connection):
"""Load the stored session."""
try:
data = connection.get("""
select session_id, data, expires, ip_address, user_agent
from tornado_sessions where session_id = %s;""", session_id)
if data:
kwargs = MySQLSession.deserialize(data['data'])
return MySQLSession(connection, **kwargs)
return None
except:
return None
def delete(self):
"""Remove session data from the database."""
self.connection.execute("""
delete from tornado_sessions where session_id = %s;""", self.session_id)
@staticmethod
def delete_expired(connection):
connection.execute("""
delete from tornado_sessions where expires < %s;""", int(time.time()))
try:
import redis
class RedisSession(BaseSession):
"""Class handling session storing in Redis.
It uses default Redis settings for host and port, without
authentication. The session_id is used as a key to a string
value holding the session details. The value has a format of
serialized_session_object_data:expires:ip_address:user_agent.
The save() and delete() methods both trigger BGSAVE. Be sure
you're aware of possible limitations (saving is not guaranteed
in the unfortunate case of a failure between the call to BGSAVE
and actual writing data to HDD by Redis)."""
def __init__(self, connection, **kwargs):
super(RedisSession, self).__init__(**kwargs)
self.connection = connection
if not kwargs.has_key('session_id'):
self.save()
@staticmethod
def _parse_connection_details(details):
# redis://[auth@][host[:port]][/db]
match = re.match('redis://(?:(\S+)@)?([^\s:/]+)?(?::(\d+))?(?:/(\d+))?$', details)
password, host, port, db = match.groups()
return password, host, int(port) if port else 6379, db # default port per the module docstring
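# For illustration (made-up values), the pattern above yields:
#
#   _parse_connection_details('redis://secret@127.0.0.1:8888/1')
#   # -> ('secret', '127.0.0.1', 8888, '1')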
def _serialize_expires(self):
""" Determines what value of expires is stored to DB during save()."""
if self.expires is None:
return '-1'
else:
return str(int(time.mktime(self.expires.timetuple())))
def save(self):
"""Save the current sesssion to Redis. The session_id
acts as a key. The value is constructed of colon separated values
serialized_data, expires, ip_address and user_agent. This
function calls BGSAVE on Redis, so it may terminate before
the data is actually updated on the HDD."""
if not self.dirty:
return
value = ':'.join((self.serialize(),
                  self._serialize_expires(),
                  self.ip_address or '',    # join() raises TypeError on None
                  self.user_agent or ''))
self.connection.set(self.session_id, value)
try:
self.connection.bgsave()
except redis.ResponseError:
pass
self.dirty = False
@staticmethod
def load(session_id, connection):
"""Load the stored session."""
if connection.exists(session_id) == 1:
try:
data = connection.get(session_id)
kwargs = RedisSession.deserialize(data.split(':', 1)[0])
return RedisSession(connection, **kwargs)
except:
return None
return None
def delete(self):
"""Delete the session key-value from Redis. As save(),
delete() too calls BGSAVE."""
self.connection.delete(self.session_id)
try:
self.connection.bgsave()
except redis.ResponseError:
pass
@staticmethod
def delete_expired(connection):
t = int(time.time())
for key in connection.keys('*'):
value = connection.get(key)
expires = value.split(':', 2)[1]
if int(expires) < t:
connection.delete(key)
except ImportError:
pass
try:
import pymongo
class MongoDBSession(BaseSession):
"""Class implementing the MongoDB based session storage.
All sessions are stored in a collection "tornado_sessions" in the db
you specify in the session_storage setting.
The session document structure is the following:
'session_id': session ID
'data': serialized session object
'expires': a timestamp of when the session expires, in sec since epoch
'user_agent': self-explanatory
An index on session_id is created automatically, on application's init.
The end_request() is called after every operation (save, load, delete),
to return the connection back to the pool.
"""
def __init__(self, db, **kwargs):
super(MongoDBSession, self).__init__(**kwargs)
self.db = db # an instance of pymongo.collection.Collection
if not kwargs.has_key('session_id'):
self.save()
@staticmethod
def _parse_connection_details(details):
# mongodb://[host[:port]]/db
if details[10] != '/':
# host and port specified
match = re.match('mongodb://([\S|\.]+?)?(?::(\d+))?/(\S+)', details)
host = match.group(1)
port = int(match.group(2)) if match.group(2) else 27017
database = match.group(3)
else:
# default host and port
host = 'localhost'
port = 27017
match = re.match('mongodb:///(\S+)', details)
database = match.group(1)
return host, port, database
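# For illustration (made-up values):
#
#   _parse_connection_details('mongodb://db.example.com:27018/sessions')
#   # -> ('db.example.com', 27018, 'sessions')
#   _parse_connection_details('mongodb:///sessions')
#   # -> ('localhost', 27017, 'sessions')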
def save(self):
"""Upsert a document to the tornado_sessions collection.
The document's structure is like so:
{'session_id': self.session_id,
'data': self.serialize(),
'expires': self._serialize_expires(),
'user_agent': self.user_agent}
"""
# upsert
self.db.update(
{'session_id': self.session_id}, # equality criteria
{'session_id': self.session_id,
'data': self.serialize(),
'expires': self._serialize_expires(),
'user_agent': self.user_agent}, # new document
upsert=True)
self.db.database.connection.end_request()
@staticmethod
def load(session_id, db):
"""Load session from the storage."""
try:
data = db.find_one({'session_id': session_id})
if data:
kwargs = MongoDBSession.deserialize(data['data'])
db.database.connection.end_request()
return MongoDBSession(db, **kwargs)
db.database.connection.end_request()
return None
except:
db.database.connection.end_request()
return None
def delete(self):
"""Remove session from the storage."""
self.db.remove({'session_id': self.session_id})
self.db.database.connection.end_request()
@staticmethod
def delete_expired(db):
db.remove({'expires': {'$lte': int(time.time())}})
except ImportError:
pass
try:
import pylibmc
class MemcachedSession(BaseSession):
"""Class responsible for Memcached stored sessions. It uses the
pylibmc library because it's fast. It communicates with the
memcached server through the binary protocol and uses async
I/O (no_block set to 1) to speed things up even more.
Session ID is used as a key. The value consists of colon
separated values of the serialized session object, expiry timestamp,
IP address and User-Agent.
Values are stored with timeout set to the difference between
saving time and expiry time in seconds. Therefore, no
old sessions will be held in Memcached memory."""
def __init__(self, connection, **kwargs):
super(MemcachedSession, self).__init__(**kwargs)
self.connection = connection
if not kwargs.has_key('session_id'):
self.save()
@staticmethod
def _parse_connection_details(details):
if len(details) > 12:
return re.sub('\s+', '', details[12:]).split(',')
else:
return ['127.0.0.1']
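# For illustration (made-up hosts), the helper above turns the setting into
# a server list for pylibmc:
#
#   _parse_connection_details('memcached://10.0.0.1:11211, 10.0.0.2:11211')
#   # -> ['10.0.0.1:11211', '10.0.0.2:11211']
#   _parse_connection_details('memcached://')
#   # -> ['127.0.0.1']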
def _serialize_expires(self):
""" Determines what value of expires is stored to DB during save()."""
if self.expires is None:
return '-1'
else:
return str(int(time.mktime(self.expires.timetuple())))
def save(self):
"""Write the session to Memcached. Session ID is used as
key, value is constructed as colon separated values of
serialized session, session expiry timestamp, ip address
and User-Agent.
The value is not stored indefinitely. Its expiration time
in seconds is calculated as the difference between the saving
time and session expiry."""
if not self.dirty:
return
value = ':'.join((self.serialize(),
self._serialize_expires(),
self.ip_address,
self.user_agent))
# count how long it should last and then add or rewrite
if self.expires is None:
    # set expiry to 30 days, the maximum Memcached allows
    # http://code.google.com/p/memcached/wiki/FAQ#What_are_the_limits_on_setting_expire_time?_%28why_is_there_a_30_d
    self.connection.set(self.session_id, value, time=60 * 60 * 24 * 30)
else:
live_sec = self.expires - datetime.datetime.utcnow()
self.connection.set(self.session_id, value,
                    time=live_sec.days * 86400 + live_sec.seconds)
self.dirty = False
@staticmethod
def load(session_id, connection):
"""Load the session from storage."""
try:
value = connection.get(session_id)
if value:
data = value.split(':', 1)[0]
kwargs = MemcachedSession.deserialize(data)
return MemcachedSession(connection, **kwargs)
except:
return None
return None
def delete(self):
"""Delete the session from storage."""
self.connection.delete(self.session_id)
@staticmethod
def delete_expired(connection):
"""With Memcached as session storage, this function does
not make sense as all keys are saved with expiry time
exactly the same as the session's. Hence Memcached takes
care of cleaning out the garbage."""
raise NotImplementedError
except ImportError:
pass
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the input_lib library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor as ragged_tensor_lib
from tensorflow.python.util import nest
class DistributedIteratorTestBase(test.TestCase):
# The passed input_context is to create a sharded dataset in between-graph
# case.
# TODO(yuefengz): rewrite the following method to make it less DRY.
def _wrap_iterator(self,
input_type,
dataset_or_input_fn,
input_workers,
devices,
num_replicas_in_sync,
strategy,
input_context=None):
# The `input_context` passed in is to shard dataset for
# MultiWorkerMirroredStrategy. It doesn't apply to in-graph case where
# multiple InputContexts are needed.
if input_type == "input_fn":
self.assertIsNone(
    input_context,
    msg=("The `input_context` arg is only used to shard dataset in "
         "`MultiWorkerMirroredStrategy` when the input type is dataset."))
input_contexts = []
for i in range(input_workers.num_workers):
input_contexts.append(
distribute_lib.InputContext(
# Note: `input_workers.num_workers` is always 1 in between-graph
# case.
num_input_pipelines=input_workers.num_workers,
input_pipeline_id=i,
num_replicas_in_sync=len(devices)))
iterator = input_lib.InputFunctionIterator(
dataset_or_input_fn,
input_workers,
input_contexts,
strategy)
else:
iterator = input_lib.DatasetIterator(
dataset_or_input_fn,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
return iterator
def _wrap_dataset(self,
input_type,
dataset,
input_workers,
num_replicas_in_sync,
strategy,
input_context=None):
if input_type == "dataset":
if tf2.enabled():
return input_lib.DistributedDataset(
input_workers,
strategy,
dataset,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
else:
return input_lib.DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
else:
return strategy.distribute_datasets_from_function(dataset)
def _assert_iterator_values(self,
iterator,
expected_values,
evaluate_fn,
devices,
enable_get_next_as_optional=False):
actual_values = []
for _ in range(len(expected_values)):
if enable_get_next_as_optional:
next_element = iterator.get_next_as_optional().get_value()
else:
next_element = iterator.get_next()
computed_value = evaluate_fn([
distribute_utils.select_replica(r, next_element)
for r in range(len(devices))
])
actual_values.append(computed_value)
for expected_value, actual_value in zip(expected_values, actual_values):
for expected, actual in zip(expected_value, actual_value):
self.assertAllEqual(expected, actual)
def _assert_dataset_values_for_loop(self, dataset, expected_values,
evaluate_fn, devices):
actual_values = []
for x in dataset:
computed_value = self.evaluate(
[distribute_utils.select_replica(r, x) for r in range(len(devices))])
actual_values.append(computed_value)
for expected_value, actual_value in zip(expected_values, actual_values):
for expected, actual in zip(expected_value, actual_value):
self.assertAllEqual(expected, actual)
def _test_input_iteration(self,
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
strategy,
sess=None,
num_replicas_in_sync=None,
input_context=None):
if iteration_type == "for_loop" and not context.executing_eagerly():
self.skipTest("unsupported test combination.")
if api_type == "wrap_into_iterator" and iteration_type == "for_loop":
self.skipTest("unsupported test combination.")
if api_type == "wrap_into_iterator" and input_type == "input_fn":
self.skipTest("unsupported test combination.")
devices = nest.flatten([ds for _, ds in worker_device_pairs])
input_workers = input_lib.InputWorkers(worker_device_pairs)
if api_type == "wrap_into_iterator":
iterator = self._wrap_iterator(
input_type,
dataset_or_input_fn,
input_workers,
devices,
num_replicas_in_sync,
strategy,
input_context=input_context)
else:
# wrapping into a dataset:
dataset = self._wrap_dataset(
input_type,
dataset_or_input_fn,
input_workers,
num_replicas_in_sync,
strategy,
input_context=input_context)
if ops.executing_eagerly_outside_functions():
iterator = iter(dataset)
else:
if isinstance(dataset, input_lib.DistributedDatasetV1):
iterator = dataset.make_initializable_iterator()
else:
self.skipTest("unsupported test combination")
if isinstance(iterator, composite_tensor.CompositeTensor):
nest.assert_same_structure(iterator, iterator._type_spec,
expand_composites=True)
if iteration_type == "get_next":
evaluate = lambda x: sess.run(x) if sess else self.evaluate(x)
if not ops.executing_eagerly_outside_functions():
evaluate(control_flow_ops.group(iterator.initializer))
def test_get_next(iterator):
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
with self.assertRaises(errors.OutOfRangeError):
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
# After re-initializing the iterator, should be able to iterate again.
if not ops.executing_eagerly_outside_functions():
evaluate(control_flow_ops.group(iterator.initializer))
else:
if api_type == "wrap_into_iterator":
self.skipTest("unsupported test combination")
else:
iterator = iter(dataset)
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
def test_get_next_as_optional(iterator):
self._assert_iterator_values(
iterator,
expected_values,
evaluate,
devices,
enable_get_next_as_optional=True)
next_element = iterator.get_next_as_optional()
self.assertFalse(self.evaluate(next_element.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self._assert_iterator_values(
iterator, [0],
evaluate,
devices,
enable_get_next_as_optional=True)
test_get_next(iterator)
# re-initializing the iterator
if not tf2.enabled():
# TODO(yuefengz): we should split this function.
return
else:
if api_type == "wrap_into_iterator":
return
else:
iterator = iter(dataset)
test_get_next_as_optional(iterator)
if iteration_type == "for_loop" and context.executing_eagerly():
self._assert_dataset_values_for_loop(dataset, expected_values,
self.evaluate, devices)
def _create_dataset_or_input_fn(self, input_type, input_fn):
if input_type == "input_fn":
return input_fn
else:
return input_fn(distribute_lib.InputContext())
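# A hedged sketch of the two input types at a call site (the local names are
# illustrative):
#
#   dataset_fn = lambda _: dataset_ops.Dataset.range(10)
#   as_input_fn = self._create_dataset_or_input_fn("input_fn", dataset_fn)
#   as_dataset = self._create_dataset_or_input_fn("dataset", dataset_fn)
#
# "input_fn" returns the callable itself so each worker can build its own
# pipeline later; "dataset" calls it immediately with a default InputContext.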
class DistributedIteratorTest(DistributedIteratorTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu
]))
def testDisablingOwnedIteratorsInTF2(self, distribution, input_type):
if not tf2.enabled():
self.skipTest("unsupported test combination")
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
input_workers = input_lib.InputWorkers(worker_device_pairs)
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
if input_type == "dataset":
dist_dataset = input_lib.get_distributed_dataset(dataset_or_input_fn,
input_workers,
distribution)
else:
dist_dataset = input_lib.get_distributed_datasets_from_function(
dataset_or_input_fn, input_workers, [distribute_lib.InputContext()],
distribution)
# Default Iterator types in TF2.
iterator = iter(dist_dataset)
self.assertIsInstance(iterator, input_lib.DistributedIterator)
self.assertIsInstance(iterator._iterators[0],
input_lib._SingleWorkerOwnedDatasetIterator)
# Disable creating owned iterators by setting a property on the strategy.
distribution._enable_legacy_iterators = True
iterator = iter(dist_dataset)
self.assertIsInstance(iterator, input_lib.DistributedIteratorV1)
self.assertIsInstance(iterator._iterators[0],
input_lib._SingleWorkerDatasetIterator)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
]))
def testMultiDeviceIterInitialize(self, distribution):
if tf2.enabled():
self.skipTest("Only V1 is supported.")
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)
input_workers = input_lib.InputWorkers(worker_device_pairs)
dist_dataset = input_lib.get_distributed_dataset(
dataset_fn(distribute_lib.InputContext()), input_workers, distribution)
iterator = dataset_ops.make_one_shot_iterator(dist_dataset)
@def_function.function
def init_func_for_iter():
self.evaluate(iterator.initializer)
init_func_for_iter()
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
],
enable_get_next_as_optional=[True, False]))
def testOneDeviceCPU(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i] for i in range(10)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[strategy_combinations.multi_worker_mirrored_2x1_cpu],
enable_get_next_as_optional=[True, False]))
def testOneDeviceCPUMultiWorker(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i] for i in range(10)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
],
enable_get_next_as_optional=[True, False]))
def testTwoDevicesOneGPUOneCPU(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i, i+1] for i in range(0, 10, 2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[strategy_combinations.tpu_strategy],
enable_get_next_as_optional=[True, False]))
def testTPU(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = collections.OrderedDict()
for tpu_device in distribution.extended.worker_devices:
host_device = device_util.get_host_for_device(tpu_device)
worker_device_pairs.setdefault(host_device, [])
worker_device_pairs[host_device].append(tpu_device)
worker_device_pairs = worker_device_pairs.items()
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i, i + 1] for i in range(0, 10, 2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
],
enable_get_next_as_optional=[True, False]))
def testTupleDataset(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
def dataset_fn(ctx):
del ctx
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
return dataset_ops.Dataset.zip((dataset1, dataset2))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[(i, i**2), (i+1, (i+1)**2)] for i in range(0, 10, 2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[strategy_combinations.multi_worker_mirrored_2x2_gpu],
enable_get_next_as_optional=[True, False]))
def testTupleDatasetMultiworker(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
def dataset_fn(ctx):
del ctx
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
return dataset_ops.Dataset.zip((dataset1, dataset2))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [
[(i, i**2), (i + 1, (i + 1)**2)] for i in range(0, 10, 2)
]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
# Input_context is not passed in and thus no sharding.
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testIterableIterator(self, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
input_workers = input_lib.InputWorkers(worker_device_pairs)
dataset = dataset_ops.Dataset.range(10)
dist_dataset = input_lib.get_distributed_dataset(dataset, input_workers,
distribution)
iterator = iter(dist_dataset)
for i, element in enumerate(iterator):
self.assertAllEqual(distribution.experimental_local_results(element), [i])
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
]))
def testUnevenDatasetBatches(self, input_type, api_type, iteration_type,
drop_remainder, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch( # pylint: disable=g-long-lambda
2, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
# The last global batch only contains data for one replica.
if drop_remainder:
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
else:
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testUnevenDatasetBatchesMultiWorker(self, input_type, api_type,
iteration_type, drop_remainder,
distribution):
# Actual devices don't matter in this test as long as the number of global
# replicas is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
cr.task_type)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(9)
if input_type == "input_fn":
# When input_fn is used, there is no automatic rebatching and sharding,
# so we add them here.
return dataset.shard(worker_count, id_in_cluster).batch(1)
else:
return dataset.batch(2, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
if drop_remainder and input_type == "dataset":
if id_in_cluster == 0:
expected_values = [[[0]], [[2]], [[4]], [[6]]]
else:
expected_values = [[[1]], [[3]], [[5]], [[7]]]
else:
# The last global batch only contains data for one replica.
if id_in_cluster == 0:
expected_values = [[[0]], [[2]], [[4]], [[6]], [[8]]]
else:
expected_values = [[[1]], [[3]], [[5]], [[7]], [[]]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.multi_worker_mirrored_2x2_gpu,
]))
def testUnevenDatasetBatchesMultiWorkerFourReplicas(self, input_type,
api_type, iteration_type,
drop_remainder,
distribution):
# Actual devices don't matter in this test as long as the number of global
# replicas is 4.
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
cr.task_type)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(15)
if input_type == "input_fn":
# When input_fn is used, there is no automatic rebatching and sharding,
# so we add them here.
return dataset.shard(worker_count, id_in_cluster).batch(1)
else:
return dataset.batch(4, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
# The last global batch only contains data for one replica.
if drop_remainder and input_type == "dataset":
if id_in_cluster == 0:
expected_values = [[[0], [2]], [[4], [6]], [[8], [10]]]
else:
expected_values = [[[1], [3]], [[5], [7]], [[9], [11]]]
else:
if id_in_cluster == 0:
expected_values = [[[0], [2]], [[4], [6]], [[8], [10]], [[12], [14]]]
else:
expected_values = [[[1], [3]], [[5], [7]], [[9], [11]], [[13], []]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
num_replicas_in_sync=[None, 2],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
],
enable_get_next_as_optional=[True, False]))
def testBatchSplitting(self, input_type, api_type, iteration_type,
num_replicas_in_sync, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
batch_size = 10
dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
updated_batch_size = (
batch_size //
num_replicas_in_sync if num_replicas_in_sync else batch_size)
expected_values = [[range(i, i+updated_batch_size),
range(i+updated_batch_size, i+2*updated_batch_size)]
for i in range(0, 100, updated_batch_size*2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
sess=None,
num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
num_replicas_in_sync=[None, 2],
distribution=[
strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
enable_get_next_as_optional=[True, False]))
def testBatchSplittingMultiWorker(self, input_type, api_type, iteration_type,
num_replicas_in_sync, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
batch_size = 10
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(100).batch(batch_size)
return dataset
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
updated_batch_size = (
batch_size //
num_replicas_in_sync if num_replicas_in_sync else batch_size)
expected_values = [
[ # pylint: disable=g-complex-comprehension
range(i, i + updated_batch_size),
range(i + updated_batch_size, i + 2 * updated_batch_size)
] for i in range(0, 100, updated_batch_size * 2)
]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
sess=None,
num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
))
def testCacheAcrossIteration(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
dataset = dataset_ops.Dataset.range(16).shuffle(16).cache().batch(4)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
first_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
second_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
self.assertAllEqual(first_epoch, second_epoch)
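    # A minimal single-machine sketch of the same property (assuming a plain
    # tf.data pipeline; not part of this test):
    #   ds = dataset_ops.Dataset.range(16).shuffle(16).cache().batch(4)
    #   epochs = [[b.numpy().tolist() for b in ds] for _ in range(2)]
    #   assert epochs[0] == epochs[1]  # cache() replays the first epoch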
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
reshuffle=[True, False]))
def testShuffleAcrossIterations(self, distribution, reshuffle):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
if not reshuffle and not compat.forward_compatible(2020, 5, 22):
self.skipTest("Functionality currently not supported.")
dataset = dataset_ops.Dataset.range(12).shuffle(
12, reshuffle_each_iteration=reshuffle).batch(4)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
first_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
second_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
if reshuffle:
self.assertNotAllEqual(first_epoch, second_epoch)
else:
self.assertAllEqual(first_epoch, second_epoch)
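    # With reshuffle_each_iteration=True each epoch draws a fresh permutation;
    # with False the first epoch's shuffle order is reused, which is exactly
    # what the two branches above assert.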
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testGetNextOptionalShape(self, distribution):
batch_size = 8
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"feature": array_ops.ones([batch_size, 10]),
"label": array_ops.ones([batch_size]),
})
dataset = dataset.batch(batch_size, drop_remainder=True)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
per_replica_batch_size = batch_size // distribution.num_replicas_in_sync
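    # e.g. with batch_size=8 and 2 replicas in sync, each replica sees
    # per-step tensors of shape [4, 10] for "feature" and [4] for "label".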
@def_function.function
def train_fn():
for data in dist_dataset:
data = nest.map_structure(distribution.experimental_local_results, data)
feature = data["feature"]
label = data["label"]
# Assert the shapes are still static from all replicas.
for replica_id in range(len(distribution.extended.worker_devices)):
self.assertEqual([per_replica_batch_size, 10],
feature[replica_id].shape)
self.assertEqual([per_replica_batch_size], label[replica_id].shape)
train_fn()
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.OFF]))
def testAutoshardingOption(self, distribution, input_type, api_type,
iteration_type, auto_shard_policy):
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
ds_option = dataset_ops.Options()
ds_option.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset_fn = (
lambda _: dataset_ops.Dataset.range(4).with_options(ds_option))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
if auto_shard_policy == AutoShardPolicy.AUTO:
if id_in_cluster == 0:
expected_values = [[0], [2]]
else:
expected_values = [[1], [3]]
else:
expected_values = [[0], [1], [2], [3]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
input_type=["input_fn"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"]))
def testDifferentDatasetsMultiWorker(self, distribution, input_type, api_type,
iteration_type):
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(ctx):
if ctx.input_pipeline_id == 0:
return dataset_ops.Dataset.range(8).batch(2)
else:
return dataset_ops.Dataset.range(9).batch(2)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
if id_in_cluster == 0:
expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[]]]
else:
expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[8]]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"]))
def testLoopOverDatasetInTFFunction(self, strategy):
dataset = dataset_ops.Dataset.range(10).map(lambda x: { # pylint: disable=g-long-lambda
"y": math_ops.cast(x, dtypes.float32) ** 2,
}).batch(4)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
with strategy.scope():
v = variables.Variable(0.0, aggregation=variables.VariableAggregation.SUM)
@def_function.function
def iterator_fn(dist_dataset):
def assign_add_fn(data):
v.assign_add(math_ops.reduce_sum(data["y"]))
for data in dist_dataset:
strategy.run(assign_add_fn, args=(data,))
iterator_fn(dist_dataset)
self.assertEqual(v.numpy(), 285.0)
class DistributedIteratorTensorTypeTest(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for DistributedDataset with non-dense tensors."""
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
defun_type=["lambda", "tf_function"],
))
def testRaggedSparse(self, distribution, input_type, drop_remainder,
defun_type):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
defun = {"lambda": lambda f: f,
"tf_function": def_function.function}[defun_type]
distribution.extended.experimental_enable_get_next_as_optional = True
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
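      # Here row_lengths == [0, 1, 2, 3, 0, 1, 2, 3, ...], so rows cycle
      # through lengths 0-3 and the ragged tensor holds 30 values in total.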
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
return dataset.batch(batch_size, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
dataset = self._wrap_dataset(input_type, dataset_or_input_fn,
distribution.extended._input_workers,
len(distribution.extended.worker_devices),
distribution)
# Assert that the tensors are rebatched and sparsity is preserved.
per_replica_batch = defun(lambda x: next(iter(x)))(dataset)
self.assertAllEqual(
distribute_utils.select_replica(0, per_replica_batch["dense"]),
[[0., 0., 0.], [1., 0., 0.], [2., 2., 0.], [3., 3., 3.]])
self.assertAllEqual(
distribute_utils.select_replica(1, per_replica_batch["dense"]),
[[0., 0., 0.], [5., 0., 0.], [6., 6., 0.], [7., 7., 7.]])
# Transitively check the ragged and sparse tensors by densification.
for i in range(2):
self.assertLen(
distribute_utils.select_replica(i,
per_replica_batch["ragged"]).values,
6)
self.assertAllEqual(
distribute_utils.select_replica(
i, per_replica_batch["ragged"]).to_tensor(),
distribute_utils.select_replica(i, per_replica_batch["dense"]))
self.assertLen(
distribute_utils.select_replica(i,
per_replica_batch["sparse"]).indices,
6)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(
distribute_utils.select_replica(i, per_replica_batch["sparse"])),
distribute_utils.select_replica(i, per_replica_batch["dense"]))
# Iterate through all the batches and sum them up.
def sum_batch(per_replica_features):
"""Sums the `PerReplica` values in the `per_replica_features` map."""
def map_fn(per_replica_values):
per_replica_sums = distribution.run(
(lambda x: math_ops.reduce_sum(x.values)) if all(
map(sparse_tensor.is_sparse, per_replica_values.values)) else
math_ops.reduce_sum, (per_replica_values,))
return distribution.reduce(
reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)
return nest.map_structure(map_fn, per_replica_features)
def _reduce(state, batch):
sums = sum_batch(batch)
return {name: value + sums[name] for name, value in state.items()}
def sum_for_loop(dataset):
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
for batch in dataset:
sums = _reduce(sums, batch)
return sums
def sum_while_loop(iterator, reduce_fn):
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
while True:
try:
sums = reduce_fn(sums, iterator)
except (StopIteration, errors.OutOfRangeError):
return sums
while_sums = sum_while_loop(
iter(dataset),
defun(lambda state, iterator: _reduce(state, next(iterator))))
self.assertAllEqual(
nest.flatten(while_sums),
# When there's no partial batch, the sum is smaller.
[200. if drop_remainder else 310.] * 3)
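    # (The full element sum is 310; dropping the 4-row remainder removes rows
    # 16..19, whose values sum to 110, leaving 200.)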
for_sums = defun(sum_for_loop)(dataset)
    # For loops always call get_next_as_optional inside tf.functions, so we
    # expect 310 here when using an input function (as there are 5 batches of
    # size 4 round-robined over 2 replicas).
expected_for_sum = 200.
if (not drop_remainder or (
defun_type == "tf_function" and input_type == "input_fn")):
expected_for_sum = 310.
self.assertAllEqual(nest.flatten(for_sums), [expected_for_sum] * 3)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
tensor_type=["sparse", "ragged"],
enable_get_next_as_optional=[True, False]
))
def testRaggedSparseGetNextAsOptional(
self, distribution, input_type, drop_remainder, tensor_type,
enable_get_next_as_optional):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
tensor_type: (ragged_tensor if tensor_type == "ragged" else
ragged_tensor.to_sparse()),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
return dataset.batch(batch_size, drop_remainder=drop_remainder)
if input_type == "dataset":
ds = distribution.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
else:
ds = distribution.distribute_datasets_from_function(dataset_fn)
iterator = iter(ds)
self.assertEqual(iterator._enable_get_next_as_optional,
(not drop_remainder) and enable_get_next_as_optional)
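    # get_next_as_optional is only needed when a replica might observe a
    # partial or empty batch, i.e. when drop_remainder=False; otherwise the
    # iterator skips the optional-based control flow.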
@combinations.generate(
combinations.combine(
tf_api_version=2,
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
# TODO(mdan): Add these?
# strategy_combinations.multi_worker_mirrored_2x1_cpu,
# strategy_combinations.multi_worker_mirrored_2x1_gpu,
# strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
))
def testRaggedSparseGetNextAsOptionalInLoop(
self, distribution, input_type, drop_remainder):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
self.skipTest("b/323359921")
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
return dataset.batch(batch_size, drop_remainder=drop_remainder)
if input_type == "dataset":
ds = distribution.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
else:
ds = distribution.distribute_datasets_from_function(dataset_fn)
# Iterate through all the batches and sum them up.
def sum_batch(per_replica_features):
"""Sums the `PerReplica` values in the `per_replica_features` map."""
def map_fn(per_replica_values):
per_replica_sums = distribution.run(
(lambda x: math_ops.reduce_sum(x.values)) if all(
map(sparse_tensor.is_sparse, per_replica_values.values)) else
math_ops.reduce_sum, (per_replica_values,))
return distribution.reduce(
reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)
return nest.map_structure(map_fn, per_replica_features)
def _reduce(state, batch):
sums = sum_batch(batch)
return {name: value + sums[name] for name, value in state.items()}
def sum_while_loop(ds):
iterator = iter(ds)
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
try_next = constant_op.constant(True)
while try_next:
opt_iterate = iterator.get_next_as_optional()
if opt_iterate.has_value():
sums = _reduce(sums, opt_iterate.get_value())
else:
try_next = False
return sums
sums = def_function.function(sum_while_loop)(ds)
    # The loop above calls get_next_as_optional inside a tf.function, so we
    # expect 310 here when using an input function (as there are 5 batches of
    # size 4 round-robined over 2 replicas).
expected_for_sum = 200.
if not drop_remainder or input_type == "input_fn":
expected_for_sum = 310.
self.assertAllEqual(nest.flatten(sums), [expected_for_sum] * 3)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testMWMSPartialBatch(self, input_type, api_type, iteration_type,
distribution):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior when we have two files each with
# 12 elements and a global batch size of 8. When we consider the dataset in
# aggregate (non-distributed), there are 24 elements divided into 3 batches
# of size 8. Hence, the correct distributed behavior is for each replica to
# see sub-batches of size 4, over three steps.
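    # Worked example for one worker: range(12).batch(8) yields batches of
    # sizes 8 and 4; rebatching to the per-replica size 4 gives the three
    # steps [0..3], [4..7], [8..11] asserted in expected_values below.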
def dataset_fn(ctx):
del ctx
dataset = dataset_ops.Dataset.range(12).batch(8)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as there is 1 local
# replica.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. Each worker should rebatch its dataset into
# smaller batches of size 4.
expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9, 10, 11]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testMWMSPartialBatchWithLegacyRebatch(self, input_type, api_type,
iteration_type, distribution):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior when we have two files each with
# 12 elements and a global batch size of 8. When we consider the dataset in
# aggregate (non-distributed), there are 24 elements divided into 3 batches
# of size 8. Hence, the correct distributed behavior is for each replica to
# see sub-batches of size 4, over three steps. However, when we create a
# DistributedDataset and cannot statically infer the intended global batch
# size (e.g. if the user does not use a batching dataset), each worker will
# rebatch based on the dynamic batch size of the data encountered, even when
# it encounters partial batches. The last per-worker partial batch (size 4)
# ends up being split into two replicas, resulting in 4 steps in total, of
# (global) batch sizes 8, 8, 4, 4.
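    # Worked example for one worker: batches of sizes 8 and 4 arrive; legacy
    # rebatch splits each incoming batch across the 2 global replicas, so the
    # size-4 partial batch becomes two size-2 steps (per-worker sizes 4, 4,
    # 2, 2), matching expected_values below.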
def dataset_fn(ctx):
del ctx
# The following dataset is equivalent to
# tf.data.Dataset.range(12).batch(8), but does not use a batching dataset.
# This causes DistributedDataset to use LegacyRebatch instead.
batch_sizes = dataset_ops.Dataset.from_tensor_slices([8, 4])
offsets = dataset_ops.Dataset.from_tensor_slices([0, 8])
dataset = dataset_ops.Dataset.zip((offsets, batch_sizes))
def map_fn(offset, batch_size):
return math_ops.range(offset, offset + batch_size)
dataset = dataset.map(map_fn)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is equivalent to
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as the number of global
# replicas is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
    # Each test runs individually on each worker, so we compare the
    # values on each worker. Each worker should rebatch its dataset into
    # batches of size 4, with the final partial batch split into two
    # batches of size 2.
expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9]], [[10, 11]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.DATA]))
def testMWMSWithDataSharding(self, input_type, api_type, iteration_type,
distribution, auto_shard_policy):
# Test case: 2 workers, 1 replica each.
    # This test simulates the sharded behavior when the dataset is sharded by
    # data and the batch size is not divisible by the number of replicas. It
    # checks that the elements are as expected and that the batch sizes across
    # all workers add up to 3 at each step. This test will only pass if the
    # autoshard rewrite rewrites RebatchDatasetV2 to legacy RebatchDataset
    # when sharding by data.
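    # Worked example: range(8).batch(3) yields [0, 1, 2], [3, 4, 5], [6, 7];
    # each batch is then split across the two workers (e.g. [0, 1] to worker
    # 0 and [2] to worker 1), matching expected_values below.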
def dataset_fn(ctx):
del ctx
dataset = dataset_ops.Dataset.range(8).batch(3)
      # Set the sharding behavior to the parameterized auto_shard_policy
      # (AUTO or DATA); with data sharding, each worker sees a different
      # shard of the elements of tf.data.Dataset.range(8).batch(3).
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as there is 1 local
# replica.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. We expect each worker to see different shards of
# data.
cr = distribution.cluster_resolver
worker_id = multi_worker_util.id_in_cluster(cr.cluster_spec(), cr.task_type,
cr.task_id)
if worker_id == 0:
expected_values = [[[0, 1]], [[3, 4]], [[6]]]
elif worker_id == 1:
expected_values = [[[2]], [[5]], [[7]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
class DistributedIteratorPerDeviceTest(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for PER_WORKER and PER_REPLICA's InputOptions variants."""
def setUp(self):
context._reset_context()
strategy_combinations.set_virtual_cpus_to_at_least(3)
super(DistributedIteratorPerDeviceTest, self).setUp()
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA),
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testDevicePlacementForPerWorkerValuesWithPrefetch(self, distribution,
input_options):
    def dataset_fn(input_context):  # pylint: disable=unused-argument
return dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4])
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
for x in ds:
assert x.values[0].device == distribution.extended.worker_devices[0]
assert x.values[0].backing_device == distribution.extended.worker_devices[
0]
assert x.values[1].device == distribution.extended.worker_devices[1]
assert x.values[1].backing_device == distribution.extended.worker_devices[
1]
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER)
],
mode=["eager"],
))
def testDevicePlacementForPerWorkerValuesWithoutPrefetch(
self, distribution, input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.full(4, input_context.input_pipeline_id))
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
for x in ds:
x = distribution.run(lambda inputs: inputs, args=(x,))
assert x.values[
0].device == "/job:localhost/replica:0/task:0/device:CPU:0"
assert x.values[
0].backing_device == "/job:localhost/replica:0/task:0/device:CPU:0"
assert x.values[
1].device == "/job:localhost/replica:0/task:0/device:CPU:0"
assert x.values[
1].backing_device == "/job:localhost/replica:0/task:0/device:CPU:0"
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=True,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=True,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA)
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testDevicePlacementForInvalidCombinations(self, distribution,
input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.full(4, input_context.input_pipeline_id))
with self.assertRaises(ValueError):
distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=False,
experimental_per_replica_buffer_size=2),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_per_replica_buffer_size=2),
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testPrefetchBufferSizeInputOptions(self, distribution, input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.arange(1, 11).reshape(
(2, 5)) * (input_context.input_pipeline_id + 1))
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
# validating the values
x = next(iter(ds))
assert np.array_equal(x.values[0].numpy(), np.array([1, 2, 3, 4, 5]))
assert np.array_equal(x.values[1].numpy(), np.array([6, 7, 8, 9, 10]))
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testOutputValuesForPerWorkerInputOptions(self, distribution,
input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.arange(1, 11).reshape(
(2, 5)) * (input_context.input_pipeline_id + 1))
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
# validating the values
x = next(iter(ds))
assert np.array_equal(x.values[0].numpy(), np.array([1, 2, 3, 4, 5]))
assert np.array_equal(x.values[1].numpy(), np.array([6, 7, 8, 9, 10]))
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=True,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA),
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testOutputValuesForPerReplicaInputOptions(self, distribution,
input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.arange(1, 10) * (input_context.input_pipeline_id + 1))
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
expected = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
for i, x in enumerate(ds):
# validating the values
assert x.values[0].numpy() == expected[i]
assert x.values[1].numpy() == expected[i] * 2
loop_num = i
assert loop_num == len(expected) - 1
class DistributedIteratorTfDataServiceTest(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for distributed iterators which read from tf.data service."""
def setUp(self):
super(DistributedIteratorTfDataServiceTest, self).setUp()
self.num_workers = 3
if combinations.in_main_process():
self.dispatcher = server_lib.DispatchServer()
self.workers = []
for _ in range(self.num_workers):
self.workers.append(
server_lib.WorkerServer(
server_lib.WorkerConfig(
dispatcher_address=self.dispatcher.target.split("://")[1],
heartbeat_interval_ms=100,
dispatcher_timeout_ms=1000)))
combinations.env().tf_data_service_dispatcher = self.dispatcher.target
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testTfDataService(self, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
input_workers = input_lib.InputWorkers(worker_device_pairs)
dataset = dataset_ops.Dataset.range(1, 50)
dataset = dataset.apply(
data_service_ops._distribute(
processing_mode="parallel_epochs",
service=combinations.env().tf_data_service_dispatcher,
job_name="foo"))
dist_dataset = input_lib.get_distributed_dataset(dataset, input_workers,
distribution)
iterator = iter(dist_dataset)
results = []
for element in iterator:
local_results = distribution.experimental_local_results(element)
for result in local_results:
# input_lib.distributed_dataset may add extra '0' elements to pad
# per-replica results.
if result.numpy() != 0:
results.append(result.numpy())
self.assertNotEmpty(results)
gathered = distribution.gather(constant_op.constant(results), axis=0)
self.assertCountEqual(self.num_workers * list(range(1, 50)), gathered)
if __name__ == "__main__":
test_util.main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes_bi, disconnect_nodes, find_output, sync_blocks
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
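# Per BIP125, an input signals replaceability iff its nSequence is at most
# 0xfffffffd; the sequence-number checks below compare against this bound.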
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 3
self.extra_args = [
["-walletrbf=1"],
["-walletrbf=0"],
[]
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
disconnect_nodes(offline_node, 1)
disconnect_nodes(online_node, 0)
disconnect_nodes(offline_node, 2)
disconnect_nodes(mining_node, 0)
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = online_node.getnewaddress(address_type="p2sh-segwit")
online_node.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
mining_node.generate(nblocks=1)
sync_blocks([mining_node, online_node])
        # Construct an unsigned PSBT on the online node (which doesn't know the output is segwit, so it will include a non-witness UTXO)
utxos = online_node.listunspent(addresses=[offline_addr])
raw = online_node.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = online_node.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert("non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0])
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert("witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0])
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
mining_node.generate(1)
sync_blocks([mining_node, online_node])
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
# Reconnect
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
# Create and fund a raw tx for sending 10 BTC
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
        # Node 1 should not be able to add anything to it, but should still return the psbt unchanged
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
p2sh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['addresses'][0] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2pkh:
p2pkh_pos = out['n']
# spend single key from node 1
rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# feeRate of 0.1 BTC / KB produces a total fee slightly below -maxtxfee (~0.05280000):
res = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99}, 0, {"feeRate": 0.1})
assert_greater_than(res["fee"], 0.05)
assert_greater_than(0.06, res["fee"])
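        # Sanity arithmetic: at 0.1 BTC/kB a transaction of roughly 528 bytes
        # pays about 528 / 1000 * 0.1 = 0.0528 BTC, which the two bounds above
        # bracket.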
# feeRate of 10 BTC / KB produces a total fee well above -maxtxfee
# previously this was silently capped at -maxtxfee
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by -maxtxfee", self.nodes[1].walletcreatefundedpsbt, [{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99}, 0, {"feeRate": 10})
# partially sign multisig things with node 1
psbtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a non-psbt with signatures cannot be converted
        # Error could be either "TX decode failed" (segwit inputs cause parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
        # We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable":False}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set and RBF explicitly enabled
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in in decoded_psbt["tx"]["vin"]:
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height)
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
for tx_in in decoded_psbt["tx"]["vin"]:
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        # Make sure funding succeeds under BnB coin selection even when the
        # wallet lacks P2SH innerscript access to the provided change address
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Regression test for 14473 (mishandling of already-signed witness transaction):
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
# We don't care about the decode result, but decoding must succeed.
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet("wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress("", "bech32")
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress("", "legacy")
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
# Update a PSBT with UTXOs from the node
# Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
assert "witness_utxo" not in decoded['inputs'][0] and "non_witness_utxo" not in decoded['inputs'][0]
assert "witness_utxo" not in decoded['inputs'][1] and "non_witness_utxo" not in decoded['inputs'][1]
assert "witness_utxo" not in decoded['inputs'][2] and "non_witness_utxo" not in decoded['inputs'][2]
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
assert "witness_utxo" in decoded['inputs'][0] and "non_witness_utxo" not in decoded['inputs'][0]
assert "witness_utxo" not in decoded['inputs'][1] and "non_witness_utxo" not in decoded['inputs'][1]
assert "witness_utxo" not in decoded['inputs'][2] and "non_witness_utxo" not in decoded['inputs'][2]
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.nodes[0].generate(6)
self.sync_all()
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
shuffled = False
for i in range(0, 10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
if shuffled:
break
assert shuffled
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
addrinfo = self.nodes[1].getaddressinfo(addr)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
analyzed = self.nodes[0].analyzepsbt(psbt)
assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
# After update with wallet, only needs signing
updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
analyzed = self.nodes[0].analyzepsbt(updated)
assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
# Check fee and size things
assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
# After signing and finalizing, needs extracting
signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
analyzed = self.nodes[0].analyzepsbt(signed)
assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
if __name__ == '__main__':
PSBTTest().main()
|
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def someFunctionThatReturnsDeletedValueViaAttributeLookup():
class C:
def __getattr__(self, attr_name):
nonlocal a
del a
c = C()
a = 1
c.something
return a
try:
someFunctionThatReturnsDeletedValueViaAttributeLookup()
except UnboundLocalError:
print("OK, object attribute look-up correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaAttributeSetting():
class C:
def __setattr__(self, attr_name, value):
nonlocal a
del a
c = C()
a = 1
c.something = 1
return a
try:
someFunctionThatReturnsDeletedValueViaAttributeSetting()
except UnboundLocalError:
print("OK, object attribute setting correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaAttributeDel():
class C:
def __delattr__(self, attr_name):
nonlocal a
del a
return True
c = C()
a = 1
del c.something
return a
try:
someFunctionThatReturnsDeletedValueViaAttributeDel()
except UnboundLocalError:
print("OK, object attribute del correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaItemLookup():
class C:
def __getitem__(self, attr_name):
nonlocal a
del a
c = C()
a = 1
c[2]
return a
try:
someFunctionThatReturnsDeletedValueViaItemLookup()
except UnboundLocalError:
print("OK, object subscript look-up correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaItemSetting():
class C:
def __setitem__(self, attr_name, value):
nonlocal a
del a
c = C()
a = 1
c[2] = 3
return a
try:
someFunctionThatReturnsDeletedValueViaItemSetting()
except UnboundLocalError:
print("OK, object subscript setting correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaItemDel():
class C:
def __delitem__(self, attr_name):
nonlocal a
del a
c = C()
a = 1
del c[2]
return a
try:
someFunctionThatReturnsDeletedValueViaItemDel()
except UnboundLocalError:
print("OK, object subscript del correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaCall():
class C:
def __call__(self):
nonlocal a
del a
c = C()
a = 1
c()
return a
try:
someFunctionThatReturnsDeletedValueViaCall()
except UnboundLocalError:
print("OK, object call correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaAdd():
class C:
def __add__(self, other):
nonlocal a
del a
c = C()
a = 1
c + 1
return a
try:
someFunctionThatReturnsDeletedValueViaAdd()
except UnboundLocalError:
print("OK, object add correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaSub():
class C:
def __sub__(self, other):
nonlocal a
del a
c = C()
a = 1
c - 1
return a
try:
someFunctionThatReturnsDeletedValueViaSub()
except UnboundLocalError:
print("OK, object sub correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaMul():
class C:
def __mul__(self, other):
nonlocal a
del a
return 7
c = C()
a = 1
c * 1
return a
try:
someFunctionThatReturnsDeletedValueViaMul()
except UnboundLocalError:
print("OK, object mul correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaRemainder():
class C:
def __mod__(self, other):
nonlocal a
del a
return 7
c = C()
a = 1
c % 1
return a
try:
someFunctionThatReturnsDeletedValueViaRemainder()
except UnboundLocalError:
print("OK, object remainder correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaDivmod():
class C:
def __divmod__(self, other):
nonlocal a
del a
return 7
c = C()
a = 1
divmod(c, 1)
return a
try:
someFunctionThatReturnsDeletedValueViaDivmod()
except UnboundLocalError:
print("OK, object divmod correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaPower():
class C:
def __pow__(self, other):
nonlocal a
del a
return 7
c = C()
a = 1
c ** 1
return a
try:
someFunctionThatReturnsDeletedValueViaPower()
except UnboundLocalError:
print("OK, object power correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaUnaryMinus():
class C:
def __neg__(self):
nonlocal a
del a
return 7
c = C()
a = 1
-c
return a
try:
someFunctionThatReturnsDeletedValueViaUnaryMinus()
except UnboundLocalError:
print("OK, object unary minus correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaUnaryPlus():
class C:
def __pos__(self):
nonlocal a
del a
return 7
c = C()
a = 1
+c
return a
try:
someFunctionThatReturnsDeletedValueViaUnaryPlus()
except UnboundLocalError:
print("OK, object unary plus correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaNot():
class C:
def __bool__(self):
nonlocal a
del a
return False
c = C()
a = 1
not c
return a
try:
someFunctionThatReturnsDeletedValueViaNot()
except UnboundLocalError:
print("OK, object bool correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInvert():
class C:
def __invert__(self):
nonlocal a
del a
return False
c = C()
a = 1
~c
return a
try:
someFunctionThatReturnsDeletedValueViaInvert()
except UnboundLocalError:
print("OK, object invert correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaLshift():
class C:
def __lshift__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c << 1
return a
try:
someFunctionThatReturnsDeletedValueViaLshift()
except UnboundLocalError:
print("OK, object lshift correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaRshift():
class C:
def __rshift__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c >> 1
return a
try:
someFunctionThatReturnsDeletedValueViaRshift()
except UnboundLocalError:
print("OK, object rshift correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaBitwiseAnd():
class C:
def __and__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c & 1
return a
try:
someFunctionThatReturnsDeletedValueViaBitwiseAnd()
except UnboundLocalError:
print("OK, object bitwise and correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaBitwiseOr():
class C:
def __or__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c | 1
return a
try:
someFunctionThatReturnsDeletedValueViaBitwiseOr()
except UnboundLocalError:
print("OK, object bitwise or correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaBitwiseXor():
class C:
def __xor__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c ^ 1
return a
try:
someFunctionThatReturnsDeletedValueViaBitwiseXor()
except UnboundLocalError:
print("OK, object bitwise xor correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInt():
class C:
def __int__(self):
nonlocal a
del a
return False
c = C()
a = 1
int(c)
return a
try:
someFunctionThatReturnsDeletedValueViaInt()
except UnboundLocalError:
print("OK, object int correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaFloat():
class C:
def __float__(self):
nonlocal a
del a
return 0.0
c = C()
a = 1
float(c)
return a
try:
someFunctionThatReturnsDeletedValueViaFloat()
except UnboundLocalError:
print("OK, object float correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaComplex():
class C:
def __complex__(self):
nonlocal a
del a
return 0j
c = C()
a = 1
complex(c)
return a
try:
someFunctionThatReturnsDeletedValueViaComplex()
except UnboundLocalError:
print("OK, object complex correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceAdd():
class C:
def __iadd__(self, other):
nonlocal a
del a
c = C()
a = 1
c += 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceAdd()
except UnboundLocalError:
print("OK, object inplace add correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceSub():
class C:
def __isub__(self, other):
nonlocal a
del a
c = C()
a = 1
c -= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceSub()
except UnboundLocalError:
print("OK, object inplace sub correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceMul():
class C:
def __imul__(self, other):
nonlocal a
del a
c = C()
a = 1
c *= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceMul()
except UnboundLocalError:
print("OK, object inplace mul correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceRemainder():
class C:
def __imod__(self, other):
nonlocal a
del a
c = C()
a = 1
c %= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceRemainder()
except UnboundLocalError:
print("OK, object inplace remainder correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplacePower():
class C:
def __ipow__(self, other):
nonlocal a
del a
return 7
c = C()
a = 1
c **= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplacePower()
except UnboundLocalError:
print("OK, object inplace power correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceAnd():
class C:
def __iand__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c &= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceAnd()
except UnboundLocalError:
print("OK, object inplace and correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceFloordiv():
class C:
def __ifloordiv__(self, other):
nonlocal a
del a
return 7
c = C()
a = 1
c //= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceFloordiv()
except UnboundLocalError:
print("OK, object inplace floordiv correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceLshift():
class C:
def __ilshift__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c <<= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceLshift()
except UnboundLocalError:
print("OK, object inplace lshift correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceRshift():
class C:
def __irshift__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c >>= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceRshift()
except UnboundLocalError:
print("OK, object inplace rshift correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceOr():
class C:
def __ior__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c |= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceOr()
except UnboundLocalError:
print("OK, object inplace or correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceTrueDiv():
class C:
def __itruediv__(self, other):
nonlocal a
del a
return 7
c = C()
a = 1
c /= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceTrueDiv()
except UnboundLocalError:
print("OK, object inplace truediv correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInplaceXor():
class C:
def __ixor__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c ^= 1
return a
try:
someFunctionThatReturnsDeletedValueViaInplaceXor()
except UnboundLocalError:
print("OK, object inplace xor correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaIndex():
class C:
def __index__(self):
nonlocal a
del a
return 0
c = C()
a = 1
[1][c]
return a
try:
someFunctionThatReturnsDeletedValueViaIndex()
except UnboundLocalError:
print("OK, object index correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaLen():
class C:
def __len__(self):
nonlocal a
del a
return 0
c = C()
a = 1
len(c)
return a
try:
someFunctionThatReturnsDeletedValueViaLen()
except UnboundLocalError:
print("OK, object len correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaRepr():
class C:
def __repr__(self):
nonlocal a
del a
return "<some_repr>"
c = C()
a = 1
repr(c)
return a
try:
someFunctionThatReturnsDeletedValueViaRepr()
except UnboundLocalError:
print("OK, object repr correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaStr():
class C:
def __str__(self):
nonlocal a
del a
return "<some_repr>"
c = C()
a = 1
str(c)
return a
try:
someFunctionThatReturnsDeletedValueViaStr()
except UnboundLocalError:
print("OK, object str correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaCompare():
class C:
def __lt__(self, other):
nonlocal a
del a
return "<some_repr>"
c = C()
a = 1
c < None
return a
try:
someFunctionThatReturnsDeletedValueViaCompare()
except UnboundLocalError:
print("OK, object compare correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaDel():
class C:
def __del__(self):
nonlocal a
del a
return "<some_repr>"
c = C()
a = 1
del c
return a
try:
someFunctionThatReturnsDeletedValueViaDel()
except UnboundLocalError:
print("OK, object del correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaHash():
class C:
def __hash__(self):
nonlocal a
del a
return 42
c = C()
a = 1
{}[c] = 1
return a
try:
someFunctionThatReturnsDeletedValueViaHash()
except UnboundLocalError:
print("OK, object hash correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaIter():
class C:
def __iter__(self):
nonlocal a
del a
return iter(range(2))
c = C()
a = 1
x, y = c
return a, x, y
try:
someFunctionThatReturnsDeletedValueViaIter()
except UnboundLocalError:
print("OK, object iter correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaBytes():
class C:
def __bytes__(self):
nonlocal a
del a
return bytes(range(2))
c = C()
a = 1
bytes(c)
    return a
try:
someFunctionThatReturnsDeletedValueViaBytes()
except UnboundLocalError:
print("OK, object bytes correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaEq():
class C:
def __eq__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c == 1
return a
try:
someFunctionThatReturnsDeletedValueViaEq()
except UnboundLocalError:
print("OK, object eq correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaLe():
class C:
def __le__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c <= 1
return a
try:
    someFunctionThatReturnsDeletedValueViaLe()
except UnboundLocalError:
print("OK, object le correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaGt():
class C:
def __gt__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c > 1
return a
try:
    someFunctionThatReturnsDeletedValueViaGt()
except UnboundLocalError:
print("OK, object gt correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaGe():
class C:
def __ge__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c >= 1
return a
try:
    someFunctionThatReturnsDeletedValueViaGe()
except UnboundLocalError:
print("OK, object ge correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaNe():
class C:
def __ne__(self, other):
nonlocal a
del a
return False
c = C()
a = 1
c != 1
return a
try:
    someFunctionThatReturnsDeletedValueViaNe()
except UnboundLocalError:
print("OK, object ne correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaContains():
class C:
def __contains__(self, item):
nonlocal a
del a
return False
c = C()
a = 1
1 in c
return a
try:
someFunctionThatReturnsDeletedValueViaContains()
except UnboundLocalError:
print("OK, object contains correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaInit():
class C:
def __init__(self):
nonlocal a
del a
a = 1
c = C()
return a
try:
someFunctionThatReturnsDeletedValueViaInit()
except UnboundLocalError:
print("OK, object init correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaNew():
class C:
        def __new__(cls):
nonlocal a
del a
a = 1
c = C()
return a
try:
someFunctionThatReturnsDeletedValueViaNew()
except UnboundLocalError:
print("OK, object new correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaDir():
class C:
def __dir__(self):
nonlocal a
del a
return []
c = C()
a = 1
dir(c)
return a
try:
someFunctionThatReturnsDeletedValueViaDir()
except UnboundLocalError:
print("OK, object dir correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaReversed():
class C:
def __reversed__(self):
nonlocal a
del a
return None
a = 1
c = C()
reversed(c)
return a
try:
someFunctionThatReturnsDeletedValueViaReversed()
except UnboundLocalError:
print("OK, object reversed correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaFormat():
class C:
def __format__(self, string):
nonlocal a
del a
return "formated string"
c = C()
a = 1
format(c, "some string")
return a
try:
someFunctionThatReturnsDeletedValueViaFormat()
except UnboundLocalError:
print("OK, object format correctly deleted an item.")
else:
print("Ouch.!")
def someFunctionThatReturnsDeletedValueViaAbs():
class C:
def __abs__(self):
nonlocal a
del a
return abs(10)
a = 1
c = C()
abs(c)
return a
try:
someFunctionThatReturnsDeletedValueViaAbs()
except UnboundLocalError:
print("OK, object abs correctly deleted an item.")
else:
print("Ouch.!")
# TODO: There must be way more than these.
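# For instance (a hypothetical addition in the same spirit, not part of the
# original list), attribute access routed through __getattr__ can delete the
# closure variable too:
def someFunctionThatReturnsDeletedValueViaGetattr():
    class C:
        def __getattr__(self, name):
            nonlocal a
            del a
            return 7
    c = C()
    a = 1
    c.some_attribute
    return a
try:
    someFunctionThatReturnsDeletedValueViaGetattr()
except UnboundLocalError:
    print("OK, object getattr correctly deleted an item.")
else:
    print("Ouch.!")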
|
|
""" Statistical methods used to define or modify position of glyphs.
References:
    Wilkinson L. The Grammar of Graphics, sections 7, 7.1
Method Types:
- Bin: Partitions a space before statistical calculation
- Summary: Produces a single value comprising a statistical summary
- Region: Produces two values bounding an interval.
- Smooth: Produces values representing smoothed versions of the input data.
- Link: Produces edges from pairs of nodes in a graph.
"""
from __future__ import absolute_import
import numpy as np
import pandas as pd
from bokeh.models.sources import ColumnDataSource
from bokeh.core.properties import (HasProps, Float, Either, String, Date, Datetime, Int,
Bool, List, Instance)
from .properties import Column, EitherColumn, ColumnLabel
class Stat(HasProps):
"""Represents a statistical operation to summarize a column of data.
    Can be computed from either a ColumnLabel with a ColumnDataSource, *or* a
    discrete column of data.
"""
# inputs
column = ColumnLabel(help="""A column to use for the stat calculation. Required
when providing a ColumnDataSource as input.""")
source = Instance(ColumnDataSource, help="""One option for providing the data
source for stat calculation.""")
values = EitherColumn(Column(Float), Column(Int), Column(String),
Column(Date), Column(Datetime), Column(Bool), default=None, help="""
Second option for providing values for stat calculation is by
passing the actual column of data.""")
# output
value = Float(help="""The value calculated for the stat. Some stats could use
multiple properties to provide the calculation if required.""")
def __init__(self, **properties):
source = properties.pop('source', None)
if source is not None:
if isinstance(source, pd.DataFrame):
source = ColumnDataSource(source)
properties['source'] = source
super(Stat, self).__init__(**properties)
self._refresh()
def _refresh(self):
"""Lazy update of properties, used for initial transform init."""
if self.get_data() is not None:
self.update()
self.calculate()
def set_data(self, data, column=None):
"""Set data properties and update all dependent properties."""
if isinstance(data, pd.DataFrame):
data = ColumnDataSource(data)
if isinstance(data, ColumnDataSource):
self.source = data
if column is not None:
self.column = column
else:
self.values = data
self.update()
self.calculate()
def get_data(self, column=None):
"""Returns the available columnlabel/source values or column values."""
if self.source is not None and (self.column is not None or column is not None):
if column is not None:
col = column
else:
col = self.column
return pd.Series(self.source.data[col])
elif self.values is None and self.source is not None:
return pd.Series(self.source.to_df().index)
elif self.values is not None:
return self.values
else:
return None
def calculate(self):
"""Return transformed value from column label/source or column-like data."""
raise NotImplementedError('You must implement the calculate method '
'for each stat type.')
def update(self):
"""Perform any initial work before the actual calculation is performed."""
pass
class Sum(Stat):
def calculate(self):
self.value = self.get_data().sum()
class Mean(Stat):
def calculate(self):
self.value = self.get_data().mean()
class Count(Stat):
def calculate(self):
self.value = self.get_data().count()
class CountDistinct(Stat):
def calculate(self):
self.value = self.get_data().nunique()
class Median(Stat):
def calculate(self):
self.value = self.get_data().median()
class StdDeviation(Stat):
def calculate(self):
self.value = self.get_data().std()
class Min(Stat):
def calculate(self):
self.value = self.get_data().min()
class Max(Stat):
def calculate(self):
self.value = self.get_data().max()
class Quantile(Stat):
"""Produces the cutpoint that divides the input data by the interval.
    Quartiles are a special case of quantiles that divide a dataset into four
    equal-sized groups. (https://en.wikipedia.org/wiki/Quantile)
"""
interval = Float(default=0.5)
def calculate(self):
self.value = self.get_data().quantile(self.interval)
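# Illustrative usage: Quantile(interval=0.25, values=pd.Series([1, 2, 3, 4])).value
# evaluates to 1.75 (pandas interpolates linearly between the 1st and 2nd points).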
class Bin(Stat):
"""Represents a single bin of data values and attributes of the bin."""
label = Either(String, List(String))
start = Either(Float, List(Float))
stop = Either(Float, List(Float))
start_label = String()
stop_label = String()
center = Either(Float, List(Float))
stat = Instance(Stat, default=Count())
width = Float()
def __init__(self, bin_label, values=None, source=None, **properties):
if isinstance(bin_label, tuple):
bin_label = list(bin_label)
else:
bin_label = [bin_label]
properties['label'] = bin_label
bounds = self.process_bounds(bin_label)
starts, stops = zip(*bounds)
centers = [(start + stop)/2.0 for start, stop in zip(starts, stops)]
if len(starts) == 1:
starts = starts[0]
stops = stops[0]
centers = centers[0]
else:
starts = list(starts)
stops = list(stops)
centers = list(centers)
properties['start'] = starts
properties['stop'] = stops
properties['center'] = centers
properties['values'] = values
super(Bin, self).__init__(**properties)
@staticmethod
def binstr_to_list(bins):
"""Produce a consistent display of a bin of data."""
value_chunks = bins.split(',')
value_chunks = [val.replace('[', '').replace(']', '').replace('(', '').replace(')', '') for val in value_chunks]
bin_values = [float(value) for value in value_chunks]
return bin_values[0], bin_values[1]
def process_bounds(self, bin_label):
if isinstance(bin_label, list):
return [self.binstr_to_list(dim) for dim in bin_label]
else:
return [self.binstr_to_list(bin_label)]
def update(self):
self.stat.set_data(self.values)
def calculate(self):
self.value = self.stat.value
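# Illustrative usage: Bin('(1.0, 2.0]', values=pd.Series([1.2, 1.8])) parses the
# label into start=1.0, stop=2.0, center=1.5, and its default Count stat yields 2.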
class BinStats(Stat):
"""A set of statistical calculations for binning values.
    The default bin count uses the Freedman-Diaconis rule:
    https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
"""
bins = Either(Int, Float, List(Float), default=None, help="""
If bins is an int, it defines the number of equal-width bins in the
given range. If bins is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform
bin widths.
(default: None, use Freedman-Diaconis rule)
""")
bin_width = Float(default=None, help='Use Freedman-Diaconis rule if None.')
q1 = Quantile(interval=0.25)
q3 = Quantile(interval=0.75)
labels = List(String)
def __init__(self, values=None, column=None, **properties):
properties['values'] = values
properties['column'] = column or 'values'
super(BinStats, self).__init__(**properties)
def update(self):
values = self.get_data()
self.q1.set_data(values)
self.q3.set_data(values)
if self.bins is None:
self.calc_num_bins(values)
def calc_num_bins(self, values):
"""Calculate optimal number of bins using IQR.
From: http://stats.stackexchange.com/questions/114490/optimal-bin-width-for-two-dimensional-histogram
"""
iqr = self.q3.value - self.q1.value
if iqr == 0:
self.bin_width = np.sqrt(values.size)
else:
self.bin_width = 2 * iqr * (len(values) ** -(1. / 3.))
self.bins = int(np.ceil((values.max() - values.min()) / self.bin_width))
if self.bins <= 1:
self.bins = 3
def calculate(self):
pass
class BinnedStat(Stat):
""" Base class for shared functionality accross bins and aggregates
dimensions for plotting.
"""
bin_stat = Instance(BinStats, help="""
A mapping between each dimension and associated binning calculations.
""")
bins = List(Instance(Bin), help="""
A list of the `Bin` instances that were produced as result of the inputs.
Iterating over `Bins` will iterate over this list. Each `Bin` can be inspected
for metadata about the bin and the values associated with it.
""")
stat = Instance(Stat, default=Count(), help="""
The statistical operation to be used on the values in each bin.
""")
bin_column = String()
centers_column = String()
aggregate = Bool(default=True)
bin_values = Bool(default=False)
bin_width = Float()
def __init__(self, values=None, column=None, bins=None,
stat='count', source=None, **properties):
if isinstance(stat, str):
stat = stats[stat]()
properties['column'] = column or 'vals'
properties['stat'] = stat
properties['values'] = values
properties['source'] = source
self._bins = bins
super(BinnedStat, self).__init__(**properties)
def _get_stat(self):
stat_kwargs = {}
if self.source is not None:
stat_kwargs['source'] = self.source
stat_kwargs['column'] = self.column
elif self.values is not None:
stat_kwargs['values'] = self.values
stat_kwargs['bins'] = self._bins
return BinStats(**stat_kwargs)
def update(self):
self.bin_stat = self._get_stat()
self.bin_stat.update()
class Bins(BinnedStat):
"""Bins and aggregates dimensions for plotting.
Takes the inputs and produces a list of bins that can be iterated over and
inspected for their metadata. The bins provide easy access to consistent labeling,
bounds, and values.
"""
def calculate(self):
bin_str = '_bin'
self.bin_column = self.column + bin_str
bin_models = []
data = self.bin_stat.get_data()
bins = self.bin_stat.bins
# Choose bin bounds when data range is ill-defined; pd.cut()
# does not handle this well for values that are <= 0
if data.size < 2:
raise ValueError('Histogram data must have at least two elements.')
if data.ndim == 1 and data.std() == 0:
margin = 0.01 * abs(float(data[0])) or 0.01
bins = np.linspace(data[0] - margin, data[0] + margin, bins+1)
binned, bin_bounds = pd.cut(data, bins,
retbins=True, include_lowest=True, precision=0)
self.bin_width = np.round(bin_bounds[2] - bin_bounds[1], 1)
if self.source is not None:
# add bin column to data source
self.source.add(binned.tolist(), name=self.bin_column)
df = self.source.to_df()
else:
df = pd.DataFrame({self.column: self.values, self.bin_column: binned})
for name, group in df.groupby(self.bin_column):
bin_models.append(Bin(bin_label=name, values=group[self.column],
stat=self.stat))
self.bins = bin_models
centers = binned.copy()
centers = centers.astype(str)
for bin in self.bins:
centers[binned == bin.label] = bin.center
self.centers_column = self.column + '_center'
if self.source is not None:
self.source.add(centers.tolist(), name=self.centers_column)
else:
df[self.centers_column] = centers
def __getitem__(self, item):
return self.bins[item]
def apply(self, data):
self.set_data(data.source)
return self.source.to_df()
def sort(self, ascending=True):
if self.bins is not None:
self.bins = list(sorted(self.bins, key=lambda x: x.center,
                                    reverse=not ascending))
class Histogram(BinnedStat):
"""Bins and aggregates dimensions for plotting.
Takes the inputs and produces a list of bins that can be iterated over and
inspected for their metadata. The bins provide easy access to consistent labeling,
bounds, and values.
"""
density = Bool(False, help="""
Whether to normalize the histogram.
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
(default: False)
""")
def calculate(self):
bin_str = '_bin'
self.bin_column = self.column + bin_str
data = self.bin_stat.get_data()
bins = self.bin_stat.bins
binned, bin_bounds = np.histogram(
np.array(data), density=self.density, bins=bins
)
self.bin_width = np.round(bin_bounds[2] - bin_bounds[1], 1)
self.bins = []
for i, b in enumerate(binned):
width = bin_bounds[i+1] - bin_bounds[i]
if i == 0:
lbl = "[%.1f, %.1f]" % (bin_bounds[i], bin_bounds[i+1])
else:
lbl = "(%.1f, %.1f]" % (bin_bounds[i], bin_bounds[i+1])
self.bins.append(Bin(bin_label=lbl, values=[binned[i]], stat=Max(),
width=width))
def bins(data, values=None, column=None, bins=None, labels=None,
**kwargs):
"""Specify binning or bins to be used for column or values."""
if isinstance(data, str):
column = data
values = None
    else:
        column = None
        values = data
return Bins(values=values, column=column, bins=bins, **kwargs)
stats = {
'sum': Sum,
'mean': Mean,
'count': Count,
'nunique': CountDistinct,
'median': Median,
'stddev': StdDeviation,
'min': Min,
'max': Max,
'quantile': Quantile
}
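# Minimal usage sketch (illustrative only): each Stat subclass can be fed a
# plain column of values and exposes the result on its `value` property.
if __name__ == '__main__':
    series = pd.Series([1.0, 2.0, 2.5, 3.0, 10.0])
    print(Mean(values=series).value)                     # 3.7
    print(Quantile(interval=0.5, values=series).value)   # 2.5
    print(stats['max'](values=series).value)             # 10.0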
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Version 0.6 of 11.05.2014
import sys
import RPi.GPIO as GPIO
import os, urlparse
import time
import datetime
import ConfigParser
import smtplib
from SocketServer import ThreadingMixIn
import threading
from email.mime.text import MIMEText
from array import array
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import fcntl
class PiBell:
def __init__(self,configFile="/etc/pibell.conf"):
self._logObj = LogFile("log")
self._logObj.writeToLog("starting service")
#check if Configuration File exists
if not os.path.isfile(configFile):
self._logObj.writeToLog("[RaspiPo] configuration file was not found: " + configFile)
raise Exception("[RaspiPo] configuration file was not found: " + configFile)
#load configuration file
self._configuration = ConfigParser.ConfigParser()
self._configuration.read(configFile)
self._loadConfigurationItems(self._configuration)
self._setupGPIOPin(self._listen_gpio_pin)
#start services
self._logObj.writeToLog("[RaspiPo] listen on GPIO Pin No. " + str(self._listen_gpio_pin))
self._run()
def __del__(self):
self._logObj.writeToLog("[RaspiPo] shutting down service")
def _setupGPIOPin(self,gpio_pin):
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(gpio_pin,GPIO.IN)
def _loadConfigurationItems(self,configuration):
section = "Basic Configuration"
# section_name mandatory type/string = ''
optionlist = [ ['scan_period', True, 'float'],
['idle_time', True, 'float'],
['listen_gpio_pin', True, 'int']
]
for option in optionlist:
if not configuration.has_option(section,option[0]) and option[1]:
self._logObj.writeToLog("error in configuration file" + section + " Option: " + option[0])
raise Exception("Konfigurationsdatei fehlerhaft" + section + " Option: " + option[0])
else:
method = getattr(configuration,'get'+option[2])
val = method(section,option[0])
setattr(self,'_'+option[0],val)
#check email configuration options
section = "Email Notification"
if configuration.has_section(section):
if not self._configuration.getboolean(section,"email_enable"):
self._logObj.writeToLog("[Email] email notification is <disbaled>")
else:
self._logObj.writeToLog("[Email] email notification is <enabled>")
#the eMail - Object will load configuration itself.
                self._EmailObj = EmailNotification(self._configuration)
#check webui configuration options
section = 'WebUI'
if self._configuration.has_section(section):
if self._configuration.getboolean(section,"webui_enable"):
self._webui_port = self._configuration.getint(section,"webui_port")
self._logObj.writeToLog("[WEBUI] webui started on port " + str(self._webui_port))
try:
self._webui = ThreadedHTTPServer(('',self._webui_port),WebUIRequestHandler)
thread = threading.Thread(target=self._webui.serve_forever)
                    thread.setDaemon(True)
thread.start()
except Exception as e:
self._logObj.writeToLog("[WEBUI] error when starting the webui: " + e.args[1])
def _sendEmail(self):
try:
self._EmailObj.sendEmail()
except:
#email_enable = false
pass
def _run(self):
try:
while True:
#do some GPIO things here
if GPIO.input(self._listen_gpio_pin) == GPIO.LOW:
self._logObj.writeToLog("[RaspiPo] <<<<<< ... signal detected!!! >>>>>")
self._sendEmail()
#wait the amount of idle time in seconds
self._logObj.writeToLog("[RaspiPo] going to sleep for " + str(self._idle_time) + " seconds")
time.sleep(self._idle_time)
time.sleep(self._scan_period)
except KeyboardInterrupt:
pass #do nothing
except Exception as exception:
self._logObj.writeToLog("[RaspiPo] stop listen on GPIO Pin " + str(self._listen_gpio_pin))
def _startWebServer(self):
pass
class EmailNotification:
def __init__(self,configuration,logObj=None):
if logObj == None:
self._logObj = LogFile("log")
else:
self._logObj = logObj
self._logObj.writeToLog("[Email] starting email notifciation services")
self._loadConfiguration(configuration)
self._logObj.writeToLog("[Email] trying to to reach smtp server")
def _loadConfiguration(self,configuration):
self._logObj.writeToLog("[Email] reading email notification config")
section = "Email Notification"
# section_name mandatory type/string = ''
optionlist = [ ['email_recipient', True, ''],
['email_sendername', True, ''],
['email_senderaddress', True, ''],
['email_server_smtp', True, ''],
['email_server_port', True, 'int'],
['email_loginname', True, ''],
['email_loginpassword', True, ''],
['email_subject', True, ''],
['email_message', True, '']
]
for option in optionlist:
if not configuration.has_option(section,option[0]) and option[1]:
self._logObj.writeToLog("[Email] error in configuration file - section" + section + " option: " + option[0])
raise Exception("Konfigurationsdatei fehlerhaft" + section + " Option: " + option[0])
else:
method = getattr(configuration,'get'+option[2])
val = method(section,option[0])
setattr(self,'_'+option[0],val)
def sendEmail(self):
self._logObj.writeToLog("[Email] trying to send email")
try:
self._logObj.writeToLog("[Email] say EHLO to STMP server")
self._SMTPConnection= smtplib.SMTP(self._email_server_smtp,self._email_server_port)
self._SMTPConnection.ehlo()
self._logObj.writeToLog("[Email] trying to start secure connection")
self._SMTPConnection.starttls()
self._logObj.writeToLog("[Email] logging in to SMTP server")
self._SMTPConnection.login(self._email_loginname,self._email_loginpassword)
message = self._createMessage()
self._logObj.writeToLog("[Email] sending email to " + message['To'])
self._SMTPConnection.sendmail(message['From'],message['To'],message.as_string())
self._SMTPConnection.quit()
except Exception as e:
self._logObj.writeToLog("[Email]" + e.args[0])
def _createMessage(self):
message = self._email_message.replace("&time&",datetime.datetime.now().strftime("%H:%M:%S"))
message = message.replace("&date&",datetime.datetime.now().strftime("%d.%m.%Y"))
msg = MIMEText(message)
msg['Subject'] = self._email_subject
msg['From'] = self._email_senderaddress
msg['To'] = self._email_recipient
return msg
class LogFile:
def __init__(self,path):
self._now = datetime.datetime.now()
self._filename = path + "/" + self._now.strftime("%Y_%m_%d") + " logfile.log"
def writeToLog(self,message):
self._now = datetime.datetime.now()
file = open(sys.path[0] + "/" + self._filename, 'a+')
file.write(self._now.strftime("%H:%M:%S -> ") + message + "\n")
file.close()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a seperate thread"""
class WebUIRequestHandler(BaseHTTPRequestHandler):
#handle GET command
def do_GET(self):
self._rootdir = sys.path[0] + "/" + 'webui'
parsed_path = urlparse.urlparse(self.path)
self.serve_content(self.path)
def send_headers(self, path):
htype = ''
ftype = ''
if path.endswith('.js'):
htype = 'application/javascript'
ftype = 'r'
if path.endswith('.css'):
htype = 'text/css'
ftype = 'r'
if path.endswith('.html'):
htype = 'text/html'
ftype = 'r'
if path.endswith('.py'):
htype = 'text/html'
ftype = 'execute'
if path.endswith('.png'):
htype = 'image/png'
ftype = 'rb'
if path.endswith('.jpg'):
htype = 'image/jpeg'
ftype = 'rb'
        if path.endswith('.jpeg'):
htype = 'image/jpeg'
ftype = 'rb'
if path.endswith('.ico'):
htype = 'image/x-icon'
ftype = 'rb'
if path.endswith('.gif'):
htype = 'image/gif'
ftype = 'rb'
if htype != '':
self.send_header('Content-type', htype)
self.end_headers()
else:
self.send_header('Content-type', 'text/plain')
self.end_headers()
return ftype
def do_redirect(self, path="/index.html"):
self.send_response(301)
self.send_header('Location', path)
self.end_headers()
def serve_content(self, path="/"):
if path == "" or path == "/":
path = "/index.html"
self.do_redirect()
else:
f2r = self._rootdir + path
if os.path.isfile(f2r) or path.endswith('.log'):
try:
self.send_response(200)
ftype = self.send_headers(path)
if ftype != 'execute':
if path.endswith('.log'):
fopen = open(sys.path[0] + "/log" + path.replace("%20"," "))
content = fopen.read()
else:
fopen = open(self._rootdir + path)
content = fopen.read()
content = self._parseSpecialChars(content)
self.wfile.write(content)
fopen.close()
except Exception as exception:
self.send_error(404)
self.wfile.write(exception.args[0])
self.wfile.write("Requested resource %s unavailable" % str(f2r))
else:
self.send_error(404)
def _parseSpecialChars(self,content_in):
content = content_in.replace("%head%",open(self._rootdir + "/header.html").read())
if '%logfilelist%' in content:
logfilelist = os.listdir(sys.path[0] + "/log")
HTMLLogfileList = ''
for logfile in logfilelist:
if logfile.endswith(".log"):
HTMLLogfileList = HTMLLogfileList + '<a href="' + logfile + '" target="logfileViewer">' + logfile + '</a><br/>'
content = content.replace("%logfilelist%",HTMLLogfileList)
return content
def main(configFile="/etc/pibell.conf"):
if not prog_lock_acq('singleton.lock'):
print("RaspiPo already running")
exit(1)
try:
print "Starting services..."
pybell = PiBell(configFile)
except Exception as exception:
print "Shutting Down services..."
print exception.args[0]
os._exit(os.EX_OK)
def prog_lock_acq(lpath):
fd = None
try:
fd = os.open(lpath, os.O_CREAT)
fcntl.flock(fd, fcntl.LOCK_NB | fcntl.LOCK_EX)
return True
except(OSError, IOError):
if fd: os.close(fd)
return False
if __name__ == '__main__':
if len(sys.argv) > 1:
main(sys.argv[1])
else:
main()
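# A hypothetical /etc/pibell.conf covering the options read above (section and
# option names come from the optionlists in this file; values are illustrative):
#
#   [Basic Configuration]
#   scan_period = 0.1
#   idle_time = 30.0
#   listen_gpio_pin = 7
#
#   [Email Notification]
#   email_enable = true
#   email_recipient = you@example.com
#   email_sendername = PiBell
#   email_senderaddress = pibell@example.com
#   email_server_smtp = smtp.example.com
#   email_server_port = 587
#   email_loginname = pibell
#   email_loginpassword = secret
#   email_subject = Doorbell rang
#   email_message = The doorbell rang at &time& on &date&.
#
#   [WebUI]
#   webui_enable = true
#   webui_port = 8080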
|
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http.client
import mock
import pytest
import requests
def test__make_retry_timeout_kwargs_w_empty():
from google.cloud.datastore._http import _make_retry_timeout_kwargs
expected = {}
assert _make_retry_timeout_kwargs(None, None) == expected
def test__make_retry_timeout_kwargs_w_retry():
from google.cloud.datastore._http import _make_retry_timeout_kwargs
retry = object()
expected = {"retry": retry}
assert _make_retry_timeout_kwargs(retry, None) == expected
def test__make_retry_timeout_kwargs_w_timeout():
from google.cloud.datastore._http import _make_retry_timeout_kwargs
timeout = 5.0
expected = {"timeout": timeout}
assert _make_retry_timeout_kwargs(None, timeout) == expected
def test__make_retry_timeout_kwargs_w_both():
from google.cloud.datastore._http import _make_retry_timeout_kwargs
retry = object()
timeout = 5.0
expected = {"retry": retry, "timeout": timeout}
assert _make_retry_timeout_kwargs(retry, timeout) == expected
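# The four tests above pin down the helper's contract; a sketch of a function
# satisfying it looks like this (the real implementation lives in
# google.cloud.datastore._http and may differ in detail):
#
#   def _make_retry_timeout_kwargs(retry, timeout):
#       kwargs = {}
#       if retry is not None:
#           kwargs["retry"] = retry
#       if timeout is not None:
#           kwargs["timeout"] = timeout
#       return kwargs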
def test__make_request_pb_w_empty_dict():
from google.cloud.datastore._http import _make_request_pb
request = {}
foo = _make_request_pb(request, Foo)
assert isinstance(foo, Foo)
assert foo.bar is None
assert foo.baz is None
def test__make_request_pb_w_partial_dict():
from google.cloud.datastore._http import _make_request_pb
request = {"bar": "Bar"}
foo = _make_request_pb(request, Foo)
assert isinstance(foo, Foo)
assert foo.bar == "Bar"
assert foo.baz is None
def test__make_request_pb_w_complete_dict():
from google.cloud.datastore._http import _make_request_pb
request = {"bar": "Bar", "baz": "Baz"}
foo = _make_request_pb(request, Foo)
assert isinstance(foo, Foo)
assert foo.bar == "Bar"
assert foo.baz == "Baz"
def test__make_request_pb_w_instance():
from google.cloud.datastore._http import _make_request_pb
passed = Foo()
foo = _make_request_pb(passed, Foo)
assert foo is passed
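# Together these tests imply the helper's contract: a dict is expanded into the
# request class's constructor, while an existing instance passes through
# untouched. A sketch consistent with that behavior (again, the real helper is
# in google.cloud.datastore._http):
#
#   def _make_request_pb(request, request_pb_type):
#       if not isinstance(request, request_pb_type):
#           request = request_pb_type(**request)
#       return request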
def _request_helper(retry=None, timeout=None):
from google.cloud import _http as connection_module
from google.cloud.datastore._http import _request
project = "PROJECT"
method = "METHOD"
data = b"DATA"
base_url = "http://api-url"
user_agent = "USER AGENT"
client_info = _make_client_info(user_agent)
response_data = "CONTENT"
http = _make_requests_session([_make_response(content=response_data)])
kwargs = _retry_timeout_kw(retry, timeout, http)
response = _request(http, project, method, data, base_url, client_info, **kwargs)
assert response == response_data
# Check that the mocks were called as expected.
expected_url = _build_expected_url(base_url, project, method)
expected_headers = {
"Content-Type": "application/x-protobuf",
"User-Agent": user_agent,
connection_module.CLIENT_INFO_HEADER: user_agent,
}
if retry is not None:
retry.assert_called_once_with(http.request)
kwargs.pop("retry", None)
http.request.assert_called_once_with(
method="POST", url=expected_url, headers=expected_headers, data=data, **kwargs
)
def test__request_defaults():
_request_helper()
def test__request_w_retry():
retry = mock.MagicMock()
_request_helper(retry=retry)
def test__request_w_timeout():
timeout = 5.0
_request_helper(timeout=timeout)
def test__request_failure():
from google.cloud.exceptions import BadRequest
from google.cloud.datastore._http import _request
from google.rpc import code_pb2
from google.rpc import status_pb2
project = "PROJECT"
method = "METHOD"
data = "DATA"
uri = "http://api-url"
user_agent = "USER AGENT"
client_info = _make_client_info(user_agent)
error = status_pb2.Status()
error.message = "Entity value is indexed."
error.code = code_pb2.FAILED_PRECONDITION
session = _make_requests_session(
[_make_response(http.client.BAD_REQUEST, content=error.SerializeToString())]
)
with pytest.raises(BadRequest) as exc:
_request(session, project, method, data, uri, client_info)
expected_message = "400 Entity value is indexed."
assert exc.match(expected_message)
def _rpc_helper(retry=None, timeout=None):
from google.cloud.datastore._http import _rpc
from google.cloud.datastore_v1.types import datastore as datastore_pb2
http = object()
project = "projectOK"
method = "beginTransaction"
base_url = "test.invalid"
client_info = _make_client_info()
request_pb = datastore_pb2.BeginTransactionRequest(project_id=project)
response_pb = datastore_pb2.BeginTransactionResponse(transaction=b"7830rmc")
kwargs = _retry_timeout_kw(retry, timeout)
patch = mock.patch(
"google.cloud.datastore._http._request",
return_value=response_pb._pb.SerializeToString(),
)
with patch as mock_request:
result = _rpc(
http,
project,
method,
base_url,
client_info,
request_pb,
datastore_pb2.BeginTransactionResponse,
**kwargs
)
assert result == response_pb._pb
mock_request.assert_called_once_with(
http,
project,
method,
request_pb._pb.SerializeToString(),
base_url,
client_info,
**kwargs
)
def test__rpc_defaults():
_rpc_helper()
def test__rpc_w_retry():
retry = mock.MagicMock()
_rpc_helper(retry=retry)
def test__rpc_w_timeout():
timeout = 5.0
_rpc_helper(timeout=timeout)
def test_api_ctor():
client = object()
ds_api = _make_http_datastore_api(client)
assert ds_api.client is client
def _lookup_single_helper(
read_consistency=None, transaction=None, empty=True, retry=None, timeout=None,
):
from google.cloud.datastore_v1.types import datastore as datastore_pb2
from google.cloud.datastore_v1.types import entity as entity_pb2
project = "PROJECT"
key_pb = _make_key_pb(project)
options_kw = {}
if read_consistency is not None:
options_kw["read_consistency"] = read_consistency
if transaction is not None:
options_kw["transaction"] = transaction
read_options = datastore_pb2.ReadOptions(**options_kw)
rsp_pb = datastore_pb2.LookupResponse()
if not empty:
entity = entity_pb2.Entity()
entity.key._pb.CopyFrom(key_pb._pb)
rsp_pb._pb.found.add(entity=entity._pb)
http = _make_requests_session(
[_make_response(content=rsp_pb._pb.SerializeToString())]
)
client_info = _make_client_info()
client = mock.Mock(
_http=http,
_base_url="test.invalid",
_client_info=client_info,
spec=["_http", "_base_url", "_client_info"],
)
ds_api = _make_http_datastore_api(client)
request = {
"project_id": project,
"keys": [key_pb],
"read_options": read_options,
}
kwargs = _retry_timeout_kw(retry, timeout, http)
response = ds_api.lookup(request=request, **kwargs)
    assert response == rsp_pb._pb
if empty:
assert len(response.found) == 0
else:
assert len(response.found) == 1
assert len(response.missing) == 0
assert len(response.deferred) == 0
uri = _build_expected_url(client._base_url, project, "lookup")
request = _verify_protobuf_call(
http, uri, datastore_pb2.LookupRequest(), retry=retry, timeout=timeout,
)
if retry is not None:
retry.assert_called_once_with(http.request)
assert list(request.keys) == [key_pb._pb]
assert request.read_options == read_options._pb
def test_api_lookup_single_key_miss():
_lookup_single_helper()
def test_api_lookup_single_key_miss_w_read_consistency():
from google.cloud.datastore_v1.types import datastore as datastore_pb2
read_consistency = datastore_pb2.ReadOptions.ReadConsistency.EVENTUAL
_lookup_single_helper(read_consistency=read_consistency)
def test_api_lookup_single_key_miss_w_transaction():
transaction = b"TRANSACTION"
_lookup_single_helper(transaction=transaction)
def test_api_lookup_single_key_hit():
_lookup_single_helper(empty=False)
def test_api_lookup_single_key_hit_w_retry():
retry = mock.MagicMock()
_lookup_single_helper(empty=False, retry=retry)
def test_api_lookup_single_key_hit_w_timeout():
timeout = 5.0
_lookup_single_helper(empty=False, timeout=timeout)
def _lookup_multiple_helper(
found=0, missing=0, deferred=0, retry=None, timeout=None,
):
from google.cloud.datastore_v1.types import datastore as datastore_pb2
from google.cloud.datastore_v1.types import entity as entity_pb2
project = "PROJECT"
key_pb1 = _make_key_pb(project)
key_pb2 = _make_key_pb(project, id_=2345)
keys = [key_pb1, key_pb2]
read_options = datastore_pb2.ReadOptions()
rsp_pb = datastore_pb2.LookupResponse()
found_keys = []
for i_found in range(found):
key = keys[i_found]
found_keys.append(key._pb)
entity = entity_pb2.Entity()
entity.key._pb.CopyFrom(key._pb)
rsp_pb._pb.found.add(entity=entity._pb)
missing_keys = []
for i_missing in range(missing):
key = keys[i_missing]
missing_keys.append(key._pb)
entity = entity_pb2.Entity()
entity.key._pb.CopyFrom(key._pb)
rsp_pb._pb.missing.add(entity=entity._pb)
deferred_keys = []
for i_deferred in range(deferred):
key = keys[i_deferred]
deferred_keys.append(key._pb)
rsp_pb._pb.deferred.append(key._pb)
http = _make_requests_session(
[_make_response(content=rsp_pb._pb.SerializeToString())]
)
client_info = _make_client_info()
client = mock.Mock(
_http=http,
_base_url="test.invalid",
_client_info=client_info,
spec=["_http", "_base_url", "_client_info"],
)
ds_api = _make_http_datastore_api(client)
request = {
"project_id": project,
"keys": keys,
"read_options": read_options,
}
kwargs = _retry_timeout_kw(retry, timeout, http)
response = ds_api.lookup(request=request, **kwargs)
assert response == rsp_pb._pb
assert [found.entity.key for found in response.found] == found_keys
assert [missing.entity.key for missing in response.missing] == missing_keys
assert list(response.deferred) == deferred_keys
uri = _build_expected_url(client._base_url, project, "lookup")
request = _verify_protobuf_call(
http, uri, datastore_pb2.LookupRequest(), retry=retry, timeout=timeout,
)
assert list(request.keys) == [key_pb1._pb, key_pb2._pb]
assert request.read_options == read_options._pb
def test_api_lookup_multiple_keys_w_empty_response():
_lookup_multiple_helper()
def test_api_lookup_multiple_keys_w_retry():
retry = mock.MagicMock()
_lookup_multiple_helper(retry=retry)
def test_api_lookup_multiple_keys_w_timeout():
timeout = 5.0
_lookup_multiple_helper(timeout=timeout)
def test_api_lookup_multiple_keys_w_found():
_lookup_multiple_helper(found=2)
def test_api_lookup_multiple_keys_w_missing():
_lookup_multiple_helper(missing=2)
def test_api_lookup_multiple_keys_w_deferred():
_lookup_multiple_helper(deferred=2)
def _run_query_helper(
read_consistency=None,
transaction=None,
namespace=None,
found=0,
retry=None,
timeout=None,
):
from google.cloud.datastore_v1.types import datastore as datastore_pb2
from google.cloud.datastore_v1.types import entity as entity_pb2
from google.cloud.datastore_v1.types import query as query_pb2
project = "PROJECT"
kind = "Nonesuch"
query_pb = query_pb2.Query(kind=[query_pb2.KindExpression(name=kind)])
partition_kw = {"project_id": project}
if namespace is not None:
partition_kw["namespace_id"] = namespace
partition_id = entity_pb2.PartitionId(**partition_kw)
options_kw = {}
if read_consistency is not None:
options_kw["read_consistency"] = read_consistency
if transaction is not None:
options_kw["transaction"] = transaction
read_options = datastore_pb2.ReadOptions(**options_kw)
cursor = b"\x00"
batch_kw = {
"entity_result_type": query_pb2.EntityResult.ResultType.FULL,
"end_cursor": cursor,
"more_results": query_pb2.QueryResultBatch.MoreResultsType.NO_MORE_RESULTS,
}
if found:
batch_kw["entity_results"] = [
query_pb2.EntityResult(entity=entity_pb2.Entity())
] * found
rsp_pb = datastore_pb2.RunQueryResponse(
batch=query_pb2.QueryResultBatch(**batch_kw)
)
http = _make_requests_session(
[_make_response(content=rsp_pb._pb.SerializeToString())]
)
client_info = _make_client_info()
client = mock.Mock(
_http=http,
_base_url="test.invalid",
_client_info=client_info,
spec=["_http", "_base_url", "_client_info"],
)
ds_api = _make_http_datastore_api(client)
request = {
"project_id": project,
"partition_id": partition_id,
"read_options": read_options,
"query": query_pb,
}
kwargs = _retry_timeout_kw(retry, timeout, http)
response = ds_api.run_query(request=request, **kwargs)
assert response == rsp_pb._pb
uri = _build_expected_url(client._base_url, project, "runQuery")
request = _verify_protobuf_call(
http, uri, datastore_pb2.RunQueryRequest(), retry=retry, timeout=timeout,
)
assert request.partition_id == partition_id._pb
assert request.query == query_pb._pb
assert request.read_options == read_options._pb
def test_api_run_query_simple():
_run_query_helper()
def test_api_run_query_w_retry():
retry = mock.MagicMock()
_run_query_helper(retry=retry)
def test_api_run_query_w_timeout():
timeout = 5.0
_run_query_helper(timeout=timeout)
def test_api_run_query_w_read_consistency():
from google.cloud.datastore_v1.types import datastore as datastore_pb2
read_consistency = datastore_pb2.ReadOptions.ReadConsistency.EVENTUAL
_run_query_helper(read_consistency=read_consistency)
def test_api_run_query_w_transaction():
transaction = b"TRANSACTION"
_run_query_helper(transaction=transaction)
def test_api_run_query_w_namespace_nonempty_result():
namespace = "NS"
_run_query_helper(namespace=namespace, found=1)
def _begin_transaction_helper(options=None, retry=None, timeout=None):
from google.cloud.datastore_v1.types import datastore as datastore_pb2
project = "PROJECT"
transaction = b"TRANSACTION"
rsp_pb = datastore_pb2.BeginTransactionResponse()
rsp_pb.transaction = transaction
# Create mock HTTP and client with response.
http = _make_requests_session(
[_make_response(content=rsp_pb._pb.SerializeToString())]
)
client_info = _make_client_info()
client = mock.Mock(
_http=http,
_base_url="test.invalid",
_client_info=client_info,
spec=["_http", "_base_url", "_client_info"],
)
# Make request.
ds_api = _make_http_datastore_api(client)
request = {"project_id": project}
if options is not None:
request["transaction_options"] = options
kwargs = _retry_timeout_kw(retry, timeout, http)
response = ds_api.begin_transaction(request=request, **kwargs)
# Check the result and verify the callers.
assert response == rsp_pb._pb
uri = _build_expected_url(client._base_url, project, "beginTransaction")
request = _verify_protobuf_call(
http,
uri,
datastore_pb2.BeginTransactionRequest(),
retry=retry,
timeout=timeout,
)
def test_api_begin_transaction_wo_options():
_begin_transaction_helper()
def test_api_begin_transaction_w_options():
from google.cloud.datastore_v1.types import TransactionOptions
read_only = TransactionOptions.ReadOnly._meta.pb()
options = TransactionOptions(read_only=read_only)
_begin_transaction_helper(options=options)
def test_api_begin_transaction_w_retry():
retry = mock.MagicMock()
_begin_transaction_helper(retry=retry)
def test_api_begin_transaction_w_timeout():
timeout = 5.0
_begin_transaction_helper(timeout=timeout)
def _commit_helper(transaction=None, retry=None, timeout=None):
from google.cloud.datastore_v1.types import datastore as datastore_pb2
from google.cloud.datastore.helpers import _new_value_pb
project = "PROJECT"
key_pb = _make_key_pb(project)
rsp_pb = datastore_pb2.CommitResponse()
req_pb = datastore_pb2.CommitRequest()
mutation = req_pb._pb.mutations.add()
insert = mutation.upsert
insert.key.CopyFrom(key_pb._pb)
value_pb = _new_value_pb(insert, "foo")
value_pb.string_value = u"Foo"
http = _make_requests_session(
[_make_response(content=rsp_pb._pb.SerializeToString())]
)
client_info = _make_client_info()
client = mock.Mock(
_http=http,
_base_url="test.invalid",
_client_info=client_info,
spec=["_http", "_base_url", "_client_info"],
)
rq_class = datastore_pb2.CommitRequest
ds_api = _make_http_datastore_api(client)
request = {"project_id": project, "mutations": [mutation]}
if transaction is not None:
request["transaction"] = transaction
mode = request["mode"] = rq_class.Mode.TRANSACTIONAL
else:
mode = request["mode"] = rq_class.Mode.NON_TRANSACTIONAL
kwargs = _retry_timeout_kw(retry, timeout, http)
result = ds_api.commit(request=request, **kwargs)
assert result == rsp_pb._pb
uri = _build_expected_url(client._base_url, project, "commit")
request = _verify_protobuf_call(
http, uri, rq_class(), retry=retry, timeout=timeout,
)
assert list(request.mutations) == [mutation]
assert request.mode == mode
if transaction is not None:
assert request.transaction == transaction
else:
assert request.transaction == b""
def test_api_commit_wo_transaction():
_commit_helper()
def test_api_commit_w_transaction():
transaction = b"xact"
_commit_helper(transaction=transaction)
def test_api_commit_w_retry():
retry = mock.MagicMock()
_commit_helper(retry=retry)
def test_api_commit_w_timeout():
timeout = 5.0
_commit_helper(timeout=timeout)
def _rollback_helper(retry=None, timeout=None):
from google.cloud.datastore_v1.types import datastore as datastore_pb2
project = "PROJECT"
transaction = b"xact"
rsp_pb = datastore_pb2.RollbackResponse()
# Create mock HTTP and client with response.
http = _make_requests_session(
[_make_response(content=rsp_pb._pb.SerializeToString())]
)
client_info = _make_client_info()
client = mock.Mock(
_http=http,
_base_url="test.invalid",
_client_info=client_info,
spec=["_http", "_base_url", "_client_info"],
)
# Make request.
ds_api = _make_http_datastore_api(client)
request = {"project_id": project, "transaction": transaction}
kwargs = _retry_timeout_kw(retry, timeout, http)
response = ds_api.rollback(request=request, **kwargs)
# Check the result and verify the callers.
assert response == rsp_pb._pb
uri = _build_expected_url(client._base_url, project, "rollback")
request = _verify_protobuf_call(
http, uri, datastore_pb2.RollbackRequest(), retry=retry, timeout=timeout,
)
assert request.transaction == transaction
def test_api_rollback_ok():
_rollback_helper()
def test_api_rollback_w_retry():
retry = mock.MagicMock()
_rollback_helper(retry=retry)
def test_api_rollback_w_timeout():
timeout = 5.0
_rollback_helper(timeout=timeout)
def _allocate_ids_helper(count=0, retry=None, timeout=None):
from google.cloud.datastore_v1.types import datastore as datastore_pb2
project = "PROJECT"
before_key_pbs = []
after_key_pbs = []
rsp_pb = datastore_pb2.AllocateIdsResponse()
for i_count in range(count):
requested = _make_key_pb(project, id_=None)
before_key_pbs.append(requested)
allocated = _make_key_pb(project, id_=i_count)
after_key_pbs.append(allocated)
rsp_pb._pb.keys.add().CopyFrom(allocated._pb)
http = _make_requests_session(
[_make_response(content=rsp_pb._pb.SerializeToString())]
)
client_info = _make_client_info()
client = mock.Mock(
_http=http,
_base_url="test.invalid",
_client_info=client_info,
spec=["_http", "_base_url", "_client_info"],
)
ds_api = _make_http_datastore_api(client)
request = {"project_id": project, "keys": before_key_pbs}
kwargs = _retry_timeout_kw(retry, timeout, http)
response = ds_api.allocate_ids(request=request, **kwargs)
assert response == rsp_pb._pb
assert list(response.keys) == [i._pb for i in after_key_pbs]
uri = _build_expected_url(client._base_url, project, "allocateIds")
request = _verify_protobuf_call(
http, uri, datastore_pb2.AllocateIdsRequest(), retry=retry, timeout=timeout,
)
assert len(request.keys) == len(before_key_pbs)
for key_before, key_after in zip(before_key_pbs, request.keys):
assert key_before == key_after
def test_api_allocate_ids_empty():
_allocate_ids_helper()
def test_api_allocate_ids_non_empty():
_allocate_ids_helper(count=2)
def test_api_allocate_ids_w_retry():
retry = mock.MagicMock()
_allocate_ids_helper(retry=retry)
def test_api_allocate_ids_w_timeout():
timeout = 5.0
_allocate_ids_helper(timeout=timeout)
def _reserve_ids_helper(count=0, retry=None, timeout=None):
from google.cloud.datastore_v1.types import datastore as datastore_pb2
project = "PROJECT"
before_key_pbs = []
rsp_pb = datastore_pb2.ReserveIdsResponse()
for i_count in range(count):
requested = _make_key_pb(project, id_=i_count)
before_key_pbs.append(requested)
http = _make_requests_session(
[_make_response(content=rsp_pb._pb.SerializeToString())]
)
client_info = _make_client_info()
client = mock.Mock(
_http=http,
_base_url="test.invalid",
_client_info=client_info,
spec=["_http", "_base_url", "_client_info"],
)
ds_api = _make_http_datastore_api(client)
request = {"project_id": project, "keys": before_key_pbs}
kwargs = _retry_timeout_kw(retry, timeout, http)
response = ds_api.reserve_ids(request=request, **kwargs)
assert response == rsp_pb._pb
uri = _build_expected_url(client._base_url, project, "reserveIds")
request = _verify_protobuf_call(
        http, uri, datastore_pb2.ReserveIdsRequest(), retry=retry, timeout=timeout,
)
assert len(request.keys) == len(before_key_pbs)
for key_before, key_after in zip(before_key_pbs, request.keys):
assert key_before == key_after
def test_api_reserve_ids_empty():
_reserve_ids_helper()
def test_api_reserve_ids_non_empty():
_reserve_ids_helper(count=2)
def test_api_reserve_ids_w_retry():
retry = mock.MagicMock()
_reserve_ids_helper(retry=retry)
def test_api_reserve_ids_w_timeout():
timeout = 5.0
_reserve_ids_helper(timeout=timeout)
def _make_http_datastore_api(*args, **kwargs):
from google.cloud.datastore._http import HTTPDatastoreAPI
return HTTPDatastoreAPI(*args, **kwargs)
def _make_response(status=http.client.OK, content=b"", headers={}):
response = requests.Response()
response.status_code = status
response._content = content
response.headers = headers
response.request = requests.Request()
return response
def _make_requests_session(responses):
session = mock.create_autospec(requests.Session, instance=True)
session.request.side_effect = responses
return session
def _build_expected_url(api_base_url, project, method):
from google.cloud.datastore._http import API_VERSION
return "/".join([api_base_url, API_VERSION, "projects", project + ":" + method])
def _make_key_pb(project, id_=1234):
from google.cloud.datastore.key import Key
path_args = ("Kind",)
if id_ is not None:
path_args += (id_,)
return Key(*path_args, project=project).to_protobuf()
_USER_AGENT = "TESTING USER AGENT"
def _make_client_info(user_agent=_USER_AGENT):
from google.api_core.client_info import ClientInfo
client_info = mock.create_autospec(ClientInfo)
client_info.to_user_agent.return_value = user_agent
return client_info
def _verify_protobuf_call(http, expected_url, pb, retry=None, timeout=None):
from google.cloud import _http as connection_module
expected_headers = {
"Content-Type": "application/x-protobuf",
"User-Agent": _USER_AGENT,
connection_module.CLIENT_INFO_HEADER: _USER_AGENT,
}
if retry is not None:
retry.assert_called_once_with(http.request)
if timeout is not None:
http.request.assert_called_once_with(
method="POST",
url=expected_url,
headers=expected_headers,
data=mock.ANY,
timeout=timeout,
)
else:
http.request.assert_called_once_with(
method="POST", url=expected_url, headers=expected_headers, data=mock.ANY
)
data = http.request.mock_calls[0][2]["data"]
pb._pb.ParseFromString(data)
return pb
def _retry_timeout_kw(retry, timeout, http=None):
kwargs = {}
if retry is not None:
kwargs["retry"] = retry
if http is not None:
retry.return_value = http.request
if timeout is not None:
kwargs["timeout"] = timeout
return kwargs
class Foo:
def __init__(self, bar=None, baz=None):
self.bar = bar
self.baz = baz
|
|
import sublime
import json
from ... import utils
SIDE_COLOR = "color(#336B81 blend(var(--background) 60%))"
testbox = {"completions": {}, "documentation": {}}
def _plugin_loaded():
sublime.set_timeout_async(load)
def load():
global testbox
completions_data = load_json_data("completions")
for key in completions_data:
cfc = key.split(".").pop().capitalize()
if cfc == "Basespec":
cfc = "BaseSpec"
testbox["completions"][key] = [
(comp_key + "\t" + "TestBox " + cfc, completions_data[key][comp_key])
for comp_key in sorted(completions_data[key].keys())
]
if key == "expectation":
negated_completions = [
(
                    negate_string(comp_key) + "\t" + "TestBox " + cfc,
negate_string(completions_data[key][comp_key]),
)
for comp_key in sorted(completions_data[key].keys())
]
testbox["completions"][key].extend(negated_completions)
testbox["documentation"] = load_json_data("documentation")
def load_json_data(filename):
json_data = sublime.load_resource(
"Packages/"
+ utils.get_plugin_name()
+ "/src/plugins_/testbox/json/"
+ filename
+ ".json"
)
return json.loads(json_data)
def negate_string(string_to_negate):
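    # e.g. "toBe" becomes "notToBe"; the first letter is upper-cased to
    # preserve camelCase.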
return "not" + string_to_negate[0].upper() + string_to_negate[1:]
def get_setting(view, setting_key):
if (
view.window().project_file_name()
and setting_key in view.window().project_data()
):
return view.window().project_data()[setting_key]
package_settings = sublime.load_settings("cfml_package.sublime-settings")
return package_settings.get(setting_key)
def extends_testbox(cfml_view):
if cfml_view.view_metadata["extends"]:
return cfml_view.view_metadata["extends"].lower() == "testbox.system.basespec"
return False
def is_testbox_file(cfml_view):
if extends_testbox(cfml_view):
return True
if not cfml_view.file_name:
return False
    for folder in get_setting(cfml_view.view, "testbox_folders") or []:
if "/" + folder in cfml_view.file_path:
return True
return False
def get_dot_completions(cfml_view):
if not get_setting(cfml_view.view, "testbox_enabled"):
return None
# expectations
if (
is_testbox_file(cfml_view)
and len(cfml_view.dot_context) > 0
and cfml_view.dot_context[-1].name == "expect"
):
return cfml_view.CompletionList(testbox["completions"]["expectation"], 1, False)
# assertions
if (
is_testbox_file(cfml_view)
and len(cfml_view.dot_context) == 1
and cfml_view.dot_context[-1].name == "assert"
):
return cfml_view.CompletionList(testbox["completions"]["assertion"], 1, False)
return None
def get_script_completions(cfml_view):
if not get_setting(cfml_view.view, "testbox_enabled"):
return None
if is_testbox_file(cfml_view) and cfml_view.view.match_selector(
cfml_view.position, "meta.class.body.cfml"
):
return cfml_view.CompletionList(testbox["completions"]["basespec"], 1, False)
return None
def get_inline_documentation(cfml_view, doc_type):
if not get_setting(cfml_view.view, "testbox_enabled") or not is_testbox_file(
cfml_view
):
return None
if cfml_view.view.match_selector(
cfml_view.position, "meta.function-call.method.cfml"
):
function_name, function_name_region, function_args_region = utils.get_function_call(
cfml_view.view, cfml_view.position
)
region = sublime.Region(
function_name_region.begin(), function_args_region.end()
)
if doc_type == "hover_doc" and not function_name_region.contains(
cfml_view.position
):
return None
if cfml_view.view.substr(function_name_region.begin() - 1) == ".":
dot_context = cfml_view.get_dot_context(function_name_region.begin() - 1)
if dot_context[-1].name == "expect":
if function_name in testbox["documentation"]["expectation"]:
doc = get_documentation(
function_name,
testbox["documentation"]["expectation"][function_name],
)
return cfml_view.Documentation(
[dot_context[-1].name_region, region], doc, None, 2
)
if (
len(function_name) > 3
and function_name[:3] == "not"
and function_name[3:] in testbox["documentation"]["expectation"]
):
doc = testbox["documentation"]["expectation"][function_name[3:]]
return cfml_view.Documentation(
[dot_context[-1].name_region, region],
get_documentation(function_name, doc, True),
None,
2,
)
if (
dot_context[-1].name == "assert"
and function_name in testbox["documentation"]["assertion"]
):
doc = get_documentation(
function_name, testbox["documentation"]["assertion"][function_name]
)
return cfml_view.Documentation(
[dot_context[-1].name_region, region], doc, None, 2
)
if cfml_view.view.match_selector(cfml_view.position, "meta.function-call.cfml"):
function_name, function_name_region, function_args_region = utils.get_function_call(
cfml_view.view, cfml_view.position
)
region = sublime.Region(
function_name_region.begin(), function_args_region.end()
)
if doc_type == "hover_doc" and not function_name_region.contains(
cfml_view.position
):
return None
if function_name in testbox["documentation"]["basespec"]:
doc = get_documentation(
function_name, testbox["documentation"]["basespec"][function_name]
)
return cfml_view.Documentation([region], doc, None, 2)
return None
def get_documentation(key, metadata, negated=False):
testbox_doc = {"side_color": SIDE_COLOR, "html": {}}
testbox_doc["html"]["header"] = metadata["header"]
testbox_doc["html"]["body"] = metadata["body"]
testbox_doc["html"]["links"] = metadata["links"]
if negated:
testbox_doc["html"]["header"] += " (negated)"
return testbox_doc
|
|
from billing import Gateway, GatewayNotConfigured
from billing.gateway import InvalidData
from billing.signals import *
from billing.utils.credit_card import InvalidCard, Visa, MasterCard, \
AmericanExpress, Discover, CreditCard
from django.conf import settings
import braintree
class BraintreePaymentsGateway(Gateway):
supported_cardtypes = [Visa, MasterCard, AmericanExpress, Discover]
supported_countries = ["US"]
default_currency = "USD"
homepage_url = "http://www.braintreepayments.com/"
display_name = "Braintree Payments"
def __init__(self):
test_mode = getattr(settings, "MERCHANT_TEST_MODE", True)
if test_mode:
env = braintree.Environment.Sandbox
else:
env = braintree.Environment.Production
        merchant_settings = getattr(settings, "MERCHANT_SETTINGS", None)
if not merchant_settings or not merchant_settings.get("braintree_payments"):
raise GatewayNotConfigured("The '%s' gateway is not correctly "
"configured." % self.display_name)
braintree_settings = merchant_settings['braintree_payments']
braintree.Configuration.configure(
env,
braintree_settings['MERCHANT_ACCOUNT_ID'],
braintree_settings['PUBLIC_KEY'],
braintree_settings['PRIVATE_KEY']
)
def _cc_expiration_date(self, credit_card):
return "%s/%s" % (credit_card.month, credit_card.year)
def _cc_cardholder_name(self, credit_card):
return "%s %s" % (credit_card.first_name, credit_card.last_name)
def _build_request_hash(self, options):
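        # Translate the generic merchant options dict into Braintree's
        # transaction shape: a top-level order_id plus optional customer,
        # billing and shipping sub-hashes.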
request_hash = {
"order_id": options.get("order_id", ""),
}
if options.get("customer"):
name = options["customer"].get("name", "")
try:
first_name, last_name = name.split(" ", 1)
except ValueError:
first_name = name
last_name = ""
request_hash.update({
"customer": {
"first_name": first_name,
"last_name": last_name,
"company": options["customer"].get("company", ""),
"phone": options["customer"].get("phone", ""),
"fax": options["customer"].get("fax", ""),
"website": options["customer"].get("website", ""),
"email": options["customer"].get("email", options.get("email", ""))
}
})
if options.get("billing_address"):
name = options["billing_address"].get("name", "")
try:
first_name, last_name = name.split(" ", 1)
except ValueError:
first_name = name
last_name = ""
request_hash.update({
"billing": {
"first_name": first_name,
"last_name": last_name,
"company": options["billing_address"].get("company", ""),
"street_address": options["billing_address"].get("address1", ""),
"extended_address": options["billing_address"].get("address2", ""),
"locality": options["billing_address"].get("city", ""),
"region": options["billing_address"].get("state", ""),
"postal_code": options["billing_address"].get("zip", ""),
"country_code_alpha2": options["billing_address"].get("country", "")
}
})
if options.get("shipping_address"):
name = options["shipping_address"].get("name", "")
try:
first_name, last_name = name.split(" ", 1)
except ValueError:
first_name = name
last_name = ""
request_hash.update({
"shipping": {
"first_name": first_name,
"last_name": last_name,
"company": options["shipping_address"].get("company", ""),
"street_address": options["shipping_address"].get("address1", ""),
"extended_address": options["shipping_address"].get("address2", ""),
"locality": options["shipping_address"].get("city", ""),
"region": options["shipping_address"].get("state", ""),
"postal_code": options["shipping_address"].get("zip", ""),
"country_code_alpha2": options["shipping_address"].get("country", "")
}
})
return request_hash
def purchase(self, money, credit_card, options=None):
if not options:
options = {}
if isinstance(credit_card, CreditCard) and not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
request_hash = self._build_request_hash(options)
request_hash["amount"] = money
if options.get("merchant_account_id"):
request_hash["merchant_account_id"] = options.get("merchant_account_id")
if isinstance(credit_card, CreditCard):
request_hash["credit_card"] = {
"number": credit_card.number,
"expiration_date": self._cc_expiration_date(credit_card),
"cardholder_name": self._cc_cardholder_name(credit_card),
"cvv": credit_card.verification_value,
}
else:
request_hash["payment_method_token"] = credit_card
braintree_options = options.get("options", {})
braintree_options.update({"submit_for_settlement": True})
request_hash.update({
"options": braintree_options
})
response = braintree.Transaction.sale(request_hash)
if response.is_success:
status = "SUCCESS"
transaction_was_successful.send(sender=self,
type="purchase",
response=response)
else:
status = "FAILURE"
transaction_was_unsuccessful.send(sender=self,
type="purchase",
response=response)
return {"status": status, "response": response}
def authorize(self, money, credit_card, options=None):
if not options:
options = {}
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
request_hash = self._build_request_hash(options)
request_hash["amount"] = money
request_hash["credit_card"] = {
"number": credit_card.number,
"expiration_date": self._cc_expiration_date(credit_card),
"cardholder_name": self._cc_cardholder_name(credit_card),
"cvv": credit_card.verification_value,
}
braintree_options = options.get("options", {})
if braintree_options:
request_hash.update({
"options": braintree_options
})
response = braintree.Transaction.sale(request_hash)
if response.is_success:
status = "SUCCESS"
transaction_was_successful.send(sender=self,
type="authorize",
response=response)
else:
status = "FAILURE"
transaction_was_unsuccessful.send(sender=self,
type="authorize",
response=response)
return {"status": status, "response": response}
def capture(self, money, authorization, options=None):
response = braintree.Transaction.submit_for_settlement(authorization, money)
if response.is_success:
status = "SUCCESS"
transaction_was_successful.send(sender=self,
type="capture",
response=response)
else:
status = "FAILURE"
transaction_was_unsuccessful.send(sender=self,
type="capture",
response=response)
return {"status": status, "response": response}
def void(self, identification, options=None):
response = braintree.Transaction.void(identification)
if response.is_success:
status = "SUCCESS"
transaction_was_successful.send(sender=self,
type="void",
response=response)
else:
status = "FAILURE"
transaction_was_unsuccessful.send(sender=self,
type="void",
response=response)
return {"status": status, "response": response}
def credit(self, money, identification, options=None):
response = braintree.Transaction.refund(identification, money)
if response.is_success:
status = "SUCCESS"
transaction_was_successful.send(sender=self,
type="credit",
response=response)
else:
status = "FAILURE"
transaction_was_unsuccessful.send(sender=self,
type="credit",
response=response)
return {"status": status, "response": response}
def recurring(self, money, credit_card, options=None):
resp = self.store(credit_card, options=options)
if resp["status"] == "FAILURE":
transaction_was_unsuccessful.send(sender=self,
type="recurring",
response=resp)
return resp
payment_token = resp["response"].customer.credit_cards[0].token
request_hash = options["recurring"]
request_hash.update({
"payment_method_token": payment_token,
})
response = braintree.Subscription.create(request_hash)
if response.is_success:
status = "SUCCESS"
transaction_was_successful.send(sender=self,
type="recurring",
response=response)
else:
status = "FAILURE"
transaction_was_unsuccessful.send(sender=self,
type="recurring",
response=response)
return {"status": status, "response": response}
def store(self, credit_card, options=None):
if not options:
options = {}
customer = options.get("customer", None)
if not customer:
raise InvalidData("Customer information needs to be passed.")
try:
first_name, last_name = customer["name"].split(" ", 1)
except ValueError:
first_name = customer["name"]
last_name = ""
search_resp = braintree.Customer.search(
braintree.CustomerSearch.cardholder_name == credit_card.name,
braintree.CustomerSearch.credit_card_number.starts_with(credit_card.number[:6]),
braintree.CustomerSearch.credit_card_number.ends_with(credit_card.number[-4:]),
braintree.CustomerSearch.credit_card_expiration_date == self._cc_expiration_date(credit_card)
)
        customer_list = []
        for customer_match in search_resp.items:
            customer_list.append(customer_match)
if len(customer_list) >= 1:
# Take the first customer
customer = customer_list[0]
else:
card_hash = {
"number": credit_card.number,
"expiration_date": self._cc_expiration_date(credit_card),
"cardholder_name": self._cc_cardholder_name(credit_card),
}
if options.get("options"):
card_hash["options"] = options["options"]
request_hash = {
"first_name": first_name,
"last_name": last_name,
"company": customer.get("company", ""),
"email": customer.get("email", options.get("email", "")),
"phone": customer.get("phone", ""),
"credit_card": card_hash,
}
result = braintree.Customer.create(request_hash)
if not result.is_success:
transaction_was_unsuccessful.send(sender=self,
type="store",
response=result)
return {"status": "FAILURE", "response": result}
customer = result.customer
request_hash = {}
if options.get("billing_address"):
name = options["billing_address"].get("name", "")
try:
first_name, last_name = name.split(" ", 1)
except ValueError:
first_name = name
last_name = ""
request_hash.update({
"first_name": first_name,
"last_name": last_name,
"company": options["billing_address"].get("company", ""),
"street_address": options["billing_address"].get("address1", ""),
"extended_address": options["billing_address"].get("address2", ""),
"locality": options["billing_address"].get("city", ""),
"region": options["billing_address"].get("state", ""),
"postal_code": options["billing_address"].get("zip", ""),
"country_name": options["billing_address"].get("country", "")
})
card_hash = {
"number": credit_card.number,
"expiration_date": self._cc_expiration_date(credit_card),
"cardholder_name": self._cc_cardholder_name(credit_card),
"options": {
"update_existing_token": customer.credit_cards[0].token,
}
}
if options.get("options"):
card_hash["options"].update(options["options"])
if request_hash:
card_hash.update({"billing_address": request_hash})
response = braintree.Customer.update(customer.id, {
"credit_card": card_hash,
})
if response.is_success:
status = "SUCCESS"
transaction_was_successful.send(sender=self,
type="store",
response=response)
else:
for ii in response.errors.deep_errors:
print ii.message
status = "FAILURE"
transaction_was_unsuccessful.send(sender=self,
type="store",
response=response)
return {"status": status, "response": response}
def unstore(self, identification, options=None):
response = braintree.CreditCard.delete(identification)
if response.is_success:
status = "SUCCESS"
transaction_was_successful.send(sender=self,
type="unstore",
response=response)
else:
status = "FAILURE"
transaction_was_unsuccessful.send(sender=self,
type="unstore",
response=response)
return {"status": status, "response": response}
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'ISC',
'LGPL',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v3 or later)',
# TODO(phajdan.jr): Make licensecheck convert that comma to a dot.
'LGPL (v2,1 or later)',
'LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1,) BSD (3 clause) GPL (unversioned/unknown version) '
'LGPL (v2.1 or later)',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v2.0)',
# TODO(phajdan.jr): Make licensecheck not print the comma after 1.1.
'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2 or later)',
'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2.1 or later)',
'MIT/X11 (BSD like)',
'Ms-PL',
'Public domain',
'Public domain BSD',
'Public domain BSD (3 clause)',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
'University of Illinois/NCSA Open Source License (BSD like)',
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/hash.cc': [ # http://crbug.com/98100
'UNKNOWN',
],
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
'chrome/test/data/gpu/vt': [
'UNKNOWN',
],
'chrome/test/data/layout_tests/LayoutTests': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'data/mozilla_js_tests': [
'UNKNOWN',
],
'data/page_cycler': [
'UNKNOWN',
'GPL (v2 or later)',
],
'data/tab_switching': [
'UNKNOWN',
],
'googleurl': [ # http://code.google.com/p/google-url/issues/detail?id=15
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
],
'net/tools/spdyshark': [
'GPL (v2 or later)',
'UNKNOWN',
],
'third_party/WebKit': [
'UNKNOWN',
],
'third_party/WebKit/Websites/webkit.org/blog/wp-content/plugins/'
'akismet/akismet.php': [
'GPL (v2 or later)'
],
'third_party/WebKit/Source/JavaScriptCore/tests/mozilla': [
'GPL',
'GPL (v2 or later)',
'GPL (unversioned/unknown version)',
],
'third_party/active_doc': [ # http://crbug.com/98113
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
'third_party/bsdiff/mbsdiff.cc': [
'UNKNOWN',
],
'third_party/bzip2': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# Not used. http://crbug.com/156020
# Using third_party/cros_dbus_cplusplus/cros_dbus_cplusplus.gyp instead.
'third_party/cros_dbus_cplusplus/source/autogen.sh': [
'UNKNOWN',
],
# Included in the source tree but not built. http://crbug.com/156020
'third_party/cros_dbus_cplusplus/source/examples': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/findbugs/doc': [ # http://crbug.com/157206
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/gles2_book': [ # http://crbug.com/98130
'UNKNOWN',
],
'third_party/gles2_conform/GTF_ES': [ # http://crbug.com/98131
'UNKNOWN',
],
'third_party/harfbuzz': [ # http://crbug.com/98133
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/hyphen/hyphen.tex': [ # http://crbug.com/157375
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/jemalloc': [ # http://crbug.com/98302
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/lcov-1.9/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjingle/source_internal/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg': [ # http://crbug.com/98313
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libusb/src/libusb/version.h': [
'UNKNOWN',
],
'third_party/libusb/src/autogen.sh': [
'UNKNOWN',
],
'third_party/libusb/src/config.h': [
'UNKNOWN',
],
'third_party/libusb/src/msvc/config.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libvpx/source/libvpx/examples/includes': [
'GPL (v2 or later)',
],
'third_party/libwebp': [ # http://crbug.com/98448
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/MesaLib': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/npapi/npspy/extern/java': [
'GPL (unversioned/unknown version)',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/npapi/npspy': [
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/ply/__init__.py': [
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# http://crbug.com/222831
# https://bitbucket.org/eliben/pyelftools/issue/12
'third_party/pyelftools': [
'UNKNOWN',
],
'third_party/pylib': [
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/sqlite': [
'UNKNOWN',
],
'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/grit/grit/node/custom/__init__.py': [
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/histograms': [
'UNKNOWN',
],
'tools/memory_watcher': [
'UNKNOWN',
],
'tools/playback_benchmark': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/site_compare': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'webkit/data/ico_decoder': [
'UNKNOWN',
],
}
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
success = True
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/') or filename.startswith('sconsbuild/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
found_path_specific = False
for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES:
if (filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]):
found_path_specific = True
break
if found_path_specific:
continue
print "'%s' has non-whitelisted license '%s'" % (filename, license)
success = False
if success:
print "\nSUCCESS\n"
return 0
else:
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
return 1
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from copy import deepcopy
from google.api_core.exceptions import AlreadyExists, GoogleAPICallError, RetryError
from google.cloud.vision_v1 import ProductSearchClient
from google.protobuf.json_format import MessageToDict
from airflow import AirflowException
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
class NameDeterminer:
"""
Class used for checking if the entity has the 'name' attribute set.
- If so, no action is taken.
    - If not, and the name can be constructed from the other parameters provided, it is created and
      filled into the entity.
- If both the entity's 'name' attribute is set and the name can be constructed from other parameters
provided:
- If they are the same: no action is taken.
- If they are different: an exception is thrown.
"""
def __init__(self, label, id_label, get_path):
self.label = label
self.id_label = id_label
self.get_path = get_path
def get_entity_with_name(self, entity, entity_id, location, project_id):
entity = deepcopy(entity)
explicit_name = getattr(entity, 'name')
if location and entity_id:
# Necessary parameters to construct the name are present. Checking for conflict with explicit name
constructed_name = self.get_path(project_id, location, entity_id)
if not explicit_name:
entity.name = constructed_name
return entity
elif explicit_name != constructed_name:
self._raise_ex_different_names(constructed_name, explicit_name)
else:
# Not enough parameters to construct the name. Trying to use the name from Product / ProductSet.
if explicit_name:
return entity
else:
self._raise_ex_unable_to_determine_name()
def _raise_ex_unable_to_determine_name(self):
raise AirflowException(
"Unable to determine the {label} name. Please either set the name directly in the {label} "
"object or provide the `location` and `{id_label}` parameters.".format(
label=self.label, id_label=self.id_label
)
)
def _raise_ex_different_names(self, constructed_name, explicit_name):
raise AirflowException(
"The {label} name provided in the object ({explicit_name}) is different than the name created "
"from the input parameters ({constructed_name}). Please either: 1) Remove the {label} name, 2) "
"Remove the location and {id_label} parameters, 3) Unify the {label} name and input "
"parameters.".format(
label=self.label,
explicit_name=explicit_name,
constructed_name=constructed_name,
id_label=self.id_label,
)
)
class CloudVisionHook(GoogleCloudBaseHook):
"""
Hook for Google Cloud Vision APIs.
"""
_client = None
product_name_determiner = NameDeterminer('Product', 'product_id', ProductSearchClient.product_path)
product_set_name_determiner = NameDeterminer(
'ProductSet', 'productset_id', ProductSearchClient.product_set_path
)
def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None):
super(CloudVisionHook, self).__init__(gcp_conn_id, delegate_to)
def get_conn(self):
"""
Retrieves connection to Cloud Vision.
:return: Google Cloud Vision client object.
:rtype: google.cloud.vision_v1.ProductSearchClient
"""
if not self._client:
self._client = ProductSearchClient(credentials=self._get_credentials())
return self._client
@GoogleCloudBaseHook.fallback_to_default_project_id
def create_product_set(
self,
location,
product_set,
project_id=None,
product_set_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator`
"""
client = self.get_conn()
parent = ProductSearchClient.location_path(project_id, location)
self.log.info('Creating a new ProductSet under the parent: %s', parent)
response = self._handle_request(
lambda **kwargs: client.create_product_set(**kwargs),
parent=parent,
product_set=product_set,
product_set_id=product_set_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ProductSet created: %s', response.name if response else '')
self.log.debug('ProductSet created:\n%s', response)
if not product_set_id:
# Product set id was generated by the API
product_set_id = self._get_autogenerated_id(response)
self.log.info('Extracted autogenerated ProductSet ID from the response: %s', product_set_id)
return product_set_id
@GoogleCloudBaseHook.fallback_to_default_project_id
def get_product_set(
self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetGetOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Retrieving ProductSet: %s', name)
response = self._handle_request(
lambda **kwargs: client.get_product_set(**kwargs),
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ProductSet retrieved.')
self.log.debug('ProductSet retrieved:\n%s', response)
return MessageToDict(response)
@GoogleCloudBaseHook.fallback_to_default_project_id
def update_product_set(
self,
product_set,
location=None,
product_set_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator`
"""
client = self.get_conn()
product_set = self.product_set_name_determiner.get_entity_with_name(
product_set, product_set_id, location, project_id
)
self.log.info('Updating ProductSet: %s', product_set.name)
response = self._handle_request(
lambda **kwargs: client.update_product_set(**kwargs),
product_set=product_set,
update_mask=update_mask,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ProductSet updated: %s', response.name if response else '')
self.log.debug('ProductSet updated:\n%s', response)
return MessageToDict(response)
@GoogleCloudBaseHook.fallback_to_default_project_id
def delete_product_set(
self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetDeleteOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Deleting ProductSet: %s', name)
response = self._handle_request(
lambda **kwargs: client.delete_product_set(**kwargs),
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ProductSet with the name [%s] deleted.', name)
return response
@GoogleCloudBaseHook.fallback_to_default_project_id
def create_product(
self, location, product, project_id=None, product_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductCreateOperator`
"""
client = self.get_conn()
parent = ProductSearchClient.location_path(project_id, location)
self.log.info('Creating a new Product under the parent: %s', parent)
response = self._handle_request(
lambda **kwargs: client.create_product(**kwargs),
parent=parent,
product=product,
product_id=product_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('Product created: %s', response.name if response else '')
self.log.debug('Product created:\n%s', response)
if not product_id:
# Product id was generated by the API
product_id = self._get_autogenerated_id(response)
self.log.info('Extracted autogenerated Product ID from the response: %s', product_id)
return product_id
@GoogleCloudBaseHook.fallback_to_default_project_id
def get_product(self, location, product_id, project_id=None, retry=None, timeout=None, metadata=None):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductGetOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info('Retrieving Product: %s', name)
response = self._handle_request(
lambda **kwargs: client.get_product(**kwargs),
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('Product retrieved.')
self.log.debug('Product retrieved:\n%s', response)
return MessageToDict(response)
@GoogleCloudBaseHook.fallback_to_default_project_id
def update_product(
self,
product,
location=None,
product_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`
"""
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
        self.log.info('Updating Product: %s', product.name)
response = self._handle_request(
lambda **kwargs: client.update_product(**kwargs),
product=product,
update_mask=update_mask,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('Product updated: %s', response.name if response else '')
self.log.debug('Product updated:\n%s', response)
return MessageToDict(response)
@GoogleCloudBaseHook.fallback_to_default_project_id
def delete_product(self, location, product_id, project_id=None, retry=None, timeout=None, metadata=None):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductDeleteOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
        self.log.info('Deleting Product: %s', name)
response = self._handle_request(
lambda **kwargs: client.delete_product(**kwargs),
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
        self.log.info('Product with the name [%s] deleted.', name)
return response
def _handle_request(self, fun, **kwargs):
try:
return fun(**kwargs)
except GoogleAPICallError as e:
if isinstance(e, AlreadyExists):
raise e
else:
self.log.error('The request failed:\n%s', str(e))
raise AirflowException(e)
except RetryError as e:
self.log.error('The request failed due to a retryable error and retry attempts failed.')
raise AirflowException(e)
except ValueError as e:
self.log.error('The request failed, the parameters are invalid.')
raise AirflowException(e)
@staticmethod
def _get_entity_name(is_product, project_id, location, entity_id):
if is_product:
return ProductSearchClient.product_path(project_id, location, entity_id)
else:
return ProductSearchClient.product_set_path(project_id, location, entity_id)
@staticmethod
def _get_autogenerated_id(response):
try:
name = response.name
except AttributeError as e:
raise AirflowException('Unable to get name from response... [{}]\n{}'.format(response, e))
if '/' not in name:
raise AirflowException('Unable to get id from name... [{}]'.format(name))
return name.rsplit('/', 1)[1]
|
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from typing import Any, Callable, Iterable, Optional, Sequence, Text, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.utils.common as common_lib
import dice_rl.estimators.estimator as estimator_lib
class TabularBayesDice(object):
"""Robust policy evaluation."""
def __init__(self,
dataset_spec,
gamma: Union[float, tf.Tensor],
reward_fn: Optional[Callable] = None,
solve_for_state_action_ratio: bool = True,
nu_learning_rate: Union[float, tf.Tensor] = 0.1,
zeta_learning_rate: Union[float, tf.Tensor] = 0.1,
kl_regularizer: Union[float, tf.Tensor] = 1.,
eps_std: Union[float, tf.Tensor] = 1):
"""Initializes the solver.
Args:
dataset_spec: The spec of the dataset that will be given.
gamma: The discount factor to use.
reward_fn: A function that takes in an EnvStep and returns the reward for
that step. If not specified, defaults to just EnvStep.reward.
solve_for_state_action_ratio: Whether to solve for state-action density
ratio. Defaults to True. When solving an environment with a large
        state/action space (e.g., taxi), it is better to set this to False to
        avoid OOM issues.
nu_learning_rate: Learning rate for nu.
zeta_learning_rate: Learning rate for zeta.
kl_regularizer: Regularization constant for D_kl(q || p).
eps_std: epsilon standard deviation for sampling from the posterior.
"""
self._dataset_spec = dataset_spec
self._gamma = gamma
if reward_fn is None:
reward_fn = lambda env_step: env_step.reward
self._reward_fn = reward_fn
self._kl_regularizer = kl_regularizer
self._eps_std = eps_std
self._solve_for_state_action_ratio = solve_for_state_action_ratio
if (not self._solve_for_state_action_ratio and
not self._dataset_spec.has_log_probability()):
raise ValueError('Dataset must contain log-probability when '
'solve_for_state_action_ratio is False.')
# Get number of states/actions.
observation_spec = self._dataset_spec.observation
action_spec = self._dataset_spec.action
if not common_lib.is_categorical_spec(observation_spec):
raise ValueError('Observation spec must be discrete and bounded.')
self._num_states = observation_spec.maximum + 1
if not common_lib.is_categorical_spec(action_spec):
raise ValueError('Action spec must be discrete and bounded.')
self._num_actions = action_spec.maximum + 1
self._dimension = (
self._num_states * self._num_actions
if self._solve_for_state_action_ratio else self._num_states)
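    # Tabular accumulators: the TD residual matrix, per-index visitation
    # weights, and initial-state weights; all are filled in by
    # prepare_dataset().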
self._td_residuals = np.zeros([self._dimension, self._dimension])
self._total_weights = np.zeros([self._dimension])
self._initial_weights = np.zeros([self._dimension])
self._nu_optimizer = tf.keras.optimizers.Adam(nu_learning_rate)
self._zeta_optimizer = tf.keras.optimizers.Adam(zeta_learning_rate)
# Initialize variational Bayes parameters
self._nu_mu = tf.Variable(tf.zeros([self._dimension]))
self._nu_log_sigma = tf.Variable(tf.zeros([self._dimension]))
self._prior_mu = tf.Variable(tf.zeros([self._dimension]), trainable=True)
self._prior_log_sigma = tf.Variable(
tf.zeros([self._dimension]), trainable=False)
def _get_index(self, state, action):
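    # Flatten a (state, action) pair into a single tabular index; fall back
    # to the bare state index when only state ratios are solved for.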
if self._solve_for_state_action_ratio:
return state * self._num_actions + action
else:
return state
def prepare_dataset(self, dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy):
episodes, valid_steps = dataset.get_all_episodes()
tfagents_episodes = dataset_lib.convert_to_tfagents_timestep(episodes)
for episode_num in range(tf.shape(valid_steps)[0]):
      # Precompute probabilities for this episode.
this_episode = tf.nest.map_structure(lambda t: t[episode_num], episodes)
first_step = tf.nest.map_structure(lambda t: t[0], this_episode)
this_tfagents_episode = dataset_lib.convert_to_tfagents_timestep(
this_episode)
episode_target_log_probabilities = target_policy.distribution(
this_tfagents_episode).action.log_prob(this_episode.action)
episode_target_probs = target_policy.distribution(
this_tfagents_episode).action.probs_parameter()
for step_num in range(tf.shape(valid_steps)[1] - 1):
this_step = tf.nest.map_structure(lambda t: t[episode_num, step_num],
episodes)
next_step = tf.nest.map_structure(
lambda t: t[episode_num, step_num + 1], episodes)
if this_step.is_last() or not valid_steps[episode_num, step_num]:
continue
weight = 1.0
nu_index = self._get_index(this_step.observation, this_step.action)
self._td_residuals[nu_index, nu_index] += -weight
self._total_weights[nu_index] += weight
policy_ratio = 1.0
if not self._solve_for_state_action_ratio:
policy_ratio = tf.exp(episode_target_log_probabilities[step_num] -
this_step.get_log_probability())
# Need to weight next nu by importance weight.
next_weight = (
weight if self._solve_for_state_action_ratio else policy_ratio *
weight)
next_probs = episode_target_probs[step_num + 1]
for next_action, next_prob in enumerate(next_probs):
next_nu_index = self._get_index(next_step.observation, next_action)
self._td_residuals[next_nu_index, nu_index] += (
next_prob * self._gamma * next_weight)
initial_probs = episode_target_probs[0]
for initial_action, initial_prob in enumerate(initial_probs):
initial_nu_index = self._get_index(first_step.observation,
initial_action)
self._initial_weights[initial_nu_index] += weight * initial_prob
self._initial_weights = tf.cast(self._initial_weights, tf.float32)
self._total_weights = tf.cast(self._total_weights, tf.float32)
self._td_residuals = self._td_residuals / np.sqrt(
1e-8 + self._total_weights)[None, :]
self._td_errors = tf.cast(
np.dot(self._td_residuals, self._td_residuals.T), tf.float32)
self._td_residuals = tf.cast(self._td_residuals, tf.float32)
@tf.function
def train_step(self, regularizer: float = 1e-6):
# Solve primal form min (1-g) * E[nu0] + E[(B nu - nu)^2].
with tf.GradientTape() as tape:
nu_sigma = tf.sqrt(tf.exp(self._nu_log_sigma))
eps = tf.random.normal(tf.shape(nu_sigma), 0, self._eps_std)
nu = self._nu_mu + nu_sigma * eps
init_nu_loss = tf.einsum('m,m', (1 - self._gamma) * self._initial_weights,
nu)
residuals = tf.einsum('n,nm->m', nu, self._td_residuals)
bellman_loss = 0.5 * tf.einsum('m,m', residuals, residuals)
      prior_sigma = tf.sqrt(tf.exp(self._prior_log_sigma))
      # The computed tf.square(prior_sigma) is discarded: the prior variance
      # is pinned to 1. (prior_sigma itself still enters the log term below).
      prior_var = 1.
neg_kl = (0.5 * (1. - 2. * tf.math.log(prior_sigma / nu_sigma + 1e-8) -
(self._nu_mu - self._prior_mu)**2 / prior_var -
nu_sigma**2 / prior_var))
loss = init_nu_loss + bellman_loss - self._kl_regularizer * neg_kl
grads = tape.gradient(loss, [
self._nu_mu, self._nu_log_sigma, self._prior_mu, self._prior_log_sigma
])
self._nu_optimizer.apply_gradients(
zip(grads, [
self._nu_mu, self._nu_log_sigma, self._prior_mu,
self._prior_log_sigma
]))
return loss
def estimate_average_reward(self,
dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy,
num_samples=100):
"""Estimates value (average per-step reward) of policy.
The estimation is based on solved values of zeta, so one should call
solve() before calling this function.
Args:
dataset: The dataset to sample experience from.
target_policy: The policy whose value we want to estimate.
num_samples: number of posterior samples.
Returns:
A tensor with num_samples samples of estimated average per-step reward
of the target policy.
"""
nu_sigma = tf.sqrt(tf.exp(self._nu_log_sigma))
eps = tf.random.normal(
tf.concat([[num_samples], tf.shape(nu_sigma)], axis=-1), 0,
self._eps_std)
nu = self._nu_mu + nu_sigma * eps
self._zeta = (
tf.einsum('bn,nm->bm', nu, self._td_residuals) /
tf.math.sqrt(1e-8 + self._total_weights))
def weight_fn(env_step):
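      # Per-sample importance weights: gather the posterior zeta samples at
      # each step's tabular index, corrected by the policy ratio when solving
      # for state ratios only.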
index = self._get_index(env_step.observation, env_step.action)
zeta = tf.gather(
self._zeta, tf.tile(index[None, :], [num_samples, 1]), batch_dims=1)
policy_ratio = 1.0
if not self._solve_for_state_action_ratio:
tfagents_timestep = dataset_lib.convert_to_tfagents_timestep(env_step)
target_log_probabilities = target_policy.distribution(
tfagents_timestep).action.log_prob(env_step.action)
policy_ratio = tf.exp(target_log_probabilities -
env_step.get_log_probability())
return tf.cast(zeta * policy_ratio, tf.float32)
return estimator_lib.get_fullbatch_average(
dataset,
limit=None,
by_steps=True,
reward_fn=self._reward_fn,
weight_fn=weight_fn)
|
|
""" Dual ParserNode implementation """
from certbot_apache._internal import apacheparser
from certbot_apache._internal import assertions
from certbot_apache._internal import augeasparser
class DualNodeBase:
""" Dual parser interface for in development testing. This is used as the
base class for dual parser interface classes. This class handles runtime
attribute value assertions."""
def save(self, msg): # pragma: no cover
""" Call save for both parsers """
self.primary.save(msg)
self.secondary.save(msg)
def __getattr__(self, aname):
""" Attribute value assertion """
firstval = getattr(self.primary, aname)
secondval = getattr(self.secondary, aname)
exclusions = [
# Metadata will inherently be different, as ApacheParserNode does
# not have Augeas paths and so on.
aname == "metadata",
callable(firstval)
]
if not any(exclusions):
assertions.assertEqualSimple(firstval, secondval)
return firstval
def find_ancestors(self, name):
""" Traverses the ancestor tree and returns ancestors matching name """
return self._find_helper(DualBlockNode, "find_ancestors", name)
def _find_helper(self, nodeclass, findfunc, search, **kwargs):
"""A helper for find_* functions. The function specific attributes should
be passed as keyword arguments.
:param interfaces.ParserNode nodeclass: The node class for results.
:param str findfunc: Name of the find function to call
:param str search: The search term
"""
primary_res = getattr(self.primary, findfunc)(search, **kwargs)
secondary_res = getattr(self.secondary, findfunc)(search, **kwargs)
# The order of search results for Augeas implementation cannot be
# assured.
pass_primary = assertions.isPassNodeList(primary_res)
pass_secondary = assertions.isPassNodeList(secondary_res)
new_nodes = []
if pass_primary and pass_secondary:
# Both unimplemented
new_nodes.append(nodeclass(primary=primary_res[0],
secondary=secondary_res[0])) # pragma: no cover
elif pass_primary:
for c in secondary_res:
new_nodes.append(nodeclass(primary=primary_res[0],
secondary=c))
elif pass_secondary:
for c in primary_res:
new_nodes.append(nodeclass(primary=c,
secondary=secondary_res[0]))
else:
assert len(primary_res) == len(secondary_res)
matches = self._create_matching_list(primary_res, secondary_res)
for p, s in matches:
new_nodes.append(nodeclass(primary=p, secondary=s))
return new_nodes
class DualCommentNode(DualNodeBase):
""" Dual parser implementation of CommentNode interface """
def __init__(self, **kwargs):
""" This initialization implementation allows ordinary initialization
of CommentNode objects as well as creating a DualCommentNode object
using precreated or fetched CommentNode objects if provided as optional
arguments primary and secondary.
Parameters other than the following are from interfaces.CommentNode:
:param CommentNode primary: Primary pre-created CommentNode, mainly
used when creating new DualParser nodes using add_* methods.
:param CommentNode secondary: Secondary pre-created CommentNode
"""
kwargs.setdefault("primary", None)
kwargs.setdefault("secondary", None)
primary = kwargs.pop("primary")
secondary = kwargs.pop("secondary")
if primary or secondary:
assert primary and secondary
self.primary = primary
self.secondary = secondary
else:
self.primary = augeasparser.AugeasCommentNode(**kwargs)
self.secondary = apacheparser.ApacheCommentNode(**kwargs)
assertions.assertEqual(self.primary, self.secondary)
class DualDirectiveNode(DualNodeBase):
""" Dual parser implementation of DirectiveNode interface """
def __init__(self, **kwargs):
""" This initialization implementation allows ordinary initialization
of DirectiveNode objects as well as creating a DualDirectiveNode object
using precreated or fetched DirectiveNode objects if provided as optional
arguments primary and secondary.
Parameters other than the following are from interfaces.DirectiveNode:
:param DirectiveNode primary: Primary pre-created DirectiveNode, mainly
used when creating new DualParser nodes using add_* methods.
:param DirectiveNode secondary: Secondary pre-created DirectiveNode
"""
kwargs.setdefault("primary", None)
kwargs.setdefault("secondary", None)
primary = kwargs.pop("primary")
secondary = kwargs.pop("secondary")
if primary or secondary:
assert primary and secondary
self.primary = primary
self.secondary = secondary
else:
self.primary = augeasparser.AugeasDirectiveNode(**kwargs)
self.secondary = apacheparser.ApacheDirectiveNode(**kwargs)
assertions.assertEqual(self.primary, self.secondary)
def set_parameters(self, parameters):
""" Sets parameters and asserts that both implementation successfully
set the parameter sequence """
self.primary.set_parameters(parameters)
self.secondary.set_parameters(parameters)
assertions.assertEqual(self.primary, self.secondary)
class DualBlockNode(DualNodeBase):
""" Dual parser implementation of BlockNode interface """
def __init__(self, **kwargs):
""" This initialization implementation allows ordinary initialization
of BlockNode objects as well as creating a DualBlockNode object
using precreated or fetched BlockNode objects if provided as optional
arguments primary and secondary.
Parameters other than the following are from interfaces.BlockNode:
:param BlockNode primary: Primary pre-created BlockNode, mainly
used when creating new DualParser nodes using add_* methods.
:param BlockNode secondary: Secondary pre-created BlockNode
"""
kwargs.setdefault("primary", None)
kwargs.setdefault("secondary", None)
primary = kwargs.pop("primary")
secondary = kwargs.pop("secondary")
if primary or secondary:
assert primary and secondary
self.primary = primary
self.secondary = secondary
else:
self.primary = augeasparser.AugeasBlockNode(**kwargs)
self.secondary = apacheparser.ApacheBlockNode(**kwargs)
assertions.assertEqual(self.primary, self.secondary)
def add_child_block(self, name, parameters=None, position=None):
""" Creates a new child BlockNode, asserts that both implementations
did it in a similar way, and returns a newly created DualBlockNode object
encapsulating both of the newly created objects """
primary_new = self.primary.add_child_block(name, parameters, position)
secondary_new = self.secondary.add_child_block(name, parameters, position)
assertions.assertEqual(primary_new, secondary_new)
new_block = DualBlockNode(primary=primary_new, secondary=secondary_new)
return new_block
def add_child_directive(self, name, parameters=None, position=None):
""" Creates a new child DirectiveNode, asserts that both implementations
did it in a similar way, and returns a newly created DualDirectiveNode
object encapsulating both of the newly created objects """
primary_new = self.primary.add_child_directive(name, parameters, position)
secondary_new = self.secondary.add_child_directive(name, parameters, position)
assertions.assertEqual(primary_new, secondary_new)
new_dir = DualDirectiveNode(primary=primary_new, secondary=secondary_new)
return new_dir
def add_child_comment(self, comment="", position=None):
""" Creates a new child CommentNode, asserts that both implementations
did it in a similar way, and returns a newly created DualCommentNode
object encapsulating both of the newly created objects """
primary_new = self.primary.add_child_comment(comment, position)
secondary_new = self.secondary.add_child_comment(comment, position)
assertions.assertEqual(primary_new, secondary_new)
new_comment = DualCommentNode(primary=primary_new, secondary=secondary_new)
return new_comment
def _create_matching_list(self, primary_list, secondary_list):
""" Matches the list of primary_list to a list of secondary_list and
returns a list of tuples. This is used to create results for find_
methods.
        This helper function exists because we cannot ensure that the lists of
        search results returned by primary.find_* and secondary.find_* are
        ordered in the same way. The function pairs the matching search results
        from both implementations into a list of tuples.
"""
matched = []
for p in primary_list:
match = None
for s in secondary_list:
try:
assertions.assertEqual(p, s)
match = s
break
except AssertionError:
continue
if match:
matched.append((p, match))
else:
raise AssertionError("Could not find a matching node.")
return matched
def find_blocks(self, name, exclude=True):
"""
        Performs a search for BlockNodes using both implementations and does
        simple checks on the results. This is built upon the assumption that
        unimplemented find_* methods return a list with a single
        assertion-passing object.
After the assertion, it creates a list of newly created DualBlockNode
instances that encapsulate the pairs of returned BlockNode objects.
"""
return self._find_helper(DualBlockNode, "find_blocks", name,
exclude=exclude)
def find_directives(self, name, exclude=True):
"""
        Performs a search for DirectiveNodes using both implementations and
        checks the results. This is built upon the assumption that
        unimplemented find_* methods return a list with a single
        assertion-passing object.
After the assertion, it creates a list of newly created DualDirectiveNode
instances that encapsulate the pairs of returned DirectiveNode objects.
"""
return self._find_helper(DualDirectiveNode, "find_directives", name,
exclude=exclude)
def find_comments(self, comment):
"""
        Performs a search for CommentNodes using both implementations and
        checks the results. This is built upon the assumption that
        unimplemented find_* methods return a list with a single
        assertion-passing object.
After the assertion, it creates a list of newly created DualCommentNode
instances that encapsulate the pairs of returned CommentNode objects.
"""
return self._find_helper(DualCommentNode, "find_comments", comment)
def delete_child(self, child):
"""Deletes a child from the ParserNode implementations. The actual
ParserNode implementations are used here directly in order to be able
to match a child to the list of children."""
self.primary.delete_child(child.primary)
self.secondary.delete_child(child.secondary)
def unsaved_files(self):
""" Fetches the list of unsaved file paths and asserts that the lists
match """
primary_files = self.primary.unsaved_files()
secondary_files = self.secondary.unsaved_files()
assertions.assertEqualSimple(primary_files, secondary_files)
return primary_files
def parsed_paths(self):
"""
Returns a list of file paths that have currently been parsed into the parser
tree. The returned list may include paths with wildcard characters, for
example: ['/etc/apache2/conf.d/*.load']
This is typically called on the root node of the ParserNode tree.
:returns: list of file paths of files that have been parsed
"""
primary_paths = self.primary.parsed_paths()
secondary_paths = self.secondary.parsed_paths()
assertions.assertEqualPathsList(primary_paths, secondary_paths)
return primary_paths
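# --- Added illustration (not part of the original module) ---
# A minimal sketch of how the dual nodes above are used: every call fans out
# to both parser implementations, the results are asserted equal, and the
# pair is wrapped in a new Dual* node. `root` is an assumed, pre-built
# DualBlockNode for the configuration root.
def _example_dual_usage(root):
    for vhost in root.find_blocks("VirtualHost"):
        # add_child_directive() mutates both trees and cross-checks them.
        vhost.add_child_directive("ServerAlias", ["example.org"])
    # Both implementations must agree on which files now have unsaved changes.
    return root.unsaved_files()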
|
|
import os
import re
from collections import defaultdict
from functools import wraps
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.http import HttpResponse
from couchforms import const
from corehq.apps.api.resources import DictObject
from corehq.form_processor.models import CommCareCase, CommCareCaseIndex, XFormInstance
from corehq.form_processor.models.cases import CaseToXMLMixin, get_index_map
PERMISSION_POST_SMS = "POST_SMS"
PERMISSION_POST_WISEPILL = "POST_WISEPILL"
class ApiUser(models.Model):
id = models.CharField(max_length=255, primary_key=True)
password = models.CharField(max_length=255, null=True)
permissions = ArrayField(
models.CharField(max_length=126, null=True, blank=True),
null=True,
default=list
)
class Meta:
db_table = "api_apiuser"
@property
def username(self):
if self.id.startswith("ApiUser-"):
return self.id[len("ApiUser-"):]
else:
raise Exception("ApiUser _id has to be 'ApiUser-' + username")
def set_password(self, raw_password):
salt = os.urandom(5).hex()
self.password = make_password(raw_password, salt=salt)
def check_password(self, raw_password):
return check_password(raw_password, self.password)
def has_permission(self, permission):
return permission in self.permissions
@classmethod
def create(cls, username, password, permissions=None):
"""
To create a new ApiUser on the server:
./manage.py shell
$ from corehq.apps.api.models import *
$ ApiUser.create('buildserver', 'RANDOM').save()
"""
self = cls()
self.id = "ApiUser-%s" % username
self.set_password(password)
self.permissions = permissions or []
return self
@classmethod
def get_user(cls, username):
return cls.objects.get(id="ApiUser-%s" % username)
@classmethod
def auth(cls, username, password, permission=None):
try:
user = cls.get_user(username)
if user.check_password(password):
if permission is not None:
return user.has_permission(permission)
else:
return True
else:
return False
except ApiUser.DoesNotExist:
return False
def _require_api_user(permission=None):
def _outer2(fn):
from django.views.decorators.http import require_POST
if settings.DEBUG:
return fn
@require_POST
@wraps(fn)
def _outer(request, *args, **kwargs):
if ApiUser.auth(request.POST.get('username', ''), request.POST.get('password', ''), permission):
response = fn(request, *args, **kwargs)
else:
response = HttpResponse(status=401)
return response
return _outer
return _outer2
require_api_user = _require_api_user()
require_api_user_permission = _require_api_user
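# --- Added illustration (not part of the original module) ---
# A minimal sketch showing how the decorators above attach to a Django view;
# `example_incoming_sms` is a hypothetical view name. The request must carry
# `username` and `password` POST fields matching a stored ApiUser.
@require_api_user_permission(PERMISSION_POST_SMS)
def example_incoming_sms(request):
    # Only reached when ApiUser.auth(...) granted PERMISSION_POST_SMS.
    return HttpResponse(status=200)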
class ESXFormInstance(DictObject):
"""This wrapper around form data returned from ES which
provides attribute access and helper functions for
the Form API.
"""
@property
def form_data(self):
return self._data[const.TAG_FORM]
@property
def metadata(self):
from corehq.form_processor.utils import clean_metadata
if const.TAG_META in self.form_data:
return clean_metadata(self.form_data[const.TAG_META])
return None
@property
def is_archived(self):
return self.doc_type == 'XFormArchived'
@property
def blobs(self):
from corehq.blobs.mixin import BlobMetaRef
blobs = {}
if self._attachments:
blobs.update({
name: BlobMetaRef(
content_length=info.get("length", None),
content_type=info.get("content_type", None),
) for name, info in self._attachments.items()
})
if self.external_blobs:
blobs.update({
name: BlobMetaRef.wrap(info)
for name, info in self.external_blobs.items()
})
return blobs
@property
def version(self):
return self.form_data.get(const.TAG_VERSION, "")
@property
def uiversion(self):
return self.form_data.get(const.TAG_UIVERSION, "")
@property
def type(self):
return self.form_data.get(const.TAG_TYPE, "")
@property
def name(self):
return self.form_data.get(const.TAG_NAME, "")
@property
def server_modified_on(self):
server_modified_on = self._data.get('server_modified_on', None)
if not server_modified_on:
server_modified_on = self._data.get('edited_on', None)
if not server_modified_on:
server_modified_on = self._data['received_on']
return server_modified_on
class ESCase(DictObject, CaseToXMLMixin):
"""This wrapper around case data returned from ES which
provides attribute access and helper functions for
the Case API.
"""
@property
def case_id(self):
return self._id
@property
def server_opened_on(self):
try:
open_action = self.actions[0]
return open_action['server_date']
except Exception:
pass
@property
def indices(self):
return [CommCareCaseIndex(**index) for index in self._data['indices'] if index["referenced_id"]]
def get_index_map(self):
return get_index_map(self.indices)
def get_properties_in_api_format(self):
return dict(list(self.dynamic_case_properties().items()) + list({
"external_id": self.external_id,
"owner_id": self.owner_id,
# renamed
"case_name": self.name,
# renamed
"case_type": self.type,
# renamed
"date_opened": self.opened_on,
# all custom properties go here
}.items()))
def dynamic_case_properties(self):
if self.case_json is not None:
return self.case_json
def is_dynamic(name, letterfirst=re.compile(r'^[a-zA-Z]')):
return name not in CASE_PROPERTIES and letterfirst.search(name)
return {k: v for k, v in sorted(self._data.items()) if is_dynamic(k)}
@property
def _reverse_indices(self):
return CommCareCaseIndex.objects.get_all_reverse_indices_info(self.domain, [self._id])
def get_forms(self):
from corehq.apps.api.util import form_to_es_form
forms = XFormInstance.objects.get_forms(self.xform_ids, self.domain)
return list(filter(None, [form_to_es_form(form) for form in forms]))
@property
def child_cases(self):
from corehq.apps.api.util import case_to_es_case
return {
index.case_id: case_to_es_case(
CommCareCase.objects.get_case(index.case_id, self.domain))
for index in self._reverse_indices
}
@property
def parent_cases(self):
from corehq.apps.api.util import case_to_es_case
return {
index.identifier: case_to_es_case(
CommCareCase.objects.get_case(index.referenced_id, self.domain))
for index in self.indices if index.referenced_id
}
@property
def xforms_by_name(self):
return _group_by_dict(self.get_forms(), lambda form: form.name)
@property
def xforms_by_xmlns(self):
return _group_by_dict(self.get_forms(), lambda form: form.xmlns)
def _group_by_dict(objs, fn):
"""
Itertools.groupby returns a transient iterator with alien
data types in it. This returns a dictionary of lists.
Less efficient but clients can write naturally and used
only for things that have to fit in memory easily anyhow.
"""
result = defaultdict(list)
for obj in objs:
key = fn(obj)
result[key].append(obj)
return result
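# For example (illustrative only):
#   _group_by_dict(['ant', 'bee', 'asp'], lambda s: s[0])
#   -> {'a': ['ant', 'asp'], 'b': ['bee']}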
CASE_PROPERTIES = {
# CommCareCase.properties()
'_attachments',
'_id',
'_rev',
'actions',
'case_attachments',
'closed_by',
'closed_on',
'closed',
'computed_',
'computed_modified_on_',
'doc_type',
'domain',
'export_tag',
'external_blobs',
'external_id',
'indices',
    'initial_processing_complete',
'modified_on',
'name',
'opened_by',
'opened_on',
'owner_id',
'server_modified_on',
'type',
'user_id',
'version',
'xform_ids',
# CommCareCase data descriptors
# Derived from JsonObjectBase.__is_dynamic_property
#
# def is_data(name):
# return inspect.isdatadescriptor(getattr(CommCareCase, name))
# {n for n in dir(CommCareCase) if is_data(n)} - CommCareCase.properties().keys()
'_JsonObjectBase__dynamic_properties',
'__weakref__',
'_doc',
'_dynamic_properties',
'blobs',
'case_id',
'case_name',
'deletion_date',
'deletion_id',
'get_id',
'get_rev',
'has_indices',
'host',
'is_deleted',
'live_indices',
'modified_by',
'new_document',
'parent',
'persistent_blobs',
'phone_sync_key',
'raw_username',
'reverse_indices',
'server_opened_on',
}
|
|
import datetime
import os
import tempfile
import uuid
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
class Person(models.Model):
name = models.CharField(max_length=100)
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Article(models.Model):
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer, models.CASCADE)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self, *args, **kwargs):
if not self.id:
self.created = datetime.date.today()
return super().save(*args, **kwargs)
def __str__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article, models.CASCADE)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, models.CASCADE, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
class Publication(models.Model):
title = models.CharField(max_length=30)
date_published = models.DateField()
def __str__(self):
return self.title
def default_mode():
return 'di'
def default_category():
return 3
class PublicationDefaults(models.Model):
MODE_CHOICES = (('di', 'direct'), ('de', 'delayed'))
CATEGORY_CHOICES = ((1, 'Games'), (2, 'Comics'), (3, 'Novel'))
title = models.CharField(max_length=30)
date_published = models.DateField(default=datetime.date.today)
datetime_published = models.DateTimeField(default=datetime.datetime(2000, 1, 1))
mode = models.CharField(max_length=2, choices=MODE_CHOICES, default=default_mode)
category = models.IntegerField(choices=CATEGORY_CHOICES, default=default_category)
active = models.BooleanField(default=True)
file = models.FileField(default='default.txt')
class Author(models.Model):
publication = models.OneToOneField(Publication, models.SET_NULL, null=True, blank=True)
full_name = models.CharField(max_length=255)
class Author1(models.Model):
publication = models.OneToOneField(Publication, models.CASCADE, null=False)
full_name = models.CharField(max_length=255)
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, models.CASCADE, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %s" % (self.writer, self.age)
class Document(models.Model):
myfile = models.FileField(upload_to='unused', blank=True)
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __str__(self):
return self.description
class CustomFileField(models.FileField):
def save_form_data(self, instance, data):
been_here = getattr(self, 'been_saved', False)
assert not been_here, "save_form_data called more than once"
setattr(self, 'been_saved', True)
class CustomFF(models.Model):
f = CustomFileField(upload_to='unused', blank=True)
class FilePathModel(models.Model):
path = models.FilePathField(path=os.path.dirname(__file__), match='models.py', blank=True)
try:
from PIL import Image # NOQA: detect if Pillow is installed
test_images = True
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height',
blank=True, null=True)
width = models.IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
class NoExtensionImageFile(models.Model):
def upload_to(self, filename):
return 'tests/no_extension'
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=upload_to)
def __str__(self):
return self.description
except ImportError:
test_images = False
class Homepage(models.Model):
url = models.URLField()
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
class Meta:
unique_together = (('price', 'quantity'),)
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Triple(models.Model):
left = models.IntegerField()
middle = models.IntegerField()
right = models.IntegerField()
class Meta:
unique_together = (('left', 'middle'), ('middle', 'right'))
class ArticleStatus(models.Model):
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, models.SET_NULL, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __str__(self):
return self.key
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __str__(self):
return self.title
class DateTimePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateTimeField(editable=False)
def __str__(self):
return self.title
class DerivedPost(Post):
pass
class BigInt(models.Model):
biggie = models.BigIntegerField()
def __str__(self):
return str(self.biggie)
class MarkupField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super().__init__(*args, **kwargs)
def formfield(self, **kwargs):
        # Don't allow this field to be used in a form (a real use-case might
        # be that you know the markup will always be X, but the field lives in
        # an app that allows the user to say it could be something else).
        # Regressed at r10062.
        return None
class CustomFieldForExclusionModel(models.Model):
name = models.CharField(max_length=10)
markup = MarkupField()
class FlexibleDatePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField(blank=True, null=True)
class Colour(models.Model):
name = models.CharField(max_length=50)
def __iter__(self):
yield from range(5)
def __str__(self):
return self.name
class ColourfulItem(models.Model):
name = models.CharField(max_length=50)
colours = models.ManyToManyField(Colour)
class CustomErrorMessage(models.Model):
name1 = models.CharField(
max_length=50,
validators=[validators.validate_slug],
error_messages={'invalid': 'Model custom error message.'},
)
name2 = models.CharField(
max_length=50,
validators=[validators.validate_slug],
error_messages={'invalid': 'Model custom error message.'},
)
def clean(self):
if self.name1 == 'FORBIDDEN_VALUE':
raise ValidationError({'name1': [ValidationError('Model.clean() error messages.')]})
elif self.name1 == 'FORBIDDEN_VALUE2':
raise ValidationError({'name1': 'Model.clean() error messages (simpler syntax).'})
elif self.name1 == 'GLOBAL_ERROR':
raise ValidationError("Global error message.")
def today_callable_dict():
return {"last_action__gte": datetime.datetime.today()}
def today_callable_q():
return models.Q(last_action__gte=datetime.datetime.today())
class Character(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
def __str__(self):
return self.username
class StumpJoke(models.Model):
most_recently_fooled = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=today_callable_dict,
related_name="+",
)
has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
# Model for #13776
class Student(models.Model):
character = models.ForeignKey(Character, models.CASCADE)
study = models.CharField(max_length=30)
# Model for #639
class Photo(models.Model):
title = models.CharField(max_length=30)
image = models.FileField(storage=temp_storage, upload_to='tests')
# Support code for the tests; this keeps track of how many times save()
# gets called on each instance.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._savecount = 0
def save(self, force_insert=False, force_update=False):
super().save(force_insert, force_update)
self._savecount += 1
class UUIDPK(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=30)
# Models for #24706
class StrictAssignmentFieldSpecific(models.Model):
title = models.CharField(max_length=30)
_should_error = False
def __setattr__(self, key, value):
if self._should_error is True:
raise ValidationError(message={key: "Cannot set attribute"}, code='invalid')
super().__setattr__(key, value)
class StrictAssignmentAll(models.Model):
title = models.CharField(max_length=30)
_should_error = False
def __setattr__(self, key, value):
if self._should_error is True:
raise ValidationError(message="Cannot set attribute", code='invalid')
super().__setattr__(key, value)
# A model with ForeignKey(blank=False, null=True)
class Award(models.Model):
name = models.CharField(max_length=30)
character = models.ForeignKey(Character, models.SET_NULL, blank=False, null=True)
class NullableUniqueCharFieldModel(models.Model):
codename = models.CharField(max_length=50, blank=True, null=True, unique=True)
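# --- Added illustration (not part of the original module) ---
# A minimal sketch of how these models are exercised through a ModelForm;
# `ArticleSketchForm` is a hypothetical name. Note that `created`
# (editable=False) is excluded automatically and `status` renders as a
# choice field because of ARTICLE_STATUS.
from django import forms
class ArticleSketchForm(forms.ModelForm):
    class Meta:
        model = Article
        fields = ['headline', 'slug', 'pub_date', 'writer', 'article', 'status']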
|
|
import json
import tarfile
from .exceptions import DockerError, DockerContainerError
from .jsonstream import json_stream_result
from .multiplexed import multiplexed_result
from .utils import identical, parse_result
from .logs import DockerLog
class DockerContainers(object):
def __init__(self, docker):
self.docker = docker
async def list(self, **kwargs):
data = await self.docker._query_json(
"containers/json",
method='GET',
params=kwargs
)
return [DockerContainer(self.docker, **x) for x in data]
async def create_or_replace(self, name, config):
container = None
try:
container = await self.get(name)
if not identical(config, container._container):
running = container._container.get(
"State", {}).get("Running", False)
if running:
await container.stop()
await container.delete()
container = None
except DockerError:
pass
if container is None:
container = await self.create(config, name=name)
return container
async def create(self, config, *, name=None):
url = "containers/create"
config = json.dumps(config, sort_keys=True).encode('utf-8')
kwargs = {}
if name:
kwargs['name'] = name
data = await self.docker._query_json(
url,
method='POST',
data=config,
params=kwargs
)
return DockerContainer(self.docker, id=data['Id'])
async def run(self, config, *, name=None):
"""
Create and start a container.
        If container.start() raises an error, the exception will contain
        a `container_id` attribute with the id of the container.
"""
try:
container = await self.create(config, name=name)
except DockerError as err:
            # image not found, try to pull it
if err.status == 404 and 'Image' in config:
await self.docker.pull(config['Image'])
container = await self.create(config, name=name)
else:
raise err
try:
await container.start()
except DockerError as err:
raise DockerContainerError(
err.status,
{"message": err.message},
container['id'])
return container
async def get(self, container, **kwargs):
data = await self.docker._query_json(
"containers/{container}/json".format(container=container),
method='GET',
params=kwargs
)
return DockerContainer(self.docker, **data)
def container(self, container_id, **kwargs):
data = {
'id': container_id
}
data.update(kwargs)
return DockerContainer(self.docker, **data)
class DockerContainer:
def __init__(self, docker, **kwargs):
self.docker = docker
self._container = kwargs
self._id = self._container.get("id", self._container.get(
"ID", self._container.get("Id")))
self.logs = DockerLog(docker, self)
async def log(self, *, stdout=False, stderr=False, follow=False, **kwargs):
if stdout is False and stderr is False:
raise TypeError("Need one of stdout or stderr")
params = {
"stdout": stdout,
"stderr": stderr,
"follow": follow,
}
params.update(kwargs)
inspect_info = await self.show()
is_tty = inspect_info['Config']['Tty']
response = await self.docker._query(
"containers/{self._id}/logs".format(self=self),
method='GET',
params=params,
)
return await multiplexed_result(response, follow, is_tty=is_tty)
async def copy(self, resource, **kwargs):
# TODO: this is deprecated, use get_archive instead
request = json.dumps({
"Resource": resource,
}, sort_keys=True).encode('utf-8')
data = await self.docker._query(
"containers/{self._id}/copy".format(self=self),
method='POST',
data=request,
headers={"content-type": "application/json"},
params=kwargs
)
return data
async def get_archive(self, path: str) -> tarfile.TarFile:
response = await self.docker._query(
"containers/{self._id}/archive".format(self=self),
method='GET',
params={'path': path}
)
data = await parse_result(response)
return data
async def put_archive(self, path, data):
response = await self.docker._query(
"containers/{self._id}/archive".format(self=self),
method='PUT',
data=data,
headers={"content-type": "application/json"},
params={'path': path}
)
data = await parse_result(response)
return data
async def show(self, **kwargs):
data = await self.docker._query_json(
"containers/{self._id}/json".format(self=self),
method='GET',
params=kwargs
)
self._container = data
return data
async def stop(self, **kwargs):
response = await self.docker._query(
"containers/{self._id}/stop".format(self=self),
method='POST',
params=kwargs
)
await response.release()
return
async def start(self, **kwargs):
response = await self.docker._query(
"containers/{self._id}/start".format(self=self),
method='POST',
headers={"content-type": "application/json"},
data=kwargs
)
await response.release()
return
async def restart(self, timeout=None):
params = {}
if timeout is not None:
params['t'] = timeout
response = await self.docker._query(
"containers/{self._id}/restart".format(self=self),
method='POST',
params=params
)
await response.release()
return
async def kill(self, **kwargs):
response = await self.docker._query(
"containers/{self._id}/kill".format(self=self),
method='POST',
params=kwargs
)
await response.release()
return
async def wait(self, *, timeout=None, **kwargs):
data = await self.docker._query_json(
"containers/{self._id}/wait".format(self=self),
method='POST',
params=kwargs,
timeout=timeout,
)
return data
async def delete(self, **kwargs):
response = await self.docker._query(
"containers/{self._id}".format(self=self),
method='DELETE',
params=kwargs
)
await response.release()
return
async def websocket(self, **params):
path = "containers/{self._id}/attach/ws".format(self=self)
ws = await self.docker._websocket(path, **params)
return ws
async def port(self, private_port):
if 'NetworkSettings' not in self._container:
await self.show()
private_port = str(private_port)
h_ports = None
# Port settings is None when the container is running with
# network_mode=host.
port_settings = self._container.get('NetworkSettings', {}).get('Ports')
if port_settings is None:
return None
if '/' in private_port:
return port_settings.get(private_port)
h_ports = port_settings.get(private_port + '/tcp')
if h_ports is None:
h_ports = port_settings.get(private_port + '/udp')
return h_ports
async def stats(self, *, stream=True):
if stream:
response = await self.docker._query(
"containers/{self._id}/stats".format(self=self),
params={'stream': '1'},
)
return (await json_stream_result(response))
else:
data = await self.docker._query_json(
"containers/{self._id}/stats".format(self=self),
params={'stream': '0'},
)
return data
def __getitem__(self, key):
return self._container[key]
    def __contains__(self, key):
        # `__hasitem__` is not a real special method; the `in` operator
        # dispatches to __contains__.
        return key in self._container
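# --- Added illustration (not part of the original module) ---
# A minimal usage sketch, assuming this module is consumed through the
# top-level `aiodocker` package:
#
#     import asyncio
#     import aiodocker
#
#     async def main():
#         docker = aiodocker.Docker()
#         container = await docker.containers.run(
#             config={"Image": "hello-world"}, name="demo")
#         await container.wait()
#         print(await container.log(stdout=True))
#         await container.delete()
#         await docker.close()
#
#     asyncio.run(main())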
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepares a Chrome HTML file by inlining resources and adding references to
high DPI resources and removing references to unsupported scale factors.
This is a small gatherer that takes a HTML file, looks for src attributes
and inlines the specified file, producing one HTML file with no external
dependencies. It recursively inlines the included files. When inlining CSS
image files this script also checks for the existence of high DPI versions
of the inlined file including those on relevant platforms. Unsupported scale
factors are also removed from existing image sets to support explicitly
referencing all available images.
"""
import os
import re
from grit import lazy_re
from grit import util
from grit.format import html_inline
from grit.gather import interface
# Distribution string to replace with distribution.
DIST_SUBSTR = '%DISTRIBUTION%'
# Matches a chrome theme source URL.
_THEME_SOURCE = lazy_re.compile(
    r'(?P<baseurl>chrome://theme/IDR_[A-Z0-9_]*)(?P<query>\?.*)?')
# Matches CSS image urls with the capture group 'filename'.
_CSS_IMAGE_URLS = lazy_re.compile(
    r'(?P<attribute>content|background|[\w-]*-image):[ ]*' +
    r'url\((?P<quote>"|\'|)(?P<filename>[^"\'()]*)(?P=quote)')
# Matches CSS image sets.
_CSS_IMAGE_SETS = lazy_re.compile(
    r'(?P<attribute>content|background|[\w-]*-image):[ ]*' +
    r'-webkit-image-set\((?P<images>' +
    r'([,\r\n ]*url\((?P<quote>"|\'|)[^"\'()]*(?P=quote)\)[ ]*[0-9.]*x)*)\)',
    re.MULTILINE)
# Matches a single image in a CSS image set with the capture group scale.
_CSS_IMAGE_SET_IMAGE = lazy_re.compile(
    r'[,\r\n ]*' +
    r'url\((?P<quote>"|\'|)[^"\'()]*(?P=quote)\)[ ]*(?P<scale>[0-9.]*x)',
    re.MULTILINE)
_HTML_IMAGE_SRC = lazy_re.compile(
    r'<img[^>]+src="(?P<filename>[^">]*)"[^>]*>')
def GetImageList(
base_path, filename, scale_factors, distribution,
filename_expansion_function=None):
"""Generate the list of images which match the provided scale factors.
Takes an image filename and checks for files of the same name in folders
corresponding to the supported scale factors. If the file is from a
chrome://theme/ source, inserts supported @Nx scale factors as high DPI
versions.
Args:
base_path: path to look for relative file paths in
filename: name of the base image file
scale_factors: a list of the supported scale factors (i.e. ['2x'])
distribution: string that should replace %DISTRIBUTION%
Returns:
array of tuples containing scale factor and image (i.e.
[('1x', 'image.png'), ('2x', '2x/image.png')]).
"""
# Any matches for which a chrome URL handler will serve all scale factors
# can simply request all scale factors.
theme_match = _THEME_SOURCE.match(filename)
if theme_match:
images = [('1x', filename)]
for scale_factor in scale_factors:
scale_filename = "%s@%s" % (theme_match.group('baseurl'), scale_factor)
if theme_match.group('query'):
scale_filename += theme_match.group('query')
images.append((scale_factor, scale_filename))
return images
if filename.find(':') != -1:
# filename is probably a URL, only return filename itself.
return [('1x', filename)]
filename = filename.replace(DIST_SUBSTR, distribution)
if filename_expansion_function:
filename = filename_expansion_function(filename)
filepath = os.path.join(base_path, filename)
images = [('1x', filename)]
for scale_factor in scale_factors:
# Check for existence of file and add to image set.
scale_path = os.path.split(os.path.join(base_path, filename))
scale_image_path = os.path.join(scale_path[0], scale_factor, scale_path[1])
if os.path.isfile(scale_image_path):
# HTML/CSS always uses forward slashed paths.
scale_image_name = re.sub('(?P<path>(.*/)?)(?P<file>[^/]*)',
'\\g<path>' + scale_factor + '/\\g<file>',
filename)
images.append((scale_factor, scale_image_name))
return images
def GenerateImageSet(images, quote):
"""Generates a -webkit-image-set for the provided list of images.
Args:
images: an array of tuples giving scale factor and file path
(i.e. [('1x', 'image.png'), ('2x', '2x/image.png')]).
quote: a string giving the quotation character to use (i.e. "'")
Returns:
string giving a -webkit-image-set rule referencing the provided images.
(i.e. '-webkit-image-set(url('image.png') 1x, url('2x/image.png') 2x)')
"""
imageset = []
for (scale_factor, filename) in images:
imageset.append("url(%s%s%s) %s" % (quote, filename, quote, scale_factor))
return "-webkit-image-set(%s)" % (', '.join(imageset))
def InsertImageSet(
src_match, base_path, scale_factors, distribution,
filename_expansion_function=None):
"""Regex replace function which inserts -webkit-image-set.
Takes a regex match for url('path'). If the file is local, checks for
files of the same name in folders corresponding to the supported scale
factors. If the file is from a chrome://theme/ source, inserts the
supported @Nx scale factor request. In either case inserts a
-webkit-image-set rule to fetch the appropriate image for the current
scale factor.
Args:
src_match: regex match object from _CSS_IMAGE_URLS
base_path: path to look for relative file paths in
scale_factors: a list of the supported scale factors (i.e. ['2x'])
distribution: string that should replace %DISTRIBUTION%.
Returns:
string
"""
quote = src_match.group('quote')
filename = src_match.group('filename')
attr = src_match.group('attribute')
image_list = GetImageList(
base_path, filename, scale_factors, distribution,
filename_expansion_function=filename_expansion_function)
# Don't modify the source if there is only one image.
if len(image_list) == 1:
return src_match.group(0)
return "%s: %s" % (attr, GenerateImageSet(image_list, quote)[:-1])
def InsertImageStyle(
src_match, base_path, scale_factors, distribution,
filename_expansion_function=None):
"""Regex replace function which adds a content style to an <img>.
Takes a regex match from _HTML_IMAGE_SRC and replaces the attribute with a CSS
style which defines the image set.
"""
filename = src_match.group('filename')
image_list = GetImageList(
base_path, filename, scale_factors, distribution,
filename_expansion_function=filename_expansion_function)
  # Don't modify the source if there is only one image or the image already
  # defines a style.
if src_match.group(0).find(" style=\"") != -1 or len(image_list) == 1:
return src_match.group(0)
return "%s style=\"content: %s;\">" % (src_match.group(0)[:-1],
GenerateImageSet(image_list, "'"))
def InsertImageSets(
filepath, text, scale_factors, distribution,
filename_expansion_function=None):
"""Helper function that adds references to external images available in any of
scale_factors in CSS backgrounds.
"""
# Add high DPI urls for css attributes: content, background,
# or *-image or <img src="foo">.
return _CSS_IMAGE_URLS.sub(
lambda m: InsertImageSet(
m, filepath, scale_factors, distribution,
filename_expansion_function=filename_expansion_function),
_HTML_IMAGE_SRC.sub(
lambda m: InsertImageStyle(
m, filepath, scale_factors, distribution,
filename_expansion_function=filename_expansion_function),
text)).decode('utf-8').encode('utf-8')
def RemoveImagesNotIn(scale_factors, src_match):
"""Regex replace function which removes images for scale factors not in
scale_factors.
Takes a regex match for _CSS_IMAGE_SETS. For each image in the group images,
checks if this scale factor is in scale_factors and if not, removes it.
Args:
scale_factors: a list of the supported scale factors (i.e. ['1x', '2x'])
src_match: regex match object from _CSS_IMAGE_SETS
Returns:
string
"""
attr = src_match.group('attribute')
images = _CSS_IMAGE_SET_IMAGE.sub(
lambda m: m.group(0) if m.group('scale') in scale_factors else '',
src_match.group('images'))
return "%s: -webkit-image-set(%s)" % (attr, images)
def RemoveImageSetImages(text, scale_factors):
"""Helper function which removes images in image sets not in the list of
supported scale_factors.
"""
return _CSS_IMAGE_SETS.sub(
lambda m: RemoveImagesNotIn(scale_factors, m), text)
def ProcessImageSets(
filepath, text, scale_factors, distribution,
filename_expansion_function=None):
"""Helper function that adds references to external images available in other
scale_factors and removes images from image-sets in unsupported scale_factors.
"""
# Explicitly add 1x to supported scale factors so that it is not removed.
supported_scale_factors = ['1x']
supported_scale_factors.extend(scale_factors)
return InsertImageSets(
filepath,
RemoveImageSetImages(text, supported_scale_factors),
scale_factors,
distribution,
filename_expansion_function=filename_expansion_function)
class ChromeHtml(interface.GathererBase):
"""Represents an HTML document processed for Chrome WebUI.
HTML documents used in Chrome WebUI have local resources inlined and
automatically insert references to high DPI assets used in CSS properties
with the use of the -webkit-image-set value. References to unsupported scale
factors in image sets are also removed. This does not generate any
translateable messages and instead generates a single DataPack resource.
"""
def __init__(self, *args, **kwargs):
super(ChromeHtml, self).__init__(*args, **kwargs)
self.allow_external_script_ = False
self.flatten_html_ = False
# 1x resources are implicitly already in the source and do not need to be
# added.
self.scale_factors_ = []
self.filename_expansion_function = None
def SetAttributes(self, attrs):
self.allow_external_script_ = ('allowexternalscript' in attrs and
attrs['allowexternalscript'] == 'true')
self.flatten_html_ = ('flattenhtml' in attrs and
attrs['flattenhtml'] == 'true')
def SetDefines(self, defines):
if 'scale_factors' in defines:
self.scale_factors_ = defines['scale_factors'].split(',')
def GetText(self):
"""Returns inlined text of the HTML document."""
return self.inlined_text_
def GetTextualIds(self):
return [self.extkey]
def GetData(self, lang, encoding):
"""Returns inlined text of the HTML document."""
return self.inlined_text_
def GetHtmlResourceFilenames(self):
"""Returns a set of all filenames inlined by this file."""
if self.flatten_html_:
return html_inline.GetResourceFilenames(
self.grd_node.ToRealPath(self.GetInputPath()),
allow_external_script=self.allow_external_script_,
rewrite_function=lambda fp, t, d: ProcessImageSets(
fp, t, self.scale_factors_, d,
filename_expansion_function=self.filename_expansion_function),
filename_expansion_function=self.filename_expansion_function)
return []
def Translate(self, lang, pseudo_if_not_available=True,
skeleton_gatherer=None, fallback_to_english=False):
"""Returns this document translated."""
return self.inlined_text_
def SetFilenameExpansionFunction(self, fn):
self.filename_expansion_function = fn
def Parse(self):
"""Parses and inlines the represented file."""
filename = self.GetInputPath()
if self.filename_expansion_function:
filename = self.filename_expansion_function(filename)
# Hack: some unit tests supply an absolute path and no root node.
if not os.path.isabs(filename):
filename = self.grd_node.ToRealPath(filename)
if self.flatten_html_:
self.inlined_text_ = html_inline.InlineToString(
filename,
self.grd_node,
allow_external_script = self.allow_external_script_,
rewrite_function=lambda fp, t, d: ProcessImageSets(
fp, t, self.scale_factors_, d,
filename_expansion_function=self.filename_expansion_function),
filename_expansion_function=self.filename_expansion_function)
else:
distribution = html_inline.GetDistribution()
self.inlined_text_ = ProcessImageSets(
os.path.dirname(filename),
util.ReadFile(filename, 'utf-8'),
self.scale_factors_,
distribution,
filename_expansion_function=self.filename_expansion_function)
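# --- Added illustration (not part of the original module) ---
# A minimal sketch of the CSS rewriting performed above (illustrative paths):
#
#   css = "background: url('image.png');"
#   ProcessImageSets('/src/ui', css, ['2x'], 'chromium')
#   # If /src/ui/2x/image.png exists, the rule becomes:
#   # "background: -webkit-image-set(url('image.png') 1x,
#   #                                url('2x/image.png') 2x);"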
|
|
"""Helper classes for Google Assistant integration."""
from asyncio import gather
from collections.abc import Mapping
from typing import List
from homeassistant.core import Context, callback
from homeassistant.const import (
CONF_NAME, STATE_UNAVAILABLE, ATTR_SUPPORTED_FEATURES,
ATTR_DEVICE_CLASS, CLOUD_NEVER_EXPOSED_ENTITIES
)
from . import trait
from .const import (
DOMAIN_TO_GOOGLE_TYPES, CONF_ALIASES, ERR_FUNCTION_NOT_SUPPORTED,
DEVICE_CLASS_TO_GOOGLE_TYPES, CONF_ROOM_HINT
)
from .error import SmartHomeError
class Config:
"""Hold the configuration for Google Assistant."""
def __init__(self, should_expose,
entity_config=None, secure_devices_pin=None,
agent_user_id=None, should_2fa=None):
"""Initialize the configuration."""
self.should_expose = should_expose
self.entity_config = entity_config or {}
self.secure_devices_pin = secure_devices_pin
self._should_2fa = should_2fa
# Agent User Id to use for query responses
self.agent_user_id = agent_user_id
def should_2fa(self, state):
"""If an entity should have 2FA checked."""
return self._should_2fa is None or self._should_2fa(state)
class RequestData:
"""Hold data associated with a particular request."""
def __init__(self, config, user_id, request_id):
"""Initialize the request data."""
self.config = config
self.request_id = request_id
self.context = Context(user_id=user_id)
def get_google_type(domain, device_class):
"""Google type based on domain and device class."""
typ = DEVICE_CLASS_TO_GOOGLE_TYPES.get((domain, device_class))
return typ if typ is not None else DOMAIN_TO_GOOGLE_TYPES[domain]
class GoogleEntity:
"""Adaptation of Entity expressed in Google's terms."""
def __init__(self, hass, config, state):
"""Initialize a Google entity."""
self.hass = hass
self.config = config
self.state = state
self._traits = None
@property
def entity_id(self):
"""Return entity ID."""
return self.state.entity_id
@callback
def traits(self):
"""Return traits for entity."""
if self._traits is not None:
return self._traits
state = self.state
domain = state.domain
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
self._traits = [Trait(self.hass, state, self.config)
for Trait in trait.TRAITS
if Trait.supported(domain, features, device_class)]
return self._traits
@callback
def is_supported(self) -> bool:
"""Return if the entity is supported by Google."""
return self.state.state != STATE_UNAVAILABLE and bool(self.traits())
@callback
def might_2fa(self) -> bool:
"""Return if the entity might encounter 2FA."""
state = self.state
domain = state.domain
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
return any(trait.might_2fa(domain, features, device_class)
for trait in self.traits())
async def sync_serialize(self):
"""Serialize entity for a SYNC response.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
state = self.state
entity_config = self.config.entity_config.get(state.entity_id, {})
name = (entity_config.get(CONF_NAME) or state.name).strip()
domain = state.domain
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
traits = self.traits()
device_type = get_google_type(domain,
device_class)
device = {
'id': state.entity_id,
'name': {
'name': name
},
'attributes': {},
'traits': [trait.name for trait in traits],
'willReportState': False,
'type': device_type,
}
# use aliases
aliases = entity_config.get(CONF_ALIASES)
if aliases:
device['name']['nicknames'] = aliases
for trt in traits:
device['attributes'].update(trt.sync_attributes())
room = entity_config.get(CONF_ROOM_HINT)
if room:
device['roomHint'] = room
return device
dev_reg, ent_reg, area_reg = await gather(
self.hass.helpers.device_registry.async_get_registry(),
self.hass.helpers.entity_registry.async_get_registry(),
self.hass.helpers.area_registry.async_get_registry(),
)
entity_entry = ent_reg.async_get(state.entity_id)
if not (entity_entry and entity_entry.device_id):
return device
device_entry = dev_reg.devices.get(entity_entry.device_id)
if not (device_entry and device_entry.area_id):
return device
area_entry = area_reg.areas.get(device_entry.area_id)
if area_entry and area_entry.name:
device['roomHint'] = area_entry.name
return device
@callback
def query_serialize(self):
"""Serialize entity for a QUERY response.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
state = self.state
if state.state == STATE_UNAVAILABLE:
return {'online': False}
attrs = {'online': True}
for trt in self.traits():
deep_update(attrs, trt.query_attributes())
return attrs
async def execute(self, data, command_payload):
"""Execute a command.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
command = command_payload['command']
params = command_payload.get('params', {})
challenge = command_payload.get('challenge', {})
executed = False
for trt in self.traits():
if trt.can_execute(command, params):
await trt.execute(command, data, params, challenge)
executed = True
break
if not executed:
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED,
'Unable to execute {} for {}'.format(command,
self.state.entity_id))
@callback
def async_update(self):
"""Update the entity with latest info from Home Assistant."""
self.state = self.hass.states.get(self.entity_id)
if self._traits is None:
return
for trt in self._traits:
trt.state = self.state
def deep_update(target, source):
"""Update a nested dictionary with another nested dictionary."""
for key, value in source.items():
if isinstance(value, Mapping):
target[key] = deep_update(target.get(key, {}), value)
else:
target[key] = value
return target
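# For example (illustrative only):
#   deep_update({'a': {'b': 1}}, {'a': {'c': 2}, 'd': 3})
#   -> {'a': {'b': 1, 'c': 2}, 'd': 3}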
@callback
def async_get_entities(hass, config) -> List[GoogleEntity]:
"""Return all entities that are supported by Google."""
entities = []
for state in hass.states.async_all():
if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
continue
entity = GoogleEntity(hass, config, state)
if entity.is_supported():
entities.append(entity)
return entities
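# --- Added illustration (not part of the original module) ---
# A minimal sketch of the SYNC flow built from the helpers above; `hass` and
# `config` are assumed to come from the surrounding integration:
#
#   entities = async_get_entities(hass, config)
#   devices = await gather(*[entity.sync_serialize() for entity in entities
#                            if config.should_expose(entity.state)])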
|
|
'''
Created on December 25, 2016
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
import bot
from locations.location import Location
class Intelligence:
"""
Base Intelligence Module Class / Interface
"""
def __init__(self, botengine, parent):
"""
Instantiate this object
:param parent: Parent object, either a location or a device object.
"""
import uuid
self.intelligence_id = str(uuid.uuid4())
self.parent = parent
def initialize(self, botengine):
"""
Initialize
:param botengine: BotEngine environment
"""
return
def destroy(self, botengine):
"""
This device or object is getting permanently deleted - it is no longer in the user's account.
:param botengine: BotEngine environment
"""
return
def get_html_summary(self, botengine, oldest_timestamp_ms, newest_timestamp_ms, test_mode=False):
"""
Return a human-friendly HTML summary of insights or status of this intelligence module to report in weekly and test mode emails
:param botengine: BotEngine environment
:param oldest_timestamp_ms: Oldest timestamp in milliseconds to summarize
:param newest_timestamp_ms: Newest timestamp in milliseconds to summarize
:param test_mode: True to add or modify details for test mode, instead of a general weekly summary
"""
return ""
def mode_updated(self, botengine, current_mode):
"""
Mode was updated
:param botengine: BotEngine environment
:param current_mode: Current mode
        """
return
def occupancy_status_updated(self, botengine, status, reason, last_status, last_reason):
"""
AI Occupancy Status updated
:param botengine: BotEngine
:param status: Current occupancy status
:param reason: Current occupancy reason
:param last_status: Last occupancy status
:param last_reason: Last occupancy reason
"""
return
def device_measurements_updated(self, botengine, device_object):
"""
Device was updated
:param botengine: BotEngine environment
:param device_object: Device object that was updated
"""
return
def device_metadata_updated(self, botengine, device_object):
"""
Evaluate a device that is new or whose goal/scenario was recently updated
:param botengine: BotEngine environment
:param device_object: Device object that was updated
"""
return
def device_alert(self, botengine, device_object, alert_type, alert_params):
"""
Device sent an alert.
When a device disconnects, it will send an alert like this: [{u'alertType': u'status', u'params': [{u'name': u'deviceStatus', u'value': u'2'}], u'deviceId': u'eb10e80a006f0d00'}]
When a device reconnects, it will send an alert like this: [{u'alertType': u'on', u'deviceId': u'eb10e80a006f0d00'}]
:param botengine: BotEngine environment
:param device_object: Device object that sent the alert
        :param alert_type: Type of alert
        :param alert_params: Parameters associated with the alert, if any
        """
return
def device_deleted(self, botengine, device_object):
"""
Device is getting deleted
:param botengine: BotEngine environment
:param device_object: Device object that is getting deleted
"""
return
def question_answered(self, botengine, question):
"""
The user answered a question
:param botengine: BotEngine environment
:param question: Question object
"""
return
def datastream_updated(self, botengine, address, content):
"""
Data Stream Message Received
:param botengine: BotEngine environment
:param address: Data Stream address
:param content: Content of the message
"""
if hasattr(self, address):
getattr(self, address)(botengine, content)
def schedule_fired(self, botengine, schedule_id):
"""
The bot executed on a hard coded schedule specified by our runtime.json file
:param botengine: BotEngine environment
:param schedule_id: Schedule ID that is executing from our list of runtime schedules
"""
return
def timer_fired(self, botengine, argument):
"""
The bot's intelligence timer fired
:param botengine: Current botengine environment
:param argument: Argument applied when setting the timer
"""
return
def file_uploaded(self, botengine, device_object, file_id, filesize_bytes, content_type, file_extension):
"""
A device file has been uploaded
:param botengine: BotEngine environment
:param device_object: Device object that uploaded the file
:param file_id: File ID to reference this file at the server
:param filesize_bytes: The file size in bytes
:param content_type: The content type, for example 'video/mp4'
:param file_extension: The file extension, for example 'mp4'
"""
return
def coordinates_updated(self, botengine, latitude, longitude):
"""
Approximate coordinates of the parent proxy device object have been updated
        :param botengine: BotEngine environment
        :param latitude: Latitude
:param longitude: Longitude
"""
return
def user_role_updated(self, botengine, user_id, alert_category, location_access, previous_alert_category, previous_location_access):
"""
A user changed roles
:param botengine: BotEngine environment
:param user_id: User ID that changed roles
:param alert_category: User's current alert/communications category (1=resident; 2=supporter)
:param location_access: User's access to the location and devices. (0=None; 10=read location/device data; 20=control devices and modes; 30=update location info and manage devices)
:param previous_alert_category: User's previous category, if any
:param previous_location_access: User's previous access to the location, if any
"""
return
def data_request_ready(self, botengine, reference, csv_dict):
"""
A botengine.request_data() asynchronous request for CSV data is ready.
This is part of a very scalable method to extract large amounts of data from the server for the purpose of
machine learning services. If a service needs to extract a large amount of data for one or multiple devices,
the developer should call botengine.request_data(..) and also allow the bot to trigger off of trigger type 2048.
The bot can exit its current execution. The server will independently gather all the necessary data and
capture it into a LZ4-compressed CSV file on the server which is available for one day and accessible only by
the bot through a public HTTPS URL identified by a cryptographic token. The bot then gets triggered and
downloads the CSV data, passing the data throughout the environment with this data_request_ready()
event-driven method.
Developers are encouraged to use the 'reference' argument inside calls to botengine.request_data(..). The
reference is passed back out at the completion of the request, allowing the developer to ensure the
data request that is now available was truly destined for their microservice.
Your bots will need to include the following configuration for data requests to operate:
* runtime.json should include trigger 2048
* structure.json should include inside 'pip_install_remotely' a reference to the "lz4" Python package
:param botengine: BotEngine environment
:param reference: Optional reference passed into botengine.request_data(..)
:param csv_dict: { device_object: 'csv data string' }
"""
return
#===============================================================================
# Built-in Timer and Alarm methods.
#===============================================================================
def start_timer_ms(self, botengine, milliseconds, argument=None, reference=""):
"""
Start a relative timer in milliseconds
:param botengine: BotEngine environment
        :param milliseconds: Time in milliseconds for the timer to fire
:param argument: Optional argument to provide when the timer fires.
:param reference: Optional reference to use to manage this timer.
"""
# We seed the reference with this intelligence ID to make it unique against all other intelligence modules.
if isinstance(self.parent, Location):
# Location intelligence
bot.start_location_intelligence_timer_ms(botengine, milliseconds, self.intelligence_id, argument, self.intelligence_id + str(reference))
else:
# Device intelligence
bot.start_device_intelligence_timer_ms(botengine, milliseconds, self.intelligence_id, argument, self.intelligence_id + str(reference))
def start_timer_s(self, botengine, seconds, argument=None, reference=""):
"""
Helper function with an explicit "_s" at the end, to start a timer in seconds
:param botengine: BotEngine environment
:param seconds: Time in seconds for the timer to fire
:param argument: Optional argument to provide when the timer fires.
:param reference: Optional reference to use to manage this timer.
"""
self.start_timer(botengine, seconds, argument, str(reference))
def start_timer(self, botengine, seconds, argument=None, reference=""):
"""
Start a relative timer in seconds
:param botengine: BotEngine environment
:param seconds: Time in seconds for the timer to fire
:param argument: Optional argument to provide when the timer fires.
:param reference: Optional reference to use to manage this timer.
"""
# We seed the reference with this intelligence ID to make it unique against all other intelligence modules.
if isinstance(self.parent, Location):
# Location intelligence
bot.start_location_intelligence_timer(botengine, seconds, self.intelligence_id, argument, self.intelligence_id + str(reference))
else:
# Device intelligence
bot.start_device_intelligence_timer(botengine, seconds, self.intelligence_id, argument, self.intelligence_id + str(reference))
def is_timer_running(self, botengine, reference=""):
"""
Check if a timer or alarm with the given reference is running
:param botengine: BotEngine environment
:param reference: Reference
:return: True if timers or alarms with the given reference are running.
"""
return botengine.is_timer_running(self.intelligence_id + str(reference))
def cancel_timers(self, botengine, reference=""):
"""
Cancel timers with the given reference
:param botengine: BotEngine environment
:param reference: Cancel all timers with the given reference
"""
botengine.cancel_timers(self.intelligence_id + str(reference))
def set_alarm(self, botengine, timestamp_ms, argument=None, reference=""):
"""
Set an absolute alarm
:param botengine: BotEngine environment
:param timestamp_ms: Absolute time in milliseconds for the timer to fire
:param argument: Optional argument to provide when the timer fires.
:param reference: Optional reference to use to manage this timer.
"""
# We seed the reference with this intelligence ID to make it unique against all other intelligence modules.
if isinstance(self.parent, Location):
# Location intelligence
bot.set_location_intelligence_alarm(botengine, timestamp_ms, self.intelligence_id, argument, self.intelligence_id + str(reference))
else:
# Device intelligence
bot.set_device_intelligence_alarm(botengine, timestamp_ms, self.intelligence_id, argument, self.intelligence_id + str(reference))
def is_alarm_running(self, botengine, reference=""):
"""
Check if a timer or alarm with the given reference is running
:param botengine: BotEngine environment
:param reference: Reference
:return: True if timers or alarms with the given reference are running.
"""
return botengine.is_timer_running(self.intelligence_id + str(reference))
def cancel_alarms(self, botengine, reference=""):
"""
Cancel alarms with the given reference
:param botengine: BotEngine environment
:param reference: Cancel all alarms with the given reference
"""
# It's not a mistake that this is forwarding to `cancel_timers`.
        # They're all the same thing underneath, and this is a convenience method to help avoid confusion and questions.
botengine.cancel_timers(self.intelligence_id + str(reference))
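# --- Added illustration (not part of the original file) ---
# A minimal sketch of a microservice built on this interface; the class name
# and the "heartbeat" timer reference are hypothetical.
class ExampleIntelligence(Intelligence):
    def initialize(self, botengine):
        # Arm a one-minute heartbeat timer unless one is already pending.
        if not self.is_timer_running(botengine, reference="heartbeat"):
            self.start_timer_s(botengine, 60, argument=None, reference="heartbeat")
    def timer_fired(self, botengine, argument):
        # Fires 60 seconds later; re-arm to keep the heartbeat going.
        self.start_timer_s(botengine, 60, argument=None, reference="heartbeat")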
|