code
stringlengths 1
199k
|
|---|
"""Integration tests for loading and saving netcdf files."""
import iris.tests as tests
from os.path import join as path_join, dirname, sep as os_sep
import shutil
from subprocess import check_call
import tempfile
import iris
from iris.tests import stock
class TestClimatology(iris.tests.IrisTest):
    """Round-trip tests for saving/loading climatology data via netCDF."""

    # Path of the stored reference CDL results file, inside the iris.tests
    # package tree.
    reference_cdl_path = os_sep.join(
        [
            dirname(tests.__file__),
            (
                "results/integration/climatology/TestClimatology/"
                "reference_simpledata.cdl"
            ),
        ]
    )

    @classmethod
    def _simple_cdl_string(cls):
        # Return the reference CDL text, restoring the header line that the
        # stored results file omits.
        with open(cls.reference_cdl_path, "r") as f:
            cdl_content = f.read()
        # Add the expected CDL first line since this is removed from the
        # stored results file.
        cdl_content = "netcdf {\n" + cdl_content
        return cdl_content

    @staticmethod
    def _load_sanitised_cube(filepath):
        # Load a single cube and strip details irrelevant to comparison
        # against the reference cube (conventions attribute, var-names).
        cube = iris.load_cube(filepath)
        # Remove attributes convention, if any.
        cube.attributes.pop("Conventions", None)
        # Remove any var-names.
        for coord in cube.coords():
            coord.var_name = None
        cube.var_name = None
        return cube

    @classmethod
    def setUpClass(cls):
        # Create a temp directory for temp files.
        cls.temp_dir = tempfile.mkdtemp()
        cls.path_ref_cdl = path_join(cls.temp_dir, "standard.cdl")
        cls.path_ref_nc = path_join(cls.temp_dir, "standard.nc")
        # Create reference CDL file.
        with open(cls.path_ref_cdl, "w") as f_out:
            f_out.write(cls._simple_cdl_string())
        # Create reference netCDF file from reference CDL.
        # NOTE: requires the external 'ncgen' tool to be available on PATH.
        command = "ncgen -o {} {}".format(cls.path_ref_nc, cls.path_ref_cdl)
        check_call(command, shell=True)
        cls.path_temp_nc = path_join(cls.temp_dir, "tmp.nc")
        # Create reference cube.
        cls.cube_ref = stock.climatology_3d()

    @classmethod
    def tearDownClass(cls):
        # Destroy a temp directory for temp files.
        shutil.rmtree(cls.temp_dir)

    ###########################################################################
    # Round-trip tests

    def test_cube_to_cube(self):
        # Save reference cube to file, load cube from same file, test against
        # reference cube.
        iris.save(self.cube_ref, self.path_temp_nc)
        cube = self._load_sanitised_cube(self.path_temp_nc)
        self.assertEqual(cube, self.cube_ref)

    def test_file_to_file(self):
        # Load cube from reference file, save same cube to file, test against
        # reference CDL.
        cube = iris.load_cube(self.path_ref_nc)
        iris.save(cube, self.path_temp_nc)
        self.assertCDL(
            self.path_temp_nc,
            reference_filename=self.reference_cdl_path,
            flags="",
        )

    # NOTE:
    # The saving half of the round-trip tests is tested in the
    # appropriate dedicated test class:
    # unit.fileformats.netcdf.test_Saver.Test_write.test_with_climatology .
    # The loading half has no equivalent dedicated location, so is tested
    # here as test_load_from_file.
    def test_load_from_file(self):
        # Create cube from file, test against reference cube.
        cube = self._load_sanitised_cube(self.path_ref_nc)
        self.assertEqual(cube, self.cube_ref)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    tests.main()
|
import pymorphy2
# Rule tables for correcting lemmatized Russian surnames (used by
# lemmas_done below).  Each list holds word endings, grouped by suffix
# length; the chain of rules matches a word's last 1-6 characters.

# Endings that are phonotactically impossible in Russian: a candidate
# lemma ending in one of these is rejected in favour of the surface form.
blacklist1 = ['ъб', 'ъв', 'ъг', 'ъд', 'ъж', 'ъз', 'ък', 'ъл', 'ъм', 'ън', 'ъп', 'ър', 'ъс', 'ът', 'ъф', 'ъх', 'ъц', 'ъч', 'ъш', 'ъщ', 'йй', 'ьь', 'ъъ', 'ыы', 'чя', 'чю', 'чй', 'щя', 'щю', 'щй', 'шя', 'шю', 'шй', 'жы', 'шы', 'аь', 'еь', 'ёь', 'иь', 'йь', 'оь', 'уь', 'ыь', 'эь', 'юь', 'яь', 'аъ', 'еъ', 'ёъ', 'иъ', 'йъ', 'оъ', 'уъ', 'ыъ', 'эъ', 'юъ', 'яъ']
blacklist2 = ['чьк', 'чьн', 'щьн']  # forbidden trigrams
blacklist3 = ['руметь']
# Surname endings that are indeclinable (the word itself is the lemma).
base1 = ['ло','уа', 'ая', 'ши', 'ти', 'ни', 'ки', 'ко', 'ли', 'уи', 'до', 'аи', 'то'] # unchanged
base2 = ['алз','бва', 'йты','ике','нту','лди','лит', 'вра','афе', 'бле', 'яху','уке', 'дзе', 'ури', 'ава', 'чче','нте', 'нне', 'гие', 'уро', 'сут', 'оне', 'ино', 'йду', 'нью', 'ньо', 'ньи', 'ери', 'ску', 'дье']
base3 = ['иани','льди', 'льде', 'ейру', 'зема', 'хими', 'ками', 'кала', 'мари', 'осси', 'лари', 'тано', 'ризе', 'енте', 'енеи']
base4 = ['швили', 'льяри']
# Endings of surnames that decline: a word already ending in one of these
# is treated as nominative; declined forms are stripped back to the stem.
change1 = ['лл','рр', 'пп', 'тт', 'ер', 'ук', 'ун', 'юк', 'ан', 'ян', 'ия', 'ин'] # declines
change2 = ['вец','дюн', 'еув', 'инз', 'ейн', 'лис','лек','бен','нек','рок', 'ргл', 'бих','бус','айс','гас','таш', 'хэм', 'аал', 'дад', 'анд', 'лес', 'мар','ньш', 'рос','суф', 'вик', 'якс', 'веш','анц', 'янц', 'сон', 'сен', 'нен', 'ман', 'цак', 'инд', 'кин', 'чин', 'рем', 'рём', 'дин']
change3 = ['ерит', 'гард', 'иньш', 'скис', 'ллит', 'еней', 'рроз', 'манн', 'берг', 'вист', 'хайм',]
# Declined endings of feminine adjectival/possessive surnames and their
# reconstruction targets ('-ская', '-ова', '-ева', '-ина').
female1 = ['ская', 'ской', 'скую']
female2 = ['овой']
female3 = ['евой']
female4 = ['иной']
# Case endings that are ambiguous between masculine oblique forms and
# feminine nominatives.
middlemale = ['а', 'у']
middlestr1 = ['ии', 'ию'] # declined forms of names like 'Данелия'
middlestr2 = ['ией']
# Masculine possessive surname suffixes ('-ов', '-ев', '-ин') and their
# declined case endings.
male = ['ов', 'ев', 'ин']
male1 = ['ский', 'ским', 'ском']
male2 = ['ского', 'скому']
male3 = ['е', 'ы']
male4 = ['ым', 'ом', 'ем', 'ой']
# Miscellaneous stems whose lemma ends in '-а'.
side1 = ['авы', 'аве', 'аву', 'фик', 'иол', 'риц', 'икк', 'ест', 'рех', 'тин']
side2 = ['авой']
# Patronymic suffixes ('-вич', '-вна').
sname = ['вич', 'вна']
sname1 = ['вн']
def lemmas_done(found, lemmatized):
    """
    Check predicted lemmas according to the rules.

    :param found: surface forms as found in the text (parallel to lemmatized)
    :param lemmatized: lemma predicted upstream for each surface form
    :return: tuple (fix, newfreq) where ``fix`` is the corrected lemma list
        with any extra feminine-surname variants appended at the end, and
        ``newfreq`` is the number of appended variants.
    """
    morph = pymorphy2.MorphAnalyzer()
    fix = []
    fixednames = []
    doublefemale = []
    for word, lemma in zip(found, lemmatized):
        parse = morph.parse(word)[0]
        # Surname-correction rules only apply to capitalised nouns; every
        # other token keeps the lemma predicted upstream.
        # (Previously `found[i].istitle()` was re-evaluated in every branch.)
        if parse.tag.POS != 'NOUN' or not word.istitle():
            fixednames.append(lemma)
            continue
        if (word[-2:] in base1) or (word[-2:] in male) or (word[-3:] in base2) or (word[-4:] in base3) or (word[-5:] in base4):
            # Already a base (indeclinable or nominative) form.
            fixednames.append(word)
        elif (word[-2:] in change1) or (word[-3:] in change2) or (word[-4:] in change3):
            # Nominative of a declining surname.
            fixednames.append(word)
        elif word[-4:] in female1:
            fixednames.append(word[:-2] + 'ая')
        elif word[-4:] in female2:
            fixednames.append(word[:-4] + 'ова')
        elif word[-4:] in female3:
            fixednames.append(word[:-4] + 'ева')
        elif word[-4:] in female4:
            fixednames.append(word[:-4] + 'ина')
        elif word[-4:] in male1:
            fixednames.append(word[:-2] + 'ий')
        elif word[-5:] in male2:
            fixednames.append(word[:-3] + 'ий')
        elif (word[-1:] in male3) and (word[-3:-1] in male):
            fixednames.append(word[:-1])
        elif (word[-2:] in male4) and (word[-4:-2] in male):
            fixednames.append(word[:-2])
        elif (word[-1:] in middlemale) and (word[-3:-1] in male):
            # Ambiguous: could be a declined male form or a female surname;
            # record the feminine variant as well.
            fixednames.append(word[:-1])
            doublefemale.append(word[:-1] + 'а')
        elif ((word[-1:] in male3) or (word[-1:] in middlemale)) and (word[-3:-1] in change1):
            fixednames.append(word[:-1])
        elif ((word[-1:] in male3) or (word[-1:] in middlemale)) and (word[-4:-1] in change2):
            fixednames.append(word[:-1])
        elif ((word[-1:] in male3) or (word[-1:] in middlemale)) and (word[-5:-1] in change3):
            fixednames.append(word[:-1])
        elif (word[-2:] in male4) and (word[-4:-2] in change1):
            fixednames.append(word[:-2])
        elif (word[-2:] in male4) and (word[-5:-2] in change2):
            fixednames.append(word[:-2])
        elif (word[-2:] in male4) and (word[-6:-2] in change3):
            fixednames.append(word[:-2])
        elif word[-2:] in middlestr1:
            fixednames.append(word[:-1] + 'я')
        elif word[-3:] in middlestr2:
            fixednames.append(word[:-2] + 'я')
        elif word[-3:] in side1:
            fixednames.append(word[:-1] + 'а')
        elif word[-4:] in side2:
            fixednames.append(word[:-2] + 'а')
        elif word[-4:-1] in side1:
            fixednames.append(word[:-1] + 'а')
        elif word[-5:-2] in side1:
            fixednames.append(word[:-2] + 'а')
        elif word[-3:] in sname:
            # Patronymic already in base form.
            fixednames.append(word)
        elif (word[-4:-1] in sname) and ((word[-1:] in middlemale) or (word[-1:] in male3)):
            fixednames.append(word[:-1])
        elif (word[-5:-2] in sname) and (word[-2:] in male4):
            fixednames.append(word[:-2])
        elif (word[-3:-1] in sname1) and ((word[-1:] in middlemale) or (word[-1:] in male3)):
            fixednames.append(word[:-1] + 'а')
        elif (word[-4:-2] in sname1) and (word[-2:] in male4):
            fixednames.append(word[:-2] + 'а')
        else:
            # No rule matched: fall back to the predicted lemma.
            fixednames.append(lemma)
    # Reject any fixed name with a forbidden ending and fall back to the
    # original surface form instead.
    for name, word in zip(fixednames, found):
        if (name[-2:] in blacklist1) or (name[-3:] in blacklist2) or (name[-6:] in blacklist3):
            fix.append(word)
        else:
            fix.append(name)
    fix = fix + doublefemale
    newfreq = len(doublefemale)
    return fix, newfreq
|
import logging.handlers
import os
import tempfile
from ceilometer.dispatcher import file
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
from ceilometer.publisher import utils
class TestDispatcherFile(test.BaseTestCase):
    """Tests for the ceilometer file dispatcher's logging-handler setup."""

    def setUp(self):
        super(TestDispatcherFile, self).setUp()
        # Test-local mutable config fixture.
        self.CONF = self.useFixture(config.Config()).conf

    def test_file_dispatcher_with_all_config(self):
        # Create a temporaryFile to get a file name
        tf = tempfile.NamedTemporaryFile('r')
        filename = tf.name
        tf.close()
        self.CONF.dispatcher_file.file_path = filename
        self.CONF.dispatcher_file.max_bytes = 50
        self.CONF.dispatcher_file.backup_count = 5
        dispatcher = file.FileDispatcher(self.CONF)
        # The number of the handlers should be 1
        self.assertEqual(1, len(dispatcher.log.handlers))
        # With max_bytes/backup_count set, the handler should be a
        # RotatingFileHandler.
        handler = dispatcher.log.handlers[0]
        self.assertIsInstance(handler,
                              logging.handlers.RotatingFileHandler)
        msg = {'counter_name': 'test',
               'resource_id': self.id(),
               'counter_volume': 1,
               }
        msg['message_signature'] = utils.compute_signature(
            msg,
            self.CONF.publisher.metering_secret,
        )
        # The record_metering_data method should exist and not produce errors.
        dispatcher.record_metering_data(msg)
        # After the method call above, the file should have been created.
        self.assertTrue(os.path.exists(handler.baseFilename))

    def test_file_dispatcher_with_path_only(self):
        # Create a temporaryFile to get a file name
        tf = tempfile.NamedTemporaryFile('r')
        filename = tf.name
        tf.close()
        self.CONF.dispatcher_file.file_path = filename
        self.CONF.dispatcher_file.max_bytes = None
        self.CONF.dispatcher_file.backup_count = None
        dispatcher = file.FileDispatcher(self.CONF)
        # The number of the handlers should be 1
        self.assertEqual(1, len(dispatcher.log.handlers))
        # Without rotation config, the handler should be a plain FileHandler
        # (the original comment incorrectly said RotatingFileHandler).
        handler = dispatcher.log.handlers[0]
        self.assertIsInstance(handler,
                              logging.FileHandler)
        msg = {'counter_name': 'test',
               'resource_id': self.id(),
               'counter_volume': 1,
               }
        msg['message_signature'] = utils.compute_signature(
            msg,
            self.CONF.publisher.metering_secret,
        )
        # The record_metering_data method should exist and not produce errors.
        dispatcher.record_metering_data(msg)
        # After the method call above, the file should have been created.
        self.assertTrue(os.path.exists(handler.baseFilename))

    def test_file_dispatcher_with_no_path(self):
        self.CONF.dispatcher_file.file_path = None
        dispatcher = file.FileDispatcher(self.CONF)
        # The log should be None
        self.assertIsNone(dispatcher.log)
|
from pecan import rest
from pecan import expose
from pecan import request
from mistral.openstack.common import log as logging
from mistral.db import api as db_api
from mistral.services import scheduler
LOG = logging.getLogger(__name__)
class WorkbookDefinitionController(rest.RestController):
    """REST controller exposing a workbook's raw definition text."""

    @expose()
    def get(self, workbook_name):
        # Return the stored definition text for the named workbook.
        LOG.debug("Fetch workbook definition [workbook_name=%s]" %
                  workbook_name)
        return db_api.workbook_definition_get(workbook_name)

    @expose(content_type="text/plain")
    def put(self, workbook_name):
        # Replace the definition with the raw request body, then (re)create
        # any triggers the new definition declares.
        text = request.text
        LOG.debug("Update workbook definition [workbook_name=%s, text=%s]" %
                  (workbook_name, text))
        wb = db_api.workbook_definition_put(workbook_name, text)
        scheduler.create_associated_triggers(wb)
        return wb['definition']
|
from trex_astf_lib.api import *
class Prof1():
    """TRex ASTF traffic profile: HTTP browsing capture with 8KB TCP buffers."""

    def __init__(self):
        pass

    def get_profile(self, **kwargs):
        # ip generator
        ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
        ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
        ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
                           dist_client=ip_gen_c,
                           dist_server=ip_gen_s)
        # Shrink TCP rx/tx buffers to 8KB on both client and server sides.
        c_glob_info = ASTFGlobalInfo()
        c_glob_info.tcp.rxbufsize = 8*1024
        c_glob_info.tcp.txbufsize = 8*1024
        s_glob_info = ASTFGlobalInfo()
        s_glob_info.tcp.rxbufsize = 8*1024
        s_glob_info.tcp.txbufsize = 8*1024
        return ASTFProfile(default_ip_gen=ip_gen,
                           # Defaults affects all files
                           default_c_glob_info=c_glob_info,
                           default_s_glob_info=s_glob_info,
                           cap_list=[
                               ASTFCapInfo(file="../avl/delay_10_http_browsing_0.pcap", cps=1)
                           ]
                           )
def register():
    # Entry point used by the TRex profile loader.
    return Prof1()
|
from pony.orm.core import *
|
from chaco.data_label import DataLabel
from chaco.plot_label import PlotLabel
from numpy import max
from traits.api import Bool, Str
from pychron.pipeline.plot.overlays.mean_indicator_overlay import MovableMixin
try:

    class FlowPlotLabel(PlotLabel, MovableMixin):
        """A draggable PlotLabel: applies MovableMixin offsets when drawn."""

        def overlay(self, component, gc, *args, **kw):
            # Apply any drag offset recorded by MovableMixin before drawing.
            if self.ox:
                self.x = self.ox - self.offset_x
                self.y = self.oy - self.offset_y
            super(FlowPlotLabel, self).overlay(component, gc, *args, **kw)

        def hittest(self, pt):
            # True if pt falls within the label's preferred bounding box.
            x, y = pt
            w, h = self.get_preferred_size()
            return abs(x - self.x) < w and abs(y - self.y) < h

except TypeError:
    # documentation auto doc hack: when the GUI toolkit is unavailable the
    # multiple-inheritance above raises TypeError, so fall back to a stub.
    class FlowPlotLabel:
        pass
class FlowDataLabel(DataLabel):
    """
    A DataLabel that repositions itself if it doesn't fit within its
    component's bounds.
    """

    # Clamp the label horizontally / vertically inside the component.
    constrain_x = Bool(True)
    constrain_y = Bool(True)
    id = Str

    def overlay(self, component, gc, *args, **kw):
        # face name was getting set to "Helvetica" by reportlab during pdf generation
        # set face_name back to "" to prevent font display issue. see issue #72
        self.font.face_name = ""
        super(FlowDataLabel, self).overlay(component, gc, *args, **kw)

    def do_layout(self, **kw):
        # Lay out normally, then nudge the label back inside the component.
        # NOTE: 'max' here is numpy.max (module-level import shadows the
        # builtin); on a 2-tuple it behaves like builtin max.
        DataLabel.do_layout(self, **kw)
        ws, hs = self._cached_line_sizes.T
        if self.constrain_x:
            w = max(ws)
            # Negative d means the label overflows the right edge; shift left.
            d = self.component.x2 - (self.x + w + 3 * self.border_padding)
            if d < 0:
                self.x += d
            self.x = max((self.x, 0))
        if self.constrain_y:
            h = max(hs)
            self.y = max((self.y, 0))
            # Keep the top of the label below the component's upper edge.
            yd = self.component.y2 - h - 2 * self.border_padding - self.line_spacing
            self.y = min((self.y, yd))
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import logging
# Module-level logger shared by all helper functions below.
_logger = logging.getLogger('websocket')
try:
    from logging import NullHandler
except ImportError:
    # logging.NullHandler only exists from Python 2.7; provide a fallback.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
# Attach a no-op handler so library users without logging config get no
# "no handlers could be found" warnings.
_logger.addHandler(NullHandler())
# Flag toggled by enableTrace(); gates dump()/trace() output.
_traceEnabled = False
__all__ = ["enableTrace", "dump", "error", "warning", "debug", "trace",
           "isEnabledForError", "isEnabledForDebug", "isEnabledForTrace"]
def enableTrace(traceable, handler=None):
    """
    turn on/off the traceability.

    traceable: boolean value. if set True, traceability is enabled.
    handler: optional logging.Handler to attach when enabling; by default
        a fresh StreamHandler is created per call.
    """
    # FIX: the previous signature used `handler=logging.StreamHandler()`,
    # which was evaluated once at import time, so all callers shared (and
    # repeatedly re-added) the same handler instance.
    global _traceEnabled
    _traceEnabled = traceable
    if traceable:
        if handler is None:
            handler = logging.StreamHandler()
        _logger.addHandler(handler)
        _logger.setLevel(logging.DEBUG)
def dump(title, message):
    """Emit a framed debug dump of *message*, only while tracing is on."""
    if not _traceEnabled:
        return
    _logger.debug("--- " + title + " ---")
    _logger.debug(message)
    _logger.debug("-----------------------")


def error(msg):
    """Log *msg* at ERROR level."""
    _logger.error(msg)


def warning(msg):
    """Log *msg* at WARNING level."""
    _logger.warning(msg)


def debug(msg):
    """Log *msg* at DEBUG level."""
    _logger.debug(msg)


def trace(msg):
    """Log *msg* at DEBUG level, but only while tracing is enabled."""
    if _traceEnabled:
        _logger.debug(msg)


def isEnabledForError():
    """Return True if the websocket logger would emit ERROR records."""
    return _logger.isEnabledFor(logging.ERROR)


def isEnabledForDebug():
    """Return True if the websocket logger would emit DEBUG records."""
    return _logger.isEnabledFor(logging.DEBUG)


def isEnabledForTrace():
    """Return True if tracing has been enabled via enableTrace()."""
    return _traceEnabled
|
import pytest
from mock import Mock
from calvin.tests import DummyNode
from calvin.runtime.north.actormanager import ActorManager
from calvin.runtime.south.endpoint import LocalOutEndpoint, LocalInEndpoint
from calvin.actor.actor import Actor
pytestmark = pytest.mark.unittest
def create_actor(node):
    """Create a std.Identity actor on *node* with a mocked calvinsys."""
    actor_manager = ActorManager(node)
    actor_id = actor_manager.new('std.Identity', {})
    actor = actor_manager.actors[actor_id]
    # Replace the runtime services with a Mock so tests can observe calls.
    actor._calvinsys = Mock()
    return actor
@pytest.fixture
def actor():
    # A fresh std.Identity actor on a dummy node for each test.
    return create_actor(DummyNode())
@pytest.mark.parametrize("port_type,port_name,port_property,value,expected", [
    ("invalid", "", "", "", False),
    ("in", "missing", "", "", False),
    ("out", "missing", "", "", False),
    ("out", "token", "missing", "", False),
    ("in", "token", "missing", "", False),
    ("out", "token", "name", "new_name", True),
    # NOTE(review): this row duplicates the one above — possibly intended
    # to cover re-setting an already-set property; confirm and dedupe.
    ("out", "token", "name", "new_name", True),
])
def test_set_port_property(port_type, port_name, port_property, value, expected):
    """set_port_property succeeds only for a valid port type/name/property."""
    # FIX: the original called the 'actor' fixture function directly
    # (`actor()`), which raises an error on pytest >= 4; build the actor
    # through the plain helper instead.
    assert create_actor(DummyNode()).set_port_property(
        port_type, port_name, port_property, value) is expected
@pytest.mark.parametrize("inport_ret_val,outport_ret_val,expected", [
    (False, False, False),
    (False, True, False),
    (True, False, False),
    (True, True, True),
])
def test_did_connect(actor, inport_ret_val, outport_ret_val, expected):
    # The actor should transition to ENABLED (and wake the scheduler) only
    # when every in- and out-port reports connected.
    for port in actor.inports.values():
        port.is_connected = Mock(return_value=inport_ret_val)
    for port in actor.outports.values():
        port.is_connected = Mock(return_value=outport_ret_val)
    actor.fsm = Mock()
    actor.did_connect(None)
    if expected:
        actor.fsm.transition_to.assert_called_with(Actor.STATUS.ENABLED)
        assert actor._calvinsys.scheduler_wakeup.called
    else:
        assert not actor.fsm.transition_to.called
        assert not actor._calvinsys.scheduler_wakeup.called
@pytest.mark.parametrize("inport_ret_val,outport_ret_val,expected", [
    (True, True, False),
    (True, False, False),
    (False, True, False),
    (False, False, True),
])
def test_did_disconnect(actor, inport_ret_val, outport_ret_val, expected):
    # The actor should transition to READY only once every port reports
    # disconnected.
    for port in actor.inports.values():
        port.is_connected = Mock(return_value=inport_ret_val)
    for port in actor.outports.values():
        port.is_connected = Mock(return_value=outport_ret_val)
    actor.fsm = Mock()
    actor.did_disconnect(None)
    if expected:
        actor.fsm.transition_to.assert_called_with(Actor.STATUS.READY)
    else:
        assert not actor.fsm.transition_to.called
def test_enabled(actor):
    # enable()/disable() should toggle the enabled() predicate.
    actor.enable()
    assert actor.enabled()
    actor.disable()
    assert not actor.enabled()
def test_connections():
    # connections() should report, per port, the node and the peer port id
    # ("y") of each attached endpoint.
    node = DummyNode()
    node.id = "node_id"
    actor = create_actor(node)
    inport = actor.inports['token']
    outport = actor.outports['token']
    port = Mock()
    port.id = "x"
    peer_port = Mock()
    peer_port.id = "y"
    inport.attach_endpoint(LocalInEndpoint(port, peer_port))
    outport.attach_endpoint(LocalOutEndpoint(port, peer_port))
    assert actor.connections(node) == {
        'actor_id': actor.id,
        'actor_name': actor.name,
        'inports': {inport.id: (node, "y")},
        'outports': {outport.id: [(node, "y")]}
    }
def test_state(actor):
    """actor.state() must serialize ports, fifos and managed attributes."""
    inport = actor.inports['token']
    outport = actor.outports['token']
    correct_state = {
        '_component_members': set([actor.id]),
        '_deployment_requirements': [],
        '_managed': set(['dump', '_signature', 'id', '_deployment_requirements', 'name', 'credentials']),
        '_signature': None,
        'dump': False,
        'id': actor.id,
        'inports': {'token': {'fifo': {'N': 5,
                                       'fifo': [{'data': 0, 'type': 'Token'},
                                                {'data': 0, 'type': 'Token'},
                                                {'data': 0, 'type': 'Token'},
                                                {'data': 0, 'type': 'Token'},
                                                {'data': 0, 'type': 'Token'}],
                                       'read_pos': {inport.id: 0},
                                       'readers': [inport.id],
                                       'tentative_read_pos': {inport.id: 0},
                                       'write_pos': 0},
                              'id': inport.id,
                              'name': 'token'}},
        'name': '',
        'outports': {'token': {'fanout': 1,
                               'fifo': {'N': 5,
                                        'fifo': [{'data': 0, 'type': 'Token'},
                                                 {'data': 0, 'type': 'Token'},
                                                 {'data': 0, 'type': 'Token'},
                                                 {'data': 0, 'type': 'Token'},
                                                 {'data': 0, 'type': 'Token'}],
                                        'read_pos': {},
                                        'readers': [],
                                        'tentative_read_pos': {},
                                        'write_pos': 0},
                               'id': outport.id,
                               'name': 'token'}}}
    test_state = actor.state()
    # FIX: use .items() instead of the Python-2-only .iteritems() so the
    # test runs unchanged on both Python 2 and 3.
    for k, v in correct_state.items():
        # Read state use list to support JSON serialization
        if isinstance(v, set):
            assert set(test_state[k]) == v
        else:
            assert test_state[k] == v
@pytest.mark.parametrize("prev_signature,new_signature,expected", [
    (None, "new_val", "new_val"),
    ("old_val", "new_val", "old_val")
])
def test_set_signature(actor, prev_signature, new_signature, expected):
    # signature_set should take effect only while no signature is set yet.
    actor.signature_set(prev_signature)
    actor.signature_set(new_signature)
    assert actor._signature == expected
def test_component(actor):
    # component_add/remove accept both single members and iterables.
    actor.component_add(1)
    assert 1 in actor.component_members()
    actor.component_add([2, 3])
    assert 2 in actor.component_members()
    assert 3 in actor.component_members()
    actor.component_remove(1)
    assert 1 not in actor.component_members()
    actor.component_remove([2, 3])
    assert 2 not in actor.component_members()
    assert 3 not in actor.component_members()
def test_requirements(actor):
    # requirements_add replaces by default and appends with extend=True.
    assert actor.requirements_get() == []
    actor.requirements_add([1, 2, 3])
    assert actor.requirements_get() == [1, 2, 3]
    actor.requirements_add([4, 5])
    assert actor.requirements_get() == [4, 5]
    actor.requirements_add([6, 7], extend=True)
    assert actor.requirements_get() == [4, 5, 6, 7]
|
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
|
import logging
import threading
import time
from ballast.discovery import ServerList
from ballast.rule import Rule, RoundRobinRule
from ballast.ping import (
Ping,
SocketPing,
PingStrategy,
SerialPingStrategy
)
class LoadBalancer(object):
    """Client-side load balancer.

    Chooses servers via a pluggable Rule and keeps per-server liveness up
    to date by pinging on a daemon background thread.
    """

    # Seconds between background ping sweeps.
    DEFAULT_PING_INTERVAL = 30
    # Default per-ping timeout, seconds.
    MAX_PING_TIME = 3

    def __init__(self, server_list, rule=None, ping_strategy=None, ping=None, ping_on_start=True):
        # NOTE(review): `assert` validation is stripped under `python -O`;
        # consider raising TypeError instead.
        assert isinstance(server_list, ServerList)
        assert rule is None or isinstance(rule, Rule)
        assert ping_strategy is None or isinstance(ping_strategy, PingStrategy)
        assert ping is None or isinstance(ping, Ping)

        # some locks for thread-safety:
        # _lock guards the ping-timer lifecycle, _server_lock the server set.
        self._lock = threading.Lock()
        self._server_lock = threading.Lock()

        self._rule = rule \
            if rule is not None \
            else RoundRobinRule()
        self._ping_strategy = ping_strategy \
            if ping_strategy is not None \
            else SerialPingStrategy()
        self._ping = ping \
            if ping is not None \
            else SocketPing()

        self.max_ping_time = self.MAX_PING_TIME
        self._ping_interval = self.DEFAULT_PING_INTERVAL
        self._server_list = server_list
        self._servers = set()
        self._stats = LoadBalancerStats()
        # Give the rule a back-reference so it can inspect our servers.
        self._rule.load_balancer = self
        self._logger = logging.getLogger(self.__module__)

        # start our background worker
        # to periodically ping our servers
        self._ping_timer_running = False
        self._ping_timer = None
        if ping_on_start:
            self._start_ping_timer()

    @property
    def ping_interval(self):
        return self._ping_interval

    @ping_interval.setter
    def ping_interval(self, value):
        # Restart the timer thread so the new interval takes effect.
        self._ping_interval = value
        if self._ping_timer_running:
            self._stop_ping_timer()
            self._start_ping_timer()

    @property
    def max_ping_time(self):
        if self._ping is None:
            return 0
        return self._ping.max_ping_time

    @max_ping_time.setter
    def max_ping_time(self, value):
        if self._ping is not None:
            self._ping.max_ping_time = value

    @property
    def stats(self):
        return self._stats

    @property
    def servers(self):
        # Return a snapshot copy so callers can't mutate our state.
        with self._server_lock:
            return set(self._servers)

    @property
    def reachable_servers(self):
        # Snapshot of only the servers currently marked alive.
        with self._server_lock:
            servers = set()
            for s in self._servers:
                if s.is_alive:
                    servers.add(s)
            return servers

    def choose_server(self):
        # choose a server, will
        # throw if there are none
        server = self._rule.choose()
        return server

    def mark_server_down(self, server):
        self._logger.debug("Marking server down: %s", server)
        server._is_alive = False

    def ping(self, server=None):
        # Ping one server, or sweep all of them when server is None.
        if server is None:
            self._ping_all_servers()
        else:
            is_alive = self._ping.is_alive(server)
            server._is_alive = is_alive

    def ping_async(self, server=None):
        # Like ping(), but the all-servers sweep runs on a daemon thread.
        if server is None:
            t = threading.Thread(name='ballast-worker', target=self._ping_all_servers)
            t.daemon = True
            t.start()
        else:
            # NOTE(review): the single-server case is still synchronous.
            is_alive = self._ping.is_alive(server)
            server._is_alive = is_alive

    def _ping_all_servers(self):
        # Refresh the server set from the strategy's ping results; holds the
        # server lock for the whole sweep.
        with self._server_lock:
            results = self._ping_strategy.ping(
                self._ping,
                self._server_list
            )
            self._servers = set(results)

    def _start_ping_timer(self):
        with self._lock:
            if self._ping_timer_running:
                self._logger.debug("Background pinger already running")
                return
            self._ping_timer_running = True
            self._ping_timer = threading.Thread(name='ballast-worker', target=self._ping_loop)
            self._ping_timer.daemon = True
            self._ping_timer.start()

    def _stop_ping_timer(self):
        with self._lock:
            # The loop checks this flag each iteration and exits.
            self._ping_timer_running = False
            self._ping_timer = None

    def _ping_loop(self):
        while self._ping_timer_running:
            try:
                self._ping_all_servers()
            except BaseException as e:
                # NOTE(review): BaseException also catches KeyboardInterrupt/
                # SystemExit; Exception would likely suffice here.
                self._logger.error("There was an error pinging servers: %s", e)
            time.sleep(self._ping_interval)
class LoadBalancerStats(object):
    """Placeholder for per-server statistics collection."""

    def get_server_stats(self, server):
        """Return statistics for *server*; not implemented yet (None)."""
        return None
|
import os, re, csv
# Harvest RMSE + noise vectors from files in ./output (oldest first) into
# ../results.csv.  NOTE: Python 2 code (print statements, py2 csv 'wb').
noise_pattern = 'noise: \[(.+)\]'
res_pattern = '^([0-9.]+$)'
search_dir = "output"
results_file = '../results.csv'
os.chdir( search_dir )
# All regular files in the output dir, sorted by modification time.
files = filter( os.path.isfile, os.listdir( '.' ))
files.sort( key=lambda x: os.path.getmtime( x ))
results = []
for file in files:
    f = open( file )
    contents = f.read()
    # noise
    matches = re.search( noise_pattern, contents, re.DOTALL )
    try:
        noise = matches.group( 1 )
        noise = noise.strip()
        noise = noise.split()
    except AttributeError:
        # re.search returned None: no noise line in this file, skip it.
        print "noise error 1: %s" % ( contents )
        continue
    # rmse
    matches = re.search( res_pattern, contents, re.M )
    try:
        res = matches.group( 1 )
    except AttributeError:
        # No standalone numeric (RMSE) line found, skip this file.
        print "matches error 2: %s" % ( contents )
        continue
    # One CSV row per file: rmse followed by the noise components.
    results.append( [ res ] + noise )
# 'wb' mode is correct for the csv module under Python 2.
writer = csv.writer( open( results_file, 'wb' ))
for result in results:
    writer.writerow( result )
|
import os
import logging
from mongodb_consistent_backup.Common import LocalCommand
from mongodb_consistent_backup.Pipeline import PoolThread
class TarThread(PoolThread):
    """Pool worker that archives a backup directory with the external 'tar'
    binary, optionally gzip-compressing, and removes the source files.

    NOTE: Python 2 code (`except Exception, e` syntax below).
    """

    def __init__(self, backup_dir, output_file, compression='none', verbose=False, binary="tar"):
        super(TarThread, self).__init__(self.__class__.__name__, compression)
        self.compression_method = compression
        self.backup_dir = backup_dir
        self.output_file = output_file
        self.verbose = verbose
        self.binary = binary

        # Populated with the running LocalCommand so close() can stop it.
        self._command = None

    def close(self, exit_code=None, frame=None):
        # Signal-handler compatible signature; exit_code/frame are unused.
        if self._command and not self.stopped:
            logging.debug("Stopping running tar command: %s" % self._command.command)
            del exit_code
            del frame
            self._command.close()
            self.stopped = True

    def run(self):
        # Archive self.backup_dir into self.output_file via `tar -C <parent>
        # -c -f <out> --remove-files [-z] <dirname>`.
        if os.path.isdir(self.backup_dir):
            if not os.path.isfile(self.output_file):
                try:
                    backup_base_dir  = os.path.dirname(self.backup_dir)
                    backup_base_name = os.path.basename(self.backup_dir)

                    log_msg   = "Archiving directory: %s" % self.backup_dir
                    cmd_flags = ["-C", backup_base_dir, "-c", "-f", self.output_file, "--remove-files"]

                    if self.do_gzip():
                        log_msg = "Archiving and compressing directory: %s" % self.backup_dir
                        cmd_flags.append("-z")

                    cmd_flags.append(backup_base_name)
                    logging.info(log_msg)
                    self.running = True
                    self._command = LocalCommand(self.binary, cmd_flags, self.verbose)
                    self.exit_code = self._command.run()
                except Exception, e:
                    return self.result(False, "Failed archiving file: %s!" % self.output_file, e)
                finally:
                    self.running   = False
                    self.stopped   = True
                    self.completed = True
            else:
                return self.result(False, "Output file: %s already exists!" % self.output_file, None)
        # NOTE(review): when backup_dir does not exist this still reports
        # success — confirm that is intended.
        return self.result(True, "Archiving successful.", None)

    def result(self, success, message, error):
        # Uniform result dict consumed by the pipeline.
        return {
            "success":   success,
            "message":   message,
            "error":     error,
            "directory": self.backup_dir,
            "exit_code": self.exit_code
        }
|
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def garch(X: Matrix,
          kmax: int,
          momentum: float,
          start_stepsize: float,
          end_stepsize: float,
          start_vicinity: float,
          end_vicinity: float,
          sim_seed: int,
          verbose: bool):
    """
    :param X: The input Matrix to apply Arima on.
    :param kmax: Number of iterations
    :param momentum: Momentum for momentum-gradient descent (set to 0 to deactivate)
    :param start_stepsize: Initial gradient-descent stepsize
    :param end_stepsize: gradient-descent stepsize at end (linear descent)
    :param start_vicinity: proportion of randomness of restart-location for gradient descent at beginning
    :param end_vicinity: same at end (linear decay)
    :param sim_seed: seed for simulation of process on fitted coefficients
    :param verbose: verbosity, comments during fitting
    :return: 'OperationNode' containing simulated garch(1,1) process on fitted coefficients & variances of simulated fitted process & constant term of fitted process & 1-st arch-coefficient of fitted process & 1-st garch-coefficient of fitted process & drawbacks: slow convergence of optimization (sort of simulated annealing/gradient descent)
    """
    params_dict = {
        'X': X,
        'kmax': kmax,
        'momentum': momentum,
        'start_stepsize': start_stepsize,
        'end_stepsize': end_stepsize,
        'start_vicinity': start_vicinity,
        'end_vicinity': end_vicinity,
        'sim_seed': sim_seed,
        'verbose': verbose,
    }
    # Two matrix outputs followed by three scalar outputs, in DML order.
    ctx = X.sds_context
    output_nodes = [Matrix(ctx, '') for _ in range(2)]
    output_nodes += [Scalar(ctx, '') for _ in range(3)]
    op = MultiReturn(ctx, 'garch', output_nodes, named_input_nodes=params_dict)
    # Wire each output node back to the multi-return op as its producer.
    for node in output_nodes:
        node._unnamed_input_nodes = [op]
    return op
|
"""
Tests for login and logout.
"""
import datetime
from unittest.mock import patch
import responses
import quilt3
from .utils import QuiltTestCase
class TestSession(QuiltTestCase):
    """Tests for quilt3 login/logout and credential refresh."""

    @patch('quilt3.session.open_url')
    @patch('quilt3.session.input', return_value='123456')
    @patch('quilt3.session.login_with_token')
    def test_login(self, mock_login_with_token, mock_input, mock_open_url):
        # login() should open the registry login page and pass the code the
        # user typed to login_with_token.
        quilt3.login()

        url = quilt3.session.get_registry_url()
        mock_open_url.assert_called_with(f'{url}/login')
        mock_login_with_token.assert_called_with('123456')

    @patch('quilt3.session._save_auth')
    @patch('quilt3.session._save_credentials')
    def test_login_with_token(self, mock_save_credentials, mock_save_auth):
        # Exchanging a refresh code must persist both the auth token and the
        # AWS credentials fetched from the registry.
        url = quilt3.session.get_registry_url()
        mock_auth = dict(
            refresh_token='refresh-token',
            access_token='access-token',
            expires_at=123456789
        )
        self.requests_mock.add(
            responses.POST,
            f'{url}/api/token',
            json=mock_auth,
            status=200
        )
        self.requests_mock.add(
            responses.GET,
            f'{url}/api/auth/get_credentials',
            json=dict(
                AccessKeyId='access-key',
                SecretAccessKey='secret-key',
                SessionToken='session-token',
                Expiration="2019-05-28T23:58:07+00:00"
            ),
            status=200
        )

        quilt3.session.login_with_token('123456')
        mock_save_auth.assert_called_with({url: mock_auth})
        mock_save_credentials.assert_called_with(dict(
            access_key='access-key',
            secret_key='secret-key',
            token='session-token',
            expiry_time="2019-05-28T23:58:07+00:00"
        ))

    @patch('quilt3.session._save_credentials')
    @patch('quilt3.session._load_credentials')
    def test_create_botocore_session(self, mock_load_credentials, mock_save_credentials):
        def format_date(date):
            # Botocore expects an aware ISO-8601 timestamp without microseconds.
            return date.replace(tzinfo=datetime.timezone.utc, microsecond=0).isoformat()

        # Test good credentials: used as-is, nothing re-saved.
        future_date = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
        mock_load_credentials.return_value = dict(
            access_key='access-key',
            secret_key='secret-key',
            token='session-token',
            expiry_time=format_date(future_date)
        )

        session = quilt3.session.create_botocore_session()
        credentials = session.get_credentials()
        assert credentials.access_key == 'access-key'
        assert credentials.secret_key == 'secret-key'
        assert credentials.token == 'session-token'
        mock_save_credentials.assert_not_called()

        # Test expired credentials: refreshed from the registry and re-saved.
        past_date = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)
        mock_load_credentials.return_value = dict(
            access_key='access-key',
            secret_key='secret-key',
            token='session-token',
            expiry_time=format_date(past_date)
        )
        url = quilt3.session.get_registry_url()
        self.requests_mock.add(
            responses.GET,
            f'{url}/api/auth/get_credentials',
            json=dict(
                AccessKeyId='access-key2',
                SecretAccessKey='secret-key2',
                SessionToken='session-token2',
                Expiration=format_date(future_date)
            ),
            status=200
        )

        session = quilt3.session.create_botocore_session()
        credentials = session.get_credentials()
        assert credentials.access_key == 'access-key2'
        assert credentials.secret_key == 'secret-key2'
        assert credentials.token == 'session-token2'
        mock_save_credentials.assert_called()

    def test_logged_in(self):
        registry_url = quilt3.session.get_registry_url()
        other_registry_url = registry_url + 'other'
        mock_auth = dict(
            refresh_token='refresh-token',
            access_token='access-token',
            expires_at=123456789,
        )

        # NOTE(review): this assumes the test registry URL is
        # 'https://example.com' — presumably set by QuiltTestCase; confirm.
        with patch('quilt3.session._load_auth', return_value={registry_url: mock_auth}) as mocked_load_auth:
            assert quilt3.logged_in() == 'https://example.com'
            mocked_load_auth.assert_called_once()

        # Auth stored for a different registry must not count as logged in.
        with patch('quilt3.session._load_auth', return_value={other_registry_url: mock_auth}) as mocked_load_auth:
            assert quilt3.logged_in() is None
            mocked_load_auth.assert_called_once()
|
import logging
import re
import socket
from mopidy.config import validators
from mopidy.internal import log, path
def decode(value):
    """Return the unescaped text form of a raw config value.

    Bytes input is decoded as UTF-8 (undecodable bytes survive via
    surrogateescape).  The escape sequences ``\\\\``, ``\\n`` and
    ``\\t`` are replaced by the characters they stand for.
    """
    if isinstance(value, bytes):
        value = value.decode(errors="surrogateescape")
    for literal in ("\\", "\n", "\t"):
        escaped = literal.encode(encoding="unicode-escape").decode()
        value = value.replace(escaped, literal)
    return value
def encode(value):
    """Return a single-line, escaped form of a config value.

    Backslash, newline and tab are rewritten to their ``\\\\``, ``\\n``
    and ``\\t`` escape sequences; bytes input is first decoded as UTF-8
    with surrogateescape.
    """
    if isinstance(value, bytes):
        value = value.decode(errors="surrogateescape")
    pairs = (
        (literal, literal.encode(encoding="unicode-escape").decode())
        for literal in ("\\", "\n", "\t")
    )
    for plain, escaped in pairs:
        value = value.replace(plain, escaped)
    return value
class DeprecatedValue:
    """Sentinel produced by Deprecated.deserialize/serialize to mark a
    config value that is ignored."""
    pass
class ConfigValue:
    """Base class describing how a single config key's value is handled.

    Sub-classes layer type-specific deserialization and validation on
    top.  Each config value is used for three things:

    1. Deserializing a raw string and validating it, raising ValueError
       on failure.
    2. Serializing a value back to a string that can be stored.
    3. Formatting a value for display (useful for masking secrets).

    ``None`` values are never deserialized, serialized or formatted;
    code interacting with the config simply skips them.
    """

    def deserialize(self, value):
        """Cast raw string to appropriate type."""
        return decode(value)

    def serialize(self, value, display=False):
        """Convert value back to string for saving."""
        return "" if value is None else str(value)
class Deprecated(ConfigValue):
    """Placeholder for obsolete config keys.

    Keeps the parser from crashing on values that are no longer in use;
    both directions yield a DeprecatedValue sentinel.
    """

    def deserialize(self, value):
        return DeprecatedValue()

    def serialize(self, value, display=False):
        return DeprecatedValue()
class String(ConfigValue):
    """A string config value.

    Decoded as UTF-8; ``\\n`` and ``\\t`` escapes are honoured on read
    and preserved on write.
    """

    def __init__(self, optional=False, choices=None):
        self._required = not optional
        self._choices = choices

    def deserialize(self, value):
        stripped = decode(value).strip()
        validators.validate_required(stripped, self._required)
        if not stripped:
            return None
        validators.validate_choice(stripped, self._choices)
        return stripped

    def serialize(self, value, display=False):
        return "" if value is None else encode(value)
class Secret(String):
    """A string holding sensitive data (passwords, auth tokens, ...).

    Behaves like String, except the value is masked whenever it is
    serialized for display.
    """

    def __init__(self, optional=False, choices=None):
        self._required = not optional
        # Restricting a secret to a fixed set of choices makes no sense.
        self._choices = None

    def serialize(self, value, display=False):
        if display and value is not None:
            return "********"
        return super().serialize(value, display)
class Integer(ConfigValue):
    """An integer config value with optional bounds and choices."""

    def __init__(
        self, minimum=None, maximum=None, choices=None, optional=False
    ):
        self._required = not optional
        self._minimum = minimum
        self._maximum = maximum
        self._choices = choices

    def deserialize(self, value):
        raw = decode(value)
        validators.validate_required(raw, self._required)
        if not raw:
            return None
        number = int(raw)
        validators.validate_choice(number, self._choices)
        validators.validate_minimum(number, self._minimum)
        validators.validate_maximum(number, self._maximum)
        return number
class Boolean(ConfigValue):
    """A boolean config value.

    ``1``, ``yes``, ``true`` and ``on`` (any casing) mean
    :class:`True`; ``0``, ``no``, ``false`` and ``off`` (any casing)
    mean :class:`False`.
    """

    true_values = ("1", "yes", "true", "on")
    false_values = ("0", "no", "false", "off")

    def __init__(self, optional=False):
        self._required = not optional

    def deserialize(self, value):
        text = decode(value)
        validators.validate_required(text, self._required)
        if not text:
            return None
        lowered = text.lower()
        if lowered in self.true_values:
            return True
        if lowered in self.false_values:
            return False
        raise ValueError(f"invalid value for boolean: {text!r}")

    def serialize(self, value, display=False):
        if value is True:
            return "true"
        if value in (False, None):
            return "false"
        raise ValueError(f"{value!r} is not a boolean")
class List(ConfigValue):
    """A list config value.

    Items may be separated by newlines or commas; newlines take
    precedence when present, and empty items are dropped.
    """

    def __init__(self, optional=False):
        self._required = not optional

    def deserialize(self, value):
        text = decode(value)
        # Newline-separated lists win over comma-separated ones.
        separator = r"\s*\n\s*" if "\n" in text else r"\s*,\s*"
        items = tuple(
            item.strip() for item in re.split(separator, text) if item.strip()
        )
        validators.validate_required(items, self._required)
        return items

    def serialize(self, value, display=False):
        if not value:
            return ""
        encoded = [encode(item) for item in value if item]
        return "\n " + "\n ".join(encoded)
class LogColor(ConfigValue):
    """A log colour name; must be one of ``log.COLORS``."""

    def deserialize(self, value):
        lowered = decode(value).lower()
        validators.validate_choice(lowered, log.COLORS)
        return lowered

    def serialize(self, value, display=False):
        lowered = value.lower()
        if lowered in log.COLORS:
            return encode(lowered)
        return ""
class LogLevel(ConfigValue):
    """A log level name.

    One of ``critical``, ``error``, ``warning``, ``info``, ``debug``,
    ``trace`` or ``all``, in any casing.
    """

    levels = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "info": logging.INFO,
        "debug": logging.DEBUG,
        "trace": log.TRACE_LOG_LEVEL,
        "all": logging.NOTSET,
    }

    def deserialize(self, value):
        name = decode(value).lower()
        validators.validate_choice(name, self.levels.keys())
        return self.levels.get(name)

    def serialize(self, value, display=False):
        # Reverse lookup: numeric level back to its name.
        for name, level in self.levels.items():
            if level == value:
                return encode(name)
        return ""
class Hostname(ConfigValue):
    """Network hostname value.

    Accepts a resolvable hostname, an IP address, or a Unix socket path
    (returned in ``unix:<path>`` form).
    """

    def __init__(self, optional=False):
        self._required = not optional

    # NOTE(review): the extra ``display`` parameter is unusual for a
    # deserialize() method (it mirrors serialize()'s signature) and is
    # never used in the body — looks like a copy/paste leftover; confirm
    # no caller passes it before removing.
    def deserialize(self, value, display=False):
        value = decode(value).strip()
        validators.validate_required(value, self._required)
        if not value:
            return None
        # Values naming a Unix socket are passed through as a path and
        # not resolved via DNS.
        socket_path = path.get_unix_socket_path(value)
        if socket_path is not None:
            path_str = Path(not self._required).deserialize(socket_path)
            return f"unix:{path_str}"
        try:
            socket.getaddrinfo(value, None)
        except OSError:
            raise ValueError("must be a resolveable hostname or valid IP")
        return value
class Port(Integer):
    """A TCP/UDP port number.

    An integer in the range 0-65535; zero tells the kernel to simply
    allocate a port for us.
    """

    def __init__(self, choices=None, optional=False):
        super().__init__(
            minimum=0,
            maximum=0xFFFF,  # 65535, i.e. 2**16 - 1
            choices=choices,
            optional=optional,
        )
class _ExpandedPath(str):
def __new__(cls, original, expanded):
return super().__new__(cls, expanded)
def __init__(self, original, expanded):
self.original = original
class Path(ConfigValue):
    """A file system path config value.

    ``~`` and the ``$XDG_CACHE_DIR``, ``$XDG_CONFIG_DIR``,
    ``$XDG_DATA_DIR`` and ``$XDG_MUSIC_DIR`` variables are expanded
    according to the XDG spec.
    """

    def __init__(self, optional=False):
        self._required = not optional

    def deserialize(self, value):
        raw = decode(value).strip()
        expanded = path.expand_path(raw)
        validators.validate_required(raw, self._required)
        validators.validate_required(expanded, self._required)
        if not raw or expanded is None:
            return None
        return _ExpandedPath(raw, expanded)

    def serialize(self, value, display=False):
        if isinstance(value, _ExpandedPath):
            # Write the user's original text back, not the expansion.
            value = value.original
        if isinstance(value, bytes):
            value = value.decode(errors="surrogateescape")
        return value
|
"""This module is deprecated. Please use :mod:`airflow.providers.qubole.operators.qubole`."""
import warnings
from airflow.providers.qubole.operators.qubole import QuboleOperator # noqa
# Emit a deprecation warning whenever this legacy module is imported;
# stacklevel=2 attributes the warning to the importing module.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.qubole.operators.qubole`.",
    DeprecationWarning,
    stacklevel=2,
)
|
"""
Created on Jan 21, 2020
@author: alfoa, wangc
Lasso model fit with Lars using BIC or AIC for model selection.
"""
from numpy import finfo
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
class LassoLarsIC(ScikitLearnBase):
  """
    Lasso model fit with Lars using BIC or AIC for model selection
  """
  # Static metadata consumed by the surrogate-model framework: this is a
  # regression model and input normalization is not performed here.
  info = {'problemtype':'regression', 'normalize':False}

  def __init__(self):
    """
      Constructor that will appropriately initialize a supervised learning object
      @ In, None
      @ Out, None
    """
    super().__init__()
    # NOTE(review): sklearn is imported here rather than at module level,
    # presumably to defer the dependency until a model is built — confirm.
    import sklearn
    import sklearn.linear_model
    # Store the estimator class (not an instance); instantiation happens
    # later via initializeModel with the parsed settings.
    self.model = sklearn.linear_model.LassoLarsIC

  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    specs = super(LassoLarsIC, cls).getInputSpecification()
    specs.description = r"""The \xmlNode{LassoLarsIC} (\textit{Lasso model fit with Lars using BIC or AIC for model selection})
                        is a Lasso model fit with Lars using BIC or AIC for model selection.
                        The optimization objective for Lasso is:
                        $(1 / (2 * n\_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1$
                        AIC is the Akaike information criterion and BIC is the Bayes Information criterion. Such criteria
                        are useful to select the value of the regularization parameter by making a trade-off between the
                        goodness of fit and the complexity of the model. A good model should explain well the data
                        while being simple.
                        \zNormalizationNotPerformed{LassoLarsIC}
                        """
    specs.addSub(InputData.parameterInputFactory("criterion", contentType=InputTypes.makeEnumType("criterion", "criterionType",['bic', 'aic']),
                                                 descr=r"""The type of criterion to use.""", default='aic'))
    specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
                                                 descr=r"""Whether the intercept should be estimated or not. If False,
                                                 the data is assumed to be already centered.""", default=True))
    specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType,
                                                 descr=r"""This parameter is ignored when fit_intercept is set to False. If True,
                                                 the regressors X will be normalized before regression by subtracting the mean and
                                                 dividing by the l2-norm.""", default=True))
    specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType,
                                                 descr=r"""The maximum number of iterations.""", default=500))
    specs.addSub(InputData.parameterInputFactory("precompute", contentType=InputTypes.StringType,
                                                 descr=r"""Whether to use a precomputed Gram matrix to speed up calculations.
                                                 For sparse input this option is always True to preserve sparsity.""", default='auto'))
    specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType,
                                                 descr=r"""The machine-precision regularization in the computation of the Cholesky
                                                 diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol
                                                 parameter in some iterative optimization-based algorithms, this parameter does not
                                                 control the tolerance of the optimization.""", default=finfo(float).eps))
    specs.addSub(InputData.parameterInputFactory("positive", contentType=InputTypes.BoolType,
                                                 descr=r"""When set to True, forces the coefficients to be positive.""", default=False))
    specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType,
                                                 descr=r"""Amount of verbosity.""", default=False))
    return specs

  def _handleInput(self, paramInput):
    """
      Function to handle the common parts of the distribution parameter input.
      @ In, paramInput, ParameterInput, the already parsed input.
      @ Out, None
    """
    super()._handleInput(paramInput)
    settings, notFound = paramInput.findNodesAndExtractValues(['fit_intercept','max_iter', 'normalize', 'precompute',
                                                              'eps','positive','criterion', 'verbose'])
    # notFound must be empty: every setting above has a default in the
    # input specification, so all of them should always be resolvable.
    assert(not notFound)
    self.initializeModel(settings)
|
import collections
import time
from enum import Enum
from pyflink.datastream import TimerService
from pyflink.datastream.timerservice import InternalTimer, K, N, InternalTimerService
from pyflink.fn_execution.state_impl import RemoteKeyedStateBackend
class InternalTimerImpl(InternalTimer[K, N]):
    """Value object identifying a timer by (timestamp, key, namespace)."""

    def __init__(self, timestamp: int, key: K, namespace: N):
        self._timestamp = timestamp
        self._key = key
        self._namespace = namespace

    def get_timestamp(self) -> int:
        return self._timestamp

    def get_key(self) -> K:
        return self._key

    def get_namespace(self) -> N:
        return self._namespace

    def __hash__(self):
        # Fold the (possibly 64-bit) timestamp onto itself, then combine
        # the key — hashed as a tuple — and the namespace, 31-mixing as
        # each part is added.
        acc = int(self._timestamp ^ (self._timestamp >> 32))
        for part in (hash(tuple(self._key)), hash(self._namespace)):
            acc = 31 * acc + part
        return acc

    def __eq__(self, other):
        return (
            self.__class__ == other.__class__
            and self._timestamp == other._timestamp
            and self._key == other._key
            and self._namespace == other._namespace
        )
class TimerOperandType(Enum):
    """Kinds of timer operations: register/delete for event-time and
    processing-time timers."""
    REGISTER_EVENT_TIMER = 0
    REGISTER_PROC_TIMER = 1
    DELETE_EVENT_TIMER = 2
    DELETE_PROC_TIMER = 3
class InternalTimerServiceImpl(InternalTimerService[N]):
    """
    Internal implementation of InternalTimerService.

    Timer (de)registrations are buffered as keys of ``self.timers`` —
    an ordered dict keyed by (operand, InternalTimerImpl) — preserving
    the order in which they were requested.
    """

    def __init__(self, keyed_state_backend: RemoteKeyedStateBackend):
        self._keyed_state_backend = keyed_state_backend
        self._current_watermark = None
        self.timers = collections.OrderedDict()

    def current_processing_time(self):
        # Wall-clock time in milliseconds.
        return int(time.time() * 1000)

    def current_watermark(self):
        return self._current_watermark

    def advance_watermark(self, watermark: int):
        self._current_watermark = watermark

    def _enqueue(self, operand, namespace, timestamp):
        # All four public operations differ only in their operand tag.
        current_key = self._keyed_state_backend.get_current_key()
        timer = (operand, InternalTimerImpl(timestamp, current_key, namespace))
        self.timers[timer] = None

    def register_processing_time_timer(self, namespace: N, t: int):
        self._enqueue(TimerOperandType.REGISTER_PROC_TIMER, namespace, t)

    def register_event_time_timer(self, namespace: N, t: int):
        self._enqueue(TimerOperandType.REGISTER_EVENT_TIMER, namespace, t)

    def delete_processing_time_timer(self, namespace: N, t: int):
        self._enqueue(TimerOperandType.DELETE_PROC_TIMER, namespace, t)

    def delete_event_time_timer(self, namespace: N, t: int):
        self._enqueue(TimerOperandType.DELETE_EVENT_TIMER, namespace, t)
class TimerServiceImpl(TimerService):
    """
    Internal implementation of TimerService.

    A thin facade that forwards every call to the wrapped
    InternalTimerServiceImpl, always using the ``None`` namespace.
    """

    def __init__(self, internal_timer_service: InternalTimerServiceImpl):
        self._internal = internal_timer_service
        # Alias of the internal service's buffer of pending timer
        # operations.
        self.timers = self._internal.timers

    def current_processing_time(self) -> int:
        return self._internal.current_processing_time()

    def current_watermark(self) -> int:
        return self._internal.current_watermark()

    def advance_watermark(self, wm):
        self._internal.advance_watermark(wm)

    def register_processing_time_timer(self, t: int):
        self._internal.register_processing_time_timer(None, t)

    def register_event_time_timer(self, t: int):
        self._internal.register_event_time_timer(None, t)

    def delete_processing_time_timer(self, t: int):
        self._internal.delete_processing_time_timer(None, t)

    def delete_event_time_timer(self, t: int):
        self._internal.delete_event_time_timer(None, t)
|
from mainapp import create_app

# Build the application via the factory in ``mainapp``.
app = create_app()

if __name__ == '__main__':
    # Listen on all interfaces when run directly.
    app.run(host='0.0.0.0')
|
"""Identity v2 EC2 Credentials action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import utils
from openstackclient.i18n import _ # noqa
class CreateEC2Creds(show.ShowOne):
    """Create EC2 credentials"""

    log = logging.getLogger(__name__ + ".CreateEC2Creds")

    def get_parser(self, prog_name):
        parser = super(CreateEC2Creds, self).get_parser(prog_name)
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_('Specify a project [admin only]'),
        )
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Specify a user [admin only]'),
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)', parsed_args)
        identity = self.app.client_manager.identity

        if parsed_args.project:
            project = utils.find_resource(
                identity.tenants, parsed_args.project).id
        else:
            # Default to the project of the current authentication.
            project = identity.auth_tenant_id

        if parsed_args.user:
            user = utils.find_resource(identity.users, parsed_args.user).id
        else:
            # Default to the currently authenticated user.
            user = identity.auth_user_id

        creds = identity.ec2.create(user, project)
        info = dict(creds._info)
        return zip(*sorted(six.iteritems(info)))
class DeleteEC2Creds(command.Command):
    """Delete EC2 credentials"""

    log = logging.getLogger(__name__ + '.DeleteEC2Creds')

    def get_parser(self, prog_name):
        parser = super(DeleteEC2Creds, self).get_parser(prog_name)
        parser.add_argument(
            'access_key',
            metavar='<access-key>',
            help=_('Credentials access key'),
        )
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Specify a user [admin only]'),
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)', parsed_args)
        identity = self.app.client_manager.identity

        if parsed_args.user:
            user = utils.find_resource(identity.users, parsed_args.user).id
        else:
            # Default to the currently authenticated user.
            user = identity.auth_user_id

        identity.ec2.delete(user, parsed_args.access_key)
class ListEC2Creds(lister.Lister):
    """List EC2 credentials"""

    log = logging.getLogger(__name__ + '.ListEC2Creds')

    def get_parser(self, prog_name):
        parser = super(ListEC2Creds, self).get_parser(prog_name)
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Specify a user [admin only]'),
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)', parsed_args)
        identity = self.app.client_manager.identity

        if parsed_args.user:
            user = utils.find_resource(identity.users, parsed_args.user).id
        else:
            # Default to the currently authenticated user.
            user = identity.auth_user_id

        columns = ('access', 'secret', 'tenant_id', 'user_id')
        column_headers = ('Access', 'Secret', 'Project ID', 'User ID')
        rows = (
            utils.get_item_properties(cred, columns, formatters={})
            for cred in identity.ec2.list(user)
        )
        return (column_headers, rows)
class ShowEC2Creds(show.ShowOne):
    """Show EC2 credentials"""

    log = logging.getLogger(__name__ + '.ShowEC2Creds')

    def get_parser(self, prog_name):
        parser = super(ShowEC2Creds, self).get_parser(prog_name)
        parser.add_argument(
            'access_key',
            metavar='<access-key>',
            help=_('Credentials access key'),
        )
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Specify a user [admin only]'),
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)', parsed_args)
        identity = self.app.client_manager.identity

        if parsed_args.user:
            user = utils.find_resource(identity.users, parsed_args.user).id
        else:
            # Default to the currently authenticated user.
            user = identity.auth_user_id

        creds = identity.ec2.get(user, parsed_args.access_key)
        info = dict(creds._info)
        return zip(*sorted(six.iteritems(info)))
|
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from climate.openstack.common import gettextutils
from climate.openstack.common import importutils
# Install the _() translation function for the 'climate' domain.
gettextutils.install('climate')

# Names of the oslo.config option classes recognised by this generator.
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"

# Human-readable type description for each option class name.
OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    DICTOPT: 'dict value',
    MULTISTROPT: 'multi valued',
}

# Extracts the option class name from repr(type(opt)).
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT, DICTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
# Repository root, four directory levels up from this file.
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       "../../../../"))
# Wrap width for option help text in the generated sample config.
WORDWRAP_WIDTH = 60
def generate(argv):
    """Print a sample configuration listing to stdout.

    Options are gathered from modules named with ``-m``, from libraries
    named with ``-l`` (via their 'oslo.config.opts' entry points), and
    from the source files given as positional arguments; they are then
    printed group by group, DEFAULT first.
    """
    parser = argparse.ArgumentParser(
        description='generate sample configuration file',
    )
    parser.add_argument('-m', dest='modules', action='append')
    parser.add_argument('-l', dest='libraries', action='append')
    parser.add_argument('srcfiles', nargs='*')
    parsed_args = parser.parse_args(argv)

    # Group source-file module paths by their top-level package name.
    mods_by_pkg = dict()
    for filepath in parsed_args.srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    if parsed_args.modules:
        for module_name in parsed_args.modules:
            module = _import_module(module_name)
            if module:
                for group, opts in _list_opts(module):
                    opts_by_group.setdefault(group, []).append((module_name,
                                                                opts))

    # Look for entry points defined in libraries (or applications) for
    # option discovery, and include their return values in the output.
    #
    # Each entry point should be a function returning an iterable
    # of pairs with the group name (or None for the default group)
    # and the list of Opt instances for that group.
    if parsed_args.libraries:
        loader = stevedore.named.NamedExtensionManager(
            'oslo.config.opts',
            names=list(set(parsed_args.libraries)),
            invoke_on_load=False,
        )
        for ext in loader:
            for group, opts in ext.plugin():
                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
                opt_list.append((ext.name, opts))

    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]
            mod_obj = _import_module(mod_str)
            if not mod_obj:
                raise RuntimeError("Unable to import module %s" % mod_str)
            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    # DEFAULT is always printed first, remaining groups alphabetically.
    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group in sorted(opts_by_group.keys()):
        print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
    """Import ``mod_str`` and return the module object, or None on error.

    Module paths beginning with ``bin.`` are loaded as source files from
    the ``bin`` directory instead of via the normal import machinery.
    """
    try:
        if not mod_str.startswith('bin.'):
            return importutils.import_module(mod_str)
        name = mod_str[4:]
        imp.load_source(name, os.path.join('bin', name))
        return sys.modules[name]
    except Exception as e:
        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
        return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
    """Return the name of the config group that ``opt`` belongs to."""
    # The DEFAULT group is checked first.
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'
    # Otherwise scan every named group registered on CONF.
    for attr in cfg.CONF.values():
        if isinstance(attr, cfg.CONF.GroupAttr):
            if _is_in_group(opt, attr._group):
                return attr._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )
def _list_opts(obj):
    """Return (group, [opts]) pairs for every option defined on ``obj``."""

    def _is_single_opt(candidate):
        # SubCommandOpt instances are excluded even though they are Opts.
        return isinstance(candidate, cfg.Opt) and not isinstance(
            candidate, cfg.SubCommandOpt)

    found = list()
    for attr_name in dir(obj):
        attr = getattr(obj, attr_name)
        if _is_single_opt(attr):
            found.append(attr)
        elif isinstance(attr, list) and all(_is_single_opt(x) for x in attr):
            found.extend(attr)

    grouped = {}
    for opt in found:
        grouped.setdefault(_guess_groups(opt, obj), []).append(opt)
    return grouped.items()
def print_group_opts(group, opts_by_module):
    """Print a ``[group]`` section header followed by each module's
    options, one commented entry per option."""
    print("[%s]" % group)
    print('')
    for module_name, opts in opts_by_module:
        print('#')
        print('# Options defined in %s' % module_name)
        print('#')
        print('')
        for opt in opts:
            _print_opt(opt)
        print('')
def _get_my_ip():
    """Best-effort detection of this host's outbound IPv4 address.

    Connects a UDP socket towards a public address and reads back the
    local address the kernel chose; returns None when that fails.
    """
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect(('8.8.8.8', 80))
        addr = probe.getsockname()[0]
        probe.close()
        return addr
    except socket.error:
        return None
def _sanitize_default(name, value):
    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
    if value.startswith(sys.prefix):
        # NOTE(jd) Don't use os.path.join, because it is likely to think the
        # second part is an absolute pathname and therefore drop the first
        # part.
        return os.path.normpath("/usr/" + value[len(sys.prefix):])
    if value.startswith(BASEDIR):
        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
    if BASEDIR in value:
        return value.replace(BASEDIR, '')
    if value == _get_my_ip():
        return '10.0.0.1'
    if value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
        return 'climate'
    if value.strip() != value:
        # Quote values with leading/trailing whitespace so it survives.
        return '"%s"' % value
    return value
def _print_opt(opt):
    """Print the commented sample-config entry for a single option:
    wrapped help text, deprecated aliases, then ``#name=default``."""
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        opt_help = ""
    opt_type = None
    try:
        # The option class name (e.g. "StrOpt") is extracted from the
        # repr of the option's type.
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help = u'%s (%s)' % (opt_help,
                             OPT_TYPES[opt_type])
    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
    if opt.deprecated_opts:
        for deprecated_opt in opt.deprecated_opts:
            if deprecated_opt.name:
                deprecated_group = (deprecated_opt.group if
                                    deprecated_opt.group else "DEFAULT")
                print('# Deprecated group/name - [%s]/%s' %
                      (deprecated_group,
                       deprecated_opt.name))
    try:
        # Each option class serializes its default differently; the
        # asserts double-check the default matches the declared type.
        if opt_default is None:
            print('#%s=<None>' % opt_name)
        elif opt_type == STROPT:
            assert(isinstance(opt_default, six.string_types))
            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
                                                          opt_default)))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, str(opt_default).lower()))
        elif opt_type == INTOPT:
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print('#%s=%s' % (opt_name, ','.join(opt_default)))
        elif opt_type == DICTOPT:
            assert(isinstance(opt_default, dict))
            opt_default_strlist = [str(key) + ':' + str(value)
                                   for (key, value) in opt_default.items()]
            print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print('#%s=%s' % (opt_name, default))
        print('')
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)
def main():
    """Command-line entry point: generate the sample configuration."""
    generate(sys.argv[1:])

if __name__ == '__main__':
    main()
|
"""
IP Types
"""
import logging
from ipaddress import ip_address
from socket import AF_INET, AF_INET6
from vpp_papi import VppEnum
from vpp_object import VppObject
# Python 2/3 compatibility shim: ``unicode`` only exists on Python 2;
# on Python 3 every str is already unicode.
try:
    text_type = unicode
except NameError:
    text_type = str

# Module-level logger.
_log = logging.getLogger(__name__)
class DpoProto:
    """Protocol identifiers for data-path objects."""
    DPO_PROTO_IP4 = 0
    DPO_PROTO_IP6 = 1
    DPO_PROTO_MPLS = 2
    DPO_PROTO_ETHERNET = 3
    DPO_PROTO_BIER = 4
    DPO_PROTO_NSH = 5
INVALID_INDEX = 0xffffffff  # all-ones 32-bit sentinel meaning "no index"
def get_dpo_proto(addr):
    """Return the DpoProto constant matching the IP version of ``addr``."""
    version = ip_address(addr).version
    return DpoProto.DPO_PROTO_IP6 if version == 6 else DpoProto.DPO_PROTO_IP4
class VppIpAddressUnion():
    """Wraps an IPv4/IPv6 address for use in VPP API address unions."""

    def __init__(self, addr):
        self.addr = addr
        self.ip_addr = ip_address(text_type(self.addr))

    def encode(self):
        # Pick the union member matching the address family.
        key = 'ip6' if self.version == 6 else 'ip4'
        return {key: self.ip_addr}

    @property
    def version(self):
        return self.ip_addr.version

    @property
    def address(self):
        return self.addr

    @property
    def length(self):
        return self.ip_addr.max_prefixlen

    @property
    def bytes(self):
        return self.ip_addr.packed

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.ip_addr == other.ip_addr
        if hasattr(other, "ip4") and hasattr(other, "ip6"):
            # vl_api_address_union_t: compare against the member for
            # our own address family.
            if 4 == self.version:
                return self.ip_addr == other.ip4
            return self.ip_addr == other.ip6
        raise Exception("Comparing VppIpAddressUnions:%s"
                        " with incomparable type: %s",
                        self, other)

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        return str(self.ip_addr)
class VppIpMPrefix():
    """An IP multicast (S,G) prefix: source address, group address and
    group prefix length.

    :param saddr: source address (string)
    :param gaddr: group address (string, same family as ``saddr``)
    :param glen: group prefix length in bits
    :raises ValueError: if the two addresses' families differ
    """

    def __init__(self, saddr, gaddr, glen):
        self.saddr = saddr
        self.gaddr = gaddr
        self.glen = glen
        if ip_address(self.saddr).version != \
                ip_address(self.gaddr).version:
            raise ValueError('Source and group addresses must be of the '
                             'same address family.')

    def encode(self):
        """Encode as a vl_api_mprefix_t-shaped dict for the VPP API."""
        return {
            'af': ip_address(self.gaddr).vapi_af,
            'grp_address': {
                ip_address(self.gaddr).vapi_af_name: self.gaddr
            },
            'src_address': {
                ip_address(self.saddr).vapi_af_name: self.saddr
            },
            'grp_address_length': self.glen,
        }

    @property
    def length(self):
        return self.glen

    @property
    def version(self):
        return ip_address(self.gaddr).version

    def __str__(self):
        return "(%s,%s)/%d" % (self.saddr, self.gaddr, self.glen)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            # BUG FIX: this previously compared ``self.saddr`` against
            # ``other.gaddr``, which made equal prefixes compare unequal
            # whenever the source and group addresses differed.
            return (self.glen == other.glen and
                    self.gaddr == other.gaddr and
                    self.saddr == other.saddr)
        elif (hasattr(other, "grp_address_length") and
              hasattr(other, "grp_address") and
              hasattr(other, "src_address")):
            # vl_api_mprefix_t
            if 4 == self.version:
                return (self.glen == other.grp_address_length and
                        self.gaddr == str(other.grp_address.ip4) and
                        self.saddr == str(other.src_address.ip4))
            else:
                return (self.glen == other.grp_address_length and
                        self.gaddr == str(other.grp_address.ip6) and
                        self.saddr == str(other.src_address.ip6))
        return NotImplemented
class VppIpPuntPolicer(VppObject):
    """Applies a policer to IPv4/IPv6 punted packets."""

    def __init__(self, test, policer_index, is_ip6=False):
        self._test = test
        self._policer_index = policer_index
        self._is_ip6 = is_ip6

    def add_vpp_config(self):
        self._test.vapi.ip_punt_police(policer_index=self._policer_index,
                                       is_ip6=self._is_ip6, is_add=True)

    def remove_vpp_config(self):
        self._test.vapi.ip_punt_police(policer_index=self._policer_index,
                                       is_ip6=self._is_ip6, is_add=False)

    def query_vpp_config(self):
        # BUG FIX: the body was a bare ``NotImplemented`` expression — a
        # no-op that silently returned None.  Raise explicitly so callers
        # cannot mistake "not implemented" for "not configured".
        raise NotImplementedError
class VppIpPuntRedirect(VppObject):
    """Redirect punted IP traffic out of an interface towards a nexthop."""
    def __init__(self, test, rx_index, tx_index, nh_addr):
        self._test = test
        self._rx_index = rx_index
        self._tx_index = tx_index
        self._nh_addr = ip_address(nh_addr)
    def encode(self):
        """Return the API (punt) representation of this redirect."""
        return {"rx_sw_if_index": self._rx_index,
                "tx_sw_if_index": self._tx_index, "nh": self._nh_addr}
    def add_vpp_config(self):
        self._test.vapi.ip_punt_redirect(punt=self.encode(), is_add=True)
        self._test.registry.register(self, self._test.logger)
    def remove_vpp_config(self):
        self._test.vapi.ip_punt_redirect(punt=self.encode(), is_add=False)
    def get_vpp_config(self):
        """Dump the redirects configured on the RX interface."""
        is_ipv6 = (self._nh_addr.version == 6)
        return self._test.vapi.ip_punt_redirect_dump(
            sw_if_index=self._rx_index, is_ipv6=is_ipv6)
    def query_vpp_config(self):
        return bool(self.get_vpp_config())
class VppIpPathMtu(VppObject):
    """A path-MTU entry for a nexthop in a FIB table."""
    def __init__(self, test, nh, pmtu, table_id=0):
        self._test = test
        self.nh = nh
        self.pmtu = pmtu
        self.table_id = table_id
    def _pmtu_dict(self, path_mtu):
        # Shared wire encoding for add/modify/remove calls.
        return {'nh': self.nh,
                'table_id': self.table_id,
                'path_mtu': path_mtu}
    def add_vpp_config(self):
        self._test.vapi.ip_path_mtu_update(pmtu=self._pmtu_dict(self.pmtu))
        self._test.registry.register(self, self._test.logger)
        return self
    def modify(self, pmtu):
        """Update this entry's PMTU value in place."""
        self.pmtu = pmtu
        self._test.vapi.ip_path_mtu_update(pmtu=self._pmtu_dict(self.pmtu))
        return self
    def remove_vpp_config(self):
        # A path MTU of zero deletes the entry.
        self._test.vapi.ip_path_mtu_update(pmtu=self._pmtu_dict(0))
    def query_vpp_config(self):
        details = list(self._test.vapi.vpp.details_iter(
            self._test.vapi.ip_path_mtu_get))
        for detail in details:
            matches = (self.nh == str(detail.pmtu.nh)
                       and self.table_id == detail.pmtu.table_id
                       and self.pmtu == detail.pmtu.path_mtu)
            if matches:
                return True
        return False
    def object_id(self):
        return "ip-path-mtu-%d-%s-%d" % (self.table_id,
                                         self.nh,
                                         self.pmtu)
    def __str__(self):
        return self.object_id()
|
import numpy as np
from math import sin, pi, cos
from banti.glyph import Glyph
# Demo: rasterise an arc into an image, wrap it in a Glyph, then stamp the
# glyph at points around a circle and accumulate via glyph addition.
half = 40
side = 2 * half + 1
canvas = np.zeros((side, side))
# Draw a 270-degree arc of radius `half` centred in the square image.
for angle in range(-135, 135):
    row = round(half + half * cos(pi * angle / 180))
    col = round(half + half * sin(pi * angle / 180))
    canvas[row, col] = 1
radius = 1 * half
glyph = Glyph(['O', 0, 0, side, side, 0, 0, 0, 0, None])
glyph.set_pix(canvas)
combined = Glyph()
# Reposition the glyph every 15 degrees around a circle and merge copies.
for angle in range(0, 360, 15):
    px = round(radius + radius * cos(pi * angle / 180))
    py = round(radius + radius * sin(pi * angle / 180))
    glyph.set_xy_wh((px, py, side, side))
    combined = combined + glyph
print(glyph)
print(combined)
|
from __future__ import absolute_import
import logging
import os.path
import re
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.exceptions import BadCommand, InstallationError
from pip._internal.utils.misc import display_path, hide_url
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs.versioncontrol import (
RemoteNotFoundError,
VersionControl,
find_path_to_setup_from_repo_root,
vcs,
)
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import AuthInfo, RevOptions
# Short aliases for the vendored urllib URL-splitting helpers used below.
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
# Matches a full 40-character hexadecimal SHA-1, as printed by git.
HASH_REGEX = re.compile('^[a-fA-F0-9]{40}$')
def looks_like_hash(sha):
    """Return True when *sha* has the form of a full 40-hex-digit SHA-1."""
    return HASH_REGEX.match(sha) is not None
class Git(VersionControl):
    """Version-control backend for Git (handles the ``git+*`` URL schemes)."""
    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    schemes = (
        'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
    )
    # Prevent the user's environment variables from interfering with pip:
    # https://github.com/pypa/pip/issues/1130
    unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
    default_arg_rev = 'HEAD'
    @staticmethod
    def get_base_rev_args(rev):
        """Return the rev as git command-line arguments."""
        return [rev]
    def is_immutable_rev_checkout(self, url, dest):
        # type: (str, str) -> bool
        """Return whether dest is checked out at an immutable revision
        (a commit hash that is neither a branch nor a tag)."""
        _, rev_options = self.get_url_rev_options(hide_url(url))
        if not rev_options.rev:
            return False
        if not self.is_commit_id_equal(dest, rev_options.rev):
            # the current commit is different from rev,
            # which means rev was something else than a commit hash
            return False
        # return False in the rare case rev is both a commit hash
        # and a tag or a branch; we don't want to cache in that case
        # because that branch/tag could point to something else in the future
        is_tag_or_branch = bool(
            self.get_revision_sha(dest, rev_options.rev)[0]
        )
        return not is_tag_or_branch
    def get_git_version(self):
        """Return the installed git version parsed by packaging's parser."""
        VERSION_PFX = 'git version '
        version = self.run_command(
            ['version'], show_stdout=False, stdout_only=True
        )
        if version.startswith(VERSION_PFX):
            version = version[len(VERSION_PFX):].split()[0]
        else:
            version = ''
        # get first 3 positions of the git version because
        # on windows it is x.y.z.windows.t, and this parses as
        # LegacyVersion which always smaller than a Version.
        version = '.'.join(version.split('.')[:3])
        return parse_version(version)
    @classmethod
    def get_current_branch(cls, location):
        """
        Return the current branch, or None if HEAD isn't at a branch
        (e.g. detached HEAD).
        """
        # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
        # HEAD rather than a symbolic ref. In addition, the -q causes the
        # command to exit with status code 1 instead of 128 in this case
        # and to suppress the message to stderr.
        args = ['symbolic-ref', '-q', 'HEAD']
        output = cls.run_command(
            args,
            extra_ok_returncodes=(1, ),
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        )
        ref = output.strip()
        if ref.startswith('refs/heads/'):
            return ref[len('refs/heads/'):]
        return None
    def export(self, location, url):
        # type: (str, HiddenText) -> None
        """Export the Git repository at the url to the destination location"""
        if not location.endswith('/'):
            location = location + '/'
        with TempDirectory(kind="export") as temp_dir:
            self.unpack(temp_dir.path, url=url)
            self.run_command(
                ['checkout-index', '-a', '-f', '--prefix', location],
                show_stdout=False, cwd=temp_dir.path
            )
    @classmethod
    def get_revision_sha(cls, dest, rev):
        """
        Return (sha_or_none, is_branch), where sha_or_none is a commit hash
        if the revision names a remote branch or tag, otherwise None.

        Args:
          dest: the repository directory.
          rev: the revision name.
        """
        # Pass rev to pre-filter the list.
        output = cls.run_command(
            ['show-ref', rev],
            cwd=dest,
            show_stdout=False,
            stdout_only=True,
            on_returncode='ignore',
        )
        refs = {}
        for line in output.strip().splitlines():
            try:
                sha, ref = line.split()
            except ValueError:
                # Include the offending line to simplify troubleshooting if
                # this error ever occurs.
                raise ValueError('unexpected show-ref line: {!r}'.format(line))
            refs[ref] = sha
        branch_ref = 'refs/remotes/origin/{}'.format(rev)
        tag_ref = 'refs/tags/{}'.format(rev)
        sha = refs.get(branch_ref)
        if sha is not None:
            return (sha, True)
        sha = refs.get(tag_ref)
        return (sha, False)
    @classmethod
    def _should_fetch(cls, dest, rev):
        """
        Return true if rev is a ref or is a commit that we don't have locally.

        Branches and tags are not considered in this method because they are
        assumed to be always available locally (which is a normal outcome of
        ``git clone`` and ``git fetch --tags``).
        """
        if rev.startswith("refs/"):
            # Always fetch remote refs.
            return True
        if not looks_like_hash(rev):
            # Git fetch would fail with abbreviated commits.
            return False
        if cls.has_commit(dest, rev):
            # Don't fetch if we have the commit locally.
            return False
        return True
    @classmethod
    def resolve_revision(cls, dest, url, rev_options):
        # type: (str, HiddenText, RevOptions) -> RevOptions
        """
        Resolve a revision to a new RevOptions object with the SHA1 of the
        branch, tag, or ref if found.

        Args:
          rev_options: a RevOptions object.
        """
        rev = rev_options.arg_rev
        # The arg_rev property's implementation for Git ensures that the
        # rev return value is always non-None.
        assert rev is not None
        sha, is_branch = cls.get_revision_sha(dest, rev)
        if sha is not None:
            rev_options = rev_options.make_new(sha)
            rev_options.branch_name = rev if is_branch else None
            return rev_options
        # Do not show a warning for the common case of something that has
        # the form of a Git commit hash.
        if not looks_like_hash(rev):
            logger.warning(
                "Did not find branch or tag '%s', assuming revision or ref.",
                rev,
            )
        if not cls._should_fetch(dest, rev):
            return rev_options
        # fetch the requested revision
        cls.run_command(
            make_command('fetch', '-q', url, rev_options.to_args()),
            cwd=dest,
        )
        # Change the revision to the SHA of the ref we fetched
        sha = cls.get_revision(dest, rev='FETCH_HEAD')
        rev_options = rev_options.make_new(sha)
        return rev_options
    @classmethod
    def is_commit_id_equal(cls, dest, name):
        """
        Return whether the current commit hash equals the given name.

        Args:
          dest: the repository directory.
          name: a string name.
        """
        if not name:
            # Then avoid an unnecessary subprocess call.
            return False
        return cls.get_revision(dest) == name
    def fetch_new(self, dest, url, rev_options):
        # type: (str, HiddenText, RevOptions) -> None
        """Clone url into dest and check out the requested revision."""
        rev_display = rev_options.to_display()
        logger.info('Cloning %s%s to %s', url, rev_display, display_path(dest))
        self.run_command(make_command('clone', '-q', url, dest))
        if rev_options.rev:
            # Then a specific revision was requested.
            rev_options = self.resolve_revision(dest, url, rev_options)
            branch_name = getattr(rev_options, 'branch_name', None)
            if branch_name is None:
                # Only do a checkout if the current commit id doesn't match
                # the requested revision.
                if not self.is_commit_id_equal(dest, rev_options.rev):
                    cmd_args = make_command(
                        'checkout', '-q', rev_options.to_args(),
                    )
                    self.run_command(cmd_args, cwd=dest)
            elif self.get_current_branch(dest) != branch_name:
                # Then a specific branch was requested, and that branch
                # is not yet checked out.
                track_branch = 'origin/{}'.format(branch_name)
                cmd_args = [
                    'checkout', '-b', branch_name, '--track', track_branch,
                ]
                self.run_command(cmd_args, cwd=dest)
        #: repo may contain submodules
        self.update_submodules(dest)
    def switch(self, dest, url, rev_options):
        # type: (str, HiddenText, RevOptions) -> None
        """Point dest's origin remote at url and check out rev_options."""
        self.run_command(
            make_command('config', 'remote.origin.url', url),
            cwd=dest,
        )
        cmd_args = make_command('checkout', '-q', rev_options.to_args())
        self.run_command(cmd_args, cwd=dest)
        self.update_submodules(dest)
    def update(self, dest, url, rev_options):
        # type: (str, HiddenText, RevOptions) -> None
        """Fetch from the remote and hard-reset dest to rev_options."""
        # First fetch changes from the default remote
        if self.get_git_version() >= parse_version('1.9.0'):
            # fetch tags in addition to everything else
            self.run_command(['fetch', '-q', '--tags'], cwd=dest)
        else:
            self.run_command(['fetch', '-q'], cwd=dest)
        # Then reset to wanted revision (maybe even origin/master)
        rev_options = self.resolve_revision(dest, url, rev_options)
        cmd_args = make_command('reset', '--hard', '-q', rev_options.to_args())
        self.run_command(cmd_args, cwd=dest)
        #: update submodules
        self.update_submodules(dest)
    @classmethod
    def get_remote_url(cls, location):
        """
        Return URL of the first remote encountered.

        Raises RemoteNotFoundError if the repository does not have a remote
        url configured.
        """
        # We need to pass 1 for extra_ok_returncodes since the command
        # exits with return code 1 if there are no matching lines.
        stdout = cls.run_command(
            ['config', '--get-regexp', r'remote\..*\.url'],
            extra_ok_returncodes=(1, ),
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        )
        remotes = stdout.splitlines()
        try:
            found_remote = remotes[0]
        except IndexError:
            raise RemoteNotFoundError
        # Prefer the remote named "origin" when one exists.
        for remote in remotes:
            if remote.startswith('remote.origin.url '):
                found_remote = remote
                break
        url = found_remote.split(' ')[1]
        return url.strip()
    @classmethod
    def has_commit(cls, location, rev):
        """
        Check if rev is a commit that is available in the local repository.
        """
        try:
            # NOTE(review): the "sha^" prefix to rev-parse --verify looks
            # unusual -- confirm it is intentional before changing it.
            cls.run_command(
                ['rev-parse', '-q', '--verify', "sha^" + rev],
                cwd=location,
                log_failed_cmd=False,
            )
        except InstallationError:
            return False
        else:
            return True
    @classmethod
    def get_revision(cls, location, rev=None):
        """Return the full SHA of rev (default HEAD) in the local repo."""
        if rev is None:
            rev = 'HEAD'
        current_rev = cls.run_command(
            ['rev-parse', rev],
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        )
        return current_rev.strip()
    @classmethod
    def get_subdirectory(cls, location):
        """
        Return the path to setup.py, relative to the repo root.
        Return None if setup.py is in the repo root.
        """
        # find the repo root
        git_dir = cls.run_command(
            ['rev-parse', '--git-dir'],
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        ).strip()
        if not os.path.isabs(git_dir):
            git_dir = os.path.join(location, git_dir)
        repo_root = os.path.abspath(os.path.join(git_dir, '..'))
        return find_path_to_setup_from_repo_root(location, repo_root)
    @classmethod
    def get_url_rev_and_auth(cls, url):
        # type: (str) -> Tuple[str, Optional[str], AuthInfo]
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with a ssh:// scheme (e.g. GitHub). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        # Works around an apparent Git bug
        # (see https://article.gmane.org/gmane.comp.version-control.git/146500)
        scheme, netloc, path, query, fragment = urlsplit(url)
        if scheme.endswith('file'):
            initial_slashes = path[:-len(path.lstrip('/'))]
            newpath = (
                initial_slashes +
                urllib_request.url2pathname(path)
                .replace('\\', '/').lstrip('/')
            )
            after_plus = scheme.find('+') + 1
            url = scheme[:after_plus] + urlunsplit(
                (scheme[after_plus:], netloc, newpath, query, fragment),
            )
        if '://' not in url:
            assert 'file:' not in url
            url = url.replace('git+', 'git+ssh://')
            url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
            url = url.replace('ssh://', '')
        else:
            url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
        return url, rev, user_pass
    @classmethod
    def update_submodules(cls, location):
        """Initialise and update submodules if a .gitmodules file exists."""
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        cls.run_command(
            ['submodule', 'update', '--init', '--recursive', '-q'],
            cwd=location,
        )
    @classmethod
    def get_repository_root(cls, location):
        """Return the top level of the repo containing location, or None if
        location is not inside a git checkout (or git is unavailable)."""
        loc = super(Git, cls).get_repository_root(location)
        if loc:
            return loc
        try:
            r = cls.run_command(
                ['rev-parse', '--show-toplevel'],
                cwd=location,
                show_stdout=False,
                stdout_only=True,
                on_returncode='raise',
                log_failed_cmd=False,
            )
        except BadCommand:
            logger.debug("could not determine if %s is under git control "
                         "because git is not available", location)
            return None
        except InstallationError:
            return None
        return os.path.normpath(r.rstrip('\r\n'))
vcs.register(Git)
|
import os
import re
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from essential.db import exception
from essential.gettextutils import _
def _get_unique_constraints(self, table):
    """Retrieve information about existing unique constraints of the table

    This feature is needed for _recreate_table() to work properly.
    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

    :param table: sqlalchemy Table bound to an engine
    :returns: list of UniqueConstraint objects parsed out of the table's
              CREATE TABLE statement stored in sqlite_master
    """
    data = table.metadata.bind.execute(
        """SELECT sql
           FROM sqlite_master
        WHERE
            type='table' AND
            name=:table_name""",
        table_name=table.name
    ).fetchone()[0]
    # BUG FIX: use a raw string so \w and \( are regex escapes rather than
    # invalid string escape sequences (DeprecationWarning today, a
    # SyntaxError in future Python versions).
    UNIQUE_PATTERN = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
    return [
        UniqueConstraint(
            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(UNIQUE_PATTERN, data)
    ]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
    """Recreate the table properly

    Unlike the corresponding original method of sqlalchemy-migrate this one
    doesn't drop existing unique constraints when creating a new one.

    :param table: sqlalchemy Table being rebuilt
    :param column: column being added/dropped/altered, if any
    :param delta: column change description, if any
    :param omit_uniques: names of unique constraints to leave out of the
        rebuilt table
    """
    table_name = self.preparer.format_table(table)
    # we remove all indexes so as not to have
    # problems during copy and re-create
    for index in table.indexes:
        index.drop()
    # reflect existing unique constraints
    for uc in self._get_unique_constraints(table):
        table.append_constraint(uc)
    # omit given unique constraints when creating a new table if required
    table.constraints = set([
        cons for cons in table.constraints
        if omit_uniques is None or cons.name not in omit_uniques
    ])
    # SQLite cannot ALTER constraints in place: rename the old table aside,
    # create the new schema, copy the rows across, then drop the original.
    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
    self.execute()
    insertion_string = self._modify_table(table, column, delta)
    table.create(bind=self.connection)
    self.append(insertion_string % {'table_name': table_name})
    self.execute()
    self.append('DROP TABLE migration_tmp')
    self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
    """Drop the given unique constraint

    The corresponding original method of sqlalchemy-migrate just
    raises NotImplemented error
    """
    constraint = p[0]
    # SQLite cannot drop a constraint directly; rebuild the table without it.
    self.recreate_table(constraint.table, omit_uniques=[constraint.name])
def patch_migrate():
    """A workaround for SQLite's inability to alter things

    SQLite abilities to alter tables are very limited (please read
    http://www.sqlite.org/lang_altertable.html for more details).
    E. g. one can't drop a column or a constraint in SQLite. The
    workaround for this is to recreate the original table omitting
    the corresponding constraint (or column).

    sqlalchemy-migrate library has recreate_table() method that
    implements this workaround, but it does it wrong:

    - information about unique constraints of a table
      is not retrieved. So if you have a table with one
      unique constraint and a migration adding another one
      you will end up with a table that has only the
      latter unique constraint, and the former will be lost

    - dropping of unique constraints is not supported at all

    The proper way to fix this is to provide a pull-request to
    sqlalchemy-migrate, but the project seems to be dead. So we
    can go on with monkey-patching of the lib at least for now.
    """
    # this patch is needed to ensure that recreate_table() doesn't drop
    # existing unique constraints of the table when creating a new one
    helper_cls = sqlite.SQLiteHelper
    helper_cls.recreate_table = _recreate_table
    helper_cls._get_unique_constraints = _get_unique_constraints
    # this patch is needed to be able to drop existing unique constraints
    constraint_cls = sqlite.SQLiteConstraintDropper
    constraint_cls.visit_migrate_unique_constraint = \
        _visit_migrate_unique_constraint
    # Rebase the dropper so it also inherits the ANSI column-drop and the
    # SQLite constraint-generation behaviour used by the patched methods.
    constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
                                sqlite.SQLiteConstraintGenerator)
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository.
    :param version: Database will upgrade/downgrade until this version.
                    If None - database will update to the latest
                    available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))
    repo = _find_migrate_repo(abs_path)
    current = db_version(engine, abs_path, init_version)
    if sanity_check:
        _db_schema_sanity_check(engine)
    # Walk the schema down only when an explicit, older (or equal) target
    # version was requested; otherwise upgrade towards the target/latest.
    if version is not None and version <= current:
        return versioning_api.downgrade(engine, repo, version)
    return versioning_api.upgrade(engine, repo, version)
def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    :param engine: SQLAlchemy engine instance for a given database
    """
    if engine.name != 'mysql':
        # Only MySQL schemas need the utf8 collation check.
        return
    onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                    'from information_schema.TABLES '
                    'where TABLE_SCHEMA=%s and '
                    'TABLE_COLLATION NOT LIKE "%%utf8%%"')
    rows = engine.execute(onlyutf8_sql, engine.url.database)
    offenders = [row[0] for row in rows]
    if offenders:
        raise ValueError(_('Tables "%s" have non utf8 collation, '
                           'please make sure all tables are CHARSET=utf8'
                           ) % ','.join(offenders))
def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param init_version: Version used to stamp an uncontrolled database
    """
    repo = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repo)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        existing = meta.tables
        # A populated schema that is not alembic-managed cannot safely be
        # stamped automatically; require a manual stamp instead.
        if existing and 'alembic_version' not in existing:
            raise exception.DbMigrationError(
                message=_(
                    "The database is not under version control, but has "
                    "tables. Please stamp the current version of the schema "
                    "manually."))
        db_version_control(engine, abs_path, version=init_version)
        return versioning_api.db_version(engine, repo)
def db_version_control(engine, abs_path, version=None):
    """Mark a database as under this repository's version control.

    Once a database is under version control, schema changes should
    only be done via change scripts in this repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version: Initial database version
    :returns: the version the database was stamped with
    """
    repo = _find_migrate_repo(abs_path)
    versioning_api.version_control(engine, repo, version)
    return version
def _find_migrate_repo(abs_path):
    """Get the project's change script repository

    :param abs_path: Absolute path to migrate repository
    :returns: migrate Repository object for abs_path
    :raises DbMigrationError: if abs_path does not exist
    """
    if not os.path.exists(abs_path):
        # Consistency fix: raise with the ``message=`` keyword and the
        # gettext wrapper, matching every other DbMigrationError in this
        # module.
        raise exception.DbMigrationError(
            message=_("Path %s not found") % abs_path)
    return Repository(abs_path)
|
"""WebElement implementation."""
import hashlib
import os
import zipfile
try:
from StringIO import StringIO as IOStream
except ImportError: # 3+
from io import BytesIO as IOStream
import base64
from .command import Command
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
# Python 2 compatibility: rebind ``str`` to ``basestring`` so isinstance
# checks below accept both byte and unicode strings.
try:
    str = basestring
except NameError:
    # Python 3: ``basestring`` does not exist; the builtin ``str`` is fine.
    pass
class WebElement(object):
    """Represents an HTML element.

    Generally, all interesting operations to do with interacting with a page
    will be performed through this interface. Each method forwards a wire
    command to the parent driver via :meth:`_execute`."""
    def __init__(self, parent, id_):
        # The driver (or element) that located this element; it executes
        # wire-protocol commands on our behalf.
        self._parent = parent
        # Server-side element reference sent with every command.
        self._id = id_
    @property
    def tag_name(self):
        """Gets this element's tagName property."""
        return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
    @property
    def text(self):
        """Gets the text of the element."""
        return self._execute(Command.GET_ELEMENT_TEXT)['value']
    def click(self):
        """Clicks the element."""
        self._execute(Command.CLICK_ELEMENT)
    def submit(self):
        """Submits a form."""
        self._execute(Command.SUBMIT_ELEMENT)
    def clear(self):
        """Clears the text if it's a text entry element."""
        self._execute(Command.CLEAR_ELEMENT)
    def get_attribute(self, name):
        """Gets the attribute value.

        :Args:
            - name - name of the attribute property to retieve.

        Example::

            # Check if the 'active' css class is applied to an element.
            is_active = "active" in target_element.get_attribute("class")
        """
        resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
        attributeValue = ''
        if resp['value'] is None:
            attributeValue = None
        else:
            attributeValue = resp['value']
            # Normalize boolean-looking attribute values to lowercase, but
            # never rewrite the free-form 'value' attribute.
            if name != 'value' and attributeValue.lower() in ('true', 'false'):
                attributeValue = attributeValue.lower()
        return attributeValue
    def is_selected(self):
        """Whether the element is selected.

        Can be used to check if a checkbox or radio button is selected.
        """
        return self._execute(Command.IS_ELEMENT_SELECTED)['value']
    def is_enabled(self):
        """Whether the element is enabled."""
        return self._execute(Command.IS_ELEMENT_ENABLED)['value']
    def find_element_by_id(self, id_):
        """Finds element within the child elements of this element.

        :Args:
            - id_ - ID of child element to locate.
        """
        return self.find_element(by=By.ID, value=id_)
    def find_elements_by_id(self, id_):
        """Finds a list of elements within the children of this element
        with the matching ID.

        :Args:
            - id_ - Id of child element to find.
        """
        return self.find_elements(by=By.ID, value=id_)
    def find_element_by_name(self, name):
        """Find element with in this element's children by name.

        :Args:
            - name - name property of the element to find.
        """
        return self.find_element(by=By.NAME, value=name)
    def find_elements_by_name(self, name):
        """Finds a list of elements with in this element's children by name.

        :Args:
            - name - name property to search for.
        """
        return self.find_elements(by=By.NAME, value=name)
    def find_element_by_link_text(self, link_text):
        """Finds element with in this element's children by visible link text.

        :Args:
            - link_text - Link text string to search for.
        """
        return self.find_element(by=By.LINK_TEXT, value=link_text)
    def find_elements_by_link_text(self, link_text):
        """Finds a list of elements with in this element's children by visible link text.

        :Args:
            - link_text - Link text string to search for.
        """
        return self.find_elements(by=By.LINK_TEXT, value=link_text)
    def find_element_by_partial_link_text(self, link_text):
        """Finds element with in this element's children by parial visible link text.

        :Args:
            - link_text - Link text string to search for.
        """
        return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
    def find_elements_by_partial_link_text(self, link_text):
        """Finds a list of elements with in this element's children by link text.

        :Args:
            - link_text - Link text string to search for.
        """
        return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
    def find_element_by_tag_name(self, name):
        """Finds element with in this element's children by tag name.

        :Args:
            - name - name of html tag (eg: h1, a, span)
        """
        return self.find_element(by=By.TAG_NAME, value=name)
    def find_elements_by_tag_name(self, name):
        """Finds a list of elements with in this element's children by tag name.

        :Args:
            - name - name of html tag (eg: h1, a, span)
        """
        return self.find_elements(by=By.TAG_NAME, value=name)
    def find_element_by_xpath(self, xpath):
        """Finds element by xpath.

        :Args:
            xpath - xpath of element to locate.  "//input[@class='myelement']"

        Note: The base path will be relative to this element's location.

        This will select the first link under this element.::

            myelement.find_elements_by_xpath(".//a")

        However, this will select the first link on the page.

            myelement.find_elements_by_xpath("//a")
        """
        return self.find_element(by=By.XPATH, value=xpath)
    def find_elements_by_xpath(self, xpath):
        """Finds elements within the elements by xpath.

        :Args:
            - xpath - xpath locator string.

        Note: The base path will be relative to this element's location.

        This will select all links under this element.::

            myelement.find_elements_by_xpath(".//a")

        However, this will select all links in the page itself.

            myelement.find_elements_by_xpath("//a")
        """
        return self.find_elements(by=By.XPATH, value=xpath)
    def find_element_by_class_name(self, name):
        """Finds an element within this element's children by their class name.

        :Args:
            - name - class name to search on.
        """
        return self.find_element(by=By.CLASS_NAME, value=name)
    def find_elements_by_class_name(self, name):
        """Finds a list of elements within children of this element by their class name.

        :Args:
            - name - class name to search on.
        """
        return self.find_elements(by=By.CLASS_NAME, value=name)
    def find_element_by_css_selector(self, css_selector):
        """Find and return an element that's a child of this element by CSS selector.

        :Args:
            - css_selector - CSS selctor string, ex: 'a.nav#home'
        """
        return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
    def find_elements_by_css_selector(self, css_selector):
        """Find and return list of multiple elements within the children of this
        element by CSS selector.

        :Args:
            - css_selector - CSS selctor string, ex: 'a.nav#home'
        """
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
    def send_keys(self, *value):
        """Simulates typing into the element.

        :Args:
            - value - A string for typing, or setting form fields.  For setting
              file inputs, this could be a local file path.

        Use this to send simple key events or to fill out form fields::

            form_textfield = driver.find_element_by_name('username')
            form_textfield.send_keys("admin")

        This can also be used to set file inputs.::

            file_input = driver.find_element_by_name('profilePic')
            file_input.send_keys("path/to/profilepic.gif")
            # Generally it's better to wrap the file path in one of the methods
            # in os.path to return the actual path to support cross OS testing.
            # file_input.send_keys(os.path.abspath("path/to/profilepic.gif"))
        """
        # transfer file to another machine only if remote driver is used
        # the same behaviour as for java binding
        if self.parent._is_remote:
            local_file = LocalFileDetector.is_local_file(*value)
            if local_file is not None:
                value = self._upload(local_file)
        # Flatten the mixed input (Keys constants, ints, strings) into a
        # list of single keystrokes for the wire protocol.
        typing = []
        for val in value:
            if isinstance(val, Keys):
                typing.append(val)
            elif isinstance(val, int):
                val = val.__str__()
                for i in range(len(val)):
                    typing.append(val[i])
            else:
                for i in range(len(val)):
                    typing.append(val[i])
        self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})
    # RenderedWebElement Items
    def is_displayed(self):
        """Whether the element would be visible to a user
        """
        return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
    @property
    def location_once_scrolled_into_view(self):
        """CONSIDERED LIABLE TO CHANGE WITHOUT WARNING. Use this to discover where on the screen an
        element is so that we can click it. This method should cause the element to be scrolled
        into view.

        Returns the top lefthand corner location on the screen, or None if the element is not visible"""
        return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']
    @property
    def size(self):
        """ Returns the size of the element """
        size = self._execute(Command.GET_ELEMENT_SIZE)['value']
        # Return only the documented keys, regardless of driver extras.
        new_size = {}
        new_size["height"] = size["height"]
        new_size["width"] = size["width"]
        return new_size
    def value_of_css_property(self, property_name):
        """ Returns the value of a CSS property """
        return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
                             {'propertyName': property_name})['value']
    @property
    def location(self):
        """ Returns the location of the element in the renderable canvas"""
        old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value']
        # Return only the documented keys, regardless of driver extras.
        new_loc = {"x": old_loc['x'],
                   "y": old_loc['y']}
        return new_loc
    @property
    def rect(self):
        """ Returns a dictionary with the size and location of the element"""
        return self._execute(Command.GET_ELEMENT_RECT)['value']
    @property
    def parent(self):
        """ Returns parent element is available. """
        return self._parent
    @property
    def id(self):
        """ Returns internal id used by selenium.

        This is mainly for internal use.  Simple use cases such as checking if 2 webelements
        refer to the same element, can be done using '=='::

            if element1 == element2:
                print("These 2 are equal")
        """
        return self._id
    def __eq__(self, element):
        # Fast path: identical server-side references are the same element;
        # otherwise ask the remote end to compare them.
        if self._id == element.id:
            return True
        else:
            return self._execute(Command.ELEMENT_EQUALS, {'other': element.id})['value']
    # Private Methods
    def _execute(self, command, params=None):
        """Executes a command against the underlying HTML element.

        Args:
          command: The name of the command to _execute as a string.
          params: A dictionary of named parameters to send with the command.

        Returns:
          The command's JSON response loaded into a dictionary object.
        """
        if not params:
            params = {}
        params['id'] = self._id
        return self._parent.execute(command, params)
    def find_element(self, by=By.ID, value=None):
        """Find a single child element by the given locator strategy."""
        if not By.is_valid(by) or not isinstance(value, str):
            raise InvalidSelectorException("Invalid locator values passed in")
        return self._execute(Command.FIND_CHILD_ELEMENT,
                             {"using": by, "value": value})['value']
    def find_elements(self, by=By.ID, value=None):
        """Find all child elements matching the given locator strategy."""
        if not By.is_valid(by) or not isinstance(value, str):
            raise InvalidSelectorException("Invalid locator values passed in")
        return self._execute(Command.FIND_CHILD_ELEMENTS,
                             {"using": by, "value": value})['value']
    def __hash__(self):
        # Hash on the server-side element reference so equal elements with
        # the same id hash identically.
        return int(hashlib.md5(self._id.encode('utf-8')).hexdigest(), 16)
    def _upload(self, filename):
        """Zip *filename* and upload it to the remote end; returns the
        remote path, or the local filename when the server lacks the
        upload endpoint."""
        fp = IOStream()
        zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
        zipped.write(filename, os.path.split(filename)[1])
        zipped.close()
        # NOTE(review): base64.encodestring is deprecated (removed in
        # Python 3.9); presumably kept for Python 2 support -- confirm
        # before migrating to base64.encodebytes.
        content = base64.encodestring(fp.getvalue())
        if not isinstance(content, str):
            content = content.decode('utf-8')
        try:
            return self._execute(Command.UPLOAD_FILE,
                                 {'file': content})['value']
        except WebDriverException as e:
            if "Unrecognized command: POST" in e.__str__():
                return filename
            elif "Command not found: POST " in e.__str__():
                return filename
            elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
                return filename
            else:
                raise e
class LocalFileDetector(object):
    """Detects whether a send_keys value sequence spells a local file path."""

    @classmethod
    def is_local_file(cls, *keys):
        """Joins *keys* into a single string and returns it if it names an
        existing local file, otherwise None.

        Keys instances are appended as-is; ints and strings are split into
        individual characters, mirroring how send_keys types them.
        """
        typing = []
        for val in keys:
            if isinstance(val, Keys):
                typing.append(val)
            elif isinstance(val, int):
                # Digits are typed one character at a time.
                typing.extend(str(val))
            else:
                typing.extend(val)
        file_path = ''.join(typing)

        # Bug fix: the original used "file_path is ''", an identity
        # comparison with a string literal, which is unreliable; test for
        # emptiness instead.
        if not file_path:
            return None

        try:
            if os.path.isfile(file_path):
                return file_path
        except Exception:
            # isfile can raise (e.g. ValueError on an embedded NUL byte);
            # treat any failure as "not a local file". Narrowed from a bare
            # except so KeyboardInterrupt/SystemExit still propagate.
            pass
        return None
|
"""This component provides support to the Ring Door Bell camera."""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import ATTR_ATTRIBUTION, CONF_SCAN_INTERVAL
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from homeassistant.util import dt as dt_util
from . import ATTRIBUTION, DATA_RING, NOTIFICATION_ID
# Config key for extra command line arguments passed to ffmpeg.
CONF_FFMPEG_ARGUMENTS = 'ffmpeg_arguments'
# Recording URLs expire; force a refresh of the cached URL at least this often.
FORCE_REFRESH_INTERVAL = timedelta(minutes=45)
_LOGGER = logging.getLogger(__name__)
# Title of the persistent notification raised on setup problems.
NOTIFICATION_TITLE = 'Ring Camera Setup'
# Default polling interval for camera entity updates.
SCAN_INTERVAL = timedelta(seconds=90)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_FFMPEG_ARGUMENTS): cv.string,
    vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): cv.time_period,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up a Ring Door Bell and StickUp Camera."""
    ring = hass.data[DATA_RING]
    cams = []
    cams_no_plan = []
    # Doorbells and stick-up cams are treated identically here.
    for device_group in (ring.doorbells, ring.stickup_cams):
        for camera in device_group:
            if camera.has_subscription:
                cams.append(RingCam(hass, camera, config))
            else:
                cams_no_plan.append(camera)
    # show notification for all cameras without an active subscription
    if cams_no_plan:
        cameras = ', '.join(camera.name for camera in cams_no_plan)
        err_msg = ('A Ring Protect Plan is required for the'
                   ' following cameras: {}.'.format(cameras))
        _LOGGER.error(err_msg)
        hass.components.persistent_notification.create(
            'Error: {}<br />'
            'You will need to restart hass after fixing.'.format(err_msg),
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID)
    add_entities(cams, True)
    return True
class RingCam(Camera):
    """An implementation of a Ring Door Bell camera."""
    def __init__(self, hass, camera, device_info):
        """Initialize a Ring Door Bell camera."""
        super(RingCam, self).__init__()
        self._camera = camera
        self._hass = hass
        self._name = self._camera.name
        self._ffmpeg = hass.data[DATA_FFMPEG]
        self._ffmpeg_arguments = device_info.get(CONF_FFMPEG_ARGUMENTS)
        # Cache the most recent recording and its URL; Ring recording URLs
        # expire, so we track when the cached one must be refreshed.
        self._last_video_id = self._camera.last_recording_id
        self._video_url = self._camera.recording_url(self._last_video_id)
        self._utcnow = dt_util.utcnow()
        self._expires_at = FORCE_REFRESH_INTERVAL + self._utcnow
    @property
    def name(self):
        """Return the name of this camera."""
        return self._name
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._camera.id
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            'device_id': self._camera.id,
            'firmware': self._camera.firmware,
            'kind': self._camera.kind,
            'timezone': self._camera.timezone,
            'type': self._camera.family,
            'video_url': self._video_url,
        }
    async def async_camera_image(self):
        """Return a still image response from the camera."""
        # Imported lazily so the dependency is only loaded when used.
        from haffmpeg.tools import ImageFrame, IMAGE_JPEG
        ffmpeg = ImageFrame(self._ffmpeg.binary, loop=self.hass.loop)
        if self._video_url is None:
            return
        # Shield the frame grab so a cancelled caller doesn't kill ffmpeg
        # mid-extraction.
        image = await asyncio.shield(ffmpeg.get_image(
            self._video_url, output_format=IMAGE_JPEG,
            extra_cmd=self._ffmpeg_arguments))
        return image
    async def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from the camera."""
        from haffmpeg.camera import CameraMjpeg
        if self._video_url is None:
            return
        stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
        await stream.open_camera(
            self._video_url, extra_cmd=self._ffmpeg_arguments)
        try:
            stream_reader = await stream.get_reader()
            return await async_aiohttp_proxy_stream(
                self.hass, request, stream_reader,
                self._ffmpeg.ffmpeg_stream_content_type)
        finally:
            # Always close the ffmpeg subprocess, even if proxying fails.
            await stream.close()
    @property
    def should_poll(self):
        """Update the image periodically."""
        return True
    def update(self):
        """Update camera entity and refresh attributes."""
        _LOGGER.debug("Checking if Ring DoorBell needs to refresh video_url")
        self._camera.update()
        self._utcnow = dt_util.utcnow()
        # The newest history event drives which recording URL we expose.
        try:
            last_event = self._camera.history(limit=1)[0]
        except (IndexError, TypeError):
            # No history available (empty list or None) - keep current state.
            return
        last_recording_id = last_event['id']
        video_status = last_event['recording']['status']
        # Refresh only when the recording is ready AND either a new video
        # appeared or the cached URL has reached its expiry time.
        if video_status == 'ready' and \
            (self._last_video_id != last_recording_id or
             self._utcnow >= self._expires_at):
            video_url = self._camera.recording_url(last_recording_id)
            if video_url:
                _LOGGER.info("Ring DoorBell properties refreshed")
                # update attributes if new video or if URL has expired
                self._last_video_id = last_recording_id
                self._video_url = video_url
                self._expires_at = FORCE_REFRESH_INTERVAL + self._utcnow
|
from . import parser
class Rule(parser.Parser):
    """A named grammar rule whose implementation parser may be assigned
    after construction (e.g. via ``rule %= some_parser``), which allows
    recursive grammar definitions."""
    def __init__(self, expected_attr_type=None):
        # Optional declared attribute type; enforced when a parser is
        # assigned through the ``parser`` setter below.
        self.__expected_attr_type = expected_attr_type
    @property
    def attr_type(self):
        # The explicitly declared type wins; otherwise defer to the
        # assigned inner parser.
        if self.__expected_attr_type:
            return self.__expected_attr_type
        else:
            try:
                inner_parser = self.__parser
            except AttributeError:
                # No parser assigned yet and no declared type available.
                raise NotImplementedError
            else:
                return inner_parser.attr_type
    @property
    def parser(self):
        # The underlying parser implementing this rule (set via the setter).
        return self.__parser
    @parser.setter
    def parser(self, value):
        # NOTE(review): the type check runs on ``value`` before conversion;
        # presumably as_parser preserves attr_type -- confirm.
        if self.__expected_attr_type and self.__expected_attr_type != value.attr_type:
            raise ValueError('Unexpected attribute type')
        # Inside this method body ``parser`` resolves to the module (the
        # property only shadows the name in the class body).
        self.__parser = parser.as_parser(value)
    def _parse(self, state, *args, **kwargs):
        # A rule invocation opens a fresh scope for its arguments.
        with state.open_scope(*args, **kwargs):
            self.__parser._parse(state)
    def __imod__(self, other):
        # Supports the ``rule %= parser`` assignment syntax.
        self.parser = other
        return self
    def __call__(self, *args, **kwargs):
        # Calling a rule builds a deferred RuleCall rather than parsing now.
        return RuleCall(self, *args, **kwargs)
class RuleCall(parser.Unary):
    """A deferred invocation of a Rule, capturing the arguments with which
    the rule should eventually be parsed."""
    def __init__(self, rule, *args, **kwargs):
        if not isinstance(rule, Rule):
            raise TypeError('Expected rule to be type Rule, was {}'.format(type(rule).__name__))
        super(RuleCall, self).__init__(rule)
        self.__args = args
        self.__kwargs = kwargs
    @property
    def args(self):
        """The positional arguments captured at construction time."""
        return self.__args
    @property
    def kwargs(self):
        """A copy of the keyword arguments captured at construction time."""
        return dict(self.__kwargs)
    def _parse(self, state):
        # Evaluate every captured argument against the current state, then
        # delegate to the wrapped rule's parser.
        evaluated = [state.invoke(item) for item in self.__args]
        named = {key: state.invoke(val)
                 for key, val in self.__kwargs.items()}
        self.parser._parse(state, *evaluated, **named)
|
import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def main():
    """Prescript run before starting harvester.

    Checks whether the installed harvester package has changed since the
    last run and, if so, renews the local package info file.
    """
    oparser = argparse.ArgumentParser(prog='prescript', add_help=True)
    oparser.add_argument('-f', '--local_info_file', action='store',
                         dest='local_info_file',
                         help='path of harvester local info file')
    if len(sys.argv) == 1:
        print('No argument or flag specified. Did nothing')
        sys.exit(0)
    args = oparser.parse_args(sys.argv[1:])
    if args.local_info_file is None:
        # Defensive: os.path.normpath(None) would raise an opaque
        # TypeError; fail with a clear argparse error instead.
        oparser.error('argument -f/--local_info_file is required')
    local_info_file = os.path.normpath(args.local_info_file)
    hpi = harvesterPackageInfo(local_info_file=local_info_file)
    if hpi.package_changed:
        print('Harvester package changed')
        # TODO: add any migration actions required on package change.
        hpi.renew_local_info()
    else:
        print('Harvester package unchanged. Skipped')


if __name__ == '__main__':
    main()
|
import tempfile
from git import InvalidGitRepositoryError
try:
from unittest2 import TestCase
from mock import patch, Mock
except ImportError:
from unittest import TestCase
from mock import patch, Mock
import textwrap
from datetime import datetime
from botocore.exceptions import ClientError
from dateutil.tz import tzutc
from cfn_sphere import util, CloudFormationStack
from cfn_sphere.exceptions import CfnSphereException, CfnSphereBotoError
from cfn_sphere.template import CloudFormationTemplate
class UtilTests(TestCase):
    """Unit tests for the cfn_sphere.util helper functions."""
    def test_convert_yaml_to_json_string_returns_valid_json_string(self):
        data = textwrap.dedent("""
            foo:
              foo: baa
            """)
        self.assertEqual('{\n "foo": {\n "foo": "baa"\n }\n}', util.convert_yaml_to_json_string(data))
    def test_convert_yaml_to_json_string_returns_valid_json_string_on_empty_string_input(self):
        data = ""
        self.assertEqual('{}', util.convert_yaml_to_json_string(data))
    def test_convert_json_to_yaml_string_returns_valid_yaml_string(self):
        data = textwrap.dedent("""
        {
            "foo": {
                "foo": "baa"
            }
        }
        """)
        self.assertEqual('foo:\n foo: baa\n', util.convert_json_to_yaml_string(data))
    def test_convert_json_to_yaml_string_returns_empty_string_on_empty_json_input(self):
        data = {}
        self.assertEqual('', util.convert_json_to_yaml_string(data))
    @patch("cfn_sphere.util.urllib2.urlopen")
    def test_get_cfn_api_server_time_returns_gmt_datetime(self, urlopen_mock):
        # The server time is read from the HTTP Date response header.
        urlopen_mock.return_value.info.return_value.get.return_value = "Mon, 21 Sep 2015 17:17:26 GMT"
        expected_timestamp = datetime(year=2015, month=9, day=21, hour=17, minute=17, second=26, tzinfo=tzutc())
        self.assertEqual(expected_timestamp, util.get_cfn_api_server_time())
    @patch("cfn_sphere.util.urllib2.urlopen")
    def test_get_cfn_api_server_time_raises_exception_on_empty_date_header(self, urlopen_mock):
        urlopen_mock.return_value.info.return_value.get.return_value = ""
        with self.assertRaises(CfnSphereException):
            util.get_cfn_api_server_time()
    # The with_boto_retry decorator should retry only on boto throttling
    # errors; the count_func mock records how often the body executed.
    def test_with_boto_retry_retries_method_call_for_throttling_exception(self):
        count_func = Mock()
        @util.with_boto_retry(max_retries=1, pause_time_multiplier=1)
        def my_retried_method(count_func):
            count_func()
            exception = CfnSphereBotoError(
                ClientError(error_response={"Error": {"Code": "Throttling", "Message": "Rate exceeded"}},
                            operation_name="DescribeStacks"))
            raise exception
        with self.assertRaises(CfnSphereBotoError):
            my_retried_method(count_func)
        self.assertEqual(2, count_func.call_count)
    def test_with_boto_retry_does_not_retry_for_simple_exception(self):
        count_func = Mock()
        @util.with_boto_retry(max_retries=1, pause_time_multiplier=1)
        def my_retried_method(count_func):
            count_func()
            raise Exception
        with self.assertRaises(Exception):
            my_retried_method(count_func)
        self.assertEqual(1, count_func.call_count)
    def test_with_boto_retry_does_not_retry_for_another_boto_client_error(self):
        count_func = Mock()
        @util.with_boto_retry(max_retries=1, pause_time_multiplier=1)
        def my_retried_method(count_func):
            count_func()
            exception = ClientError(error_response={"Error": {"Code": "Another Error", "Message": "Foo"}},
                                    operation_name="DescribeStacks")
            raise exception
        with self.assertRaises(ClientError):
            my_retried_method(count_func)
        self.assertEqual(1, count_func.call_count)
    def test_with_boto_retry_does_not_retry_without_exception(self):
        count_func = Mock()
        @util.with_boto_retry(max_retries=1, pause_time_multiplier=1)
        def my_retried_method(count_func):
            count_func()
            return "foo"
        self.assertEqual("foo", my_retried_method(count_func))
        self.assertEqual(1, count_func.call_count)
    def test_get_pretty_parameters_string(self):
        # Parameters with NoEcho (bool True or string 'true') must be masked.
        template_body = {
            'Parameters': {
                'myParameter1': {
                    'Type': 'String',
                    'NoEcho': True
                },
                'myParameter2': {
                    'Type': 'String'
                },
                'myParameter3': {
                    'Type': 'Number',
                    'NoEcho': 'true'
                },
                'myParameter4': {
                    'Type': 'Number',
                    'NoEcho': 'false'
                },
                'myParameter5': {
                    'Type': 'Number',
                    'NoEcho': False
                }
            }
        }
        parameters = {
            'myParameter1': 'super-secret',
            'myParameter2': 'not-that-secret',
            'myParameter3': 'also-super-secret',
            'myParameter4': 'could-be-public',
            'myParameter5': 'also-ok'
        }
        template = CloudFormationTemplate(template_body, 'just-another-template')
        stack = CloudFormationStack(template, parameters, 'just-another-stack', 'eu-west-1')
        expected_string = """+--------------+-----------------+
        | Parameter | Value |
        +--------------+-----------------+
        | myParameter1 | *** |
        | myParameter2 | not-that-secret |
        | myParameter3 | *** |
        | myParameter4 | could-be-public |
        | myParameter5 | also-ok |
        +--------------+-----------------+"""
        self.assertEqual(expected_string, util.get_pretty_parameters_string(stack))
    def test_get_pretty_stack_outputs_returns_proper_table(self):
        outputs = [
            {
                'OutputKey': 'key1',
                'OutputValue': 'value1',
                'Description': 'desc1'
            }, {
                'OutputKey': 'key2',
                'OutputValue': 'value2',
                'Description': 'desc2'
            }, {
                'OutputKey': 'key3',
                'OutputValue': 'value3',
                'Description': 'desc3'
            }
        ]
        expected = """+--------+--------+
        | Output | Value |
        +--------+--------+
        | key1 | value1 |
        | key2 | value2 |
        | key3 | value3 |
        +--------+--------+"""
        result = util.get_pretty_stack_outputs(outputs)
        self.assertEqual(expected, result)
    def test_strip_string_strips_string(self):
        # Long strings are truncated to 100 chars plus an ellipsis.
        s = "sfsdklgashgslkadghkafhgaknkbndkjfbnwurtqwhgsdnkshGLSAKGKLDJFHGSKDLGFLDFGKSDFLGKHAsdjdghskjdhsdcxbvwerA323"
        result = util.strip_string(s)
        self.assertEqual(
            "sfsdklgashgslkadghkafhgaknkbndkjfbnwurtqwhgsdnkshGLSAKGKLDJFHGSKDLGFLDFGKSDFLGKHAsdjdghskjdhsdcxbvwe...",
            result)
    def test_strip_string_doesnt_strip_short_strings(self):
        s = "my-short-string"
        result = util.strip_string(s)
        self.assertEqual("my-short-string...", result)
    @patch("cfn_sphere.util.Repo")
    def test_get_git_repository_remote_url_returns_none_if_no_repository_present(self, repo_mock):
        repo_mock.side_effect = InvalidGitRepositoryError
        self.assertEqual(None, util.get_git_repository_remote_url(tempfile.mkdtemp()))
    @patch("cfn_sphere.util.Repo")
    def test_get_git_repository_remote_url_returns_repo_url(self, repo_mock):
        url = "http://config.repo.git"
        repo_mock.return_value.remotes.origin.url = url
        self.assertEqual(url, util.get_git_repository_remote_url(tempfile.mkdtemp()))
    @patch("cfn_sphere.util.Repo")
    def test_get_git_repository_remote_url_returns_repo_url_from_parent_dir(self, repo_mock):
        # First lookup fails, so the implementation must walk up one level.
        url = "http://config.repo.git"
        repo_object_mock = Mock()
        repo_object_mock.remotes.origin.url = url
        repo_mock.side_effect = [InvalidGitRepositoryError, repo_object_mock]
        self.assertEqual(url, util.get_git_repository_remote_url(tempfile.mkdtemp()))
    def test_get_git_repository_remote_url_returns_none_for_none_working_dir(self):
        self.assertEqual(None, util.get_git_repository_remote_url(None))
    def test_get_git_repository_remote_url_returns_none_for_empty_string_working_dir(self):
        self.assertEqual(None, util.get_git_repository_remote_url(""))
    def test_kv_list_to_dict_returns_empty_dict_for_empty_list(self):
        result = util.kv_list_to_dict([])
        self.assertEqual({}, result)
    def test_kv_list_to_dict(self):
        result = util.kv_list_to_dict(["k1=v1", "k2=v2"])
        self.assertEqual({"k1": "v1", "k2": "v2"}, result)
    def test_kv_list_to_dict_raises_exception_on_syntax_error(self):
        with self.assertRaises(CfnSphereException):
            util.kv_list_to_dict(["k1=v1", "k2:v2"])
|
"""Basic tests for TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ReshapeTest(trt_test.TfTrtIntegrationTestBase):
  def GraphFn(self, inp):
    """Builds a graph mixing batch-changing and batch-preserving reshapes."""
    outputs = []
    # Here we test two types of reshapes, one changes the batch dimension and
    # the other does not. Note that we're not able to test reshaping to
    # scalar, since TRT requires input tensor to be of rank at least 2, so a
    # reshape with scalar input will be filtered out of the segment before
    # conversion.
    #
    # These reshapes happen at batch dimension, thus conversion should fail.
    for shape in [[2, 50, 24, 24, 2], [-1, 50, 24, 24, 2], [2, 50, -1, 24, 2]]:
      incompatible_reshape = array_ops.reshape(inp, shape)
      reshape_back = array_ops.reshape(incompatible_reshape, [-1, 24, 24, 2])
      outputs.append(self.trt_incompatible_op(reshape_back))
    # Add another block with many reshapes that don't change the batch
    # dimension.
    compatible_reshape = array_ops.reshape(
        inp, [-1, 24 * 24, 2], name="reshape-0")
    compatible_reshape = array_ops.reshape(
        compatible_reshape, [100, 24, -1], name="reshape-1")
    compatible_reshape = array_ops.reshape(
        compatible_reshape, [100, 24 * 2, 24], name="reshape-2")
    compatible_reshape = array_ops.reshape(
        compatible_reshape, [-1, 24, 24 * 2], name="reshape-3")
    compatible_reshape = array_ops.reshape(
        compatible_reshape, [-1, 6, 4, 24, 2], name="reshape-4")
    compatible_reshape = array_ops.reshape(
        compatible_reshape, [-1, 6, 4, 6, 4, 2, 1], name="reshape-5")
    compatible_reshape = array_ops.reshape(
        compatible_reshape, [-1, 24, 24, 2], name="reshape-6")
    outputs.append(self.trt_incompatible_op(compatible_reshape))
    return math_ops.add_n(outputs, name="output_0")
  def GetParams(self):
    """Returns build params with the input/output shapes for the test."""
    return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
                            [[100, 24, 24, 2]])
  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    # Only the batch-preserving reshapes (and their shape const inputs)
    # should be absorbed into a single TRT engine.
    return {
        "TRTEngineOp_0": ["reshape-%d" % i for i in range(7)] +
        ["reshape-%d/shape" % i for i in range(7)]
    }
  def ShouldRunTest(self, run_params):
    """Whether to run the test."""
    return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
            not run_params.dynamic_engine)
class TransposeTest(trt_test.TfTrtIntegrationTestBase):
  def GraphFn(self, inp):
    """Builds a graph with batch-preserving and batch-changing transposes."""
    # Add a block with compatible transposes.
    compatible_transpose = array_ops.transpose(
        inp, [0, 3, 1, 2], name="transpose-1")
    compatible_transpose = array_ops.transpose(
        compatible_transpose, [0, 2, 3, 1], name="transposeback")
    # Add an incompatible op so the first block will not be in the same
    # subgraph where the following block belongs.
    bridge = self.trt_incompatible_op(compatible_transpose)
    # Add a block with incompatible transposes.
    #
    # Note: by default Grappler will run the TRT optimizer twice. At the
    # first time it will group the two transpose ops below to same segment
    # then fail the conversion due to the expected batch dimension problem.
    # At the second time, since the input of bridge op is TRTEngineOp_0, it
    # will fail to do shape inference which then cause conversion to fail.
    # TODO(laigd): support shape inference, make TRT optimizer run only
    # once, and fix this.
    incompatible_transpose = array_ops.transpose(
        bridge, [2, 1, 0, 3], name="transpose-2")
    excluded_transpose = array_ops.transpose(
        incompatible_transpose, [0, 2, 3, 1], name="transpose-3")
    return array_ops.identity(excluded_transpose, name="output_0")
  def GetParams(self):
    """Returns build params with the input/output shapes for the test."""
    return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
                            [[24, 100, 2, 24]])
  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    # Only the two batch-preserving transposes (plus their perm const
    # inputs) should end up inside a TRT engine.
    return {
        "TRTEngineOp_0": [
            "transpose-1", "transpose-1/perm", "transposeback",
            "transposeback/perm"
        ]
    }
  def ShouldRunTest(self, run_params):
    """Whether to run the test."""
    return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
            not run_params.dynamic_engine)
# Allow running these tests directly.
if __name__ == "__main__":
  test.main()
|
from O365 import attachment
import unittest
import json
import base64
from random import randint
# Load the sample attachment fixture once at import time. Use a context
# manager so the file handle is closed promptly instead of being leaked
# by open(...).read().
with open('attachment.json', 'r') as _att_file:
    att_rep = _att_file.read()
att_j = json.loads(att_rep)
class TestAttachment (unittest.TestCase):
    """Tests for attachment.Attachment built from the attachment.json fixture."""
    def setUp(self):
        # A fresh Attachment from the first entry of the fixture for each test.
        self.att = attachment.Attachment(att_j['value'][0])
    def test_isType(self):
        self.assertTrue(self.att.isType('txt'))
    def test_getType(self):
        self.assertEqual(self.att.getType(),'.txt')
    def test_save(self):
        # Save twice under randomized names (with and without a trailing
        # slash on the directory) to avoid collisions between runs.
        name = self.att.json['Name']
        name1 = self.newFileName(name)
        self.att.json['Name'] = name1
        self.assertTrue(self.att.save('/tmp'))
        with open('/tmp/'+name1,'r') as ins:
            f = ins.read()
        self.assertEqual('testing w00t!',f)
        name2 = self.newFileName(name)
        self.att.json['Name'] = name2
        self.assertTrue(self.att.save('/tmp/'))
        with open('/tmp/'+name2,'r') as ins:
            f = ins.read()
        self.assertEqual('testing w00t!',f)
    def newFileName(self,val):
        # Prefix four random digits so repeated runs write distinct files.
        for i in range(4):
            val = str(randint(0,9)) + val
        return val
    def test_getByteString(self):
        self.assertEqual(self.att.getByteString(),b'testing w00t!')
    def test_getBase64(self):
        self.assertEqual(self.att.getBase64(),'dGVzdGluZyB3MDB0IQ==\n')
    def test_setByteString(self):
        test_string = b'testing testie test'
        self.att.setByteString(test_string)
        enc = base64.encodebytes(test_string)
        self.assertEqual(self.att.json['ContentBytes'],enc)
    def setBase64(self):
        # NOTE(review): this method lacks the 'test_' prefix, so unittest
        # never runs it. It also calls base64.encodestring (removed in
        # Python 3.9, and it requires bytes, not str) and misuses
        # assertRaises -- confirm intent before renaming/enabling.
        wrong_test_string = 'I am sooooo not base64 encoded.'
        right_test_string = 'Base64 <3 all around!'
        enc = base64.encodestring(right_test_string)
        self.assertRaises(self.att.setBase64(wrong_test_string))
        self.assertEqual(self.att.json['ContentBytes'],'dGVzdGluZyB3MDB0IQ==\n')
        self.att.setBase64(enc)
        self.assertEqual(self.att.json['ContentBytes'],enc)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""Base model class."""
__author__ = 'Sean Lip'
import feconf
import utils
from core.platform import models
transaction_services = models.Registry.import_transaction_services()
from google.appengine.ext import ndb
class BaseModel(ndb.Model):
    """Base model for all persistent object storage classes."""
    # When this entity was first created.
    created_on = ndb.DateTimeProperty(auto_now_add=True)
    # When this entity was last updated.
    last_updated = ndb.DateTimeProperty(auto_now=True)
    # Whether the current version of the file is deleted.
    deleted = ndb.BooleanProperty(indexed=True, default=False)
    @property
    def id(self):
        """A unique id for this model instance."""
        return self.key.id()
    def _pre_put_hook(self):
        """This is run before model instances are saved to the datastore.
        Subclasses of BaseModel should override this method.
        """
        pass
    class EntityNotFoundError(Exception):
        """Raised when no entity for a given id exists in the datastore."""
        pass
    @classmethod
    def get(cls, entity_id, strict=True):
        """Gets an entity by id. Fails noisily if strict == True.
        Args:
            entity_id: str. The id of the entity.
            strict: bool. Whether to fail noisily if no entity with the given id
                exists in the datastore.
        Returns:
            None, if strict == False and no undeleted entity with the given id
            exists in the datastore. Otherwise, the entity instance that
            corresponds to the given id.
        Raises:
            - base_models.BaseModel.EntityNotFoundError: if strict == True and
                no undeleted entity with the given id exists in the datastore.
        """
        entity = cls.get_by_id(entity_id)
        # Treat soft-deleted entities the same as missing ones.
        if entity and entity.deleted:
            entity = None
        if strict and entity is None:
            raise cls.EntityNotFoundError(
                'Entity for class %s with id %s not found' %
                (cls.__name__, entity_id))
        return entity
    def put(self):
        """Writes this entity to the datastore."""
        super(BaseModel, self).put()
    @classmethod
    def get_multi(cls, entity_ids):
        """Fetches multiple entities by id in a single datastore call."""
        entity_keys = [ndb.Key(cls, entity_id) for entity_id in entity_ids]
        return ndb.get_multi(entity_keys)
    @classmethod
    def put_multi(cls, entities):
        """Writes multiple entities in a single datastore call."""
        return ndb.put_multi(entities)
    def delete(self):
        """Hard-deletes this entity from the datastore."""
        super(BaseModel, self).key.delete()
    @classmethod
    def get_all(cls, include_deleted_entities=False):
        """Returns a filterable iterable of all entities of this class.
        If include_deleted_entities is True then entities that have been marked
        deleted are returned as well.
        """
        query = cls.query()
        if not include_deleted_entities:
            # NDB query syntax requires '==' comparison here, not 'is'.
            query = query.filter(cls.deleted == False)
        return query
    @classmethod
    def get_new_id(cls, entity_name):
        """Gets a new id for an entity, based on its name.
        The returned id is guaranteed to be unique among all instances of this
        entity.
        Args:
            entity_name: the name of the entity. Coerced to a utf-8 encoded
                string. Defaults to ''.
        Returns:
            str: a new unique id for this entity class.
        Raises:
            - Exception: if an id cannot be generated within a reasonable number
                of attempts.
        """
        try:
            # Python 2 coercion to a utf-8 byte string; any failure falls
            # back to an empty name.
            entity_name = unicode(entity_name).encode('utf-8')
        except Exception:
            entity_name = ''
        MAX_RETRIES = 10
        RAND_RANGE = 127 * 127
        ID_LENGTH = 12
        for i in range(MAX_RETRIES):
            # Salt the name with a random int so collisions are unlikely.
            new_id = utils.convert_to_hash(
                '%s%s' % (entity_name, utils.get_random_int(RAND_RANGE)),
                ID_LENGTH)
            if not cls.get_by_id(new_id):
                return new_id
        raise Exception('New id generator is producing too many collisions.')
class VersionedModel(BaseModel):
    """Model that handles storage of the version history of model instances.
    To use this class, you must declare a SNAPSHOT_METADATA_CLASS and a
    SNAPSHOT_CONTENT_CLASS. The former must contain the String fields
    'committer_id', 'commit_type' and 'commit_message', and a JSON field for
    the Python list of dicts, 'commit_cmds'. The latter must contain the JSON
    field 'content'. The item that is being versioned must be serializable to a
    JSON blob.
    Note that commit() should be used for VersionedModels, as opposed to put()
    for direct subclasses of BaseModel.
    """
    # The class designated as the snapshot model. This should be a subclass of
    # BaseSnapshotMetadataModel.
    SNAPSHOT_METADATA_CLASS = None
    # The class designated as the snapshot content model. This should be a
    # subclass of BaseSnapshotContentModel.
    SNAPSHOT_CONTENT_CLASS = None
    # Whether reverting is allowed. Default is False.
    ALLOW_REVERT = False
    ### IMPORTANT: Subclasses should only overwrite things above this line. ###
    # The possible commit types.
    _COMMIT_TYPE_CREATE = 'create'
    _COMMIT_TYPE_REVERT = 'revert'
    _COMMIT_TYPE_EDIT = 'edit'
    _COMMIT_TYPE_DELETE = 'delete'
    # A list containing the possible commit types.
    COMMIT_TYPE_CHOICES = [
        _COMMIT_TYPE_CREATE, _COMMIT_TYPE_REVERT, _COMMIT_TYPE_EDIT,
        _COMMIT_TYPE_DELETE
    ]
    # The delimiter used to separate the version number from the model instance
    # id. To get the instance id from a snapshot id, use Python's rfind()
    # method to find the location of this delimiter.
    _VERSION_DELIMITER = '-'
    # The reserved prefix for keys that are automatically inserted into a
    # commit_cmd dict by this model.
    _AUTOGENERATED_PREFIX = 'AUTO'
    # The current version number of this instance. In each PUT operation,
    # this number is incremented and a snapshot of the modified instance is
    # stored in the snapshot metadata and content models. The snapshot
    # version number starts at 1 when the model instance is first created.
    # All data in this instance represents the version at HEAD; data about the
    # previous versions is stored in the snapshot models.
    version = ndb.IntegerProperty(default=0)
    def _require_not_marked_deleted(self):
        """Raises an Exception if this instance has been marked deleted."""
        if self.deleted:
            raise Exception('This model instance has been deleted.')
    def _compute_snapshot(self):
        """Generates a snapshot (a Python dict) from the model fields."""
        return self.to_dict(exclude=['created_on', 'last_updated'])
    def _reconstitute(self, snapshot_dict):
        """Makes this instance into a reconstitution of the given snapshot."""
        self.populate(**snapshot_dict)
        return self
    def _reconstitute_from_snapshot_id(self, snapshot_id):
        """Makes this instance into a reconstitution of the given snapshot."""
        snapshot_model = self.SNAPSHOT_CONTENT_CLASS.get(snapshot_id)
        snapshot_dict = snapshot_model.content
        return self._reconstitute(snapshot_dict)
    @classmethod
    def _get_snapshot_id(cls, instance_id, version_number):
        """Returns the snapshot id for the given instance id and version."""
        return '%s%s%s' % (
            instance_id, cls._VERSION_DELIMITER, version_number)
    def _trusted_commit(
            self, committer_id, commit_type, commit_message, commit_cmds):
        """Increments the version, snapshots the instance and saves all three
        entities (metadata, content, HEAD) in a single transaction.
        Callers are expected to have validated commit_cmds' contents; only
        structural validation is performed here.
        """
        if self.SNAPSHOT_METADATA_CLASS is None:
            raise Exception('No snapshot metadata class defined.')
        if self.SNAPSHOT_CONTENT_CLASS is None:
            raise Exception('No snapshot content class defined.')
        if not isinstance(commit_cmds, list):
            raise Exception(
                'Expected commit_cmds to be a list of dicts, received %s'
                % commit_cmds)
        for item in commit_cmds:
            if not isinstance(item, dict):
                raise Exception(
                    'Expected commit_cmds to be a list of dicts, received %s'
                    % commit_cmds)
        self.version += 1
        snapshot = self._compute_snapshot()
        snapshot_id = self._get_snapshot_id(self.id, self.version)
        snapshot_metadata_instance = self.SNAPSHOT_METADATA_CLASS(
            id=snapshot_id, committer_id=committer_id, commit_type=commit_type,
            commit_message=commit_message, commit_cmds=commit_cmds)
        snapshot_content_instance = self.SNAPSHOT_CONTENT_CLASS(
            id=snapshot_id, content=snapshot)
        # Write metadata, content and the updated HEAD atomically.
        transaction_services.run_in_transaction(
            ndb.put_multi,
            [snapshot_metadata_instance, snapshot_content_instance, self])
    def delete(self, committer_id, commit_message, force_deletion=False):
        """Deletes this instance.
        If force_deletion is True, the instance and its entire snapshot
        history are removed permanently. Otherwise the instance is only
        marked deleted, recorded as a 'delete' commit.
        """
        if force_deletion:
            current_version = self.version
            # Version numbers are 1-based.
            version_numbers = [str(num + 1) for num in range(current_version)]
            snapshot_ids = [
                self._get_snapshot_id(self.id, version_number)
                for version_number in version_numbers]
            metadata_keys = [
                ndb.Key(self.SNAPSHOT_METADATA_CLASS, snapshot_id)
                for snapshot_id in snapshot_ids]
            ndb.delete_multi(metadata_keys)
            content_keys = [
                ndb.Key(self.SNAPSHOT_CONTENT_CLASS, snapshot_id)
                for snapshot_id in snapshot_ids]
            ndb.delete_multi(content_keys)
            super(VersionedModel, self).delete()
        else:
            self._require_not_marked_deleted()
            self.deleted = True
            CMD_DELETE = '%s_mark_deleted' % self._AUTOGENERATED_PREFIX
            commit_cmds = [{
                'cmd': CMD_DELETE
            }]
            self._trusted_commit(
                committer_id, self._COMMIT_TYPE_DELETE, commit_message,
                commit_cmds)
    def put(self, *args, **kwargs):
        """For VersionedModels, this method is replaced with commit()."""
        raise NotImplementedError
    def commit(self, committer_id, commit_message, commit_cmds):
        """Saves a version snapshot and updates the model.
        commit_cmds should give sufficient information to reconstruct the
        commit.
        """
        self._require_not_marked_deleted()
        for commit_cmd in commit_cmds:
            if 'cmd' not in commit_cmd:
                raise Exception(
                    'Invalid commit_cmd: %s. Expected a \'cmd\' key.'
                    % commit_cmd)
            if commit_cmd['cmd'].startswith(self._AUTOGENERATED_PREFIX):
                # Bug fix: the format string was missing its '%s'
                # placeholder, so this line raised a TypeError ('not all
                # arguments converted') instead of the intended message.
                raise Exception(
                    'Invalid change list command: %s' % commit_cmd['cmd'])
        commit_type = (
            self._COMMIT_TYPE_CREATE if self.version == 0 else
            self._COMMIT_TYPE_EDIT)
        self._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)
    def revert(self, committer_id, commit_message, version_number):
        """Reverts this instance to the given version, as a new commit."""
        self._require_not_marked_deleted()
        if not self.ALLOW_REVERT:
            raise Exception(
                'Reverting of objects of type %s is not allowed.'
                % self.__class__.__name__)
        CMD_REVERT = '%s_revert_version_number' % self._AUTOGENERATED_PREFIX
        commit_cmds = [{
            'cmd': CMD_REVERT,
            'version_number': version_number
        }]
        # Do not overwrite the version number.
        current_version = self.version
        snapshot_id = self._get_snapshot_id(self.id, version_number)
        self._reconstitute_from_snapshot_id(snapshot_id)
        self.version = current_version
        self._trusted_commit(
            committer_id, self._COMMIT_TYPE_REVERT, commit_message,
            commit_cmds)
    @classmethod
    def get_version(cls, model_instance_id, version_number):
        """Returns a model instance representing the given version.
        The snapshot content is used to populate this model instance. The
        snapshot metadata is not used.
        """
        cls.get(model_instance_id)._require_not_marked_deleted()
        snapshot_id = cls._get_snapshot_id(model_instance_id, version_number)
        return cls(id=model_instance_id)._reconstitute_from_snapshot_id(
            snapshot_id)
    @classmethod
    def get(cls, entity_id, strict=True, version=None):
        """Gets an entity by id. Fails noisily if strict == True."""
        if version is None:
            return super(VersionedModel, cls).get(entity_id, strict=strict)
        else:
            return cls.get_version(entity_id, version)
    @classmethod
    def get_snapshots_metadata(cls, model_instance_id, version_numbers):
        """Returns a list of dicts, each representing a model snapshot.
        One dict is returned for each version number in the list of version
        numbers requested. If any of the version numbers does not exist, an
        error is raised.
        """
        cls.get(model_instance_id)._require_not_marked_deleted()
        snapshot_ids = [
            cls._get_snapshot_id(model_instance_id, version_number)
            for version_number in version_numbers]
        metadata_keys = [
            ndb.Key(cls.SNAPSHOT_METADATA_CLASS, snapshot_id)
            for snapshot_id in snapshot_ids]
        returned_models = ndb.get_multi(metadata_keys)
        for ind, model in enumerate(returned_models):
            if model is None:
                raise Exception(
                    'Invalid version number %s for model %s with id %s'
                    % (version_numbers[ind], cls.__name__, model_instance_id))
        return [{
            'committer_id': model.committer_id,
            'commit_message': model.commit_message,
            'commit_cmds': model.commit_cmds,
            'commit_type': model.commit_type,
            'version_number': version_numbers[ind],
            'created_on': model.created_on.strftime(
                feconf.HUMAN_READABLE_DATETIME_FORMAT),
        } for (ind, model) in enumerate(returned_models)]
class BaseSnapshotMetadataModel(BaseModel):
    """Base class for snapshot metadata classes.

    Records the who/what/why of a single commit (the content itself lives in
    the corresponding BaseSnapshotContentModel subclass).
    The id of this model is computed using VersionedModel.get_snapshot_id().
    """
    # The id of the user who committed this revision.
    committer_id = ndb.StringProperty(required=True)
    # The type of the commit associated with this snapshot, restricted to
    # the choices declared on VersionedModel.
    commit_type = ndb.StringProperty(
        required=True, choices=VersionedModel.COMMIT_TYPE_CHOICES)
    # The commit message associated with this snapshot.
    commit_message = ndb.TextProperty(indexed=False)
    # A sequence of commands that can be used to describe this commit.
    # Represented as a list of dicts.
    commit_cmds = ndb.JsonProperty(indexed=False)
class BaseSnapshotContentModel(BaseModel):
    """Base class for snapshot content classes.

    Holds the serialized model state for one version; metadata about the
    commit is stored separately in BaseSnapshotMetadataModel.
    The id of this model is computed using VersionedModel.get_snapshot_id().
    """
    # The snapshot content, as a JSON blob.
    content = ndb.JsonProperty(indexed=False)
|
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
    """Return a Relay expression clipping an input var to [a_min, a_max]."""
    dtype_info = np.iinfo(dtype)
    # The clip bounds must be representable in the requested integer dtype.
    assert a_min >= dtype_info.min and a_max <= dtype_info.max
    input_var = relay.var("a", shape=shape, dtype=dtype)
    return relay.clip(input_var, a_min=a_min, a_max=a_max)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
    """Check that clip (relu) agrees between host and NPU execution."""
    trials = [
        ((1, 4, 4, 4), 65, 178, "uint8"),
        ((1, 8, 4, 2), 1, 254, "uint8"),
        ((1, 16), 12, 76, "uint8"),
        ((1, 4, 4, 4), 65, 125, "int8"),
        ((1, 8, 4, 2), -100, 100, "int8"),
        ((1, 16), -120, -20, "int8"),
    ]
    np.random.seed(0)
    dtype_info = np.iinfo(dtype)
    for shape, a_min, a_max, trial_dtype in trials:
        # Only exercise the trials defined for the parametrized dtype.
        if trial_dtype != dtype:
            continue
        random_data = np.random.randint(
            low=dtype_info.min,
            high=dtype_info.max + 1,
            size=shape,
            dtype=dtype,
        )
        inputs = {"a": tvm.nd.array(random_data)}
        outputs = []
        for npu in [False, True]:
            model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
            mod = tei.make_module(model, {})
            outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
        tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_relu_failure():
    """Check that unsupported clip configurations are rejected with the
    expected error message."""
    trials = [
        ((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
        ((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
        ((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
        ((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
    ]
    for shape, dtype, a_min, a_max, err_msg in trials:
        relu = _get_model(shape, dtype, a_min, a_max)
        partitioned_mod = tei.make_ethosn_partition(relu)
        tei.test_error(partitioned_mod, {}, err_msg)
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.map_message_type import *
# Protocol message type for the map.removeEntryListener request.
REQUEST_TYPE = MAP_REMOVEENTRYLISTENER
# Expected response message type (the response carries a single bool —
# see decode_response below).
RESPONSE_TYPE = 101
# De-registering a listener is safe to retry.
RETRYABLE = True
def calculate_size(name, registration_id):
    """ Calculates the request payload size"""
    # The payload is just the two strings, back to back.
    return calculate_size_str(name) + calculate_size_str(registration_id)
def encode_request(name, registration_id):
    """ Encode request into client_message"""
    payload_size = calculate_size(name, registration_id)
    client_message = ClientMessage(payload_size=payload_size)
    client_message.set_message_type(REQUEST_TYPE)
    client_message.set_retryable(RETRYABLE)
    # Parameters are appended in protocol order: map name first, then the
    # listener registration id.
    for value in (name, registration_id):
        client_message.append_str(value)
    client_message.update_frame_length()
    return client_message
def decode_response(client_message, to_object=None):
    """ Decode response from client message"""
    # The response payload is a single boolean (whether the listener was
    # removed); to_object is unused for primitive responses.
    return dict(response=client_message.read_bool())
|
"""Tests for checks."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.checks import hints
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import config_file as rdf_config_file
from grr.lib.rdfvalues import protodict as rdf_protodict
class HintsTests(test_lib.GRRBaseTest):
  """Test hint operations."""
  def testCheckOverlay(self):
    """Overlay(hint1, hint2) should populate hint2 with the values of hint1."""
    # Fully populated hint.
    full = {
        "problem": "Terminator needs trousers.\n",
        "fix": "Give me your clothes.\n",
        "format": "{mission}, {target}\n",
        "summary": "I'll be back."
    }
    # Partial hint
    partial = {
        "problem": "Terminator needs to go shopping.",
        "fix": "Phased plasma rifle in the 40-watt range.",
        "format": "",
        "summary": ""
    }
    # Partial overlaid with full.
    overlay = {
        "problem": "Terminator needs to go shopping.",
        "fix": "Phased plasma rifle in the 40-watt range.",
        "format": "{mission}, {target}",
        "summary": "I'll be back."
    }
    # Empty hint.
    empty = {"problem": "", "fix": "", "format": "", "summary": ""}
    # Empty hint should not clobber populated hint.
    starts_full = full.copy()
    starts_empty = empty.copy()
    hints.Overlay(starts_full, starts_empty)
    self.assertDictEqual(full, starts_full)
    self.assertDictEqual(empty, starts_empty)
    # Populate empty hint from partially populated hint.
    starts_partial = partial.copy()
    starts_empty = empty.copy()
    hints.Overlay(starts_empty, starts_partial)
    self.assertDictEqual(partial, starts_partial)
    self.assertDictEqual(partial, starts_empty)
    # Overlay the full and partial hints to get the hybrid.
    # Note from the assertions: only empty fields of the first argument are
    # filled in from the second; populated fields win.
    starts_full = full.copy()
    starts_partial = partial.copy()
    hints.Overlay(starts_partial, starts_full)
    self.assertDictEqual(full, starts_full)
    self.assertDictEqual(overlay, starts_partial)
  def testRdfFormatter(self):
    """Hints format RDF values with arbitrary values and attributes."""
    # Create a complex RDF value
    rdf = rdf_client.ClientSummary()
    rdf.system_info.system = "Linux"
    rdf.system_info.node = "coreai.skynet.com"
    # Users (repeated)
    rdf.users = [rdf_client.User(username=u) for u in ("root", "jconnor")]
    # Interface (nested, repeated)
    addresses = [
        rdf_client.NetworkAddress(human_readable=a)
        for a in ("1.1.1.1", "2.2.2.2", "3.3.3.3")
    ]
    eth0 = rdf_client.Interface(ifname="eth0", addresses=addresses[:2])
    # NOTE(review): addresses[2] passes a single value rather than a list —
    # presumably coerced by the RDF layer; confirm.
    ppp0 = rdf_client.Interface(ifname="ppp0", addresses=addresses[2])
    rdf.interfaces = [eth0, ppp0]
    template = ("{system_info.system} {users.username} {interfaces.ifname} "
                "{interfaces.addresses.human_readable}\n")
    hinter = hints.Hinter(template=template)
    # Repeated fields are joined with commas; the trailing newline in the
    # template is stripped by Render (per the expected value below).
    expected = "Linux root,jconnor eth0,ppp0 1.1.1.1,2.2.2.2,3.3.3.3"
    result = hinter.Render(rdf)
    self.assertEqual(expected, result)
  def testRdfFormatterHandlesKeyValuePair(self):
    """rdfvalue.KeyValue items need special handling to expand k and v."""
    key = rdf_protodict.DataBlob().SetValue("skynet")
    value = rdf_protodict.DataBlob().SetValue([1997])
    rdf = rdf_protodict.KeyValue(k=key, v=value)
    template = "{k}: {v}"
    hinter = hints.Hinter(template=template)
    expected = "skynet: 1997"
    result = hinter.Render(rdf)
    self.assertEqual(expected, result)
  def testRdfFormatterAttributedDict(self):
    """Hints can reach into AttributedDict config values by attribute."""
    sshd = rdf_config_file.SshdConfig()
    sshd.config = rdf_protodict.AttributedDict(skynet="operational")
    template = "{config.skynet}"
    hinter = hints.Hinter(template=template)
    expected = "operational"
    result = hinter.Render(sshd)
    self.assertEqual(expected, result)
  def testRdfFormatterFanOut(self):
    """Nested dicts/lists are flattened into comma- and colon-joined text."""
    rdf = rdf_protodict.Dict()
    user1 = rdf_client.User(username="drexler")
    user2 = rdf_client.User(username="joy")
    rdf["cataclysm"] = "GreyGoo"
    rdf["thinkers"] = [user1, user2]
    rdf["reference"] = {
        "ecophage": ["bots", ["nanobots", ["picobots"]]],
        "doomsday": {
            "books": ["cats cradle", "prey"]
        }
    }
    template = ("{cataclysm}; {thinkers.username}; {reference.ecophage}; "
                "{reference.doomsday}\n")
    hinter = hints.Hinter(template=template)
    expected = ("GreyGoo; drexler,joy; bots,nanobots,picobots; "
                "books:cats cradle,prey")
    result = hinter.Render(rdf)
    self.assertEqual(expected, result)
  def testStatModeFormat(self):
    """st_mode values render as ls-style permission strings."""
    # 33204 == 0o100664 -> regular file, rw-rw-r--.
    rdf = rdf_client.StatEntry(st_mode=33204)
    expected = "-rw-rw-r--"
    template = "{st_mode}"
    hinter = hints.Hinter(template=template)
    result = hinter.Render(rdf)
    self.assertEqual(expected, result)
def main(argv):
  """Entry point: delegate to the GRR test runner."""
  test_lib.main(argv)
if __name__ == "__main__":
  flags.StartMain(main)
|
import os
import pexpect
import pytest
import shlex
import shutil
import socket
import signal
from impala_shell_results import get_shell_cmd_result, cancellation_helper
from subprocess import Popen, PIPE
from tests.common.impala_service import ImpaladService
from tests.verifiers.metric_verifier import MetricVerifier
from time import sleep
# Path to the interactive shell launcher inside the Impala checkout.
SHELL_CMD = "%s/bin/impala-shell.sh" % os.environ['IMPALA_HOME']
# The shell's readline history file; it is moved aside for the duration of
# this test module (see setup_class/teardown_class) and restored afterwards.
SHELL_HISTORY_FILE = os.path.expanduser("~/.impalahistory")
TMP_HISTORY_FILE = os.path.expanduser("~/.impalahistorytmp")
class TestImpalaShellInteractive(object):
  """Test the impala shell interactively"""
  def _send_cmd_to_shell(self, p, cmd):
    """Given an open shell process, write a cmd to stdin
    This method takes care of adding the delimiter and EOL, callers should send the raw
    command.
    """
    p.stdin.write("%s;\n" % cmd)
    p.stdin.flush()
  def _start_new_shell_process(self, args=None):
    """Starts a shell process and returns the process handle"""
    cmd = "%s %s" % (SHELL_CMD, args) if args else SHELL_CMD
    # BUG FIX: this previously ran Popen(shlex.split(SHELL_CMD), shell=True,
    # ...), which (a) silently dropped any caller-supplied args, and
    # (b) combined shell=True with an argument list, so on POSIX only the
    # first list element was executed. Split the full command line and exec
    # it directly instead.
    return Popen(shlex.split(cmd), stdout=PIPE,
                 stdin=PIPE, stderr=PIPE)
  @classmethod
  def setup_class(cls):
    """Move the user's shell history out of the way for the test run."""
    if os.path.exists(SHELL_HISTORY_FILE):
      shutil.move(SHELL_HISTORY_FILE, TMP_HISTORY_FILE)
  @classmethod
  def teardown_class(cls):
    """Restore the shell history saved by setup_class."""
    if os.path.exists(TMP_HISTORY_FILE): shutil.move(TMP_HISTORY_FILE, SHELL_HISTORY_FILE)
  @pytest.mark.execute_serially
  def test_escaped_quotes(self):
    """Test escaping quotes"""
    # test escaped quotes outside of quotes
    result = run_impala_shell_interactive("select \\'bc';")
    assert "could not match input" in result.stderr
    result = run_impala_shell_interactive("select \\\"bc\";")
    assert "could not match input" in result.stderr
    # test escaped quotes within quotes
    result = run_impala_shell_interactive("select 'ab\\'c';")
    assert "Fetched 1 row(s)" in result.stderr
    result = run_impala_shell_interactive("select \"ab\\\"c\";")
    assert "Fetched 1 row(s)" in result.stderr
  @pytest.mark.execute_serially
  def test_cancellation(self):
    """A SIGINT to the shell cancels the in-flight query."""
    impalad = ImpaladService(socket.getfqdn())
    impalad.wait_for_num_in_flight_queries(0)
    command = "select sleep(10000);"
    p = self._start_new_shell_process()
    self._send_cmd_to_shell(p, command)
    sleep(1)
    # iterate through all processes with psutil
    shell_pid = cancellation_helper()
    sleep(2)
    os.kill(shell_pid, signal.SIGINT)
    result = get_shell_cmd_result(p)
    assert impalad.wait_for_num_in_flight_queries(0)
  @pytest.mark.execute_serially
  def test_unicode_input(self):
    "Test queries containing non-ascii input"
    # test a unicode query spanning multiple lines
    unicode_text = u'\ufffd'
    args = "select '%s'\n;" % unicode_text.encode('utf-8')
    result = run_impala_shell_interactive(args)
    assert "Fetched 1 row(s)" in result.stderr
  @pytest.mark.execute_serially
  def test_welcome_string(self):
    """Test that the shell's welcome message is only printed once
    when the shell is started. Ensure it is not reprinted on errors.
    Regression test for IMPALA-1153
    """
    result = run_impala_shell_interactive('asdf;')
    assert result.stdout.count("Welcome to the Impala shell") == 1
    result = run_impala_shell_interactive('select * from non_existent_table;')
    assert result.stdout.count("Welcome to the Impala shell") == 1
  @pytest.mark.execute_serially
  def test_bash_cmd_timing(self):
    """Test existence of time output in bash commands run from shell"""
    args = "! ls;"
    result = run_impala_shell_interactive(args)
    assert "Executed in" in result.stderr
  @pytest.mark.execute_serially
  def test_reconnect(self):
    """Regression Test for IMPALA-1235
    Verifies that a connect command by the user is honoured.
    """
    def get_num_open_sessions(impala_service):
      """Helper method to retrieve the number of open sessions"""
      return impala_service.get_metric_value('impala-server.num-open-beeswax-sessions')
    hostname = socket.getfqdn()
    initial_impala_service = ImpaladService(hostname)
    target_impala_service = ImpaladService(hostname, webserver_port=25001,
        beeswax_port=21001, be_port=22001)
    # Get the initial state for the number of sessions.
    num_sessions_initial = get_num_open_sessions(initial_impala_service)
    num_sessions_target = get_num_open_sessions(target_impala_service)
    # Connect to localhost:21000 (default)
    p = self._start_new_shell_process()
    sleep(2)
    # Make sure we're connected <hostname>:21000
    assert get_num_open_sessions(initial_impala_service) == num_sessions_initial + 1, \
        "Not connected to %s:21000" % hostname
    self._send_cmd_to_shell(p, "connect %s:21001" % hostname)
    # Wait for a little while
    sleep(2)
    # The number of sessions on the target impalad should have been incremented.
    assert get_num_open_sessions(target_impala_service) == num_sessions_target + 1, \
        "Not connected to %s:21001" % hostname
    # The number of sessions on the initial impalad should have been decremented.
    assert get_num_open_sessions(initial_impala_service) == num_sessions_initial, \
        "Connection to %s:21000 should have been closed" % hostname
  @pytest.mark.execute_serially
  def test_ddl_queries_are_closed(self):
    """Regression test for IMPALA-1317
    The shell does not call close() for alter, use and drop queries, leaving them in
    flight. This test issues those queries in interactive mode, and checks the debug
    webpage to confirm that they've been closed.
    TODO: Add every statement type.
    """
    TMP_DB = 'inflight_test_db'
    TMP_TBL = 'tmp_tbl'
    MSG = '%s query should be closed'
    NUM_QUERIES = 'impala-server.num-queries'
    impalad = ImpaladService(socket.getfqdn())
    p = self._start_new_shell_process()
    try:
      start_num_queries = impalad.get_metric_value(NUM_QUERIES)
      self._send_cmd_to_shell(p, 'create database if not exists %s' % TMP_DB)
      self._send_cmd_to_shell(p, 'use %s' % TMP_DB)
      impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 2)
      assert impalad.wait_for_num_in_flight_queries(0), MSG % 'use'
      self._send_cmd_to_shell(p, 'create table %s(i int)' % TMP_TBL)
      self._send_cmd_to_shell(p, 'alter table %s add columns (j int)' % TMP_TBL)
      impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 4)
      assert impalad.wait_for_num_in_flight_queries(0), MSG % 'alter'
      self._send_cmd_to_shell(p, 'drop table %s' % TMP_TBL)
      impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 5)
      assert impalad.wait_for_num_in_flight_queries(0), MSG % 'drop'
    finally:
      run_impala_shell_interactive("drop table if exists %s.%s;" % (TMP_DB, TMP_TBL))
      run_impala_shell_interactive("drop database if exists foo;")
  @pytest.mark.execute_serially
  def test_multiline_queries_in_history(self):
    """Test to ensure that multiline queries with comments are preserved in history
    Ensure that multiline queries are preserved when they're read back from history.
    Additionally, also test that comments are preserved.
    """
    # regex for pexpect, a shell prompt is expected after each command..
    prompt_regex = '.*%s:2100.*' % socket.getfqdn()
    # readline gets its input from tty, so using stdin does not work.
    child_proc = pexpect.spawn(SHELL_CMD)
    queries = ["select\n1--comment;",
               "select /*comment*/\n1;",
               "select\n/*comm\nent*/\n1;"]
    for query in queries:
      child_proc.expect(prompt_regex)
      child_proc.sendline(query)
    child_proc.expect(prompt_regex)
    child_proc.sendline('quit;')
    p = self._start_new_shell_process()
    self._send_cmd_to_shell(p, 'history')
    result = get_shell_cmd_result(p)
    for query in queries:
      assert query in result.stderr, "'%s' not in '%s'" % (query, result.stderr)
def run_impala_shell_interactive(command, shell_args=''):
  """Runs a command in the Impala shell interactively.

  Spawns a fresh shell process (optionally with extra shell_args), feeds it
  *command* on stdin, and returns the result from get_shell_cmd_result.
  """
  cmd = "%s %s" % (SHELL_CMD, shell_args)
  # workaround to make the child 'utf-8' compatible, since piping defaults
  # to ascii. Copy the environment instead of mutating os.environ so the
  # override does not leak into the test runner process itself.
  my_env = os.environ.copy()
  my_env['PYTHONIOENCODING'] = 'utf-8'
  # BUG FIX: shell=True combined with a list meant everything after the
  # first list element was handed to /bin/sh, not the impala shell — the
  # split command line must be exec'd directly.
  p = Popen(shlex.split(cmd), stdout=PIPE,
            stdin=PIPE, stderr=PIPE, env=my_env)
  p.stdin.write(command + "\n")
  p.stdin.flush()
  return get_shell_cmd_result(p)
|
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import MAXDB_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.maxdb.enumeration import Enumeration
from plugins.dbms.maxdb.filesystem import Filesystem
from plugins.dbms.maxdb.fingerprint import Fingerprint
from plugins.dbms.maxdb.syntax import Syntax
from plugins.dbms.maxdb.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class MaxDBMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
    """
    This class defines SAP MaxDB methods
    """
    def __init__(self):
        # System databases are excluded from enumeration.
        self.excludeDbsList = MAXDB_SYSTEM_DBS
        # Initialise each base class explicitly, in declaration order.
        Syntax.__init__(self)
        Fingerprint.__init__(self)
        Enumeration.__init__(self)
        Filesystem.__init__(self)
        Miscellaneous.__init__(self)
        Takeover.__init__(self)
# Register the MaxDB-specific escaping routine with the global unescaper.
unescaper[DBMS.MAXDB] = Syntax.escape
|
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest
class ApiFMaxTest(unittest.TestCase):
    """ApiFMaxTest: checks paddle.fmax against numpy.fmax in both the
    static-graph and dynamic (eager) APIs."""
    def setUp(self):
        """setUp: build inputs and numpy reference outputs."""
        # Run on GPU when available, otherwise CPU.
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()
        self.input_x = np.random.rand(10, 15).astype("float32")
        self.input_y = np.random.rand(10, 15).astype("float32")
        # 1-D input used to exercise broadcasting against input_x.
        self.input_z = np.random.rand(15).astype("float32")
        # NOTE(review): casting nan/inf to int64 yields platform-defined
        # values; the comparison stays consistent because the same cast
        # feeds both paddle and the numpy reference — confirm intended.
        self.input_a = np.array([0, np.nan, np.nan]).astype('int64')
        self.input_b = np.array([2, np.inf, -np.inf]).astype('int64')
        self.input_c = np.array([4, 1, 3]).astype('int64')
        self.np_expected1 = np.fmax(self.input_x, self.input_y)
        self.np_expected2 = np.fmax(self.input_x, self.input_z)
        self.np_expected3 = np.fmax(self.input_a, self.input_c)
        self.np_expected4 = np.fmax(self.input_b, self.input_c)
    def test_static_api(self):
        """test_static_api: fmax under static graphs, one program per case."""
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.static.data("x", shape=[10, 15], dtype="float32")
            data_y = paddle.static.data("y", shape=[10, 15], dtype="float32")
            result_fmax = paddle.fmax(data_x, data_y)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"x": self.input_x,
                                 "y": self.input_y},
                           fetch_list=[result_fmax])
        self.assertTrue(np.allclose(res, self.np_expected1))
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.static.data("x", shape=[10, 15], dtype="float32")
            data_z = paddle.static.data("z", shape=[15], dtype="float32")
            result_fmax = paddle.fmax(data_x, data_z)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"x": self.input_x,
                                 "z": self.input_z},
                           fetch_list=[result_fmax])
        self.assertTrue(np.allclose(res, self.np_expected2))
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_a = paddle.static.data("a", shape=[3], dtype="int64")
            data_c = paddle.static.data("c", shape=[3], dtype="int64")
            result_fmax = paddle.fmax(data_a, data_c)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"a": self.input_a,
                                 "c": self.input_c},
                           fetch_list=[result_fmax])
        self.assertTrue(np.allclose(res, self.np_expected3))
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_b = paddle.static.data("b", shape=[3], dtype="int64")
            data_c = paddle.static.data("c", shape=[3], dtype="int64")
            result_fmax = paddle.fmax(data_b, data_c)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"b": self.input_b,
                                 "c": self.input_c},
                           fetch_list=[result_fmax])
        self.assertTrue(np.allclose(res, self.np_expected4))
    def test_dynamic_api(self):
        """test_dynamic_api: fmax in eager mode, including broadcasting."""
        paddle.disable_static()
        x = paddle.to_tensor(self.input_x)
        y = paddle.to_tensor(self.input_y)
        z = paddle.to_tensor(self.input_z)
        a = paddle.to_tensor(self.input_a)
        b = paddle.to_tensor(self.input_b)
        c = paddle.to_tensor(self.input_c)
        res = paddle.fmax(x, y)
        res = res.numpy()
        self.assertTrue(np.allclose(res, self.np_expected1))
        # test broadcast
        res = paddle.fmax(x, z)
        res = res.numpy()
        self.assertTrue(np.allclose(res, self.np_expected2))
        res = paddle.fmax(a, c)
        res = res.numpy()
        self.assertTrue(np.allclose(res, self.np_expected3))
        res = paddle.fmax(b, c)
        res = res.numpy()
        self.assertTrue(np.allclose(res, self.np_expected4))
class TestElementwiseFmaxOp(OpTest):
    """TestElementwiseFmaxOp: output and gradient checks for the
    elementwise_fmax op on well-separated float inputs."""
    def setUp(self):
        """setUp"""
        self.op_type = "elementwise_fmax"
        # If x and y have the same value, the max() is not differentiable.
        # So we generate test data by the following method
        # to avoid them being too close to each other.
        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        self.inputs = {'X': x, 'Y': y}
        # Reference output comes from numpy's fmax.
        self.outputs = {'Out': np.fmax(self.inputs['X'], self.inputs['Y'])}
    def test_check_output(self):
        """test_check_output"""
        self.check_output()
    def test_check_grad_normal(self):
        """test_check_grad_normal"""
        self.check_grad(['X', 'Y'], 'Out')
    # NOTE(review): "ingore" is a long-standing typo; the method names are
    # part of the public test ids, so they are kept as-is.
    def test_check_grad_ingore_x(self):
        """test_check_grad_ingore_x"""
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
    def test_check_grad_ingore_y(self):
        """test_check_grad_ingore_y"""
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
class TestElementwiseFmax2Op(OpTest):
    """TestElementwiseFmax2Op: same checks as TestElementwiseFmaxOp but with
    NaNs injected into Y to exercise fmax's NaN-ignoring semantics."""
    def setUp(self):
        """setUp"""
        self.op_type = "elementwise_fmax"
        # If x and y have the same value, the max() is not differentiable.
        # So we generate test data by the following method
        # to avoid them being too close to each other.
        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        # Inject NaNs into part of Y; np.fmax picks the non-NaN operand.
        y[2, 10:] = np.nan
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.fmax(self.inputs['X'], self.inputs['Y'])}
    def test_check_output(self):
        """test_check_output"""
        self.check_output()
    def test_check_grad_normal(self):
        """test_check_grad_normal"""
        self.check_grad(['X', 'Y'], 'Out')
    def test_check_grad_ingore_x(self):
        """test_check_grad_ingore_x"""
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
    def test_check_grad_ingore_y(self):
        """test_check_grad_ingore_y"""
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
|
import base64
import struct
import socket
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_2_parser
from ryu.lib import hub
from ryu.lib import mac
LOG = logging.getLogger('ryu.lib.ofctl_v1_2')
# Seconds to wait for a stats reply before giving up (see send_stats_request).
DEFAULT_TIMEOUT = 1.0
def str_to_int(src):
    """Convert a decimal or 0x-prefixed hex string to int; pass non-strings
    through unchanged."""
    if not isinstance(src, str):
        return src
    base = 16 if src.startswith(("0x", "0X")) else 10
    return int(src, base)
def to_action(dp, dic):
    """Build an OFPAction from its dict description.

    Returns None when 'type' names no known action; the caller treats such
    entries as instructions instead.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    action_type = dic.get('type')
    if action_type == 'OUTPUT':
        out_port = int(dic.get('port', ofp.OFPP_ANY))
        max_len = int(dic.get('max_len', ofp.OFPCML_MAX))
        return parser.OFPActionOutput(out_port, max_len)
    if action_type == 'COPY_TTL_OUT':
        return parser.OFPActionCopyTtlOut()
    if action_type == 'COPY_TTL_IN':
        return parser.OFPActionCopyTtlIn()
    if action_type == 'SET_MPLS_TTL':
        return parser.OFPActionSetMplsTtl(int(dic.get('mpls_ttl')))
    if action_type == 'DEC_MPLS_TTL':
        return parser.OFPActionDecMplsTtl()
    if action_type == 'PUSH_VLAN':
        return parser.OFPActionPushVlan(int(dic.get('ethertype')))
    if action_type == 'POP_VLAN':
        return parser.OFPActionPopVlan()
    if action_type == 'PUSH_MPLS':
        return parser.OFPActionPushMpls(int(dic.get('ethertype')))
    if action_type == 'POP_MPLS':
        return parser.OFPActionPopMpls(int(dic.get('ethertype')))
    if action_type == 'SET_QUEUE':
        return parser.OFPActionSetQueue(int(dic.get('queue_id')))
    if action_type == 'GROUP':
        return parser.OFPActionGroup(int(dic.get('group_id')))
    if action_type == 'SET_NW_TTL':
        return parser.OFPActionSetNwTtl(int(dic.get('nw_ttl')))
    if action_type == 'DEC_NW_TTL':
        return parser.OFPActionDecNwTtl()
    if action_type == 'SET_FIELD':
        return parser.OFPActionSetField(
            **{dic.get('field'): dic.get('value')})
    return None
def to_actions(dp, acts):
    """Convert a list of action/instruction dicts to OFP instructions.

    Plain actions are gathered into one APPLY_ACTIONS instruction, which is
    always appended last — even when the action list is empty.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    instructions = []
    apply_actions = []
    for entry in acts:
        action = to_action(dp, entry)
        if action is not None:
            apply_actions.append(action)
            continue
        entry_type = entry.get('type')
        if entry_type == 'GOTO_TABLE':
            instructions.append(
                parser.OFPInstructionGotoTable(int(entry.get('table_id'))))
        elif entry_type == 'WRITE_METADATA':
            metadata = str_to_int(entry.get('metadata'))
            if 'metadata_mask' in entry:
                metadata_mask = str_to_int(entry['metadata_mask'])
            else:
                metadata_mask = parser.UINT64_MAX
            instructions.append(
                parser.OFPInstructionWriteMetadata(metadata, metadata_mask))
        else:
            LOG.debug('Unknown action type: %s' % entry_type)
    instructions.append(
        parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, apply_actions))
    return instructions
def action_to_str(act):
    """Render a parsed OFPAction as its ofctl display string."""
    # Actions with no parameters.
    bare = {
        ofproto_v1_2.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
        ofproto_v1_2.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
        ofproto_v1_2.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
        ofproto_v1_2.OFPAT_POP_VLAN: 'POP_VLAN',
        ofproto_v1_2.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
    }
    # Actions rendered as '<PREFIX>:<attribute value>'.
    with_arg = {
        ofproto_v1_2.OFPAT_OUTPUT: ('OUTPUT:', 'port'),
        ofproto_v1_2.OFPAT_SET_MPLS_TTL: ('SET_MPLS_TTL:', 'mpls_ttl'),
        ofproto_v1_2.OFPAT_PUSH_VLAN: ('PUSH_VLAN:', 'ethertype'),
        ofproto_v1_2.OFPAT_PUSH_MPLS: ('PUSH_MPLS:', 'ethertype'),
        ofproto_v1_2.OFPAT_POP_MPLS: ('POP_MPLS:', 'ethertype'),
        ofproto_v1_2.OFPAT_SET_QUEUE: ('SET_QUEUE:', 'queue_id'),
        ofproto_v1_2.OFPAT_GROUP: ('GROUP:', 'group_id'),
        ofproto_v1_2.OFPAT_SET_NW_TTL: ('SET_NW_TTL:', 'nw_ttl'),
    }
    action_type = act.cls_action_type
    if action_type in bare:
        return bare[action_type]
    if action_type in with_arg:
        prefix, attr = with_arg[action_type]
        return prefix + str(getattr(act, attr))
    if action_type == ofproto_v1_2.OFPAT_SET_FIELD:
        return 'SET_FIELD: {%s:%s}' % (act.key, act.value)
    return 'UNKNOWN'
def actions_to_str(instructions):
    """Render a list of OFP instructions as display strings."""
    rendered = []
    for instruction in instructions:
        if isinstance(instruction,
                      ofproto_v1_2_parser.OFPInstructionActions):
            rendered.extend(
                action_to_str(action) for action in instruction.actions)
        elif isinstance(instruction,
                        ofproto_v1_2_parser.OFPInstructionGotoTable):
            rendered.append('GOTO_TABLE:' + str(instruction.table_id))
        elif isinstance(instruction,
                        ofproto_v1_2_parser.OFPInstructionWriteMetadata):
            if instruction.metadata_mask:
                rendered.append('WRITE_METADATA:0x%x/0x%x' % (
                    instruction.metadata, instruction.metadata_mask))
            else:
                rendered.append('WRITE_METADATA:0x%x' % instruction.metadata)
        # Unknown instruction types are silently skipped.
    return rendered
def to_match(dp, attrs):
    """Build an OFPMatch from a dict of (possibly old-style) field names.

    NOTE: mutates *attrs* in place — for ARP matches, nw_src/nw_dst are
    renamed to arp_spa/arp_tpa. tp_src/tp_dst are translated to the TCP or
    UDP field based on nw_proto/ip_proto; any other protocol value raises
    KeyError (the conv dict lookup below).
    """
    # Per-field value converters (old and new names both listed).
    convert = {'in_port': int,
               'in_phy_port': int,
               'metadata': to_match_metadata,
               'dl_dst': to_match_eth,
               'dl_src': to_match_eth,
               'eth_dst': to_match_eth,
               'eth_src': to_match_eth,
               'dl_type': int,
               'eth_type': int,
               'dl_vlan': to_match_vid,
               'vlan_vid': to_match_vid,
               'vlan_pcp': int,
               'ip_dscp': int,
               'ip_ecn': int,
               'nw_proto': int,
               'ip_proto': int,
               'nw_src': to_match_ip,
               'nw_dst': to_match_ip,
               'ipv4_src': to_match_ip,
               'ipv4_dst': to_match_ip,
               'tp_src': int,
               'tp_dst': int,
               'tcp_src': int,
               'tcp_dst': int,
               'udp_src': int,
               'udp_dst': int,
               'sctp_src': int,
               'sctp_dst': int,
               'icmpv4_type': int,
               'icmpv4_code': int,
               'arp_op': int,
               'arp_spa': to_match_ip,
               'arp_tpa': to_match_ip,
               'arp_sha': to_match_eth,
               'arp_tha': to_match_eth,
               'ipv6_src': to_match_ip,
               'ipv6_dst': to_match_ip,
               'ipv6_flabel': int,
               'icmpv6_type': int,
               'icmpv6_code': int,
               'ipv6_nd_target': to_match_ip,
               'ipv6_nd_sll': to_match_eth,
               'ipv6_nd_tll': to_match_eth,
               'mpls_label': int,
               'mpls_tc': int}
    # Mapping from pre-OF1.2 field names to their OXM equivalents.
    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}
    # For ARP flows, the old nw_src/nw_dst names actually describe the ARP
    # sender/target protocol addresses; rewrite them unless the caller
    # already supplied the explicit arp_* fields.
    if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
            attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        if 'nw_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['nw_src']
            del attrs['nw_src']
        if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['nw_dst']
            del attrs['nw_dst']
    kwargs = {}
    for key, value in attrs.items():
        if key in convert:
            value = convert[key](value)
        if key in keys:
            # For old field name
            key = keys[key]
        if key == 'tp_src' or key == 'tp_dst':
            # TCP/UDP port
            conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
                                       'tp_dst': 'tcp_dst'},
                    inet.IPPROTO_UDP: {'tp_src': 'udp_src',
                                       'tp_dst': 'udp_dst'}}
            ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
            key = conv[ip_proto][key]
            kwargs[key] = value
        else:
            # others
            kwargs[key] = value
    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_eth(value):
    """Parse an Ethernet match value: 'addr' or ('addr', 'mask') for
    'addr/mask' input."""
    if '/' not in value:
        return value
    parts = value.split('/')
    return parts[0], parts[1]
def to_match_ip(value):
    """Parse an IP match value: plain address passes through; CIDR input
    becomes an (address, netmask) string pair."""
    if '/' not in value:
        return value
    network = netaddr.ip.IPNetwork(value)
    return str(network.ip), str(network.netmask)
def to_match_vid(value):
    """Parse a VLAN id match value.

    A decimal int (or decimal string) is treated as a raw VLAN tag and the
    OFPVID_PRESENT (0x1000) bit is OR-ed in automatically. A hexadecimal
    string is treated as a literal oxm_value (possibly already containing
    OFPVID_PRESENT), and a 'value/mask' string yields a (value, mask) pair —
    neither gets OFPVID_PRESENT applied.
    """
    if isinstance(value, int):
        # Described as a decimal int value.
        return value | ofproto_v1_2.OFPVID_PRESENT
    if '/' in value:
        parts = value.split('/')
        return int(parts[0], 0), int(parts[1], 0)
    if value.isdigit():
        # Described as a decimal string value.
        return int(value, 10) | ofproto_v1_2.OFPVID_PRESENT
    return int(value, 0)
def to_match_metadata(value):
    """Parse a metadata match value: 'value' or 'value/mask' (dec or hex)."""
    if '/' not in value:
        return str_to_int(value)
    parts = value.split('/')
    return str_to_int(parts[0]), str_to_int(parts[1])
def match_to_str(ofmatch):
    """Convert an OFPMatch to a dict of display-friendly field strings,
    using the pre-OF1.2 field names where one exists."""
    new_to_old = {'eth_src': 'dl_src',
                  'eth_dst': 'dl_dst',
                  'eth_type': 'dl_type',
                  'vlan_vid': 'dl_vlan',
                  'ipv4_src': 'nw_src',
                  'ipv4_dst': 'nw_dst',
                  'ip_proto': 'nw_proto',
                  'tcp_src': 'tp_src',
                  'tcp_dst': 'tp_dst',
                  'udp_src': 'tp_src',
                  'udp_dst': 'tp_dst'
                  }
    result = {}
    oxm_fields = ofmatch.to_jsondict()['OFPMatch']['oxm_fields']
    for oxm_field in oxm_fields:
        tlv = oxm_field['OXMTlv']
        key = new_to_old.get(tlv['field'], tlv['field'])
        mask = tlv['mask']
        value = tlv['value']
        if key == 'dl_vlan':
            value = match_vid_to_str(value, mask)
        elif key == 'metadata':
            value = match_metadata_to_str(value, mask)
        elif mask is not None:
            value = value + '/' + mask
        # First occurrence wins (e.g. tcp_src and udp_src both map to
        # tp_src).
        result.setdefault(key, value)
    return result
def match_metadata_to_str(value, mask):
    """Render metadata as decimal 'value/mask', omitting a falsy mask."""
    if mask:
        return '%d/%d' % (value, mask)
    return '%d' % value
def match_vid_to_str(value, mask):
    """Render a VLAN id match: hex 'value/mask' when masked; otherwise the
    decimal tag (PRESENT bit stripped) or a bare hex value."""
    if mask is not None:
        return '0x%04x/0x%04x' % (value, mask)
    if value & ofproto_v1_2.OFPVID_PRESENT:
        # Tagged frame: show just the VLAN number.
        return str(value & ~ofproto_v1_2.OFPVID_PRESENT)
    return '0x%04x' % value
def send_stats_request(dp, stats, waiters, msgs):
    """Send a stats request and block until the reply arrives or times out.

    Registers (lock, msgs) in the shared *waiters* table keyed by datapath
    id and xid; presumably the OFP receive handler (not shown here) appends
    reply messages to *msgs* and sets the event — confirm against the caller
    of this module.
    """
    dp.set_xid(stats)
    waiters_per_dp = waiters.setdefault(dp.id, {})
    lock = hub.Event()
    waiters_per_dp[stats.xid] = (lock, msgs)
    dp.send_msg(stats)
    # Block for at most DEFAULT_TIMEOUT seconds.
    lock.wait(timeout=DEFAULT_TIMEOUT)
    if not lock.is_set():
        # Timed out: deregister so a late reply is not delivered.
        del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
    """Query switch description stats; returns {str(dpid): desc_dict}."""
    request = dp.ofproto_parser.OFPDescStatsRequest(dp)
    replies = []
    send_stats_request(dp, request, waiters, replies)
    desc_fields = {}
    for reply in replies:
        body = reply.body
        desc_fields = {'mfr_desc': body.mfr_desc,
                       'hw_desc': body.hw_desc,
                       'sw_desc': body.sw_desc,
                       'serial_num': body.serial_num,
                       'dp_desc': body.dp_desc}
    return {str(dp.id): desc_fields}
def get_queue_stats(dp, waiters):
    """Query per-queue counters for all ports/queues; returns
    {str(dpid): [queue_dict, ...]}."""
    ofp = dp.ofproto
    request = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY,
                                                     ofp.OFPQ_ALL)
    replies = []
    send_stats_request(dp, request, waiters, replies)
    queues = []
    for reply in replies:
        for stat in reply.body:
            queues.append({'port_no': stat.port_no,
                           'queue_id': stat.queue_id,
                           'tx_bytes': stat.tx_bytes,
                           'tx_errors': stat.tx_errors,
                           'tx_packets': stat.tx_packets})
    return {str(dp.id): queues}
def get_flow_stats(dp, waiters, flow=None):
    """Query flow stats and return {dpid: [flow dicts]}.

    :param dp: datapath to query
    :param waiters: per-datapath waiter registry used by send_stats_request
    :param flow: optional dict of filters ('table_id', 'out_port',
                 'out_group', 'cookie', 'cookie_mask', 'match'); defaults
                 filter nothing.
    """
    # Fix: the signature previously used a mutable default (flow={}),
    # which is shared across all calls; use None as the sentinel instead.
    if flow is None:
        flow = {}
    table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
    out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))
    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, table_id, out_port, out_group, cookie, cookie_mask, match)
    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    flows = []
    for msg in msgs:
        for stats in msg.body:
            # Convert instructions/match back to the string forms the
            # REST layer exposes.
            actions = actions_to_str(stats.instructions)
            match = match_to_str(stats.match)
            s = {'priority': stats.priority,
                 'cookie': stats.cookie,
                 'idle_timeout': stats.idle_timeout,
                 'hard_timeout': stats.hard_timeout,
                 'actions': actions,
                 'match': match,
                 'byte_count': stats.byte_count,
                 'duration_sec': stats.duration_sec,
                 'duration_nsec': stats.duration_nsec,
                 'packet_count': stats.packet_count,
                 'table_id': stats.table_id,
                 'length': stats.length}
            flows.append(s)
    flows = {str(dp.id): flows}
    return flows
def get_port_stats(dp, waiters):
    """Query per-port traffic counters and return {dpid: [port dicts]}."""
    request = dp.ofproto_parser.OFPPortStatsRequest(
        dp, dp.ofproto.OFPP_ANY, 0)
    msgs = []
    send_stats_request(dp, request, waiters, msgs)

    # The output keys map one-to-one onto OFPPortStats attributes.
    fields = ('port_no', 'rx_packets', 'tx_packets', 'rx_bytes', 'tx_bytes',
              'rx_dropped', 'tx_dropped', 'rx_errors', 'tx_errors',
              'rx_frame_err', 'rx_over_err', 'rx_crc_err', 'collisions')
    ports = []
    for msg in msgs:
        for stat in msg.body:
            ports.append(dict((name, getattr(stat, name))
                              for name in fields))
    return {str(dp.id): ports}
def get_group_stats(dp, waiters):
    """Query counters for every group and return {dpid: [group dicts]}."""
    request = dp.ofproto_parser.OFPGroupStatsRequest(
        dp, dp.ofproto.OFPG_ALL, 0)
    msgs = []
    send_stats_request(dp, request, waiters, msgs)

    groups = []
    for msg in msgs:
        for stat in msg.body:
            # Per-bucket packet/byte counters, preserved in bucket order.
            counters = [{'packet_count': bc.packet_count,
                         'byte_count': bc.byte_count}
                        for bc in stat.bucket_counters]
            groups.append({'length': stat.length,
                           'group_id': stat.group_id,
                           'ref_count': stat.ref_count,
                           'packet_count': stat.packet_count,
                           'byte_count': stat.byte_count,
                           'bucket_stats': counters})
    return {str(dp.id): groups}
def get_group_features(dp, waiters):
    """Query group-feature capabilities and return {dpid: [feature dicts]}."""
    ofp = dp.ofproto
    type_names = {ofp.OFPGT_ALL: 'ALL',
                  ofp.OFPGT_SELECT: 'SELECT',
                  ofp.OFPGT_INDIRECT: 'INDIRECT',
                  ofp.OFPGT_FF: 'FF'}
    cap_names = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                 ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                 ofp.OFPGFC_CHAINING: 'CHAINING',
                 ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    act_names = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                 ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                 ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                 ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                 ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                 ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                 ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                 ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                 ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                 ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                 ofp.OFPAT_GROUP: 'GROUP',
                 ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                 ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                 ofp.OFPAT_SET_FIELD: 'SET_FIELD'}
    request = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    send_stats_request(dp, request, waiters, msgs)

    features = []
    for msg in msgs:
        feature = msg.body
        # 'types' is a bitmap indexed by (1 << group-type number);
        # 'capabilities' is a plain OR of the OFPGFC_* flag values.
        types = [name for t, name in type_names.items()
                 if feature.types & (1 << t)]
        capabilities = [name for cap, name in cap_names.items()
                        if feature.capabilities & cap]
        # max_groups and supported actions are reported per group type.
        max_groups = [{name: feature.max_groups[t]}
                      for t, name in type_names.items()]
        actions = []
        for t, tname in type_names.items():
            supported = [aname for a, aname in act_names.items()
                         if feature.actions[t] & (1 << a)]
            actions.append({tname: supported})
        features.append({'types': types,
                         'capabilities': capabilities,
                         'max_groups': max_groups,
                         'actions': actions})
    return {str(dp.id): features}
def get_group_desc(dp, waiters):
    """Query group descriptions and return {dpid: [desc dicts]}."""
    type_names = {dp.ofproto.OFPGT_ALL: 'ALL',
                  dp.ofproto.OFPGT_SELECT: 'SELECT',
                  dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
                  dp.ofproto.OFPGT_FF: 'FF'}
    request = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    send_stats_request(dp, request, waiters, msgs)

    descs = []
    for msg in msgs:
        for stat in msg.body:
            buckets = []
            for bucket in stat.buckets:
                buckets.append(
                    {'weight': bucket.weight,
                     'watch_port': bucket.watch_port,
                     'watch_group': bucket.watch_group,
                     'actions': [action_to_str(a) for a in bucket.actions]})
            # Unknown numeric types map to None via dict.get().
            descs.append({'type': type_names.get(stat.type),
                          'group_id': stat.group_id,
                          'buckets': buckets})
    return {str(dp.id): descs}
def get_port_desc(dp, waiters):
    """Query switch features and return {dpid: [port description dicts]}."""
    request = dp.ofproto_parser.OFPFeaturesRequest(dp)
    msgs = []
    send_stats_request(dp, request, waiters, msgs)

    fields = ('port_no', 'hw_addr', 'name', 'config', 'state', 'curr',
              'advertised', 'supported', 'peer', 'curr_speed', 'max_speed')
    descs = []
    for msg in msgs:
        # The features reply carries ports as a mapping; iterate values.
        for port in msg.ports.values():
            descs.append(dict((name, getattr(port, name))
                              for name in fields))
    return {str(dp.id): descs}
def mod_flow_entry(dp, flow, cmd):
    """Build an OFPFlowMod from the *flow* dict and send it to the switch.

    Missing numeric keys default to 0, buffer/port/group ids to their
    OFP 'any'/'no buffer' sentinels; *cmd* is the OFPFC_* command.
    """
    ofproto = dp.ofproto
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    table_id = int(flow.get('table_id', 0))
    idle_timeout = int(flow.get('idle_timeout', 0))
    hard_timeout = int(flow.get('hard_timeout', 0))
    priority = int(flow.get('priority', 0))
    buffer_id = int(flow.get('buffer_id', ofproto.OFP_NO_BUFFER))
    out_port = int(flow.get('out_port', ofproto.OFPP_ANY))
    out_group = int(flow.get('out_group', ofproto.OFPG_ANY))
    flags = int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    instructions = to_actions(dp, flow.get('actions', []))

    message = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        flags, match, instructions)
    dp.send_msg(message)
def mod_group_entry(dp, group, cmd):
    """Build an OFPGroupMod from the *group* dict and send it to the switch.

    *group* may carry 'type' (ALL/SELECT/INDIRECT/FF, default ALL),
    'group_id' and a list of 'buckets'; *cmd* is the OFPGC_* command.
    """
    name_to_type = {'ALL': dp.ofproto.OFPGT_ALL,
                    'SELECT': dp.ofproto.OFPGT_SELECT,
                    'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
                    'FF': dp.ofproto.OFPGT_FF}
    group_type = name_to_type.get(group.get('type', 'ALL'))
    if group_type is None:
        # Unrecognized type strings are only logged; the mod is still sent.
        LOG.debug('Unknown type: %s', group.get('type'))
    group_id = int(group.get('group_id', 0))

    buckets = []
    for bucket in group.get('buckets', []):
        weight = int(bucket.get('weight', 0))
        watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY))
        # to_action() returns None for unknown actions; drop those.
        actions = [act for act in
                   (to_action(dp, dic) for dic in bucket.get('actions', []))
                   if act is not None]
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))

    message = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, group_type, group_id, buckets)
    dp.send_msg(message)
def mod_port_behavior(dp, port_config):
    """Build an OFPPortMod from the *port_config* dict and send it.

    All numeric keys default to 0 when absent. Previously 'advertise'
    had no default, so a port_config without it raised TypeError inside
    int(None) — inconsistent with every other key in this function.
    """
    port_no = int(port_config.get('port_no', 0))
    hw_addr = port_config.get('hw_addr')
    config = int(port_config.get('config', 0))
    mask = int(port_config.get('mask', 0))
    # Fix: default to 0 like the other optional numeric fields.
    advertise = int(port_config.get('advertise', 0))

    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, advertise)
    dp.send_msg(port_mod)
def send_experimenter(dp, exp):
    """Build an OFPExperimenter message from the *exp* dict and send it.

    'data' may be raw ascii (default) or base64-encoded; any other
    'data_type' is logged and the payload passed through unchanged.
    """
    experimenter = exp.get('experimenter', 0)
    exp_type = exp.get('exp_type', 0)

    data_type = exp.get('data_type', 'ascii')
    if data_type not in ('ascii', 'base64'):
        LOG.debug('Unknown data type: %s', data_type)
    data = exp.get('data', '')
    if data_type == 'base64':
        data = base64.b64decode(data)

    message = dp.ofproto_parser.OFPExperimenter(
        dp, experimenter, exp_type, data)
    dp.send_msg(message)
|
"""Handles database requests from other nova services."""
import copy
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.image import glance
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Instance fields that nova-compute may change through the conductor's
# instance_update() proxy; any other key is rejected with KeyError.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
                   'power_state', 'access_ip_v4', 'access_ip_v6',
                   'launched_at', 'terminated_at', 'host', 'node',
                   'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
                   'instance_type_id', 'root_device_name', 'launched_on',
                   'progress', 'vm_mode', 'default_ephemeral_device',
                   'default_swap_device', 'root_device_name',
                   'system_metadata', 'updated_at'
                   ]
# Subset of allowed_updates whose string values are parsed into datetimes
# before hitting the database (see instance_update()).
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
    """Mission: Conduct things.
    The methods in the base API for nova-conductor are various proxy operations
    performed on behalf of the nova-compute service running on compute nodes.
    Compute nodes are not allowed to directly access the database, so this set
    of methods allows them to get specific work done without locally accessing
    the database.
    The nova-conductor service also exposes an API in the 'compute_task'
    namespace. See the ComputeTaskManager class for details.
    """
    RPC_API_VERSION = '1.58'
    def __init__(self, *args, **kwargs):
        super(ConductorManager, self).__init__(service_name='conductor',
                                               *args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        # network/compute APIs are constructed lazily via the properties
        # below to avoid circular imports at service start-up.
        self._network_api = None
        self._compute_api = None
        self.compute_task_mgr = ComputeTaskManager()
        self.quotas = quota.QUOTAS
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def create_rpc_dispatcher(self, *args, **kwargs):
        """Expose the 'compute_task' namespace alongside the base API."""
        kwargs['additional_apis'] = [self.compute_task_mgr]
        return super(ConductorManager, self).create_rpc_dispatcher(*args,
                                                                   **kwargs)
    @property
    def network_api(self):
        # NOTE(danms): We need to instantiate our network_api on first use
        # to avoid the circular dependency that exists between our init
        # and network_api's
        if self._network_api is None:
            self._network_api = network.API()
        return self._network_api
    @property
    def compute_api(self):
        # Lazily constructed for the same reason as network_api above.
        if self._compute_api is None:
            self._compute_api = compute_api.API()
        return self._compute_api
    def ping(self, context, arg):
        # NOTE(russellb) This method can be removed in 2.0 of this API. It is
        # now a part of the base rpc API.
        return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
    @rpc_common.client_exceptions(KeyError, ValueError,
                                  exception.InvalidUUID,
                                  exception.InstanceNotFound,
                                  exception.UnexpectedTaskStateError)
    def instance_update(self, context, instance_uuid,
                        updates, service=None):
        """Apply a whitelisted set of updates to an instance and notify.

        Keys outside the module-level allowed_updates list raise KeyError;
        datetime fields arriving as strings are parsed first.
        """
        for key, value in updates.iteritems():
            if key not in allowed_updates:
                LOG.error(_("Instance update attempted for "
                            "'%(key)s' on %(instance_uuid)s"),
                          {'key': key, 'instance_uuid': instance_uuid})
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, basestring):
                # Datetimes cross the RPC boundary as strings.
                updates[key] = timeutils.parse_strtime(value)
        old_ref, instance_ref = self.db.instance_update_and_get_original(
            context, instance_uuid, updates)
        notifications.send_update(context, old_ref, instance_ref, service)
        return jsonutils.to_primitive(instance_ref)
    @rpc_common.client_exceptions(exception.InstanceNotFound)
    def instance_get(self, context, instance_id):
        """DB proxy: fetch one instance by integer id."""
        return jsonutils.to_primitive(
            self.db.instance_get(context, instance_id))
    @rpc_common.client_exceptions(exception.InstanceNotFound)
    def instance_get_by_uuid(self, context, instance_uuid,
                             columns_to_join=None):
        """DB proxy: fetch one instance by uuid."""
        return jsonutils.to_primitive(
            self.db.instance_get_by_uuid(context, instance_uuid,
                                         columns_to_join))
    # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
    def instance_get_all(self, context):
        return jsonutils.to_primitive(self.db.instance_get_all(context))
    def instance_get_all_by_host(self, context, host, node=None,
                                 columns_to_join=None):
        """DB proxy: all instances on a host (optionally a single node).

        NOTE(review): columns_to_join is ignored on the node-specific
        path — confirm whether that is intentional.
        """
        if node is not None:
            result = self.db.instance_get_all_by_host_and_node(
                context.elevated(), host, node)
        else:
            result = self.db.instance_get_all_by_host(context.elevated(), host,
                                                      columns_to_join)
        return jsonutils.to_primitive(result)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    @rpc_common.client_exceptions(exception.MigrationNotFound)
    def migration_get(self, context, migration_id):
        migration_ref = self.db.migration_get(context.elevated(),
                                              migration_id)
        return jsonutils.to_primitive(migration_ref)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def migration_get_unconfirmed_by_dest_compute(self, context,
                                                  confirm_window,
                                                  dest_compute):
        migrations = self.db.migration_get_unconfirmed_by_dest_compute(
            context, confirm_window, dest_compute)
        return jsonutils.to_primitive(migrations)
    def migration_get_in_progress_by_host_and_node(self, context,
                                                   host, node):
        """DB proxy: in-progress migrations touching a host/node."""
        migrations = self.db.migration_get_in_progress_by_host_and_node(
            context, host, node)
        return jsonutils.to_primitive(migrations)
    # NOTE(comstud): This method can be removed in v2.0 of the RPC API.
    def migration_create(self, context, instance, values):
        values.update({'instance_uuid': instance['uuid'],
                       'source_compute': instance['host'],
                       'source_node': instance['node']})
        migration_ref = self.db.migration_create(context.elevated(), values)
        return jsonutils.to_primitive(migration_ref)
    @rpc_common.client_exceptions(exception.MigrationNotFound)
    def migration_update(self, context, migration, status):
        """DB proxy: set a migration's status."""
        migration_ref = self.db.migration_update(context.elevated(),
                                                 migration['id'],
                                                 {'status': status})
        return jsonutils.to_primitive(migration_ref)
    @rpc_common.client_exceptions(exception.AggregateHostExists)
    def aggregate_host_add(self, context, aggregate, host):
        """DB proxy: add a host to an aggregate."""
        host_ref = self.db.aggregate_host_add(context.elevated(),
                                              aggregate['id'], host)
        return jsonutils.to_primitive(host_ref)
    @rpc_common.client_exceptions(exception.AggregateHostNotFound)
    def aggregate_host_delete(self, context, aggregate, host):
        """DB proxy: remove a host from an aggregate."""
        self.db.aggregate_host_delete(context.elevated(),
                                      aggregate['id'], host)
    @rpc_common.client_exceptions(exception.AggregateNotFound)
    def aggregate_get(self, context, aggregate_id):
        """DB proxy: fetch one aggregate by id."""
        aggregate = self.db.aggregate_get(context.elevated(), aggregate_id)
        return jsonutils.to_primitive(aggregate)
    def aggregate_get_by_host(self, context, host, key=None):
        """DB proxy: aggregates containing a host (optionally metadata key)."""
        aggregates = self.db.aggregate_get_by_host(context.elevated(),
                                                   host, key)
        return jsonutils.to_primitive(aggregates)
    def aggregate_metadata_add(self, context, aggregate, metadata,
                               set_delete=False):
        """DB proxy: add (or replace, with set_delete) aggregate metadata."""
        new_metadata = self.db.aggregate_metadata_add(context.elevated(),
                                                      aggregate['id'],
                                                      metadata, set_delete)
        return jsonutils.to_primitive(new_metadata)
    @rpc_common.client_exceptions(exception.AggregateMetadataNotFound)
    def aggregate_metadata_delete(self, context, aggregate, key):
        """DB proxy: delete one aggregate metadata key."""
        self.db.aggregate_metadata_delete(context.elevated(),
                                          aggregate['id'], key)
    def aggregate_metadata_get_by_host(self, context, host,
                                       key='availability_zone'):
        """DB proxy: metadata values for a key across a host's aggregates."""
        result = self.db.aggregate_metadata_get_by_host(context, host, key)
        return jsonutils.to_primitive(result)
    def bw_usage_update(self, context, uuid, mac, start_period,
                        bw_in=None, bw_out=None,
                        last_ctr_in=None, last_ctr_out=None,
                        last_refreshed=None,
                        update_cells=True):
        """Update (only when any counter is given) and return bw usage."""
        # Skip the write when all four counters are None; the read below
        # always happens so callers get the current row either way.
        if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
            self.db.bw_usage_update(context, uuid, mac, start_period,
                                    bw_in, bw_out, last_ctr_in, last_ctr_out,
                                    last_refreshed,
                                    update_cells=update_cells)
        usage = self.db.bw_usage_get(context, uuid, start_period, mac)
        return jsonutils.to_primitive(usage)
    # NOTE(russellb) This method can be removed in 2.0 of this API. It is
    # deprecated in favor of the method in the base API.
    def get_backdoor_port(self, context):
        return self.backdoor_port
    def security_group_get_by_instance(self, context, instance):
        """DB proxy: security groups attached to an instance."""
        group = self.db.security_group_get_by_instance(context,
                                                       instance['uuid'])
        return jsonutils.to_primitive(group)
    def security_group_rule_get_by_security_group(self, context, secgroup):
        """DB proxy: rules of one security group (depth-limited serialize)."""
        rules = self.db.security_group_rule_get_by_security_group(
            context, secgroup['id'])
        return jsonutils.to_primitive(rules, max_depth=4)
    def provider_fw_rule_get_all(self, context):
        """DB proxy: all provider firewall rules."""
        rules = self.db.provider_fw_rule_get_all(context)
        return jsonutils.to_primitive(rules)
    def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
        """DB proxy: agent build for (hypervisor, os, architecture)."""
        info = self.db.agent_build_get_by_triple(context, hypervisor, os,
                                                 architecture)
        return jsonutils.to_primitive(info)
    def block_device_mapping_update_or_create(self, context, values,
                                              create=None):
        """Create/update a BDM; create=None means 'update or create'."""
        if create is None:
            bdm = self.db.block_device_mapping_update_or_create(context,
                                                                values)
        elif create is True:
            bdm = self.db.block_device_mapping_create(context, values)
        else:
            bdm = self.db.block_device_mapping_update(context,
                                                      values['id'],
                                                      values)
        # NOTE:comstud): 'bdm' is always in the new format, so we
        # account for this in cells/messaging.py
        self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm,
                                                      create=create)
    def block_device_mapping_get_all_by_instance(self, context, instance,
                                                 legacy=True):
        """DB proxy: an instance's BDMs, optionally in legacy format."""
        bdms = self.db.block_device_mapping_get_all_by_instance(
            context, instance['uuid'])
        if legacy:
            bdms = block_device.legacy_mapping(bdms)
        return jsonutils.to_primitive(bdms)
    def block_device_mapping_destroy(self, context, bdms=None,
                                     instance=None, volume_id=None,
                                     device_name=None):
        """Destroy BDMs by list, or by instance+volume / instance+device.

        Exactly one of the three invocation forms must be used; anything
        else raises Invalid. Destruction is mirrored to the cells API.
        """
        if bdms is not None:
            for bdm in bdms:
                self.db.block_device_mapping_destroy(context, bdm['id'])
                # NOTE(comstud): bdm['id'] will be different in API cell,
                # so we must try to destroy by device_name or volume_id.
                # We need an instance_uuid in order to do this properly,
                # too.
                # I hope to clean a lot of this up in the object
                # implementation.
                instance_uuid = (bdm['instance_uuid'] or
                                 (instance and instance['uuid']))
                if not instance_uuid:
                    continue
                # Better to be safe than sorry. device_name is not
                # NULLable, however it could be an empty string.
                if bdm['device_name']:
                    self.cells_rpcapi.bdm_destroy_at_top(
                        context, instance_uuid,
                        device_name=bdm['device_name'])
                elif bdm['volume_id']:
                    self.cells_rpcapi.bdm_destroy_at_top(
                        context, instance_uuid,
                        volume_id=bdm['volume_id'])
        elif instance is not None and volume_id is not None:
            self.db.block_device_mapping_destroy_by_instance_and_volume(
                context, instance['uuid'], volume_id)
            self.cells_rpcapi.bdm_destroy_at_top(
                context, instance['uuid'], volume_id=volume_id)
        elif instance is not None and device_name is not None:
            self.db.block_device_mapping_destroy_by_instance_and_device(
                context, instance['uuid'], device_name)
            self.cells_rpcapi.bdm_destroy_at_top(
                context, instance['uuid'], device_name=device_name)
        else:
            # NOTE(danms): This shouldn't happen
            raise exception.Invalid(_("Invalid block_device_mapping_destroy"
                                      " invocation"))
    def instance_get_all_by_filters(self, context, filters, sort_key,
                                    sort_dir, columns_to_join=None):
        """DB proxy: filtered, sorted instance listing."""
        result = self.db.instance_get_all_by_filters(
            context, filters, sort_key, sort_dir,
            columns_to_join=columns_to_join)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
    def instance_get_all_hung_in_rebooting(self, context, timeout):
        result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
        return jsonutils.to_primitive(result)
    def instance_get_active_by_window(self, context, begin, end=None,
                                      project_id=None, host=None):
        # Unused, but cannot remove until major RPC version bump
        result = self.db.instance_get_active_by_window(context, begin, end,
                                                       project_id, host)
        return jsonutils.to_primitive(result)
    def instance_get_active_by_window_joined(self, context, begin, end=None,
                                             project_id=None, host=None):
        """DB proxy: instances active within a time window (joined cols)."""
        result = self.db.instance_get_active_by_window_joined(
            context, begin, end, project_id, host)
        return jsonutils.to_primitive(result)
    def instance_destroy(self, context, instance):
        """DB proxy: destroy an instance row."""
        self.db.instance_destroy(context, instance['uuid'])
    def instance_info_cache_delete(self, context, instance):
        """DB proxy: delete an instance's network info cache."""
        self.db.instance_info_cache_delete(context, instance['uuid'])
    def instance_info_cache_update(self, context, instance, values):
        """DB proxy: update an instance's network info cache."""
        self.db.instance_info_cache_update(context, instance['uuid'],
                                           values)
    def instance_type_get(self, context, instance_type_id):
        """DB proxy: fetch a flavor (historically 'instance type') by id."""
        result = self.db.flavor_get(context, instance_type_id)
        return jsonutils.to_primitive(result)
    def instance_fault_create(self, context, values):
        """DB proxy: record an instance fault."""
        result = self.db.instance_fault_create(context, values)
        return jsonutils.to_primitive(result)
    # NOTE(kerrin): This method can be removed in v2.0 of the RPC API.
    def vol_get_usage_by_time(self, context, start_time):
        result = self.db.vol_get_usage_by_time(context, start_time)
        return jsonutils.to_primitive(result)
    # NOTE(kerrin): The last_refreshed argument is unused by this method
    # and can be removed in v2.0 of the RPC API.
    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed=None,
                         update_totals=False):
        """Update volume usage counters and emit a volume.usage notification."""
        vol_usage = self.db.vol_usage_update(context, vol_id,
                                             rd_req, rd_bytes,
                                             wr_req, wr_bytes,
                                             instance['uuid'],
                                             instance['project_id'],
                                             instance['user_id'],
                                             instance['availability_zone'],
                                             update_totals)
        # We have just updated the database, so send the notification now
        self.notifier.info(context, 'volume.usage',
                           compute_utils.usage_volume_info(vol_usage))
    @rpc_common.client_exceptions(exception.ComputeHostNotFound,
                                  exception.HostBinaryNotFound)
    def service_get_all_by(self, context, topic=None, host=None, binary=None):
        """DB proxy: look up services by topic/host/binary combinations.

        NOTE(review): a call with only 'binary' set matches no branch and
        leaves 'result' unbound (NameError) — confirm callers always pass
        host with binary.
        """
        if not any((topic, host, binary)):
            result = self.db.service_get_all(context)
        elif all((topic, host)):
            if topic == 'compute':
                result = self.db.service_get_by_compute_host(context, host)
                # FIXME(comstud) Potentially remove this on bump to v2.0
                result = [result]
            else:
                result = self.db.service_get_by_host_and_topic(context,
                                                               host, topic)
        elif all((host, binary)):
            result = self.db.service_get_by_args(context, host, binary)
        elif topic:
            result = self.db.service_get_all_by_topic(context, topic)
        elif host:
            result = self.db.service_get_all_by_host(context, host)
        return jsonutils.to_primitive(result)
    def action_event_start(self, context, values):
        """DB proxy: record the start of an instance action event."""
        evt = self.db.action_event_start(context, values)
        return jsonutils.to_primitive(evt)
    def action_event_finish(self, context, values):
        """DB proxy: record the end of an instance action event."""
        evt = self.db.action_event_finish(context, values)
        return jsonutils.to_primitive(evt)
    def service_create(self, context, values):
        """DB proxy: create a service record."""
        svc = self.db.service_create(context, values)
        return jsonutils.to_primitive(svc)
    @rpc_common.client_exceptions(exception.ServiceNotFound)
    def service_destroy(self, context, service_id):
        """DB proxy: delete a service record."""
        self.db.service_destroy(context, service_id)
    def compute_node_create(self, context, values):
        """DB proxy: create a compute-node record."""
        result = self.db.compute_node_create(context, values)
        return jsonutils.to_primitive(result)
    def compute_node_update(self, context, node, values, prune_stats=False):
        """DB proxy: update a compute-node record."""
        result = self.db.compute_node_update(context, node['id'], values,
                                             prune_stats)
        return jsonutils.to_primitive(result)
    def compute_node_delete(self, context, node):
        """DB proxy: delete a compute-node record."""
        result = self.db.compute_node_delete(context, node['id'])
        return jsonutils.to_primitive(result)
    @rpc_common.client_exceptions(exception.ServiceNotFound)
    def service_update(self, context, service, values):
        """DB proxy: update a service record."""
        svc = self.db.service_update(context, service['id'], values)
        return jsonutils.to_primitive(svc)
    def task_log_get(self, context, task_name, begin, end, host, state=None):
        """DB proxy: read periodic-task log entries."""
        result = self.db.task_log_get(context, task_name, begin, end, host,
                                      state)
        return jsonutils.to_primitive(result)
    def task_log_begin_task(self, context, task_name, begin, end, host,
                            task_items=None, message=None):
        """DB proxy: mark a periodic task as started."""
        result = self.db.task_log_begin_task(context.elevated(), task_name,
                                             begin, end, host, task_items,
                                             message)
        return jsonutils.to_primitive(result)
    def task_log_end_task(self, context, task_name, begin, end, host,
                          errors, message=None):
        """DB proxy: mark a periodic task as finished."""
        result = self.db.task_log_end_task(context.elevated(), task_name,
                                           begin, end, host, errors, message)
        return jsonutils.to_primitive(result)
    def notify_usage_exists(self, context, instance, current_period=False,
                            ignore_missing_network_data=True,
                            system_metadata=None, extra_usage_info=None):
        """Emit a usage-exists notification for an instance."""
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period,
                                          ignore_missing_network_data,
                                          system_metadata, extra_usage_info)
    def security_groups_trigger_handler(self, context, event, args):
        """Forward a security-group event to the configured driver."""
        self.security_group_api.trigger_handler(event, context, *args)
    def security_groups_trigger_members_refresh(self, context, group_ids):
        """Ask the security-group driver to refresh group membership."""
        self.security_group_api.trigger_members_refresh(context, group_ids)
    def network_migrate_instance_start(self, context, instance, migration):
        """Network API proxy: begin migrating an instance's networking."""
        self.network_api.migrate_instance_start(context, instance, migration)
    def network_migrate_instance_finish(self, context, instance, migration):
        """Network API proxy: finish migrating an instance's networking."""
        self.network_api.migrate_instance_finish(context, instance, migration)
    def quota_commit(self, context, reservations, project_id=None,
                     user_id=None):
        """Commit quota reservations on behalf of a compute node."""
        quota.QUOTAS.commit(context, reservations, project_id=project_id,
                            user_id=user_id)
    def quota_rollback(self, context, reservations, project_id=None,
                       user_id=None):
        """Roll back quota reservations on behalf of a compute node."""
        quota.QUOTAS.rollback(context, reservations, project_id=project_id,
                              user_id=user_id)
    def get_ec2_ids(self, context, instance):
        """Build the EC2-style ids (instance/ami/kernel/ramdisk) for an
        instance."""
        ec2_ids = {}
        ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
        ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
                                                         instance['image_ref'])
        for image_type in ['kernel', 'ramdisk']:
            if '%s_id' % image_type in instance:
                image_id = instance['%s_id' % image_type]
                ec2_image_type = ec2utils.image_type(image_type)
                ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
                                                      ec2_image_type)
                ec2_ids['%s-id' % image_type] = ec2_id
        return ec2_ids
    # NOTE(danms): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def compute_stop(self, context, instance, do_cast=True):
        # NOTE(mriedem): Clients using an interface before 1.43 will be sending
        # dicts so we need to handle that here since compute/api::stop()
        # requires an object.
        if isinstance(instance, dict):
            instance = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance)
        self.compute_api.stop(context, instance, do_cast)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def compute_confirm_resize(self, context, instance, migration_ref):
        # Tolerate both dict and object forms for instance/migration, for
        # compatibility with older RPC clients.
        if isinstance(instance, dict):
            attrs = ['metadata', 'system_metadata', 'info_cache',
                     'security_groups']
            instance = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance,
                expected_attrs=attrs)
        if isinstance(migration_ref, dict):
            migration_ref = migration_obj.Migration._from_db_object(
                context.elevated(), migration_ref)
        self.compute_api.confirm_resize(context, instance,
                                        migration=migration_ref)
    def compute_unrescue(self, context, instance):
        """Compute API proxy: take an instance out of rescue mode."""
        self.compute_api.unrescue(context, instance)
    def _object_dispatch(self, target, method, context, args, kwargs):
        """Dispatch a call to an object method.
        This ensures that object methods get called and any exception
        that is raised gets wrapped in a ClientException for forwarding
        back to the caller (without spamming the conductor logs).
        """
        try:
            # NOTE(danms): Keep the getattr inside the try block since
            # a missing method is really a client problem
            return getattr(target, method)(context, *args, **kwargs)
        except Exception:
            raise rpc_common.ClientException()
    def object_class_action(self, context, objname, objmethod,
                            objver, args, kwargs):
        """Perform a classmethod action on an object."""
        objclass = nova_object.NovaObject.obj_class_from_name(objname,
                                                              objver)
        return self._object_dispatch(objclass, objmethod, context,
                                     args, kwargs)
    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Perform an action on an object."""
        oldobj = copy.copy(objinst)
        result = self._object_dispatch(objinst, objmethod, context,
                                       args, kwargs)
        updates = dict()
        # NOTE(danms): Diff the object with the one passed to us and
        # generate a list of changes to forward back
        for field in objinst.fields:
            if not objinst.obj_attr_is_set(field):
                # Avoid demand-loading anything
                continue
            if (not oldobj.obj_attr_is_set(field) or
                    oldobj[field] != objinst[field]):
                updates[field] = objinst._attr_to_primitive(field)
        # This is safe since a field named this would conflict with the
        # method anyway
        updates['obj_what_changed'] = objinst.obj_what_changed()
        return updates, result
    # NOTE(danms): This method is now deprecated and can be removed in
    # v2.0 of the RPC API
    def compute_reboot(self, context, instance, reboot_type):
        self.compute_api.reboot(context, instance, reboot_type)
class ComputeTaskManager(base.Base):
    """Namespace for compute methods.
    This class presents an rpc API for nova-conductor under the 'compute_task'
    namespace. The methods here are compute operations that are invoked
    by the API service. These methods see the operation to completion, which
    may involve coordinating activities on multiple compute nodes.
    """
    # RPC namespace/version advertised to conductor clients.
    RPC_API_NAMESPACE = 'compute_task'
    RPC_API_VERSION = '1.6'
    def __init__(self):
        super(ComputeTaskManager, self).__init__()
        # RPC clients for compute and scheduler, plus the image service
        # and quota engine used by the migration paths below.
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.image_service = glance.get_default_image_service()
        self.quotas = quota.QUOTAS
    @rpc_common.client_exceptions(exception.NoValidHost,
                                  exception.ComputeServiceUnavailable,
                                  exception.InvalidHypervisorType,
                                  exception.UnableToMigrateToSelf,
                                  exception.DestinationHypervisorTooOld,
                                  exception.InvalidLocalStorage,
                                  exception.InvalidSharedStorage,
                                  exception.MigrationPreCheckError)
    def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
            flavor, block_migration, disk_over_commit, reservations=None):
        """Dispatch a server migration.

        Two combinations are supported: live (no rebuild, no flavor)
        -> _live_migrate, and cold (not live, no rebuild, with flavor)
        -> _cold_migrate. Anything else raises NotImplementedError.
        """
        if instance and not isinstance(instance, instance_obj.Instance):
            # NOTE(danms): Until v2 of the RPC API, we need to tolerate
            # old-world instance objects here
            attrs = ['metadata', 'system_metadata', 'info_cache',
                     'security_groups']
            instance = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance,
                expected_attrs=attrs)
        if live and not rebuild and not flavor:
            self._live_migrate(context, instance, scheduler_hint,
                               block_migration, disk_over_commit)
        elif not live and not rebuild and flavor:
            instance_uuid = instance['uuid']
            # EventReporter records start/finish of the cold_migrate action.
            with compute_utils.EventReporter(context, ConductorManager(),
                                             'cold_migrate', instance_uuid):
                self._cold_migrate(context, instance, flavor,
                                   scheduler_hint['filter_properties'],
                                   reservations)
        else:
            raise NotImplementedError()
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations):
        """Schedule a destination and kick off prep_resize on that host.

        On NoValidHost the instance's vm_state is restored and quota
        reservations rolled back; on any later failure the instance is
        put into ERROR and reservations rolled back, then the exception
        is re-raised.
        """
        image_ref = instance.image_ref
        image = compute_utils.get_image_metadata(
            context, self.image_service, image_ref, instance)

        request_spec = scheduler_utils.build_request_spec(
            context, image, [instance], instance_type=flavor)

        try:
            hosts = self.scheduler_rpcapi.select_destinations(
                    context, request_spec, filter_properties)
            host_state = hosts[0]
        except exception.NoValidHost as ex:
            vm_state = instance['vm_state']
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, 'migrate_server',
                                          updates, ex, request_spec)
            if reservations:
                self.quotas.rollback(context, reservations)

            LOG.warning(_("No valid host found for cold migrate"))
            return

        try:
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       host_state)
            # context is not serializable
            filter_properties.pop('context', None)

            # TODO(timello): originally, instance_type in request_spec
            # on compute.api.resize does not have 'extra_specs', so we
            # remove it for now to keep tests backward compatibility.
            request_spec['instance_type'].pop('extra_specs')

            (host, node) = (host_state['host'], host_state['nodename'])
            self.compute_rpcapi.prep_resize(
                context, image, instance,
                flavor, host,
                reservations, request_spec=request_spec,
                filter_properties=filter_properties, node=node)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': vm_states.ERROR,
                           'task_state': None}
                self._set_vm_state_and_notify(context, 'migrate_server',
                                              updates, ex, request_spec)
                if reservations:
                    self.quotas.rollback(context, reservations)
    def _set_vm_state_and_notify(self, context, method, updates, ex,
                                 request_spec):
        """Apply ``updates`` to the instance state and emit an error
        notification for ``method``.

        Thin wrapper over scheduler_utils.set_vm_state_and_notify with the
        'compute_task' service name and this manager's db handle filled in.
        """
        scheduler_utils.set_vm_state_and_notify(
                context, 'compute_task', method, updates,
                ex, request_spec, self.db)
    def _live_migrate(self, context, instance, scheduler_hint,
                      block_migration, disk_over_commit):
        """Execute a live migration, optionally to a caller-requested host.

        On a known scheduling/compatibility failure the instance keeps its
        current vm_state (the expected MIGRATING task_state is cleared); on
        any other failure the instance goes to ERROR.  The exception is
        re-raised in both cases.
        """
        # ``host`` may be absent, in which case the scheduler chooses one.
        destination = scheduler_hint.get("host")
        try:
            live_migrate.execute(context, instance, destination,
                             block_migration, disk_over_commit)
        except (exception.NoValidHost,
                exception.ComputeServiceUnavailable,
                exception.InvalidHypervisorType,
                exception.UnableToMigrateToSelf,
                exception.DestinationHypervisorTooOld,
                exception.InvalidLocalStorage,
                exception.InvalidSharedStorage,
                exception.MigrationPreCheckError) as ex:
            # Known, recoverable failures: keep the current vm_state.
            with excutils.save_and_reraise_exception():
                #TODO(johngarbutt) - eventually need instance actions here
                request_spec = {'instance_properties': {
                    'uuid': instance['uuid'], },
                }
                scheduler_utils.set_vm_state_and_notify(context,
                        'compute_task', 'migrate_server',
                        dict(vm_state=instance['vm_state'],
                             task_state=None,
                             expected_task_state=task_states.MIGRATING,),
                        ex, request_spec, self.db)
        except Exception as ex:
            # Unexpected failure: put the instance into ERROR.
            with excutils.save_and_reraise_exception():
                request_spec = {'instance_properties': {
                    'uuid': instance['uuid'], },
                }
                scheduler_utils.set_vm_state_and_notify(context,
                        'compute_task', 'migrate_server',
                        {'vm_state': vm_states.ERROR},
                        ex, request_spec, self.db)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# NOTE(alaski): For compatibility until a new scheduler method is used.
request_spec.update({'block_device_mapping': block_device_mapping,
'security_group': security_groups})
self.scheduler_rpcapi.run_instance(context, request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks, is_first_time=True,
filter_properties=filter_properties,
legacy_bdm_in_spec=legacy_bdm)
def _get_image(self, context, image_id):
if not image_id:
return None
return self.image_service.show(context, image_id)
def _delete_image(self, context, image_id):
(image_service, image_id) = glance.get_remote_image_service(context,
image_id)
return image_service.delete(context, image_id)
    def _schedule_instances(self, context, image, filter_properties,
                            *instances):
        """Ask the scheduler to pick destination hosts for ``instances``.

        Returns the scheduler's list of host dicts (see shape note below).
        """
        request_spec = scheduler_utils.build_request_spec(context, image,
                                                          instances)
        # dict(host='', nodename='', limits='')
        hosts = self.scheduler_rpcapi.select_destinations(context,
                request_spec, filter_properties)
        return hosts
def unshelve_instance(self, context, instance):
sys_meta = instance.system_metadata
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
snapshot_id = sys_meta.get('shelved_image_id')
if snapshot_id:
self._delete_image(context, snapshot_id)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
try:
with compute_utils.EventReporter(context, self.db,
'get_image_info', instance.uuid):
image = self._get_image(context,
sys_meta['shelved_image_id'])
except exception.ImageNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_('Unshelve attempted but vm_state not SHELVED '
'or SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
hosts = self._schedule_instances(context, image, [], instance)
host = hosts.pop(0)['host']
self.compute_rpcapi.unshelve_instance(context, instance, host,
image)
else:
LOG.error(_('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
if key in sys_meta:
del(sys_meta[key])
instance.system_metadata = sys_meta
instance.save()
|
"""mixup: Beyond Empirical Risk Minimization.
Adaption to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
    """Mixup model: trains on convex combinations of pairs of examples.

    SSL adaptation of MixUp (https://arxiv.org/abs/1710.09412): labeled
    examples mix their one-hot labels, unlabeled examples mix the model's
    own softmax predictions (with stop_gradient) as soft labels.
    """

    def augment(self, x, l, beta, **kwargs):
        """Return (mixed images, mixed labels).

        Each example is mixed with the batch reversed; the mixing weight is
        drawn per-example from Beta(beta, beta).
        """
        del kwargs
        # One weight per example, broadcastable over the remaining 3 axes
        # (presumably H, W, C — consistent with the placeholders in model()).
        mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
        # Fold into [0.5, 1] so each output stays dominated by its own input.
        mix = tf.maximum(mix, 1 - mix)
        xmix = x * mix + x[::-1] * (1 - mix)
        # Labels use the same weight, squeezed to a [batch, 1] factor.
        lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
        return xmix, lmix

    def model(self, batch, lr, wd, ema, **kwargs):
        """Build the training graph; returns the ops used by the trainer.

        batch: per-step batch size; lr: learning rate; wd: weight-decay
        factor (scaled by lr below); ema: EMA decay for evaluation weights.
        Remaining kwargs are forwarded to augment() and the classifier.
        """
        hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
        xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt')  # For training
        x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
        y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')  # Unlabeled batch.
        l_in = tf.placeholder(tf.int32, [batch], 'labels')
        # Scale weight decay by the learning rate.
        wd *= lr
        classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits

        def get_logits(x):
            # Training-mode forward pass through the shared classifier.
            logits = classifier(x, training=True)
            return logits

        # Labeled branch: mix inputs with their one-hot labels.
        x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
        logits_x = get_logits(x)
        # NOTE: UPDATE_OPS is snapshotted here, after the labeled pass but
        # before the unlabeled passes below — ops created later (e.g.
        # batch-norm updates from the unlabeled branch) are excluded.
        post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        # Unlabeled branch: soft labels are the model's own predictions,
        # detached from the gradient.
        y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
        labels_y = tf.stop_gradient(labels_y)
        logits_y = get_logits(y)

        loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
        loss_xe = tf.reduce_mean(loss_xe)
        loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
        loss_xeu = tf.reduce_mean(loss_xeu)
        tf.summary.scalar('losses/xe', loss_xe)
        tf.summary.scalar('losses/xeu', loss_xeu)

        # Evaluation weights track an exponential moving average of the
        # trained variables.
        ema = tf.train.ExponentialMovingAverage(decay=ema)
        ema_op = ema.apply(utils.model_vars())
        ema_getter = functools.partial(utils.getter_ema, ema)
        post_ops.append(ema_op)
        # Manual weight decay applied to kernel variables only.
        post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])

        train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
        with tf.control_dependencies([train_op]):
            # EMA / decay / update ops run strictly after the gradient step.
            train_op = tf.group(*post_ops)

        return EasyDict(
            xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
            classify_raw=tf.nn.softmax(classifier(x_in, training=False)),  # No EMA, for debugging.
            classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
    """Build a Mixup model from the command-line flags and train it."""
    del argv  # Unused.
    utils.setup_main()
    dataset = data.DATASETS()[FLAGS.dataset]()
    log_width = utils.ilog2(dataset.width)
    # Default scales to log2(width) - 2 when the flag is left at 0.
    scales = FLAGS.scales or (log_width - 2)
    model = Mixup(
        os.path.join(FLAGS.train_dir, dataset.name),
        dataset,
        lr=FLAGS.lr,
        wd=FLAGS.wd,
        arch=FLAGS.arch,
        batch=FLAGS.batch,
        nclass=dataset.nclass,
        ema=FLAGS.ema,
        beta=FLAGS.beta,
        scales=scales,
        filters=FLAGS.filters,
        repeat=FLAGS.repeat)
    # Flags are in units of kibi-images; shift converts to images.
    model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
    utils.setup_tf()
    # Hyper-parameters specific to Mixup training.
    flags.DEFINE_float('wd', 0.02, 'Weight decay.')
    flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
    flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
    flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
    flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
    flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
    # Override library defaults for this script.
    FLAGS.set_default('dataset', 'cifar10.3@250-5000')
    FLAGS.set_default('batch', 64)
    FLAGS.set_default('lr', 0.002)
    FLAGS.set_default('train_kimg', 1 << 16)
    app.run(main)
|
import unittest
from mock import patch, Mock, call
from nose_parameterized import parameterized
from netaddr import IPAddress, IPNetwork
from subprocess import CalledProcessError
from calico_ctl.bgp import *
from calico_ctl import container
from calico_ctl import utils
from pycalico.datastore_datatypes import Endpoint, IPPool
class TestContainer(unittest.TestCase):
    @parameterized.expand([
        ({'<CONTAINER>':'node1', 'ip':1, 'add':1, '<IP>':'127.a.0.1'}, True),
        ({'<CONTAINER>':'node1', 'ip':1, 'add':1, '<IP>':'aa:bb::zz'}, True),
        ({'add':1, '<CONTAINER>':'node1', '<IP>':'127.a.0.1'}, True),
        ({'add':1, '<CONTAINER>':'node1', '<IP>':'aa:bb::zz'}, True)
    ])
    def test_validate_arguments(self, case, sys_exit_called):
        """
        Test validate_arguments for calicoctl container command

        Every case carries a malformed IPv4/IPv6 <IP> value, so
        validate_arguments is expected to call sys.exit each time.
        """
        with patch('sys.exit', autospec=True) as m_sys_exit:
            # Call method under test
            container.validate_arguments(case)

            # Assert method exits if bad input
            self.assertEqual(m_sys_exit.called, sys_exit_called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_add(self, m_netns, m_get_pool_or_exit, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add method of calicoctl container command
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'},
'HostConfig': {'NetworkMode': "not host"}
}
m_client.get_endpoint.side_effect = KeyError
m_client.get_default_next_hops.return_value = 'next_hops'
# Call method under test
test_return = container.container_add('container1', '1.1.1.1', 'interface')
# Assert
m_enforce_root.assert_called_once_with()
m_get_container_info_or_exit.assert_called_once_with('container1')
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_get_pool_or_exit.assert_called_once_with(IPAddress('1.1.1.1'))
m_client.get_default_next_hops.assert_called_once_with(utils.hostname)
# Check an enpoint object was returned
self.assertTrue(isinstance(test_return, Endpoint))
self.assertTrue(m_netns.create_veth.called)
self.assertTrue(m_netns.move_veth_into_ns.called)
self.assertTrue(m_netns.add_ip_to_ns_veth.called)
self.assertTrue(m_netns.add_ns_default_route.called)
self.assertTrue(m_netns.get_ns_veth_mac.called)
self.assertTrue(m_client.set_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_add_container_host_ns(self, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add method of calicoctl container command when the
container shares the host namespace.
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'},
'HostConfig': {'NetworkMode': 'host'}
}
m_client.get_endpoint.side_effect = KeyError
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
m_enforce_root.assert_called_once_with()
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
def test_container_add_existing_container(
self, m_get_pool_or_exit, m_client, m_get_container_info_or_exit,
m_enforce_root):
"""
Test container_add when a container already exists.
Do not raise an exception when the client tries 'get_endpoint'
Assert that the system then exits and all expected calls are made
"""
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_get_pool_or_exit.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    def test_container_add_container_not_running(
            self, m_get_pool_or_exit, m_client,
            m_get_container_info_or_exit, m_enforce_root):
        """
        Test container_add when a container is not running

        get_container_info_or_exit returns a running state of value 0
        Assert that the system then exits and all expected calls are made
        """
        # Set up mock object
        # No existing endpoint for the workload.
        m_client.get_endpoint.side_effect = KeyError
        # Container exists but is stopped (Running == 0).
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 0, 'Pid': 'Pid_info'}
        }

        # Call method under test expecting a SystemExit
        self.assertRaises(SystemExit, container.container_add,
                          'container1', '1.1.1.1', 'interface')

        # Assert only expected calls were made
        self.assertTrue(m_enforce_root.called)
        self.assertTrue(m_get_container_info_or_exit.called)
        self.assertTrue(m_client.get_endpoint.called)
        self.assertFalse(m_get_pool_or_exit.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    def test_container_add_not_ipv4_configured(
            self, m_get_pool_or_exit, m_client, m_get_container_info_or_exit,
            m_enforce_root):
        """
        Test container_add when the client cannot obtain next hop IPs

        client.get_default_next_hops returns an empty dictionary, which produces
        a KeyError when trying to determine the IP.
        Assert that the system then exits and all expected calls are made
        """
        # Set up mock objects
        # No existing endpoint; empty next-hops dict triggers the failure.
        m_client.get_endpoint.side_effect = KeyError
        m_client.get_default_next_hops.return_value = {}

        # Call method under test expecting a SystemExit
        self.assertRaises(SystemExit, container.container_add,
                          'container1', '1.1.1.1', 'interface')

        # Assert only expected calls were made
        self.assertTrue(m_enforce_root.called)
        self.assertTrue(m_get_container_info_or_exit.called)
        self.assertTrue(m_client.get_endpoint.called)
        self.assertTrue(m_get_pool_or_exit.called)
        self.assertTrue(m_client.get_default_next_hops.called)
        # Failure happens before any address assignment.
        self.assertFalse(m_client.assign_address.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.netns', autospec=True)
    def test_container_add_ip_previously_assigned(
            self, m_netns, m_get_pool_or_exit, m_client,
            m_get_container_info_or_exit, m_enforce_root):
        """
        Test container_add when an ip address is already assigned in pool

        client.assign_address returns an empty list.
        Assert that the system then exits and all expected calls are made
        """
        # Set up mock object
        # No existing endpoint; empty list from assign_address means the
        # requested IP could not be allocated.
        m_client.get_endpoint.side_effect = KeyError
        m_client.assign_address.return_value = []

        # Call method under test expecting a SystemExit
        self.assertRaises(SystemExit, container.container_add,
                          'container1', '1.1.1.1', 'interface')

        # Assert only expected calls were made
        self.assertTrue(m_enforce_root.called)
        self.assertTrue(m_get_container_info_or_exit.called)
        self.assertTrue(m_client.get_endpoint.called)
        self.assertTrue(m_get_pool_or_exit.called)
        self.assertTrue(m_client.get_default_next_hops.called)
        self.assertTrue(m_client.assign_address.called)
        # No veth is created once assignment fails.
        self.assertFalse(m_netns.create_veth.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_container_id', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.netns', autospec=True)
    def test_container_remove(self, m_netns, m_client, m_get_container_id,
                              m_enforce_root):
        """
        Test for container_remove of calicoctl container command

        Happy path: the workload's endpoint holds one IPv4 address, which
        is unassigned; the veth is removed and the workload deleted.
        """
        # Set up mock objects
        m_get_container_id.return_value = 666
        # Endpoint with a single IPv4 network and no IPv6 networks.
        ipv4_nets = set()
        ipv4_nets.add(IPNetwork(IPAddress('1.1.1.1')))
        ipv6_nets = set()
        m_endpoint = Mock(spec=Endpoint)
        m_endpoint.ipv4_nets = ipv4_nets
        m_endpoint.ipv6_nets = ipv6_nets
        m_endpoint.endpoint_id = 12
        m_endpoint.name = "eth1234"
        ippool = IPPool('1.1.1.1/24')
        m_client.get_endpoint.return_value = m_endpoint
        m_client.get_ip_pools.return_value = [ippool]

        # Call method under test
        container.container_remove('container1')

        # Assert
        m_enforce_root.assert_called_once_with()
        m_get_container_id.assert_called_once_with('container1')
        m_client.get_endpoint.assert_called_once_with(
            hostname=utils.hostname,
            orchestrator_id=utils.ORCHESTRATOR_ID,
            workload_id=666
        )
        # One address (the single IPv4 net) is released.
        self.assertEqual(m_client.unassign_address.call_count, 1)
        m_netns.remove_veth.assert_called_once_with("eth1234")
        m_client.remove_workload.assert_called_once_with(
            utils.hostname, utils.ORCHESTRATOR_ID, 666)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_id', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_remove_no_endpoint(
self, m_client, m_get_container_id, m_enforce_root):
"""
Test for container_remove when the client cannot obtain an endpoint
client.get_endpoint raises a KeyError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_client.get_endpoint.side_effect = KeyError
# Call function under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_remove, 'container1')
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_id.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.get_ip_pools.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.netns', autospec=True)
    def test_container_ip_add_ipv4(
            self, m_netns, m_client, m_get_container_info_or_exit,
            m_get_pool_or_exit, m_enforce_root):
        """
        Test for container_ip_add with an ipv4 ip argument

        Assert that the correct calls associated with an ipv4 address are made
        (assignment, ipv4_nets update, endpoint update, veth configuration).
        """
        # Set up mock objects
        pool_return = 'pool'
        m_get_pool_or_exit.return_value = pool_return
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        m_endpoint = Mock()
        m_client.get_endpoint.return_value = m_endpoint

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1.1.1.1'
        ip_addr = IPAddress(ip)
        interface = 'interface'

        # Call method under test
        container.container_ip_add(container_name, ip, interface)

        # Assert
        m_enforce_root.assert_called_once_with()
        m_get_pool_or_exit.assert_called_once_with(ip_addr)
        m_get_container_info_or_exit.assert_called_once_with(container_name)
        m_client.get_endpoint.assert_called_once_with(
            hostname=utils.hostname,
            orchestrator_id=utils.ORCHESTRATOR_ID,
            workload_id=666
        )
        # The address lands in the endpoint's IPv4 nets (not IPv6).
        m_client.assign_address.assert_called_once_with(pool_return, ip_addr)
        m_endpoint.ipv4_nets.add.assert_called_once_with(IPNetwork(ip_addr))
        m_client.update_endpoint.assert_called_once_with(m_endpoint)
        m_netns.add_ip_to_ns_veth.assert_called_once_with(
            'Pid_info', ip_addr, interface
        )
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.netns', autospec=True)
    def test_container_ip_add_ipv6(
            self, m_netns, m_client, m_get_container_info_or_exit,
            m_get_pool_or_exit, m_enforce_root):
        """
        Test for container_ip_add with an ipv6 ip argument

        Assert that the correct calls associated with an ipv6 address are made
        (assignment, ipv6_nets update, endpoint update, veth configuration).
        """
        # Set up mock objects
        pool_return = 'pool'
        m_get_pool_or_exit.return_value = pool_return
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        m_endpoint = Mock()
        m_client.get_endpoint.return_value = m_endpoint

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1::1'
        ip_addr = IPAddress(ip)
        interface = 'interface'

        # Call method under test
        container.container_ip_add(container_name, ip, interface)

        # Assert
        m_enforce_root.assert_called_once_with()
        m_get_pool_or_exit.assert_called_once_with(ip_addr)
        m_get_container_info_or_exit.assert_called_once_with(container_name)
        m_client.get_endpoint.assert_called_once_with(
            hostname=utils.hostname,
            orchestrator_id=utils.ORCHESTRATOR_ID,
            workload_id=666
        )
        # The address lands in the endpoint's IPv6 nets (not IPv4).
        m_client.assign_address.assert_called_once_with(pool_return, ip_addr)
        m_endpoint.ipv6_nets.add.assert_called_once_with(IPNetwork(ip_addr))
        m_client.update_endpoint.assert_called_once_with(m_endpoint)
        m_netns.add_ip_to_ns_veth.assert_called_once_with(
            'Pid_info', ip_addr, interface
        )
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client.get_endpoint', autospec=True)
def test_container_ip_add_container_not_running(
self, m_client_get_endpoint, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the container is not running
get_container_info_or_exit returns a running state of value 0.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 0, 'Pid': 'Pid_info'}
}
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertFalse(m_client_get_endpoint.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.print_container_not_in_calico_msg', autospec=True)
    def test_container_ip_add_container_not_in_calico(
            self, m_print_container_not_in_calico_msg, m_client,
            m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
        """
        Test for container_ip_add when the container is not networked into calico

        client.get_endpoint raises a KeyError.
        Assert that the system then exits, the user message is printed, and
        all expected calls are made
        """
        # Set up mock objects
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        # side_effect takes precedence over return_value: get_endpoint raises.
        m_client.get_endpoint.return_value = Mock()
        m_client.get_endpoint.side_effect = KeyError

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1.1.1.1'
        interface = 'interface'

        # Call method under test expecting a System Exit
        self.assertRaises(SystemExit, container.container_ip_add,
                          container_name, ip, interface)

        # Assert
        self.assertTrue(m_enforce_root.called)
        self.assertTrue(m_get_pool_or_exit.called)
        self.assertTrue(m_get_container_info_or_exit.called)
        self.assertTrue(m_client.get_endpoint.called)
        m_print_container_not_in_calico_msg.assert_called_once_with(container_name)
        # Exit happens before any address assignment.
        self.assertFalse(m_client.assign_address.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.netns', autospec=True)
    def test_container_ip_add_fail_assign_address(
            self, m_netns, m_client, m_get_container_info_or_exit,
            m_get_pool_or_exit, m_enforce_root):
        """
        Test for container_ip_add when the client cannot assign an IP

        client.assign_address returns an empty list.
        Assert that the system then exits and all expected calls are made
        """
        # Set up mock objects
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        # Empty list signals the address could not be assigned.
        m_client.assign_address.return_value = []

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1.1.1.1'
        interface = 'interface'

        # Call method under test expecting a SystemExit
        self.assertRaises(SystemExit, container.container_ip_add,
                          container_name, ip, interface)

        # Assert
        self.assertTrue(m_enforce_root.called)
        self.assertTrue(m_get_pool_or_exit.called)
        self.assertTrue(m_get_container_info_or_exit.called)
        self.assertTrue(m_client.get_endpoint.called)
        # The interface is never touched when assignment fails.
        self.assertFalse(m_netns.add_ip_to_ns_veth.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
    def test_container_ip_add_error_updating_datastore(
            self, m_netns_add_ip_to_ns_veth, m_client,
            m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
        """
        Test for container_ip_add when the client fails to update endpoint

        client.update_endpoint raises a KeyError.
        Assert that the system then exits, the assigned address is released,
        and all expected calls are made
        """
        # Set up mock objects
        m_get_pool_or_exit.return_value = 'pool'
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        # Datastore update fails after the address has been assigned.
        m_client.update_endpoint.side_effect = KeyError

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1.1.1.1'
        interface = 'interface'

        # Call method under test expecting a SystemExit
        self.assertRaises(SystemExit, container.container_ip_add,
                          container_name, ip, interface)

        # Assert
        self.assertTrue(m_enforce_root.called)
        self.assertTrue(m_get_pool_or_exit.called)
        self.assertTrue(m_get_container_info_or_exit.called)
        self.assertTrue(m_client.get_endpoint.called)
        self.assertTrue(m_client.assign_address.called)
        # Roll-back: the freshly assigned address is released again.
        m_client.unassign_address.assert_called_once_with('pool', ip)
        self.assertFalse(m_netns_add_ip_to_ns_veth.called)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
    def test_container_ip_add_netns_error_ipv4(
            self, m_netns_add_ip_to_ns_veth, m_client,
            m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
        """
        Test container_ip_add when netns cannot add an ipv4 to interface

        netns.add_ip_to_ns_veth throws a CalledProcessError.
        Assert that the system then exits, the ipv4 net and address are
        rolled back, and all expected calls are made
        """
        # Set up mock objects
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        m_get_pool_or_exit.return_value = 'pool'
        m_endpoint = Mock()
        m_client.get_endpoint.return_value = m_endpoint
        # The veth configuration step fails.
        err = CalledProcessError(
            1, m_netns_add_ip_to_ns_veth, "Error updating container")
        m_netns_add_ip_to_ns_veth.side_effect = err

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1.1.1.1'
        interface = 'interface'

        # Call method under test expecting a SystemExit
        self.assertRaises(SystemExit, container.container_ip_add,
                          container_name, ip, interface)

        # Assert
        self.assertTrue(m_enforce_root.called)
        self.assertTrue(m_get_pool_or_exit.called)
        self.assertTrue(m_get_container_info_or_exit.called)
        self.assertTrue(m_client.get_endpoint.called)
        self.assertTrue(m_client.assign_address.called)
        self.assertTrue(m_netns_add_ip_to_ns_veth.called)
        # Roll-back: the net is removed from the endpoint again, the
        # endpoint is updated twice (add, then remove), and the address
        # is released.
        m_endpoint.ipv4_nets.remove.assert_called_once_with(
            IPNetwork(IPAddress(ip))
        )
        m_client.update_endpoint.assert_has_calls([
            call(m_endpoint), call(m_endpoint)])
        m_client.unassign_address.assert_called_once_with('pool', ip)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.print_container_not_in_calico_msg', autospec=True)
    @patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
    def test_container_ip_add_netns_error_ipv6(
            self, m_netns, m_print_container_not_in_calico_msg, m_client,
            m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
        """
        Test container_ip_add when netns cannot add an ipv6 to interface

        netns.add_ip_to_ns_veth throws a CalledProcessError.
        Assert that the system then exits, the ipv6 net and address are
        rolled back, and all expected calls are made
        """
        # Set up mock objects
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        m_get_pool_or_exit.return_value = 'pool'
        m_endpoint = Mock()
        m_client.get_endpoint.return_value = m_endpoint
        # The veth configuration step fails.
        err = CalledProcessError(1, m_netns, "Error updating container")
        m_netns.side_effect = err

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1::1'
        interface = 'interface'

        # Call method under test
        self.assertRaises(SystemExit, container.container_ip_add,
                          container_name, ip, interface)

        # Assert
        self.assertTrue(m_enforce_root.called)
        self.assertTrue(m_get_pool_or_exit.called)
        self.assertTrue(m_get_container_info_or_exit.called)
        self.assertTrue(m_client.get_endpoint.called)
        self.assertTrue(m_client.assign_address.called)
        self.assertTrue(m_netns.called)
        # Roll-back: the net is removed from the endpoint again, the
        # endpoint is updated twice (add, then remove), and the address
        # is released.
        m_endpoint.ipv6_nets.remove.assert_called_once_with(
            IPNetwork(IPAddress(ip))
        )
        m_client.update_endpoint.assert_has_calls([
            call(m_endpoint), call(m_endpoint)])
        m_client.unassign_address.assert_called_once_with('pool', ip)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.netns', autospec=True)
    def test_container_ip_remove_ipv4(self, m_netns, m_client,
                       m_get_container_info_or_exit, m_get_pool_or_exit,
                       m_enforce_root):
        """
        Test container_ip_remove with an ipv4 ip argument

        Happy path: the endpoint holds the address; it is removed from the
        endpoint, the interface, and the pool.
        """
        # Set up mock objects
        m_get_pool_or_exit.return_value = 'pool'
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        # Endpoint currently holding the target IPv4 network.
        ipv4_nets = set()
        ipv4_nets.add(IPNetwork(IPAddress('1.1.1.1')))
        m_endpoint = Mock(spec=Endpoint)
        m_endpoint.ipv4_nets = ipv4_nets
        m_client.get_endpoint.return_value = m_endpoint

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1.1.1.1'
        interface = 'interface'

        # Call method under test
        container.container_ip_remove(container_name, ip, interface)

        # Assert
        m_enforce_root.assert_called_once_with()
        m_get_pool_or_exit.assert_called_once_with(IPAddress(ip))
        m_get_container_info_or_exit.assert_called_once_with(container_name)
        m_client.get_endpoint.assert_called_once_with(
            hostname=utils.hostname,
            orchestrator_id=utils.ORCHESTRATOR_ID,
            workload_id=666
        )
        m_client.update_endpoint.assert_called_once_with(m_endpoint)
        m_netns.remove_ip_from_ns_veth.assert_called_once_with(
            'Pid_info',
            IPAddress(ip),
            interface
        )
        m_client.unassign_address.assert_called_once_with('pool', ip)
    @patch('calico_ctl.container.enforce_root', autospec=True)
    @patch('calico_ctl.container.get_pool_or_exit', autospec=True)
    @patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
    @patch('calico_ctl.container.client', autospec=True)
    @patch('calico_ctl.container.netns', autospec=True)
    def test_container_ip_remove_ipv6(self, m_netns, m_client,
                       m_get_container_info_or_exit, m_get_pool_or_exit,
                       m_enforce_root):
        """
        Test for container_ip_remove with an ipv6 ip argument

        Happy path: the endpoint holds the address; it is removed from the
        endpoint, the interface, and the pool.
        """
        # Set up mock objects
        m_get_pool_or_exit.return_value = 'pool'
        m_get_container_info_or_exit.return_value = {
            'Id': 666,
            'State': {'Running': 1, 'Pid': 'Pid_info'}
        }
        # Endpoint currently holding the target IPv6 network.
        ipv6_nets = set()
        ipv6_nets.add(IPNetwork(IPAddress('1::1')))
        m_endpoint = Mock(spec=Endpoint)
        m_endpoint.ipv6_nets = ipv6_nets
        m_client.get_endpoint.return_value = m_endpoint

        # Set up arguments to pass to method under test
        container_name = 'container1'
        ip = '1::1'
        interface = 'interface'

        # Call method under test
        container.container_ip_remove(container_name, ip, interface)

        # Assert
        m_enforce_root.assert_called_once_with()
        m_get_pool_or_exit.assert_called_once_with(IPAddress(ip))
        m_get_container_info_or_exit.assert_called_once_with(container_name)
        m_client.get_endpoint.assert_called_once_with(
            hostname=utils.hostname,
            orchestrator_id=utils.ORCHESTRATOR_ID,
            workload_id=666
        )
        m_client.update_endpoint.assert_called_once_with(m_endpoint)
        m_netns.remove_ip_from_ns_veth.assert_called_once_with(
            'Pid_info',
            IPAddress(ip),
            interface
        )
        m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_not_running(
self, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_remove when the container is not running
get_container_info_or_exit returns a running state of value 0.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 0, 'Pid': 'Pid_info'}
}
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertFalse(m_client.get_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_ip_not_assigned(
self, m_client, m_get_container_info_or_exit, m_get_pool_or_exit,
m_enforce_root):
"""
Test container_ip_remove when an IP address is not assigned to a container
client.get_endpoint returns an endpoint with no ip nets
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.update_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_container_not_on_calico(
self, m_client, m_get_container_info_or_exit, m_get_pool_or_exit,
m_enforce_root):
"""
Test for container_ip_remove when container is not networked into Calico
client.get_endpoint raises a KeyError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.get_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.update_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_fail_updating_datastore(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_remove when client fails to update endpoint in datastore
client.update_endpoint throws a KeyError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
ipv6_nets.add(IPNetwork(IPAddress('1::1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
m_client.update_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.update_endpoint.called)
self.assertFalse(m_netns.remove_ip_from_ns_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_netns_error(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_remove when client fails on removing ip from interface
netns.remove_ip_from_ns_veth raises a CalledProcessError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
ipv6_nets.add(IPNetwork(IPAddress('1::1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
err = CalledProcessError(1, m_netns, "Error removing ip")
m_netns.remove_ip_from_ns_veth.side_effect = err
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.update_endpoint.called)
self.assertTrue(m_netns.remove_ip_from_ns_veth.called)
self.assertFalse(m_client.unassign_address.called)
|
from firenado.util.sqlalchemy_util import Base, base_to_dict
from sqlalchemy import Column, String
from sqlalchemy.types import Integer, DateTime
from sqlalchemy.sql import text
import unittest
class TestBase(Base):
    """Minimal user-like declarative model used as the base_to_dict fixture."""
    __tablename__ = "test"
    # Surrogate primary key.
    id = Column("id", Integer, primary_key=True)
    username = Column("username", String(150), nullable=False)
    first_name = Column("first_name", String(150), nullable=False)
    last_name = Column("last_name", String(150), nullable=False)
    password = Column("password", String(150), nullable=False)
    email = Column("email", String(150), nullable=False)
    # Timestamps are produced by the database (server_default), so they stay
    # None on instances that were never flushed to a session.
    created = Column("created", DateTime, nullable=False,
                     server_default=text("now()"))
    modified = Column("modified", DateTime, nullable=False,
                      server_default=text("now()"))
class BaseToDictTestCase(unittest.TestCase):
    """Tests for firenado.util.sqlalchemy_util.base_to_dict.

    Bug fix: both test methods were originally named ``test_base_to_dict``,
    so the second definition silently shadowed the first and the full-column
    conversion was never tested. The second test is renamed so unittest
    collects both.
    """

    def setUp(self):
        # A populated model instance. created/modified remain unset because
        # their server_default only applies on a database insert.
        self.test_object = TestBase()
        self.test_object.id = 1
        self.test_object.username = "anusername"
        self.test_object.password = "apassword"
        self.test_object.first_name = "Test"
        self.test_object.last_name = "Object"
        self.test_object.email = "test@example.com"

    def test_base_to_dict(self):
        """Without a column list, every mapped column is exported."""
        dict_from_base = base_to_dict(self.test_object)
        self.assertEqual(dict_from_base['id'], self.test_object.id)
        self.assertEqual(dict_from_base['username'], self.test_object.username)
        self.assertEqual(dict_from_base['password'], self.test_object.password)
        self.assertEqual(dict_from_base['first_name'],
                         self.test_object.first_name)
        self.assertEqual(dict_from_base['last_name'],
                         self.test_object.last_name)
        self.assertEqual(dict_from_base['email'], self.test_object.email)
        self.assertEqual(dict_from_base['created'], self.test_object.created)
        self.assertEqual(dict_from_base['modified'], self.test_object.modified)

    def test_base_to_dict_with_columns(self):
        """With an explicit column list, only those columns are exported."""
        dict_from_base = base_to_dict(self.test_object,
                                      ["id", "username", "first_name"])
        self.assertEqual(dict_from_base['id'], self.test_object.id)
        self.assertEqual(dict_from_base['username'], self.test_object.username)
        self.assertEqual(dict_from_base['first_name'],
                         self.test_object.first_name)
        # Columns outside the requested list must be absent.
        for absent in ("password", "last_name", "email", "created",
                       "modified"):
            self.assertTrue(absent not in dict_from_base)
|
import sys
import os
import logging
import traceback
import mallet_lda
class MalletTagTopics(mallet_lda.MalletLDA):
    """
    Topic modeling with separation based on tags.

    Documents are labeled with a user-supplied tag; untagged documents are
    dropped from the corpus before the MALLET LDA run.
    """
    def _basic_params(self):
        # Processor identity and output template (consumed by the base class).
        self.name = 'mallet_lda_tags'
        self.categorical = False
        self.template_name = 'mallet_lda'
        self.dry_run = False
        self.topics = 50
        # Any extra positional argument is taken to be a DFR data directory.
        self.dfr = len(self.extra_args) > 0
        if self.dfr:
            self.dfr_dir = self.extra_args[0]
    def post_setup(self):
        # Expects named_args['tags'] to map tag -> collection of itemIDs
        # (presumably; verify against the caller). Each document whose
        # itemID appears under a tag gets that tag as its 'label'; documents
        # matching no tag are removed from both metadata and the file list.
        if self.named_args is not None:
            if 'tags' in self.named_args:
                self.tags = self.named_args['tags']
                # Python 2: .keys() returns a snapshot list, so deleting
                # entries from self.metadata inside the loop is safe.
                for filename in self.metadata.keys():
                    my_tags = [x for (x, y) in self.tags.iteritems()
                               if int(self.metadata[filename]['itemID'
                               ]) in y]
                    if len(my_tags) > 0:
                        # First matching tag wins when several apply.
                        self.metadata[filename]['label'] = my_tags[0]
                    else:
                        del self.metadata[filename]
                        self.files.remove(filename)
if __name__ == '__main__':
    try:
        processor = MalletTagTopics(track_progress=False)
        processor.process()
    # Catch Exception instead of a bare except so SystemExit and
    # KeyboardInterrupt still terminate the process normally instead of
    # being swallowed into the log.
    except Exception:
        logging.error(traceback.format_exc())
|
import sys
import os
import subprocess
import string
# Precomputed set of printable ASCII characters for O(1) membership tests.
printable = set(string.printable)
def sanitize(txt):
    """Return *txt* with every non-printable character stripped."""
    return ''.join(ch for ch in txt if ch in printable)
def traverse(t, outfile):
    """Depth-first dump of each node as a sanitized 'code<TAB>description' line."""
    outfile.write(sanitize(t.code + '\t' + t.description) + '\n')
    for child in t.children:
        traverse(child, outfile)
def getEdges(t, outfile):
    """Depth-first dump of parent-child pairs as 'parent<TAB>child' lines."""
    for child in t.children:
        outfile.write(sanitize(t.code + '\t' + child.code) + '\n')
        getEdges(child, outfile)
print 'cloning github repository sirrice/icd9.git'
subprocess.call('git clone https://github.com/sirrice/icd9.git', shell=1)
sys.path.append('icd9')
from icd9 import ICD9
tree = ICD9('icd9/codes.json')
toplevelnodes = tree.children
print 'creating name file'
outfile = file('code.names', 'w')
traverse(tree, outfile)
outfile.close()
print 'creating edges file'
outfile = file('code.edges', 'w')
getEdges(tree, outfile)
outfile.close()
print 'cleaning up'
|
"""Bootstrap setuptools installation
To use setuptools in your package's setup.py, include this
file in the same directory and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
To require a specific version of setuptools, set a download
mirror, or use an alternate download directory, simply supply
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
# noinspection PyCompatibility
from urllib.request import urlopen
except ImportError:
# noinspection PyCompatibility
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
# Setuptools version installed when the caller requests none, and the PyPI
# source-download mirror the zip archive is fetched from (must end in '/').
DEFAULT_VERSION = "7.0"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
"""
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
    """Unpack the setuptools archive and run ``setup.py install`` inside it.

    Returns 2 when the install step fails (used by the caller as the
    process exit code); returns None on success.
    """
    with archive_context(archive_filename):
        log.warn('Installing Setuptools')
        if _python_cmd('setup.py', 'install', *install_args):
            return None
        log.warn('Something went wrong during the installation.')
        log.warn('See the error message above.')
        return 2
def _build_egg(egg, archive_filename, to_dir):
    """Build a setuptools egg from *archive_filename* into *to_dir*.

    Raises IOError when the expected egg file did not appear.
    """
    with archive_context(archive_filename):
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    log.warn(egg)
    built = os.path.exists(egg)
    if not built:
        raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """
    def __enter__(self):
        # The archive itself is the context value.
        return self
    def __exit__(self, type, value, traceback):
        # Always close; returning None lets any exception propagate.
        self.close()
    def __new__(cls, *args, **kwargs):
        """
        Construct a ZipFile or ContextualZipFile as appropriate.

        On interpreters where ZipFile is already a context manager
        (has __exit__), return a plain ZipFile instance; only fall back
        to this subclass on Python 2.6 where it is not.
        """
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)
@contextlib.contextmanager
def archive_context(filename):
    """
    Unpack the zip archive *filename* into a fresh temporary directory and
    chdir into its top-level entry for the duration of the with-block.

    The previous working directory is restored and the temporary tree
    removed even if the body raises.
    """
    # extracting the archive
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        with ContextualZipFile(filename) as archive:
            archive.extractall()
        # going in the directory -- assumes the archive contains a single
        # top-level directory (listdir()[0]), as sdist zips do.
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        yield
    finally:
        # Restore cwd first: we cannot rmtree the directory we are inside.
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
    """
    Ensure a setuptools egg for *version* exists in *to_dir*, prepend it to
    sys.path, and import setuptools from it.

    The egg is downloaded and built only when not already present.
    """
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        archive = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, archive, to_dir)
    sys.path.insert(0, egg)
    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details),
    # so the following import resolves against the egg just added.
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=os.curdir, download_delay=15):
    """
    Ensure that setuptools >= *version* is importable, downloading and
    bootstrapping it into *to_dir* when it is missing or too old.

    Exits the process (code 2) when an incompatible setuptools is already
    imported and therefore cannot be replaced in this interpreter.
    """
    to_dir = os.path.abspath(to_dir)
    rep_modules = 'pkg_resources', 'setuptools'
    # Record whether either module was imported BEFORE we start touching
    # sys.modules below.
    imported = set(sys.modules).intersection(rep_modules)
    try:
        import pkg_resources
    except ImportError:
        # No setuptools installation at all: bootstrap one.
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir, download_delay)
    except pkg_resources.VersionConflict as VC_err:
        if imported:
            # Too late to swap implementations under a live import.
            msg = textwrap.dedent("""
                The required version of setuptools (>={version}) is not available,
                and can't be installed while this script is running. Please
                install a more recent version first, using
                'easy_install -U setuptools'.
                (Currently using {VC_err.args[0]!r})
                """).format(VC_err=VC_err, version=version)
            sys.stderr.write(msg)
            sys.exit(2)
        # otherwise, reload ok
        del pkg_resources, sys.modules['pkg_resources']
        return _do_download(version, download_base, to_dir, download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
    """
    Download *url* to *target* via PowerShell, which validates certificate
    trust. Raises when the PowerShell command fails.
    """
    target = os.path.abspath(target)
    # vars() interpolates the local 'url' and 'target' names.
    ps_script = (
        "[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
        "[System.Net.CredentialCache]::DefaultCredentials; "
        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
        % vars()
    )
    _clean_check(['powershell', '-Command', ps_script], target)


def has_powershell():
    """Return True when a working ``powershell`` binary responds (Windows only)."""
    if platform.system() != 'Windows':
        return False
    probe = ['powershell', '-Command', 'echo test']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(probe, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True


download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    """Download *url* to *target* with curl (validates TLS trust)."""
    _clean_check(['curl', url, '--silent', '--output', target], target)


def has_curl():
    """Return True when a working ``curl`` binary is on PATH."""
    probe = ['curl', '--version']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(probe, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True


download_file_curl.viable = has_curl
def download_file_wget(url, target):
    """Download *url* to *target* with wget (validates TLS trust)."""
    _clean_check(['wget', url, '--quiet', '--output-document', target], target)


def has_wget():
    """Return True when a working ``wget`` binary is on PATH."""
    probe = ['wget', '--version']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(probe, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True


download_file_wget.viable = has_wget
def download_file_insecure(url, target):
    """
    Use Python to download the file, even though it cannot authenticate the
    connection.
    """
    with contextlib.closing(urlopen(url)) as src:
        # Read all the data in one block.
        data = src.read()
    # Write all the data in one block to avoid creating a partial file.
    with open(target, "wb") as dst:
        dst.write(data)


# The pure-Python fallback always works, so it is unconditionally viable.
download_file_insecure.viable = lambda: True
def get_best_downloader():
    """Return the first viable downloader, most trustworthy first.

    download_file_insecure is always viable, so in practice this never
    returns None.
    """
    candidates = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    for downloader in candidates:
        if downloader.viable():
            return downloader
    return None
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
    """
    Download the setuptools sdist zip and return its real path.

    `version` must be available as an sdist under `download_base` (which
    should end with a '/'). `to_dir` receives the download. `delay` is
    accepted for interface compatibility. `downloader_factory` is a
    zero-argument callable returning a (url, target) download function.
    An existing file at the target path is reused without re-downloading.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    zip_name = "setuptools-%s.zip" % version
    saveto = os.path.join(to_dir, zip_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        url = download_base + zip_name
        log.warn("Downloading %s", url)
        downloader_factory()(url, saveto)
    return os.path.realpath(saveto)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
return ['--user'] if options.user_install else []
def _parse_args():
    """Parse command-line options; positional arguments are discarded."""
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    parser.add_option(
        '--version', help="Specify which version to download",
        default=DEFAULT_VERSION,
    )
    options, _ = parser.parse_args()
    return options
def main():
    """Install or upgrade setuptools and EasyInstall."""
    opts = _parse_args()
    archive = download_setuptools(
        version=opts.version,
        download_base=opts.download_base,
        downloader_factory=opts.downloader_factory,
    )
    # Exit status: None on success, 2 when the install step failed.
    return _install(archive, _build_install_args(opts))


if __name__ == '__main__':
    sys.exit(main())
|
import sys
import unittest
from streamlink.plugin.api.utils import itertags
def unsupported_versions_1979():
    """Unsupported python versions for itertags
    3.7.0 - 3.7.2 and 3.8.0a1
    - https://github.com/streamlink/streamlink/issues/1979
    - https://bugs.python.org/issue34294
    """
    v = sys.version_info
    if v.major != 3:
        return False
    # 3.7.0 - 3.7.2
    broken_37 = v.minor == 7 and v.micro <= 2
    # 3.8.0a1
    broken_38a1 = (v.minor == 8 and v.micro == 0
                   and v.releaselevel == 'alpha' and v.serial <= 1)
    return broken_37 or broken_38a1
class TestPluginUtil(unittest.TestCase):
    """Tests for streamlink.plugin.api.utils.itertags.

    Bug fix: the length checks used ``assertTrue(len(x), n)``, but
    assertTrue's second positional argument is the failure *message*, so
    the expected count was never verified. They are now ``assertEqual``.
    """

    # The trailing space after '<a' and the unindented continuation lines
    # are intentional (exercised by test_tag_inner_tag) -- hence noqa W291.
    test_html = """
    <!doctype html>
    <html lang="en" class="no-js">
    <title>Title</title>
    <meta property="og:type" content= "website" />
    <meta property="og:url" content="http://test.se/"/>
    <meta property="og:site_name" content="Test" />
    <script src="https://test.se/test.js"></script>
    <link rel="stylesheet" type="text/css" href="https://test.se/test.css">
    <script>Tester.ready(function () {
alert("Hello, world!"); });</script>
    <p>
    <a 
href="http://test.se/foo">bar</a>
    </p>
    </html>
    """ # noqa: W291

    def test_itertags_single_text(self):
        """A simple tag yields its text and no attributes."""
        title = list(itertags(self.test_html, "title"))
        self.assertEqual(len(title), 1)
        self.assertEqual(title[0].tag, "title")
        self.assertEqual(title[0].text, "Title")
        self.assertEqual(title[0].attributes, {})

    def test_itertags_attrs_text(self):
        """Both script tags are found, with attributes and body text."""
        script = list(itertags(self.test_html, "script"))
        self.assertEqual(len(script), 2)
        self.assertEqual(script[0].tag, "script")
        self.assertEqual(script[0].text, "")
        self.assertEqual(script[0].attributes, {"src": "https://test.se/test.js"})
        self.assertEqual(script[1].tag, "script")
        self.assertEqual(script[1].text.strip(), """Tester.ready(function () {\nalert("Hello, world!"); });""")
        self.assertEqual(script[1].attributes, {})

    @unittest.skipIf(unsupported_versions_1979(),
                     "python3.7 issue, see bpo-34294")
    def test_itertags_multi_attrs(self):
        """All meta tags are found with their full attribute dicts."""
        metas = list(itertags(self.test_html, "meta"))
        self.assertEqual(len(metas), 3)
        self.assertTrue(all(meta.tag == "meta" for meta in metas))
        self.assertEqual(metas[0].text, None)
        self.assertEqual(metas[1].text, None)
        self.assertEqual(metas[2].text, None)
        self.assertEqual(metas[0].attributes, {"property": "og:type", "content": "website"})
        self.assertEqual(metas[1].attributes, {"property": "og:url", "content": "http://test.se/"})
        self.assertEqual(metas[2].attributes, {"property": "og:site_name", "content": "Test"})

    def test_multi_line_a(self):
        """A tag whose attributes span lines is still parsed."""
        anchor = list(itertags(self.test_html, "a"))
        self.assertEqual(len(anchor), 1)
        self.assertEqual(anchor[0].tag, "a")
        self.assertEqual(anchor[0].text, "bar")
        self.assertEqual(anchor[0].attributes, {"href": "http://test.se/foo"})

    @unittest.skipIf(unsupported_versions_1979(),
                     "python3.7 issue, see bpo-34294")
    def test_no_end_tag(self):
        """A void element (no closing tag) is found with text None."""
        links = list(itertags(self.test_html, "link"))
        self.assertEqual(len(links), 1)
        self.assertEqual(links[0].tag, "link")
        self.assertEqual(links[0].text, None)
        self.assertEqual(links[0].attributes, {"rel": "stylesheet",
                                               "type": "text/css",
                                               "href": "https://test.se/test.css"})

    def test_tag_inner_tag(self):
        """Nested markup is returned verbatim as the outer tag's text."""
        links = list(itertags(self.test_html, "p"))
        self.assertEqual(len(links), 1)
        self.assertEqual(links[0].tag, "p")
        self.assertEqual(links[0].text.strip(), '<a \nhref="http://test.se/foo">bar</a>')
        self.assertEqual(links[0].attributes, {})
|
import batoid
import numpy as np
from test_helpers import timer, do_pickle, all_obj_diff, init_gpu, rays_allclose
@timer
def test_properties():
    """Sphere stores its radius of curvature and survives pickling."""
    rng = np.random.default_rng(5)
    for _ in range(100):
        radius = rng.normal(0.0, 0.3)  # negative curvature is allowed
        surface = batoid.Sphere(radius)
        assert surface.R == radius
        do_pickle(surface)
@timer
def test_sag():
    """Check Sphere.sag against the closed-form formula
    sag(x, y) = R*(1 - sqrt(1 - r^2/R^2)), for scalars, arrays, and
    non-contiguous array views."""
    rng = np.random.default_rng(57)
    for i in range(100):
        # Sample 1/R so both signs and a wide range of curvatures occur.
        R = 1./rng.normal(0.0, 0.3)
        sphere = batoid.Sphere(R)
        for j in range(10):
            # Stay within 0.7*|R| so the sqrt argument remains positive.
            x = rng.uniform(-0.7*abs(R), 0.7*abs(R))
            y = rng.uniform(-0.7*abs(R), 0.7*abs(R))
            result = sphere.sag(x, y)
            np.testing.assert_allclose(
                result,
                R*(1-np.sqrt(1.0-(x*x + y*y)/R/R))
            )
            # Check that it returned a scalar float and not an array
            assert isinstance(result, float)
        # Check 0,0
        np.testing.assert_allclose(sphere.sag(0, 0), 0.0, rtol=0, atol=1e-17)
        # Check vectorization
        x = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10))
        y = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10))
        np.testing.assert_allclose(
            sphere.sag(x, y),
            R*(1-np.sqrt(1.0-(x*x + y*y)/R/R))
        )
        # Make sure non-unit stride arrays also work
        np.testing.assert_allclose(
            sphere.sag(x[::5,::2], y[::5,::2]),
            R*(1-np.sqrt(1.0-(x*x + y*y)/R/R))[::5,::2]
        )
        do_pickle(sphere)
@timer
def test_normal():
    """Check Sphere.normal against the analytic unit normal derived from
    the radial sag slope dz/dr = (r/R)/sqrt(1 - (r/R)^2)."""
    rng = np.random.default_rng(577)
    for i in range(100):
        R = 1./rng.normal(0.0, 0.3)
        sphere = batoid.Sphere(R)
        for j in range(10):
            x = rng.uniform(-0.7*abs(R), 0.7*abs(R))
            y = rng.uniform(-0.7*abs(R), 0.7*abs(R))
            result = sphere.normal(x, y)
            # Expected normal: nz normalizes the (-dz/dr, 1) slope vector,
            # projected along the radial direction (x/r, y/r).
            r = np.hypot(x, y)
            rat = r/R
            dzdr = rat/np.sqrt(1-rat*rat)
            nz = 1/np.sqrt(1+dzdr*dzdr)
            normal = np.array([-x/r*dzdr*nz, -y/r*dzdr*nz, nz])
            np.testing.assert_allclose(result, normal)
        # Check 0,0
        np.testing.assert_equal(sphere.normal(0, 0), np.array([0, 0, 1]))
        # Check vectorization
        x = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10))
        y = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10))
        r = np.hypot(x, y)
        rat = r/R
        dzdr = rat/np.sqrt(1-rat*rat)
        nz = 1/np.sqrt(1+dzdr*dzdr)
        normal = np.dstack([-x/r*dzdr*nz, -y/r*dzdr*nz, nz])
        np.testing.assert_allclose(
            sphere.normal(x, y),
            normal
        )
        # Make sure non-unit stride arrays also work
        np.testing.assert_allclose(
            sphere.normal(x[::5,::2], y[::5,::2]),
            normal[::5, ::2]
        )
@timer
def test_intersect():
    """Vertical rays launched from below must intersect the sphere at
    z = sag(x, y) (shifted by the surface coordinate system's origin)."""
    rng = np.random.default_rng(5772)
    size = 10_000
    for i in range(100):
        R = 1./rng.normal(0.0, 0.3)
        # Surface frame displaced 1 unit down in z relative to the rays.
        sphereCoordSys = batoid.CoordSys(origin=[0, 0, -1])
        sphere = batoid.Sphere(R)
        x = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
        y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
        # Start well below the surface so every ray travels upward to it.
        z = np.full_like(x, -2*abs(R))
        # If we shoot rays straight up, then it's easy to predict the intersection
        vx = np.zeros_like(x)
        vy = np.zeros_like(x)
        vz = np.ones_like(x)
        rv = batoid.RayVector(x, y, z, vx, vy, vz)
        np.testing.assert_allclose(rv.z, -2*abs(R))
        rv2 = batoid.intersect(sphere, rv.copy(), sphereCoordSys)
        assert rv2.coordSys == sphereCoordSys
        rv2 = rv2.toCoordSys(batoid.CoordSys())
        np.testing.assert_allclose(rv2.x, x)
        np.testing.assert_allclose(rv2.y, y)
        # sag minus the 1-unit origin offset of the surface frame.
        np.testing.assert_allclose(rv2.z, sphere.sag(x, y)-1, rtol=0, atol=1e-9)
        # Check default intersect coordTransform
        rv2 = rv.copy().toCoordSys(sphereCoordSys)
        batoid.intersect(sphere, rv2)
        assert rv2.coordSys == sphereCoordSys
        rv2 = rv2.toCoordSys(batoid.CoordSys())
        np.testing.assert_allclose(rv2.x, x)
        np.testing.assert_allclose(rv2.y, y)
        np.testing.assert_allclose(rv2.z, sphere.sag(x, y)-1, rtol=0, atol=1e-9)
@timer
def test_reflect():
    """Reflected rays must satisfy the law of reflection: equal angles to
    the surface normal, with incident/reflected rays and normal coplanar."""
    rng = np.random.default_rng(57721)
    size = 10_000
    for i in range(100):
        R = 1./rng.normal(0.0, 0.3)
        sphere = batoid.Sphere(R)
        x = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
        y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
        z = np.full_like(x, -2*abs(R))
        # Nearly-vertical rays with small random tilts.
        vx = rng.uniform(-1e-5, 1e-5, size=size)
        vy = rng.uniform(-1e-5, 1e-5, size=size)
        vz = np.full_like(x, 1)
        rv = batoid.RayVector(x, y, z, vx, vy, vz)
        # Free function and method forms must agree.
        rvr = batoid.reflect(sphere, rv.copy())
        rvr2 = sphere.reflect(rv.copy())
        rays_allclose(rvr, rvr2)
        # print(f"{np.sum(rvr.failed)/len(rvr)*100:.2f}% failed")
        normal = sphere.normal(rvr.x, rvr.y)
        # Test law of reflection
        a0 = np.einsum("ad,ad->a", normal, rv.v)[~rvr.failed]
        a1 = np.einsum("ad,ad->a", normal, -rvr.v)[~rvr.failed]
        np.testing.assert_allclose(
            a0, a1,
            rtol=0, atol=1e-12
        )
        # Test that rv.v, rvr.v and normal are all in the same plane
        np.testing.assert_allclose(
            np.einsum(
                "ad,ad->a",
                np.cross(normal, rv.v),
                rv.v
            )[~rvr.failed],
            0.0,
            rtol=0, atol=1e-12
        )
@timer
def test_refract():
    """Refracted rays must satisfy Snell's law at the sphere between two
    constant media, with incident/refracted rays and normal coplanar."""
    rng = np.random.default_rng(577215)
    size = 10_000
    for i in range(100):
        R = 1./rng.normal(0.0, 0.3)
        sphere = batoid.Sphere(R)
        m0 = batoid.ConstMedium(rng.normal(1.2, 0.01))
        m1 = batoid.ConstMedium(rng.normal(1.3, 0.01))
        x = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
        y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
        z = np.full_like(x, -2*abs(R))
        vx = rng.uniform(-1e-5, 1e-5, size=size)
        vy = rng.uniform(-1e-5, 1e-5, size=size)
        # |v| = 1/n in medium m0 (velocity normalization used by batoid).
        vz = np.sqrt(1-vx*vx-vy*vy)/m0.n
        rv = batoid.RayVector(x, y, z, vx, vy, vz)
        # Free function and method forms must agree.
        rvr = batoid.refract(sphere, rv.copy(), m0, m1)
        rvr2 = sphere.refract(rv.copy(), m0, m1)
        rays_allclose(rvr, rvr2)
        # print(f"{np.sum(rvr.failed)/len(rvr)*100:.2f}% failed")
        normal = sphere.normal(rvr.x, rvr.y)
        # Test Snell's law: tangential components n*sin(theta) match on
        # both sides (v*n rescales velocities to unit directions).
        s0 = np.sum(np.cross(normal, rv.v*m0.n)[~rvr.failed], axis=-1)
        s1 = np.sum(np.cross(normal, rvr.v*m1.n)[~rvr.failed], axis=-1)
        np.testing.assert_allclose(
            m0.n*s0, m1.n*s1,
            rtol=0, atol=1e-9
        )
        # Test that rv.v, rvr.v and normal are all in the same plane
        np.testing.assert_allclose(
            np.einsum(
                "ad,ad->a",
                np.cross(normal, rv.v),
                rv.v
            )[~rvr.failed],
            0.0,
            rtol=0, atol=1e-12
        )
@timer
def test_ne():
    """Equality must distinguish different radii and other surface types."""
    all_obj_diff([
        batoid.Sphere(1.0),
        batoid.Sphere(2.0),
        batoid.Plane(),
    ])
@timer
def test_fail():
    """Rays that miss the sphere are flagged failed; hits are not."""
    surface = batoid.Sphere(1.0)
    # Offset too far in y for any intersection with a unit sphere.
    miss = batoid.RayVector(0, 10, 0, 0, 0, -1)
    result = batoid.intersect(surface, miss.copy())
    np.testing.assert_equal(result.failed, np.array([True]))
    # A ray straight down the axis does intersect.
    hit = batoid.RayVector(0, 0, 0, 0, 0, -1)
    result = batoid.intersect(surface, hit.copy())
    np.testing.assert_equal(result.failed, np.array([False]))
if __name__ == '__main__':
    # Run the full suite in definition order when invoked as a script.
    for case in (
        test_properties,
        test_sag,
        test_normal,
        test_intersect,
        test_reflect,
        test_refract,
        test_ne,
        test_fail,
    ):
        case()
|
import sys
import re
import mpmath as mp
# Work at 250 significant digits so interval endpoints survive formatting.
mp.dps = 250
mp.mp.dps = 250

if len(sys.argv) != 2:
    print("Usage: format_CIAAW.py ciaawfile")
    # sys.exit instead of quit(): quit is a site.py convenience that is not
    # guaranteed to exist (e.g. under python -S).
    sys.exit(1)

path = sys.argv[1]

# Row patterns in the CIAAW tables (group meanings inferred from the parse
# loop below -- verify against an actual input file):
#   element header: Z, symbol, name, [mass number], value...
atomre = re.compile(r'^(\d+) +(\w\w*) +(\w+) +\[?(\d+)\]?\*? +(.*) *$')
#   per-isotope row: mass number, value
isore = re.compile(r'^(\d+)\*? +(\[?\d.*.*\]?) *$')
# "[low,high]" interval value
brange = re.compile(r'^\[([\d\.]+),([\d\.]+)\].*$')
# "1.234(5)" value with parenthesised last-digit uncertainty
buncertain = re.compile(r'^([\d\.]+)\((\d+)\)[a-z]*$')
# bare integer (was r'^([\d\d]+)$'; [\d\d] is redundant -- \d is identical)
bnum = re.compile(r'^(\d+)$')
# atomic-mass table row: Z, symbol, name, value...
atommassline = re.compile(r'^(\d+) +(\w\w*) +(\w+) +(.*) *$')
def NumberStr(n):
    """Normalise a CIAAW value string into three 25-wide columns:
    central value, lower bound, upper bound.

    Handles four shapes, checked in order: a bare integer, a value with
    parenthesised uncertainty, a [low,high] interval, and a lone dash
    (no data -> three zeros).
    """
    s = n.replace(' ', '')                # drop internal spaces
    s = s.replace('(exactly)', '')        # the carbon mass says "(exactly)"

    m = bnum.match(s)
    if m:
        # Bare number: value == low == high.
        v = m.group(1)
        s = "{:<25} {:<25} {:<25}".format(v, v, v)

    m = buncertain.match(s)
    if m:
        # "1.2345(6)": the bracketed digits replace the trailing digits of
        # an all-zero copy of the value, giving the absolute uncertainty.
        center_txt = m.group(1)
        err_txt = re.sub(r'\d', '0', center_txt)
        tail = m.group(2)
        err_txt = err_txt[:len(err_txt) - len(tail)] + tail
        serr = mp.mpf(err_txt)
        scenter = mp.mpf(center_txt)
        s = "{:<25} {:<25} {:<25}".format(mp.nstr(scenter, 18), mp.nstr(scenter-serr, 18), mp.nstr(scenter+serr, 18))

    m = brange.match(s)
    if m:
        # "[low,high]": report the midpoint and both endpoints.
        slow = mp.mpf(m.group(1))
        shigh = mp.mpf(m.group(2))
        smid = (shigh + slow)/mp.mpf("2.0")
        s = "{:<25} {:<25} {:<25}".format(mp.nstr(smid, 18), mp.nstr(slow, 18), mp.nstr(shigh, 18))

    if s == "-":
        # A dash means no data at all.
        s = "{:<25} {:<25} {:<25}".format(0, 0, 0)
    return s
# One-shot conversion: read every line, classify it by row type and emit a
# normalised record; unrecognised lines pass through unchanged.
filelines = [ x.strip() for x in open(path).readlines() ]
curatom = None
for line in filelines:
    matomre = atomre.match(line)
    misore = isore.match(line)
    matommass = atommassline.match(line)
    if matomre:
        # Element header row: remember the "Z symbol" prefix so the isotope
        # rows that follow can be attributed to this element.
        curatom = "{:<5} {:<5}".format(matomre.group(1), matomre.group(2))
        print("{} {:<6} {:<25}".format(curatom, matomre.group(4), NumberStr(matomre.group(5))))
    elif misore:
        # Isotope row: reuse the current element prefix.
        print("{} {:<6} {:<25}".format(curatom, misore.group(1), NumberStr(misore.group(2))))
    elif matommass:
        # Atomic-mass table row (its own element prefix).
        curatom = "{:<5} {:<5}".format(matommass.group(1), matommass.group(2))
        print("{} {:<25}".format(curatom, NumberStr(matommass.group(4))))
    else:
        print(line) # comment lines, etc
|
import warnings
import unittest
import sys
from nose.tools import assert_raises
from gplearn.skutils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
# Only define this comparison test when nose provides assert_less.
try:
    from nose.tools import assert_less
    def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same thing as the scikit's
        assert_less(0, 1)
        _assert_less(0, 1)
        assert_raises(AssertionError, assert_less, 1, 0)
        assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
    pass
# Only define this comparison test when nose provides assert_greater.
try:
    from nose.tools import assert_greater
    def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
        assert_greater(1, 0)
        _assert_greater(1, 0)
        assert_raises(AssertionError, assert_greater, 0, 1)
        assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
    pass
def test_assert_less_equal():
    """assert_less_equal accepts <= pairs and rejects strictly-greater ones."""
    for a, b in ((0, 1), (1, 1)):
        assert_less_equal(a, b)
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    """assert_greater_equal accepts >= pairs and rejects strictly-less ones."""
    for a, b in ((1, 0), (1, 1)):
        assert_greater_equal(a, b)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    """set_random_state ignores estimators without a random_state (LDA)
    and seeds those that have one (the decision tree)."""
    lda = LDA()
    tree = DecisionTreeClassifier()
    # LDA has no random_state attribute: this is only a smoke test.
    set_random_state(lda, 3)
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_raise_message():
    """assert_raise_message matches both the exception type and its message."""
    def _raise_ValueError(message):
        raise ValueError(message)
    # Matching type and message passes.
    assert_raise_message(ValueError, "test", _raise_ValueError, "test")
    # Wrong message -> AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_ValueError, "test")
    # Wrong expected type -> the original ValueError propagates.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_ValueError, "test")
class TestWarns(unittest.TestCase):
    # Exercises assert_warns / assert_no_warnings and their interaction
    # with the process-global warnings filter state.
    def test_warn(self):
        def f():
            warnings.warn("yo")
            return 3
        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)
        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])
        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)
        failed = False
        # Save the filter list so it can be restored whatever happens.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError: f emits a
                # DeprecationWarning, not the expected UserWarning.
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
|
import numberlink
from datastore import *
from hashlib import sha1, sha256
from flask import make_response, render_template
import random
import datetime
from tz import gae_datetime_JST
from define import DEFAULT_YEAR
def adc_response(msg, isjson, code=200, json_encoded=False):
    """Build a Flask response for *msg*, rendered as JSON or HTML."""
    if json_encoded:
        body = msg
    else:
        body = render_template('response.json' if isjson else 'response.html',
                               msg=msg)
    resp = make_response(body)
    # Only these three codes get an explicit reason phrase.
    reason = {200: 'OK', 400: 'Bad Request', 401: 'Unauthorized'}.get(code)
    if reason is not None:
        resp.status = reason
    resp.status_code = code
    resp.headers['Content-Type'] = ('application/json' if isjson
                                    else 'text/html; charset=utf-8')
    return resp
def adc_response_html(html, code=200):
    """Wrap raw HTML in the raw.html template and return it as a response."""
    response = make_response(render_template('raw.html', raw=html))
    response.headers['Content-Type'] = 'text/html; charset=utf-8'
    response.status_code = code
    return response
def adc_response_text(body, code=200):
    """Return *body* verbatim as a text/plain response."""
    response = make_response(body)
    response.headers['Content-Type'] = 'text/plain; charset=utf-8'
    response.status_code = code
    return response
def adc_response_json(body, code=200):
    """Return *body* verbatim as an application/json response."""
    response = make_response(body)
    response.headers['Content-Type'] = 'application/json'
    response.status_code = code
    return response
def adc_response_Q_data(result):
    """Return the question text as plain text, or a 404 when *result* is None."""
    if result is None:
        return adc_response_text("Not Found\r\n", 404)
    return adc_response_text(result.text, 200)
def log(username, what):
    """Append a Log entry recording *what* the user did."""
    entry = Log(parent=log_key(), username=username, what=what)
    entry.put()
def log_get_or_delete(username=None, fetch_num=100, when=None, delete=False):
    """Fetch (or delete) up to *fetch_num* log entries, newest first.

    When *when* (a timedelta) is given, only entries newer than now-when
    are considered.  Returns a list of dicts; deleted entries keep only
    their date.
    """
    query = Log.query(ancestor=log_key()).order(-Log.date)
    if username is not None:
        query = query.filter(Log.username == username)
    if when is not None:
        query = query.filter(Log.date > datetime.datetime.now() - when)
    results = []
    for entry in query.fetch(fetch_num):
        if delete:
            results.append({'date': gae_datetime_JST(entry.date)})
            entry.key.delete()
        else:
            results.append({'date': gae_datetime_JST(entry.date),
                            'username': entry.username,
                            'what': entry.what})
    return results
def adc_login(salt, username, password, users):
    """Check the password; return the user record on success, else None."""
    u = adc_get_user_info(username, users)
    if u is None or u[1] != hashed_password(username, password, salt):
        return None
    return u
def adc_change_password(salt, username, users, attr, priv_admin=False):
    """Change a password; admins may change anyone's without the old one.

    Returns (ok, message).
    """
    required = ('password_old', 'password_new1', 'password_new2')
    if not all(key in attr for key in required):
        return False, "error"
    if not priv_admin:
        # Non-admins must prove they know the current password.
        if adc_login(salt, username, attr['password_old'], users) is None:
            return False, "password mismatched"
    if attr['password_new1'] != attr['password_new2']:
        return False, "new password is not same"
    if change_password(username, attr['password_new1'].encode('utf-8'), salt):
        return True, "password changed"
    return False, "password change failed"
def adc_get_user_info(username, users):
    """Look up a user, first in the local *users* list, then the datastore."""
    for entry in users:
        if entry[0] == username:
            return entry
    record = get_userinfo(username)
    if record is None:
        return None
    return [record.username, record.password, record.displayname,
            record.uid, record.gid]
def adc_get_user_list(users):
    """Return all usernames: local entries followed by datastore ones."""
    return [entry[0] for entry in users] + get_username_list()
def insert_Q_data(q_num, text, author="DASymposium", year=DEFAULT_YEAR, uniq=True):
    """Register question data in the datastore.

    With uniq=True, registration fails when (q_num, author) already exists.
    Returns (False, message) on error, (True, size, line_num) on success.
    """
    if uniq and get_user_Q_data(q_num, author, year) is not None:
        return (False, "Error: Q%d data already exists" % q_num)
    # Validate the question text.
    (size, line_num, line_mat, msg, ok) = numberlink.read_input_data(text)
    if not ok:
        return (False, "Error: syntax error in Q data\n" + msg)
    # Normalized copy of the text (line endings, etc.).
    normalized = numberlink.generate_Q_data(size, line_num, line_mat)
    # The author's UserInfo entity becomes the entity root.
    userinfo = get_userinfo(author)
    if userinfo is None:
        return (False, "Error: user not found: %s" % author)
    Question(parent=userinfo.key,
             id=str(q_num),
             qnum=q_num,
             text=normalized,
             rows=size[1],  # Y
             cols=size[0],  # X
             linenum=line_num,
             author=author).put()
    return (True, size, line_num)
def update_Q_data(q_num, text, author="DASymposium", year=DEFAULT_YEAR):
    """Replace the stored text of an existing question.

    Returns (ok, updated_count, size, line_num); updated_count is 0 when
    the question does not exist.
    """
    (size, line_num, line_mat, msg, ok) = numberlink.read_input_data(text)
    if not ok:
        return (False, "Error: syntax error in Q data\n" + msg, None, None)
    normalized = numberlink.generate_Q_data(size, line_num, line_mat)
    entity = get_user_Q_data(q_num, author, year)
    if entity is None:
        return (True, 0, size, line_num)
    entity.text = normalized
    entity.rows = size[1]
    entity.cols = size[0]
    entity.linenum = line_num
    entity.put()
    return (True, 1, size, line_num)
def get_Q_data(q_num, year=DEFAULT_YEAR, fetch_num=5):
    """Fetch the Question entity for contest question number *q_num*."""
    qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
    if qla is None:
        return None
    index = q_num - 1  # question numbers are 1-based
    if not (0 <= index < len(qla.qs)):
        return None
    return qla.qs[index].get()
def get_Q_author_all():
    """Build a table mapping contest question number -> author."""
    qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
    if qla is None:
        return None
    # Index 0 is a placeholder; question numbers start at 1.
    authors = ['']
    for q_key in qla.qs:
        # Note: q.qnum is the author's own numbering at registration time,
        # not the contest question number.
        authors.append(q_key.get().author)
    return authors
def get_Q_data_text(q_num, year=DEFAULT_YEAR, fetch_num=5):
    """Return (ok, text) for a question; an error message when missing."""
    result = get_Q_data(q_num, year, fetch_num)
    if result is None:
        return False, "Error: data not found: Q%d" % q_num
    return True, result.text
def get_user_Q_data(q_num, author, year=DEFAULT_YEAR, fetch_num=99):
    """Fetch one question identified by number and author."""
    userinfo = get_userinfo(author)
    root = qdata_key(year) if userinfo is None else userinfo.key
    return ndb.Key(Question, str(q_num), parent=root).get()
def get_admin_Q_all():
    """List every registered question (admin view), count first."""
    query = Question.query(ancestor=userlist_key()).order(Question.author,
                                                          Question.qnum)
    entities = query.fetch()
    lines = [str(len(entities)) + "\n"]
    for e in entities:
        lines.append("Q%02d SIZE %dX%d LINE_NUM %d (%s) %s\n"
                     % (e.qnum, e.cols, e.rows, e.linenum, e.author,
                        gae_datetime_JST(e.date)))
    return "".join(lines)
def admin_Q_list_get():
    """Return the admin view of the contest question list ('' when absent)."""
    qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
    return '' if qla is None else qla.text_admin
def admin_Q_list_create():
    """Create the contest question list by shuffling all registered questions.

    The list is stored once via get_or_insert; re-running reports
    "Already inserted" instead of replacing it.
    """
    entities = Question.query(ancestor=userlist_key()).fetch()
    qlist = [[q.qnum, q.author, q.key] for q in entities]
    random.shuffle(qlist)
    out = str(len(entities)) + "\n"
    root = qdata_key()
    # Deliberately does NOT delete any existing list first.
    out_admin = ""
    out_user = ""
    qs = []
    for num, (qnum, author, key) in enumerate(qlist, start=1):
        qs.append(key)
        out_admin += "Q%d %s %d\n" % (num, author, qnum)
        out_user += "Q%d\n" % num
    out += out_admin
    qla = QuestionListAll.get_or_insert('master', parent=root, qs=qs,
                                        text_admin=out_admin,
                                        text_user=out_user)
    if qla.text_admin != out_admin:
        out += "Already inserted\n"
    return out
def admin_Q_list_delete():
    """Delete the contest question list."""
    ndb.Key(QuestionListAll, 'master', parent=qdata_key()).delete()
    return "DELETE Q-list"
def get_Q_all(html=False):
    """Return the public question list, optionally as HTML links."""
    qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
    if qla is None:
        return ''
    if not html:
        return qla.text_user
    out = ""
    for num, line in enumerate(qla.text_user.splitlines(), start=1):
        out += '<a href="/Q/%d">%s</a><br />\n' % (num, line)
    return out
def menu_post_A(username):
    """Return an HTML menu of answer-upload links for *username*."""
    qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
    if qla is None:
        return ''
    out = ""
    for num, line in enumerate(qla.text_user.splitlines(), start=1):
        out += ('<a href="/A/%s/Q/%d">post answer %s</a><br />\n'
                % (username, num, line))
    return out
def post_A(username, atext, form):
    """Store an uploaded answer.

    cpu_sec/mem_byte fall back to 0 when the form fields are blank or
    malformed.  Returns put_A_data's (ok, message) tuple.
    """
    anum = int(form['anum'])
    cpu_sec = 0
    mem_byte = 0
    try:
        cpu_sec = float(form['cpu_sec'])
        mem_byte = int(form['mem_byte'])
    except ValueError:
        # float('') raises ValueError; keep the 0 defaults.
        pass
    misc_text = form['misc_text']
    # Fix: was a Python-2-only `print` statement, which makes the module
    # unparseable under Python 3; the parenthesized form prints the same
    # single string under both versions.
    print("A%d\n%f\n%d\n%s" % (anum, cpu_sec, mem_byte, misc_text.encode('utf-8')))
    return put_A_data(anum, username, atext, cpu_sec, mem_byte, misc_text)
def get_user_Q_all(author, html=None):
    """List one author's questions, as plain text or as HTML links."""
    userinfo = get_userinfo(author)
    root = qdata_key() if userinfo is None else userinfo.key
    out = ""
    for q in Question.query(ancestor=root).order(Question.qnum).fetch():
        if html is None:
            out += ("Q%d SIZE %dX%d LINE_NUM %d (%s)\n"
                    % (q.qnum, q.cols, q.rows, q.linenum, q.author))
        else:
            url = '/user/%s/Q/%d' % (author, q.qnum)
            out += ('<a href="%s">Q%d SIZE %dX%d LINE_NUM %d (%s)</a><br />\n'
                    % (url, q.qnum, q.cols, q.rows, q.linenum, q.author))
    return out
def delete_user_Q_data(q_num, author, year=DEFAULT_YEAR):
    """Delete one question identified by number and author."""
    entity = get_user_Q_data(q_num, author, year)
    if entity is None:
        return "Q%d data not found" % q_num
    entity.key.delete()
    return "DELETE /user/%s/Q/%d\n" % (author, q_num)
def get_admin_A_all():
    """List every stored answer (admin view), count first."""
    answers = Answer.query(ancestor=userlist_key()).fetch()
    out = str(len(answers)) + "\n"
    for a in answers:
        out += "A%02d (%s) %s\n" % (a.anum, a.owner, gae_datetime_JST(a.date))
    return out
def get_A_data(a_num=None, username=None):
    """Fetch answer data from the datastore.

    Returns (ok, payload, root): a single entity (or None) when a_num is
    given, otherwise a list of entities.  ok is False when the user is
    unknown, with an error message as payload.
    """
    if username is None:
        root = userlist_key()
    else:
        userinfo = get_userinfo(username)
        if userinfo is None:
            return False, "ERROR: user not found: %s" % username, None
        root = userinfo.key
    if a_num is not None:
        return True, ndb.Key(Answer, str(a_num), parent=root).get(), root
    return True, Answer.query(ancestor=root).fetch(), root
def put_A_data(a_num, username, text, cpu_sec=None, mem_byte=None, misc_text=None):
    """Judge and store an answer; incorrect answers are stored as well.

    Returns (ok, message).
    """
    msg = ""
    # The corresponding question must exist.
    ret, q_text = get_Q_data_text(a_num)
    if not ret:
        return False, "Error in Q%d data: " % a_num + q_text
    # Reject duplicate submissions by the same user.
    ret, existing, root = get_A_data(a_num, username)
    if ret == True and existing is not None:
        return False, msg + "ERROR: duplicated answer\n"
    # Judge the answer against the question.
    judges, msg = numberlink.check_A_data(text, q_text)
    quality = 0.0
    if judges[0] != True:
        msg += "Error in answer A%d\n" % a_num
        check_A = False
    else:
        check_A = True  # correct answer
        quality = judges[1]
    # Solution quality.
    msg += "Quality factor = %1.19f\n" % quality
    # Store even incorrect answers.
    Answer(parent=root,
           id=str(a_num),
           anum=a_num,
           text=text,
           owner=username,
           cpu_sec=cpu_sec,
           mem_byte=mem_byte,
           misc_text=misc_text,
           result=msg[-1499:],  # field has a length limit; keep the tail
           judge=int(check_A),
           q_factor=quality).put()
    return True, msg
def put_A_info(a_num, username, info):
    """Update the supplementary info of a stored answer."""
    msg = ""
    # root is the UserInfo key, a is the Answer entity (or an error string).
    ret, a, root = get_A_data(a_num, username)
    if ret == False or a is None:
        if ret == False:
            msg += a + "\n"
        msg += "ERROR: A%d data not found" % a_num
        return False, msg
    a.cpu_sec = info['cpu_sec']
    a.mem_byte = info['mem_byte']
    a.misc_text = info['misc_text']
    a.put()
    return True, msg + "UPDATE A%d info\n" % a_num
def get_or_delete_A_data(a_num=None, username=None, delete=False):
    """Fetch or delete answer data; returns (ok, list-of-strings)."""
    ret, q, root = get_A_data(a_num=a_num, username=username)
    if not ret:
        return False, q  # q is the error message here
    if q is None:
        return ret, []
    # With a_num=None the payload is already a list of entities.
    entities = q if a_num is None else [q]
    result = []
    if delete:
        # Also wipe the supplementary info of whatever we delete.
        get_or_delete_A_info(a_num=a_num, username=username, delete=True)
        for e in entities:
            result.append("DELETE A%d" % e.anum)
            e.key.delete()
    else:
        for e in entities:
            result.append("GET A%d" % e.anum)
            result.append(e.text)
    return True, result
def get_user_A_all(username, html=None):
    """List one user's answers, as plain text or as HTML links."""
    ret, q, root = get_A_data(username=username)
    if not ret:
        return False, q
    text = ""
    for a in q:
        if html:
            text += ('<a href="/A/%s/Q/%d">A%d</a> '
                     '<a href="/A/%s/Q/%d/info">info</a><br />\n'
                     % (username, a.anum, a.anum, username, a.anum))
        else:
            text += 'A%d\n' % a.anum
    return True, text
def get_or_delete_A_info(a_num=None, username=None, delete=False):
    """Fetch or clear the supplementary info of answers.

    Returns (ok, message, results).
    """
    msg = ""
    ok, a, root = get_A_data(a_num, username)
    if not ok:
        return False, a, None
    if a_num is None:
        targets = a
    else:
        if a is None:
            return True, msg + "A%d not found" % a_num, []
        targets = [a]
    results = []
    count = 0
    for entity in targets:
        count += 1
        if delete:
            results.append({'anum': entity.anum})
            entity.cpu_sec = None
            entity.mem_byte = None
            entity.misc_text = None
            entity.put()
        else:
            tmp = entity.to_dict()
            del tmp['text']  # the answer text itself is not "info"
            results.append(tmp)
    method = 'DELETE' if delete else 'GET'
    msg += "%s A%d info %d" % (method, 0 if a_num is None else a_num, count)
    return True, msg, results
def hashed_password(username, password, salt):
    """Return the SHA-256 hex digest of salt + username + password."""
    material = salt + username.encode('utf-8') + password.encode('utf-8')
    return sha256(material).hexdigest()
def create_user(username, password, displayname, uid, gid, salt):
    """Create a UserInfo entity, storing only the hashed password."""
    UserInfo(parent=userlist_key(),
             id=username,
             username=username,
             password=hashed_password(username, password, salt),
             displayname=displayname,
             uid=uid,
             gid=gid).put()
def change_password(username, password, salt):
    """Change a user's password; False when the user does not exist."""
    info = get_userinfo(username)
    if info is None:
        return False
    info.password = hashed_password(username, password, salt)
    info.put()
    return True
def get_username_list():
    """Return every username stored in the datastore."""
    entities = UserInfo.query(ancestor=userlist_key()).fetch()
    return [u.username for u in entities]
def get_userinfo(username):
    """Fetch one UserInfo entity from the datastore, or None."""
    return ndb.Key(UserInfo, username, parent=userlist_key()).get()
def delete_user(username):
    """Delete a user from the datastore.

    Returns 1 on success, 0 when the user does not exist.
    """
    userinfo = get_userinfo(username)
    if userinfo is None:
        return 0
    userinfo.key.delete()
    return 1
    # Fix: removed an unreachable trailing `return n` that referenced an
    # undefined name (dead code left behind by an earlier edit).
def Q_check(qtext):
    """Validate question text; return (report, ok).

    On success the report contains the normalized text; on failure it
    echoes the input followed by the parser's message.
    """
    hr = '-' * 40 + "\n"
    (size, line_num, line_mat, msg, ok) = numberlink.read_input_data(qtext)
    if ok:
        normalized = numberlink.generate_Q_data(size, line_num, line_mat)
        report = "OK\n" + hr + normalized + hr
    else:
        report = "NG\n" + hr + qtext + hr + msg
    return report, ok
def calc_score_all():
    """Compute the score board over every stored answer.

    Returns (score_board, ok_point, q_point, bonus_point, q_factors,
    result, misc).  All but score_board map 'A%02d' -> {username: value}.
    """
    authors = get_Q_author_all()
    #print "authors=", authors
    q_factors = {}    # quality factor per question/user
    q_point = {}      # quality points derived from q_factors
    ok_point = {}     # judge flag (1 = correct) per question/user
    bonus_point = {}  # bonus for answering one's own question
    result = {}       # judge log message per question/user
    misc = {}         # [date, cpu_sec, mem_byte, misc_text] per question/user
    query = Answer.query(ancestor=userlist_key())
    q = query.fetch()
    all_numbers = {}
    all_users = {}
    for i in q:
        #anum = 'A%d' % i.anum
        anum = 'A%02d' % i.anum
        username = i.owner
        all_numbers[anum] = 1
        all_users[username] = 1
        # correct-answer point
        if not(anum in ok_point):
            ok_point[anum] = {}
        ok_point[anum][username] = i.judge
        # quality point
        if not(anum in q_factors):
            q_factors[anum] = {}
        q_factors[anum][username] = i.q_factor
        # bonus point for having authored the question
        if i.judge in (0,1) and authors[i.anum] == username:
            #print "check_bonus:", i.anum, i.judge, authors[i.anum], username
            if not(anum in bonus_point):
                bonus_point[anum] = {}
            bonus_point[anum][username] = i.judge
        # result (log message)
        if not(anum in result):
            result[anum] = {}
        result[anum][username] = i.result
        # (misc) date, cpu_sec, mem_byte, misc_text
        if not(anum in misc):
            misc[anum] = {}
        misc[anum][username] = [i.date, i.cpu_sec, i.mem_byte, i.misc_text]
    #print "ok_point=", ok_point
    #print "bonus_point=", bonus_point
    #print "q_factors=", q_factors
    #print "result=\n", result
    # Compute quality points: a fixed pool of q_pt per question, shared
    # among users in proportion to their quality factors.
    q_pt = 10.0
    for anum, values in q_factors.iteritems(): # per question number
        #print "anum=", anum
        qf_total = 0.0 # sum of all users' Q_factor for this question
        for user, qf in values.iteritems():
            #print "qf=", qf
            qf_total += qf
        #print "qf_total=", qf_total
        for user, qf in values.iteritems():
            if qf_total == 0.0:
                tmp = 0.0
            else:
                tmp = q_pt * qf / qf_total
            if not anum in q_point:
                q_point[anum] = {}
            q_point[anum][user] = tmp
    #print "q_point=", q_point
    # Aggregate into the score board: one column per question + TOTAL.
    tmp = ['']*(len(all_numbers) + 1)
    i = 0
    for anum in sorted(all_numbers.keys()):
        tmp[i] = anum
        i += 1
    tmp[i] = 'TOTAL'
    score_board = {'/header/': tmp} # header row
    for user in sorted(all_users.keys()):
        #print user
        if not(user in score_board):
            score_board[user] = [0]*(len(all_numbers) + 1)
        i = 0
        ptotal = 0.0
        for anum in sorted(all_numbers.keys()):
            #print anum
            p = 0.0
            if user in ok_point[anum]: p += ok_point[anum][user]
            if user in q_point[anum]: p += q_point[anum][user]
            if anum in bonus_point and user in bonus_point[anum]:
                p += bonus_point[anum][user]
            #print "p=", p
            score_board[user][i] = p
            ptotal += p
            i += 1
        score_board[user][i] = ptotal
    #print "score_board=", score_board
    return score_board, ok_point, q_point, bonus_point, q_factors, result, misc
def html_score_board(score_board):
    """Render the score-board mapping as an HTML table string.

    The '/header/' entry supplies the column headings; every other key is
    a username whose values are formatted to one decimal place.
    """
    header_key = '/header/'
    parts = ['<table border=1>\n']
    header_cells = ''.join('<th>%s</th>' % h for h in score_board[header_key])
    parts.append('<tr><th>-</th>' + header_cells + '</tr>\n')
    for user in sorted(score_board.keys()):
        if user == header_key:
            continue
        row_cells = ''.join('<td>%1.1f</td>' % v for v in score_board[user])
        parts.append('<tr><th>%s</th>' % user + row_cells + '</tr>\n')
    parts.append('</table>\n')
    return ''.join(parts)
|
from constance.admin import ConstanceForm
from django.forms import fields
from django.test import TestCase
class TestForm(TestCase):
    """Constance config values must map to the expected form field types."""

    def test_form_field_types(self):
        form = ConstanceForm({})
        expectations = {
            'INT_VALUE': fields.IntegerField,
            'BOOL_VALUE': fields.BooleanField,
            'STRING_VALUE': fields.CharField,
            'DECIMAL_VALUE': fields.DecimalField,
            'DATETIME_VALUE': fields.SplitDateTimeField,
            'TIMEDELTA_VALUE': fields.DurationField,
            'FLOAT_VALUE': fields.FloatField,
            'DATE_VALUE': fields.DateField,
            'TIME_VALUE': fields.TimeField,
            # from CONSTANCE_ADDITIONAL_FIELDS
            'CHOICE_VALUE': fields.ChoiceField,
            'EMAIL_VALUE': fields.EmailField,
        }
        for name, field_type in expectations.items():
            self.assertIsInstance(form.fields[name], field_type)
|
from pybrain.rl.environments.timeseries.maximizereturntask import DifferentialSharpeRatioTask
from pybrain.rl.environments.timeseries.timeseries import AR1Environment, SnPEnvironment
from pybrain.rl.learners.valuebased.linearfa import Q_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import ContinuousExperiment
from matplotlib import pyplot
"""
This script aims to create a trading model that trades on a simple AR(1) process
"""
# Build the environment/task pair: a 2000-step AR(1) series scored by the
# differential Sharpe ratio.
env=AR1Environment(2000)
task=DifferentialSharpeRatioTask(env)
# Linear function-approximation Q-learner.
# NOTE(review): the (2, 1) constructor arguments are assumed to be
# (num_features, num_actions) per pybrain's Q_LinFA — confirm.
learner = Q_LinFA(2,1)
agent = LinearFA_Agent(learner)
exp = ContinuousExperiment(task,agent)
from decimal import Decimal  # NOTE(review): unused in this script
ts=env.ts.tolist()
# Interact and learn over (length - 1) steps of the series.
exp.doInteractionsAndLearn(1999)
actionHist=env.actionHistory
# Plot the price series against the actions taken.
pyplot.plot(ts[0])
pyplot.plot(actionHist)
pyplot.show()
|
from datetime import datetime, timedelta
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
import mock
import pytest
from olympia.amo.tests import BaseTestCase, TestCase
from olympia.amo import decorators, get_user, set_user
from olympia.amo.urlresolvers import reverse
from olympia.users.models import UserProfile
# Every test in this module touches the ORM, so enable DB access globally.
pytestmark = pytest.mark.django_db
def test_post_required():
    """post_required rejects GET and passes POST straight through."""
    def view(request):
        return mock.sentinel.response
    wrapped = decorators.post_required(view)
    request = mock.Mock()
    request.method = 'GET'
    assert isinstance(wrapped(request), http.HttpResponseNotAllowed)
    request.method = 'POST'
    assert wrapped(request) == mock.sentinel.response
def test_json_view():
    """json_view turns a plain Python object into a JSON response."""
    response = decorators.json_view(lambda request: {'x': 1})(mock.Mock())
    assert isinstance(response, http.HttpResponse)
    assert response.content == '{"x": 1}'
    assert response['Content-Type'] == 'application/json'
    assert response.status_code == 200
def test_json_view_normal_response():
    """HttpResponse objects pass through json_view untouched."""
    expected = http.HttpResponseForbidden()
    response = decorators.json_view(lambda request: expected)(mock.Mock())
    assert response is expected
    assert response['Content-Type'] == 'text/html; charset=utf-8'
def test_json_view_error():
    """json_view.error wraps the payload in a 400 JSON response."""
    response = decorators.json_view.error({'msg': 'error'})
    assert isinstance(response, http.HttpResponseBadRequest)
    assert response.content == '{"msg": "error"}'
    assert response['Content-Type'] == 'application/json'
def test_json_view_status():
    """A custom status_code passed to json_view is honoured."""
    view = decorators.json_view(lambda request: {'x': 1}, status_code=202)
    assert view(mock.Mock()).status_code == 202
def test_json_view_response_status():
    """json_response forwards payload, content type and status code."""
    response = decorators.json_response({'msg': 'error'}, status_code=202)
    assert response.content == '{"msg": "error"}'
    assert response['Content-Type'] == 'application/json'
    assert response.status_code == 202
class TestTaskUser(TestCase):
    """set_task_user swaps in the task user only for the decorated call."""
    fixtures = ['base/users']

    def test_set_task_user(self):
        @decorators.set_task_user
        def sample():
            return get_user()
        set_user(UserProfile.objects.get(username='regularuser'))
        # Outside the decorated call the regular user (pk 999) is active.
        assert get_user().pk == 999
        # Inside, the task user takes over...
        assert sample().pk == int(settings.TASK_USER_ID)
        # ...and the previous user is restored afterwards.
        assert get_user().pk == 999
class TestLoginRequired(BaseTestCase):
    """Behaviour of the login_required decorator for anonymous users."""

    def setUp(self):
        super(TestLoginRequired, self).setUp()
        self.f = mock.Mock()
        self.f.__name__ = 'function'
        self.request = mock.Mock()
        self.request.user.is_authenticated.return_value = False
        self.request.get_full_path.return_value = 'path'

    def test_normal(self):
        # Anonymous users are redirected to the login page.
        response = decorators.login_required(self.f)(self.request)
        assert not self.f.called
        assert response.status_code == 302
        assert response['Location'] == (
            '%s?to=%s' % (reverse('users.login'), 'path'))

    def test_no_redirect(self):
        # redirect=False yields a bare 401 instead of a redirect.
        response = decorators.login_required(self.f, redirect=False)(
            self.request)
        assert not self.f.called
        assert response.status_code == 401

    def test_decorator_syntax(self):
        # Same result when used as @login_required(redirect=False).
        response = decorators.login_required(redirect=False)(self.f)(
            self.request)
        assert not self.f.called
        assert response.status_code == 401

    def test_no_redirect_success(self):
        # Authenticated users reach the wrapped view.
        self.request.user.is_authenticated.return_value = True
        decorators.login_required(redirect=False)(self.f)(self.request)
        assert self.f.called
class TestSetModifiedOn(TestCase):
    """set_modified_on stamps the given objects only when the task succeeds."""
    fixtures = ['base/users']

    @decorators.set_modified_on
    def some_method(self, worked):
        # Returning a falsy value signals failure to the decorator.
        return worked

    def test_set_modified_on(self):
        users = list(UserProfile.objects.all()[:3])
        self.some_method(True, set_modified_on=users)
        today = datetime.today().date()
        for user in users:
            assert UserProfile.objects.get(pk=user.pk).modified.date() == today

    def test_not_set_modified_on(self):
        yesterday = datetime.today() - timedelta(days=1)
        qs = UserProfile.objects.all()
        qs.update(modified=yesterday)
        users = list(qs[:3])
        self.some_method(False, set_modified_on=users)
        for user in users:
            stamped = UserProfile.objects.get(pk=user.pk).modified.date()
            assert stamped < datetime.today().date()
class TestPermissionRequired(TestCase):
    """Behaviour of the permission_required decorator."""

    def setUp(self):
        super(TestPermissionRequired, self).setUp()
        self.f = mock.Mock()
        self.f.__name__ = 'function'
        self.request = mock.Mock()

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_not_allowed(self, action_allowed):
        action_allowed.return_value = False
        with self.assertRaises(PermissionDenied):
            decorators.permission_required('', '')(self.f)(self.request)

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_allowed(self, action_allowed):
        action_allowed.return_value = True
        decorators.permission_required('', '')(self.f)(self.request)
        assert self.f.called

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_allowed_correctly(self, action_allowed):
        # The decorator must forward (request, app, action) to the ACL check.
        decorators.permission_required('Admin', '%')(self.f)(self.request)
        action_allowed.assert_called_with(self.request, 'Admin', '%')
|
from django.core.management.base import BaseCommand
import amo
from mkt.webapps.models import AddonPremium
class Command(BaseCommand):
    """One-off cleanup: free apps should carry no AddonPremium rows."""
    help = 'Clean up existing AddonPremium objects for free apps.'

    def handle(self, *args, **options):
        premiums = AddonPremium.objects.filter(
            addon__premium_type__in=amo.ADDON_FREES)
        premiums.delete()
|
from lib.common import helpers
class Module:
    """Empire module wrapper that locks the target's display by calling
    User32!LockWorkStation through dynamically emitted P/Invoke."""
    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default for params is shared across calls;
        # it is only iterated here, so this looks harmless — confirm.
        # Metadata consumed by the Empire framework.
        self.info = {
            'Name': 'Invoke-LockWorkStation',
            'Author': ['@harmj0y'],
            'Description': ("Locks the workstation's display."),
            'Background' : False,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : False,
            'Language' : 'powershell',
            'MinLanguageVersion' : '2',
            'Comments': [
                'http://poshcode.org/1640'
            ]
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            }
        }
        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value
    def generate(self):
        # Returns the PowerShell payload.  The script defines
        # Win32.User32::LockWorkStation() at runtime via Reflection.Emit
        # (no compiled helper needed) and then invokes it.
        script = """
Function Invoke-LockWorkStation {
    # region define P/Invoke types dynamically
    #   stolen from PowerSploit https://github.com/mattifestation/PowerSploit/blob/master/Mayhem/Mayhem.psm1
    #   thanks matt and chris :)
    $DynAssembly = New-Object System.Reflection.AssemblyName('Win32')
    $AssemblyBuilder = [AppDomain]::CurrentDomain.DefineDynamicAssembly($DynAssembly, [Reflection.Emit.AssemblyBuilderAccess]::Run)
    $ModuleBuilder = $AssemblyBuilder.DefineDynamicModule('Win32', $False)
    $TypeBuilder = $ModuleBuilder.DefineType('Win32.User32', 'Public, Class')
    $DllImportConstructor = [Runtime.InteropServices.DllImportAttribute].GetConstructor(@([String]))
    $SetLastError = [Runtime.InteropServices.DllImportAttribute].GetField('SetLastError')
    $SetLastErrorCustomAttribute = New-Object Reflection.Emit.CustomAttributeBuilder($DllImportConstructor,
        @('User32.dll'),
        [Reflection.FieldInfo[]]@($SetLastError),
        @($True))
    # Define [Win32.User32]::LockWorkStation()
    $PInvokeMethod = $TypeBuilder.DefinePInvokeMethod('LockWorkStation',
        'User32.dll',
        ([Reflection.MethodAttributes]::Public -bor [Reflection.MethodAttributes]::Static),
        [Reflection.CallingConventions]::Standard,
        [Bool],
        [Type[]]@(),
        [Runtime.InteropServices.CallingConvention]::Winapi,
        [Runtime.InteropServices.CharSet]::Ansi)
    $PInvokeMethod.SetCustomAttribute($SetLastErrorCustomAttribute)
    $User32 = $TypeBuilder.CreateType()
    $Null = $User32::LockWorkStation()
}
Invoke-LockWorkStation; "Workstation locked."
    """
        return script
|
from __future__ import division
""" This python module defines Connection class.
"""
import copy
from vistrails.db.domain import DBConnection
from vistrails.core.vistrail.port import PortEndPoint, Port
import unittest
from vistrails.db.domain import IdScope
class Connection(DBConnection):
""" A Connection is a connection between two modules.
Right now there's only Module connections.
"""
##########################################################################
# Constructors and copy
    @staticmethod
    def from_port_specs(source, dest):
        """from_port_specs(source: PortSpec, dest: PortSpec) -> Connection
        Static method that creates a Connection given source and
        destination ports.
        """
        conn = Connection()
        # Shallow-copy so the connection owns its own port objects.
        conn.source = copy.copy(source)
        conn.destination = copy.copy(dest)
        return conn
    @staticmethod
    def fromID(id):
        """fromID(id: int) -> Connection
        Static method that creates a Connection given an id.
        """
        conn = Connection()
        conn.id = id
        # A fresh Connection gets default ports in __init__; tag their ends.
        conn.source.endPoint = PortEndPoint.Source
        conn.destination.endPoint = PortEndPoint.Destination
        return conn
    def __init__(self, *args, **kwargs):
        """__init__() -> Connection
        Initializes source and destination ports.
        """
        DBConnection.__init__(self, *args, **kwargs)
        if self.id is None:
            self.db_id = -1  # sentinel until a real id is assigned
        # Only create default ports when the DB layer supplied none.
        if not len(self.ports) > 0:
            self.source = Port(type='source')
            self.destination = Port(type='destination')
    def __copy__(self):
        """__copy__() -> Connection - Returns a clone of self.
        """
        return Connection.do_copy(self)
    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        # Delegate to the DB-layer copy, then re-brand the result (and its
        # ports) with the vistrails wrapper classes.
        cp = DBConnection.do_copy(self, new_ids, id_scope, id_remap)
        cp.__class__ = Connection
        for port in cp.ports:
            Port.convert(port)
        return cp
##########################################################################
    @staticmethod
    def convert(_connection):
        # Re-brand a plain DBConnection (and its ports) in place as a
        # Connection; no-op when it already is one.
        if _connection.__class__ == Connection:
            return
        _connection.__class__ = Connection
        for port in _connection.ports:
            Port.convert(port)
    ##########################################################################
    # Properties
    # id/ports alias the DB-layer attributes directly.
    id = DBConnection.db_id
    ports = DBConnection.db_ports
    def add_port(self, port):
        # Forward straight to the DB layer.
        self.db_add_port(port)
    def _get_sourceId(self):
        """ _get_sourceId() -> int
        Returns the module id of source port. Do not use this function,
        use sourceId property: c.sourceId
        """
        return self.source.moduleId
    def _set_sourceId(self, id):
        """ _set_sourceId(id : int) -> None
        Sets this connection source id. It updates both self.source.moduleId
        and self.source.id. Do not use this function, use sourceId
        property: c.sourceId = id
        """
        # Keeps the port's own id in sync with the module id.
        self.source.moduleId = id
        self.source.id = id
    sourceId = property(_get_sourceId, _set_sourceId)
def _get_destinationId(self):
""" _get_destinationId() -> int
Returns the module id of dest port. Do not use this function,
use sourceId property: c.destinationId
"""
return self.destination.moduleId
def _set_destinationId(self, id):
""" _set_destinationId(id : int) -> None
Sets this connection destination id. It updates self.dest.moduleId.
Do not use this function, use destinationId property:
c.destinationId = id
"""
self.destination.moduleId = id
destinationId = property(_get_destinationId, _set_destinationId)
def _get_source(self):
"""_get_source() -> Port
Returns source port. Do not use this function, use source property:
c.source
"""
try:
return self.db_get_port_by_type('source')
except KeyError:
pass
return None
def _set_source(self, source):
"""_set_source(source: Port) -> None
Sets this connection source port. Do not use this function,
use source property instead: c.source = source
"""
try:
port = self.db_get_port_by_type('source')
self.db_delete_port(port)
except KeyError:
pass
if source is not None:
self.db_add_port(source)
source = property(_get_source, _set_source)
def _get_destination(self):
"""_get_destination() -> Port
Returns destination port. Do not use this function, use destination
property: c.destination
"""
try:
return self.db_get_port_by_type('destination')
except KeyError:
pass
return None
def _set_destination(self, dest):
"""_set_destination(dest: Port) -> None
Sets this connection destination port. Do not use this
function, use destination property instead: c.destination = dest
"""
try:
port = self.db_get_port_by_type('destination')
self.db_delete_port(port)
except KeyError:
pass
if dest is not None:
self.db_add_port(dest)
destination = property(_get_destination, _set_destination)
dest = property(_get_destination, _set_destination)
##########################################################################
# Operators
def __str__(self):
"""__str__() -> str - Returns a string representation of a Connection
object.
"""
rep = "<connection id='%s'>%s%s</connection>"
return rep % (str(self.id), str(self.source), str(self.destination))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if type(other) != type(self):
return False
return (self.source == other.source and
self.dest == other.dest)
def equals_no_id(self, other):
"""Checks equality up to ids (connection and ports)."""
if type(self) != type(other):
return False
return (self.source.equals_no_id(other.source) and
self.dest.equals_no_id(other.dest))
class TestConnection(unittest.TestCase):
    """Unit tests for Connection copying and XML (de)serialization.

    Modernized: ``assertEqual``/``assertNotEqual`` replace the long-deprecated
    ``assertEquals`` aliases, and plain int literals replace Python-2-only
    ``L`` long literals (ints and longs compare equal under Python 2, so
    behavior is unchanged there, and the code now parses on Python 3 too).
    """

    def create_connection(self, id_scope=IdScope()):
        """Build a String->Float 'value' connection with ids from *id_scope*.

        NOTE(review): the mutable default ``IdScope()`` is shared across calls
        that omit the argument, so ids keep increasing between calls; this
        matches the original behavior and is preserved deliberately.
        """
        from vistrails.core.vistrail.port import Port
        from vistrails.core.modules.basic_modules import identifier as basic_pkg
        source = Port(id=id_scope.getNewId(Port.vtType),
                      type='source',
                      moduleId=21,
                      moduleName='String',
                      name='value',
                      signature='(%s:String)' % basic_pkg)
        destination = Port(id=id_scope.getNewId(Port.vtType),
                           type='destination',
                           moduleId=20,
                           moduleName='Float',
                           name='value',
                           signature='(%s:Float)' % basic_pkg)
        connection = Connection(id=id_scope.getNewId(Connection.vtType),
                                ports=[source, destination])
        return connection

    def test_copy(self):
        """copy.copy preserves the id; do_copy(new_ids=True) remaps it."""
        id_scope = IdScope()
        c1 = self.create_connection(id_scope)
        c2 = copy.copy(c1)
        self.assertEqual(c1, c2)
        self.assertEqual(c1.id, c2.id)
        c3 = c1.do_copy(True, id_scope, {})
        self.assertEqual(c1, c3)
        self.assertNotEqual(c1.id, c3.id)

    def test_serialization(self):
        """Round-tripping through XML preserves equality and id."""
        import vistrails.core.db.io
        c1 = self.create_connection()
        xml_str = vistrails.core.db.io.serialize(c1)
        c2 = vistrails.core.db.io.unserialize(xml_str, Connection)
        self.assertEqual(c1, c2)
        self.assertEqual(c1.id, c2.id)

    def testEmptyConnection(self):
        """Tests sane initialization of empty connection"""
        c = Connection()
        self.assertEqual(c.source.endPoint, PortEndPoint.Source)
        self.assertEqual(c.destination.endPoint, PortEndPoint.Destination)
def _run_tests():
    """Run this module's unit test suite."""
    unittest.main()

if __name__ == '__main__':
    _run_tests()
|
import urllib
from canvas import util
def make_cookie_key(key):
    """Namespace *key* under the after-signup cookie prefix."""
    return ''.join(['after_signup_', str(key)])
def _get(request, key):
    """Read and deserialize the after-signup cookie stored under *key*.

    Returns a ``(cookie_key, value)`` pair; ``value`` is ``None`` when the
    cookie is absent, otherwise the URL-unquoted, JSON-decoded payload.
    """
    cookie_key = make_cookie_key(key)
    raw = request.COOKIES.get(cookie_key)
    value = None if raw is None else util.loads(urllib.unquote(raw))
    return (cookie_key, value,)
def get_posted_comment(request):
    """Return the comment waiting to be posted, if one exists.

    Gives back a pair of the cookie key used to retrieve it and its
    deserialized JSON payload (``None`` when nothing is pending).
    """
    # TODO: switch to dcramer's django-cookies so mutating cookies does not
    # require the response object; that would make this API cleaner.
    return _get(request, 'post_comment')
|
import numpy as np
from Coupling import Coupling
class Coupling2DCavities2D(Coupling):
    """
    Coupling for cavity2D to cavity transmission.
    """
    @property
    def impedance_from(self):
        """
        Choses the right impedance of subsystem_from.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_from.impedance
    @property
    def impedance_to(self):
        """
        Choses the right impedance of subsystem_to.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_to.impedance
    @property
    def tau(self):
        """
        Transmission coefficient.

        NOTE(review): currently zero for every frequency band -- confirm
        whether this placeholder implementation is intentional.
        """
        return np.zeros(self.frequency.amount)
    @property
    def clf(self):
        """
        Coupling loss factor for transmission from a 2D cavity to a cavity.
        .. math:: \\eta_{12} = \\frac{ \\tau_{12}}{4 \\pi}
        See BAC, equation 3.14
        """
        return self.tau / (4.0 * np.pi)
|
''' The `Filter` hierarchy contains Transformer classes that take a `Stim`
of one type as input and return a `Stim` of the same type as output (but with
some changes to its data).
'''
from .audio import (AudioTrimmingFilter,
AudioResamplingFilter)
from .base import TemporalTrimmingFilter
from .image import (ImageCroppingFilter,
ImageResizingFilter,
PillowImageFilter)
from .text import (WordStemmingFilter,
TokenizingFilter,
TokenRemovalFilter,
PunctuationRemovalFilter,
LowerCasingFilter)
from .video import (FrameSamplingFilter,
VideoTrimmingFilter)
# Public API: every filter class re-exported by this package.
__all__ = [
    'AudioTrimmingFilter',
    'AudioResamplingFilter',
    'TemporalTrimmingFilter',
    'ImageCroppingFilter',
    'ImageResizingFilter',
    'PillowImageFilter',
    'WordStemmingFilter',
    'TokenizingFilter',
    'TokenRemovalFilter',
    'PunctuationRemovalFilter',
    'LowerCasingFilter',
    'FrameSamplingFilter',
    'VideoTrimmingFilter'
]
|
from setuptools import setup, find_packages
# Package metadata for the reddit_gold plugin; distributed as an r2 plugin.
setup(name='reddit_gold',
      description='reddit gold',
      version='0.1',
      author='Chad Birch',
      author_email='chad@reddit.com',
      packages=find_packages(),
      install_requires=[
          'r2',
      ],
      # Register the Gold class with reddit's 'r2.plugin' entry-point group.
      entry_points={
          'r2.plugin':
              ['gold = reddit_gold:Gold']
      },
      include_package_data=True,
      zip_safe=False,
)
|
from django.contrib import admin
from ionyweb.plugin_app.plugin_video.models import Plugin_Video
# Expose the Plugin_Video model in the Django admin with default options.
admin.site.register(Plugin_Video)
|
from __future__ import unicode_literals
__all__ = (
'Key',
'Keys',
)
class Key(object):
    """A named key, written the way configuration files spell it
    (e.g. ``<C-A>`` for ``Control-A``)."""

    def __init__(self, name):
        # Human-readable key name, e.g. '<Escape>'.
        self.name = name

    def __repr__(self):
        return '{0}({1!r})'.format(self.__class__.__name__, self.name)
class Keys(object):
    """Registry of all known keys, exposed as class attributes."""
    Escape = Key('<Escape>')
    # Control-key combinations.
    ControlA = Key('<C-A>')
    ControlB = Key('<C-B>')
    ControlC = Key('<C-C>')
    ControlD = Key('<C-D>')
    ControlE = Key('<C-E>')
    ControlF = Key('<C-F>')
    ControlG = Key('<C-G>')
    ControlH = Key('<C-H>')
    ControlI = Key('<C-I>') # Tab
    ControlJ = Key('<C-J>') # Enter
    ControlK = Key('<C-K>')
    ControlL = Key('<C-L>')
    ControlM = Key('<C-M>') # Enter
    ControlN = Key('<C-N>')
    ControlO = Key('<C-O>')
    ControlP = Key('<C-P>')
    ControlQ = Key('<C-Q>')
    ControlR = Key('<C-R>')
    ControlS = Key('<C-S>')
    ControlT = Key('<C-T>')
    ControlU = Key('<C-U>')
    ControlV = Key('<C-V>')
    ControlW = Key('<C-W>')
    ControlX = Key('<C-X>')
    ControlY = Key('<C-Y>')
    ControlZ = Key('<C-Z>')
    ControlSpace = Key('<C-Space>')
    ControlBackslash = Key('<C-Backslash>')
    ControlSquareClose = Key('<C-SquareClose>')
    ControlCircumflex = Key('<C-Circumflex>')
    ControlUnderscore = Key('<C-Underscore>')
    ControlLeft = Key('<C-Left>')
    ControlRight = Key('<C-Right>')
    ControlUp = Key('<C-Up>')
    ControlDown = Key('<C-Down>')
    # Navigation and editing keys.
    Up = Key('<Up>')
    Down = Key('<Down>')
    Right = Key('<Right>')
    Left = Key('<Left>')
    Home = Key('<Home>')
    End = Key('<End>')
    Delete = Key('<Delete>')
    ShiftDelete = Key('<ShiftDelete>')
    PageUp = Key('<PageUp>')
    PageDown = Key('<PageDown>')
    BackTab = Key('<BackTab>') # shift + tab
    # Aliases: terminals report Tab/Backspace as control characters.
    Tab = ControlI
    Backspace = ControlH
    # Function keys.
    F1 = Key('<F1>')
    F2 = Key('<F2>')
    F3 = Key('<F3>')
    F4 = Key('<F4>')
    F5 = Key('<F5>')
    F6 = Key('<F6>')
    F7 = Key('<F7>')
    F8 = Key('<F8>')
    F9 = Key('<F9>')
    F10 = Key('<F10>')
    F11 = Key('<F11>')
    F12 = Key('<F12>')
    F13 = Key('<F13>')
    F14 = Key('<F14>')
    F15 = Key('<F15>')
    F16 = Key('<F16>')
    F17 = Key('<F17>')
    F18 = Key('<F18>')
    F19 = Key('<F19>')
    F20 = Key('<F20>')
    # Matches any key.
    Any = Key('<Any>')
    # Special
    CPRResponse = Key('<Cursor-Position-Response>')
|
"""
================
sMRI: FreeSurfer
================
This script, smri_freesurfer.py, demonstrates the ability to call reconall on
a set of subjects and then make an average subject.
python smri_freesurfer.py
Import necessary modules from nipype.
"""
import os
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
from nipype.interfaces.freesurfer.preprocess import ReconAll
from nipype.interfaces.freesurfer.utils import MakeAverageSubject
# Subjects to reconstruct and where their data lives.
subject_list = ['s1', 's3']
data_dir = os.path.abspath('data')
subjects_dir = os.path.abspath('amri_freesurfer_tutorial/subjects_dir')
# Top-level workflow holding the grab -> recon-all -> average pipeline.
wf = pe.Workflow(name="l1workflow")
wf.base_dir = os.path.abspath('amri_freesurfer_tutorial/workdir')
"""
Grab data
"""
# MapNode: one DataGrabber run per subject id, yielding its structural image.
datasource = pe.MapNode(interface=nio.DataGrabber(infields=['subject_id'],
                                                  outfields=['struct']),
                        name='datasource',
                        iterfield=['subject_id'])
datasource.inputs.base_directory = data_dir
# Template '<subject_id>/struct.nii' resolved per subject.
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']])
datasource.inputs.subject_id = subject_list
"""
Run recon-all
"""
recon_all = pe.MapNode(interface=ReconAll(), name='recon_all',
                       iterfield=['subject_id', 'T1_files'])
recon_all.inputs.subject_id = subject_list
if not os.path.exists(subjects_dir):
    os.mkdir(subjects_dir)
recon_all.inputs.subjects_dir = subjects_dir
wf.connect(datasource, 'struct', recon_all, 'T1_files')
"""
Make average subject
"""
average = pe.Node(interface=MakeAverageSubject(), name="average")
average.inputs.subjects_dir = subjects_dir
wf.connect(recon_all, 'subject_id', average, 'subjects_ids')
# Run up to 4 subjects in parallel.
wf.run("MultiProc", plugin_args={'n_procs': 4})
|
import shutil
from nose.tools import *
from holland.lib.lvm import LogicalVolume
from holland.lib.lvm.snapshot import *
from tests.constants import *
class TestSnapshot(object):
def setup(self):
self.tmpdir = tempfile.mkdtemp()
def teardown(self):
shutil.rmtree(self.tmpdir)
def test_snapshot_fsm(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
snapshot.start(lv)
def test_snapshot_fsm_with_callbacks(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
def handle_event(event, *args, **kwargs):
pass
snapshot.register('pre-mount', handle_event)
snapshot.register('post-mount', handle_event)
snapshot.start(lv)
def test_snapshot_fsm_with_failures(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
def bad_callback(event, *args, **kwargs):
raise Exception("Oooh nooo!")
for evt in ('initialize', 'pre-snapshot', 'post-snapshot',
'pre-mount', 'post-mount', 'pre-unmount', 'post-unmount',
'pre-remove', 'post-remove', 'finish'):
snapshot.register(evt, bad_callback)
assert_raises(CallbackFailuresError, snapshot.start, lv)
snapshot.unregister(evt, bad_callback)
if snapshot.sigmgr._handlers:
raise Exception("WTF. sigmgr handlers still exist when checking event => %r", evt)
|
from django.apps import AppConfig
class ContentStoreAppConfig(AppConfig):
    """Django app configuration for the contentstore app."""

    name = "contentstore"

    def ready(self):
        """Register signal handlers once the app registry is ready.

        The import is performed purely for its side effect of connecting
        receivers; the old bare-expression reference to the module was a
        no-op (it only existed to silence unused-import linters) and has
        been replaced by a ``noqa`` marker.
        """
        import contentstore.signals  # noqa: F401
|
"""Git tools."""
from shlex import split
from plumbum import ProcessExecutionError
from plumbum.cmd import git
DEVELOPMENT_BRANCH = "develop"
def run_git(*args, dry_run=False, quiet=False):
    """Run a git command, print it before executing and capture the output."""
    command = git[split(" ".join(args))]
    if not quiet:
        prefix = "[DRY-RUN] " if dry_run else ""
        print("{}{}".format(prefix, command))
    if dry_run:
        return ""
    output = command()
    if output and not quiet:
        print(output)
    return output
def branch_exists(branch):
    """Return True if the branch exists."""
    try:
        run_git("rev-parse --verify {}".format(branch), quiet=True)
    except ProcessExecutionError:
        return False
    return True
def get_current_branch():
    """Get the current branch name."""
    name = run_git("rev-parse --abbrev-ref HEAD", quiet=True)
    return name.strip()
|
from __future__ import unicode_literals
from .atomicparsley import AtomicParsleyPP
from .ffmpeg import (
FFmpegPostProcessor,
FFmpegAudioFixPP,
FFmpegEmbedSubtitlePP,
FFmpegExtractAudioPP,
FFmpegFixupStretchedPP,
FFmpegMergerPP,
FFmpegMetadataPP,
FFmpegVideoConvertorPP,
)
from .xattrpp import XAttrMetadataPP
from .execafterdownload import ExecAfterDownloadPP
def get_postprocessor(key):
    """Look up a postprocessor class by its name without the 'PP' suffix.

    Raises KeyError if no such class is defined in this module.
    """
    return globals()['{0}PP'.format(key)]
# Public API: every postprocessor class importable from this package.
__all__ = [
    'AtomicParsleyPP',
    'ExecAfterDownloadPP',
    'FFmpegAudioFixPP',
    'FFmpegEmbedSubtitlePP',
    'FFmpegExtractAudioPP',
    'FFmpegFixupStretchedPP',
    'FFmpegMergerPP',
    'FFmpegMetadataPP',
    'FFmpegPostProcessor',
    'FFmpegVideoConvertorPP',
    'XAttrMetadataPP',
]
|
__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
    """Google Cloud DNS plugin for JuliaBox.

    NOTE(review): despite the add_cname/delete_cname method names, the
    records created and removed are type 'A', not CNAME -- confirm intent.
    """
    provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
    # One googleapiclient connection per thread (clients are not thread-safe).
    threadlocal = threading.local()
    # Populated lazily from JBoxCfg by configure().
    INSTALLID = None
    REGION = None
    DOMAIN = None
    @staticmethod
    def configure():
        """Load project id, managed zone and domain from 'cloud_host' config."""
        cloud_host = JBoxCfg.get('cloud_host')
        JBoxGCD.INSTALLID = cloud_host['install_id']
        JBoxGCD.REGION = cloud_host['region']
        JBoxGCD.DOMAIN = cloud_host['domain']
    @staticmethod
    def domain():
        """Return the configured DNS domain, loading config on first use."""
        if JBoxGCD.DOMAIN is None:
            JBoxGCD.configure()
        return JBoxGCD.DOMAIN
    @staticmethod
    def connect():
        """Return this thread's Cloud DNS API client, creating it if needed."""
        c = getattr(JBoxGCD.threadlocal, 'conn', None)
        if c is None:
            JBoxGCD.configure()
            creds = GoogleCredentials.get_application_default()
            JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
        return c
    @staticmethod
    @retry_on_errors(retries=2)
    def add_cname(name, value):
        """Create an A record mapping *name* to address *value* (TTL 300s)."""
        JBoxGCD.connect().changes().create(
            project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
            body={'kind': 'dns#change',
                  'additions': [
                      {'rrdatas': [value],
                       'kind': 'dns#resourceRecordSet',
                       'type': 'A',
                       'name': name,
                       'ttl': 300} ] }).execute()
    @staticmethod
    @retry_on_errors(retries=2)
    def delete_cname(name):
        """Delete the A record for *name*, if one is registered."""
        resp = JBoxGCD.connect().resourceRecordSets().list(
            project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
            name=name, type='A').execute()
        if len(resp['rrsets']) == 0:
            JBoxGCD.log_debug('No prior dns registration found for %s', name)
        else:
            # Cloud DNS deletions must match the existing record exactly,
            # so reuse the looked-up rrdata and ttl.
            cname = resp['rrsets'][0]['rrdatas'][0]
            ttl = resp['rrsets'][0]['ttl']
            JBoxGCD.connect().changes().create(
                project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
                body={'kind': 'dns#change',
                      'deletions': [
                          {'rrdatas': [str(cname)],
                           'kind': 'dns#resourceRecordSet',
                           'type': 'A',
                           'name': name,
                           'ttl': ttl} ] }).execute()
            JBoxGCD.log_warn('Prior dns registration was found for %s', name)
|
from datetime import timedelta
from flask import flash, redirect, request, session
from indico.core.db import db
from indico.modules.admin import RHAdminBase
from indico.modules.news import logger, news_settings
from indico.modules.news.forms import NewsForm, NewsSettingsForm
from indico.modules.news.models.news import NewsItem
from indico.modules.news.util import get_recent_news
from indico.modules.news.views import WPManageNews, WPNews
from indico.util.date_time import now_utc
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.rh import RH
from indico.web.util import jsonify_data, jsonify_form
class RHNews(RH):
    """Public page listing all news items, newest first."""

    @staticmethod
    def _is_new(item):
        """Whether *item* is recent enough to be highlighted as new."""
        days = news_settings.get('new_days')
        if not days:
            return False
        threshold = (now_utc() - timedelta(days=days)).date()
        return item.created_dt.date() >= threshold

    def _process(self):
        items = NewsItem.query.order_by(NewsItem.created_dt.desc()).all()
        return WPNews.render_template('news.html', news=items, _is_new=self._is_new)
class RHNewsItem(RH):
    """Public page for a single news item."""
    # Redirect to the canonical slugged URL for the item.
    normalize_url_spec = {
        'locators': {
            lambda self: self.item.locator.slugged
        }
    }
    def _process_args(self):
        # 404 when the news_id from the URL does not exist.
        self.item = NewsItem.get_or_404(request.view_args['news_id'])
    def _process(self):
        return WPNews.render_template('news_item.html', item=self.item)
class RHManageNewsBase(RHAdminBase):
    """Base class for news management request handlers (admin-only)."""
    pass
class RHManageNews(RHManageNewsBase):
    """Admin page listing all news items, newest first."""

    def _process(self):
        items = NewsItem.query.order_by(NewsItem.created_dt.desc()).all()
        return WPManageNews.render_template('admin/news.html', 'news', news=items)
class RHNewsSettings(RHManageNewsBase):
    """Admin dialog for editing the news module settings."""
    def _process(self):
        form = NewsSettingsForm(obj=FormDefaults(**news_settings.get_all()))
        if form.validate_on_submit():
            news_settings.set_multi(form.data)
            # Settings influence which items are shown/marked; drop the cache.
            get_recent_news.clear_cached()
            flash(_('Settings have been saved'), 'success')
            return jsonify_data()
        return jsonify_form(form)
class RHCreateNews(RHManageNewsBase):
    """Admin dialog for posting a new news item."""
    def _process(self):
        form = NewsForm()
        if form.validate_on_submit():
            item = NewsItem()
            form.populate_obj(item)
            db.session.add(item)
            # Flush so the item gets its id before logging/flashing.
            db.session.flush()
            get_recent_news.clear_cached()
            logger.info('News %r created by %s', item, session.user)
            flash(_("News '{title}' has been posted").format(title=item.title), 'success')
            return jsonify_data(flash=False)
        return jsonify_form(form)
class RHManageNewsItemBase(RHManageNewsBase):
    """Base class for management RHs that operate on a single news item."""
    def _process_args(self):
        RHManageNewsBase._process_args(self)
        # 404 when the news_id from the URL does not exist.
        self.item = NewsItem.get_or_404(request.view_args['news_id'])
class RHEditNews(RHManageNewsItemBase):
    """Admin dialog for editing an existing news item."""
    def _process(self):
        form = NewsForm(obj=self.item)
        if form.validate_on_submit():
            # Remember the pre-edit title so the flash message refers to
            # the item the admin selected, even if the title changed.
            old_title = self.item.title
            form.populate_obj(self.item)
            db.session.flush()
            get_recent_news.clear_cached()
            logger.info('News %r modified by %s', self.item, session.user)
            flash(_("News '{title}' has been updated").format(title=old_title), 'success')
            return jsonify_data(flash=False)
        return jsonify_form(form)
class RHDeleteNews(RHManageNewsItemBase):
    """Deletes a news item and redirects back to the management page."""
    def _process(self):
        db.session.delete(self.item)
        get_recent_news.clear_cached()
        # The ORM object is still usable after delete() until the session
        # flushes, so the title can be read for the flash message.
        flash(_("News '{title}' has been deleted").format(title=self.item.title), 'success')
        logger.info('News %r deleted by %r', self.item, session.user)
        return redirect(url_for('news.manage'))
|
import sys
def fix_terminator(tokens):
    """Detach a trailing period from the final token, in place.

    If the last token ends with '.' but is not itself a sentence
    terminator ('.', '?' or '!'), the period is split off into its own
    token. Empty lists are left untouched.
    """
    if not tokens:
        return
    final = tokens[-1]
    if final in ('.', '?', '!') or not final.endswith('.'):
        return
    tokens[-1] = final[:-1]
    tokens.append('.')
def balance_quotes(tokens):
    """Rewrite opening single-quote tokens as backticks, in place.

    Quote tokens alternate opener/closer; every even-numbered (opening)
    quote becomes '`', except an unmatched final opener, which is kept
    as-is unless it is the very first token of the list.
    """
    total = tokens.count("'")
    if not total:
        return
    seen = 0
    for idx, tok in enumerate(tokens):
        if tok != "'":
            continue
        is_opener = seen % 2 == 0
        if is_opener and (idx == 0 or seen != total - 1):
            tokens[idx] = "`"
        seen += 1
def output(tokens):
    """Print *tokens* as one space-joined line after balancing quotes.

    Empty or ``None`` token lists are ignored. ``balance_quotes`` mutates
    the list in place before printing.

    Fix: the bare ``print`` statement was Python-2-only syntax; with a
    single parenthesized argument, ``print(...)`` parses and behaves
    identically on both Python 2 and Python 3.
    """
    if not tokens:
        return
    balance_quotes(tokens)
    print(' '.join(tokens))
# Re-attach dangling closing punctuation: a line consisting solely of a
# closing quote or bracket belongs to the previous line's token list.
prev = None
for line in sys.stdin:
    tokens = line.split()
    if len(tokens) == 1 and tokens[0] in ('"', "'", ')', ']'):
        # NOTE(review): assumes the first input line is never a lone
        # closer -- prev would still be None here; confirm upstream.
        prev.append(tokens[0])
    else:
        output(prev)
        prev = tokens
# Flush the final buffered line.
output(prev)
|
"""
webModifySqlAPI
~~~~~~~~~~~~~~
为web应用与后台数据库操作(插入,更新,删除操作)的接口
api_functions 中的DataApiFunc.py为其接口函数汇聚点,所有全局变量设置都在此;所有后台函数调用都在此设置
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2015 by Armin kissf lu.
:license: ukl, see LICENSE for more details.
"""
from . import api
from flask import json
from flask import request
from bson import json_util
from api_functions.DataApiFunc import (deleManuleVsimSrc,
insertManuleVsimSrc,
updateManuleVsimSrc,
deleteNewVsimTestInfo,
insertNewVsimTestInfo,
updateNewVsimTestInfo)
@api.route('/delet_manulVsim/', methods=['POST'])
def delet_manulVsim():
    """Delete manual vsim source rows from the uploaded 'file' array.

    :return: data-layer JSON result, or an error payload for non-POST
             access (defensive; the route only accepts POST).
    """
    if request.method == 'POST':
        arrayData = request.get_array(field_name='file')
        return deleManuleVsimSrc(array_data=arrayData)
    else:
        # Error payload: err flag, message (kept verbatim), empty data.
        returnJsonData = {'err': True, 'errinfo': '操作违法!', 'data': []}
        return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/insert_manulVsim/', methods=['POST'])
def insert_manulVsim():
    """Insert manual vsim source rows from the uploaded 'file' array.

    :return: data-layer JSON result, or an error payload for non-POST
             access (defensive; the route only accepts POST).
    """
    if request.method == 'POST':
        arrayData = request.get_array(field_name='file')
        return insertManuleVsimSrc(array_data=arrayData)
    else:
        returnJsonData = {'err': True, 'errinfo': '操作违法!', 'data': []}
        return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/update_manulVsim/', methods=['POST'])
def update_manulVsim():
    """Update manual vsim source rows from the uploaded 'file' array.

    :return: data-layer JSON result, or an error payload for non-POST
             access (defensive; the route only accepts POST).
    """
    if request.method == 'POST':
        arrayData = request.get_array(field_name='file')
        return updateManuleVsimSrc(array_data=arrayData)
    else:
        returnJsonData = {'err': True, 'errinfo': '操作违法!', 'data': []}
        return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/delet_newvsimtest_info_table/', methods=['POST'])
def delet_newvsimtest_info_table():
    """Delete rows of the new-vsim-test info table from the uploaded array.

    :return: data-layer JSON result, or an error payload for non-POST
             access (defensive; the route only accepts POST).
    """
    if request.method == 'POST':
        arrayData = request.get_array(field_name='file')
        return deleteNewVsimTestInfo(array_data=arrayData)
    else:
        returnJsonData = {'err': True, 'errinfo': '操作违法!', 'data': []}
        return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/insert_newvsimtest_info_table/', methods=['POST'])
def insert_newvsimtest_info_table():
    """Insert rows into the new-vsim-test info table from the uploaded array.

    :return: data-layer JSON result, or an error payload for non-POST
             access (defensive; the route only accepts POST).
    """
    if request.method == 'POST':
        arrayData = request.get_array(field_name='file')
        return insertNewVsimTestInfo(array_data=arrayData)
    else:
        returnJsonData = {'err': True, 'errinfo': '操作违法!', 'data': []}
        return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/update_newvsimtest_info_table/', methods=['POST'])
def update_newvsimtest_info_table():
    """Update rows of the new-vsim-test info table from the uploaded array.

    :return: data-layer JSON result, or an error payload for non-POST
             access (defensive; the route only accepts POST).
    """
    if request.method == 'POST':
        arrayData = request.get_array(field_name='file')
        return updateNewVsimTestInfo(array_data=arrayData)
    else:
        returnJsonData = {'err': True, 'errinfo': '操作违法!', 'data': []}
        return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
|
import os, scrapy, argparse
from realclearpolitics.spiders.spider import RcpSpider
from scrapy.crawler import CrawlerProcess
# Command-line front-end for scraping realclearpolitics polls.
parser = argparse.ArgumentParser('Scrap realclearpolitics polls data')
parser.add_argument('url', action="store")
parser.add_argument('--locale', action="store", default='')
parser.add_argument('--race', action="store", default='primary')
parser.add_argument('--csv', dest='to_csv', action='store_true')
parser.add_argument('--output', dest='output', action='store')
args = parser.parse_args()
url = args.url
# Extra fields attached to every scraped item.
extra_fields = { 'locale': args.locale, 'race': args.race }
if (args.to_csv):
    if args.output is None:
        # Derive the output name from the last URL path component.
        filename = url.split('/')[-1].split('.')[0]
        output = filename + ".csv"
        print("No output file specified : using " + output)
    else:
        output = args.output
        if not output.endswith(".csv"):
            output = output + ".csv"
    if os.path.isfile(output):
        os.remove(output)
    # SECURITY NOTE(review): the command line is built by concatenating the
    # user-supplied url/output into a shell string -- a crafted argument can
    # inject shell commands; prefer subprocess.run([...], shell=False).
    os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
else:
    # In-process crawl; the pipeline handles item storage.
    settings = {
        'ITEM_PIPELINES' : {
            'realclearpolitics.pipeline.PollPipeline': 300,
        },
        'LOG_LEVEL' : 'ERROR',
        'DOWNLOAD_HANDLERS' : {'s3': None,}
    }
    process = CrawlerProcess(settings);
    process.crawl(RcpSpider, url, extra_fields)
    process.start()
|
from panda3d.core import NodePath, DecalEffect
import DNANode
import DNAWall
import random
class DNAFlatBuilding(DNANode.DNANode):
    """A flat (wall-only) building in the DNA scene graph.

    Children that are DNAWall nodes accumulate currentWallHeight while
    traversing; the total height sizes the camera barrier and the
    suit/cogdo variants of the building.
    """
    COMPONENT_CODE = 9
    # Shared accumulator: total wall height of the building being traversed.
    currentWallHeight = 0
    def __init__(self, name):
        DNANode.DNANode.__init__(self, name)
        self.width = 0
        self.hasDoor = False
    def setWidth(self, width):
        self.width = width
    def getWidth(self):
        return self.width
    def setCurrentWallHeight(self, currentWallHeight):
        # Note: writes the class-level accumulator, not an instance attribute.
        DNAFlatBuilding.currentWallHeight = currentWallHeight
    def getCurrentWallHeight(self):
        return DNAFlatBuilding.currentWallHeight
    def setHasDoor(self, hasDoor):
        self.hasDoor = hasDoor
    def getHasDoor(self):
        return self.hasDoor
    def makeFromDGI(self, dgi):
        """Deserialize this node from a datagram iterator."""
        DNANode.DNANode.makeFromDGI(self, dgi)
        # Width is stored as int16 hundredths of a unit.
        self.width = dgi.getInt16() / 100.0
        self.hasDoor = dgi.getBool()
    def setupSuitFlatBuilding(self, nodePath, dnaStorage):
        """Build the stashed suit ('sb') variant of this 'tb' building."""
        name = self.getName()
        if name[:2] != 'tb':
            return
        name = 'sb' + name[2:]
        node = nodePath.attachNewNode(name)
        node.setPosHpr(self.getPos(), self.getHpr())
        numCodes = dnaStorage.getNumCatalogCodes('suit_wall')
        if numCodes < 1:
            return
        # Pick a random suit wall style from the catalog.
        code = dnaStorage.getCatalogCode(
            'suit_wall', random.randint(0, numCodes - 1))
        wallNode = dnaStorage.findNode(code)
        if not wallNode:
            return
        wallNode = wallNode.copyTo(node, 0)
        # Size the wall to the building's width and accumulated height.
        wallScale = wallNode.getScale()
        wallScale.setX(self.width)
        wallScale.setZ(DNAFlatBuilding.currentWallHeight)
        wallNode.setScale(wallScale)
        if self.getHasDoor():
            wallNodePath = node.find('wall_*')
            doorNode = dnaStorage.findNode('suit_door')
            doorNode = doorNode.copyTo(wallNodePath, 0)
            # Normalize the door scale relative to render, then center it.
            doorNode.setScale(NodePath(), (1, 1, 1))
            doorNode.setPosHpr(0.5, 0, 0, 0, 0, 0)
            wallNodePath.setEffect(DecalEffect.make())
        node.flattenMedium()
        # Hidden until the street switches to the suit variant.
        node.stash()
    def setupCogdoFlatBuilding(self, nodePath, dnaStorage):
        """Build the stashed cogdo ('cb') variant of this 'tb' building."""
        name = self.getName()
        if name[:2] != 'tb':
            return
        name = 'cb' + name[2:]
        node = nodePath.attachNewNode(name)
        node.setPosHpr(self.getPos(), self.getHpr())
        numCodes = dnaStorage.getNumCatalogCodes('cogdo_wall')
        if numCodes < 1:
            return
        code = dnaStorage.getCatalogCode(
            'cogdo_wall', random.randint(0, numCodes - 1))
        wallNode = dnaStorage.findNode(code)
        if not wallNode:
            return
        wallNode = wallNode.copyTo(node, 0)
        wallScale = wallNode.getScale()
        wallScale.setX(self.width)
        wallScale.setZ(DNAFlatBuilding.currentWallHeight)
        wallNode.setScale(wallScale)
        if self.getHasDoor():
            wallNodePath = node.find('wall_*')
            doorNode = dnaStorage.findNode('suit_door')
            doorNode = doorNode.copyTo(wallNodePath, 0)
            doorNode.setScale(NodePath(), (1, 1, 1))
            doorNode.setPosHpr(0.5, 0, 0, 0, 0, 0)
            wallNodePath.setEffect(DecalEffect.make())
        node.flattenMedium()
        node.stash()
    def traverse(self, nodePath, dnaStorage):
        """Instantiate this building under nodePath and recurse into children."""
        # Reset the accumulator; DNAWall children add their heights to it.
        DNAFlatBuilding.currentWallHeight = 0
        node = nodePath.attachNewNode(self.getName())
        internalNode = node.attachNewNode(self.getName() + '-internal')
        scale = self.getScale()
        scale.setX(self.width)
        internalNode.setScale(scale)
        node.setPosHpr(self.getPos(), self.getHpr())
        for child in self.children:
            if isinstance(child, DNAWall.DNAWall):
                child.traverse(internalNode, dnaStorage)
            else:
                child.traverse(node, dnaStorage)
        if DNAFlatBuilding.currentWallHeight == 0:
            print 'empty flat building with no walls'
        else:
            cameraBarrier = dnaStorage.findNode('wall_camera_barrier')
            if cameraBarrier is None:
                # NOTE(review): DNAError is not imported in this module --
                # reaching this line would raise NameError instead; confirm.
                raise DNAError.DNAError('DNAFlatBuilding requires that there is a wall_camera_barrier in storage')
            cameraBarrier = cameraBarrier.copyTo(internalNode, 0)
            cameraBarrier.setScale((1, 1, DNAFlatBuilding.currentWallHeight))
            internalNode.flattenStrong()
            collisionNode = node.find('**/door_*/+CollisionNode')
            if not collisionNode.isEmpty():
                # Name the door trigger so the knock-knock logic can find it.
                collisionNode.setName('KnockKnockDoorSphere_' + dnaStorage.getBlock(self.getName()))
            cameraBarrier.wrtReparentTo(nodePath, 0)
            # Regroup geometry: walls become the base, windows/doors/cornices
            # are decaled onto them, then everything is flattened.
            wallCollection = internalNode.findAllMatches('wall*')
            wallHolder = node.attachNewNode('wall_holder')
            wallDecal = node.attachNewNode('wall_decal')
            windowCollection = internalNode.findAllMatches('**/window*')
            doorCollection = internalNode.findAllMatches('**/door*')
            corniceCollection = internalNode.findAllMatches('**/cornice*_d')
            wallCollection.reparentTo(wallHolder)
            windowCollection.reparentTo(wallDecal)
            doorCollection.reparentTo(wallDecal)
            corniceCollection.reparentTo(wallDecal)
            # Drop DNA tags so flattening can merge the wall geometry.
            for i in xrange(wallHolder.getNumChildren()):
                iNode = wallHolder.getChild(i)
                iNode.clearTag('DNACode')
                iNode.clearTag('DNARoot')
            wallHolder.flattenStrong()
            wallDecal.flattenStrong()
            holderChild0 = wallHolder.getChild(0)
            wallDecal.getChildren().reparentTo(holderChild0)
            holderChild0.reparentTo(internalNode)
            holderChild0.setEffect(DecalEffect.make())
            wallHolder.removeNode()
            wallDecal.removeNode()
        self.setupSuitFlatBuilding(nodePath, dnaStorage)
        self.setupCogdoFlatBuilding(nodePath, dnaStorage)
        node.flattenStrong()
|
"""
Various i18n functions.
Helper functions for both the internal translation system
and for TranslateWiki-based translations.
By default messages are assumed to reside in a package called
'scripts.i18n'. In pywikibot 2.0, that package is not packaged
with pywikibot, and pywikibot 2.0 does not have a hard dependency
on any i18n messages. However, there are three user input questions
in pagegenerators which will use i18 messages if they can be loaded.
The default message location may be changed by calling
L{set_message_package} with a package name. The package must contain
an __init__.py, and a message bundle called 'pywikibot' containing
messages. See L{twntranslate} for more information on the messages.
"""
from __future__ import unicode_literals
__version__ = '$Id$'
import sys
import re
import locale
import json
import os
import pkgutil
from collections import defaultdict
from pywikibot import Error
from .plural import plural_rules
import pywikibot
from . import config2 as config
# Python 3 has no ``basestring``; alias it so isinstance checks keep working.
if sys.version_info[0] > 2:
    basestring = (str, )
# Matches {{PLURAL:...}} directives embedded in i18n message strings.
PLURAL_PATTERN = r'{{PLURAL:(?:%\()?([^\)]*?)(?:\)d)?\|(.*?)}}'
# Package holding the message bundles; changeable via set_messages_package().
_messages_package_name = 'scripts.i18n'
# Tri-state availability cache: None = unknown, True/False once checked.
_messages_available = None
# Per-language cache of loaded message bundles.
_cache = defaultdict(dict)
def set_messages_package(package_name):
    """Set the package name where i18n messages are located."""
    global _messages_package_name, _messages_available
    _messages_package_name = package_name
    # Force re-detection of message availability on the next query.
    _messages_available = None
def messages_available():
    """
    Return False if there are no i18n messages available.

    Availability is determined by attempting to import the package
    configured via L{set_messages_package}; the outcome is cached in a
    module-level global until the package name changes.

    @rtype: bool
    """
    global _messages_available
    if _messages_available is None:
        try:
            __import__(_messages_package_name)
        except ImportError:
            _messages_available = False
        else:
            _messages_available = True
    return _messages_available
def _altlang(code):
"""Define fallback languages for particular languages.
If no translation is available to a specified language, translate() will
try each of the specified fallback languages, in order, until it finds
one with a translation, with 'en' and '_default' as a last resort.
For example, if for language 'xx', you want the preference of languages
to be: xx > fr > ru > en, you let this method return ['fr', 'ru'].
This code is used by other translating methods below.
@param code: The language code
@type code: string
@return: language codes
@rtype: list of str
"""
# Akan
if code in ['ak', 'tw']:
return ['ak', 'tw']
# Amharic
if code in ['aa', 'ti']:
return ['am']
# Arab
if code in ['arc', 'arz', 'so']:
return ['ar']
if code == 'kab':
return ['ar', 'fr']
# Bulgarian
if code in ['cu', 'mk']:
return ['bg', 'sr', 'sh']
# Czech
if code in ['cs', 'sk']:
return ['cs', 'sk']
# German
if code in ['bar', 'frr', 'ksh', 'pdc', 'pfl']:
return ['de']
if code == 'lb':
return ['de', 'fr']
if code in ['als', 'gsw']:
return ['als', 'gsw', 'de']
if code == 'nds':
return ['nds-nl', 'de']
if code in ['dsb', 'hsb']:
return ['hsb', 'dsb', 'de']
if code == 'sli':
return ['de', 'pl']
if code == 'rm':
return ['de', 'it']
if code == 'stq':
return ['nds', 'de']
# Greek
if code in ['grc', 'pnt']:
return ['el']
# Esperanto
if code in ['io', 'nov']:
return ['eo']
# Spanish
if code in ['an', 'arn', 'ast', 'ay', 'ca', 'ext', 'lad', 'nah', 'nv', 'qu',
'yua']:
return ['es']
if code in ['gl', 'gn']:
return ['es', 'pt']
if code == 'eu':
return ['es', 'fr']
if code == 'cbk-zam':
return ['es', 'tl']
# Estonian
if code in ['fiu-vro', 'vro']:
return ['fiu-vro', 'vro', 'et']
if code == 'liv':
return ['et', 'lv']
# Persian (Farsi)
if code == 'ps':
return ['fa']
if code in ['glk', 'mzn']:
return ['glk', 'mzn', 'fa', 'ar']
# Finnish
if code == 'vep':
return ['fi', 'ru']
if code == 'fit':
return ['fi', 'sv']
# French
if code in ['bm', 'br', 'ht', 'kg', 'ln', 'mg', 'nrm', 'pcd',
'rw', 'sg', 'ty', 'wa']:
return ['fr']
if code == 'oc':
return ['fr', 'ca', 'es']
if code in ['co', 'frp']:
return ['fr', 'it']
# Hindi
if code in ['sa']:
return ['hi']
if code in ['ne', 'new']:
return ['ne', 'new', 'hi']
if code in ['bh', 'bho']:
return ['bh', 'bho']
# Indonesian and Malay
if code in ['ace', 'bug', 'bjn', 'id', 'jv', 'ms', 'su']:
return ['id', 'ms', 'jv']
if code == 'map-bms':
return ['jv', 'id', 'ms']
# Inuit languages
if code in ['ik', 'iu']:
return ['iu', 'kl']
if code == 'kl':
return ['da', 'iu', 'no', 'nb']
# Italian
if code in ['eml', 'fur', 'lij', 'lmo', 'nap', 'pms', 'roa-tara', 'sc',
'scn', 'vec']:
return ['it']
# Lithuanian
if code in ['bat-smg', 'sgs']:
return ['bat-smg', 'sgs', 'lt']
# Latvian
if code == 'ltg':
return ['lv']
# Dutch
if code in ['af', 'fy', 'li', 'pap', 'srn', 'vls', 'zea']:
return ['nl']
if code == ['nds-nl']:
return ['nds', 'nl']
# Polish
if code in ['csb', 'szl']:
return ['pl']
# Portuguese
if code in ['fab', 'mwl', 'tet']:
return ['pt']
# Romanian
if code in ['roa-rup', 'rup']:
return ['roa-rup', 'rup', 'ro']
if code == 'mo':
return ['ro']
# Russian and Belarusian
if code in ['ab', 'av', 'ba', 'bxr', 'ce', 'cv', 'inh', 'kk', 'koi', 'krc',
'kv', 'ky', 'lbe', 'lez', 'mdf', 'mhr', 'mn', 'mrj', 'myv',
'os', 'sah', 'tg', 'udm', 'uk', 'xal']:
return ['ru']
if code in ['kbd', 'ady']:
return ['kbd', 'ady', 'ru']
if code == 'tt':
return ['tt-cyrl', 'ru']
if code in ['be', 'be-x-old', 'be-tarask']:
return ['be', 'be-x-old', 'be-tarask', 'ru']
if code == 'kaa':
return ['uz', 'ru']
# Serbocroatian
if code in ['bs', 'hr', 'sh']:
return ['sh', 'hr', 'bs', 'sr', 'sr-el']
if code == 'sr':
return ['sr-el', 'sh', 'hr', 'bs']
# Tagalog
if code in ['bcl', 'ceb', 'ilo', 'pag', 'pam', 'war']:
return ['tl']
# Turkish and Kurdish
if code in ['diq', 'ku']:
return ['ku', 'ku-latn', 'tr']
if code == 'gag':
return ['tr']
if code == 'ckb':
return ['ku']
# Ukrainian
if code in ['crh', 'crh-latn']:
return ['crh', 'crh-latn', 'uk', 'ru']
if code in ['rue']:
return ['uk', 'ru']
# Chinese
if code in ['zh-classical', 'lzh', 'minnan', 'zh-min-nan', 'nan', 'zh-tw',
'zh', 'zh-hans']:
return ['zh', 'zh-hans', 'zh-tw', 'zh-cn', 'zh-classical', 'lzh']
if code in ['cdo', 'gan', 'hak', 'ii', 'wuu', 'za', 'zh-classical', 'lzh',
'zh-cn', 'zh-yue', 'yue']:
return ['zh', 'zh-hans' 'zh-cn', 'zh-tw', 'zh-classical', 'lzh']
# Scandinavian languages
if code in ['da', 'sv']:
return ['da', 'no', 'nb', 'sv', 'nn']
if code in ['fo', 'is']:
return ['da', 'no', 'nb', 'nn', 'sv']
if code == 'nn':
return ['no', 'nb', 'sv', 'da']
if code in ['no', 'nb']:
return ['no', 'nb', 'da', 'nn', 'sv']
if code == 'se':
return ['sv', 'no', 'nb', 'nn', 'fi']
# Other languages
if code in ['bi', 'tpi']:
return ['bi', 'tpi']
if code == 'yi':
return ['he', 'de']
if code in ['ia', 'ie']:
return ['ia', 'la', 'it', 'fr', 'es']
if code == 'xmf':
return ['ka']
if code in ['nso', 'st']:
return ['st', 'nso']
if code in ['kj', 'ng']:
return ['kj', 'ng']
if code in ['meu', 'hmo']:
return ['meu', 'hmo']
if code == ['as']:
return ['bn']
# Default value
return []
class TranslationError(Error, ImportError):
    """Raised when no correct translation could be found."""
    # Inherits from ImportError, as this exception is now used
    # where previously an ImportError would have been raised,
    # and may have been caught by scripts as such.
    # Also derives from the package's base Error so that generic
    # pywikibot error handlers still catch it.
    pass
def _get_translation(lang, twtitle):
"""
Return message of certain twtitle if exists.
For internal use, don't use it directly.
"""
if twtitle in _cache[lang]:
return _cache[lang][twtitle]
message_bundle = twtitle.split('-')[0]
trans_text = None
filename = '%s/%s.json' % (message_bundle, lang)
try:
trans_text = pkgutil.get_data(
_messages_package_name, filename).decode('utf-8')
except (OSError, IOError): # file open can cause several exceptions
_cache[lang][twtitle] = None
return
transdict = json.loads(trans_text)
_cache[lang].update(transdict)
try:
return transdict[twtitle]
except KeyError:
return
def _extract_plural(code, message, parameters):
    """Check for the plural variants in message and replace them.

    Each {{PLURAL:...}} tag is resolved, in order, to one of its variants
    according to the plural rules of *code* (falling back to '_default').

    @param message: the message to be replaced
    @type message: unicode string
    @param parameters: plural parameters passed from other methods
    @type parameters: int, basestring, tuple, list, dict
    """
    plural_items = re.findall(PLURAL_PATTERN, message)
    if plural_items:  # we found PLURAL patterns, process it
        if len(plural_items) > 1 and isinstance(parameters, (tuple, list)) and \
           len(plural_items) != len(parameters):
            raise ValueError("Length of parameter does not match PLURAL "
                             "occurrences.")
        i = 0
        for selector, variants in plural_items:
            # Resolve the number this PLURAL tag should be evaluated with.
            if isinstance(parameters, dict):
                num = int(parameters[selector])
            elif isinstance(parameters, basestring):
                num = int(parameters)
            elif isinstance(parameters, (tuple, list)):
                # Sequences supply one number per PLURAL tag, in order.
                num = int(parameters[i])
                i += 1
            else:
                num = parameters
            # TODO: check against plural_rules[code]['nplurals']
            try:
                index = plural_rules[code]['plural'](num)
            except KeyError:
                # Unknown language: use the default plural rule.
                index = plural_rules['_default']['plural'](num)
            except TypeError:
                # we got an int, not a function
                index = plural_rules[code]['plural']
            repl = variants.split('|')[index]
            # count=1 replaces only the leftmost remaining PLURAL tag, so
            # each loop iteration consumes exactly one tag.
            message = re.sub(PLURAL_PATTERN, repl, message, count=1)
    return message
# Iterable usable as the `fallback` argument of translate() to try only the
# generic '_default' entry.
# NOTE(review): not referenced in this module; presumably used by callers.
DEFAULT_FALLBACK = ('_default', )
def translate(code, xdict, parameters=None, fallback=False):
    """Return the most appropriate translation from a translation dict.

    Given a language code and a dictionary, returns the dictionary's value for
    key 'code' if this key exists; otherwise tries to return a value for an
    alternative language that is most applicable to use on the wiki in
    language 'code' except fallback is False.

    The language itself is always checked first, then languages that
    have been defined to be alternatives, and finally English. If none of
    the options gives result, we just take the one language from xdict which may
    not be always the same. When fallback is iterable it'll return None if no
    code applies (instead of returning one).

    For PLURAL support have a look at the twntranslate method

    @param code: The language code
    @type code: string or Site object
    @param xdict: dictionary with language codes as keys or extended dictionary
                  with family names as keys containing language dictionaries or
                  a single (unicode) string. May contain PLURAL tags as
                  described in twntranslate
    @type xdict: dict, string, unicode
    @param parameters: For passing (plural) parameters
    @type parameters: dict, string, unicode, int
    @param fallback: Try an alternate language code. If it's iterable it'll
                     also try those entries and choose the first match.
    @type fallback: boolean or iterable
    """
    family = pywikibot.config.family
    # If a site is given instead of a code, use its language
    if hasattr(code, 'code'):
        family = code.family.name
        code = code.code
    # Check whether xdict has multiple projects
    if isinstance(xdict, dict):
        if family in xdict:
            xdict = xdict[family]
        elif 'wikipedia' in xdict:
            # Fall back to the wikipedia sub-dictionary for other families.
            xdict = xdict['wikipedia']
    # Get the translated string
    if not isinstance(xdict, dict):
        # A plain string applies to every language.
        trans = xdict
    elif not xdict:
        trans = None
    else:
        # Build the ordered list of candidate language codes.
        codes = [code]
        if fallback is True:
            codes += _altlang(code) + ['_default', 'en']
        elif fallback is not False:
            codes += list(fallback)
        for code in codes:
            if code in xdict:
                trans = xdict[code]
                break
        else:
            if fallback is not True:
                # this shouldn't simply return "any one" code but when fallback
                # was True before 65518573d2b0, it did just that. When False it
                # did just return None. It's now also returning None in the new
                # iterable mode.
                return
            # Last resort: take an arbitrary entry from the dictionary.
            code = list(xdict.keys())[0]
            trans = xdict[code]
    if trans is None:
        return  # return None if we have no translation found
    if parameters is None:
        return trans
    # else we check for PLURAL variants
    trans = _extract_plural(code, trans, parameters)
    if parameters:
        try:
            return trans % parameters
        except (KeyError, TypeError):
            # parameter is for PLURAL variants only, don't change the string
            pass
    return trans
def twtranslate(code, twtitle, parameters=None, fallback=True):
    """
    Translate a message.

    The translations are retrieved from json files in messages_package_name.

    fallback parameter must be True for i18n and False for L10N or testing
    purposes.

    @param code: The language code
    @param twtitle: The TranslateWiki string title, in <package>-<key> format
    @param parameters: For passing parameters.
    @param fallback: Try an alternate language code
    @type fallback: boolean
    """
    if not messages_available():
        raise TranslationError(
            'Unable to load messages package %s for bundle %s'
            '\nIt can happen due to lack of i18n submodule or files. '
            'Read https://mediawiki.org/wiki/PWB/i18n'
            % (_messages_package_name, twtitle))
    code_needed = False
    # If a site is given instead of a code, use its language
    if hasattr(code, 'code'):
        lang = code.code
    # check whether we need the language code back
    elif isinstance(code, list):
        # A one-element list signals that the caller wants to know which
        # language the translation was actually found for (see twntranslate).
        lang = code.pop()
        code_needed = True
    else:
        lang = code
    # There are two possible failure modes: the translation dict might not have
    # the language altogether, or a specific key could be untranslated. Both
    # modes are caught with the KeyError.
    langs = [lang]
    if fallback:
        langs += _altlang(lang) + ['en']
    # for-else: the else clause runs only if no language yielded a message.
    for alt in langs:
        trans = _get_translation(alt, twtitle)
        if trans:
            break
    else:
        raise TranslationError(
            'No English translation has been defined for TranslateWiki key'
            ' %r\nIt can happen due to lack of i18n submodule or files. '
            'Read https://mediawiki.org/wiki/PWB/i18n' % twtitle)
    # send the language code back via the given list
    if code_needed:
        code.append(alt)
    if parameters:
        return trans % parameters
    else:
        return trans
def twntranslate(code, twtitle, parameters=None):
    r"""Translate a message with plural support.

    Support is implemented like in MediaWiki extension. If the TranslateWiki
    message contains a plural tag inside which looks like::

        {{PLURAL:<number>|<variant1>|<variant2>[|<variantn>]}}

    it takes that variant calculated by the plural_rules depending on the number
    value. Multiple plurals are allowed.

    As an examples, if we had several json dictionaries in test folder like:

    en.json:

      {
         "test-plural": "Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}.",
      }

    fr.json:

      {
         "test-plural": "Robot: Changer %(descr)s {{PLURAL:num|une page|quelques pages}}.",
      }

    and so on.

    >>> from pywikibot import i18n
    >>> i18n.set_messages_package('tests.i18n')
    >>> # use a number
    >>> str(i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'})
    'Bot: Changing no pages.'
    >>> # use a string
    >>> str(i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'})
    'Bot: Changing one page.'
    >>> # use a dictionary
    >>> str(i18n.twntranslate('en', 'test-plural', {'num':2}))
    'Bot: Changing 2 pages.'
    >>> # use additional format strings
    >>> str(i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}))
    'Robot: Changer seulement une page.'
    >>> # use format strings also outside
    >>> str(i18n.twntranslate('fr', 'test-plural', 10) % {'descr': 'seulement'})
    'Robot: Changer seulement quelques pages.'

    The translations are retrieved from i18n.<package>, based on the callers
    import table.

    @param code: The language code
    @param twtitle: The TranslateWiki string title, in <package>-<key> format
    @param parameters: For passing (plural) parameters.
    """
    # If a site is given instead of a code, use its language
    if hasattr(code, 'code'):
        code = code.code
    # we send the code via list and get the alternate code back
    code = [code]
    trans = twtranslate(code, twtitle)
    # get the alternate language code modified by twtranslate
    lang = code.pop()
    # check for PLURAL variants, using the rules of the language actually
    # used for the translation (which may be a fallback of the request)
    trans = _extract_plural(lang, trans, parameters)
    # we always have a dict for replacement of translatewiki messages
    if parameters and isinstance(parameters, dict):
        try:
            return trans % parameters
        except KeyError:
            # parameter is for PLURAL variants only, don't change the string
            pass
    return trans
def twhas_key(code, twtitle):
    """
    Check if a message has a translation in the specified language code.

    The translations are retrieved from i18n.<package>, based on the caller's
    import table. No code fallback is made.

    @param code: The language code
    @param twtitle: The TranslateWiki string title, in <package>-<key> format
    """
    # A site object may be passed instead of a bare language code.
    lang = code.code if hasattr(code, 'code') else code
    return _get_translation(lang, twtitle) is not None
def twget_keys(twtitle):
    """
    Return all language codes for a special message.

    @param twtitle: The TranslateWiki string title, in <package>-<key> format
    """
    # Locate the on-disk directory that holds the json files of the bundle
    # this message belongs to.
    package = twtitle.split('-')[0]
    mod = __import__(_messages_package_name, fromlist=[str('__file__')])
    pathname = os.path.join(os.path.dirname(mod.__file__), package)
    # Candidate languages are the basenames of the json files found there.
    langs = (entry.partition('.')[0]
             for entry in sorted(os.listdir(pathname))
             if entry.endswith('.json'))
    # Exclude 'qqq' (message documentation) and languages whose file lacks
    # this specific message, i.e. incomplete translation sets.
    return [lang for lang in langs
            if lang != 'qqq' and _get_translation(lang, twtitle)]
def input(twtitle, parameters=None, password=False, fallback_prompt=None):
    """
    Ask the user a question, return the user's answer.

    The prompt message is retrieved via L{twtranslate} and either uses the
    config variable 'userinterface_lang' or the default locale as the language
    code.

    @param twtitle: The TranslateWiki string title, in <package>-<key> format
    @param parameters: The values which will be applied to the translated text
    @param password: Hides the user's input (for password entry)
    @param fallback_prompt: The English prompt if i18n is not available.
    @rtype: unicode string
    """
    if not messages_available():
        if not fallback_prompt:
            raise TranslationError(
                'Unable to load messages package %s for bundle %s'
                % (_messages_package_name, twtitle))
        else:
            prompt = fallback_prompt
    else:
        # Prefer the configured UI language; otherwise derive the code from
        # the system locale (e.g. 'en' from 'en_US').
        code = config.userinterface_lang or \
               locale.getdefaultlocale()[0].split('_')[0]
        prompt = twtranslate(code, twtitle, parameters)
    return pywikibot.input(prompt, password)
|
import sys
from pubnub import PubnubTornado as Pubnub
# Positional CLI arguments with 'demo' fallbacks:
#   argv[1] publish key, argv[2] subscribe key, argv[3] secret key,
#   argv[4] cipher key, argv[5] SSL flag.
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or ''
# NOTE(review): bool('0') is True, so any non-empty argv[5] enables SSL --
# confirm that is the intended behavior.
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
                secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on)
channel = 'hello_world'
def callback(message):
    # Used for both the success and the error callback of here_now().
    print(message)
pubnub.here_now(channel, callback=callback, error=callback)
# Enter the Tornado IO loop; blocks until the loop is stopped.
pubnub.start()
|
import numbers
import numpy as np
from ..constants import BOLTZMANN_IN_MEV_K
from ..energy import Energy
class Analysis(object):
    r"""Class containing methods for the Data class

    Attributes
    ----------
    detailed_balance_factor

    Methods
    -------
    integrate
    position
    width
    scattering_function
    dynamic_susceptibility
    estimate_background
    get_keys
    get_bounds
    """
    @property
    def detailed_balance_factor(self):
        r"""Returns the detailed balance factor (sometimes called the Bose
        factor)

        Parameters
        ----------
        None

        Returns
        -------
        dbf : ndarray
            The detailed balance factor (temperature correction)
        """
        # NOTE(review): assumes column 3 of self.Q is the energy transfer in
        # meV and self.temp is in Kelvin -- confirm against the Data class.
        return 1. - np.exp(-self.Q[:, 3] / BOLTZMANN_IN_MEV_K / self.temp)
    def integrate(self, bounds=None, background=None, hkle=True):
        r"""Returns the integrated intensity within given bounds

        Parameters
        ----------
        bounds : bool, optional
            A boolean expression representing the bounds inside which the
            calculation will be performed
        background : float or dict, optional
            Default: None
        hkle : bool, optional
            If True, integrates only over h, k, l, e dimensions, otherwise
            integrates over all dimensions in :py:attr:`.Data.data`

        Returns
        -------
        result : float
            The integrated intensity either over all data, or within
            specified boundaries
        """
        result = 0
        # Trapezoidal integration of the background-subtracted intensity,
        # accumulated over each requested axis.
        for key in self.get_keys(hkle):
            result += np.trapz(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background),
                               np.squeeze(self.data[key][self.get_bounds(bounds)]))
        return result
    def position(self, bounds=None, background=None, hkle=True):
        r"""Returns the position of a peak within the given bounds

        Parameters
        ----------
        bounds : bool, optional
            A boolean expression representing the bounds inside which the
            calculation will be performed
        background : float or dict, optional
            Default: None
        hkle : bool, optional
            If True, integrates only over h, k, l, e dimensions, otherwise
            integrates over all dimensions in :py:attr:`.Data.data`

        Returns
        -------
        result : tup
            The result is a tuple with position in each dimension of Q,
            (h, k, l, e)
        """
        result = ()
        for key in self.get_keys(hkle):
            _result = 0
            # First moment (center of mass) of the background-subtracted
            # intensity along each integration axis, normalised by the
            # integrated intensity.
            for key_integrate in self.get_keys(hkle):
                _result += np.trapz(self.data[key][self.get_bounds(bounds)] *
                                    (self.intensity[self.get_bounds(bounds)] - self.estimate_background(background)),
                                    self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
            result += (np.squeeze(_result),)
        if hkle:
            return result
        else:
            return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
    def width(self, bounds=None, background=None, fwhm=False, hkle=True):
        r"""Returns the mean-squared width of a peak within the given bounds

        Parameters
        ----------
        bounds : bool, optional
            A boolean expression representing the bounds inside which the
            calculation will be performed
        background : float or dict, optional
            Default: None
        fwhm : bool, optional
            If True, returns width in fwhm, otherwise in mean-squared width.
            Default: False
        hkle : bool, optional
            If True, integrates only over h, k, l, e dimensions, otherwise
            integrates over all dimensions in :py:attr:`.Data.data`

        Returns
        -------
        result : tup
            The result is a tuple with the width in each dimension of Q,
            (h, k, l, e)
        """
        result = ()
        for key in self.get_keys(hkle):
            _result = 0
            # Second central moment about the peak position, normalised by
            # the integrated intensity.
            for key_integrate in self.get_keys(hkle):
                _result += np.trapz((self.data[key][self.get_bounds(bounds)] -
                                     self.position(bounds, background, hkle=False)[key]) ** 2 *
                                    (self.intensity[self.get_bounds(bounds)] - self.estimate_background(background)),
                                    self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
            if fwhm:
                # Variance -> FWHM assuming a Gaussian line shape:
                # FWHM = 2 * sqrt(2 ln 2) * sigma.
                result += (np.sqrt(np.squeeze(_result)) * 2. * np.sqrt(2. * np.log(2.)),)
            else:
                result += (np.squeeze(_result),)
        if hkle:
            return result
        else:
            return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
    def scattering_function(self, material, ei):
        r"""Returns the neutron scattering function, i.e. the detector counts
        scaled by :math:`4 \pi / \sigma_{\mathrm{tot}} * k_i/k_f`.

        Parameters
        ----------
        material : object
            Definition of the material given by the :py:class:`.Material`
            class

        ei : float
            Incident energy in meV

        Returns
        -------
        counts : ndarray
            The detector counts scaled by the total scattering cross section
            and ki/kf
        """
        # Incident and final wavevectors; Ef = Ei - energy transfer.
        ki = Energy(energy=ei).wavevector
        kf = Energy(energy=ei - self.e).wavevector
        return 4 * np.pi / material.total_scattering_cross_section * ki / kf * self.detector
    def dynamic_susceptibility(self, material, ei):
        r"""Returns the dynamic susceptibility
        :math:`\chi^{\prime\prime}(\mathbf{Q},\hbar\omega)`

        Parameters
        ----------
        material : object
            Definition of the material given by the :py:class:`.Material`
            class

        ei : float
            Incident energy in meV

        Returns
        -------
        counts : ndarray
            The detector counts turned into the scattering function multiplied
            by the detailed balance factor
        """
        return self.scattering_function(material, ei) * self.detailed_balance_factor
    def estimate_background(self, bg_params):
        r"""Estimate the background according to ``type`` specified.

        Parameters
        ----------
        bg_params : dict
            Input dictionary has keys 'type' and 'value'. Types are
                * 'constant' : background is the constant given by 'value'
                * 'percent' : background is estimated by the bottom x%, where x
                  is value
                * 'minimum' : background is estimated as the detector counts

        Returns
        -------
        background : float or ndarray
            Value determined to be the background. Will return ndarray only if
            `'type'` is `'constant'` and `'value'` is an ndarray
        """
        if isinstance(bg_params, type(None)):
            return 0
        elif isinstance(bg_params, numbers.Number):
            # A bare number is used directly as a constant background.
            return bg_params
        elif bg_params['type'] == 'constant':
            return bg_params['value']
        elif bg_params['type'] == 'percent':
            # Average of the lowest `value` percent of the non-negative
            # intensities.
            inten = self.intensity[self.intensity >= 0.]
            Npts = int(inten.size * (bg_params['value'] / 100.))
            min_vals = inten[np.argsort(inten)[:Npts]]
            background = np.average(min_vals)
            return background
        elif bg_params['type'] == 'minimum':
            return min(self.intensity)
        else:
            return 0
    def get_bounds(self, bounds):
        r"""Generates a to_fit tuple if bounds is present in kwargs

        Parameters
        ----------
        bounds : dict

        Returns
        -------
        to_fit : tuple
            Tuple of indices
        """
        if bounds is not None:
            return np.where(bounds)
        else:
            # No bounds given: select all points (Q[:, 0] used as an
            # always-present column to build the full index set).
            return np.where(self.Q[:, 0])
    def get_keys(self, hkle):
        r"""Returns all of the Dictionary key names

        Parameters
        ----------
        hkle : bool
            If True only returns keys for h,k,l,e, otherwise returns all keys

        Returns
        -------
        keys : list
            :py:attr:`.Data.data` dictionary keys
        """
        if hkle:
            return [key for key in self.data if key in self.Q_keys.values()]
        else:
            # NOTE(review): returns the keys *not* in data_keys, which is the
            # complement rather than "all keys" -- confirm intended behavior.
            return [key for key in self.data if key not in self.data_keys.values()]
|
from itertools import imap, chain
def set_name(name, f):
    """Attach a display name to *f* for pretty-repr purposes.

    Objects that reject attribute assignment (or whose attribute name cannot
    be encoded) are left untouched; *f* is always returned.
    """
    try:
        setattr(f, '__pipetools__name__', name)
    except (AttributeError, UnicodeEncodeError):
        pass
    return f
def get_name(f):
    """Return the best human-readable name for callable *f*.

    Preference order: an explicitly attached __pipetools__name__ (which may
    itself be lazy, i.e. a callable), the repr of a Pipe, the function's
    __name__, and finally its repr.
    """
    from pipetools.main import Pipe
    explicit = getattr(f, '__pipetools__name__', None)
    if explicit:
        # The attached name may be a callable producing the name lazily.
        return explicit() if callable(explicit) else explicit
    if isinstance(f, Pipe):
        return repr(f)
    return f.__name__ if hasattr(f, '__name__') else repr(f)
def repr_args(*args, **kwargs):
    """Render positional and keyword arguments as they would appear in a
    call, e.g. ``repr_args(1, x='a')`` -> ``"1, x='a'"``.

    Fixed to use only builtins (``itertools.imap`` and ``dict.iteritems``
    are Python 2-only), so the helper works on both Python 2 and 3.
    """
    parts = ['{0!r}'.format(arg) for arg in args]
    parts.extend('{0}={1!r}'.format(key, value)
                 for key, value in kwargs.items())
    return ', '.join(parts)
|
import unittest
from PyFoam.Basics.MatplotlibTimelines import MatplotlibTimelines
# Aggregate suite for MatplotlibTimelines tests; no cases are added in
# this module.
theSuite=unittest.TestSuite()
|
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
# Shared package manager, used below to ensure build prerequisites (gcc).
sm = software_manager.SoftwareManager()
class sblim_sfcb(test.test):
    """
    Autotest module for testing basic functionality
    of sblim_sfcb

    @author Wang Tao <wangttao@cn.ibm.com>
    """
    version = 1
    # Overall failure counter, incremented by each failing stage.
    nfail = 0
    path = ''
    def initialize(self, test_path=''):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        # Build prerequisite: the test sources are compiled with gcc.
        if not sm.check_installed('gcc'):
            logging.debug("gcc missing - trying to install")
            sm.install('gcc')
        ret_val = subprocess.Popen(['make', 'all'], cwd="%s/sblim_sfcb" %(test_path))
        # communicate() waits for the build to finish and sets returncode.
        ret_val.communicate()
        if ret_val.returncode != 0:
            self.nfail += 1
        # NOTE(review): this is logged even when the build above failed.
        logging.info('\n Test initialize successfully')
    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            os.environ["LTPBIN"] = "%s/shared" %(test_path)
            ret_val = subprocess.Popen(['./sblim-sfcb-test.sh'], cwd="%s/sblim_sfcb" %(test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)
    def postprocess(self):
        # Raise TestError if any stage recorded a failure.
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
|
"""
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import sys, os, string
import unittest
import tempfile
from test_all import verbose
try:
# For Python 2.3
from bsddb import db, hashopen, btopen, rnopen
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, hashopen, btopen, rnopen
class CompatibilityTestCase(unittest.TestCase):
    """Exercise the legacy bsddb-compatible btopen/hashopen/rnopen APIs."""
    def setUp(self):
        # Fresh scratch path per test; the db factory creates the file.
        self.filename = tempfile.mktemp()
    def tearDown(self):
        try:
            os.remove(self.filename)
        except os.error:
            pass
    def test01_btopen(self):
        self.do_bthash_test(btopen, 'btopen')
    def test02_hashopen(self):
        self.do_bthash_test(hashopen, 'hashopen')
    def test03_rnopen(self):
        data = string.split("The quick brown fox jumped over the lazy dog.")
        if verbose:
            print "\nTesting: rnopen"
        f = rnopen(self.filename, 'c')
        # Record numbers in recno databases are 1-based.
        for x in range(len(data)):
            f[x+1] = data[x]
        getTest = (f[1], f[2], f[3])
        if verbose:
            print '%s %s %s' % getTest
        assert getTest[1] == 'quick', 'data mismatch!'
        # Writing past the end implicitly extends the record range.
        f[25] = 'twenty-five'
        f.close()
        del f
        f = rnopen(self.filename, 'w')
        f[20] = 'twenty'
        # Missing record -> KeyError; non-integer key -> TypeError.
        def noRec(f):
            rec = f[15]
        self.assertRaises(KeyError, noRec, f)
        def badKey(f):
            rec = f['a string']
        self.assertRaises(TypeError, badKey, f)
        del f[3]
        # Iterate with the first/next cursor protocol until exhausted.
        rec = f.first()
        while rec:
            if verbose:
                print rec
            try:
                rec = f.next()
            except KeyError:
                break
        f.close()
    def test04_n_flag(self):
        # 'n' creates a new, empty database (truncating any existing one).
        f = hashopen(self.filename, 'n')
        f.close()
    def do_bthash_test(self, factory, what):
        # Shared scenario for the btree and hash flavors: create, populate,
        # iterate, close, reopen read-write and verify access + errors.
        if verbose:
            print '\nTesting: ', what
        f = factory(self.filename, 'c')
        if verbose:
            print 'creation...'
        # truth test
        if f:
            if verbose: print "truth test: true"
        else:
            if verbose: print "truth test: false"
        f['0'] = ''
        f['a'] = 'Guido'
        f['b'] = 'van'
        f['c'] = 'Rossum'
        f['d'] = 'invented'
        f['f'] = 'Python'
        if verbose:
            print '%s %s %s' % (f['a'], f['b'], f['c'])
        if verbose:
            print 'key ordering...'
        # Walk the database with the cursor protocol; KeyError marks the end.
        f.set_location(f.first()[0])
        while 1:
            try:
                rec = f.next()
            except KeyError:
                assert rec == f.last(), 'Error, last <> last!'
                f.previous()
                break
            if verbose:
                print rec
        assert f.has_key('f'), 'Error, missing key!'
        f.sync()
        f.close()
        # truth test
        # NOTE(review): truth-testing a closed DB is expected to raise
        # DBError; reaching the 'else' means no exception was raised.
        try:
            if f:
                if verbose: print "truth test: true"
            else:
                if verbose: print "truth test: false"
        except db.DBError:
            pass
        else:
            self.fail("Exception expected")
        del f
        if verbose:
            print 'modification...'
        f = factory(self.filename, 'w')
        f['d'] = 'discovered'
        if verbose:
            print 'access...'
        for key in f.keys():
            word = f[key]
            if verbose:
                print word
        # Missing key -> KeyError; non-string key -> TypeError.
        def noRec(f):
            rec = f['no such key']
        self.assertRaises(KeyError, noRec, f)
        def badKey(f):
            rec = f[15]
        self.assertRaises(TypeError, badKey, f)
        f.close()
def test_suite():
    """Build the suite containing every CompatibilityTestCase test."""
    suite = unittest.makeSuite(CompatibilityTestCase)
    return suite
# Allow running this module directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import Pango
# Keyvals used for keyboard navigation in the surname list.
_TAB = Gdk.keyval_from_name("Tab")
# NOTE(review): GDK names the main enter key "Return"; "Enter" may not
# resolve to the expected keyval -- confirm the intended key handling.
_ENTER = Gdk.keyval_from_name("Enter")
from .surnamemodel import SurnameModel
from .embeddedlist import EmbeddedList, TEXT_EDIT_COL
from ...ddtargets import DdTargets
from gramps.gen.lib import Surname, NameOriginType
from ...utils import match_primary_mask, no_match_primary_mask
# str.translate table mapping the first 32 (control) characters to None,
# i.e. stripping them from a string.
INVISIBLE = dict.fromkeys(list(range(32)))
class SurnameTab(EmbeddedList):
    # Model column holding the Surname object itself (columns 0-4 hold the
    # editable prefix/surname/connector/origin/primary values).
    _HANDLE_COL = 5
    _DND_TYPE = DdTargets.SURNAME
    # Tooltips for the list-manipulation buttons.
    _MSG = {
        'add'   : _('Create and add a new surname'),
        'del'   : _('Remove the selected surname'),
        'edit'  : _('Edit the selected surname'),
        'up'    : _('Move the selected surname upwards'),
        'down'  : _('Move the selected surname downwards'),
    }
    #index = column in model. Value =
    #  (name, sortcol in model, width, markup/text
    _column_names = [
        (_('Prefix'), 0, 150, TEXT_EDIT_COL, -1, None),
        (_('Surname'), 1, -1, TEXT_EDIT_COL, -1, None),
        (_('Connector'), 2, 100, TEXT_EDIT_COL, -1, None),
        ]
    # The origin combo and primary toggle are built separately in
    # build_columns(); tuples are (name, sort, width, modelcol).
    _column_combo = (_('Origin'), -1, 150, 3)  # name, sort, width, modelcol
    _column_toggle = (_('Primary', 'Name'), -1, 80, 4)
    def __init__(self, dbstate, uistate, track, name, on_change=None,
                 top_label='<b>%s</b>' % _("Multiple Surnames") ):
        """Embedded list editor for the surnames of a Name object.

        @param name: the Name object whose surname list is edited
        @param on_change: optional callable invoked after each update
        """
        self.obj = name
        self.on_change = on_change
        # Bookkeeping for the cell currently being edited
        # (set by on_edit_start).
        self.curr_col = -1
        self.curr_cellr = None
        self.curr_celle = None
        EmbeddedList.__init__(self, dbstate, uistate, track, _('Family Surnames'),
                              SurnameModel, move_buttons=True,
                              top_label=top_label)
    def build_columns(self):
        """Build the treeview columns: the standard text columns plus the
        origin combobox column and the primary-surname toggle column."""
        #first the standard text columns with normal method
        EmbeddedList.build_columns(self)
        # now we add the two special columns
        # combobox for type
        colno = len(self.columns)
        name = self._column_combo[0]
        renderer = Gtk.CellRendererCombo()
        renderer.set_property('ellipsize', Pango.EllipsizeMode.END)
        # set up the comboentry editable
        no = NameOriginType()
        self.cmborig = Gtk.ListStore(GObject.TYPE_INT, GObject.TYPE_STRING)
        self.cmborigmap = no.get_map().copy()
        #sort the keys based on the value
        keys = sorted(self.cmborigmap, key=lambda x: glocale.sort_key(self.cmborigmap[x]))
        for key in keys:
            if key != no.get_custom():
                self.cmborig.append(row=[key, self.cmborigmap[key]])
        # also offer the custom origin types already used in the database
        additional = self.dbstate.db.get_origin_types()
        if additional:
            for type in additional:
                if type:
                    self.cmborig.append(row=[no.get_custom(), type])
        renderer.set_property("model", self.cmborig)
        renderer.set_property("text-column", 1)
        renderer.set_property('editable', not self.dbstate.db.readonly)
        renderer.connect('editing_started', self.on_edit_start_cmb, colno)
        renderer.connect('edited', self.on_orig_edited, self._column_combo[3])
        # add to treeview
        column = Gtk.TreeViewColumn(name, renderer, text=self._column_combo[3])
        column.set_resizable(True)
        column.set_sort_column_id(self._column_combo[1])
        column.set_min_width(self._column_combo[2])
        column.set_expand(False)
        self.columns.append(column)
        self.tree.append_column(column)
        # toggle box for primary
        colno += 1
        name = self._column_toggle[0]
        renderer = Gtk.CellRendererToggle()
        renderer.set_property('activatable', True)
        # radio style: only one surname can be the primary one
        renderer.set_property('radio', True)
        renderer.connect( 'toggled', self.on_prim_toggled, self._column_toggle[3])
        # add to treeview
        column = Gtk.TreeViewColumn(name, renderer, active=self._column_toggle[3])
        column.set_resizable(False)
        column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        column.set_alignment(0.5)
        column.set_sort_column_id(self._column_toggle[1])
        column.set_max_width(self._column_toggle[2])
        self.columns.append(column)
        self.tree.append_column(column)
def setup_editable_col(self):
"""
inherit this and set the variables needed for editable columns
Variable edit_col_funcs needs to be a dictionary from model col_nr to
function to call for
Example:
self.edit_col_funcs ={1: {'edit_start': self.on_edit_start,
'edited': self.on_edited
}}
"""
self.edit_col_funcs = {
0: {'edit_start': self.on_edit_start,
'edited': self.on_edit_inline},
1: {'edit_start': self.on_edit_start,
'edited': self.on_edit_inline},
2: {'edit_start': self.on_edit_start,
'edited': self.on_edit_inline}}
    def get_data(self):
        """Return the list of Surname objects backing this tab."""
        return self.obj.get_surname_list()
    def is_empty(self):
        """Return True when the model contains no surname rows."""
        return len(self.model)==0
def _get_surn_from_model(self):
"""
Return new surname_list for storing in the name based on content of
the model
"""
new_list = []
for idx in range(len(self.model)):
node = self.model.get_iter(idx)
surn = self.model.get_value(node, 5)
surn.set_prefix(self.model.get_value(node, 0))
surn.set_surname(self.model.get_value(node, 1))
surn.set_connector(self.model.get_value(node, 2))
surn.get_origintype().set(self.model.get_value(node, 3))
surn.set_primary(self.model.get_value(node, 4))
new_list += [surn]
return new_list
def update(self):
"""
Store the present data in the model to the name object
"""
new_map = self._get_surn_from_model()
self.obj.set_surname_list(new_map)
# update name in previews
if self.on_change:
self.on_change()
    def post_rebuild(self, prebuildpath):
        """
        Called when data model has changed, in particular necessary when row
        order is updated.
        @param prebuildpath: path selected before rebuild, None if none
        @type prebuildpath: tree path
        """
        # Reordering can change which surname shows first, so refresh previews.
        if self.on_change:
            self.on_change()
def column_order(self):
# order of columns for EmbeddedList. Only the text columns here
return ((1, 0), (1, 1), (1, 2))
    def add_button_clicked(self, obj):
        """Add button is clicked, add a surname to the person"""
        # The very first surname added automatically becomes the primary one.
        prim = False
        if len(self.obj.get_surname_list()) == 0:
            prim = True
        node = self.model.append(row=['', '', '', str(NameOriginType()), prim,
                                 Surname()])
        self.selection.select_iter(node)
        path = self.model.get_path(node)
        # Immediately open the first (prefix) cell of the new row for editing.
        self.tree.set_cursor_on_cell(path,
                                     focus_column=self.columns[0],
                                     focus_cell=None,
                                     start_editing=True)
        self.update()
def del_button_clicked(self, obj):
"""
Delete button is clicked. Remove from the model
"""
(model, node) = self.selection.get_selected()
if node:
self.model.remove(node)
self.update()
    def on_edit_start(self, cellr, celle, path, colnr):
        """ start of editing. Store stuff so we know when editing ends where we
            are
        """
        # Remembered so next_cell/prev_cell know which cell is being edited.
        self.curr_col = colnr
        self.curr_cellr = cellr
        self.curr_celle = celle
    def on_edit_start_cmb(self, cellr, celle, path, colnr):
        """
        An edit starts in the origin type column
        This means a cmb has been created as celle, and we can set up the stuff
        we want this cmb to contain: autocompletion, stop edit when selection
        in the cmb happens.
        """
        self.on_edit_start(cellr, celle, path, colnr)
        #set up autocomplete
        entry = celle.get_child()
        entry.set_width_chars(10)
        completion = Gtk.EntryCompletion()
        completion.set_model(self.cmborig)
        completion.set_minimum_key_length(1)
        completion.set_text_column(1)
        entry.set_completion(completion)
        # Commit the value as soon as a combo entry is selected.
        celle.connect('changed', self.on_origcmb_change, path, colnr)
    def on_edit_start_toggle(self, cellr, celle, path, colnr):
        """
        Edit starts in the primary-surname toggle column: only record where we
        are, the actual state change is handled by on_prim_toggled.
        """
        self.on_edit_start(cellr, celle, path, colnr)
def on_edit_inline(self, cell, path, new_text, colnr):
"""
Edit is happening. The model is updated and the surname objects updated.
colnr must be the column in the model.
"""
node = self.model.get_iter(path)
text = new_text.translate(INVISIBLE).strip()
self.model.set_value(node, colnr, text)
self.update()
    def on_orig_edited(self, cellr, path, new_text, colnr):
        """
        An edit is finished in the origin type column. For a cmb in an editor,
        the model may only be updated when typing is finished, as editing stops
        automatically on update of the model.
        colnr must be the column in the model.
        """
        # Once the text is final, handling is identical to a plain text cell.
        self.on_edit_inline(cellr, path, new_text, colnr)
def on_origcmb_change(self, cmb, path, colnr):
"""
A selection occured in the cmb of the origin type column. colnr must
be the column in the model.
"""
act = cmb.get_active()
if act == -1:
return
self.on_orig_edited(None, path,
self.cmborig.get_value(
self.cmborig.get_iter((act,)),1),
colnr)
    def on_prim_toggled(self, cell, path, colnr):
        """
        Primary surname on path is toggled. colnr must be the col
        in the model
        """
        #obtain current value
        node = self.model.get_iter(path)
        old_val = self.model.get_value(node, colnr)
        # Radio-button semantics: exactly one row stays True. Toggling the row
        # that is already primary is a no-op (break before clearing the rest);
        # otherwise the toggled row becomes True and every other row False.
        for nr in range(len(self.obj.get_surname_list())):
            if nr == int(path[0]):
                if old_val:
                    #True remains True
                    break
                else:
                    #This value becomes True
                    self.model.set_value(self.model.get_iter((nr,)), colnr, True)
            else:
                self.model.set_value(self.model.get_iter((nr,)), colnr, False)
        self.update()
        return
def edit_button_clicked(self, obj):
""" Edit button clicked
"""
(model, node) = self.selection.get_selected()
if node:
path = self.model.get_path(node)
self.tree.set_cursor_on_cell(path,
focus_column=self.columns[0],
focus_cell=None,
start_editing=True)
    def key_pressed(self, obj, event):
        """
        Handles the key being pressed.
        Here we make sure tab moves to next or previous value in row on TAB
        """
        # Give the base class first chance; we only add TAB navigation.
        # Returning None (bare return) lets GTK continue default handling.
        if not EmbeddedList.key_pressed(self, obj, event):
            if event.type == Gdk.EventType.KEY_PRESS and event.keyval in (_TAB,):
                if no_match_primary_mask(event.get_state(),
                                         Gdk.ModifierType.SHIFT_MASK):
                    return self.next_cell()
                elif match_primary_mask(event.get_state(), Gdk.ModifierType.SHIFT_MASK):
                    # Shift+TAB moves backwards.
                    return self.prev_cell()
                else:
                    return
            else:
                return
        return True
def next_cell(self):
"""
Move to the next cell to edit it
"""
(model, node) = self.selection.get_selected()
if node:
path = self.model.get_path(node).get_indices()[0]
nccol = self.curr_col+1
if nccol < 4:
if self.curr_celle:
self.curr_celle.editing_done()
self.tree.set_cursor_on_cell(Gtk.TreePath((path,)),
focus_column=self.columns[nccol],
focus_cell=None,
start_editing=True)
elif nccol == 4:
#go to next line if there is one
if path < len(self.obj.get_surname_list()):
newpath = Gtk.TreePath((path+1,))
self.curr_celle.editing_done()
self.selection.select_path(newpath)
self.tree.set_cursor_on_cell(newpath,
focus_column=self.columns[0],
focus_cell=None,
start_editing=True)
else:
#stop editing
self.curr_celle.editing_done()
return
return True
    def prev_cell(self):
        """
        Move to the previous cell to edit it
        """
        (model, node) = self.selection.get_selected()
        if node:
            path = self.model.get_path(node).get_indices()[0]
            if self.curr_col > 0:
                # simply move one text column to the left on the same row
                self.tree.set_cursor_on_cell(Gtk.TreePath((path,)),
                                             focus_column=self.columns[self.curr_col-1],
                                             focus_cell=None,
                                             start_editing=True)
            elif self.curr_col == 0:
                #go to prev line if there is one
                if path > 0:
                    newpath = Gtk.TreePath((path-1,))
                    self.selection.select_path(newpath)
                    # columns[-2] is the last text column; columns[-1] is the
                    # primary toggle, which is not tab-editable.
                    self.tree.set_cursor_on_cell(newpath,
                                                 focus_column=self.columns[-2],
                                                 focus_cell=None,
                                                 start_editing=True)
                else:
                    #stop editing
                    self.curr_celle.editing_done()
                    return
        return True
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    # Make sibling modules importable when this file is run directly.
    sys.path = ['.','..'] + sys.path # HACK to simplify unit testing.
from BTL.translation import _
class BEGIN: # represents special BEGIN location before first next.
    """Sentinel type: an iterator position one before the first element."""
    pass
from UserDict import DictMixin
from cmap_swig import *
import sys
from weakref import WeakKeyDictionary
LEAK_TEST = False
class CMap(object,DictMixin):
"""In-order mapping. Provides same operations and behavior as a dict,
but provides in-order iteration. Additionally provides operations to
find the nearest key <= or >= a given key.
This provides a significantly wider set of operations than
berkeley db BTrees, but it provides no means for persistence.
LIMITATION: The key must be a python numeric type, e.g., an integer
or a float. The value can be any python object.
Operation: Time Applicable
Complexity: Methods:
---------------------------------------------------
Item insertion: O(log n) append, __setitem__
Item deletion: O(log n + k) __delitem__, erase
Key search: O(log n) __getitem__, get, find,
__contains__
Value search: n/a
Iteration step: amortized O(1), next, prev
worst-case O(log n)
Memory: O(n)
n = number of elements in map. k = number of iterators pointing
into map. CMap assumes there are few iterators in existence at
any given time.
Iterators are not invalidated by insertions. Iterators are
invalidated by deletions only when the key-value pair
referenced is deleted. Deletion has a '+k' because the
__delitem__ searches linearly through the set of iterators
pointing into this map to find any iterator pointing at the
deleted item and then invalidates the iterator.
This class is backed by the C++ STL map class, but conforms
to the Python container interface."""
class _AbstractIterator:
"""Iterates over elements in the map in order."""
def __init__(self, m, si = BEGIN ): # "s.." implies swig object.
"""Creates an iterator pointing to element si in map m.
Do not instantiate directly. Use iterkeys, itervalues, or
iteritems.
The _AbstractIterator takes ownership of any C++ iterator
(i.e., the swig object 'si') and will deallocate it when
the iterator is deallocated.
Examples of typical behavior:
>>> from CMap import *
>>> m = CMap()
>>> m[12] = 6
>>> m[9] = 4
>>> for k in m:
... print int(k)
...
9
12
>>>
Example edge cases (empty map):
>>> from CMap import *
>>> m = CMap()
>>> try:
... i = m.__iter__()
... i.value()
... except IndexError:
... print 'IndexError.'
...
IndexError.
>>> try:
... i.next()
... except StopIteration:
... print 'stopped'
...
stopped
@param map: CMap.
@param node: Node that this iterator will point at. If None
then the iterator points to end(). If BEGIN
then the iterator points to one before the beginning.
"""
assert isinstance(m, CMap)
assert not isinstance(si, CMap._AbstractIterator)
if si == None:
self._si = map_end(m._smap)
else:
self._si = si # C++ iterator wrapped by swig.
self._map = m
m._iterators[self] = 1 # using map as set of weak references.
def __hash__(self):
return id(self)
def __cmp__(self, other):
if not self._si or not other._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN and other._si == BEGIN: return 0
if self._si == BEGIN and other._si != BEGIN: return -1
elif self._si != BEGIN and other._si == BEGIN: return 1
return iter_cmp(self._map._smap, self._si, other._si )
def at_begin(self):
"""equivalent to self == m.begin() where m is a CMap.
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.begin()
>>> i == m.begin()
True
>>> i.at_begin()
True
>>> i == m.end() # no elements so begin()==end()
True
>>> i.at_end()
True
>>> m[6] = 'foo' # insertion does not invalidate iterators.
>>> i = m.begin()
>>> i == m.end()
False
>>> i.value()
'foo'
>>> try: # test at_begin when not at beginning.
... i.next()
... except StopIteration:
... print 'ok'
ok
>>> i.at_begin()
False
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN: # BEGIN is one before begin(). Yuck!!
return False
return map_iter_at_begin(self._map._smap, self._si)
def at_end(self):
"""equivalent to self == m.end() where m is a CMap, but
at_end is faster because it avoids the dynamic memory
alloation in m.end().
>>> from CMap import CMap
>>> m = CMap()
>>> m[6] = 'foo'
>>> i = m.end() # test when at end.
>>> i == m.end()
True
>>> i.at_end()
True
>>> int(i.prev())
6
>>> i.at_end() # testing when not at end.
False
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
return False
return map_iter_at_end(self._map._smap, self._si)
def key(self):
"""@return: the key of the key-value pair referenced by this
iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise IndexError(_("Cannot dereference iterator until after "
"first call to .next."))
elif map_iter_at_end(self._map._smap, self._si):
raise IndexError()
return iter_key(self._si)
def value(self):
"""@return: the value of the key-value pair currently referenced
by this iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise IndexError(_("Cannot dereference iterator until after "
"first call to next."))
elif map_iter_at_end(self._map._smap, self._si):
raise IndexError()
return iter_value(self._si)
def item(self):
"""@return the key-value pair referenced by this iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
return self.key(), self.value()
def _next(self):
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
self._si = map_begin(self._map._smap)
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
return
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
iter_incr(self._si)
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
def _prev(self):
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise StopIteration()
elif map_iter_at_begin(self._map._smap, self._si):
self._si = BEGIN
raise StopIteration
iter_decr(self._si)
def __del__(self):
# Python note: if a reference to x is intentionally
# eliminated using "del x" and there are other references
# to x then __del__ does not get called at this time.
# Only when the last reference is deleted by an intentional
# "del" or when the reference goes out of scope does
# the __del__ method get called.
self._invalidate()
def _invalidate(self):
if self._si == None:
return
try:
del self._map._iterators[self]
except KeyError:
pass # could've been removed because weak reference,
# and because _invalidate is called from __del__.
if self._si != BEGIN:
iter_delete(self._si)
self._si = None
def __iter__(self):
"""If the iterator is itself iteratable then we do things like:
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[11] = 'bar'
>>> for x in m.itervalues():
... print x
...
foo
bar
"""
return self
def __len__(self):
return len(self._map)
    class KeyIterator(_AbstractIterator):
        def next(self):
            """Returns the next key in the map.

               Insertion does not invalidate iterators. Deletion only
               invalidates an iterator if the iterator pointed at the
               key-value pair being deleted.

               This is implemented by moving the iterator and then
               dereferencing it.  If we dereferenced and then moved
               then we would get the odd behavior:

                 Ex: I have keys [1,2,3].  The iterator i points at 1.
                   print i.next()   # prints 1
                   print i.next()   # prints 2
                   print i.prev()   # prints 3
                   print i.prev()   # prints 2

               However, because we move and then dereference, when an
               iterator is first created it points to nowhere
               so that the first next moves to the first element.

               Ex:
                 >>> from CMap import *
                 >>> m = CMap()
                 >>> m[5] = 1
                 >>> m[8] = 4
                 >>> i = m.__iter__()
                 >>> print int(i.next())
                 5
                 >>> print int(i.next())
                 8
                 >>> print int(i.prev())
                 5

               We are still left with the odd behavior that an
               iterator cannot be dereferenced until after the first next().

               Ex edge cases:
                 >>> from CMap import CMap
                 >>> m = CMap()
                 >>> i = m.__iter__()
                 >>> try:
                 ...     i.prev()
                 ... except StopIteration:
                 ...     print 'StopIteration'
                 ...
                 StopIteration
                 >>> m[5]='a'
                 >>> i = m.iterkeys()
                 >>> int(i.next())
                 5
                 >>> try: i.next()
                 ... except StopIteration: print 'StopIteration'
                 ...
                 StopIteration
                 >>> int(i.prev())
                 5
                 >>> try: int(i.prev())
                 ... except StopIteration: print 'StopIteration'
                 ...
                 StopIteration
                 >>> int(i.next())
                 5
            """
            # move first, then dereference (see docstring for why).
            self._next()
            return self.key()

        def prev(self):
            """Returns the previous key in the map.
               See next() for more detail and examples.
            """
            self._prev()
            return self.key()
    class ValueIterator(_AbstractIterator):
        def next(self):
            """@return: next value in the map.

               >>> from CMap import *
               >>> m = CMap()
               >>> m[5] = 10
               >>> m[6] = 3
               >>> i = m.itervalues()
               >>> int(i.next())
               10
               >>> int(i.next())
               3
            """
            self._next()
            return self.value()

        def prev(self):
            """@return: previous value in the map's key ordering."""
            self._prev()
            return self.value()
    class ItemIterator(_AbstractIterator):
        def next(self):
            """@return: next item in the map's key ordering.

               >>> from CMap import CMap
               >>> m = CMap()
               >>> m[5] = 10
               >>> m[6] = 3
               >>> i = m.iteritems()
               >>> k,v = i.next()
               >>> int(k)
               5
               >>> int(v)
               10
               >>> k,v = i.next()
               >>> int(k)
               6
               >>> int(v)
               3
            """
            self._next()
            return self.key(), self.value()

        def prev(self):
            """@return: previous (key, value) item in the map's key ordering."""
            self._prev()
            return self.key(), self.value()
def __init__(self, d={} ):
"""Instantiate RBTree containing values from passed dict and
ordered based on cmp.
>>> m = CMap()
>>> len(m)
0
>>> m[5]=2
>>> len(m)
1
>>> print m[5]
2
"""
#self._index = {} # to speed up searches.
self._smap = map_constructor() # C++ map wrapped by swig.
for key, value in d.items():
self[key]=value
self._iterators = WeakKeyDictionary()
# whenever node is deleted. search iterators
# for any iterator that becomes invalid.
def __contains__(self,x):
return self.get(x) != None
    def __iter__(self):
        """@return: KeyIterator positioned one before the beginning of the
           key ordering so that the first next() returns the first key."""
        # default si=BEGIN gives the one-before-begin position.
        return CMap.KeyIterator(self)
    def begin(self):
        """Returns an iterator pointing at first key-value pair. This
           differs from iterkeys, itervalues, and iteritems which return an
           iterator pointing one before the first key-value pair.

           @return: key iterator to first key-value.

           >>> from CMap import *
           >>> m = CMap()
           >>> m[5.0] = 'a'
           >>> i = m.begin()
           >>> int(i.key())    # raises no IndexError.
           5
           >>> i = m.iterkeys()
           >>> try:
           ...     i.key()
           ... except IndexError:
           ...     print 'IndexError raised'
           ...
           IndexError raised
        """
        # the iterator takes ownership of the fresh swig iterator.
        i = CMap.KeyIterator(self, map_begin(self._smap) )
        return i
    def end(self):
        """Returns an iterator pointing after end of key ordering.
           The iterator's prev method will move to the last
           key-value pair in the ordering.  This in keeping with
           the notion that a range is specified as [i,j) where
           j is not in the range, and the range [i,j) where i==j
           is an empty range.

           This operation takes O(1) time.

           @return: key iterator one after end.
        """
        i = CMap.KeyIterator(self,None)  # None means one after last node.
        return i
    def iterkeys(self):
        """Key iterator positioned one before the first key."""
        return CMap.KeyIterator(self)
    def itervalues(self):
        """Value iterator positioned one before the first value."""
        return CMap.ValueIterator(self)
    def iteritems(self):
        """(key, value) iterator positioned one before the first item."""
        return CMap.ItemIterator(self)
    def __len__(self):
        """Number of key-value pairs in the map."""
        return map_size(self._smap)
def __str__(self):
s = "{"
first = True
for k,v in self.items():
if first:
first = False
else:
s += ", "
if type(v) == str:
s += "%s: '%s'" % (k,v)
else:
s += "%s: %s" % (k,v)
s += "}"
return s
def __repr__(self):
return self.__str__()
    def __getitem__(self, key):
        """Return the value stored for key; raises KeyError when absent."""
        # IMPL 1: without _index
        return map_find(self._smap,key)  # raises KeyError if key not found

        # IMPL 2: with _index.
        #return iter_value(self._index[key])
    def __setitem__(self, key, value):
        """
        >>> from CMap import CMap
        >>> m = CMap()
        >>> m[6] = 'bar'
        >>> m[6]
        'bar'
        >>>
        """
        # Keys must be numeric: the underlying C++ map is numeric-keyed.
        # NOTE(review): this assert disappears under 'python -O'.
        assert type(key) == int or type(key) == float
        # IMPL 1. without _index.
        map_set(self._smap,key,value)

        ## IMPL 2. with _index
        ## If using indices following allows us to perform only one search.
        #i = map_insert_iter(self._smap,key,value)
        #if iter_value(i) != value:
        #    iter_set(i,value)
        #else: self._index[key] = i
        ## END IMPL2
    def __delitem__(self, key):
        """Deletes the item with matching key from the map.

           This takes O(log n + k) where n is the number of elements
           in the map and k is the number of iterators pointing into the map.
           Before deleting the item it linearly searches through
           all iterators pointing into the map and invalidates any that
           are pointing at the item about to be deleted.

           >>> from CMap import CMap
           >>> m = CMap()
           >>> m[12] = 'foo'
           >>> m[13] = 'bar'
           >>> m[14] = 'boo'
           >>> del m[12]
           >>> try:
           ...     m[12]
           ... except KeyError:
           ...     print 'ok'
           ...
           ok
           >>> j = m.begin()
           >>> int(j.next())
           14
           >>> i = m.begin()
           >>> i.value()
           'bar'
           >>> del m[13]   # delete object referenced by an iterator
           >>> try:
           ...     i.value()
           ... except RuntimeError:
           ...     print 'ok'
           ok
           >>> j.value()   # deletion should not invalidate other iterators.
           'boo'
        """
        #map_erase( self._smap, key )  # map_erase is dangerous. It could
                                       # delete the node causing an iterator
                                       # to become invalid. --Dave
        si = map_find_iter( self._smap, key )   # si = swig'd iterator.
        if map_iter_at_end(self._smap, si):
            iter_delete(si)
            raise KeyError(key)
        # NOTE(review): a registered iterator still at the BEGIN sentinel
        # would reach iter_cmp with a non-swig object here -- presumably
        # that cannot happen in practice; confirm before relying on it.
        for i in list(self._iterators):
            if iter_cmp( self._smap, i._si, si ) == 0:
                i._invalidate()
        map_iter_erase( self._smap, si )
        iter_delete(si)
        #iter_delete( self._index[key] )   # IMPL 2. with _index.
        #del self._index[key]              # IMPL 2. with _index.
def erase(self, iter):
"""Remove item pointed to by the iterator. All iterators that
point at the erased item including the passed iterator
are immediately invalidated after the deletion completes.
>>> from CMap import CMap
>>> m = CMap()
>>> m[12] = 'foo'
>>> i = m.find(12)
>>> m.erase(i)
>>> len(m) == 0
True
"""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair" ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different CMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot erase end() iterator.") )
# invalidate iterators.
for i in list(self._iterators):
if iter._si is not i._si and iiter_cmp( self._smmap, iter._si, i._si ) == 0:
i._invalidate()
# remove item from the map.
map_iter_erase( self._smap, iter._si )
# invalidate last iterator pointing to the deleted location in the map.
iter._invalidate()
    def __del__(self):
        # invalidate all iterators, then free the underlying C++ map.
        for i in list(self._iterators):
            i._invalidate()
        map_delete(self._smap)
    def get(self, key, default=None):
        """@return value corresponding to specified key or return 'default'
           if the key is not found.
        """
        try:
            return map_find(self._smap,key)       # IMPL 1. without _index.
            #return iter_value(self._index[key])  # IMPL 2. with _index.
        except KeyError:
            return default
def keys(self):
"""
>>> from CMap import *
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> [int(x) for x in m.keys()] # m.keys() but guaranteed integers.
[4, 6]
"""
k = []
for key in self:
k.append(key)
return k
def values(self):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> m.values()
[7, 3]
"""
i = self.itervalues()
v = []
try:
while True:
v.append(i.next())
except StopIteration:
pass
return v
def items(self):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> [(int(x[0]),int(x[1])) for x in m.items()]
[(4, 7), (6, 3)]
"""
i = self.iteritems()
itms = []
try:
while True:
itms.append(i.next())
except StopIteration:
pass
return itms
def has_key(self, key):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> if m.has_key(4): print 'ok'
...
ok
>>> if not m.has_key(7): print 'ok'
...
ok
"""
try:
self[key]
except KeyError:
return False
return True
    def clear(self):
        """delete all entries

           >>> from CMap import CMap
           >>> m = CMap()
           >>> m[4] = 7
           >>> m.clear()
           >>> print len(m)
           0
        """
        # __del__ invalidates every live iterator and frees the C++ map;
        # a fresh empty map is then allocated in its place.
        self.__del__()
        self._smap = map_constructor()
    def copy(self):
        """return shallow copy"""
        # CMap.__init__ iterates our items() into the new map.
        return CMap(self)
    def lower_bound(self,key):
        """
        Finds smallest key equal to or above the lower bound.

        Takes O(log n) time.

        @param x: Key of (key, value) pair to be located.
        @return: Key Iterator pointing to first item equal to or greater
                 than key, or end() if no such item exists.

        >>> from CMap import CMap
        >>> m = CMap()
        >>> m[10] = 'foo'
        >>> m[15] = 'bar'
        >>> i = m.lower_bound(11)   # iterator.
        >>> int(i.key())
        15
        >>> i.value()
        'bar'

        Edge cases:
        >>> from CMap import CMap
        >>> m = CMap()
        >>> i = m.lower_bound(11)
        >>> if i == m.end(): print 'ok'
        ...
        ok
        >>> m[10] = 'foo'
        >>> i = m.lower_bound(11)
        >>> if i == m.end(): print 'ok'
        ...
        ok
        >>> i = m.lower_bound(9)
        >>> if i == m.begin(): print 'ok'
        ...
        ok
        """
        # swig's map_lower_bound returns end() when nothing qualifies.
        return CMap.KeyIterator(self, map_lower_bound( self._smap, key ))
    def upper_bound(self, key):
        """
        Finds largest key equal to or below the upper bound.  In keeping
        with the [begin,end) convention, the returned iterator
        actually points to the key one above the upper bound.

        Takes O(log n) time.

        @param x: Key of (key, value) pair to be located.
        @return: Iterator pointing to first element equal to or greater than
                 key, or end() if no such item exists.

        >>> from CMap import CMap
        >>> m = CMap()
        >>> m[10] = 'foo'
        >>> m[15] = 'bar'
        >>> m[17] = 'choo'
        >>> i = m.upper_bound(11)   # iterator.
        >>> i.value()
        'bar'

        Edge cases:
        >>> from CMap import CMap
        >>> m = CMap()
        >>> i = m.upper_bound(11)
        >>> if i == m.end(): print 'ok'
        ...
        ok
        >>> m[10] = 'foo'
        >>> i = m.upper_bound(9)
        >>> i.value()
        'foo'
        >>> i = m.upper_bound(11)
        >>> if i == m.end(): print 'ok'
        ...
        ok
        """
        return CMap.KeyIterator(self, map_upper_bound( self._smap, key ))
    def find(self,key):
        """
        Finds the item with matching key and returns a KeyIterator
        pointing at the item.  If no match is found then returns end().

        Takes O(log n) time.

        >>> from CMap import CMap
        >>> m = CMap()
        >>> i = m.find(10)
        >>> if i == m.end(): print 'ok'
        ...
        ok
        >>> m[10] = 'foo'
        >>> i = m.find(10)
        >>> int(i.key())
        10
        >>> i.value()
        'foo'
        """
        return CMap.KeyIterator(self, map_find_iter( self._smap, key ))
def update_key( self, iter, key ):
"""
Modifies the key of the item referenced by iter. If the
key change is small enough that no reordering occurs then
this takes amortized O(1) time. If a reordering occurs then
this takes O(log n).
WARNING!!! The passed iterator MUST be assumed to be invalid
upon return and should be deallocated.
Typical use:
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[8] = 'bar'
>>> i = m.find(10)
>>> m.update_key(i,7) # i is assumed to be invalid upon return.
>>> del i
>>> [(int(x[0]),x[1]) for x in m.items()] # reordering occurred.
[(7, 'foo'), (8, 'bar')]
>>> i = m.find(8)
>>> m.update_key(i,9) # no reordering.
>>> del i
>>> [(int(x[0]),x[1]) for x in m.items()]
[(7, 'foo'), (9, 'bar')]
Edge cases:
>>> i = m.find(7)
>>> i.value()
'foo'
>>> try: # update to key already in the map.
... m.update_key(i,9)
... except KeyError:
... print 'ok'
...
ok
>>> m[7]
'foo'
>>> i = m.iterkeys()
>>> try: # updating an iter pointing at BEGIN.
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
>>> i = m.end()
>>> try: # updating an iter pointing at end().
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
"""
assert isinstance(iter,CMap._AbstractIterator)
if iter._si == BEGIN:
raise IndexError( _("Iterator does not point at key-value pair") )
if self is not iter._map:
raise IndexError(_("Iterator points into a different CIndexedMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot update end() iterator.") )
map_iter_update_key(self._smap, iter._si, key)
    def append(self, key, value):
        """Performs an insertion with the hint that it probably should
           go at the end.

           Raises KeyError if the key is already in the map.

           >>> from CMap import CMap
           >>> m = CMap()
           >>> m.append(5.0,'foo')    # append to empty map.
           >>> len(m)
           1
           >>> [int(x) for x in m.keys()]  # see note (1)
           [5]
           >>> m.append(10.0, 'bar')  # append in-order
           >>> [(int(x[0]),x[1]) for x in m.items()]
           [(5, 'foo'), (10, 'bar')]
           >>> m.append(3.0, 'coo')   # out-of-order.
           >>> [(int(x[0]),x[1]) for x in m.items()]
           [(3, 'coo'), (5, 'foo'), (10, 'bar')]
           >>> try:
           ...     m.append(10.0, 'blah')  # append key already in map.
           ... except KeyError:
           ...     print 'ok'
           ...
           ok
           >>> [(int(x[0]),x[1]) for x in m.items()]
           [(3, 'coo'), (5, 'foo'), (10, 'bar')]
           >>>

           note (1): int(x[0]) is used because 5.0 can appear as either 5
           or 5.0 depending on the version of python.
        """
        # the C++ side uses the end() hint; raises KeyError on duplicates.
        map_append(self._smap,key,value)
class CIndexedMap(CMap):
"""This is an ordered mapping, exactly like CMap except that it
provides a cross-index allowing average O(1) searches based on value.
This adds the constraint that values must be unique.
Operation: Time Applicable
Complexity: Methods:
---------------------------------------------------
Item insertion: O(log n) append, __setitem__
Item deletion: O(log n + k) __delitem__, erase
Key search: O(log n) __getitem__, get, find,
__contains__
Value search: average O(1) as per dict
Iteration step: amortized O(1), next, prev
worst-case O(log n)
Memory: O(n)
n = number of elements in map. k = number of iterators pointing
into map. CIndexedMap assumes there are few iterators in existence
at any given time.
The hash table increases the factor in the
O(n) memory cost of the Map by a constant
"""
def __init__(self, dict={} ):
CMap.__init__(self,dict)
self._value_index = {} # cross-index. maps value->iterator.
    def __setitem__(self, key, value):
        """
        >>> from CMap import *
        >>> m = CIndexedMap()
        >>> m[6] = 'bar'
        >>> m[6]
        'bar'
        >>> int(m.get_key_by_value('bar'))
        6
        >>> try:
        ...     m[7] = 'bar'
        ... except ValueError:
        ...     print 'value error'
        value error
        >>> m[6] = 'foo'
        >>> m[6]
        'foo'
        >>> m[7] = 'bar'
        >>> m[7]
        'bar'
        >>> m[7] = 'bar'   # should not raise exception
        >>> m[7] = 'goo'
        >>> m.get_key_by_value('bar')   # should return None.
        >>>
        """
        # Keys must be numeric (C++ map); the assert vanishes under -O.
        assert type(key) == int or type(key) == float
        # Reject a value already cross-indexed under a *different* key.
        if self._value_index.has_key(value) and \
           iter_key(self._value_index[value]) != key:
            raise ValueError( _("Value %s already exists. Values must be "
                                "unique.") % str(value) )
        si = map_insert_iter(self._smap,key,value)  # si points where insert
                                                    # should occur whether
                                                    # insert succeeded or not.
                                                    # si == "swig iterator"
        sival = iter_value(si)
        if sival != value:       # if insert failed because k already exists
            iter_set(si,value)   # then force set.
            self._value_index[value] = si
            viter = self._value_index[sival]
            iter_delete(viter)   # remove old value from index
            del self._value_index[sival]
        else:                    # else insert succeeded so update index.
            self._value_index[value] = si
        #self._index[key] = si   # IMPL 2. with _index.
def __delitem__(self, key):
"""
>>> from CMap import CIndexedMap
>>> m = CIndexedMap()
>>> m[6] = 'bar'
>>> m[6]
'bar'
>>> int(m.get_key_by_value('bar'))
6
>>> del m[6]
>>> if m.get_key_by_value('bar'):
... print 'found'
... else:
... print 'not found.'
not found.
"""
i = map_find_iter( self._smap, key )
if map_iter_at_end( self._smap, i ):
iter_delete(i)
raise KeyError(key)
else:
value = iter_value(i)
for i in list(self._iterators):
if iter_cmp( self._smap, i._si, iter._si ) == 0:
i._invalidate()
map_iter_erase( self._smap, i )
viter = self._value_index[value]
iter_delete(i)
iter_delete( viter )
del self._value_index[value]
#del self._index[key] # IMPL 2. with _index.
assert map_size(self._smap) == len(self._value_index)
def has_value(self, value):
return self._value_index.has_key(value)
def get_key_by_value(self, value):
"""Returns the key cross-indexed from the passed unique value, or
returns None if the value is not in the map."""
si = self._value_index.get(value) # si == "swig iterator"
if si == None: return None
return iter_key(si)
    def append( self, key, value ):
        """See CMap.append

           >>> from CMap import CIndexedMap
           >>> m = CIndexedMap()
           >>> m.append(5,'foo')
           >>> [(int(x[0]),x[1]) for x in m.items()]
           [(5, 'foo')]
           >>> m.append(10, 'bar')
           >>> [(int(x[0]),x[1]) for x in m.items()]
           [(5, 'foo'), (10, 'bar')]
           >>> m.append(3, 'coo')    # out-of-order.
           >>> [(int(x[0]),x[1]) for x in m.items()]
           [(3, 'coo'), (5, 'foo'), (10, 'bar')]
           >>> int(m.get_key_by_value( 'bar' ))
           10
           >>> try:
           ...     m.append(10, 'blah')  # append key already in map.
           ... except KeyError:
           ...     print 'ok'
           ...
           ok
           >>> [(int(x[0]),x[1]) for x in m.items()]
           [(3, 'coo'), (5, 'foo'), (10, 'bar')]
           >>> try:
           ...     m.append(10, 'coo')   # append value already in map.
           ... except ValueError:
           ...     print 'ok'
           ...
           ok
        """
        # Enforce value uniqueness before touching the C++ map.
        if self._value_index.has_key(value) and \
           iter_key(self._value_index[value]) != key:
            raise ValueError(_("Value %s already exists and value must be "
                               "unique.") % str(value) )
        si = map_append_iter(self._smap,key,value)
        if iter_value(si) != value:
            # key already present: free the iterator before raising.
            iter_delete(si)
            raise KeyError(key)
        self._value_index[value] = si
def find_key_by_value(self, value):
"""Returns a key iterator cross-indexed from the passed unique value
or end() if no value found.
>>> from Map import *
>>> m = CIndexedMap()
>>> m[6] = 'abc'
>>> i = m.find_key_by_value('abc')
>>> int(i.key())
6
>>> i = m.find_key_by_value('xyz')
>>> if i == m.end(): print 'i points at end()'
i points at end()
"""
si = self._value_index.get(value) # si == "swig iterator."
if si != None:
si = iter_copy(si); # copy else operations like increment on the
# KeyIterator would modify the value index.
return CMap.KeyIterator(self,si)
    def copy(self):
        """return shallow copy"""
        # CIndexedMap(self) rebuilds both the map and the value cross-index.
        return CIndexedMap(self)
    def update_key( self, iter, key ):
        """
        see CMap.update_key.

        WARNING!! You MUST assume that the passed iterator is invalidated
        upon return.

        Typical use:
        >>> from CMap import CIndexedMap
        >>> m = CIndexedMap()
        >>> m[10] = 'foo'
        >>> m[8] = 'bar'
        >>> i = m.find(10)
        >>> m.update_key(i,7)   # i is assumed to be invalid upon return.
        >>> del i
        >>> int(m.get_key_by_value('foo'))
        7
        >>> [(int(x[0]),x[1]) for x in m.items()]   # reordering occurred.
        [(7, 'foo'), (8, 'bar')]
        >>> i = m.find(8)
        >>> m.update_key(i,9)   # no reordering.
        >>> del i
        >>> [(int(x[0]),x[1]) for x in m.items()]
        [(7, 'foo'), (9, 'bar')]

        Edge cases:
        >>> i = m.find(7)
        >>> i.value()
        'foo'
        >>> try:
        ...     m.update_key(i,9)
        ... except KeyError:
        ...     print 'ok'
        ...
        ok
        >>> m[7]
        'foo'
        >>> int(m.get_key_by_value('foo'))
        7
        >>> i = m.iterkeys()
        >>> try:                # updating an iter pointing at BEGIN.
        ...     m.update_key(i,10)
        ... except IndexError:
        ...     print 'ok'
        ...
        ok
        >>> i = m.end()
        >>> try:                # updating an iter pointing at end().
        ...     m.update_key(i,10)
        ... except IndexError:
        ...     print 'ok'
        ...
        ok
        """
        if not iter._si:
            raise RuntimeError( _("invalid iterator") )
        if iter._si == BEGIN:
            raise IndexError(_("Iterator does not point at key-value pair" ))
        if self is not iter._map:
            raise IndexError(_("Iterator points into a different "
                               "CIndexedMap."))
        if map_iter_at_end(self._smap, iter._si):
            raise IndexError( _("Cannot update end() iterator.") )
        si = map_iter_update_key_iter(self._smap, iter._si, key)
            # raises KeyError if key already in map.

        # NOTE(review): when a reorder occurs the cross-index must track the
        # fresh C++ iterator; presumably 'si != iter._si' compares swig
        # pointers correctly -- confirm against cmap_swig semantics.
        if si != iter._si:    # if map is reordered...
            value = iter.value();
            val_si = self._value_index[value]
            iter_delete(val_si)
            self._value_index[value] = si
def erase(self, iter):
"""Remove item pointed to by the iterator. Iterator is immediately
invalidated after the deletion completes."""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair." ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different "
"CIndexedMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot update end() iterator.") )
value = iter.value()
CMap.erase(self,iter)
del self._value_index[value]
if __name__ == "__main__":
    import doctest
    import random
    ##############################################
    # UNIT TESTS
    # Run the doctests embedded in this module's docstrings.
    # NOTE(review): relies on `sys` being imported at module level earlier
    # in this file — confirm.
    print "Testing module"
    doctest.testmod(sys.modules[__name__])
    print "doctest complete."
    ##############################################
    # MEMORY LEAK TESTS
    # Manual leak hunts: enable LEAK_TEST and uncomment exactly one TEST
    # block, then watch the process RSS from outside.
    if LEAK_TEST:
        i = 0
        import gc
        class X:
            x = range(1000)  # something moderately big.
        # TEST 1. This does not cause memory to grow.
        #m = CMap()
        #map_insert(m._smap,10,X())
        #while True:
        #    i += 1
        #    it = map_find_iter( m._smap, 10 )
        #    iter_delete(it)
        #    del it
        #    if i % 100 == 0:
        #        gc.collect()
        # TEST 2: This does not caus a memory leak.
        #m = map_constructor_double()
        #while True:
        #    i += 1
        #    map_insert_double(m,10,5)        # here
        #    it = map_find_iter_double( m, 10 )
        #    map_iter_erase_double( m, it )   # or here is the problem.
        #    iter_delete_double(it)
        #    del it
        #    #assert len(m) == 0
        #    assert map_size_double(m) == 0
        #    if i % 100 == 0:
        #        gc.collect()
        # TEST 3. No memory leak
        #m = CMap()
        #while True:
        #    i += 1
        #    map_insert(m._smap,10,X())   # here
        #    it = map_find_iter( m._smap, 10 )
        #    map_iter_erase( m._smap, it )  # or here is the problem.
        #    iter_delete(it)
        #    del it
        #    assert len(m) == 0
        #    assert map_size(m._smap) == 0
        #    if i % 100 == 0:
        #        gc.collect()
        # TEST 4: map creation and deletion.
        #while True:
        #    m = map_constructor()
        #    map_delete(m);
        # TEST 5: test iteration.
        #m = map_constructor()
        #for i in xrange(10):
        #    map_insert(m,i,X())
        #while True:
        #    i = map_begin(m)
        #    while not map_iter_at_begin(m,i):
        #        iter_incr(i)
        #    iter_delete(i)
        # TEST 6:
        #m = map_constructor()
        #for i in xrange(10):
        #    map_insert(m,i,X())
        #while True:
        #    map_find( m, random.randint(0,9) )
        # TEST 7:
        #m = map_constructor()
        #for i in xrange(50):
        #    map_insert( m, i, X() )
        #while True:
        #    for i in xrange(50):
        #        map_set( m, i, X() )
        # TEST 8
        # aha!  Another leak!  Fixed.
        #m = map_constructor()
        #while True:
        #    i += 1
        #    map_insert(m,10,X())
        #    map_erase(m,10)
        #    assert map_size(m) == 0
        # TEST 9
        # Active test: repeatedly move a key back and forth via swig
        # iterators; loops forever, watch memory from outside.
        m = map_constructor()
        for i in xrange(50):
            map_insert( m, i, X() )
        while True:
            it = map_find_iter( m, 5 )
            map_iter_update_key( m, it, 1000 )
            iter_delete(it)
            it = map_find_iter( m, 1000 )
            map_iter_update_key( m, it, 5)
            iter_delete(it)
|
import sys
import time
import re
import os
sys.path.append('/usr/lib/python')
from xen.util.xmlrpclib2 import ServerProxy
from optparse import *
from pprint import pprint
from types import DictType
from getpass import getpass
# Connection defaults; overridable via environment and the --server/--user/
# --password global options handled in main().
SERVER_URI = os.environ.get('XAPI_SERVER_URI', 'http://localhost:9363/')
SERVER_USER = os.environ.get('XAPI_SERVER_USER', '')
SERVER_PASS = os.environ.get('XAPI_SERVER_PASS', '')
MB = 1024 * 1024
# printf-style row templates, keyed by fields of the corresponding records.
HOST_INFO_FORMAT = '%-20s: %-50s'
VM_LIST_FORMAT = '%(name_label)-18s %(memory_actual)-5s %(VCPUs_number)-5s'\
                 ' %(power_state)-10s %(uuid)-36s'
SR_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(physical_size)-10s' \
                 '%(type)-10s'
VDI_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(virtual_size)-8s'
VBD_LIST_FORMAT = '%(device)-6s %(uuid)-36s %(VDI)-8s'
TASK_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(status)-8s %(progress)-4s'
VIF_LIST_FORMAT = '%(name)-8s %(device)-7s %(uuid)-36s %(MAC)-10s'
CONSOLE_LIST_FORMAT = '%(uuid)-36s %(protocol)-8s %(location)-32s'
# Subcommand name -> (argument usage string, one-line description).
# Used by usage() for the help table and by parse_args() for usage text.
COMMANDS = {
    'host-info': ('', 'Get Xen Host Info'),
    'host-set-name': ('', 'Set host name'),
    'pif-list': ('', 'List all PIFs'),
    'sr-list':   ('', 'List all SRs'),
    'vbd-list':  ('', 'List all VBDs'),
    'vbd-create': ('<domname> <pycfg> [opts]',
                   'Create VBD attached to domname'),
    'vdi-create': ('<pycfg> [opts]', 'Create a VDI'),
    'vdi-list'  : ('', 'List all VDI'),
    'vdi-rename': ('<vdi_uuid> <new_name>', 'Rename VDI'),
    'vdi-destroy': ('<vdi_uuid>', 'Delete VDI'),
    'vif-create': ('<domname> <pycfg>', 'Create VIF attached to domname'),
    'vtpm-create' : ('<domname> <pycfg>', 'Create VTPM attached to domname'),
    'vm-create': ('<pycfg>', 'Create VM with python config'),
    'vm-destroy': ('<domname>', 'Delete VM'),
    'vm-list':   ('[--long]', 'List all domains.'),
    'vm-name':   ('<uuid>', 'Name of UUID.'),
    'vm-shutdown': ('<name> [opts]', 'Shutdown VM with name'),
    'vm-start':  ('<name>', 'Start VM with name'),
    'vm-uuid':   ('<name>', 'UUID of a domain by name.'),
    'async-vm-start': ('<name>', 'Start VM asynchronously'),
}
# Per-subcommand optparse options: list of (option strings, add_option
# kwargs) pairs, consumed by parse_args().
OPTIONS = {
    'sr-list': [(('-l', '--long'),
                 {'action':'store_true',
                  'help':'List all properties of SR'})
               ],
    'vdi-list': [(('-l', '--long'),
                 {'action':'store_true',
                  'help':'List all properties of VDI'})
               ],
    'vif-list': [(('-l', '--long'),
                 {'action':'store_true',
                  'help':'List all properties of VIF'})
               ],
    'vm-list': [(('-l', '--long'),
                 {'action':'store_true',
                  'help':'List all properties of VMs'})
               ],
    'vm-shutdown': [(('-f', '--force'), {'help': 'Shutdown Forcefully',
                                         'action': 'store_true'})],
    'vdi-create': [(('--name-label',), {'help': 'Name for VDI'}),
                   (('--name-description',), {'help': 'Description for VDI'}),
                   (('--virtual-size',), {'type': 'int',
                                          'default': 0,
                                          'help': 'Size of VDI in bytes'}),
                   (('--type',), {'choices': ['system', 'user', 'ephemeral'],
                                  'default': 'system',
                                  'help': 'VDI type'}),
                   (('--sharable',), {'action': 'store_true',
                                      'help': 'VDI sharable'}),
                   (('--read-only',), {'action': 'store_true',
                                       'help': 'Read only'}),
                   (('--sr',), {})],
    'vbd-create': [(('--VDI',), {'help': 'UUID of VDI to attach to.'}),
                   (('--mode',), {'choices': ['RO', 'RW'],
                                  'help': 'device mount mode'}),
                   (('--driver',), {'choices':['paravirtualised', 'ioemu'],
                                    'help': 'Driver for VBD'}),
                   (('--device',), {'help': 'Device name on guest domain'})]
}
class OptionError(Exception):
    """Raised when a subcommand is invoked with missing/invalid arguments."""
    pass
class XenAPIError(Exception):
    """Raised when a XenAPI call returns an ErrorDescription response."""
    pass
class IterableValues(Values):
    """optparse Values that iterate as (name, value) pairs.

    Private attributes (leading underscore) and callables are skipped, so
    iteration yields only real option settings.
    """
    def __iter__(self):
        for name in self.__dict__:
            val = self.__dict__[name]
            if name.startswith('_') or callable(val):
                continue
            yield name, val
def parse_args(cmd_name, args, set_defaults = False):
    """Parse *args* for subcommand *cmd_name*.

    Builds an OptionParser from COMMANDS (usage/description) and OPTIONS
    (per-command flags) and returns (options, positional_args).  With
    set_defaults, the returned options are pre-populated with the parser's
    default values.
    """
    usage_args, description = COMMANDS[cmd_name]
    parser = OptionParser(usage = 'xapi %s %s' % (cmd_name, usage_args),
                          description = description)
    for optargs, optkwds in OPTIONS.get(cmd_name, []):
        parser.add_option(*optargs, **optkwds)
    if set_defaults:
        defaults = IterableValues(parser.get_default_values().__dict__)
    else:
        defaults = IterableValues()
    opts, extraargs = parser.parse_args(args = list(args), values = defaults)
    return opts, extraargs
def execute(server, fn, args, async = False):
    """Call XMLRPC method *fn* on *server* with the tuple *args* and unwrap
    the response dict, returning its 'Value' entry.

    With async true, the call is routed via server.Async.<fn>.
    Raises TypeError on a non-dict response and XenAPIError when the
    response carries an ErrorDescription instead of a Value.
    """
    # NOTE(review): eval() is used only on method names supplied by
    # in-module callers; evaluating an externally controlled name here
    # would be an injection risk.
    if async:
        func = eval('server.Async.%s' % fn)
    else:
        func = eval('server.%s' % fn)
    result = func(*args)
    if type(result) != DictType:
        raise TypeError("Function returned object of type: %s" %
                        str(type(result)))
    if 'Value' not in result:
        raise XenAPIError(*result['ErrorDescription'])
    return result['Value']
# Lazily-initialised connection state shared through connect().
_initialised = False
_server = None
_session = None
def connect(*args):
    """Return (server_proxy, session_uuid), logging in on first use.

    First attempts the credentials from the environment; on any failure it
    falls back to prompting interactively for login and password.  The
    connection is cached in module globals for subsequent calls.
    """
    global _server, _session, _initialised
    if not _initialised:
        # try without password or default credentials
        try:
            _server = ServerProxy(SERVER_URI)
            _session = execute(_server.session, 'login_with_password',
                               (SERVER_USER, SERVER_PASS))
        # NOTE(review): the bare except deliberately treats any failure as
        # "need interactive credentials", but it also hides real errors
        # such as an unreachable server.
        except:
            login = raw_input("Login: ")
            password = getpass()
            creds = (login, password)
            _server = ServerProxy(SERVER_URI)
            _session = execute(_server.session, 'login_with_password',
                               creds)
        _initialised = True
    return (_server, _session)
def _stringify(adict):
return dict([(k, str(v)) for k, v in adict.items()])
def _read_python_cfg(filename):
    """Evaluate *filename* as Python and return the names it defines."""
    cfg = {}
    # SECURITY: execfile runs arbitrary code from the config file — only
    # safe for trusted, locally-authored configuration.
    execfile(filename, {}, cfg)
    return cfg
def resolve_vm(server, session, vm_name):
    """Return the UUID of the VM labelled *vm_name*, or None when absent."""
    uuids = execute(server, 'VM.get_by_name_label', (session, vm_name))
    return uuids[0] if uuids else None
def resolve_vdi(server, session, vdi_name):
    """Return the UUID of the VDI labelled *vdi_name*, or None when absent."""
    uuids = execute(server, 'VDI.get_by_name_label', (session, vdi_name))
    return uuids[0] if uuids else None
def xapi_host_info(args, async = False):
    """Print name/version/CPU/VM summary and per-CPU utilisation per host."""
    server, session = connect()
    hosts = execute(server, 'host.get_all', (session,))
    for host in hosts: # there is only one, but ..
        hostinfo = execute(server, 'host.get_record', (session, host))
        print HOST_INFO_FORMAT % ('Name', hostinfo['name_label'])
        print HOST_INFO_FORMAT % ('Version', hostinfo['software_version'])
        print HOST_INFO_FORMAT % ('CPUs', len(hostinfo['host_CPUs']))
        print HOST_INFO_FORMAT % ('VMs', len(hostinfo['resident_VMs']))
        print HOST_INFO_FORMAT % ('UUID', host)
        for host_cpu_uuid in hostinfo['host_CPUs']:
            host_cpu = execute(server, 'host_cpu.get_record',
                               (session, host_cpu_uuid))
            print 'CPU %s Util: %.2f' % (host_cpu['number'],
                                         float(host_cpu['utilisation']))
def xapi_host_set_name(args, async = False):
    """Set the name label of the first host and echo the result."""
    if len(args) < 1:
        raise OptionError("No hostname specified")
    server, session = connect()
    hosts = execute(server, 'host.get_all', (session,))
    if len(hosts) > 0:
        execute(server, 'host.set_name_label', (session, hosts[0], args[0]))
        print 'Hostname: %s' % execute(server, 'host.get_name_label',
                                       (session, hosts[0]))
def xapi_vm_uuid(args, async = False):
    """Print the UUID of the domain with the given name label."""
    if len(args) < 1:
        raise OptionError("No domain name specified")
    server, session = connect()
    vm_uuid = resolve_vm(server, session, args[0])
    print vm_uuid
def xapi_vm_name(args, async = False):
    """Print the name label of the domain with the given UUID."""
    if len(args) < 1:
        raise OptionError("No UUID specified")
    server, session = connect()
    vm_name = execute(server, 'VM.get_name_label', (session, args[0]))
    print vm_name
def xapi_vm_list(args, async = False):
    """List VMs: a table by default, full records (with attached VBD/VIF/
    VTPM records inlined) when --long is given.  Any positional args act
    as a name filter."""
    opts, args = parse_args('vm-list', args, set_defaults = True)
    is_long = opts and opts.long
    list_only = args
    server, session = connect()
    vm_uuids = execute(server, 'VM.get_all', (session,))
    if not is_long:
        print VM_LIST_FORMAT % {'name_label':'Name',
                                'memory_actual':'Mem',
                                'VCPUs_number': 'VCPUs',
                                'power_state': 'State',
                                'uuid': 'UUID'}
    for uuid in vm_uuids:
        vm_info = execute(server, 'VM.get_record', (session, uuid))
        # skip domain if we don't want
        if list_only and vm_info['name_label'] not in list_only:
            continue
        if is_long:
            # Replace the lists of device UUIDs with their full records so
            # the pprint output is self-contained.
            vbds = vm_info['VBDs']
            vifs = vm_info['VIFs']
            vtpms = vm_info['VTPMs']
            vif_infos = []
            vbd_infos = []
            vtpm_infos = []
            for vbd in vbds:
                vbd_info = execute(server, 'VBD.get_record', (session, vbd))
                vbd_infos.append(vbd_info)
            for vif in vifs:
                vif_info = execute(server, 'VIF.get_record', (session, vif))
                vif_infos.append(vif_info)
            for vtpm in vtpms:
                vtpm_info = execute(server, 'VTPM.get_record', (session, vtpm))
                vtpm_infos.append(vtpm_info)
            vm_info['VBDs'] = vbd_infos
            vm_info['VIFs'] = vif_infos
            vm_info['VTPMs'] = vtpm_infos
            pprint(vm_info)
        else:
            print VM_LIST_FORMAT % _stringify(vm_info)
def xapi_vm_create(args, async = False):
    """Create a VM from a python config file: xapi vm-create <pycfg>."""
    if len(args) < 1:
        raise OptionError("Configuration file not specified")
    filename = args[0]
    cfg = _read_python_cfg(filename)
    print 'Creating VM from %s ..' % filename
    server, session = connect()
    uuid = execute(server, 'VM.create', (session, cfg), async = async)
    print 'Done. (%s)' % uuid
    print uuid
def xapi_vm_destroy(args, async = False):
    """Destroy the VM with the given name label."""
    if len(args) < 1:
        raise OptionError("No domain name specified.")
    server, session = connect()
    vm_uuid = resolve_vm(server, session, args[0])
    print 'Destroying VM %s (%s)' % (args[0], vm_uuid)
    success = execute(server, 'VM.destroy', (session, vm_uuid), async = async)
    print 'Done.'
def xapi_vm_start(args, async = False):
    """Start the named VM, optionally as an asynchronous task."""
    if len(args) < 1:
        raise OptionError("No Domain name specified.")
    server, session = connect()
    vm_uuid = resolve_vm(server, session, args[0])
    print 'Starting VM %s (%s)' % (args[0], vm_uuid)
    # NOTE(review): the trailing False is presumably start_paused — confirm
    # against the VM.start API signature.
    success = execute(server, 'VM.start', (session, vm_uuid, False), async = async)
    if async:
        print 'Task started: %s' % success
    else:
        print 'Done.'
def xapi_vm_suspend(args, async = False):
    """Suspend the named VM."""
    if len(args) < 1:
        raise OptionError("No Domain name specified.")
    server, session = connect()
    vm_uuid = resolve_vm(server, session, args[0])
    print 'Suspending VM %s (%s)' % (args[0], vm_uuid)
    success = execute(server, 'VM.suspend', (session, vm_uuid), async = async)
    if async:
        print 'Task started: %s' % success
    else:
        print 'Done.'
def xapi_vm_resume(args, async = False):
    """Resume the named (suspended) VM."""
    if len(args) < 1:
        raise OptionError("No Domain name specified.")
    server, session = connect()
    vm_uuid = resolve_vm(server, session, args[0])
    print 'Resuming VM %s (%s)' % (args[0], vm_uuid)
    # NOTE(review): the trailing False is presumably start_paused — confirm
    # against the VM.resume API signature.
    success = execute(server, 'VM.resume', (session, vm_uuid, False), async = async)
    if async:
        print 'Task started: %s' % success
    else:
        print 'Done.'
def xapi_vm_pause(args, async = False):
    """Pause the named VM."""
    if len(args) < 1:
        raise OptionError("No Domain name specified.")
    server, session = connect()
    vm_uuid = resolve_vm(server, session, args[0])
    print 'Pausing VM %s (%s)' % (args[0], vm_uuid)
    success = execute(server, 'VM.pause', (session, vm_uuid), async = async)
    if async:
        print 'Task started: %s' % success
    else:
        print 'Done.'
def xapi_vm_unpause(args, async = False):
if len(args) < 1:
raise OptionError("No Domain name specified.")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
print 'Pausing VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.unpause', (session, vm_uuid), async = async)
if async:
print 'Task started: %s' % success
else:
print 'Done.'
def xapi_task_list(args, async = False):
    """List all tasks with their status and progress."""
    server, session = connect()
    all_tasks = execute(server, 'task.get_all', (session,))
    print TASK_LIST_FORMAT % {'name_label': 'Task Name',
                              'uuid': 'UUID',
                              'status': 'Status',
                              'progress': '%'}
    for task_uuid in all_tasks:
        task = execute(server, 'task.get_record', (session, task_uuid))
        print TASK_LIST_FORMAT % task
def xapi_task_clear(args, async = False):
    """Destroy every task record on the server."""
    server, session = connect()
    all_tasks = execute(server, 'task.get_all', (session,))
    for task_uuid in all_tasks:
        success = execute(server, 'task.destroy', (session, task_uuid))
        print 'Destroyed Task %s' % task_uuid
def xapi_vm_shutdown(args, async = False):
    """Cleanly shut down the named VM, or hard-shutdown with --force."""
    opts, args = parse_args("vm-shutdown", args, set_defaults = True)
    if len(args) < 1:
        raise OptionError("No Domain name specified.")
    server, session = connect()
    vm_uuid = resolve_vm(server, session, args[0])
    if opts.force:
        print 'Forcefully shutting down VM %s (%s)' % (args[0], vm_uuid)
        success = execute(server, 'VM.hard_shutdown', (session, vm_uuid), async = async)
    else:
        print 'Shutting down VM %s (%s)' % (args[0], vm_uuid)
        success = execute(server, 'VM.clean_shutdown', (session, vm_uuid), async = async)
    if async:
        print 'Task started: %s' % success
    else:
        print 'Done.'
def xapi_vbd_create(args, async = False):
    """Create a VBD on a domain from a python config plus CLI overrides."""
    opts, args = parse_args('vbd-create', args)
    if len(args) < 2:
        raise OptionError("Configuration file and domain not specified")
    domname = args[0]
    # NOTE(review): the guard above guarantees len(args) > 1, so the else
    # branch below is unreachable as written.
    if len(args) > 1:
        filename = args[1]
        cfg = _read_python_cfg(filename)
    else:
        cfg = {}
    # CLI options override / extend the file-based config.
    for opt, val in opts:
        cfg[opt] = val
    print 'Creating VBD ...',
    server, session = connect()
    vm_uuid = resolve_vm(server, session, domname)
    cfg['VM'] = vm_uuid
    vbd_uuid = execute(server, 'VBD.create', (session, cfg), async = async)
    if async:
        print 'Task started: %s' % vbd_uuid
    else:
        print 'Done. (%s)' % vbd_uuid
def xapi_vif_create(args, async = False):
    """Create a VIF on a domain from a python config file."""
    if len(args) < 2:
        raise OptionError("Configuration file not specified")
    domname = args[0]
    filename = args[1]
    cfg = _read_python_cfg(filename)
    print 'Creating VIF from %s ..' % filename
    server, session = connect()
    vm_uuid = resolve_vm(server, session, domname)
    cfg['VM'] = vm_uuid
    vif_uuid = execute(server, 'VIF.create', (session, cfg), async = async)
    if async:
        print 'Task started: %s' % vif_uuid
    else:
        print 'Done. (%s)' % vif_uuid
def xapi_vbd_list(args, async = False):
    """List the VBDs attached to the named domain."""
    server, session = connect()
    domname = args[0]
    dom_uuid = resolve_vm(server, session, domname)
    vbds = execute(server, 'VM.get_VBDs', (session, dom_uuid))
    print VBD_LIST_FORMAT % {'device': 'Device',
                             'uuid' : 'UUID',
                             'VDI': 'VDI'}
    for vbd in vbds:
        vbd_struct = execute(server, 'VBD.get_record', (session, vbd))
        print VBD_LIST_FORMAT % vbd_struct
def xapi_vbd_stats(args, async = False):
    """Print the read-KB/s figure for each VBD of the named domain."""
    server, session = connect()
    domname = args[0]
    dom_uuid = resolve_vm(server, session, domname)
    vbds = execute(server, 'VM.get_VBDs', (session, dom_uuid))
    for vbd_uuid in vbds:
        print execute(server, 'VBD.get_io_read_kbs', (session, vbd_uuid))
def xapi_vif_list(args, async = False):
    """List the VIFs attached to the named domain (--long for full records)."""
    server, session = connect()
    # NOTE(review): reuses the 'vdi-list' option table — only the shared
    # -l/--long flag matters, but -h would print the wrong usage line.
    # ('vif-list' has OPTIONS but no COMMANDS entry, so it cannot be used
    # here without also adding it to COMMANDS.)
    opts, args = parse_args('vdi-list', args, set_defaults = True)
    is_long = opts and opts.long
    domname = args[0]
    dom_uuid = resolve_vm(server, session, domname)
    vifs = execute(server, 'VM.get_VIFs', (session, dom_uuid))
    if not is_long:
        print VIF_LIST_FORMAT % {'name': 'Name',
                                 'device': 'Device',
                                 'uuid' : 'UUID',
                                 'MAC': 'MAC'}
        for vif in vifs:
            vif_struct = execute(server, 'VIF.get_record', (session, vif))
            print VIF_LIST_FORMAT % vif_struct
    else:
        for vif in vifs:
            vif_struct = execute(server, 'VIF.get_record', (session, vif))
            pprint(vif_struct)
def xapi_console_list(args, async = False):
    """List the consoles of the named domain (--long for full records)."""
    server, session = connect()
    # NOTE(review): reuses the 'vdi-list' option table for its -l/--long
    # flag; there is no dedicated 'console-list' COMMANDS/OPTIONS entry.
    opts, args = parse_args('vdi-list', args, set_defaults = True)
    is_long = opts and opts.long
    domname = args[0]
    dom_uuid = resolve_vm(server, session, domname)
    consoles = execute(server, 'VM.get_consoles', (session, dom_uuid))
    if not is_long:
        print CONSOLE_LIST_FORMAT % {'protocol': 'Protocol',
                                     'location': 'Location',
                                     'uuid': 'UUID'}
        for console in consoles:
            console_struct = execute(server, 'console.get_record',
                                     (session, console))
            print CONSOLE_LIST_FORMAT % console_struct
    else:
        for console in consoles:
            console_struct = execute(server, 'console.get_record',
                                     (session, console))
            pprint(console_struct)
def xapi_vdi_list(args, async = False):
    """List all VDIs (--long for full records)."""
    opts, args = parse_args('vdi-list', args, set_defaults = True)
    is_long = opts and opts.long
    server, session = connect()
    vdis = execute(server, 'VDI.get_all', (session,))
    if not is_long:
        print VDI_LIST_FORMAT % {'name_label': 'VDI Label',
                                 'uuid' : 'UUID',
                                 'virtual_size': 'Bytes'}
        for vdi in vdis:
            vdi_struct = execute(server, 'VDI.get_record', (session, vdi))
            print VDI_LIST_FORMAT % vdi_struct
    else:
        for vdi in vdis:
            vdi_struct = execute(server, 'VDI.get_record', (session, vdi))
            pprint(vdi_struct)
def xapi_sr_list(args, async = False):
    """List all SRs (--long for full records); sizes shown in whole MB."""
    opts, args = parse_args('sr-list', args, set_defaults = True)
    is_long = opts and opts.long
    server, session = connect()
    srs = execute(server, 'SR.get_all', (session,))
    if not is_long:
        print SR_LIST_FORMAT % {'name_label': 'SR Label',
                                'uuid' : 'UUID',
                                'physical_size': 'Size (MB)',
                                'type': 'Type'}
        for sr in srs:
            sr_struct = execute(server, 'SR.get_record', (session, sr))
            # Integer division converts the byte count to whole MB.
            sr_struct['physical_size'] = int(sr_struct['physical_size'])/MB
            print SR_LIST_FORMAT % sr_struct
    else:
        for sr in srs:
            sr_struct = execute(server, 'SR.get_record', (session, sr))
            pprint(sr_struct)
def xapi_sr_rename(args, async = False):
server, session = connect()
sr = execute(server, 'SR.get_by_name_label', (session, args[0]))
execute(server, 'SR.set_name_label', (session, sr[0], args[1]))
def xapi_vdi_create(args, async = False):
    """Create a VDI from an optional python config plus CLI overrides;
    the target SR defaults to the first SR on the server."""
    opts, args = parse_args('vdi-create', args)
    if len(args) > 0:
        cfg = _read_python_cfg(args[0])
    else:
        cfg = {}
    # CLI options override / extend the file-based config.
    for opt, val in opts:
        cfg[opt] = val
    server, session = connect()
    srs = []
    # NOTE(review): the --sr option is stored under key 'sr', so this 'SR'
    # lookup can only be satisfied by the python config file — confirm
    # whether --sr was meant to feed this branch.
    if cfg.get('SR'):
        srs = execute(server, 'SR.get_by_name_label', (session, cfg['SR']))
    else:
        srs = execute(server, 'SR.get_all', (session,))
    sr = srs[0]
    cfg['SR'] = sr
    size = cfg['virtual_size']/MB
    print 'Creating VDI of size: %dMB ..' % size,
    uuid = execute(server, 'VDI.create', (session, cfg), async = async)
    if async:
        print 'Task started: %s' % uuid
    else:
        print 'Done. (%s)' % uuid
def xapi_vdi_destroy(args, async = False):
    """Delete the VDI with the given UUID."""
    server, session = connect()
    if len(args) < 1:
        raise OptionError('Not enough arguments')
    vdi_uuid = args[0]
    print 'Deleting VDI %s' % vdi_uuid
    result = execute(server, 'VDI.destroy', (session, vdi_uuid), async = async)
    if async:
        print 'Task started: %s' % result
    else:
        print 'Done.'
def xapi_vdi_rename(args, async = False):
server, session = connect()
if len(args) < 2:
raise OptionError('Not enough arguments')
vdi_uuid = execute(server, 'VDI.get_by_name_label', session, args[0])
vdi_name = args[1]
print 'Renaming VDI %s to %s' % (vdi_uuid[0], vdi_name)
result = execute(server, 'VDI.set_name_label',
(session, vdi_uuid[0], vdi_name), async = async)
if async:
print 'Task started: %s' % result
else:
print 'Done.'
def xapi_vtpm_create(args, async = False):
    """Create a VTPM on a domain from a python config file."""
    server, session = connect()
    domname = args[0]
    cfg = _read_python_cfg(args[1])
    vm_uuid = resolve_vm(server, session, domname)
    cfg['VM'] = vm_uuid
    print "Creating vTPM with cfg = %s" % cfg
    vtpm_uuid = execute(server, 'VTPM.create', (session, cfg))
    print "Done. (%s)" % vtpm_uuid
def xapi_pif_list(args, async = False):
    """Print the raw record of every PIF."""
    server, session = connect()
    pif_uuids = execute(server, 'PIF.get_all', (session,))
    for pif_uuid in pif_uuids:
        pif = execute(server, 'PIF.get_record', (session, pif_uuid))
        print pif
def xapi_debug_wait(args, async = False):
    """Start a server-side debug wait task (default 10 seconds)."""
    secs = 10
    if len(args) > 0:
        secs = int(args[0])
    server, session = connect()
    task_uuid = execute(server, 'debug.wait', (session, secs), async=async)
    print 'Task UUID: %s' % task_uuid
def xapi_vm_stat(args, async = False):
    """Print per-VCPU utilisation and per-VIF/VBD I/O rates for a domain."""
    domname = args[0]
    server, session = connect()
    vm_uuid = resolve_vm(server, session, domname)
    vif_uuids = execute(server, 'VM.get_VIFs', (session, vm_uuid))
    vbd_uuids = execute(server, 'VM.get_VBDs', (session, vm_uuid))
    vcpus_utils = execute(server, 'VM.get_VCPUs_utilisation',
                          (session, vm_uuid))
    for vcpu_num in sorted(vcpus_utils.keys()):
        print 'CPU %s : %5.2f%%' % (vcpu_num, vcpus_utils[vcpu_num] * 100)
    for vif_uuid in vif_uuids:
        vif = execute(server, 'VIF.get_record', (session, vif_uuid))
        print '%(device)s: rx: %(io_read_kbs)10.2f tx: %(io_write_kbs)10.2f' \
              % vif
    for vbd_uuid in vbd_uuids:
        vbd = execute(server, 'VBD.get_record', (session, vbd_uuid))
        print '%(device)s: rd: %(io_read_kbs)10.2f wr: %(io_write_kbs)10.2f' \
              % vbd
import cmd
import shlex
class XenAPICmd(cmd.Cmd):
    """Interactive shell that dispatches each input line to the matching
    module-level xapi_* function (an 'async-' prefix routes through the
    asynchronous call path)."""
    def __init__(self, server, session):
        cmd.Cmd.__init__(self)
        self.server = server
        self.session = session
        self.prompt = ">>> "
    def default(self, line):
        # Resolve 'foo-bar' / 'async-foo-bar' to xapi_foo_bar and call it.
        words = shlex.split(line)
        if len(words) > 0:
            cmd_name = words[0].replace('-', '_')
            is_async = 'async' in cmd_name
            if is_async:
                cmd_name = re.sub('async_', '', cmd_name)
            func_name = 'xapi_%s' % cmd_name
            func = globals().get(func_name)
            if func:
                try:
                    args = tuple(words[1:])
                    func(args, async = is_async)
                    return True
                except SystemExit:
                    # optparse calls sys.exit() on -h/usage errors; swallow
                    # it so the shell keeps running.
                    return False
                except OptionError, e:
                    print 'Error:', str(e)
                    return False
                except Exception, e:
                    import traceback
                    traceback.print_exc()
                    return False
        print '*** Unknown command: %s' % words[0]
        return False
    def do_EOF(self, line):
        # Ctrl-D exits the shell cleanly.
        print
        sys.exit(0)
    def do_help(self, line):
        # Replace cmd.Cmd's help with the subcommand table.
        usage(print_usage = False)
    def emptyline(self):
        # Do nothing (default cmd.Cmd behaviour repeats the last command).
        pass
    def postcmd(self, stop, line):
        # Never stop the loop from a command's return value.
        return False
    def precmd(self, line):
        # Normalise 'foo-bar args' to 'foo_bar args' before dispatch.
        words = shlex.split(line)
        if len(words) > 0:
            words0 = words[0].replace('-', '_')
            return ' '.join([words0] + words[1:])
        else:
            return line
def shell():
    """Run the interactive XenAPICmd loop on a fresh connection."""
    server, session = connect()
    x = XenAPICmd(server, session)
    x.cmdloop('Xen API Prompt. Type "help" for a list of functions')
def usage(command = None, print_usage = True):
    """Print the subcommand table, or detailed help for one *command*.

    Without a command: list every xapi_* function found in globals(),
    described via COMMANDS.  With a command: delegate to parse_args with
    '-h' so optparse prints that command's option help (and exits).
    """
    if not command:
        if print_usage:
            print 'Usage: xapi <subcommand> [options] [args]'
            print
            print 'Subcommands:'
            print
        for func in sorted(globals().keys()):
            if func.startswith('xapi_'):
                command = func[5:].replace('_', '-')
                args, description = COMMANDS.get(command, ('', ''))
                print '%-16s  %-40s' % (command, description)
        print
    else:
        parse_args(command, ['-h'])
def main(args):
    """Entry point: peel off global options, then dispatch to xapi_<subcmd>.

    Recognised globals: --help/-h, --server/-s, --user/-u, --password/-p;
    everything else is passed through to the subcommand.  'async-<cmd>'
    invokes <cmd> with async=True; 'shell' starts the interactive loop.
    """
    # poor man's optparse that doesn't abort on unrecognised opts
    options = {}
    remaining = []
    arg_n = 0  # NOTE(review): never used below.
    while args:
        arg = args.pop(0)
        if arg in ('--help', '-h'):
            options['help'] = True
        elif arg in ('--server', '-s') and args:
            options['server'] = args.pop(0)
        elif arg in ('--user', '-u') and args:
            options['user'] = args.pop(0)
        elif arg in ('--password', '-p') and args:
            options['password'] = args.pop(0)
        else:
            remaining.append(arg)
    # abort here if these conditions are true
    if options.get('help') and not remaining:
        usage()
        sys.exit(1)
    if options.get('help') and remaining:
        usage(remaining[0])
        sys.exit(1)
    if not remaining:
        usage()
        sys.exit(1)
    if options.get('server'):
        # it is ugly to use a global, but it is simple
        global SERVER_URI
        SERVER_URI = options['server']
    if options.get('user'):
        global SERVER_USER
        SERVER_USER = options['user']
    if options.get('password'):
        global SERVER_PASS
        SERVER_PASS = options['password']
    subcmd = remaining[0].replace('-', '_')
    is_async = 'async' in subcmd
    if is_async:
        subcmd = re.sub('async_', '', subcmd)
    subcmd_func_name = 'xapi_' + subcmd
    subcmd_func = globals().get(subcmd_func_name, None)
    if subcmd == 'shell':
        shell()
    elif not subcmd_func or not callable(subcmd_func):
        print 'Error: Unable to find subcommand \'%s\'' % subcmd
        usage()
        sys.exit(1)
    try:
        subcmd_func(remaining[1:], async = is_async)
    except XenAPIError, e:
        print 'Error: %s' % str(e.args[0])
        sys.exit(2)
    except OptionError, e:
        print 'Error: %s' % e
        sys.exit(0)
if __name__ == "__main__":
    # sys is already imported at module top; this re-import is harmless.
    import sys
    main(sys.argv[1:])
|
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'ismail@kartoza.com'
__revision__ = '$Format:%H$'
__date__ = '10/12/15'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import json
from types import NoneType
from safe.common.exceptions import MetadataCastError
from safe.metadata.property import BaseProperty
class ListProperty(BaseProperty):
    """A property that accepts list input."""

    # if you edit this you need to adapt accordingly xml_value and is_valid
    _allowed_python_types = [list, NoneType]

    def __init__(self, name, value, xml_path):
        """Constructor: delegate to BaseProperty with the allowed types."""
        super(ListProperty, self).__init__(
            name, value, xml_path, self._allowed_python_types)

    @classmethod
    def is_valid(cls, value):
        """Any value is accepted; type checking happens in BaseProperty."""
        return True

    def cast_from_str(self, value):
        """Deserialize a JSON string into a list.

        :raises MetadataCastError: if *value* is not valid JSON.
        """
        try:
            return json.loads(value)
        except ValueError as e:
            raise MetadataCastError(e)

    @property
    def xml_value(self):
        """The value serialized for XML: JSON for lists, '' for None."""
        if self.python_type is list:
            return json.dumps(self.value)
        elif self.python_type is NoneType:
            return ''
        else:
            # BUG FIX: the two literals previously concatenated without a
            # space ("...self.xml_valueare out of sync...").
            raise RuntimeError('self._allowed_python_types and self.xml_value '
                               'are out of sync. This should never happen')
|
from django.conf.urls.defaults import *
"""
Also used in cms.tests.ApphooksTestCase
"""
# Sample-app URL table in the pre-Django-1.8 style: patterns() with string
# view names resolved against the given views-module prefix; the extra
# dict / kwargs argument is passed to sample_view as keyword arguments.
urlpatterns = patterns('cms.test_utils.project.sampleapp.views',
    url(r'^$', 'sample_view', {'message': 'sample root page',}, name='sample-root'),
    url(r'^settings/$', 'sample_view', kwargs={'message': 'sample settings page'}, name='sample-settings'),
    url(r'^account/$', 'sample_view', {'message': 'sample account page'}, name='sample-account'),
    url(r'^account/my_profile/$', 'sample_view', {'message': 'sample my profile page'}, name='sample-profile'),
    url(r'^(?P<id>[0-9]+)/$', 'category_view', name='category_view'),
    url(r'^notfound/$', 'notfound', name='notfound'),
    url(r'^extra_1/$', 'extra_view', {'message': 'test urlconf'}, name='extra_first'),
    # Catch-all include: additional sample URLs live in a separate module.
    url(r'^', include('cms.test_utils.project.sampleapp.urls_extra')),
)
|
import os
# Bail out early — before importing the CI helpers — when this build is not
# for a pull request (PR branches are named 'PR-<n>').
if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'):
    print("Not a pull request. Exiting now.")
    exit(0)
import subprocess
import gh_post
# Patches larger than this (bytes) are linked rather than inlined.
SIZELIMIT = 10000
# Marker used to find and delete this bot's previous comments.
TOKEN_ESPRESSO_CI = 'style.patch'
gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI)
MESSAGE = '''Your pull request does not meet our code formatting \
rules. {header}, please do one of the following:
- You can download a patch with my suggested changes \
[here]({url}/artifacts/raw/style.patch), inspect it and make \
changes manually.
- You can directly apply it to your repository by running \
`curl {url}/artifacts/raw/style.patch | git apply -`.
- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding \
style. This is the same command that I have executed to generate the patch \
above, but it requires certain tools to be installed on your computer.
You can run `gitlab-runner exec docker style` afterwards to check if your \
changes worked out properly.
Please note that there are often multiple ways to correctly format code. \
As I am just a robot, I sometimes fail to identify the most aesthetically \
pleasing way. So please look over my suggested changes and adapt them \
where the style does not make sense.\
'''
# git diff-index exits non-zero when the working tree differs from HEAD,
# i.e. when the style fixer changed files earlier in this CI job.
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
    patch = subprocess.check_output(['git', '--no-pager', 'diff'])
    if len(patch) <= SIZELIMIT:
        # Small patch: inline it in the comment (backticks escaped so the
        # markdown diff block is not broken).
        comment = 'Specifically, I suggest you make the following changes:'
        comment += '\n```diff\n'
        comment += patch.decode('utf-8').replace('`', r'\`').strip()
        comment += '\n```\n'
        comment += 'To apply these changes'
    else:
        comment = 'To fix this'
    comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL)
    if patch:
        # Sanity check: the deletion token must appear in what we post, so
        # the next run can find and remove this comment.
        assert TOKEN_ESPRESSO_CI in comment
        gh_post.post_message(comment)
|
# Module metadata consumed by Ansible's documentation/build tooling.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'committer',
                    'version': '1.0'}
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: false
choices: ['present', 'absent']
default: present
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: true
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
placement_group:
description:
- Physical location of your cluster placement group created in Amazon EC2.
required: false
version_added: "2.3"
default: None
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
required: false
replace_all_instances:
description:
- In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.
required: false
version_added: "1.8"
default: None
lc_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 500 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
required: false
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
- how long before wait instances to become viable when replaced. Used in conjunction with instance_ids option.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
    - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
notification_topic:
description:
- A SNS topic ARN to send auto scaling notifications to.
default: None
required: false
version_added: "2.2"
notification_types:
description:
- A list of auto scaling events to trigger notifications on.
default: ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR']
required: false
version_added: "2.2"
suspend_processes:
description:
- A list of scaling processes to suspend.
required: False
default: []
choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer']
version_added: "2.3"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".
This could also be considered a rolling deploy of a pre-baked AMI.
If this is a newly created group, the instances will not be replaced since all instances
will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
To only replace a couple of instances instead of all of them, supply a list
to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
import time
import logging as log
import traceback
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Silence boto's own logger; only this module's log.debug output remains.
log.getLogger('boto').setLevel(log.CRITICAL)
try:
    import boto.ec2.autoscale
    from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    # Missing boto is reported from main() via module.fail_json().
    HAS_BOTO = False
# Group attributes that are both copied into the module's result dict and
# compared against module parameters when updating an existing group.
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
    'health_check_period', 'health_check_type', 'launch_config_name',
    'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
    'termination_policies', 'vpc_zone_identifier')
# Per-instance attributes reported under 'instance_facts'.
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
def enforce_required_arguments(module):
    """Fail the module unless all arguments needed for group create/update
    are present.

    These arguments cannot be declared mandatory in the argument spec
    because deleting a group does not need them, so the check is applied
    only on the create/update path.
    """
    required = ('min_size', 'max_size', 'launch_config_name')
    missing = [name for name in required if module.params[name] is None]
    if missing:
        module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing))
def get_properties(autoscaling_group):
    """Build a JSON-serializable dict describing *autoscaling_group*.

    Copies the ASG_ATTRIBUTES off the boto group object, adds per-state
    instance counters, a per-instance fact dict, the load balancer list,
    and a flattened tag mapping.
    """
    props = {}
    for attr in ASG_ATTRIBUTES:
        props[attr] = getattr(autoscaling_group, attr)
    # Ugly hack to make this JSON-serializable: boto Tag objects are
    # replaced with a plain dict because the tags end up in ansible's
    # jsonified return value.
    if 'tags' in props and isinstance(props['tags'], list):
        flattened = {}
        for tag in props['tags']:
            flattened[tag.key] = [tag.value, tag.propagate_at_launch]
        props['tags'] = flattened
    counter_keys = ('healthy_instances', 'in_service_instances',
                    'unhealthy_instances', 'pending_instances',
                    'viable_instances', 'terminating_instances')
    for key in counter_keys:
        props[key] = 0
    facts = {}
    if autoscaling_group.instances:
        props['instances'] = [inst.instance_id for inst in autoscaling_group.instances]
        for inst in autoscaling_group.instances:
            facts[inst.instance_id] = {'health_status': inst.health_status,
                                       'lifecycle_state': inst.lifecycle_state,
                                       'launch_config_name': inst.launch_config_name}
            is_healthy = inst.health_status == 'Healthy'
            in_service = inst.lifecycle_state == 'InService'
            if is_healthy and in_service:
                props['viable_instances'] += 1
            if is_healthy:
                props['healthy_instances'] += 1
            else:
                props['unhealthy_instances'] += 1
            if in_service:
                props['in_service_instances'] += 1
            if inst.lifecycle_state == 'Terminating':
                props['terminating_instances'] += 1
            if inst.lifecycle_state == 'Pending':
                props['pending_instances'] += 1
    props['instance_facts'] = facts
    props['load_balancers'] = autoscaling_group.load_balancers
    if getattr(autoscaling_group, "tags", None):
        props['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
    return props
def elb_dreg(asg_connection, module, group_name, instance_id):
    """Deregister *instance_id* from every ELB attached to the group, then
    poll until no load balancer still reports it as InService.

    No-op unless the group has load balancers and uses ELB health checks.
    Fails the module on auth errors or when wait_timeout expires.
    """
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    as_group = asg_connection.get_all_groups(names=[group_name])[0]
    wait_timeout = module.params.get('wait_timeout')
    # NOTE(review): props is computed but never used in this function.
    props = get_properties(as_group)
    count = 1
    if as_group.load_balancers and as_group.health_check_type == 'ELB':
        try:
            elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
    else:
        # Nothing to deregister: no ELBs attached, or EC2 health checks.
        return
    for lb in as_group.load_balancers:
        elb_connection.deregister_instances(lb, instance_id)
        log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
    # Poll every 10s until the instance is out of service on all ELBs.
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and count > 0:
        count = 0
        for lb in as_group.load_balancers:
            lb_instances = elb_connection.describe_instance_health(lb)
            for i in lb_instances:
                if i.instance_id == instance_id and i.state == "InService":
                    count += 1
                    log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
        time.sleep(10)
    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, module, group_name):
    """Count the group's instances that every attached ELB reports InService.

    Only instances the ASG itself considers Healthy/InService are queried.
    Returns the count, or None when an ELB does not know an instance yet
    (race between ASG registration and ELB visibility).
    """
    healthy_instances = set()
    as_group = asg_connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    # get healthy, inservice instances from ASG
    instances = []
    for instance, settings in props['instance_facts'].items():
        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
            instances.append(instance)
    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
    log.debug("ELB instance status:")
    for lb in as_group.load_balancers:
        # we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet show up in the ELB
        try:
            lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
        except boto.exception.BotoServerError as e:
            if e.error_code == 'InvalidInstance':
                return None
            module.fail_json(msg=str(e))
        for i in lb_instances:
            if i.state == "InService":
                healthy_instances.add(i.instance_id)
                log.debug("{0}: {1}".format(i.instance_id, i.state))
    return len(healthy_instances)
def wait_for_elb(asg_connection, module, group_name):
    """Block until the ELBs report at least min_size healthy instances.

    No-op unless the group has load balancers and ELB health checks.
    Fails the module when wait_timeout expires first.
    """
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    wait_timeout = module.params.get('wait_timeout')
    # if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status as to avoid health_check_grace period that is awarded to ASG instances
    as_group = asg_connection.get_all_groups(names=[group_name])[0]
    if as_group.load_balancers and as_group.health_check_type == 'ELB':
        log.debug("Waiting for ELB to consider instances healthy.")
        try:
            elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
        wait_timeout = time.time() + wait_timeout
        # NOTE(review): elb_healthy may return None on the ELB visibility
        # race; the None < int comparison below only works on Python 2.
        healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
        while healthy_instances < as_group.min_size and wait_timeout > time.time():
            healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
            log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
        log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
def suspend_processes(as_group, module):
    """Reconcile the group's suspended scaling processes with the
    ``suspend_processes`` module parameter.

    Resumes processes no longer requested and suspends newly requested
    ones. Returns True when anything changed, False when the group already
    matched the requested set.
    """
    wanted = set(module.params.get('suspend_processes'))
    try:
        current = set(p.process_name for p in as_group.suspended_processes)
    except AttributeError:
        # A group that is still being created has no suspended_processes
        # attribute yet; treat it as nothing suspended.
        current = set()
    if wanted == current:
        return False
    to_resume = list(current - wanted)
    if to_resume:
        as_group.resume_processes(to_resume)
    if wanted:
        as_group.suspend_processes(list(wanted))
    return True
def create_autoscaling_group(connection, module):
    """Create the named autoscaling group, or update it if it exists.

    Returns (changed, asg_properties) where asg_properties is the dict
    built by get_properties(). Fails the module on boto errors.
    """
    group_name = module.params.get('name')
    load_balancers = module.params['load_balancers']
    availability_zones = module.params['availability_zones']
    launch_config_name = module.params.get('launch_config_name')
    min_size = module.params['min_size']
    max_size = module.params['max_size']
    placement_group = module.params.get('placement_group')
    desired_capacity = module.params.get('desired_capacity')
    vpc_zone_identifier = module.params.get('vpc_zone_identifier')
    set_tags = module.params.get('tags')
    health_check_period = module.params.get('health_check_period')
    health_check_type = module.params.get('health_check_type')
    default_cooldown = module.params.get('default_cooldown')
    wait_for_instances = module.params.get('wait_for_instances')
    as_groups = connection.get_all_groups(names=[group_name])
    wait_timeout = module.params.get('wait_timeout')
    termination_policies = module.params.get('termination_policies')
    notification_topic = module.params.get('notification_topic')
    notification_types = module.params.get('notification_types')
    # With neither subnets nor AZs given, an EC2 connection is needed later
    # to default to every zone in the region.
    if not vpc_zone_identifier and not availability_zones:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        try:
            ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    elif vpc_zone_identifier:
        # boto expects the subnet list as a comma-separated string.
        vpc_zone_identifier = ','.join(vpc_zone_identifier)
    # Convert the module's tag dicts into boto Tag objects; the
    # 'propagate_at_launch' key is a flag, not a tag of its own.
    asg_tags = []
    for tag in set_tags:
        for k,v in tag.items():
            if k !='propagate_at_launch':
                asg_tags.append(Tag(key=k,
                     value=v,
                     propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
                     resource_id=group_name))
    if not as_groups:
        # --- create path ---
        if not vpc_zone_identifier and not availability_zones:
            availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
        enforce_required_arguments(module)
        launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
        if len(launch_configs) == 0:
            module.fail_json(msg="No launch config found with name %s" % launch_config_name)
        ag = AutoScalingGroup(
                 group_name=group_name,
                 load_balancers=load_balancers,
                 availability_zones=availability_zones,
                 launch_config=launch_configs[0],
                 min_size=min_size,
                 max_size=max_size,
                 placement_group=placement_group,
                 desired_capacity=desired_capacity,
                 vpc_zone_identifier=vpc_zone_identifier,
                 connection=connection,
                 tags=asg_tags,
                 health_check_period=health_check_period,
                 health_check_type=health_check_type,
                 default_cooldown=default_cooldown,
                 termination_policies=termination_policies)
        try:
            connection.create_auto_scaling_group(ag)
            suspend_processes(ag, module)
            if wait_for_instances:
                wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
                wait_for_elb(connection, module, group_name)
            if notification_topic:
                ag.put_notification_configuration(notification_topic, notification_types)
            as_group = connection.get_all_groups(names=[group_name])[0]
            asg_properties = get_properties(as_group)
            changed = True
            return(changed, asg_properties)
        except BotoServerError as e:
            # NOTE(review): traceback.format_exc(e) passes the exception as
            # the 'limit' argument; valid on Python 2 only.
            module.fail_json(msg="Failed to create Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e))
    else:
        # --- update path: diff each managed attribute against the params ---
        as_group = as_groups[0]
        changed = False
        if suspend_processes(as_group, module):
            changed = True
        for attr in ASG_ATTRIBUTES:
            if module.params.get(attr, None) is not None:
                module_attr = module.params.get(attr)
                if attr == 'vpc_zone_identifier':
                    module_attr = ','.join(module_attr)
                group_attr = getattr(as_group, attr)
                # we do this because AWS and the module may return the same list
                # sorted differently
                if attr != 'termination_policies':
                    try:
                        module_attr.sort()
                    except:
                        pass
                    try:
                        group_attr.sort()
                    except:
                        pass
                if group_attr != module_attr:
                    changed = True
                    setattr(as_group, attr, module_attr)
        # Tag reconciliation: delete tags no longer wanted, then
        # create/update the requested set if anything differs.
        if len(set_tags) > 0:
            have_tags = {}
            want_tags = {}
            for tag in asg_tags:
                want_tags[tag.key] = [tag.value, tag.propagate_at_launch]
            dead_tags = []
            for tag in as_group.tags:
                have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
                if tag.key not in want_tags:
                    changed = True
                    dead_tags.append(tag)
            if dead_tags != []:
                connection.delete_tags(dead_tags)
            if have_tags != want_tags:
                changed = True
                connection.create_or_update_tags(asg_tags)
        # handle loadbalancers separately because None != []
        load_balancers = module.params.get('load_balancers') or []
        if load_balancers and as_group.load_balancers != load_balancers:
            changed = True
            as_group.load_balancers = module.params.get('load_balancers')
        if changed:
            try:
                as_group.update()
            except BotoServerError as e:
                module.fail_json(msg="Failed to update Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e))
        if notification_topic:
            try:
                as_group.put_notification_configuration(notification_topic, notification_types)
            except BotoServerError as e:
                module.fail_json(msg="Failed to update Autoscaling Group notifications: %s" % str(e), exception=traceback.format_exc(e))
        if wait_for_instances:
            wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
            wait_for_elb(connection, module, group_name)
        try:
            as_group = connection.get_all_groups(names=[group_name])[0]
            asg_properties = get_properties(as_group)
        except BotoServerError as e:
            module.fail_json(msg="Failed to read existing Autoscaling Groups: %s" % str(e), exception=traceback.format_exc(e))
        return(changed, asg_properties)
def delete_autoscaling_group(connection, module):
    """Delete the named autoscaling group, draining its instances first.

    The group is scaled to 0/0/0, polled every 10s until all instances are
    gone, then deleted; a final poll waits for the group itself to vanish.

    :param connection: boto autoscale connection
    :param module: the AnsibleModule instance
    :return: True when a group was deleted, False when no group with that
             name exists.
    """
    group_name = module.params.get('name')
    notification_topic = module.params.get('notification_topic')
    groups = connection.get_all_groups(names=[group_name])
    if groups:
        group = groups[0]
        # Bug fix: this call previously used the undefined name `ag`
        # (NameError whenever notification_topic was set); the notification
        # configuration must be removed from the fetched group itself.
        if notification_topic:
            group.delete_notification_configuration(notification_topic)
        group.max_size = 0
        group.min_size = 0
        group.desired_capacity = 0
        group.update()
        instances = True
        while instances:
            tmp_groups = connection.get_all_groups(names=[group_name])
            if tmp_groups:
                tmp_group = tmp_groups[0]
                if not tmp_group.instances:
                    instances = False
            time.sleep(10)
        group.delete()
        while len(connection.get_all_groups(names=[group_name])):
            time.sleep(5)
        return True
    return False
def get_chunks(l, n):
    """Yield successive *n*-sized slices of list *l* (last may be shorter)."""
    # range() works on both Python 2 and 3; xrange() fails under Python 3.
    for i in range(0, len(l), n):
        yield l[i:i + n]
def update_size(group, max_size, min_size, dc):
    """Set the group's max/min/desired capacity and push the change to AWS."""
    log.debug("setting ASG sizes")
    log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size ))
    for attr, value in (('max_size', max_size),
                        ('min_size', min_size),
                        ('desired_capacity', dc)):
        setattr(group, attr, value)
    group.update()
def replace(connection, module):
    """Perform a rolling replacement of the group's instances.

    Temporarily grows the group by replace_batch_size, then terminates old
    instances batch by batch, waiting for replacements to become viable
    (and ELB-healthy) between batches, and finally restores the original
    sizes. Returns (changed, asg_properties).
    """
    batch_size = module.params.get('replace_batch_size')
    wait_timeout = module.params.get('wait_timeout')
    group_name = module.params.get('name')
    max_size = module.params.get('max_size')
    min_size = module.params.get('min_size')
    desired_capacity = module.params.get('desired_capacity')
    lc_check = module.params.get('lc_check')
    replace_instances = module.params.get('replace_instances')
    as_group = connection.get_all_groups(names=[group_name])[0]
    wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
    props = get_properties(as_group)
    instances = props['instances']
    if replace_instances:
        instances = replace_instances
    #check if min_size/max_size/desired capacity have been specified and if not use ASG values
    if min_size is None:
        min_size = as_group.min_size
    if max_size is None:
        max_size = as_group.max_size
    if desired_capacity is None:
        desired_capacity = as_group.desired_capacity
    # check to see if instances are replaceable if checking launch configs
    new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
    num_new_inst_needed = desired_capacity - len(new_instances)
    if lc_check:
        if num_new_inst_needed == 0 and old_instances:
            log.debug("No new instances needed, but old instances are present. Removing old instances")
            terminate_batch(connection, module, old_instances, instances, True)
            as_group = connection.get_all_groups(names=[group_name])[0]
            props = get_properties(as_group)
            changed = True
            return(changed, props)
        # we don't want to spin up extra instances if not necessary
        if num_new_inst_needed < batch_size:
            log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
            batch_size = num_new_inst_needed
    if not old_instances:
        changed = False
        return(changed, props)
    # set temporary settings and wait for them to be reached
    # This should get overwritten if the number of instances left is less than the batch size.
    as_group = connection.get_all_groups(names=[group_name])[0]
    update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
    wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
    wait_for_elb(connection, module, group_name)
    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    instances = props['instances']
    if replace_instances:
        instances = replace_instances
    log.debug("beginning main loop")
    for i in get_chunks(instances, batch_size):
        # break out of this loop if we have enough new instances
        break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
        wait_for_term_inst(connection, module, term_instances)
        wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
        wait_for_elb(connection, module, group_name)
        as_group = connection.get_all_groups(names=[group_name])[0]
        if break_early:
            log.debug("breaking loop")
            break
    # Restore the original (or explicitly requested) sizes.
    update_size(as_group, max_size, min_size, desired_capacity)
    as_group = connection.get_all_groups(names=[group_name])[0]
    asg_properties = get_properties(as_group)
    log.debug("Rolling update complete.")
    changed=True
    return(changed, asg_properties)
def get_instances_by_lc(props, lc_check, initial_instances):
    """Split the group's instances into (new_instances, old_instances).

    With lc_check enabled an instance is "new" when its launch config
    matches the group's current one; otherwise it is "new" when it was not
    part of *initial_instances*.
    """
    if lc_check:
        current_lc = props['launch_config_name']
        def is_new(inst):
            return props['instance_facts'][inst]['launch_config_name'] == current_lc
    else:
        log.debug("Comparing initial instances with current: {0}".format(initial_instances))
        def is_new(inst):
            return inst not in initial_instances
    new_instances = [i for i in props['instances'] if is_new(i)]
    old_instances = [i for i in props['instances'] if not is_new(i)]
    log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
    log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
    return new_instances, old_instances
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
    """Return the subset of *replace_instances* eligible for termination.

    Only instances actually present in the group are considered. With
    lc_check enabled an instance qualifies when its launch configuration
    differs from the group's current one; otherwise it qualifies when it
    belongs to *initial_instances*.
    """
    members = [i for i in replace_instances if i in props['instances']]
    if lc_check:
        current_lc = props['launch_config_name']
        return [i for i in members
                if props['instance_facts'][i]['launch_config_name'] != current_lc]
    return [i for i in members if i in initial_instances]
def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
    """Deregister and terminate one batch of replaceable instances.

    Returns (break_loop, desired_size, instances_to_terminate):
    break_loop tells the caller's rolling loop to stop, desired_size is the
    instance count the caller should wait for afterwards.
    """
    batch_size = module.params.get('replace_batch_size')
    min_size = module.params.get('min_size')
    desired_capacity = module.params.get('desired_capacity')
    group_name = module.params.get('name')
    wait_timeout = int(module.params.get('wait_timeout'))
    lc_check = module.params.get('lc_check')
    decrement_capacity = False
    break_loop = False
    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    desired_size = as_group.min_size
    new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
    num_new_inst_needed = desired_capacity - len(new_instances)
    # check to make sure instances given are actually in the given ASG
    # and they have a non-current launch config
    instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
    log.debug("new instances needed: {0}".format(num_new_inst_needed))
    log.debug("new instances: {0}".format(new_instances))
    log.debug("old instances: {0}".format(old_instances))
    log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
    if num_new_inst_needed == 0:
        # Target capacity reached: terminations may shrink the group.
        decrement_capacity = True
        if as_group.min_size != min_size:
            as_group.min_size = min_size
            as_group.update()
            log.debug("Updating minimum size back to original of {0}".format(min_size))
        #if are some leftover old instances, but we are already at capacity with new ones
        # we don't want to decrement capacity
        if leftovers:
            decrement_capacity = False
        break_loop = True
        instances_to_terminate = old_instances
        desired_size = min_size
        log.debug("No new instances needed")
    if num_new_inst_needed < batch_size and num_new_inst_needed !=0 :
        # Fewer replacements needed than the batch size: trim the batch and
        # let the ASG backfill the terminated instances.
        instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
        decrement_capacity = False
        break_loop = False
        log.debug("{0} new instances needed".format(num_new_inst_needed))
    log.debug("decrementing capacity: {0}".format(decrement_capacity))
    for instance_id in instances_to_terminate:
        elb_dreg(connection, module, group_name, instance_id)
        log.debug("terminating instance: {0}".format(instance_id))
        connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
    # we wait to make sure the machines we marked as Unhealthy are
    # no longer in the list
    return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, module, term_instances):
    """Poll until none of *term_instances* is still Terminating/Unhealthy.

    Fails the module when wait_timeout expires first.
    """
    # NOTE(review): batch_size and lc_check are read but unused here.
    batch_size = module.params.get('replace_batch_size')
    wait_timeout = module.params.get('wait_timeout')
    group_name = module.params.get('name')
    lc_check = module.params.get('lc_check')
    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    count = 1
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and count > 0:
        log.debug("waiting for instances to terminate")
        count = 0
        as_group = connection.get_all_groups(names=[group_name])[0]
        props = get_properties(as_group)
        instance_facts = props['instance_facts']
        instances = ( i for i in instance_facts if i in term_instances)
        for i in instances:
            lifecycle = instance_facts[i]['lifecycle_state']
            health = instance_facts[i]['health_status']
            log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
            if lifecycle == 'Terminating' or health == 'Unhealthy':
                count += 1
        time.sleep(10)
    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
    """Poll the group until properties[prop] >= desired_size.

    *prop* is one of the counters produced by get_properties (e.g.
    'viable_instances'). Returns the final properties dict; fails the
    module when wait_timeout expires first.
    """
    # make sure we have the latest stats after that last loop.
    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
    # now we make sure that we have enough instances in a viable state
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and desired_size > props[prop]:
        log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
        time.sleep(10)
        as_group = connection.get_all_groups(names=[group_name])[0]
        props = get_properties(as_group)
    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
    log.debug("Reached {0}: {1}".format(prop, desired_size))
    return props
def main():
    """Module entry point: parse arguments, connect to AWS autoscale, and
    dispatch to create/update, delete, and rolling-replace handling."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            load_balancers=dict(type='list'),
            availability_zones=dict(type='list'),
            launch_config_name=dict(type='str'),
            min_size=dict(type='int'),
            max_size=dict(type='int'),
            placement_group=dict(type='str'),
            desired_capacity=dict(type='int'),
            vpc_zone_identifier=dict(type='list'),
            replace_batch_size=dict(type='int', default=1),
            replace_all_instances=dict(type='bool', default=False),
            replace_instances=dict(type='list', default=[]),
            lc_check=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=300),
            state=dict(default='present', choices=['present', 'absent']),
            tags=dict(type='list', default=[]),
            health_check_period=dict(type='int', default=300),
            health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
            default_cooldown=dict(type='int', default=300),
            wait_for_instances=dict(type='bool', default=True),
            termination_policies=dict(type='list', default='Default'),
            notification_topic=dict(type='str', default=None),
            notification_types=dict(type='list', default=[
                'autoscaling:EC2_INSTANCE_LAUNCH',
                'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
                'autoscaling:EC2_INSTANCE_TERMINATE',
                'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
            ]),
            suspend_processes=dict(type='list', default=[])
        ),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [['replace_all_instances', 'replace_instances']]
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    state = module.params.get('state')
    replace_instances = module.params.get('replace_instances')
    replace_all_instances = module.params.get('replace_all_instances')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
        if not connection:
            module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))
    changed = create_changed = replace_changed = False
    if state == 'present':
        create_changed, asg_properties=create_autoscaling_group(connection, module)
    elif state == 'absent':
        # Deletion exits immediately; no replacement handling applies.
        changed = delete_autoscaling_group(connection, module)
        module.exit_json( changed = changed )
    if replace_all_instances or replace_instances:
        replace_changed, asg_properties=replace(connection, module)
    if create_changed or replace_changed:
        changed = True
    module.exit_json( changed = changed, **asg_properties )
if __name__ == '__main__':
    main()
|
import lxml.html as l
import requests
def key_char_parse(char_id):
    """Scrape the vndb.org character page for *char_id* and return a dict
    with URL, names, image, gender, blood type and description.

    Selectors target vndb's HTML layout at the time of writing; any site
    redesign will break them.
    """
    url = 'https://vndb.org/c' + str(char_id)
    page = requests.get(url)
    root = l.fromstring(page.text)
    name = root.cssselect('.mainbox h1')[0].text
    kanji_name = root.cssselect('.mainbox h2.alttitle')[0].text
    # Image src is protocol-relative on the page; prefix the scheme.
    img = 'https:' + root.cssselect('.mainbox .charimg img')[0].attrib['src']
    gender = root.cssselect('.chardetails table thead tr td abbr')[0].attrib['title']
    try:
        bloodtype = root.cssselect('.chardetails table thead tr td span')[0].text
    except IndexError:
        # Blood type span is absent for some characters.
        bloodtype = None
    table = root.cssselect('.chardetails table')[0]
    # NOTE(review): this loop parses key/value pairs (traits, visual
    # novels, ...) but the results are never stored anywhere — `key` and
    # `value` are discarded and do not reach the returned dict.
    for row in table:
        if row.tag == 'tr':
            if len(row) == 2:
                try:
                    key = row[0][0].text
                except IndexError:
                    key = row[0].text
                value = None
                try:
                    if row[1][0].tag == 'a':
                        value = row[1][0].text
                    else:
                        value = []
                        for span in row[1]:
                            # charspoil_* classes mark spoiler severity.
                            if 'charspoil_1' in span.classes:
                                tag = 'minor spoiler'
                            elif 'charspoil_2' in span.classes:
                                tag = 'spoiler'
                            elif 'sexual' in span.classes:
                                tag = 'sexual trait'
                            else:
                                tag = None
                            value.append({'value': span[1].text, 'tag': tag})
                except IndexError:
                    value = row[1].text
                if key == 'Visual novels':
                    value = []
                    for span in row[1]:
                        if span.tag == 'span':
                            value.append(span.text + span[0].text)
    desc = root.cssselect('.chardetails table td.chardesc')[0][1].text
    character = {
        'URL': url,
        'Name': name,
        'Name_J': kanji_name,
        'Image': img,
        'Gender': gender,
        'Blood_Type': bloodtype,
        'Description': desc
    }
    return character
|
"""
pythoner.net
Copyright (C) 2013 PYTHONER.ORG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.contrib import admin
from models import *
class ProfileAdmin(admin.ModelAdmin):
    """Admin options for UserProfile."""
    # Columns shown on the UserProfile changelist page.
    list_display = ('screen_name','city','introduction')
# Register UserProfile in the Django admin with the options above.
admin.site.register(UserProfile,ProfileAdmin)
|
from django.conf.urls.defaults import *
from indivo.views import *
from indivo.lib.utils import MethodDispatcher
# Carenet-scoped URL map (included under a carenet prefix by a parent
# URLconf). Each route uses MethodDispatcher to select a view per HTTP verb.
urlpatterns = patterns('',
    (r'^$', MethodDispatcher({
                'DELETE' : carenet_delete})),
    (r'^/rename$', MethodDispatcher({
                'POST' : carenet_rename})),
    (r'^/record$', MethodDispatcher({'GET':carenet_record})),
    # Manage documents
    (r'^/documents/', include('indivo.urls.carenet_documents')),
    # Manage accounts
    (r'^/accounts/$',
        MethodDispatcher({
            'GET' : carenet_account_list,
            'POST' : carenet_account_create
            })),
    (r'^/accounts/(?P<account_id>[^/]+)$',
        MethodDispatcher({ 'DELETE' : carenet_account_delete })),
    # Manage apps
    (r'^/apps/$',
        MethodDispatcher({ 'GET' : carenet_apps_list})),
    (r'^/apps/(?P<pha_email>[^/]+)$',
        MethodDispatcher({ 'PUT' : carenet_apps_create,
                           'DELETE': carenet_apps_delete})),
    # Permissions Calls
    (r'^/accounts/(?P<account_id>[^/]+)/permissions$',
        MethodDispatcher({ 'GET' : carenet_account_permissions })),
    (r'^/apps/(?P<pha_email>[^/]+)/permissions$',
        MethodDispatcher({ 'GET' : carenet_app_permissions })),
    # Reporting Calls
    (r'^/reports/minimal/procedures/$',
        MethodDispatcher({'GET':carenet_procedure_list})),
    (r'^/reports/minimal/simple-clinical-notes/$',
        MethodDispatcher({'GET':carenet_simple_clinical_notes_list})),
    (r'^/reports/minimal/equipment/$',
        MethodDispatcher({'GET':carenet_equipment_list})),
    (r'^/reports/minimal/measurements/(?P<lab_code>[^/]+)/$',
        MethodDispatcher({'GET':carenet_measurement_list})),
    # Generic catch-all for the remaining data models; keep it after the
    # specific /reports/minimal/ routes so they match first.
    (r'^/reports/(?P<data_model>[^/]+)/$',
        MethodDispatcher({'GET':carenet_generic_list})),
    # Demographics
    (r'^/demographics$', MethodDispatcher({'GET': read_demographics_carenet})),
)
|
import json
import logging
import dateutil.parser
import pytz
from werkzeug import urls
from odoo import api, fields, models, _
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment_paypal.controllers.main import PaypalController
from odoo.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class AcquirerPaypal(models.Model):
_inherit = 'payment.acquirer'
provider = fields.Selection(selection_add=[('paypal', 'Paypal')])
paypal_email_account = fields.Char('Paypal Email ID', required_if_provider='paypal', groups='base.group_user')
paypal_seller_account = fields.Char(
'Paypal Merchant ID', groups='base.group_user',
help='The Merchant ID is used to ensure communications coming from Paypal are valid and secured.')
paypal_use_ipn = fields.Boolean('Use IPN', default=True, help='Paypal Instant Payment Notification', groups='base.group_user')
paypal_pdt_token = fields.Char(string='Paypal PDT Token', help='Payment Data Transfer allows you to receive notification of successful payments as they are made.', groups='base.group_user')
# Server 2 server
paypal_api_enabled = fields.Boolean('Use Rest API', default=False)
paypal_api_username = fields.Char('Rest API Username', groups='base.group_user')
paypal_api_password = fields.Char('Rest API Password', groups='base.group_user')
paypal_api_access_token = fields.Char('Access Token', groups='base.group_user')
paypal_api_access_token_validity = fields.Datetime('Access Token Validity', groups='base.group_user')
# Default paypal fees
fees_dom_fixed = fields.Float(default=0.35)
fees_dom_var = fields.Float(default=3.4)
fees_int_fixed = fields.Float(default=0.35)
fees_int_var = fields.Float(default=3.9)
def _get_feature_support(self):
"""Get advanced feature support by provider.
Each provider should add its technical in the corresponding
key for the following features:
* fees: support payment fees computations
* authorize: support authorizing payment (separates
authorization and capture)
* tokenize: support saving payment data in a payment.tokenize
object
"""
res = super(AcquirerPaypal, self)._get_feature_support()
res['fees'].append('paypal')
return res
@api.model
def _get_paypal_urls(self, environment):
""" Paypal URLS """
if environment == 'prod':
return {
'paypal_form_url': 'https://www.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.paypal.com/v1/oauth2/token',
}
else:
return {
'paypal_form_url': 'https://www.sandbox.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.sandbox.paypal.com/v1/oauth2/token',
}
@api.multi
def paypal_compute_fees(self, amount, currency_id, country_id):
""" Compute paypal fees.
:param float amount: the amount to pay
:param integer country_id: an ID of a res.country, or None. This is
the customer's country, to be compared to
the acquirer company country.
:return float fees: computed fees
"""
if not self.fees_active:
return 0.0
country = self.env['res.country'].browse(country_id)
if country and self.company_id.country_id.id == country.id:
percentage = self.fees_dom_var
fixed = self.fees_dom_fixed
else:
percentage = self.fees_int_var
fixed = self.fees_int_fixed
fees = (percentage / 100.0 * amount + fixed) / (1 - percentage / 100.0)
return fees
    @api.multi
    def paypal_form_generate_values(self, values):
        """Build the key/value pairs rendered into the PayPal redirect form.

        :param dict values: generic transaction values (reference, amount,
                            currency, partner_* entries)
        :return dict: `values` extended with the PayPal `_xclick` parameters
        """
        base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
        paypal_tx_values = dict(values)
        # NOTE: the 'handling' and 'custom' entries below pop 'fees' and
        # 'return_url' out of paypal_tx_values as a side effect, so those keys
        # do not appear in the returned dict.
        paypal_tx_values.update({
            'cmd': '_xclick',
            'business': self.paypal_email_account,
            'item_name': '%s: %s' % (self.company_id.name, values['reference']),
            'item_number': values['reference'],
            'amount': values['amount'],
            'currency_code': values['currency'] and values['currency'].name or '',
            'address1': values.get('partner_address'),
            'city': values.get('partner_city'),
            'country': values.get('partner_country') and values.get('partner_country').code or '',
            'state': values.get('partner_state') and (values.get('partner_state').code or values.get('partner_state').name) or '',
            'email': values.get('partner_email'),
            'zip_code': values.get('partner_zip'),
            'first_name': values.get('partner_first_name'),
            'last_name': values.get('partner_last_name'),
            # Return / notification / cancel endpoints on our side.
            'paypal_return': urls.url_join(base_url, PaypalController._return_url),
            'notify_url': urls.url_join(base_url, PaypalController._notify_url),
            'cancel_return': urls.url_join(base_url, PaypalController._cancel_url),
            'handling': '%.2f' % paypal_tx_values.pop('fees', 0.0) if self.fees_active else False,
            'custom': json.dumps({'return_url': '%s' % paypal_tx_values.pop('return_url')}) if paypal_tx_values.get('return_url') else False,
        })
        return paypal_tx_values
@api.multi
def paypal_get_form_action_url(self):
return self._get_paypal_urls(self.environment)['paypal_form_url']
class TxPaypal(models.Model):
    _inherit = 'payment.transaction'

    # Payment type reported by PayPal (e.g. 'instant', 'echeck').
    paypal_txn_type = fields.Char('Transaction type')

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    @api.model
    def _paypal_form_get_tx_from_data(self, data):
        """Return the single transaction matching the PayPal IPN payload.

        :param dict data: IPN POST data; 'item_number' carries the transaction
                          reference and 'txn_id' the PayPal transaction id
        :return: the matching payment.transaction record
        :raise ValidationError: if reference/txn_id is missing, or zero or
                                several transactions match the reference
        """
        reference, txn_id = data.get('item_number'), data.get('txn_id')
        if not reference or not txn_id:
            error_msg = _('Paypal: received data with missing reference (%s) or txn_id (%s)') % (reference, txn_id)
            _logger.info(error_msg)
            raise ValidationError(error_msg)

        # find tx -> @TDENOTE use txn_id ?
        txs = self.env['payment.transaction'].search([('reference', '=', reference)])
        if not txs or len(txs) > 1:
            error_msg = 'Paypal: received data for reference %s' % (reference)
            if not txs:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.info(error_msg)
            raise ValidationError(error_msg)
        return txs[0]

    @api.multi
    def _paypal_form_get_invalid_parameters(self, data):
        """Cross-check the IPN payload against the transaction.

        :param dict data: IPN POST data
        :return list: (parameter_name, received_value, expected_value) triples
                      for every inconsistent parameter
        """
        invalid_parameters = []
        _logger.info('Received a notification from Paypal with IPN version %s', data.get('notify_version'))
        if data.get('test_ipn'):
            # Bug fix: a stray trailing comma previously wrapped this call in
            # a one-element tuple expression statement.
            _logger.warning(
                'Received a notification from Paypal using sandbox'
            )

        # TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
        if self.acquirer_reference and data.get('txn_id') != self.acquirer_reference:
            invalid_parameters.append(('txn_id', data.get('txn_id'), self.acquirer_reference))
        # check what is bought
        if float_compare(float(data.get('mc_gross', '0.0')), (self.amount + self.fees), 2) != 0:
            invalid_parameters.append(('mc_gross', data.get('mc_gross'), '%.2f' % self.amount))  # mc_gross is amount + fees
        if data.get('mc_currency') != self.currency_id.name:
            invalid_parameters.append(('mc_currency', data.get('mc_currency'), self.currency_id.name))
        if 'handling_amount' in data and float_compare(float(data.get('handling_amount')), self.fees, 2) != 0:
            invalid_parameters.append(('handling_amount', data.get('handling_amount'), self.fees))
        # check buyer
        if self.payment_token_id and data.get('payer_id') != self.payment_token_id.acquirer_ref:
            invalid_parameters.append(('payer_id', data.get('payer_id'), self.payment_token_id.acquirer_ref))
        # check seller
        if data.get('receiver_id') and self.acquirer_id.paypal_seller_account and data['receiver_id'] != self.acquirer_id.paypal_seller_account:
            invalid_parameters.append(('receiver_id', data.get('receiver_id'), self.acquirer_id.paypal_seller_account))
        if not data.get('receiver_id') or not self.acquirer_id.paypal_seller_account:
            # Check receiver_email only if receiver_id was not checked.
            # In Paypal, this is possible to configure as receiver_email a different email than the business email (the login email)
            # In Odoo, there is only one field for the Paypal email: the business email. This isn't possible to set a receiver_email
            # different than the business email. Therefore, if you want such a configuration in your Paypal, you are then obliged to fill
            # the Merchant ID in the Paypal payment acquirer in Odoo, so the check is performed on this variable instead of the receiver_email.
            # At least one of the two checks must be done, to avoid fraudsters.
            if data.get('receiver_email') != self.acquirer_id.paypal_email_account:
                invalid_parameters.append(('receiver_email', data.get('receiver_email'), self.acquirer_id.paypal_email_account))
        return invalid_parameters

    @api.multi
    def _paypal_form_validate(self, data):
        """Move the transaction to done/pending/cancel from the IPN status.

        :param dict data: IPN POST data; 'payment_status' drives the outcome
        :return: result of the final write() on the transaction
        """
        status = data.get('payment_status')
        res = {
            'acquirer_reference': data.get('txn_id'),
            'paypal_txn_type': data.get('payment_type'),
        }
        if status in ['Completed', 'Processed']:
            _logger.info('Validated Paypal payment for tx %s: set as done' % (self.reference))
            try:
                # dateutil and pytz don't recognize abbreviations PDT/PST
                tzinfos = {
                    'PST': -8 * 3600,
                    'PDT': -7 * 3600,
                }
                date = dateutil.parser.parse(data.get('payment_date'), tzinfos=tzinfos).astimezone(pytz.utc)
            except Exception:
                # Bug fix: narrowed from a bare `except:` (which also caught
                # KeyboardInterrupt/SystemExit). Fall back to "now" when the
                # PayPal-provided date cannot be parsed.
                date = fields.Datetime.now()
            res.update(date=date)
            self._set_transaction_done()
            return self.write(res)
        elif status in ['Pending', 'Expired']:
            _logger.info('Received notification for Paypal payment %s: set as pending' % (self.reference))
            res.update(state_message=data.get('pending_reason', ''))
            self._set_transaction_pending()
            return self.write(res)
        else:
            error = 'Received unrecognized status for Paypal payment %s: %s, set as error' % (self.reference, status)
            _logger.info(error)
            res.update(state_message=error)
            self._set_transaction_cancel()
            return self.write(res)
|
import unittest
import weka.core.jvm as jvm
import weka.core.converters as converters
import weka.classifiers as classifiers
import weka.experiments as experiments
import weka.plot.experiments as plot
import wekatests.tests.weka_test as weka_test
class TestExperiments(weka_test.WekaTest):

    def test_plot_experiment(self):
        """
        Tests the plot_experiment method.
        """
        # Regression datasets and the learners to compare on them.
        data_files = [
            self.datafile("bolts.arff"),
            self.datafile("bodyfat.arff"),
            self.datafile("autoPrice.arff"),
        ]
        learners = [
            classifiers.Classifier(classname)
            for classname in (
                "weka.classifiers.trees.REPTree",
                "weka.classifiers.functions.LinearRegression",
                "weka.classifiers.functions.SMOreg",
            )
        ]
        result_file = self.tempfile("results-rs.arff")

        # Run a 10x66.6% random-split regression experiment.
        exp = experiments.SimpleRandomSplitExperiment(
            classification=False,
            runs=10,
            percentage=66.6,
            preserve_order=False,
            datasets=data_files,
            classifiers=learners,
            result=result_file)
        exp.setup()
        exp.run()

        # evaluate the stored results with a paired corrected t-tester
        loader = converters.loader_for_file(result_file)
        results = loader.load_file(result_file)
        matrix = experiments.ResultMatrix("weka.experiment.ResultMatrixPlainText")
        tester = experiments.Tester("weka.experiment.PairedCorrectedTTester")
        tester.resultmatrix = matrix
        comparison_col = results.attribute_by_name("Correlation_coefficient").index
        tester.instances = results
        tester.header(comparison_col)
        tester.multi_resultset_full(0, comparison_col)

        # plot, once with and once without standard deviation bars
        plot.plot_experiment(matrix, title="Random split (w/ StdDev)", measure="Correlation coefficient", show_stdev=True, wait=False)
        plot.plot_experiment(matrix, title="Random split", measure="Correlation coefficient", wait=False)
def suite():
    """
    Returns the test suite.

    :return: the test suite
    :rtype: unittest.TestSuite
    """
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestExperiments)
if __name__ == '__main__':
    # The weka wrappers need a running JVM; start it, run the suite, and shut
    # it down again afterwards.
    jvm.start()
    unittest.TextTestRunner().run(suite())
    jvm.stop()
|
from unittest.mock import patch
from django.test import TestCase
from common.djangoapps.track.backends.mongodb import MongoBackend
class TestMongoBackend(TestCase):  # lint-amnesty, pylint: disable=missing-class-docstring
    def setUp(self):
        super().setUp()
        # Patch MongoClient so the backend never opens a real connection.
        self.mongo_patcher = patch('common.djangoapps.track.backends.mongodb.MongoClient')
        self.mongo_patcher.start()
        self.addCleanup(self.mongo_patcher.stop)
        self.backend = MongoBackend()

    def test_mongo_backend(self):
        events = [{'test': 1}, {'test': 2}]

        for event in events:
            self.backend.send(event)

        # Check if we inserted events into the database
        insert_calls = self.backend.collection.insert.mock_calls
        assert len(insert_calls) == 2

        # Each event must have been passed as the first positional argument
        # of the corresponding collection.insert call.
        for event, mock_call in zip(events, insert_calls):
            _, args, _ = mock_call
            assert args[0] == event
|
"""Capa's specialized use of codejail.safe_exec."""
import hashlib
from codejail.safe_exec import SafeExecException, json_safe
from codejail.safe_exec import not_safe_exec as codejail_not_safe_exec
from codejail.safe_exec import safe_exec as codejail_safe_exec
from edx_django_utils.monitoring import function_trace
import six
from six import text_type
from . import lazymod
from .remote_exec import is_codejail_rest_service_enabled, get_remote_exec
# Code prepended to every sandboxed execution: pins BLAS to one thread and
# replaces the stdlib `random` module with a seeded `random2` instance so
# problem randomization is reproducible across Python versions.
CODE_PROLOG = """\
from __future__ import absolute_import, division
import os
os.environ["OPENBLAS_NUM_THREADS"] = "1" # See TNL-6456
import random2 as random_module
import sys
from six.moves import xrange
random = random_module.Random(%r)
random.Random = random_module.Random
sys.modules['random'] = random
"""
# (exposed_name, fully_qualified_module) pairs that problem code may use
# without importing; each is bound lazily in the sandbox via LazyModule.
ASSUMED_IMPORTS = [
    ("numpy", "numpy"),
    ("math", "math"),
    ("scipy", "scipy"),
    ("calc", "calc"),
    ("eia", "eia"),
    ("chemcalc", "chem.chemcalc"),
    ("chemtools", "chem.chemtools"),
    ("miller", "chem.miller"),
    ("draganddrop", "verifiers.draganddrop"),
]
# Locate the lazymod source so it can be shipped into the sandbox.
lazymod_py_file = lazymod.__file__
# If the import resolved to a compiled .pyc, read the adjacent .py instead.
if lazymod_py_file.endswith("c"):
    lazymod_py_file = lazymod_py_file[:-1]
with open(lazymod_py_file) as f:
    lazymod_py = f.read()
# Sandbox preamble: the LazyModule definition followed by one lazy binding
# per assumed import.
LAZY_IMPORTS = [lazymod_py]
for name, modname in ASSUMED_IMPORTS:
    LAZY_IMPORTS.append("{} = LazyModule('{}')\n".format(name, modname))
LAZY_IMPORTS = "".join(LAZY_IMPORTS)
def update_hash(hasher, obj):
    """
    Update a `hashlib` hasher with a nested object.

    To properly cache nested structures, we need to compute a hash from the
    entire structure, canonicalizing at every level.

    `hasher`'s `.update()` method is called a number of times, touching all of
    `obj` in the process.  Only primitive JSON-safe types are supported.
    """
    # Hash the type first so that e.g. 1 and "1" produce different digests.
    # latin-1 matches what the former six.b() helper used, keeping digests
    # (and therefore existing cache keys) unchanged after dropping six.
    hasher.update(str(type(obj)).encode("latin-1"))
    if isinstance(obj, (tuple, list)):
        for e in obj:
            update_hash(hasher, e)
    elif isinstance(obj, dict):
        # Sort the keys so logically-equal dicts hash identically.
        for k in sorted(obj):
            update_hash(hasher, k)
            update_hash(hasher, obj[k])
    else:
        hasher.update(repr(obj).encode("latin-1"))
@function_trace('safe_exec')
def safe_exec(
    code,
    globals_dict,
    random_seed=None,
    python_path=None,
    extra_files=None,
    cache=None,
    limit_overrides_context=None,
    slug=None,
    unsafely=False,
):
    """
    Execute python code safely.

    `code` is the Python code to execute.  It has access to the globals in `globals_dict`,
    and any changes it makes to those globals are visible in `globals_dict` when this
    function returns.

    `random_seed` will be used to seed the `random` module available to the code.

    `python_path` is a list of filenames or directories to add to the Python
    path before execution.  If the name is not in `extra_files`, then it will
    also be copied into the sandbox.

    `extra_files` is a list of (filename, contents) pairs.  These files are
    created in the sandbox.

    `cache` is an object with .get(key) and .set(key, value) methods.  It will be used
    to cache the execution, taking into account the code, the values of the globals,
    and the random seed.

    `limit_overrides_context` is an optional string to be used as a key on
    the `settings.CODE_JAIL['limit_overrides']` dictionary in order to apply
    context-specific overrides to the codejail execution limits.
    If `limit_overrides_context` is omitted or not present in limit_overrides,
    then use the default limits specified in settings.CODE_JAIL['limits'].

    `slug` is an arbitrary string, a description that's meaningful to the
    caller, that will be used in log messages.

    If `unsafely` is true, then the code will actually be executed without sandboxing.
    """
    # Check the cache for a previous result.
    if cache:
        # Canonicalize the globals to JSON-safe values so the cache key is
        # stable, then hash code + globals + seed.
        safe_globals = json_safe(globals_dict)
        md5er = hashlib.md5()
        md5er.update(repr(code).encode('utf-8'))
        update_hash(md5er, safe_globals)
        key = "safe_exec.%r.%s" % (random_seed, md5er.hexdigest())
        cached = cache.get(key)
        if cached is not None:
            # We have a cached result. The result is a pair: the exception
            # message, if any, else None; and the resulting globals dictionary.
            emsg, cleaned_results = cached
            globals_dict.update(cleaned_results)
            if emsg:
                raise SafeExecException(emsg)
            return
    # Create the complete code we'll run.
    code_prolog = CODE_PROLOG % random_seed
    if is_codejail_rest_service_enabled():
        # Delegate execution to the remote codejail service; it mutates
        # globals_dict and reports (error message, exception) back.
        data = {
            "code": code_prolog + LAZY_IMPORTS + code,
            "globals_dict": globals_dict,
            "python_path": python_path,
            "limit_overrides_context": limit_overrides_context,
            "slug": slug,
            "unsafely": unsafely,
            "extra_files": extra_files,
        }
        emsg, exception = get_remote_exec(data)
    else:
        # Decide which code executor to use.
        if unsafely:
            exec_fn = codejail_not_safe_exec
        else:
            exec_fn = codejail_safe_exec
        # Run the code!  Results are side effects in globals_dict.
        try:
            exec_fn(
                code_prolog + LAZY_IMPORTS + code,
                globals_dict,
                python_path=python_path,
                extra_files=extra_files,
                limit_overrides_context=limit_overrides_context,
                slug=slug,
            )
        except SafeExecException as e:
            # Saving SafeExecException e in exception to be used later.
            exception = e
            emsg = text_type(e)
        else:
            # Success: no message; `exception` stays unbound here, but the
            # final `raise exception` is only reached when emsg is set.
            emsg = None
    # Put the result back in the cache.  This is complicated by the fact that
    # the globals dict might not be entirely serializable.
    if cache:
        cleaned_results = json_safe(globals_dict)
        cache.set(key, (emsg, cleaned_results))
    # If an exception happened, raise it now.
    if emsg:
        raise exception
|
from ddt import ddt, data
from django.core.urlresolvers import reverse
from django.test import TestCase
import mock
from analyticsclient.exceptions import NotFoundError
from courses.tests import SwitchMixin
from courses.tests.test_views import ViewTestMixin, DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID
from courses.tests.utils import convert_list_of_dicts_to_csv, get_mock_api_enrollment_geography_data, \
get_mock_api_enrollment_data, get_mock_api_course_activity, get_mock_api_enrollment_age_data, \
get_mock_api_enrollment_education_data, get_mock_api_enrollment_gender_data
@ddt
class CourseCSVTestMixin(ViewTestMixin):
    client = None
    column_headings = None
    base_file_name = None

    def assertIsValidCSV(self, course_id, csv_data):
        """Download the CSV for `course_id` and verify type, filename, body."""
        response = self.client.get(self.path(course_id=course_id))

        # Check content type
        self.assertResponseContentType(response, 'text/csv')

        # Check filename: prefix depends on which demo course id was used.
        if course_id == DEMO_COURSE_ID:
            csv_prefix = u'edX-DemoX-Demo_2014'
        else:
            csv_prefix = u'edX-DemoX-Demo_Course'
        expected_filename = '{0}--{1}.csv'.format(csv_prefix, self.base_file_name)
        self.assertResponseFilename(response, expected_filename)

        # Check data
        self.assertEqual(response.content, csv_data)

    def assertResponseContentType(self, response, content_type):
        self.assertEqual(response['Content-Type'], content_type)

    def assertResponseFilename(self, response, filename):
        expected_disposition = 'attachment; filename="{0}"'.format(filename)
        self.assertEqual(response['Content-Disposition'], expected_disposition)

    def _test_csv(self, course_id, csv_data):
        # Patch the API client so the view serves exactly `csv_data`.
        with mock.patch(self.api_method, return_value=csv_data):
            self.assertIsValidCSV(course_id, csv_data)

    @data(DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID)
    def test_response_no_data(self, course_id):
        # An "empty" CSV still carries the header row.
        empty_csv = convert_list_of_dicts_to_csv([], self.column_headings)
        self._test_csv(course_id, empty_csv)

    @data(DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID)
    def test_response(self, course_id):
        mock_rows = self.get_mock_data(course_id)
        self._test_csv(course_id, convert_list_of_dicts_to_csv(mock_rows))

    def test_404(self):
        course_id = 'fakeOrg/soFake/Fake_Course'
        self.grant_permission(self.user, course_id)
        path = reverse(self.viewname, kwargs={'course_id': course_id})
        # A missing course on the API side must surface as an HTTP 404.
        with mock.patch(self.api_method, side_effect=NotFoundError):
            response = self.client.get(path, follow=True)
            self.assertEqual(response.status_code, 404)
class CourseEnrollmentByCountryCSVViewTests(CourseCSVTestMixin, TestCase):
    """CSV download of enrollment counts broken down by country."""
    viewname = 'courses:csv:enrollment_geography'
    # Expected CSV header columns, in order.
    column_headings = ['count', 'country', 'course_id', 'date']
    base_file_name = 'enrollment-location'
    # Dotted path of the analytics client method patched by _test_csv.
    api_method = 'analyticsclient.course.Course.enrollment'

    def get_mock_data(self, course_id):
        return get_mock_api_enrollment_geography_data(course_id)
class CourseEnrollmentCSVViewTests(CourseCSVTestMixin, TestCase):
    """CSV download of overall enrollment counts over time."""
    viewname = 'courses:csv:enrollment'
    # Expected CSV header columns, in order.
    column_headings = ['count', 'course_id', 'date']
    base_file_name = 'enrollment'
    # Dotted path of the analytics client method patched by _test_csv.
    api_method = 'analyticsclient.course.Course.enrollment'

    def get_mock_data(self, course_id):
        return get_mock_api_enrollment_data(course_id)
class CourseEnrollmentModeCSVViewTests(SwitchMixin, CourseCSVTestMixin, TestCase):
    """CSV download of enrollment including per-mode columns (audit/honor/...)."""
    viewname = 'courses:csv:enrollment'
    column_headings = ['count', 'course_id', 'date', 'audit', 'honor', 'professional', 'verified']
    base_file_name = 'enrollment'
    api_method = 'analyticsclient.course.Course.enrollment'

    @classmethod
    def setUpClass(cls):
        # Bug fix: the parent setUpClass was never called, skipping TestCase
        # class-level fixture setup; chain up before toggling the switch.
        super(CourseEnrollmentModeCSVViewTests, cls).setUpClass()
        cls.toggle_switch('display_verified_enrollment', True)

    def get_mock_data(self, course_id):
        return get_mock_api_enrollment_data(course_id)
class CourseEnrollmentDemographicsByAgeCSVViewTests(CourseCSVTestMixin, TestCase):
    """CSV download of enrollment demographics grouped by birth year."""
    viewname = 'courses:csv:enrollment_demographics_age'
    # Expected CSV header columns, in order.
    column_headings = ['birth_year', 'count', 'course_id', 'created', 'date']
    base_file_name = 'enrollment-by-birth-year'
    # Dotted path of the analytics client method patched by _test_csv.
    api_method = 'analyticsclient.course.Course.enrollment'

    def get_mock_data(self, course_id):
        return get_mock_api_enrollment_age_data(course_id)
class CourseEnrollmentDemographicsByEducationCSVViewTests(CourseCSVTestMixin, TestCase):
    """CSV download of enrollment demographics grouped by education level."""
    viewname = 'courses:csv:enrollment_demographics_education'
    # Expected CSV header columns, in order.
    column_headings = ['count', 'course_id', 'created', 'date', 'education_level.name', 'education_level.short_name']
    base_file_name = 'enrollment-by-education'
    # Dotted path of the analytics client method patched by _test_csv.
    api_method = 'analyticsclient.course.Course.enrollment'

    def get_mock_data(self, course_id):
        return get_mock_api_enrollment_education_data(course_id)
class CourseEnrollmentByDemographicsGenderCSVViewTests(CourseCSVTestMixin, TestCase):
    """CSV download of enrollment demographics grouped by gender."""
    viewname = 'courses:csv:enrollment_demographics_gender'
    # Expected CSV header columns, in order.
    column_headings = ['count', 'course_id', 'created', 'date', 'gender']
    base_file_name = 'enrollment-by-gender'
    # Dotted path of the analytics client method patched by _test_csv.
    api_method = 'analyticsclient.course.Course.enrollment'

    def get_mock_data(self, course_id):
        return get_mock_api_enrollment_gender_data(course_id)
class CourseEngagementActivityTrendCSVViewTests(CourseCSVTestMixin, TestCase):
    """CSV download of weekly engagement activity (video/problem/forum)."""
    viewname = 'courses:csv:engagement_activity_trend'
    # Expected CSV header columns, in order.
    column_headings = ['any', 'attempted_problem', 'course_id', 'interval_end', 'interval_start',
                       'played_video', 'posted_forum']
    base_file_name = 'engagement-activity'
    # Dotted path of the analytics client method patched by _test_csv.
    api_method = 'analyticsclient.course.Course.activity'

    def get_mock_data(self, course_id):
        return get_mock_api_course_activity(course_id)
|
import time
from odoo import api, fields, models
class ProductProduct(models.Model):
    _inherit = "product.product"

    # All fields below are non-stored and computed on the fly from posted
    # customer invoices / vendor bills over the context date range.
    date_from = fields.Date(compute='_compute_product_margin_fields_values', string='Margin Date From')
    date_to = fields.Date(compute='_compute_product_margin_fields_values', string='Margin Date To')
    invoice_state = fields.Selection(compute='_compute_product_margin_fields_values',
                                     selection=[
                                         ('paid', 'Paid'),
                                         ('open_paid', 'Open and Paid'),
                                         ('draft_open_paid', 'Draft, Open and Paid')
                                     ], string='Invoice State', readonly=True)
    sale_avg_price = fields.Float(compute='_compute_product_margin_fields_values', string='Avg. Sale Unit Price',
                                  help="Avg. Price in Customer Invoices.")
    purchase_avg_price = fields.Float(compute='_compute_product_margin_fields_values', string='Avg. Purchase Unit Price',
                                      help="Avg. Price in Vendor Bills ")
    sale_num_invoiced = fields.Float(compute='_compute_product_margin_fields_values', string='# Invoiced in Sale',
                                     help="Sum of Quantity in Customer Invoices")
    purchase_num_invoiced = fields.Float(compute='_compute_product_margin_fields_values', string='# Invoiced in Purchase',
                                         help="Sum of Quantity in Vendor Bills")
    sales_gap = fields.Float(compute='_compute_product_margin_fields_values', string='Sales Gap',
                             help="Expected Sale - Turn Over")
    purchase_gap = fields.Float(compute='_compute_product_margin_fields_values', string='Purchase Gap',
                                help="Normal Cost - Total Cost")
    turnover = fields.Float(compute='_compute_product_margin_fields_values', string='Turnover',
                            help="Sum of Multiplication of Invoice price and quantity of Customer Invoices")
    total_cost = fields.Float(compute='_compute_product_margin_fields_values', string='Total Cost',
                              help="Sum of Multiplication of Invoice price and quantity of Vendor Bills ")
    sale_expected = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Sale',
                                 help="Sum of Multiplication of Sale Catalog price and quantity of Customer Invoices")
    normal_cost = fields.Float(compute='_compute_product_margin_fields_values', string='Normal Cost',
                               help="Sum of Multiplication of Cost price and quantity of Vendor Bills")
    total_margin = fields.Float(compute='_compute_product_margin_fields_values', string='Total Margin',
                                help="Turnover - Standard price")
    expected_margin = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Margin',
                                   help="Expected Sale - Normal Cost")
    total_margin_rate = fields.Float(compute='_compute_product_margin_fields_values', string='Total Margin Rate(%)',
                                     help="Total margin * 100 / Turnover")
    expected_margin_rate = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Margin (%)',
                                        help="Expected margin * 100 / Expected Sale")

    @api.model
    def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        """
        Inherit read_group to calculate the sum of the non-stored fields, as it is not automatically done anymore through the XML.
        """
        res = super(ProductProduct, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
        # Non-stored computed fields that must be aggregated manually.
        # NOTE(review): 'sale_purchase_price' matches no field on this model
        # ('purchase_avg_price' looks intended) -- kept as-is, confirm.
        fields_list = ['turnover', 'sale_avg_price', 'sale_purchase_price', 'sale_num_invoiced', 'purchase_num_invoiced',
                       'sales_gap', 'purchase_gap', 'total_cost', 'sale_expected', 'normal_cost', 'total_margin',
                       'expected_margin', 'total_margin_rate', 'expected_margin_rate']
        if any(x in fields for x in fields_list):
            # Calculate first for every product in which line it needs to be applied
            re_ind = 0
            prod_re = {}
            tot_products = self.browse([])
            for re in res:
                if re.get('__domain'):
                    products = self.search(re['__domain'])
                    tot_products |= products
                    for prod in products:
                        prod_re[prod.id] = re_ind
                re_ind += 1
            # Bug fix: this previously filtered on `fields in fields_list`
            # (testing the whole list as a member), which is always False and
            # made field_names empty; each requested field must be tested.
            res_val = tot_products._compute_product_margin_fields_values(field_names=[x for x in fields if x in fields_list])
            # Fold each product's computed values into its group line.
            for key in res_val:
                for l in res_val[key]:
                    re = res[prod_re[key]]
                    if re.get(l):
                        re[l] += res_val[key][l]
                    else:
                        re[l] = res_val[key][l]
        return res

    def _compute_product_margin_fields_values(self, field_names=None):
        """Compute all margin figures for each product in `self`.

        Reads 'date_from', 'date_to' and 'invoice_state' from the context
        (defaulting to the current calendar year and 'open_paid'), aggregates
        posted customer invoices and vendor bills with one SQL query per
        direction, and both writes the values on the records and returns them
        as {product_id: {field: value}}.
        """
        res = {}
        if field_names is None:
            field_names = []
        for val in self:
            res[val.id] = {}
            date_from = self.env.context.get('date_from', time.strftime('%Y-01-01'))
            date_to = self.env.context.get('date_to', time.strftime('%Y-12-31'))
            invoice_state = self.env.context.get('invoice_state', 'open_paid')
            res[val.id]['date_from'] = date_from
            res[val.id]['date_to'] = date_to
            res[val.id]['invoice_state'] = invoice_state
            # Map the selection value to move states / payment states.
            # NOTE(review): an unknown invoice_state leaves both tuples empty,
            # which would make the `IN %s` clauses fail -- callers are assumed
            # to pass one of the three selection values.
            states = ()
            payment_states = ()
            if invoice_state == 'paid':
                states = ('posted',)
                payment_states = ('paid',)
            elif invoice_state == 'open_paid':
                states = ('posted',)
                payment_states = ('not_paid', 'paid')
            elif invoice_state == 'draft_open_paid':
                states = ('posted', 'draft')
                payment_states = ('not_paid', 'paid')
            company_id = self.env.company.id

            #Cost price is calculated afterwards as it is a property
            # Flush pending ORM writes so the raw SQL sees current data.
            self.env['account.move.line'].flush(['price_unit', 'quantity', 'balance', 'product_id', 'display_type'])
            self.env['account.move'].flush(['state', 'payment_state', 'move_type', 'invoice_date', 'company_id'])
            self.env['product.template'].flush(['list_price'])
            sqlstr = """
                    WITH currency_rate AS ({})
                    SELECT
                        SUM(l.price_unit / (CASE COALESCE(cr.rate, 0) WHEN 0 THEN 1.0 ELSE cr.rate END) * l.quantity) / NULLIF(SUM(l.quantity),0) AS avg_unit_price,
                        SUM(l.quantity * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS num_qty,
                        SUM(ABS(l.balance) * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS total,
                        SUM(l.quantity * pt.list_price * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS sale_expected
                    FROM account_move_line l
                    LEFT JOIN account_move i ON (l.move_id = i.id)
                    LEFT JOIN product_product product ON (product.id=l.product_id)
                    LEFT JOIN product_template pt ON (pt.id = product.product_tmpl_id)
                    left join currency_rate cr on
                    (cr.currency_id = i.currency_id and
                     cr.company_id = i.company_id and
                     cr.date_start <= COALESCE(i.invoice_date, NOW()) and
                     (cr.date_end IS NULL OR cr.date_end > COALESCE(i.invoice_date, NOW())))
                    WHERE l.product_id = %s
                    AND i.state IN %s
                    AND i.payment_state IN %s
                    AND i.move_type IN %s
                    AND i.invoice_date BETWEEN %s AND %s
                    AND i.company_id = %s
                    AND l.display_type IS NULL
                    AND l.exclude_from_invoice_tab = false
                    """.format(self.env['res.currency']._select_companies_rates())
            # Sale side: customer invoices and refunds.
            invoice_types = ('out_invoice', 'out_refund')
            self.env.cr.execute(sqlstr, (val.id, states, payment_states, invoice_types, date_from, date_to, company_id))
            result = self.env.cr.fetchall()[0]
            res[val.id]['sale_avg_price'] = result[0] and result[0] or 0.0
            res[val.id]['sale_num_invoiced'] = result[1] and result[1] or 0.0
            res[val.id]['turnover'] = result[2] and result[2] or 0.0
            res[val.id]['sale_expected'] = result[3] and result[3] or 0.0
            res[val.id]['sales_gap'] = res[val.id]['sale_expected'] - res[val.id]['turnover']
            # Purchase side: vendor bills and refunds.
            invoice_types = ('in_invoice', 'in_refund')
            self.env.cr.execute(sqlstr, (val.id, states, payment_states, invoice_types, date_from, date_to, company_id))
            result = self.env.cr.fetchall()[0]
            res[val.id]['purchase_avg_price'] = result[0] and result[0] or 0.0
            res[val.id]['purchase_num_invoiced'] = result[1] and result[1] or 0.0
            res[val.id]['total_cost'] = result[2] and result[2] or 0.0
            res[val.id]['normal_cost'] = val.standard_price * res[val.id]['purchase_num_invoiced']
            res[val.id]['purchase_gap'] = res[val.id]['normal_cost'] - res[val.id]['total_cost']

            # Derived margins and rates.
            res[val.id]['total_margin'] = res[val.id]['turnover'] - res[val.id]['total_cost']
            res[val.id]['expected_margin'] = res[val.id]['sale_expected'] - res[val.id]['normal_cost']
            res[val.id]['total_margin_rate'] = res[val.id]['turnover'] and res[val.id]['total_margin'] * 100 / res[val.id]['turnover'] or 0.0
            res[val.id]['expected_margin_rate'] = res[val.id]['sale_expected'] and res[val.id]['expected_margin'] * 100 / res[val.id]['sale_expected'] or 0.0
            # Write every computed value on the record itself.
            for k, v in res[val.id].items():
                setattr(val, k, v)
        return res
|
import os, datetime, time, re
from itertools import izip
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from scipy import stats
import numpy as np
class virus_clean(object):
    """Filter and normalize a set of aligned virus sequences (Python 2).

    Mixin used by the pipeline: expects the host object to also provide
    self.viruses, self.sequence_lookup, self.outgroup, self.date_format and
    self.verbose, which are set elsewhere.
    """
    def __init__(self,n_iqd = 5, **kwargs):
        '''
        parameters
        n_iqd -- number of interquartile distances accepted in molecular clock filter
        '''
        self.n_iqd = n_iqd
    def remove_insertions(self):
        '''
        remove all columns from the alignment in which the outgroup is gapped
        '''
        # Boolean mask of alignment columns where the outgroup has a residue.
        outgroup_ok = np.array(self.sequence_lookup[self.outgroup['strain']])!='-'
        for seq in self.viruses:
            # Keep only the outgroup-ungapped columns, upper-cased.
            seq.seq = Seq("".join(np.array(seq.seq)[outgroup_ok]).upper())
    def clean_gaps(self):
        '''
        remove viruses with gaps -- not part of the standard pipeline
        '''
        # NOTE(review): as written this KEEPS sequences that contain '-'
        # rather than removing them; `'-' not in x.seq` looks intended --
        # confirm before relying on this (it is not in the standard pipeline).
        self.viruses = filter(lambda x: '-' in x.seq, self.viruses)
    def clean_ambiguous(self):
        '''
        substitute all ambiguous characters with '-',
        ancestral inference will interpret this as missing data
        '''
        for v in self.viruses:
            v.seq = Seq(re.sub(r'[BDEFHIJKLMNOPQRSUVWXYZ]', '-',str(v.seq)))
    def unique_date(self):
        '''
        add a unique numerical date to each leaf. uniqueness is achieved adding a small number
        '''
        from date_util import numerical_date
        og = self.sequence_lookup[self.outgroup['strain']]
        if hasattr(og, 'date'):
            try:
                og.num_date = numerical_date(og.date)
            except:
                print "cannot parse date"
                og.num_date="undefined";
        for ii, v in enumerate(self.viruses):
            if hasattr(v, 'date'):
                try:
                    # 1e-7*(ii+1) offset guarantees distinct num_date values.
                    v.num_date = numerical_date(v.date, self.date_format['fields']) + 1e-7*(ii+1)
                except:
                    print "cannot parse date"
                    v.num_date="undefined";
    def times_from_outgroup(self):
        # Numerical dates of all viruses relative to the outgroup date.
        outgroup_date = self.sequence_lookup[self.outgroup['strain']].num_date
        return np.array([x.num_date-outgroup_date for x in self.viruses if x.strain])
    def distance_from_outgroup(self):
        # Hamming distances of every virus sequence to the outgroup sequence.
        from seq_util import hamming_distance
        outgroup_seq = self.sequence_lookup[self.outgroup['strain']].seq
        return np.array([hamming_distance(x.seq, outgroup_seq) for x in self.viruses if x.strain])
    def clean_distances(self):
        """Remove viruses that don't follow a loose clock """
        # Fit distance-vs-time; residuals measure deviation from the clock.
        times = self.times_from_outgroup()
        distances = self.distance_from_outgroup()
        slope, intercept, r_value, p_value, std_err = stats.linregress(times, distances)
        residuals = slope*times + intercept - distances
        r_iqd = stats.scoreatpercentile(residuals,75) - stats.scoreatpercentile(residuals,25)
        if self.verbose:
            print "\tslope: " + str(slope)
            print "\tr: " + str(r_value)
            print "\tresiduals iqd: " + str(r_iqd)
        new_viruses = []
        for (v,r) in izip(self.viruses,residuals):
            # filter viruses more than n_iqd interquartile distances up or down;
            # the outgroup itself is always kept
            if np.abs(r)<self.n_iqd * r_iqd or v.id == self.outgroup["strain"]:
                new_viruses.append(v)
            else:
                if self.verbose>1:
                    print "\t\tresidual:", r, "\nremoved ",v.strain
        self.viruses = MultipleSeqAlignment(new_viruses)
    def clean_generic(self):
        # Standard cleaning pipeline: dates, outgroup-gap columns, ambiguous
        # characters, clock outliers, then sort chronologically.
        print "Number of viruses before cleaning:",len(self.viruses)
        self.unique_date()
        self.remove_insertions()
        self.clean_ambiguous()
        self.clean_distances()
        self.viruses.sort(key=lambda x:x.num_date)
        print "Number of viruses after outlier filtering:",len(self.viruses)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.