| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tasking_dsz.py
import mcl.framework
import mcl.tasking
class dsz:
INTERFACE = 16842801
PFAM = 4129
PROVIDER_ANY = 4129
PROVIDER = 16846881
PROVIDER_FLAV = 16912417
RPC_INFO_SEND = mcl.tasking.RpcInfo(mcl.framework.DSZ, [INTERFACE, PROVIDER_ANY, 0])
| DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/network/cmd/ping/tasking_dsz.py | Python | unlicense | 444 |
import sys
import struct
from optparse import OptionParser
# Starbound Spawn Coordinate Changer
# Based on http://seancode.com/galileo/format/wrldb.html
# usage: python main.py -x coordinate -y coordinate worldfile
KEY = bytearray([76, 76, 0, 0, 0, 1, 0, 0, 0, 0, 0])
WORLD_WIDTH_OFFSET = 0
WORLD_HEIGHT_OFFSET = 4
METADATA_LENGTH_OFFSET = 8
METADATA_VERSION_OFFSET = 0
METADATA_SPAWN_X_OFFSET = 4
METADATA_SPAWN_Y_OFFSET = 8
def main(argv):
usage = "usage: %prog [options] world_file"
parser = OptionParser(usage=usage)
parser.add_option("-x", action="store", type="float", dest="new_spawn_x", help="Change X coordinate of spawn", metavar="X_Coordinate")
parser.add_option("-y", action="store", type="float", dest="new_spawn_y", help="Change Y coordinate of spawn", metavar="Y_Coordinate")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
return 1
filename_world = args[0]
    with open(filename_world, "rb") as world_file:
        world_bytes = bytearray(world_file.read())
indices = find_key_indices(world_bytes)
if len(indices) == 0:
print "World header not found, unable to continue."
return 1
elif len(indices) > 1:
print "Multiple matching world headers found, listing/updating all of them just to be sure."
for index in indices:
world_header_length = determine_length_of_packed_bytes(world_bytes, index + len(KEY))
world_header_index = index + len(KEY) + world_header_length
width = get_int_from_bytes(world_bytes, world_header_index + WORLD_WIDTH_OFFSET)
height = get_int_from_bytes(world_bytes, world_header_index + WORLD_HEIGHT_OFFSET)
metadata_length = determine_length_of_packed_bytes(world_bytes, world_header_index + METADATA_LENGTH_OFFSET)
metadata_header_index = world_header_index + METADATA_LENGTH_OFFSET + metadata_length
version = get_int_from_bytes(world_bytes, metadata_header_index + METADATA_VERSION_OFFSET)
spawn_x = get_float_from_bytes(world_bytes, metadata_header_index + METADATA_SPAWN_X_OFFSET)
spawn_y = get_float_from_bytes(world_bytes, metadata_header_index + METADATA_SPAWN_Y_OFFSET)
        if options.new_spawn_x is None and options.new_spawn_y is None:
print ""
print "World header at index %d" % world_header_index
print "Width: %d" % width
print "Height: %d" % height
print "Version: %d" % version
print "Spawn X: %s" % str(spawn_x)
print "Spawn Y: %s" % str(spawn_y)
else:
            if options.new_spawn_x is not None:
new_spawn_x_bytes = struct.pack('!f', options.new_spawn_x)
for i in range(0, 4):
world_bytes[metadata_header_index + METADATA_SPAWN_X_OFFSET + i] = new_spawn_x_bytes[i]
print "Spawn X set to: %s" %str(get_float_from_bytes(world_bytes, metadata_header_index + METADATA_SPAWN_X_OFFSET))
            if options.new_spawn_y is not None:
new_spawn_y_bytes = struct.pack('!f', options.new_spawn_y)
for i in range(0, 4):
world_bytes[metadata_header_index + METADATA_SPAWN_Y_OFFSET + i] = new_spawn_y_bytes[i]
print "Spawn Y set to: %s" %str(get_float_from_bytes(world_bytes, metadata_header_index + METADATA_SPAWN_Y_OFFSET))
    with open(filename_world, "wb") as world_file:
        world_file.write(world_bytes)
# Done
return 0
def find_key_indices(world_bytes):
index = 0
indices = []
while(index != -1):
index = world_bytes.find(KEY, index + 1)
if(index != -1):
indices.append(index)
return indices
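# Lengths in the world file are stored as variable-length quantities (VLQ):
# a byte with the high bit set (>= 128) means another byte follows.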
def determine_length_of_packed_bytes(world_bytes, index):
length = 1
done = False
current_index = index
while (not done):
if (world_bytes[current_index] >= 128):
length = length + 1
current_index = current_index + 1
else:
done = True
return length
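# Reads a big-endian unsigned 32-bit integer byte by byte (the same value
# struct.unpack('!I', ...) would give for the four bytes at index).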
def get_int_from_bytes(world_bytes, index):
return (256**3 * world_bytes[index]) + (256**2 * world_bytes[index + 1]) + (256 * world_bytes[index + 2]) + world_bytes[index + 3]
def get_float_from_bytes(world_bytes, index):
    # struct.unpack returns a 1-tuple; take the scalar big-endian float
    return struct.unpack('!f', str(world_bytes[index : index + 4]))[0]
if __name__ == "__main__":
sys.exit(main(sys.argv))
| Omnipotence/Starbound-Spawn-Changer | main.py | Python | mit | 4,458 |
#!/usr/bin/env python
#
#
#
# Copyright (c) 2009 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import os.path
from django.conf.urls.defaults import *
from django.conf import settings
from django.core.urlresolvers import reverse
from omero_qa.validator import views
# url patterns
urlpatterns = patterns('',
url( r'^upload/$', views.upload, name='validator_upload'),
url( r'^web_upload_processing/$', views.upload_from_web_processing, name='validator_web_upload_processing'),
url( r'^file_list/$', views.file_list, name='validator_file_list'),
url( r'^delete_file/(?P<file_name>.*)/$', views.delete_file, name='validator_delete_file'),
)
| sbesson/registry | omero_qa/validator/urls.py | Python | agpl-3.0 | 1,407 |
# source: http://stackoverflow.com/questions/2758159/how-to-embed-a-python-interpreter-in-a-pyqt-widget
import sys
import os
import re
import traceback
import platform
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from electrum import util
from electrum.i18n import _
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
class OverlayLabel(QtWidgets.QLabel):
STYLESHEET = '''
QLabel, QLabel link {
color: rgb(0, 0, 0);
background-color: rgb(248, 240, 200);
border: 1px solid;
border-color: rgb(255, 114, 47);
padding: 2px;
}
'''
def __init__(self, text, parent):
super().__init__(text, parent)
self.setMinimumHeight(150)
self.setGeometry(0, 0, self.width(), self.height())
self.setStyleSheet(self.STYLESHEET)
self.setMargin(0)
parent.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setWordWrap(True)
def mousePressEvent(self, e):
self.hide()
def on_resize(self, w):
padding = 2 # px, from the stylesheet above
self.setFixedWidth(w - padding)
class Console(QtWidgets.QPlainTextEdit):
def __init__(self, prompt='>> ', startup_message='', parent=None):
QtWidgets.QPlainTextEdit.__init__(self, parent)
self.prompt = prompt
self.history = []
self.namespace = {}
self.construct = []
self.setGeometry(50, 75, 600, 400)
self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
self.setUndoRedoEnabled(False)
self.document().setDefaultFont(QtGui.QFont(MONOSPACE_FONT, 10, QtGui.QFont.Normal))
self.showMessage(startup_message)
self.updateNamespace({'run':self.run_script})
self.set_json(False)
warning_text = "<h1>{}</h1><br>{}<br><br>{}".format(
_("Warning!"),
_("Do not paste code here that you don't understand. Executing the wrong code could lead "
"to your coins being irreversibly lost."),
_("Click here to hide this message.")
)
self.messageOverlay = OverlayLabel(warning_text, self)
def resizeEvent(self, e):
super().resizeEvent(e)
vertical_scrollbar_width = self.verticalScrollBar().width() * self.verticalScrollBar().isVisible()
self.messageOverlay.on_resize(self.width() - vertical_scrollbar_width)
def set_json(self, b):
self.is_json = b
def run_script(self, filename):
with open(filename) as f:
script = f.read()
# eval is generally considered bad practice. use it wisely!
result = eval(script, self.namespace, self.namespace)
def updateNamespace(self, namespace):
self.namespace.update(namespace)
def showMessage(self, message):
self.appendPlainText(message)
self.newPrompt()
def clear(self):
self.setPlainText('')
self.newPrompt()
def newPrompt(self):
if self.construct:
prompt = '.' * len(self.prompt)
else:
prompt = self.prompt
self.completions_pos = self.textCursor().position()
self.completions_visible = False
self.appendPlainText(prompt)
self.moveCursor(QtGui.QTextCursor.End)
def getCommand(self):
doc = self.document()
curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
curr_line = curr_line.rstrip()
curr_line = curr_line[len(self.prompt):]
return curr_line
def setCommand(self, command):
if self.getCommand() == command:
return
doc = self.document()
curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
self.moveCursor(QtGui.QTextCursor.End)
for i in range(len(curr_line) - len(self.prompt)):
self.moveCursor(QtGui.QTextCursor.Left, QtGui.QTextCursor.KeepAnchor)
self.textCursor().removeSelectedText()
self.textCursor().insertText(command)
self.moveCursor(QtGui.QTextCursor.End)
def show_completions(self, completions):
if self.completions_visible:
self.hide_completions()
c = self.textCursor()
c.setPosition(self.completions_pos)
completions = map(lambda x: x.split('.')[-1], completions)
t = '\n' + ' '.join(completions)
if len(t) > 500:
t = t[:500] + '...'
c.insertText(t)
self.completions_end = c.position()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = True
def hide_completions(self):
if not self.completions_visible:
return
c = self.textCursor()
c.setPosition(self.completions_pos)
l = self.completions_end - self.completions_pos
for x in range(l): c.deleteChar()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = False
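    # Multi-line input: a line ending in ':' opens a construct that is buffered
    # until two consecutive empty lines are entered, then the whole block is
    # returned for execution.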
def getConstruct(self, command):
if self.construct:
prev_command = self.construct[-1]
self.construct.append(command)
if not prev_command and not command:
ret_val = '\n'.join(self.construct)
self.construct = []
return ret_val
else:
return ''
else:
if command and command[-1] == (':'):
self.construct.append(command)
return ''
else:
return command
def getHistory(self):
return self.history
    def setHistory(self, history):
self.history = history
def addToHistory(self, command):
if command[0:1] == ' ':
return
if command and (not self.history or self.history[-1] != command):
self.history.append(command)
self.history_index = len(self.history)
def getPrevHistoryEntry(self):
if self.history:
self.history_index = max(0, self.history_index - 1)
return self.history[self.history_index]
return ''
def getNextHistoryEntry(self):
if self.history:
hist_len = len(self.history)
self.history_index = min(hist_len, self.history_index + 1)
if self.history_index < hist_len:
return self.history[self.history_index]
return ''
def getCursorPosition(self):
c = self.textCursor()
return c.position() - c.block().position() - len(self.prompt)
def setCursorPosition(self, position):
self.moveCursor(QtGui.QTextCursor.StartOfLine)
for i in range(len(self.prompt) + position):
self.moveCursor(QtGui.QTextCursor.Right)
def register_command(self, c, func):
methods = { c: func}
self.updateNamespace(methods)
def runCommand(self):
command = self.getCommand()
self.addToHistory(command)
command = self.getConstruct(command)
if command:
tmp_stdout = sys.stdout
class stdoutProxy():
def __init__(self, write_func):
self.write_func = write_func
self.skip = False
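                # 'skip' drops every other write: print() emits the trailing
                # newline as a separate call, and appendPlainText already adds
                # its own line break.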
def flush(self):
pass
def write(self, text):
if not self.skip:
stripped_text = text.rstrip('\n')
self.write_func(stripped_text)
QtCore.QCoreApplication.processEvents()
self.skip = not self.skip
if type(self.namespace.get(command)) == type(lambda:None):
self.appendPlainText("'{}' is a function. Type '{}()' to use it in the Python console."
.format(command, command))
self.newPrompt()
return
sys.stdout = stdoutProxy(self.appendPlainText)
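            # REPL idiom: try eval() first so expression results can be echoed;
            # statements raise SyntaxError under eval and fall through to the
            # exec() in the handler below.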
try:
try:
# eval is generally considered bad practice. use it wisely!
result = eval(command, self.namespace, self.namespace)
                    if result is not None:
if self.is_json:
util.print_msg(util.json_encode(result))
else:
self.appendPlainText(repr(result))
except SyntaxError:
# exec is generally considered bad practice. use it wisely!
exec(command, self.namespace, self.namespace)
except SystemExit:
self.close()
except BaseException:
traceback_lines = traceback.format_exc().split('\n')
# Remove traceback mentioning this file, and a linebreak
for i in (3,2,1,-1):
traceback_lines.pop(i)
self.appendPlainText('\n'.join(traceback_lines))
sys.stdout = tmp_stdout
self.newPrompt()
self.set_json(False)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Tab:
self.completions()
return
self.hide_completions()
if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
self.runCommand()
return
if event.key() == QtCore.Qt.Key_Home:
self.setCursorPosition(0)
return
if event.key() == QtCore.Qt.Key_PageUp:
return
elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
if self.getCursorPosition() == 0:
return
elif event.key() == QtCore.Qt.Key_Up:
self.setCommand(self.getPrevHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_Down:
self.setCommand(self.getNextHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
self.clear()
super(Console, self).keyPressEvent(event)
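    # Tab completion: the last dotted token is completed against the console
    # namespace (single segment) or dir() of the resolved object (dotted path),
    # extending to the longest common prefix before listing alternatives.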
def completions(self):
cmd = self.getCommand()
        lastword = re.split(r' |\(|\)', cmd)[-1]
beginning = cmd[0:-len(lastword)]
path = lastword.split('.')
ns = self.namespace.keys()
if len(path) == 1:
            prefix = ''
else:
obj = self.namespace.get(path[0])
prefix = path[0] + '.'
ns = dir(obj)
completions = []
for x in ns:
if x[0] == '_':continue
xx = prefix + x
if xx.startswith(lastword):
completions.append(xx)
completions.sort()
if not completions:
self.hide_completions()
elif len(completions) == 1:
self.hide_completions()
self.setCommand(beginning + completions[0])
else:
# find common prefix
p = os.path.commonprefix(completions)
if len(p)>len(lastword):
self.hide_completions()
self.setCommand(beginning + p)
else:
self.show_completions(completions)
welcome_message = '''
---------------------------------------------------------------
Welcome to a primitive Python interpreter.
---------------------------------------------------------------
'''
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
console = Console(startup_message=welcome_message)
console.updateNamespace({'myVar1' : app, 'myVar2' : 1234})
console.show()
sys.exit(app.exec_())
| cryptapus/electrum | electrum/gui/qt/console.py | Python | mit | 11,672 |
# Copyright (C) 2016-2018, Raffaele Salmaso <raffaele@salmaso.org>
# Copyright (C) 2013, byteweaver
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of django-coupons nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.conf.urls import url
from django.utils.translation import gettext_lazy as _
from fluo import admin
from . import views
from .models import Campaign, Coupon, CouponUser
class CouponUserInline(admin.ReadOnlyTabularInline):
model = CouponUser
class CouponAdminForm(forms.ModelForm):
pass
@admin.register(Coupon)
class CouponAdmin(admin.ModelAdmin):
form = CouponAdminForm
generate_coupons_view = views.GenerateCouponsAdminView
list_display = ["code", "type", "_user_count", "value", "_user_limit", "_is_redeemed", "valid_from", "valid_until", "campaign"] # noqa: E501
list_filter = ["type", "action", "campaign", "created_at", "valid_from", "valid_until"]
raw_id_fields = []
search_fields = ["code", "value"]
inlines = [CouponUserInline]
related_search_fields = {
"user": ("pk", "username", "first_name", "last_name", "email"),
"campaign": ("pk", "name"),
}
def _user_count(self, coupon):
return coupon.users.count()
_user_count.short_description = _("user count")
def _user_limit(self, coupon):
return coupon.user_limit
_user_limit.short_description = _("user limit")
def _is_redeemed(self, coupon):
return coupon.is_redeemed
_is_redeemed.short_description = _("is redeemed")
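    # Custom admin URLs are prepended so the default catch-all object routes
    # in the stock admin urlpatterns do not shadow them.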
def get_urls(self):
urls = super().get_urls()
my_urls = [
url(r"^generate-coupons$", self.admin_site.admin_view(self.generate_coupons_view.as_view()), name="generate_coupons"), # noqa
]
return my_urls + urls
class CouponInline(admin.ReadOnlyTabularInline):
model = Coupon
exclude = ["created_at", "last_modified_at"]
@admin.register(Campaign)
class CampaignAdmin(admin.ModelAdmin):
list_display = ["name", "num_coupons", "num_coupons_used", "num_coupons_unused", "num_coupons_expired", "created_at"] # noqa
inlines = [CouponInline]
def num_coupons(self, obj):
return obj.coupons.count()
num_coupons.short_description = _("coupons")
def num_coupons_used(self, obj):
return obj.coupons.used().count()
num_coupons_used.short_description = _("used")
def num_coupons_unused(self, obj):
return obj.coupons.unused().count()
num_coupons_unused.short_description = _("unused")
def num_coupons_expired(self, obj):
return obj.coupons.expired().count()
num_coupons_expired.short_description = _("expired")
| rsalmaso/django-fluo-coupons | coupons/admin.py | Python | bsd-3-clause | 4,090 |
import os
import mock
from django.conf import settings
from zerver.lib.test_classes import ZulipTestCase
from zproject.email_backends import get_forward_address
class EmailLogTest(ZulipTestCase):
def test_generate_and_clear_email_log(self) -> None:
with self.settings(EMAIL_BACKEND='zproject.email_backends.EmailLogBackEnd'), \
mock.patch('zproject.email_backends.EmailLogBackEnd.send_email_smtp'), \
mock.patch('logging.info', return_value=None), \
self.settings(DEVELOPMENT_LOG_EMAILS=True):
result = self.client_get('/emails/generate/')
self.assertEqual(result.status_code, 302)
self.assertIn('emails', result['Location'])
result = self.client_get("/emails/")
self.assert_in_success_response(["All the emails sent in the Zulip"], result)
result = self.client_get('/emails/clear/')
self.assertEqual(result.status_code, 302)
result = self.client_get(result['Location'])
self.assertIn('manually generate most of the emails by clicking', str(result.content))
def test_forward_address_details(self) -> None:
forward_address = "forward-to@example.com"
result = self.client_post("/emails/", {"forward_address": forward_address})
self.assert_json_success(result)
self.assertEqual(get_forward_address(), forward_address)
with self.settings(EMAIL_BACKEND='zproject.email_backends.EmailLogBackEnd'), \
mock.patch('logging.info', return_value=None):
with mock.patch('zproject.email_backends.EmailLogBackEnd.send_email_smtp'):
result = self.client_get('/emails/generate/')
self.assertEqual(result.status_code, 302)
self.assertIn('emails', result['Location'])
result = self.client_get(result['Location'])
self.assert_in_success_response([forward_address], result)
os.remove(settings.FORWARD_ADDRESS_CONFIG_FILE)
| jackrzhang/zulip | zerver/tests/test_email_log.py | Python | apache-2.0 | 2,036 |
from sympy import residue, Symbol, Function, sin, S, I, pi, exp, log, sqrt, factorial
from sympy.utilities.pytest import XFAIL, raises
from sympy.abc import x, y, z, a, s
def test_basic1():
assert residue(1/x, x, 0) == 1
assert residue(-2/x, x, 0) == -2
assert residue(81/x, x, 0) == 81
assert residue(1/x**2, x, 0) == 0
assert residue(0, x, 0) == 0
assert residue(5, x, 0) == 0
assert residue(x, x, 0) == 0
assert residue(x**2, x, 0) == 0
def test_basic2():
assert residue(1/x, x, 1) == 0
assert residue(-2/x, x, 1) == 0
assert residue(81/x, x, -1) == 0
assert residue(1/x**2, x, 1) == 0
assert residue(0, x, 1) == 0
assert residue(5, x, 1) == 0
assert residue(x, x, 1) == 0
assert residue(x**2, x, 5) == 0
def _test_f():
# FIXME: we get infinite recursion here:
f = Function("f")
assert residue(f(x)/x**5, x, 0) == f.diff(x, 4)/24
def test_functions():
assert residue(1/sin(x), x, 0) == 1
assert residue(2/sin(x), x, 0) == 2
assert residue(1/sin(x)**2, x, 0) == 0
assert residue(1/sin(x)**5, x, 0) == S(3)/8
def test_expressions():
assert residue(1/(x + 1), x, 0) == 0
assert residue(1/(x + 1), x, -1) == 1
assert residue(1/(x**2 + 1), x, -1) == 0
assert residue(1/(x**2 + 1), x, I) == -I/2
assert residue(1/(x**2 + 1), x, -I) == I/2
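    # e.g. 1/(x**2 + 1) = 1/((x - I)*(x + I)), so the residue at x = I is
    # 1/(2*I) = -I/2.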
assert residue(1/(x**4 + 1), x, 0) == 0
@XFAIL
def test_expressions_failing():
assert residue(1/(x**4 + 1), x, exp(I*pi/4)) == -(S(1)/4 + I/4)/sqrt(2)
n = Symbol('n', integer=True, positive=True)
assert residue(exp(z)/(z - pi*I/4*a)**n, z, I*pi*a) == \
exp(I*pi*a/4)/factorial(n - 1)
assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/4/a**3
def test_NotImplemented():
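    # exp(1/z) has an essential singularity at 0; its residue (the 1/z Laurent
    # coefficient) is actually 1, but residue() only handles poles.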
raises(NotImplementedError, lambda: residue(exp(1/z), z, 0))
def test_bug():
assert residue(2**(z)*(s + z)*(1 - s - z)/z**2, z, 0) == \
1 + s*log(2) - s**2*log(2) - 2*s
| kmacinnis/sympy | sympy/series/tests/test_residues.py | Python | bsd-3-clause | 1,943 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import functools as ft
#import Pickle as cP
import numpy as np
import tqdm
from blessings import Terminal
import err
import utils as U
#import fileops as fops
from utils import print
from streampickle import PickleStreamReader, PickleStreamWriter
from . import sample
from . import traces
from . import state as st
from . import simulatesystem as simsys
from .properties import PropertyChecker
import multiprocessing as mp
import globalopts
logger = logging.getLogger(__name__)
term = Terminal()
# mpl = mp.log_to_stderr()
# mpl.setLevel(logging.INFO)
# TODO: make a module of its own once we add more general property using
# monitors...
def check_prop_violation(prop, trace):
"""check_prop_violation
Parameters
----------
trace :
prop :
Returns
-------
Notes
------
"""
# check using random sims
idx = prop.final_cons.sat(trace.x_array+0.0001)
sat_x, sat_t = trace.x_array[idx], trace.t_array[idx]
if sat_x.size != 0:
print('x0={} -> x={}, t={}'.format(
trace.x_array[0, :],
sat_x[0, :], # the first violating state
sat_t[0], # corresponding time instant
))
return True
else:
return False
# def pickle_res(f, arg):
# return cP.dumps(f(arg), protocol=cP.HIGHEST_PROTOCOL)
def simulate(sys, prop):
if globalopts.opts.par:
return simulate_par(sys, prop)()
else:
return simulate_single(sys, prop)
def f(sys, prop, fd, _):
cs = sample.sample_init_UR(sys, prop, 1)
trace = simsys.simulate_system(sys, prop.T, cs[0])
#fd.write(trace)
if check_prop_violation(prop, trace):
#num_violations += 1
pass
def mp_imap(self, sim, concrete_states):
CHNK = 6250
num_violations = 0
#TODO: concrete_states should be an iterator/generator
#f = ft.partial(pickle_res, sim)
#writer.write(pool.imap_unordered(sim, concrete_states, chunksize=CHNK))
#with fops.StreamWrite(self.fname, mode='wb') as sw:
#
pool = mp.Pool(self.nworkers)
with PickleStreamWriter(self.fname) as writer:
for trace in pool.imap_unordered(sim, concrete_states, chunksize=CHNK):
writer.write(trace)
if check_prop_violation(self.prop, trace):
num_violations += 1
pool.close()
pool.join()
import time
def worker(prop, sim, writer, concrete_states):
print('burden: {}'.format(len(concrete_states)))
ti = time.time()
num_violations = 0
with writer:
for cs in concrete_states:
trace = sim(cs)
writer.write(trace)
if check_prop_violation(prop, trace):
num_violations += 1
tf = time.time()
print('time taken = {}'.format(tf-ti))
return num_violations
def mp_custom(self, sim, concrete_states):
jobs = []
nworkers = self.nworkers
work = len(concrete_states)
work_load = int(work/nworkers)
left_over_jobs = work % nworkers
assert(work_load * nworkers + left_over_jobs == work)
for i in range(nworkers):
fname = self.fname + str(i)
writer = PickleStreamWriter(fname)
        # slice evenly; the last worker also takes the left-over jobs
        if i == nworkers - 1:
            cs_slice = concrete_states[i*work_load:]
        else:
            cs_slice = concrete_states[i*work_load:(i+1)*work_load]
p = mp.Process(target=worker, args=(self.prop, sim, writer, cs_slice))
jobs.append(p)
p.start()
for job in jobs:
job.join()
def numap(self, sim, concrete_states):
num_violations = 0
from numap import NuMap
with PickleStreamWriter(self.fname) as writer:
for trace in NuMap(func=sim, iterable=concrete_states,
ordered=False, stride=1, buffer=1000):
writer.write(trace)
if check_prop_violation(self.prop, trace):
num_violations += 1
def mp_shared_mem(self, num_samples):
CHNK = 6250
pool = mp.Pool(self.nworkers)
fd = open('delme.dump', 'w')
ff = ft.partial(f, self.sys, self.prop, fd)
for trace in pool.imap_unordered(ff, xrange(num_samples), chunksize=CHNK):
pass
fd.close()
pool.close()
pool.join()
def jb_parallel(self, sim, concrete_states):
num_violations = 0
import tempfile
import os
import joblib as jb
from joblib import load, dump
temp_folder = tempfile.mkdtemp()
filename = os.path.join(temp_folder, 'joblib_test.mmap')
if os.path.exists(filename):
os.unlink(filename)
dump(concrete_states, filename)
large_memmap = load(filename, mmap_mode='r+')
with PickleStreamWriter(self.fname) as writer:
for trace in jb.Parallel(n_jobs=self.nworkers, verbose=0, batch_size='auto')(jb.delayed(sim, check_pickle=False)(i) for i in large_memmap):
writer.write(trace)
if check_prop_violation(self.prop, trace):
num_violations += 1
class simulate_par(object):
def __init__(self, sys, prop):
self.par_option = 'mp_custom'
self.sys = sys
self.prop = prop
fname = '{}.simdump'.format(sys.sys_name)
self.fname = globalopts.opts.construct_path(fname)
#self.nworkers = mp.cpu_count()
self.nworkers = int(raw_input('Enter number of workers'))
print('Num workers: {}'.format(self.nworkers))
return
def trace_gen(self):
# This case is different as multiple dumps are produced
if self.par_option == 'mp_custom':
# combine all files
readers = (PickleStreamReader(self.fname+str(i)) for i in range(self.nworkers))
for reader in readers:
for trace in reader.read():
yield trace
else:
reader = PickleStreamReader(self.fname)
for trace in reader.read():
yield trace
def __call__(self):
par_option = self.par_option
num_samples = globalopts.opts.num_sim_samples
concrete_states = sample.sample_init_UR(self.sys, self.prop, num_samples)
sim = ft.partial(simsys.simulate_system, self.sys, self.prop.T)
if par_option == 'mp':
mp_imap(self, sim, concrete_states)
elif par_option == 'mp_custom':
mp_custom(self, sim, concrete_states)
elif par_option == 'mp_shared_mem':
raise NotImplementedError
#mp_shared_mem(self, num_samples)
elif par_option == 'joblib':
            jb_parallel(self, sim, concrete_states)
else:
raise NotImplementedError
#single threaded
#print('number of violations: {}'.format(num_violations))
return self.trace_gen()
def simulate_single(sys, prop):
num_samples = globalopts.opts.num_sim_samples
num_violations = 0
concrete_states = sample.sample_init_UR(sys, prop, num_samples)
trace_list = []
sys_sim = simsys.get_system_simulator(sys)
for i in tqdm.trange(num_samples):
trace = simsys.simulate(sys_sim, prop.T, concrete_states[i])
trace_list.append(trace)
if check_prop_violation(prop, trace):
num_violations += 1
print('violation counter: {}'.format(num_violations))
print('number of violations: {}'.format(num_violations))
return trace_list
def random_test(
A,
system_params,
initial_state_list,
ci_seq_list,
pi_seq_list,
init_d,
initial_controller_state,
sample_ci,
return_vio_only=True
):
# ##!!##logger.debug('random testing...')
logger.debug('initial states :\n{}'.format('\n'.join([str(A.plant_abs.get_ival_cons_abs_state(s0.ps)) for s0 in initial_state_list])))
init_cons = system_params.init_cons
A.prog_bar = False
res = []
# initial_state_set = set(initial_state_list)
if A.num_dims.ci != 0:
if sample_ci:
ci_seq_array = np.array([np.array(ci_seq_list).T]).T
else:
ci_seq_array = np.array(ci_seq_list)
# print('ci_seq_array', ci_seq_array)
# print('ci_seq_array.shape', ci_seq_array.shape)
if A.num_dims.pi != 0:
pi_seq_array = np.array([np.array(pi_seq_list).T]).T
#print(ci_seq_array.shape)
#print(pi_seq_array.shape)
x_array = np.empty((0, A.num_dims.x), dtype=float)
print('checking initial states')
# for abs_state in initial_state_set:
for abs_state in initial_state_list:
ival_cons = A.plant_abs.get_ival_cons_abs_state(abs_state.plant_state)
# ##!!##logger.debug('ival_cons: {}'.format(ival_cons))
# find the intersection b/w the cell and the initial cons
# print('init_cons', init_cons)
ic = ival_cons & init_cons
if (ic is not None) and (not ic.zero_measure):
# scatter the continuous states
x_samples = ic.sample_UR(A.num_samples)
# ##!!##logger.debug('ic: {}'.format(ic))
# ##!!##logger.debug('samples: {}'.format(x_samples))
x_array = np.concatenate((x_array, x_samples))
else:
raise err.Fatal('Can not happen! Invalid states have already been filtered out by filter_invalid_abs_states()')
# # ##!!##logger.debug('{}'.format(samples.x_array))
# ##!!##logger.debug('ignoring abs states: {}'.format(ival_cons))
# ignore the state as it is completely outside the initial
# constraints
#x_array[-1, :] = np.array([0.4, -0.4])
# print(x_array)
print(x_array.shape)
num_samples = len(x_array)
if num_samples == 0:
print(initial_state_list)
print('no valid sample found during random testing. STOP')
return False
else:
# ##!!##logger.debug('num_samples = 0')
print('simulating {} samples'.format(num_samples))
trace_list = [traces.Trace(A.num_dims, A.N+1) for i in range(num_samples)]
s_array = np.tile(initial_controller_state, (num_samples, 1))
# if system_params.pi is not None:
# pi_array = SaMpLe.sample_ival_constraints(system_params.pi, num_samples)
# print(pi_array)
# exit()
# else:
# pi_array = None
t_array = np.tile(0.0, (num_samples, 1))
d_array = np.tile(init_d, (num_samples, 1))
# TODO: initializing pvt states to 0
p_array = np.zeros((num_samples, 1))
# save x_array to print x0 in case an error is found
# TODO: need to do something similar for u,ci,pi
x0_array = x_array
d0_array = d_array
for i, trace in enumerate(trace_list):
trace.append(x_array[i], 0, t_array[i], d_array[i])
# sanity check
if len(x_array) != len(s_array):
raise err.Fatal('internal: how is len(x_array) != len(s_array)?')
# while(simTime < A.T):
sim_num = 0
simTime = 0.0
i = 0
# records the actual pis used. These are printed in case a
# violation is found for reproducibility
pi_seqs_used = []
while sim_num < A.N:
if A.num_dims.ci == 0:
ci_array = np.zeros((num_samples, 0))
else:
if sample_ci:
ci_cons_list = list(ci_seq_array[:, i, :])
ci_cons_list = [ci_cons.tolist()[0] for ci_cons in ci_cons_list]
ci_lb_list = [np.tile(ci_cons.l, (A.num_samples, 1)) for ci_cons in ci_cons_list]
ci_ub_list = [np.tile(ci_cons.h, (A.num_samples, 1)) for ci_cons in ci_cons_list]
ci_cons_lb = ft.reduce(lambda acc_arr, arr: np.concatenate((acc_arr, arr)), ci_lb_list)
ci_cons_ub = ft.reduce(lambda acc_arr, arr: np.concatenate((acc_arr, arr)), ci_ub_list)
random_arr = np.random.rand(num_samples, A.num_dims.ci)
ci_array = ci_cons_lb + random_arr * (ci_cons_ub - ci_cons_lb)
else:
ci_array = ci_seq_array[:, i, :]
ci_array = np.repeat(ci_array, A.num_samples, axis=0)
if A.num_dims.pi == 0:
pi_array = np.zeros((num_samples, 0))
else:
pi_cons_list = list(pi_seq_array[:, i, :])
pi_cons_list = [pi_cons.tolist()[0] for pi_cons in pi_cons_list]
#print(pi_cons_list)
#pi_cons_list = map(A.plant_abs.get_ival_cons_pi_cell, pi_cells)
pi_lb_list = [np.tile(pi_cons.l, (A.num_samples, 1)) for pi_cons in pi_cons_list]
pi_ub_list = [np.tile(pi_cons.h, (A.num_samples, 1)) for pi_cons in pi_cons_list]
pi_cons_lb = ft.reduce(lambda acc_arr, arr: np.concatenate((acc_arr, arr)), pi_lb_list)
pi_cons_ub = ft.reduce(lambda acc_arr, arr: np.concatenate((acc_arr, arr)), pi_ub_list)
#print(pi_cons_lb)
#print(pi_cons_ub)
#U.pause()
random_arr = np.random.rand(num_samples, A.num_dims.pi)
# print('pi_cons_lb.shape:', pi_cons_lb.shape)
# print('pi_cons_ub.shape:', pi_cons_ub.shape)
# print('num_samples', num_samples)
pi_array = pi_cons_lb + random_arr * (pi_cons_ub - pi_cons_lb)
pi_seqs_used.append(pi_array)
(s_array_, u_array) = compute_concrete_controller_output(
A,
system_params.controller_sim,
ci_array,
x_array,
s_array,
num_samples,
)
concrete_states = st.StateArray( # t
# cont_state_array
# abs_state.discrete_state
# abs_state.pvt_stat
t_array,
x_array,
d_array,
p_array,
pi_array,
)
# print(concrete_states)
# enforce property checking even if it has not been requested
# by the user
pc = PropertyChecker(system_params.final_cons)
rchd_concrete_state_array, property_violated_flag = (
system_params.plant_sim.simulate_with_property_checker(
concrete_states,
A.delta_t,
pc
))
for kdx, rchd_state in enumerate(rchd_concrete_state_array.iterable()):
trace = trace_list[kdx]
trace.append(rchd_state.x, rchd_state.pi, rchd_state.t, rchd_state.d)
if property_violated_flag:
print(U.decorate('concretized!'))
for (idx, xf) in enumerate(rchd_concrete_state_array.iterable()):
if xf.x in system_params.final_cons:
res.append(idx)
print(x0_array[idx, :], d0_array[idx, :], '->', '\t', xf.x, xf.d)
tmp_pi = [pi[idx, :] for pi in pi_seqs_used]
print('pi_seq:', tmp_pi)
#if A.num_dims.ci != 0:
# print('ci:', ci_array[idx])
#if A.num_dims.pi != 0:
# print('pi:', pi_array[idx])
break
i += 1
sim_num += 1
# increment simulation time
simTime += A.delta_t
t_array += A.delta_t
concrete_states = rchd_concrete_state_array
x_array = concrete_states.cont_states
d_array = concrete_states.discrete_states
p_array = concrete_states.pvt_states
# u_array =
if return_vio_only:
return list(map(trace_list.__getitem__, res))
else:
#for trace in trace_list:
#print(trace.x_array[0, :])
#exit()
return trace_list, bool(res)
def compute_concrete_controller_output(*args):
return U.inf_list(0), U.inf_list(0)
| zutshi/S3CAMR | src/core/random_testing.py | Python | bsd-2-clause | 16,059 |
import pytest
class TestTee:
@pytest.mark.complete("tee ")
def test_1(self, completion):
assert completion
@pytest.mark.complete("tee -", require_longopt=True)
def test_options(self, completion):
assert completion
| algorythmic/bash-completion | test/t/test_tee.py | Python | gpl-2.0 | 249 |
from . import foo
| asedunov/intellij-community | python/testData/completion/relativeFromImportInNamespacePackage2/nspkg1/a.after.py | Python | apache-2.0 | 17 |
from django.conf.urls import url, include
from rest_framework import routers
from . import views
# TODO: create /api/v1/ url space for default API urls
router = routers.DefaultRouter()
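# DefaultRouter auto-generates the list/detail routes for each registered
# ViewSet and adds a browsable API root view.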
router.register(r'bids', views.BidViewSet)
router.register(r'claims', views.ClaimViewSet)
router.register(r'votes', views.VoteViewSet)
router.register(r'users', views.UserViewSet)
router.register(r'payouts', views.UserViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
url(
r'^api-auth/',
include('rest_framework.urls', namespace='rest_framework')
)
]
| codesy/codesy | api/urls.py | Python | agpl-3.0 | 578 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AdminRuleCollectionsOperations:
"""AdminRuleCollectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_manager_name: str,
configuration_name: str,
top: Optional[int] = None,
skip_token: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.RuleCollectionListResult"]:
"""Lists all the rule collections in a security admin configuration, in a paginated format.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_manager_name: The name of the network manager.
:type network_manager_name: str
:param configuration_name: The name of the network manager security Configuration.
:type configuration_name: str
:param top: An optional query parameter which specifies the maximum number of records to be
returned by the server.
:type top: int
:param skip_token: SkipToken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skipToken parameter that specifies a starting point to use for subsequent calls.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RuleCollectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01_preview.models.RuleCollectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleCollectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkManagerName': self._serialize.url("network_manager_name", network_manager_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=20, minimum=1)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RuleCollectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/securityAdminConfigurations/{configurationName}/ruleCollections'} # type: ignore
async def get(
self,
resource_group_name: str,
network_manager_name: str,
configuration_name: str,
rule_collection_name: str,
**kwargs: Any
) -> "_models.RuleCollection":
"""Gets a network manager security admin configuration rule collection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_manager_name: The name of the network manager.
:type network_manager_name: str
:param configuration_name: The name of the network manager security Configuration.
:type configuration_name: str
:param rule_collection_name: The name of the network manager security Configuration rule
collection.
:type rule_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RuleCollection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01_preview.models.RuleCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkManagerName': self._serialize.url("network_manager_name", network_manager_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str'),
'ruleCollectionName': self._serialize.url("rule_collection_name", rule_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RuleCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/securityAdminConfigurations/{configurationName}/ruleCollections/{ruleCollectionName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
network_manager_name: str,
configuration_name: str,
rule_collection_name: str,
rule_collection: "_models.RuleCollection",
**kwargs: Any
) -> "_models.RuleCollection":
"""Creates or updates an admin rule collection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_manager_name: The name of the network manager.
:type network_manager_name: str
:param configuration_name: The name of the network manager security Configuration.
:type configuration_name: str
:param rule_collection_name: The name of the network manager security Configuration rule
collection.
:type rule_collection_name: str
:param rule_collection: The Rule Collection to create or update.
:type rule_collection: ~azure.mgmt.network.v2021_02_01_preview.models.RuleCollection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RuleCollection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01_preview.models.RuleCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkManagerName': self._serialize.url("network_manager_name", network_manager_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str'),
'ruleCollectionName': self._serialize.url("rule_collection_name", rule_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(rule_collection, 'RuleCollection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RuleCollection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RuleCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/securityAdminConfigurations/{configurationName}/ruleCollections/{ruleCollectionName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
network_manager_name: str,
configuration_name: str,
rule_collection_name: str,
**kwargs: Any
) -> None:
"""Deletes an admin rule collection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_manager_name: The name of the network manager.
:type network_manager_name: str
:param configuration_name: The name of the network manager security Configuration.
:type configuration_name: str
:param rule_collection_name: The name of the network manager security Configuration rule
collection.
:type rule_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkManagerName': self._serialize.url("network_manager_name", network_manager_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str'),
'ruleCollectionName': self._serialize.url("rule_collection_name", rule_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/securityAdminConfigurations/{configurationName}/ruleCollections/{ruleCollectionName}'} # type: ignore
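    # A minimal usage sketch (illustrative, not part of the generated file):
    # assuming a configured async NetworkManagementClient whose
    # admin_rule_collections attribute is an instance of this class,
    #
    #     async def dump_rule_collections(client):
    #         async for rc in client.admin_rule_collections.list(
    #             "my-resource-group", "my-network-manager", "my-configuration"
    #         ):
    #             print(rc.name)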
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01_preview/aio/operations/_admin_rule_collections_operations.py | Python | mit | 17,796 |
"""Monkeypatching and mocking functionality."""
import os
import re
import sys
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Any
from typing import Generator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import overload
from typing import Tuple
from typing import TypeVar
from typing import Union
from _pytest.compat import final
from _pytest.fixtures import fixture
from _pytest.warning_types import PytestWarning
RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$")
K = TypeVar("K")
V = TypeVar("V")
@fixture
def monkeypatch() -> Generator["MonkeyPatch", None, None]:
"""A convenient fixture for monkey-patching.
The fixture provides these methods to modify objects, dictionaries or
os.environ::
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
monkeypatch.setitem(mapping, name, value)
monkeypatch.delitem(obj, name, raising=True)
monkeypatch.setenv(name, value, prepend=False)
monkeypatch.delenv(name, raising=True)
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
All modifications will be undone after the requesting test function or
fixture has finished. The ``raising`` parameter determines if a KeyError
or AttributeError will be raised if the set/deletion operation has no target.
"""
mpatch = MonkeyPatch()
yield mpatch
mpatch.undo()
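# A minimal usage sketch of the fixture (illustrative test, not part of this
# module):
#
#     def test_fake_home(monkeypatch):
#         monkeypatch.setenv("HOME", "/tmp/fake-home")
#         assert os.environ["HOME"] == "/tmp/fake-home"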
def resolve(name: str) -> object:
# Simplified from zope.dottedname.
parts = name.split(".")
used = parts.pop(0)
found = __import__(used)
for part in parts:
used += "." + part
try:
found = getattr(found, part)
except AttributeError:
pass
else:
continue
# We use explicit un-nesting of the handling block in order
# to avoid nested exceptions.
try:
__import__(used)
except ImportError as ex:
expected = str(ex).split()[-1]
if expected == used:
raise
else:
raise ImportError(f"import error in {used}: {ex}") from ex
found = annotated_getattr(found, part, used)
return found
def annotated_getattr(obj: object, name: str, ann: str) -> object:
try:
obj = getattr(obj, name)
except AttributeError as e:
raise AttributeError(
"{!r} object at {} has no attribute {!r}".format(
type(obj).__name__, ann, name
)
) from e
return obj
def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]:
if not isinstance(import_path, str) or "." not in import_path: # type: ignore[unreachable]
raise TypeError(f"must be absolute import path string, not {import_path!r}")
module, attr = import_path.rsplit(".", 1)
target = resolve(module)
if raising:
annotated_getattr(target, attr, ann=module)
return attr, target
class Notset:
def __repr__(self) -> str:
return "<notset>"
notset = Notset()
@final
class MonkeyPatch:
"""Helper to conveniently monkeypatch attributes/items/environment
variables/syspath.
Returned by the :fixture:`monkeypatch` fixture.
    .. versionchanged:: 6.2
Can now also be used directly as `pytest.MonkeyPatch()`, for when
the fixture is not available. In this case, use
:meth:`with MonkeyPatch.context() as mp: <context>` or remember to call
:meth:`undo` explicitly.
"""
def __init__(self) -> None:
self._setattr: List[Tuple[object, str, object]] = []
        self._setitem: List[Tuple[MutableMapping[Any, Any], object, object]] = []
self._cwd: Optional[str] = None
self._savesyspath: Optional[List[str]] = None
@classmethod
@contextmanager
def context(cls) -> Generator["MonkeyPatch", None, None]:
"""Context manager that returns a new :class:`MonkeyPatch` object
which undoes any patching done inside the ``with`` block upon exit.
Example:
.. code-block:: python
import functools
def test_partial(monkeypatch):
with monkeypatch.context() as m:
m.setattr(functools, "partial", 3)
Useful in situations where it is desired to undo some patches before the test ends,
such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_.
"""
m = cls()
try:
yield m
finally:
m.undo()
@overload
def setattr(
self, target: str, name: object, value: Notset = ..., raising: bool = ...,
) -> None:
...
@overload
def setattr(
self, target: object, name: str, value: object, raising: bool = ...,
) -> None:
...
def setattr(
self,
target: Union[str, object],
name: Union[object, str],
value: object = notset,
raising: bool = True,
) -> None:
"""Set attribute value on target, memorizing the old value.
For convenience you can specify a string as ``target`` which
will be interpreted as a dotted import path, with the last part
being the attribute name. For example,
``monkeypatch.setattr("os.getcwd", lambda: "/")``
would set the ``getcwd`` function of the ``os`` module.
Raises AttributeError if the attribute does not exist, unless
``raising`` is set to False.
"""
__tracebackhide__ = True
import inspect
if isinstance(value, Notset):
if not isinstance(target, str):
raise TypeError(
"use setattr(target, name, value) or "
"setattr(target, value) with target being a dotted "
"import string"
)
value = name
name, target = derive_importpath(target, raising)
else:
if not isinstance(name, str):
raise TypeError(
"use setattr(target, name, value) with name being a string or "
"setattr(target, value) with target being a dotted "
"import string"
)
oldval = getattr(target, name, notset)
if raising and oldval is notset:
raise AttributeError(f"{target!r} has no attribute {name!r}")
# avoid class descriptors like staticmethod/classmethod
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
setattr(target, name, value)
def delattr(
self,
target: Union[object, str],
name: Union[str, Notset] = notset,
raising: bool = True,
) -> None:
"""Delete attribute ``name`` from ``target``.
If no ``name`` is specified and ``target`` is a string
it will be interpreted as a dotted import path with the
last part being the attribute name.
        Raises AttributeError if the attribute does not exist, unless
``raising`` is set to False.
"""
__tracebackhide__ = True
import inspect
if isinstance(name, Notset):
if not isinstance(target, str):
raise TypeError(
"use delattr(target, name) or "
"delattr(target) with target being a dotted "
"import string"
)
name, target = derive_importpath(target, raising)
if not hasattr(target, name):
if raising:
raise AttributeError(name)
else:
oldval = getattr(target, name, notset)
# Avoid class descriptors like staticmethod/classmethod.
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
delattr(target, name)
def setitem(self, dic: MutableMapping[K, V], name: K, value: V) -> None:
"""Set dictionary entry ``name`` to value."""
self._setitem.append((dic, name, dic.get(name, notset)))
dic[name] = value
def delitem(self, dic: MutableMapping[K, V], name: K, raising: bool = True) -> None:
"""Delete ``name`` from dict.
Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to
False.
"""
if name not in dic:
if raising:
raise KeyError(name)
else:
self._setitem.append((dic, name, dic.get(name, notset)))
del dic[name]
def setenv(self, name: str, value: str, prepend: Optional[str] = None) -> None:
"""Set environment variable ``name`` to ``value``.
If ``prepend`` is a character, read the current environment variable
value and prepend the ``value`` adjoined with the ``prepend``
character.
"""
if not isinstance(value, str):
warnings.warn( # type: ignore[unreachable]
PytestWarning(
"Value of environment variable {name} type should be str, but got "
"{value!r} (type: {type}); converted to str implicitly".format(
name=name, value=value, type=type(value).__name__
)
),
stacklevel=2,
)
value = str(value)
if prepend and name in os.environ:
value = value + prepend + os.environ[name]
self.setitem(os.environ, name, value)
def delenv(self, name: str, raising: bool = True) -> None:
"""Delete ``name`` from the environment.
Raises ``KeyError`` if it does not exist, unless ``raising`` is set to
False.
"""
environ: MutableMapping[str, str] = os.environ
self.delitem(environ, name, raising=raising)
def syspath_prepend(self, path) -> None:
"""Prepend ``path`` to ``sys.path`` list of import locations."""
from pkg_resources import fixup_namespace_packages
if self._savesyspath is None:
self._savesyspath = sys.path[:]
sys.path.insert(0, str(path))
# https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
fixup_namespace_packages(str(path))
# A call to syspathinsert() usually means that the caller wants to
# import some dynamically created files, thus with python3 we
# invalidate its import caches.
# This is especially important when any namespace package is in use,
# since then the mtime based FileFinder cache (that gets created in
# this case already) gets not invalidated when writing the new files
# quickly afterwards.
from importlib import invalidate_caches
invalidate_caches()
def chdir(self, path) -> None:
"""Change the current working directory to the specified path.
Path can be a string or a py.path.local object.
"""
if self._cwd is None:
self._cwd = os.getcwd()
if hasattr(path, "chdir"):
path.chdir()
elif isinstance(path, Path):
            # Modern Python uses the fspath protocol; str() is legacy support.
os.chdir(str(path))
else:
os.chdir(path)
def undo(self) -> None:
"""Undo previous changes.
This call consumes the undo stack. Calling it a second time has no
effect unless you do more monkeypatching after the undo call.
There is generally no need to call `undo()`, since it is
called automatically during tear-down.
Note that the same `monkeypatch` fixture is used across a
single test function invocation. If `monkeypatch` is used both by
the test function itself and one of the test fixtures,
calling `undo()` will undo all of the changes made in
both functions.
"""
for obj, name, value in reversed(self._setattr):
if value is not notset:
setattr(obj, name, value)
else:
delattr(obj, name)
self._setattr[:] = []
for dictionary, key, value in reversed(self._setitem):
if value is notset:
try:
del dictionary[key]
except KeyError:
pass # Was already deleted, so we have the desired state.
else:
dictionary[key] = value
self._setitem[:] = []
if self._savesyspath is not None:
sys.path[:] = self._savesyspath
self._savesyspath = None
if self._cwd is not None:
os.chdir(self._cwd)
self._cwd = None
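# --- Hedged usage sketch (appended illustration, not upstream pytest code) ---
# Demonstrates the dotted-path form of setattr() documented above and the
# automatic undo performed by context(); assumes EXAMPLE_VAR is not already
# set in the environment.
if __name__ == "__main__":
    with MonkeyPatch.context() as mp:
        mp.setattr("os.getcwd", lambda: "/patched")
        mp.setenv("EXAMPLE_VAR", "1")
        assert os.getcwd() == "/patched"
        assert os.environ["EXAMPLE_VAR"] == "1"
    # Leaving the context undid both patches.
    assert os.getcwd() != "/patched"
    assert "EXAMPLE_VAR" not in os.environ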
|
pexip/os-pytest
|
src/_pytest/monkeypatch.py
|
Python
|
mit
| 12,979
|
#import win32traceutil
import traceback
import sys
import os
import time
import new
# these three are required pre-imported, for pyjamas to work
# with the pyjd imputil etc. awful, i know...
import threading
import encodings
import encodings.cp437
from windows import *
from ctypes import *
from ctypes.wintypes import *
import comtypes
from comtypes import IUnknown, GUID, COMMETHOD
from comtypes.automation import IDispatch, VARIANT
from comtypes.client import wrap, GetModule
from comtypes.client.dynamic import Dispatch
import comtypes.gen
if not hasattr(sys, 'frozen'):
GetModule('atl.dll')
GetModule('shdocvw.dll')
try:
GetModule('msxml2.dll')
except:
pass
try:
GetModule('msxml3.dll')
except:
pass
try:
GetModule('msxml6.dll')
except:
pass
GetModule('mshtml.tlb')
#GetModule('progdlg.tlb')
from comtypes.gen import SHDocVw
from comtypes.gen import MSHTML
try:
from comtypes.gen import MSXML2
except:
pass
try:
from comtypes.gen import MSXML6
except:
pass
try:
from comtypes.gen import MSXML3
except:
pass
atl = windll.atl # If this fails, you need atl.dll
# do this after gen stuff, above
import mshtmlevents
SID_SShellBrowser = GUID("{000214E2-0000-0000-C000-000000000046}")
class IOleWindow(IUnknown):
_case_insensitive_ = True
u'IOleWindow Interface'
_iid_ = GUID('{00000114-0000-0000-C000-000000000046}')
_idlflags_ = []
_methods_ = [
COMMETHOD([], HRESULT, 'GetWindow',
( ['in'], POINTER(c_void_p), 'pHwnd' ))
]
class IOleInPlaceActiveObject(IOleWindow):
_iid_ = GUID("{00000117-0000-0000-C000-000000000046}")
_idlflags_ = []
_methods_ = IOleWindow._methods_ + [
COMMETHOD([], HRESULT, 'TranslateAccelerator',
( ['in'], POINTER(MSG), 'pMsg' ))
]
# http://www.mail-archive.com/comtypes-users@lists.sourceforge.net/msg00439.html
class IServiceProvider(IUnknown):
_iid_ = GUID('{6D5140C1-7436-11CE-8034-00AA006009FA}')
# Overridden QueryService to make it nicer to use (passing it an
# interface and it returns a pointer to that interface)
def QueryService(self, serviceIID, interface):
p = POINTER(interface)()
self._QueryService(byref(serviceIID), byref(interface._iid_), byref(p))
return p
_methods_ = [
COMMETHOD([], HRESULT, 'QueryService',
( ['in'], POINTER(GUID), 'guidService' ),
( ['in'], POINTER(GUID), 'riid' ),
( ['in'], POINTER(c_void_p), 'ppvObject' ))
]
#class IInputObject(IUnknown):
# _iid_= GUID("{68284FAA-6A48-11D0-8C78-00C04FD918B4}")
#
# _methods_= IUnknown._methods_ + [
# (STDMETHOD (HRESULT, "UIActivateIO", BOOL, POINTER(MSG))),
# (STDMETHOD (HRESULT, "HasFocusIO")),
# (STDMETHOD (HRESULT, "TranslateAcceleratorIO", POINTER(MSG)))]
class EventSink(object):
# some DWebBrowserEvents
def OnVisible(self, this, *args):
print "OnVisible", args
def BeforeNavigate(self, this, *args):
print "BeforeNavigate", args
def NavigateComplete(self, this, *args):
print "NavigateComplete", this, args
return
# some DWebBrowserEvents2
def BeforeNavigate2(self, this, *args):
print "BeforeNavigate2", args
def NavigateComplete2(self, this, *args):
print "NavigateComplete2", args
def DocumentComplete(self, this, *args):
print "DocumentComplete", args
        if not self.workaround_ignore_first_doc_complete:
# ignore first about:blank. *sigh*...
# TODO: work out how to parse *args byref VARIANT
# in order to get at the URI.
self.workaround_ignore_first_doc_complete = True
return
self._loaded()
def NewWindow2(self, this, *args):
print "NewWindow2", args
return
v = cast(args[1]._.c_void_p, POINTER(VARIANT))[0]
v.value = True
def NewWindow3(self, this, *args):
print "NewWindow3", args
return
v = cast(args[1]._.c_void_p, POINTER(VARIANT))[0]
v.value = True
fn_txt = """\
def event_fn(self, *args):
#print "event %s", self, args
#print "event callbacks", self._listeners
callbacks = self._listeners.get('%s', [])
for fn in callbacks:
try:
fn(self._sender, Dispatch(args[0]), True)
except:
traceback.print_exc()
sys.stderr.flush()
"""
class EventCaller:
def __init__(self, handler, name):
self.handler = handler
self.name = name
def __call__(self, *args):
callbacks = self.handler._listeners.get(self.name, [])
#print "event", self.name, callbacks
for fn in callbacks:
try:
fn(self.handler._sender, Dispatch(args[0]), True)
except:
                traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
class EventHandler(object):
def __init__(self, sender):
self._sender = sender
self._listeners = {}
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
raise AttributeError(name)
print "EventHandler requested ", name
if name.startswith('_') or name == 'addEventListener':
return self.__dict__[name]
idx = name.find('_on')
if idx >= 0:
if idx > 0:
name = name[idx+1:]
#return EventCaller(self, name)
exec fn_txt % (name[2:], name[2:])
#exec fn_txt % (name[2:])
#print event_fn
return new.instancemethod(event_fn, self)
raise AttributeError(name)
def addEventListener(self, name, fn):
        if name not in self._listeners:
self._listeners[name] = []
self._listeners[name].append(fn)
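# Illustrative note (not part of the original file): EventHandler synthesizes
# event methods on demand. Requesting e.g. handler.HTMLWindowEvents2_onunload
# exec's fn_txt with 'unload' substituted in, binds the result as an instance
# method, and that method dispatches to every callback registered through
# handler.addEventListener('unload', fn).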
class Browser(EventSink):
def __init__(self, application, appdir):
EventSink.__init__(self)
self.platform = 'mshtml'
self.application = application
self.appdir = appdir
self.already_initialised = False
self.workaround_ignore_first_doc_complete = False
self.window_handler = None
self.node_handlers = {}
# Create an instance of IE via AtlAxWin.
atl.AtlAxWinInit()
hInstance = GetModuleHandle(None)
self.hwnd = CreateWindowEx(0,
"AtlAxWin",
"about:blank",
WS_OVERLAPPEDWINDOW |
WS_VISIBLE |
WS_HSCROLL | WS_VSCROLL,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
NULL,
NULL,
hInstance,
NULL)
# Get the IWebBrowser2 interface for the IE control.
self.pBrowserUnk = POINTER(IUnknown)()
atl.AtlAxGetControl(self.hwnd, byref(self.pBrowserUnk))
        # the wrap call queries for the default interface
self.pBrowser = wrap(self.pBrowserUnk)
self.pBrowser.RegisterAsBrowser = True
self.pBrowser.AddRef()
self.conn = mshtmlevents.GetEvents(self.pBrowser, sink=self,
interface=SHDocVw.DWebBrowserEvents2)
#print "browser HWND", SetFocus(self.pBrowser.HWND)
def _alert(self, txt):
self.getDomWindow().alert(txt)
def load_app(self):
uri = self.application
if uri.find(":") == -1:
# assume file
uri = 'file://'+os.path.abspath(uri)
#print "load_app", uri
self.application = uri
v = byref(VARIANT())
self.pBrowser.Navigate(uri, v, v, v, v)
# Show Window
cw = c_int(self.hwnd)
ShowWindow(cw, c_int(SW_SHOW))
UpdateWindow(cw)
# http://msdn.microsoft.com/en-us/library/aa752126%28VS.85%29.aspx
wba = self.pBrowser.QueryInterface(IServiceProvider)
wn = wrap(wba.QueryService(SID_SShellBrowser, IOleWindow))
hwnd = c_void_p(0)
pHnd = byref(hwnd)
wn.GetWindow(pHnd)
#PostMessage(pHnd.value, WM_SETFOCUS,0,0)
SetFocus(hwnd)
#print self.hwnd, pHnd.value
def getDomDocument(self):
return Dispatch(self.pBrowser.Document)
def getDomWindow(self):
return self.getDomDocument().parentWindow
def _addXMLHttpRequestEventListener(self, node, event_name, event_fn):
print "_addXMLHttpRequestEventListener", event_name
rcvr = mshtmlevents._DispEventReceiver()
rcvr.dispmap = {0: event_fn}
print rcvr
rcvr.sender = node
print rcvr.sender
ifc = rcvr.QueryInterface(IDispatch)
print ifc
v = VARIANT(ifc)
print v
setattr(node, event_name, v)
return ifc
def addEventListener(self, node, event_name, event_fn):
rcvr = mshtmlevents._DispEventReceiver()
rcvr.dispmap = {0: event_fn}
rcvr.sender = node
ifc = rcvr.QueryInterface(IDispatch)
v = VARIANT(ifc)
setattr(node, "on"+event_name, v)
return ifc
rcvr = mshtmlevents.GetDispEventReceiver(MSHTML.HTMLElementEvents2, event_fn, "on%s" % event_name)
rcvr.sender = node
ifc = rcvr.QueryInterface(IDispatch)
node.attachEvent("on%s" % event_name, ifc)
return ifc
def mash_attrib(self, attrib_name):
return attrib_name
def _addWindowEventListener(self, event_name, event_fn):
#print "_addWindowEventListener", event_name, event_fn
#rcvr = mshtmlevents.GetDispEventReceiver(MSHTML.HTMLWindowEvents,
# event_fn, "on%s" % event_name)
#print rcvr
#rcvr.sender = self.getDomWindow()
#print rcvr.sender
#ifc = rcvr.QueryInterface(IDispatch)
#print ifc
#v = VARIANT(ifc)
#print v
#setattr(self.getDomWindow(), "on%s" % event_name, v)
#return ifc
wnd = self.pBrowser.Document.parentWindow
if self.window_handler is None:
self.window_handler = EventHandler(self)
self.window_conn = mshtmlevents.GetEvents(wnd,
sink=self.window_handler,
interface=MSHTML.HTMLWindowEvents2)
self.window_handler.addEventListener(event_name, event_fn)
return event_name # hmmm...
def getXmlHttpRequest(self):
print "getXMLHttpRequest"
o = comtypes.client.CreateObject('MSXML2.XMLHTTP.3.0')
print "getXMLHttpRequest", o
return Dispatch(o)
def getUri(self):
return self.application
def _loaded(self):
#print "loaded"
if self.already_initialised:
return
self.already_initialised = True
self._addWindowEventListener("unload", self.on_unload_callback)
from __pyjamas__ import pygwt_processMetas, set_main_frame
set_main_frame(self)
(pth, app) = os.path.split(self.application)
if self.appdir:
pth = os.path.abspath(self.appdir)
sys.path.append(pth)
def on_unload_callback(self, *args):
PostQuitMessage(0)
global timer_q
timer_q = []
WM_USER_TIMER = RegisterWindowMessage("Timer Notify")
global wv
wv = None
def MainWin(one_event):
# Pump Messages
msg = MSG()
pMsg = pointer(msg)
while 1:
res = GetMessage( pMsg, NULL, 0, 0)
if res == -1:
return 0
if res == 0:
break
if timer_q:
fn = timer_q.pop()
fn()
if msg.message == WM_USER_TIMER:
continue
app = wv.pBrowser.Application
ao = app.QueryInterface(IOleInPlaceActiveObject)
if ao.TranslateAccelerator(pMsg):
#if not TranslateAccelerator(
# wv.hwnd, #handle to receiving window
# NULL, #handle to active accelerator table
# pMsg): #message data
TranslateMessage(pMsg)
DispatchMessage(pMsg)
if one_event:
break
return msg.wParam
def add_timer_queue(fn):
timer_q.append(fn)
PostMessage(c_int(wv.hwnd), UINT(WM_USER_TIMER), WPARAM(0), LPARAM(0xffff))
def is_loaded():
return wv.already_initialised
def run(one_event=False, block=True):
try:
MainWin(one_event) # TODO: ignore block arg for now
except:
        traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
def setup(application, appdir=None, width=800, height=600):
global wv
wv = Browser(application, appdir)
wv.load_app()
while 1:
if is_loaded():
return
run(one_event=True)
|
andreyvit/pyjamas
|
pyjd/mshtml.py
|
Python
|
apache-2.0
| 13,133
|
import prairielearn as pl
import lxml.html
import chevron
import json
import pygraphviz
import numpy as np
ENGINE_DEFAULT = 'dot'
PARAMS_NAME_MATRIX_DEFAULT = None
PARAMS_NAME_LABELS_DEFAULT = None
WEIGHTS_DEFAULT = None
WEIGHTS_DIGITS_DEFAULT = 2
WEIGHTS_PRESENTATION_TYPE_DEFAULT = 'f'
def prepare(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
pl.check_attribs(element, required_attribs=[], optional_attribs=['engine', 'params-name-matrix', 'weights', 'weights-digits', 'weights-presentation-type', 'params-name-labels'])
def graphviz_from_matrix(mat, label, engine, element):
# Get the matrix specific attributes
show_weights = pl.get_boolean_attrib(element, 'weights', WEIGHTS_DEFAULT) # by default display weights for stochastic matrices
digits = pl.get_integer_attrib(element, 'weights-digits', WEIGHTS_DIGITS_DEFAULT) # if displaying weights how many digits to round to
presentation_type = pl.get_string_attrib(element, 'weights-presentation-type', WEIGHTS_PRESENTATION_TYPE_DEFAULT).lower()
# Sanity checking
if (mat.shape[0] != mat.shape[1]):
raise Exception('Non-square adjacency matrix of size (%s,%s) given as input.' % (mat.shape[0], mat.shape[1]))
if label is not None:
mat_label = label
if (mat_label.shape[0] != mat.shape[0]):
        raise Exception('Dimension of the label (%s) is not consistent with the dimension of the matrix (%s).' % (mat_label.shape[0], mat.shape[0]))
else:
mat_label = range(mat.shape[1])
# Auto detect showing weights if any of the weights are not 1 or 0
if show_weights is None:
all_ones = True
for x in mat.flatten():
if x != 1 and x != 0:
all_ones = False
show_weights = not all_ones
# Create pygraphviz graph representation
G = pygraphviz.AGraph(directed=True)
for node in mat_label:
G.add_node(node)
for i, out_node in enumerate(mat_label):
for j, in_node in enumerate(mat_label):
x = mat[j, i]
if (x > 0):
if (show_weights):
G.add_edge(out_node, in_node, label=pl.string_from_2darray(x, presentation_type=presentation_type, digits=digits))
else:
G.add_edge(out_node, in_node)
G.layout(engine)
return G.string()
def render(element_html, data):
# Get attribs
element = lxml.html.fragment_fromstring(element_html)
engine = pl.get_string_attrib(element, 'engine', ENGINE_DEFAULT)
input_param = pl.get_string_attrib(element, 'params-name-matrix', PARAMS_NAME_MATRIX_DEFAULT)
input_label = pl.get_string_attrib(element, 'params-name-labels', PARAMS_NAME_LABELS_DEFAULT)
if len(str(element.text)) == 0 and input_param is None:
raise Exception('No graph source given! Must either define graph in HTML or provide source in params.')
graphviz_data = None
if input_param is not None:
mat = np.array(pl.from_json(data['params'][input_param]))
label = None
if input_label is not None:
label = np.array(pl.from_json(data['params'][input_label]))
graphviz_data = json.dumps(graphviz_from_matrix(mat, label, engine, element))
else:
# Read the contents of this element as the data to render
# we dump the string to json to ensure that newlines are
# properly encoded
graphviz_data = json.dumps(str(element.text))
html_params = {
'uuid': pl.get_uuid(),
'workerURL': '/node_modules/viz.js/full.render.js',
'data': graphviz_data,
'engine': engine,
}
with open('pl-graph.mustache') as f:
html = chevron.render(f, html_params).strip()
return html
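# --- Hedged usage sketch (illustrative comments only, not part of the element) ---
# A hypothetical question server.py that feeds this element through
# data['params'], matching the 'params-name-matrix' / 'params-name-labels'
# attributes read in render() above:
#
#     import numpy as np
#     import prairielearn as pl
#
#     def generate(data):
#         data['params']['matrix'] = pl.to_json(np.array([[0, 1], [1, 0]]))
#         data['params']['labels'] = pl.to_json(np.array(['A', 'B']))
#
# paired with the element markup
#     <pl-graph params-name-matrix="matrix" params-name-labels="labels"></pl-graph>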
|
mwest1066/PrairieLearn
|
elements/pl-graph/pl-graph.py
|
Python
|
agpl-3.0
| 3,763
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#!/usr/bin/env python
# process_ip_data.py
# Created by Disa Mhembere on 2015-02-27.
# Email: disa@jhu.edu
# Copyright (c) 2015. All rights reserved.
import argparse, os
import pickle
from django.conf import settings
from pipeline.utils.util import sendJobCompleteEmail, sendJobFailureEmail
from pipeline.utils.filesorter import checkFileExtGengraph
from pipeline.procs.run_invariants import run_invariants
from pipeline.utils.util import getFiberID, get_download_path
from mrcap.gengraph import genGraph
# From the object
def process_input_data(derivatives, graph_loc, graphsize, invariants,
proj_dir, to_email):
  '''
  Extract file names & determine which file corresponds to what for gengraph
  @param derivatives: directory containing the uploaded derivative files
  @param graph_loc: directory where the generated graph is written
  @param graphsize: "small" or "big" graph selector
  @param invariants: list of invariants to compute on the graph
  @param proj_dir: project directory used to build the download path
  @param to_email: address to notify on job completion or failure
  '''
"""
if isinstance(session, str) or isinstance(session, unicode):
f = open(session, "rb")
session = pickle.load(f)
f.close()
"""
filesInUploadDir = os.listdir(derivatives)
fiber_fn, data_atlas_fn = checkFileExtGengraph(filesInUploadDir) # Check & sort files
''' Fully qualify file names '''
fiber_fn = os.path.join(derivatives, fiber_fn)
if not data_atlas_fn:
data_atlas_fn = settings.ATLASES.keys()[0]
else:
data_atlas_fn = os.path.join(derivatives, data_atlas_fn)
print "data_atlas_fn %s ..." % data_atlas_fn
Gfn = os.path.join(graph_loc, getFiberID(fiber_fn)) # partial name
  if graphsize.lower().startswith("s"):
Gfn += "smgr.graphml"
graphsize = False # False is small graph
elif graphsize.lower().startswith("b"):
    Gfn += "bggr.graphml"
graphsize = True # True is big graph
  else: print '[ERROR]: Graphsize Unknown' # should never happen
try:
genGraph(fiber_fn, data_atlas_fn, Gfn, graphsize, **settings.ATLASES) # FIXME: numfibers = 20000 for tests
except:
msg = "Hello,\n\nYour most recent job failed either because your fiber streamline file or ROI mask was incorrectly formatted."
msg += " Please check both and try again.\n\n"
sendJobFailureEmail(to_email, msg)
return 911
  # Run invariants here
if len(invariants) > 0:
print "Computing invariants {0}".format(invariants)
invariant_fns = run_invariants(invariants, Gfn, graph_loc)
dwnld_loc = get_download_path(proj_dir)
sendJobCompleteEmail(to_email, dwnld_loc)
|
neurodata/ndgrutedb
|
MR-OCP/MROCPdjango/pipeline/procs/process_ip_data.py
|
Python
|
apache-2.0
| 2,932
|
def solve(opr):
return eval(opr)
if __name__ == "__main__":
opr = input().strip()
result = solve(opr)
print(result)
|
avenet/hackerrank
|
contests/university_codesprint_3/a_small_step_toward_calculators.py
|
Python
|
mit
| 134
|
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Folder interfaces
$Id: interfaces.py 67630 2006-04-27 00:54:03Z jim $
"""
__docformat__ = 'restructuredtext'
from zope.traversing.interfaces import IContainmentRoot
from zope.annotation.interfaces import IAttributeAnnotatable
from zope.app.container.interfaces import IContainer
from zope.app.component.interfaces import IPossibleSite
class IFolder(IContainer, IPossibleSite, IAttributeAnnotatable):
"""The standard Zope Folder object interface."""
class IRootFolder(IFolder, IContainmentRoot):
"""The standard Zope root Folder object interface."""
|
Donkyhotay/MoonPy
|
zope/app/folder/interfaces.py
|
Python
|
gpl-3.0
| 1,201
|
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from unittest.mock import MagicMock
import pytest
from wtforms import ValidationError
from indico_payment_paypal.util import validate_business
@pytest.mark.parametrize(('data', 'valid'), (
('foobar', False),
('foo@bar,com', False),
('example@example.com', True),
('X2345A789B12Cx', False),
('X2345A789B12', False),
('1234567890123', True),
('X2345A789B12C', True),
))
def test_validate_business(data, valid):
field = MagicMock(data=data)
if valid:
validate_business(None, field)
else:
with pytest.raises(ValidationError):
validate_business(None, field)
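# Illustrative note (not part of the original test): the cases above exercise
# the two business formats validate_business appears to accept -- a valid
# email address, or a 13-character alphanumeric PayPal merchant ID (12 or 14
# characters are rejected).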
|
indico/indico-plugins
|
payment_paypal/tests/util_test.py
|
Python
|
mit
| 913
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the Mimelite algorithm.
Paper: https://arxiv.org/abs/2008.03606.
"""
import collections
from typing import Any, Collection, Dict, Optional
import attr
import tensorflow as tf
import tensorflow_federated as tff
def _unpack_data_label(batch):
if isinstance(batch, collections.abc.Mapping):
return batch['x'], batch['y']
elif isinstance(batch, (tuple, list)):
if len(batch) < 2:
raise ValueError('Expecting both data and label from a batch.')
return batch[0], batch[1]
else:
raise ValueError('Unrecognized batch data.')
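# Illustrative note (not upstream): _unpack_data_label accepts either form of
# batch produced by common input pipelines, e.g.
#   _unpack_data_label({'x': xs, 'y': ys})  -> (xs, ys)
#   _unpack_data_label((xs, ys))            -> (xs, ys)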
@attr.s(eq=False)
class OptimizerState(object):
iterations = attr.ib()
weights = attr.ib()
def _noise_fn(noise_std: float, model_weight_specs: Collection[tf.TensorSpec]):
"""Returns random noise to be added for differential privacy."""
def noise_tensor(spec):
random_generator = tf.random.Generator.from_non_deterministic_state()
noise = random_generator.normal(spec.shape, stddev=noise_std)
noise = tf.reshape(noise, spec.shape)
return noise
return tf.nest.map_structure(noise_tensor, model_weight_specs)
def _initialize_optimizer_vars(model, optimizer):
"""Ensures variables holding the state of `optimizer` are created."""
delta = tf.nest.map_structure(tf.zeros_like, _get_weights(model).trainable)
model_weights = _get_weights(model)
grads_and_vars = tf.nest.map_structure(lambda x, v: (x, v), delta,
model_weights.trainable)
optimizer.apply_gradients(grads_and_vars, name='server_update')
assert optimizer.variables()
def _get_weights(model):
if hasattr(model, 'weights'):
return model.weights
else:
return tff.learning.ModelWeights.from_model(model)
def _get_optimizer_state(optimizer):
return OptimizerState(
iterations=optimizer.iterations,
# The first weight of an optimizer is reserved for the iterations count,
      # see https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/optimizer_v2/optimizer_v2.py  pylint: disable=line-too-long
weights=tuple(optimizer.weights[1:]))
@attr.s(eq=False, order=False, frozen=True)
class ServerState(object):
"""Structure for state on the server.
Attributes:
model: A `tff.learning.ModelWeights` instance.
optimizer_state: A namedtuple of the optimizer variables.
round_num: The current training round, as a float.
dp_clip_norm: L2 norm to clip client gradients.
dp_noise_std: Standard deviation of Gaussian distribution to sample noise
to add to gradients for differential privacy.
"""
model = attr.ib()
optimizer_state = attr.ib()
  round_num = attr.ib()  # A float, to avoid type incompatibility when
                         # calculating learning rate schedules.
  dp_clip_norm = attr.ib()
  dp_noise_std = attr.ib()
class CreatePrivateServerUpdateFn():
"""Returns a tf.function for the client_update.
This "create" fn is necesessary to prevent
"ValueError: Creating variables on a non-first call to a function decorated
with tf.function" errors due to the client optimizer creating variables. This
is really only needed because we test the client_update function directly.
"""
def __init__(self):
self.random_generator = tf.random.Generator.from_non_deterministic_state()
def _noise_fn(self, noise_std: float, model_weight_specs: Collection[tf.TensorSpec]):
"""Returns random noise to be added for differential privacy."""
def noise_tensor(spec):
noise = self.random_generator.normal(spec.shape, stddev=noise_std)
noise = tf.reshape(noise, spec.shape)
return noise
return tf.nest.map_structure(noise_tensor, model_weight_specs)
@tf.function
def __call__(self, model,
optimizer,
server_state,
weights_delta,
server_learning_rate=1.0):
"""Updates `server_state` based on `weights_delta`, increase the round number.
Args:
model: A `tff.learning.Model`.
optimizer: A `tf.keras.optimizers.Optimizer`.
server_state: A `ServerState`, the state to be updated.
weights_delta: An update to the trainable variables of the model.
server_learning_rate: Server learning rate scales the update from clients
before applying to server. Defaults to 1.
Returns:
An updated `ServerState`.
"""
model_weights = _get_weights(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
server_state.model)
model_weight_specs = tf.nest.map_structure(
lambda v: tf.TensorSpec(v.shape, v.dtype), model_weights.trainable)
noise_tensor = self._noise_fn(server_state.dp_noise_std, model_weight_specs)
# Compute new model weights.
new_weights = tf.nest.map_structure(lambda a, b, n: a + server_learning_rate * (b + n),
model_weights.trainable, weights_delta, noise_tensor)
# Set the model weights to the new ones, overriding the update made by
# the optimizer.
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights.trainable,
new_weights)
# Create a new state based on the updated model.
return tff.structure.update_struct(
server_state,
model=model_weights,
round_num=server_state.round_num)
@tf.function
def private_server_update(model,
optimizer,
server_state,
weights_delta,
server_learning_rate=1.0):
"""Updates `server_state` based on `weights_delta`, increase the round number.
Args:
model: A `tff.learning.Model`.
optimizer: A `tf.keras.optimizers.Optimizer`.
server_state: A `ServerState`, the state to be updated.
weights_delta: An update to the trainable variables of the model.
server_learning_rate: Server learning rate scales the update from clients
before applying to server. Defaults to 1.
Returns:
An updated `ServerState`.
"""
model_weights = _get_weights(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
server_state.model)
model_weight_specs = tf.nest.map_structure(
lambda v: tf.TensorSpec(v.shape, v.dtype), model_weights.trainable)
noise_tensor = _noise_fn(server_state.dp_noise_std, model_weight_specs)
# Compute new model weights.
new_weights = tf.nest.map_structure(lambda a, b, n: a + server_learning_rate * (b + n),
model_weights.trainable, weights_delta, noise_tensor)
# Set the model weights to the new ones, overriding the update made by
# the optimizer.
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights.trainable,
new_weights)
# Create a new state based on the updated model.
return tff.structure.update_struct(
server_state,
model=model_weights,
round_num=server_state.round_num)
@tf.function
def public_server_update(model,
optimizer,
server_state,
full_grad,
server_learning_rate=1.0):
"""Updates `server_state` based on `weights_delta`, increase the round number.
Args:
model: A `tff.learning.Model`.
optimizer: A `tf.keras.optimizers.Optimizer`.
server_state: A `ServerState`, the state to be updated.
full_grad: Gradient of loss on full data of chosen clients.
server_learning_rate: Server learning rate scales the update from clients
before applying to server. Defaults to 1.
Returns:
An updated `ServerState`.
"""
model_weights = _get_weights(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
server_state.model)
# Server optimizer variables must be initialized prior to invoking this
optimizer_state = _get_optimizer_state(optimizer)
tf.nest.map_structure(lambda v, t: v.assign(t), optimizer_state,
server_state.optimizer_state)
# Apply the update to the model. This is only to update the state of
# the optimizer.
grads_and_vars = zip(full_grad, model_weights.trainable)
optimizer.apply_gradients(grads_and_vars)
# Create a new state based on the updated model.
return tff.structure.update_struct(
server_state,
optimizer_state=optimizer_state,
round_num=server_state.round_num)
@attr.s(eq=False, order=False, frozen=True)
class PrivateClientOutput(object):
"""Structure for outputs returned from clients during federated optimization.
Attributes:
weights_delta: A dictionary of updates to the model's trainable variables.
client_weight: Weights to be used in a weighted mean when aggregating
`weights_delta`.
model_output: A structure matching `tff.learning.Model.report_local_outputs`
reflecting the results of training on the input dataset.
optimizer_output: Additional metrics or other outputs defined by the
optimizer.
"""
weights_delta = attr.ib()
client_weight = attr.ib()
model_output = attr.ib()
optimizer_output = attr.ib()
@attr.s(eq=False, order=False, frozen=True)
class PublicClientOutput(object):
"""Structure for outputs returned from clients during federated optimization.
Attributes:
full_grad: Gradient of loss computed on full client data.
client_weight: Weights to be used in a weighted mean when aggregating
`weights_delta`.
"""
full_grad = attr.ib()
client_weight = attr.ib()
class CreatePrivateClientUpdateFn():
"""Returns a tf.function for the client_update.
This "create" fn is necesessary to prevent
"ValueError: Creating variables on a non-first call to a function decorated
with tf.function" errors due to the client optimizer creating variables. This
is really only needed because we test the client_update function directly.
"""
def __init__(self):
self.grad_sum = None
@tf.function
def __call__(self,
model,
dataset,
initial_weights,
initial_optimizer_state,
optimizer,
client_weight_fn=None,
dp_clip_norm=1.0):
"""Updates client model.
Args:
model: A `tff.learning.Model`.
dataset: A 'tf.data.Dataset'.
initial_weights: A `tff.learning.ModelWeights` from server.
initial_optimizer_state: The variables to assign to the client optimizer.
optimizer: A `tf.keras.optimizer.Optimizer` object, assumed to be
identical to the optimizer used by the server.
client_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the
weight in the federated average of model deltas. If not provided, the
default is the total number of examples processed on device.
dp_clip_norm: L2 norm to clip the client deltas
Returns:
A 'PrivateClientOutput`.
"""
model_weights = _get_weights(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
initial_weights)
# Compute gradient over full data at initial_weights.
# This assumes that the loss is an average over all examples in a batch,
# and that all batches have the same size (otherwise, last batch has a
# slightly higher weight).
num_batches = 0.0
loss_sum = 0.0
# Client optimizer variables must be initialized prior to invoking this
optimizer_state = _get_optimizer_state(optimizer)
num_examples = tf.constant(0, dtype=tf.int32)
for batch in iter(dataset):
# keep optimizer state fixed to initial values.
tf.nest.map_structure(lambda v, t: v.assign(t), optimizer_state,
initial_optimizer_state)
with tf.GradientTape() as tape:
output = model.forward_pass(batch)
grads = tape.gradient(output.loss, model_weights.trainable)
grads_and_vars = zip(grads, model_weights.trainable)
optimizer.apply_gradients(grads_and_vars)
if hasattr(output, 'num_examples'):
batch_size = tf.cast(output.num_examples, dtype=tf.int32)
else:
batch_x, _ = _unpack_data_label(batch)
batch_size = tf.shape(batch_x)[0]
      num_examples += batch_size
loss_sum += output.loss * tf.cast(batch_size, tf.float32)
aggregated_outputs = loss_sum
weights_delta = tf.nest.map_structure(lambda a, b: a - b,
model_weights.trainable,
initial_weights.trainable)
if client_weight_fn is None:
client_weight = tf.cast(num_examples, dtype=tf.float32)
else:
client_weight = client_weight_fn(aggregated_outputs)
optimizer_output = collections.OrderedDict([('num_examples', num_examples)])
clip_norm = tf.cast(dp_clip_norm, tf.float32)
if tf.less(tf.constant(0, tf.float32), clip_norm):
flatten_weights_delta = tf.nest.flatten(weights_delta)
clipped_flatten_weights_delta, _ = tf.clip_by_global_norm(
flatten_weights_delta, clip_norm)
weights_delta = tf.nest.pack_sequence_as(weights_delta,
clipped_flatten_weights_delta)
return PrivateClientOutput(
weights_delta=weights_delta,
client_weight=client_weight,
model_output=loss_sum / client_weight,
optimizer_output=optimizer_output)
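# Illustrative note (not upstream): tf.clip_by_global_norm rescales the whole
# delta by clip_norm / max(global_norm, clip_norm), so a client delta with
# global L2 norm 4.0 under dp_clip_norm=1.0 is scaled by 0.25 before it is
# aggregated.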
class CreatePublicClientUpdateFn():
"""Returns a tf.function for the client_update.
This "create" fn is necesessary to prevent
"ValueError: Creating variables on a non-first call to a function decorated
with tf.function" errors due to the client optimizer creating variables. This
is really only needed because we test the client_update function directly.
"""
def __init__(self):
self.grad_sum = None
@tf.function
def __call__(self,
model,
dataset,
initial_weights,
initial_optimizer_state,
optimizer,
client_weight_fn=None):
"""Updates client model.
Args:
model: A `tff.learning.Model`.
dataset: A 'tf.data.Dataset'.
initial_weights: A `tff.learning.ModelWeights` from server.
initial_optimizer_state: The variables to assign to the client optimizer.
optimizer: A `tf.keras.optimizer.Optimizer` object, assumed to be
identical to the optimizer used by the server.
client_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the
weight in the federated average of model deltas. If not provided, the
default is the total number of examples processed on device.
Returns:
A 'PublicClientOutput`.
"""
model_weights = _get_weights(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
initial_weights)
# Compute gradient over full data at initial_weights.
# This assumes that the loss is an average over all examples in a batch,
# and that all batches have the same size (otherwise, last batch has a
# slightly higher weight).
num_batches = 0.0
if self.grad_sum is None:
self.grad_sum = tf.nest.map_structure(
lambda x: tf.Variable(tf.zeros_like(x)), model_weights.trainable)
tf.nest.map_structure(
lambda v, t: v.assign(t), self.grad_sum,
tf.nest.map_structure(tf.zeros_like, model_weights.trainable))
for batch in iter(dataset):
num_batches += 1.0
with tf.GradientTape() as tape:
output = model.forward_pass(batch)
tf.nest.map_structure(lambda v, t: v.assign_add(t), self.grad_sum,
tape.gradient(output.loss, model_weights.trainable))
if num_batches > 0.0:
full_grad = tf.nest.map_structure(lambda a: a / num_batches,
self.grad_sum)
else:
# In case a client dataset is empty, just return an all 0s full gradient.
full_grad = tf.nest.map_structure(tf.zeros_like, model_weights.trainable)
return PublicClientOutput(
full_grad=full_grad,
client_weight=num_batches)
def build_server_init_fn(model_fn, optimizer_fn, dp_clip_norm, dp_noise_std, base_lr, server_momentum):
"""Builds a `tff.tf_computation` that returns the initial `ServerState`.
  The attributes `ServerState.model` and `ServerState.optimizer_state`
  are initialized via their constructor
functions. The attribute `ServerState.round_num` is set to 0.0.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
dp_clip_norm: L2 norm to clip client gradients.
dp_noise_std: Standard deviation of Gaussian distribution to sample noise
to add to gradients for differential privacy.
base_lr: Learning rate for server optimizer
server_momentum: Momentum for server optimizer
Returns:
A `tff.tf_computation` that returns initial `ServerState`.
"""
@tff.tf_computation
def server_init_tf():
    optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
model = model_fn()
_initialize_optimizer_vars(model, optimizer)
return ServerState(
model=_get_weights(model),
optimizer_state=_get_optimizer_state(optimizer),
        round_num=0.0,
dp_clip_norm=dp_clip_norm,
dp_noise_std=dp_noise_std)
return server_init_tf
def build_averaging_process(model_fn,
update_type = 'private',
optimizer_fn=tf.keras.optimizers.SGD,
base_lr=0.1,
server_lr=1.0,
server_momentum=0.0,
dp_clip_norm=1.0,
dp_noise_std=0.0,
client_weight_fn=None):
"""Builds the TFF computations for optimization using federated averaging.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
update_type: String to denote whether process operates on private or public
data.
optimizer_fn: A function that accepts a `learning_rate` argument and returns
a `tf.keras.optimizers.Optimizer` instance. Must return an optimizer with
`iterations` and `weights` attributes. This is the base optimizer whose
updates are split between the client and server in the Mime/Mimelite
algorithms.
base_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate for the base optimizer.
server_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate for applying weight
updates to server model.
server_momentum: A scalar momentum parameter for the server optimizer.
dp_clip_norm: L2 norm to clip deltas of clients to.
dp_noise_std: Standard deviation of Gaussian distribution to sample noise
to add to gradients for differential privacy.
client_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the weight
in the federated average of the client models. If not provided, the
default is the total number of examples processed on device.
Returns:
A `tff.templates.IterativeProcess`.
"""
base_lr_schedule = base_lr
if not callable(base_lr_schedule):
base_lr_schedule = lambda round_num: base_lr
server_lr_schedule = server_lr
if not callable(server_lr_schedule):
server_lr_schedule = lambda round_num: server_lr
dummy_model = model_fn()
server_init_tf = build_server_init_fn(model_fn, optimizer_fn, dp_clip_norm, dp_noise_std, server_lr, server_momentum)
server_state_type = server_init_tf.type_signature.result
model_weights_type = server_state_type.model
optimizer_state_type = server_state_type.optimizer_state
round_num_type = server_state_type.round_num
clip_norm_type = server_state_type.dp_clip_norm
tf_dataset_type = tff.SequenceType(dummy_model.input_spec)
model_input_type = tff.SequenceType(dummy_model.input_spec)
federated_dataset_type = tff.type_at_clients(tf_dataset_type)
@tff.tf_computation(model_input_type, model_weights_type,
optimizer_state_type, round_num_type, clip_norm_type)
def private_client_update_fn(tf_dataset, initial_model_weights,
initial_optimizer_state, round_num, clip_norm):
"""Performs a private client update."""
model = model_fn()
base_lr = base_lr_schedule(round_num)
optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
# We initialize the client optimizer variables to avoid creating them
# within the scope of the tf.function client_update.
_initialize_optimizer_vars(model, optimizer)
client_update = CreatePrivateClientUpdateFn()
return client_update(model, tf_dataset, initial_model_weights,
initial_optimizer_state, optimizer, client_weight_fn, clip_norm)
@tff.tf_computation(model_input_type, model_weights_type,
optimizer_state_type, round_num_type)
def public_client_update_fn(tf_dataset, initial_model_weights,
initial_optimizer_state, round_num):
"""Performs a public client update."""
model = model_fn()
base_lr = base_lr_schedule(round_num)
optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
# We initialize the client optimizer variables to avoid creating them
# within the scope of the tf.function client_update.
_initialize_optimizer_vars(model, optimizer)
client_update = CreatePublicClientUpdateFn()
return client_update(model, tf_dataset, initial_model_weights,
initial_optimizer_state, optimizer, client_weight_fn)
@tff.tf_computation(server_state_type, model_weights_type.trainable)
def private_server_update_fn(server_state, model_delta):
model = model_fn()
server_lr = server_lr_schedule(server_state.round_num)
base_lr = base_lr_schedule(server_state.round_num)
optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
# We initialize the server optimizer variables to avoid creating them
# within the scope of the tf.function server_update.
_initialize_optimizer_vars(model, optimizer)
private_server_update = CreatePrivateServerUpdateFn()
return private_server_update(model, optimizer, server_state, model_delta,
server_lr)
@tff.tf_computation(server_state_type,
model_weights_type.trainable)
def public_server_update_fn(server_state, full_grad):
model = model_fn()
server_lr = server_lr_schedule(server_state.round_num)
base_lr = base_lr_schedule(server_state.round_num)
optimizer = optimizer_fn(learning_rate=base_lr, momentum=server_momentum)
# We initialize the server optimizer variables to avoid creating them
# within the scope of the tf.function server_update.
_initialize_optimizer_vars(model, optimizer)
return public_server_update(model, optimizer, server_state, full_grad,
server_lr)
@tff.federated_computation(
tff.type_at_server(server_state_type),
tff.type_at_clients(tf_dataset_type))
def run_one_round_public(server_state, federated_dataset):
"""Orchestration logic for one round of computation.
Args:
server_state: A `ServerState`.
federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.
Returns:
A tuple of updated `ServerState` and the result of
`tff.learning.Model.federated_output_computation`.
"""
client_model = tff.federated_broadcast(server_state.model)
optimizer_state = tff.federated_broadcast(server_state.optimizer_state)
client_round_num = tff.federated_broadcast(server_state.round_num)
client_outputs = tff.federated_map(
public_client_update_fn,
(federated_dataset, client_model, optimizer_state, client_round_num))
full_grad = tff.federated_mean(
client_outputs.full_grad)
server_state = tff.federated_map(public_server_update_fn,
(server_state, full_grad))
return server_state
@tff.federated_computation(
tff.type_at_server(server_state_type),
tff.type_at_clients(tf_dataset_type))
def run_one_round_private(server_state, federated_dataset):
"""Orchestration logic for one round of computation.
Args:
server_state: A `ServerState`.
federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.
Returns:
A tuple of updated `ServerState` and the result of
`tff.learning.Model.federated_output_computation`.
"""
client_model = tff.federated_broadcast(server_state.model)
optimizer_state = tff.federated_broadcast(server_state.optimizer_state)
client_round_num = tff.federated_broadcast(server_state.round_num)
client_dp_clip_norm = tff.federated_broadcast(server_state.dp_clip_norm)
client_outputs = tff.federated_map(
private_client_update_fn,
(federated_dataset, client_model, optimizer_state, client_round_num, client_dp_clip_norm))
model_delta = tff.federated_mean(
client_outputs.weights_delta)
server_state = tff.federated_map(private_server_update_fn,
(server_state, model_delta))
return server_state
@tff.federated_computation
def server_init_tff():
"""Orchestration logic for server model initialization."""
return tff.federated_value(server_init_tf(), tff.SERVER)
if update_type == 'private':
return tff.templates.IterativeProcess(
initialize_fn=server_init_tff, next_fn=run_one_round_private)
elif update_type == 'public':
return tff.templates.IterativeProcess(
initialize_fn=server_init_tff, next_fn=run_one_round_public)
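# --- Hedged usage sketch (illustrative comments only; names are hypothetical) ---
# How the returned iterative process might be driven, assuming `my_model_fn`
# is a no-arg function returning a `tff.learning.Model` and
# `sampled_client_datasets` is a list of per-client `tf.data.Dataset`s:
#
#   process = build_averaging_process(
#       model_fn=my_model_fn,
#       update_type='private',
#       optimizer_fn=tf.keras.optimizers.SGD,
#       base_lr=0.1, server_lr=1.0, server_momentum=0.9,
#       dp_clip_norm=1.0, dp_noise_std=0.01)
#   state = process.initialize()
#   for _ in range(num_rounds):
#       state = process.next(state, sampled_client_datasets)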
|
google-research/public-data-in-dpfl
|
mimelite.py
|
Python
|
apache-2.0
| 26,760
|
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/hydro/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/hydro/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/hydro".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/opt/ros/hydro/stacks/qbo_webi/build/devel/env.sh')
output_filename = '/opt/ros/hydro/stacks/qbo_webi/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
HailStorm32/Q.bo_stacks
|
qbo_webi/build/catkin_generated/generate_cached_setup.py
|
Python
|
lgpl-2.1
| 1,266
|
#!/usr/bin/env python
"""This module compiles the lecture notes."""
import glob
import os
import subprocess
import argparse
import shutil
def compile_single(is_update):
"""Compile a single lecture."""
for task in ["pdflatex", "bibtex", "pdflatex", "pdflatex"]:
cmd = [task, "main"]
subprocess.check_call(cmd)
if is_update:
shutil.copy(
"main.pdf", "../../distribution/" + os.getcwd().split("/")[-1] + ".pdf"
)
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Create slides for lecture")
parser.add_argument(
"--update", action="store_true", dest="update", help="update public slides"
)
is_complete = "lectures" == os.getcwd().split("/")[-1]
is_update = parser.parse_args().update
if is_complete:
for dirname in glob.glob("0*"):
os.chdir(dirname)
compile_single(is_update)
os.chdir("../")
# I also want to have a complete deck of slides available. This is not intended
# for public distribution.
fnames = []
for fname in sorted(glob.glob("0*")):
fnames += [fname + "/main.pdf"]
cmd = "pdftk " + " ".join(fnames) + " cat output course_deck.pdf"
subprocess.check_call(cmd, shell=True)
if is_update:
shutil.copy("course_deck.pdf", "../distribution/course_deck.pdf")
else:
compile_single(is_update)
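# Illustrative note (not part of the original script): for hypothetical
# lecture folders 01_intro and 02_data, the assembled command is
#   pdftk 01_intro/main.pdf 02_data/main.pdf cat output course_deck.pdf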
|
grmToolbox/grmpy
|
promotion/grmpy_tutorial/create_slides.py
|
Python
|
mit
| 1,444
|
from datetime import datetime
import os
import re
import unittest
from landscape.lib import testing
from landscape.lib.sysstats import (
MemoryStats, CommandError, get_logged_in_users, get_uptime,
get_thermal_zones, LoginInfoReader, BootTimes)
from landscape.lib.testing import append_login_data
SAMPLE_MEMORY_INFO = """
MemTotal: 1546436 kB
MemFree: 23452 kB
Buffers: 41656 kB
Cached: 807628 kB
SwapCached: 17572 kB
Active: 1030792 kB
Inactive: 426892 kB
HighTotal: 0 kB
HighFree: 0 kB
LowTotal: 1546436 kB
LowFree: 23452 kB
SwapTotal: 1622524 kB
SwapFree: 1604936 kB
Dirty: 1956 kB
Writeback: 0 kB
Mapped: 661772 kB
Slab: 54980 kB
CommitLimit: 2395740 kB
Committed_AS: 1566888 kB
PageTables: 2728 kB
VmallocTotal: 516088 kB
VmallocUsed: 5660 kB
VmallocChunk: 510252 kB
"""
class BaseTestCase(testing.TwistedTestCase, testing.FSTestCase,
unittest.TestCase):
pass
class MemoryStatsTest(BaseTestCase):
def test_get_memory_info(self):
filename = self.makeFile(SAMPLE_MEMORY_INFO)
memstats = MemoryStats(filename)
self.assertEqual(memstats.total_memory, 1510)
self.assertEqual(memstats.free_memory, 852)
self.assertEqual(memstats.used_memory, 658)
self.assertEqual(memstats.total_swap, 1584)
self.assertEqual(memstats.free_swap, 1567)
self.assertEqual(memstats.used_swap, 17)
self.assertEqual("%.2f" % memstats.free_memory_percentage, "56.42")
self.assertEqual("%.2f" % memstats.free_swap_percentage, "98.93")
self.assertEqual("%.2f" % memstats.used_memory_percentage, "43.58")
self.assertEqual("%.2f" % memstats.used_swap_percentage, "1.07")
def test_get_memory_info_without_swap(self):
sample = re.subn(r"Swap(Free|Total): *\d+ kB", r"Swap\1: 0",
SAMPLE_MEMORY_INFO)[0]
filename = self.makeFile(sample)
memstats = MemoryStats(filename)
self.assertEqual(memstats.total_swap, 0)
self.assertEqual(memstats.free_swap, 0)
self.assertEqual(memstats.used_swap, 0)
self.assertEqual(memstats.used_swap_percentage, 0)
self.assertEqual(memstats.free_swap_percentage, 0)
self.assertEqual(type(memstats.used_swap_percentage), float)
self.assertEqual(type(memstats.free_swap_percentage), float)
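# Illustrative note (not part of the original tests): the expected values in
# test_get_memory_info follow from the kB fields in SAMPLE_MEMORY_INFO,
# assuming MemoryStats reports megabytes and counts buffers/cache as free:
#   total_memory = 1546436 // 1024                   == 1510
#   free_memory  = (23452 + 41656 + 807628) // 1024  == 852
#   used_memory  = 1510 - 852                        == 658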
class FakeWhoQTest(testing.HelperTestCase, BaseTestCase):
helpers = [testing.EnvironSaverHelper]
def fake_who(self, users):
dirname = self.makeDir()
os.environ["PATH"] = "%s:%s" % (dirname, os.environ["PATH"])
self.who_path = os.path.join(dirname, "who")
who = open(self.who_path, "w")
who.write("#!/bin/sh\n")
who.write("test x$1 = x-q || echo missing-parameter\n")
who.write("echo %s\n" % users)
who.write("echo '# users=%d'\n" % len(users.split()))
who.close()
os.chmod(self.who_path, 0o770)
class LoggedInUsersTest(FakeWhoQTest):
def test_one_user(self):
self.fake_who("joe")
result = get_logged_in_users()
result.addCallback(self.assertEqual, ["joe"])
return result
def test_one_user_multiple_times(self):
self.fake_who("joe joe joe joe")
result = get_logged_in_users()
result.addCallback(self.assertEqual, ["joe"])
return result
def test_many_users(self):
self.fake_who("joe moe boe doe")
result = get_logged_in_users()
result.addCallback(self.assertEqual, ["boe", "doe", "joe", "moe"])
return result
def test_command_error(self):
self.fake_who("")
who = open(self.who_path, "w")
who.write("#!/bin/sh\necho ERROR 1>&2\nexit 1\n")
who.close()
result = get_logged_in_users()
def assert_failure(failure):
failure.trap(CommandError)
self.assertEqual(str(failure.value), "ERROR\n")
result.addErrback(assert_failure)
return result
class UptimeTest(BaseTestCase):
"""Test for parsing /proc/uptime data."""
def test_valid_uptime_file(self):
"""Test ensures that we can read a valid /proc/uptime file."""
proc_file = self.makeFile("17608.24 16179.25")
self.assertEqual("%0.2f" % get_uptime(proc_file),
"17608.24")
class ProcfsThermalZoneTest(BaseTestCase):
def setUp(self):
super(ProcfsThermalZoneTest, self).setUp()
self.thermal_zone_path = self.makeDir()
def get_thermal_zones(self):
return list(get_thermal_zones(self.thermal_zone_path))
def write_thermal_zone(self, name, temperature):
zone_path = os.path.join(self.thermal_zone_path, name)
if not os.path.isdir(zone_path):
os.mkdir(zone_path)
file = open(os.path.join(zone_path, "temperature"), "w")
file.write("temperature: " + temperature)
file.close()
class GetProcfsThermalZonesTest(ProcfsThermalZoneTest):
def test_non_existent_thermal_zone_directory(self):
thermal_zones = list(get_thermal_zones("/non-existent/thermal_zone"))
self.assertEqual(thermal_zones, [])
def test_empty_thermal_zone_directory(self):
self.assertEqual(self.get_thermal_zones(), [])
def test_one_thermal_zone(self):
self.write_thermal_zone("THM0", "50 C")
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 1)
self.assertEqual(thermal_zones[0].name, "THM0")
self.assertEqual(thermal_zones[0].temperature, "50 C")
self.assertEqual(thermal_zones[0].temperature_value, 50)
self.assertEqual(thermal_zones[0].temperature_unit, "C")
self.assertEqual(thermal_zones[0].path,
os.path.join(self.thermal_zone_path, "THM0"))
def test_two_thermal_zones(self):
self.write_thermal_zone("THM0", "50 C")
self.write_thermal_zone("THM1", "51 C")
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 2)
self.assertEqual(thermal_zones[0].temperature, "50 C")
self.assertEqual(thermal_zones[0].temperature_value, 50)
self.assertEqual(thermal_zones[0].temperature_unit, "C")
self.assertEqual(thermal_zones[1].temperature, "51 C")
self.assertEqual(thermal_zones[1].temperature_value, 51)
self.assertEqual(thermal_zones[1].temperature_unit, "C")
def test_badly_formatted_temperature(self):
self.write_thermal_zone("THM0", "SOMETHING BAD")
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 1)
self.assertEqual(thermal_zones[0].temperature, "SOMETHING BAD")
self.assertEqual(thermal_zones[0].temperature_value, None)
self.assertEqual(thermal_zones[0].temperature_unit, None)
def test_badly_formatted_with_missing_space(self):
self.write_thermal_zone("THM0", "SOMETHINGBAD")
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 1)
self.assertEqual(thermal_zones[0].temperature, "SOMETHINGBAD")
self.assertEqual(thermal_zones[0].temperature_value, None)
self.assertEqual(thermal_zones[0].temperature_unit, None)
def test_temperature_file_with_missing_label(self):
self.write_thermal_zone("THM0", "SOMETHINGBAD")
temperature_path = os.path.join(self.thermal_zone_path,
"THM0/temperature")
file = open(temperature_path, "w")
file.write("bad-label: foo bar\n")
file.close()
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 1)
self.assertEqual(thermal_zones[0].temperature, None)
self.assertEqual(thermal_zones[0].temperature_value, None)
self.assertEqual(thermal_zones[0].temperature_unit, None)
class ThermalZoneTest(BaseTestCase):
def setUp(self):
super(ThermalZoneTest, self).setUp()
self.thermal_zone_path = self.makeDir()
def get_thermal_zones(self):
return list(get_thermal_zones(self.thermal_zone_path))
def write_thermal_zone(self, name, temperature):
zone_path = os.path.join(self.thermal_zone_path, name)
if not os.path.isdir(zone_path):
os.mkdir(zone_path)
file = open(os.path.join(zone_path, "temp"), "w")
file.write(temperature)
file.close()
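        # Editor's note: in the sysfs layout each zone directory instead has a
        # "temp" file holding millidegrees Celsius as a bare integer (e.g.
        # "50000" for 50.0 C), which is what the tests below rely on.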
class GetSysfsThermalZonesTest(ThermalZoneTest):
def test_non_existent_thermal_zone_directory(self):
thermal_zones = list(get_thermal_zones("/non-existent/thermal_zone"))
self.assertEqual(thermal_zones, [])
def test_empty_thermal_zone_directory(self):
self.assertEqual(self.get_thermal_zones(), [])
def test_one_thermal_zone(self):
self.write_thermal_zone("THM0", "50000")
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 1)
self.assertEqual(thermal_zones[0].name, "THM0")
self.assertEqual(thermal_zones[0].temperature, "50.0 C")
self.assertEqual(thermal_zones[0].temperature_value, 50.0)
self.assertEqual(thermal_zones[0].temperature_unit, "C")
self.assertEqual(thermal_zones[0].path,
os.path.join(self.thermal_zone_path, "THM0"))
def test_two_thermal_zones(self):
self.write_thermal_zone("THM0", "50000")
self.write_thermal_zone("THM1", "51000")
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 2)
self.assertEqual(thermal_zones[0].temperature, "50.0 C")
self.assertEqual(thermal_zones[0].temperature_value, 50.0)
self.assertEqual(thermal_zones[0].temperature_unit, "C")
self.assertEqual(thermal_zones[1].temperature, "51.0 C")
self.assertEqual(thermal_zones[1].temperature_value, 51.0)
self.assertEqual(thermal_zones[1].temperature_unit, "C")
def test_non_int_temperature(self):
self.write_thermal_zone("THM0", "50432")
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 1)
self.assertEqual(thermal_zones[0].name, "THM0")
self.assertEqual(thermal_zones[0].temperature, "50.4 C")
self.assertEqual(thermal_zones[0].temperature_value, 50.432)
self.assertEqual(thermal_zones[0].temperature_unit, "C")
self.assertEqual(thermal_zones[0].path,
os.path.join(self.thermal_zone_path, "THM0"))
def test_badly_formatted_temperature(self):
self.write_thermal_zone("THM0", "SOMETHING BAD")
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 1)
self.assertEqual(thermal_zones[0].temperature, None)
self.assertEqual(thermal_zones[0].temperature_value, None)
self.assertEqual(thermal_zones[0].temperature_unit, None)
def test_read_error(self):
self.write_thermal_zone("THM0", "50000")
temperature_path = os.path.join(self.thermal_zone_path,
"THM0/temp")
os.chmod(temperature_path, 0o200) # --w-------
thermal_zones = self.get_thermal_zones()
self.assertEqual(len(thermal_zones), 1)
self.assertEqual(thermal_zones[0].temperature, None)
self.assertEqual(thermal_zones[0].temperature_value, None)
self.assertEqual(thermal_zones[0].temperature_unit, None)
class LoginInfoReaderTest(BaseTestCase):
"""Tests for login info file reader."""
def test_read_empty_file(self):
"""Test ensures the reader is resilient to empty files."""
filename = self.makeFile("")
file = open(filename, "rb")
try:
reader = LoginInfoReader(file)
self.assertEqual(reader.read_next(), None)
finally:
file.close()
def test_read_login_info(self):
"""Test ensures the reader can read login info."""
filename = self.makeFile("")
append_login_data(filename, login_type=1, pid=100, tty_device="/dev/",
id="1", username="jkakar", hostname="localhost",
termination_status=0, exit_status=0, session_id=1,
entry_time_seconds=105, entry_time_milliseconds=10,
remote_ip_address=[192, 168, 42, 102])
append_login_data(filename, login_type=1, pid=101, tty_device="/dev/",
id="1", username="root", hostname="localhost",
termination_status=0, exit_status=0, session_id=2,
entry_time_seconds=235, entry_time_milliseconds=17,
remote_ip_address=[192, 168, 42, 102])
file = open(filename, "rb")
try:
reader = LoginInfoReader(file)
info = reader.read_next()
self.assertEqual(info.login_type, 1)
self.assertEqual(info.pid, 100)
self.assertEqual(info.tty_device, "/dev/")
self.assertEqual(info.id, "1")
self.assertEqual(info.username, "jkakar")
self.assertEqual(info.hostname, "localhost")
self.assertEqual(info.termination_status, 0)
self.assertEqual(info.exit_status, 0)
self.assertEqual(info.session_id, 1)
self.assertEqual(info.entry_time, datetime.utcfromtimestamp(105))
# FIXME Test IP address handling. -jk
info = reader.read_next()
self.assertEqual(info.login_type, 1)
self.assertEqual(info.pid, 101)
self.assertEqual(info.tty_device, "/dev/")
self.assertEqual(info.id, "1")
self.assertEqual(info.username, "root")
self.assertEqual(info.hostname, "localhost")
self.assertEqual(info.termination_status, 0)
self.assertEqual(info.exit_status, 0)
self.assertEqual(info.session_id, 2)
self.assertEqual(info.entry_time, datetime.utcfromtimestamp(235))
# FIXME Test IP address handling. -jk
info = reader.read_next()
self.assertEqual(info, None)
finally:
file.close()
def test_login_info_iterator(self):
"""Test ensures iteration behaves correctly."""
filename = self.makeFile("")
append_login_data(filename)
append_login_data(filename)
file = open(filename, "rb")
try:
reader = LoginInfoReader(file)
count = 0
for info in reader.login_info():
count += 1
self.assertEqual(count, 2)
finally:
file.close()
class BootTimesTest(BaseTestCase):
def test_fallback_to_uptime(self):
"""
When no data is available in C{/var/log/wtmp}
L{BootTimes.get_last_boot_time} falls back to C{/proc/uptime}.
"""
wtmp_filename = self.makeFile("")
append_login_data(wtmp_filename, tty_device="~", username="shutdown",
entry_time_seconds=535)
self.assertTrue(BootTimes(filename=wtmp_filename).get_last_boot_time())
|
CanonicalLtd/landscape-client
|
landscape/lib/tests/test_sysstats.py
|
Python
|
gpl-2.0
| 15,312
|
from __future__ import unicode_literals
import django
from future.builtins import int, zip
from functools import reduce
from operator import ior, iand
from string import punctuation
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Q, CharField, TextField
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
if django.VERSION >= (1, 10):
class ManagerDescriptor(ManagerDescriptor):
"""
This class exists purely to skip the abstract model check
in the __get__ method of Django's ManagerDescriptor.
"""
def __get__(self, instance, cls=None):
if instance is not None:
raise AttributeError(
"Manager isn't accessible via %s instances" % cls.__name__
)
# In ManagerDescriptor.__get__, an exception is raised here
# if cls is abstract
if cls._meta.swapped:
raise AttributeError(
"Manager isn't available; "
"'%s.%s' has been swapped for '%s'" % (
cls._meta.app_label,
cls._meta.object_name,
cls._meta.swapped,
)
)
return cls._meta.managers_map[self.manager.name]
class PublishedManager(Manager):
"""
Provides filter for restricting items returned by status and
publish date when the given user is not a staff member.
"""
def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED))
def get_by_natural_key(self, slug):
return self.get(slug=slug)
def search_fields_to_dict(fields):
"""
In ``SearchableQuerySet`` and ``SearchableManager``, search fields
can either be a sequence, or a dict of fields mapped to weights.
This function converts sequences to a dict mapped to even weights,
so that we're consistently dealing with a dict of fields mapped to
weights, eg: ("title", "content") -> {"title": 1, "content": 1}
"""
if not fields:
return {}
try:
int(list(dict(fields).values())[0])
except (TypeError, ValueError):
fields = dict(zip(fields, [1] * len(fields)))
return fields
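# Doctest-style illustration (editor's sketch, not part of the original module):
#   >>> search_fields_to_dict({"title": 5, "content": 1})  # weights kept as-is
#   {'title': 5, 'content': 1}
#   >>> search_fields_to_dict(None)                        # falsy -> empty dict
#   {}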
class SearchableQuerySet(QuerySet):
"""
QuerySet providing main search functionality for
``SearchableManager``.
"""
def __init__(self, *args, **kwargs):
self._search_ordered = False
self._search_terms = set()
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableQuerySet, self).__init__(*args, **kwargs)
def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
"""
# ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
# ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
.replace('+"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
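        # Editor's trace of the above (illustrative): for the query
        #   'word +"exact phrase" -other'
        # the replace/split dance yields ['word ', '+exact phrase', ' -other'],
        # and the comprehension produces ['+exact phrase', 'word', '-other'].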
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
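    # Usage sketch (editor's illustration; ``BlogPost`` is a hypothetical model
    # using this queryset):
    #   BlogPost.objects.search('django +"custom models" -legacy',
    #                           search_fields={"title": 5, "content": 1})
    # requires "custom models", excludes "legacy", and treats "django" as an
    # optional, relevance-boosting term.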
def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs)
def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names)
def iterator(self):
"""
If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
sorted by the number of occurrence of terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships.
"""
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
results[i].result_count = count
return iter(results)
return results
class SearchableManager(Manager):
"""
Manager providing a chainable queryset.
Adapted from http://www.djangosnippets.org/snippets/562/
    The ``search`` method supports spanning across models that subclass the
model being used to search.
"""
def __init__(self, *args, **kwargs):
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableManager, self).__init__(*args, **kwargs)
def get_search_fields(self):
"""
Returns the search field names mapped to weights as a dict.
Used in ``get_queryset`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
``SearchableManager.__init__``, which then get stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
    built up from the inheritance chain.
Finally if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances.
"""
search_fields = self._search_fields.copy()
if not search_fields:
for cls in reversed(self.model.__mro__):
super_fields = getattr(cls, "search_fields", {})
search_fields.update(search_fields_to_dict(super_fields))
if not search_fields:
search_fields = []
for f in self.model._meta.fields:
if isinstance(f, (CharField, TextField)):
search_fields.append(f.name)
search_fields = search_fields_to_dict(search_fields)
return search_fields
def get_queryset(self):
search_fields = self.get_search_fields()
return SearchableQuerySet(self.model, search_fields=search_fields)
def contribute_to_class(self, model, name):
"""
Newer versions of Django explicitly prevent managers being
accessed from abstract classes, which is behaviour the search
API has always relied on. Here we reinstate it.
"""
super(SearchableManager, self).contribute_to_class(model, name)
setattr(model, name, ManagerDescriptor(self))
def search(self, *args, **kwargs):
"""
Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract.
"""
if not settings.SEARCH_MODEL_CHOICES:
# No choices defined - build a list of leaf models (those
# without subclasses) that inherit from Displayable.
models = [m for m in apps.get_models()
if issubclass(m, self.model)]
parents = reduce(ior, [set(m._meta.get_parent_list())
for m in models])
models = [m for m in models if m not in parents]
elif getattr(self.model._meta, "abstract", False):
# When we're combining model subclasses for an abstract
# model (eg Displayable), we only want to use models that
# are represented by the ``SEARCH_MODEL_CHOICES`` setting.
# Now this setting won't contain an exact list of models
# we should use, since it can define superclass models such
# as ``Page``, so we check the parent class list of each
# model when determining whether a model falls within the
# ``SEARCH_MODEL_CHOICES`` setting.
search_choices = set()
models = set()
parents = set()
errors = []
for name in settings.SEARCH_MODEL_CHOICES:
try:
model = apps.get_model(*name.split(".", 1))
except LookupError:
errors.append(name)
else:
search_choices.add(model)
if errors:
raise ImproperlyConfigured("Could not load the model(s) "
"%s defined in the 'SEARCH_MODEL_CHOICES' setting."
% ", ".join(errors))
for model in apps.get_models():
                # Model is actually a subclass of what we're
                # searching (eg Displayable)
is_subclass = issubclass(model, self.model)
# Model satisfies the search choices list - either
# there are no search choices, model is directly in
# search choices, or its parent is.
this_parents = set(model._meta.get_parent_list())
in_choices = not search_choices or model in search_choices
in_choices = in_choices or this_parents & search_choices
if is_subclass and (in_choices or not search_choices):
                    # Add to models we'll search. Also maintain a parent
# set, used below for further refinement of models
# list to search.
models.add(model)
parents.update(this_parents)
# Strip out any models that are superclasses of models,
# specifically the Page model which will generally be the
# superclass for all custom content types, since if we
# query the Page model as well, we will get duplicate
# results.
models -= parents
else:
models = [self.model]
all_results = []
user = kwargs.pop("for_user", None)
for model in models:
try:
queryset = model.objects.published(for_user=user)
except AttributeError:
queryset = model.objects.get_queryset()
all_results.extend(queryset.search(*args, **kwargs))
return sorted(all_results, key=lambda r: r.result_count, reverse=True)
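    # Usage sketch (editor's illustration):
    #   from mezzanine.core.models import Displayable
    #   results = Displayable.objects.search("mezzanine", for_user=user)
    # ``user`` is a placeholder here; the call fans out across every concrete
    # subclass (or the SEARCH_MODEL_CHOICES subset) and the combined results
    # come back sorted by the ``result_count`` annotation set in
    # SearchableQuerySet.iterator().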
class CurrentSiteManager(DjangoCSM):
"""
Extends Django's site manager to first look up site by ID stored in
the request, the session, then domain for the current request
(accessible via threadlocals in ``mezzanine.core.request``), the
environment variable ``MEZZANINE_SITE_ID`` (which can be used by
    management commands with the ``--site`` arg), finally falling back
to ``settings.SITE_ID`` if none of those match a site.
"""
use_in_migrations = False
def __init__(self, field_name=None, *args, **kwargs):
super(DjangoCSM, self).__init__(*args, **kwargs)
self.__field_name = field_name
self.__is_validated = False
def get_queryset(self):
if not self.__is_validated:
self._get_field_name()
lookup = {self.__field_name + "__id__exact": current_site_id()}
return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager,
SearchableManager):
"""
Manually combines ``CurrentSiteManager``, ``PublishedManager``
and ``SearchableManager`` for the ``Displayable`` model.
"""
def url_map(self, for_user=None, **kwargs):
"""
Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``mezzanine.core.sitemaps``.
"""
class Home:
title = _("Home")
home = Home()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in apps.get_models():
if issubclass(model, self.model):
for item in (model.objects.published(for_user=for_user)
.filter(**kwargs)
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items
|
sjdines/mezzanine
|
mezzanine/core/managers.py
|
Python
|
bsd-2-clause
| 17,312
|
import os.path
from unittest2 import TestCase
from dingus import Dingus
from mule.base import Mule
from mule import conf
from mule.tasks import run_test, mule_setup, mule_teardown
def dingus_calls_to_dict(obj):
# remap dingus calls into a useable dict
calls = {}
for name, args, kwargs, obj in obj:
if name not in calls:
calls[name] = []
calls[name].append((args, kwargs, obj))
return calls
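# Editor's illustration: Dingus records calls as (name, args, kwargs, obj)
# tuples, so after consumer.cancel_by_queue('default') the remapped dict
# would look roughly like:
#   {'cancel_by_queue': [(('default',), {}, <dingus object>)]}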
class TestRunnerTestCase(TestCase):
def test_discovery(self):
mule = Mule()
jobs = list(mule.discover_tests(os.path.dirname(__file__)))
self.assertGreater(len(jobs), 0)
self.assertTrue('mule.tests.TestRunnerTestCase' in ['%s.%s' % (j.__module__, j.__name__) for j in jobs])
# def test_process(self):
# # TODO: process() needs broken down so it can be better tested
# mule = Mule()
# result = mule.process([self.__class__], 'echo $TEST')
# self.assertEquals(len(result), 1)
# result = result[0]
# self.assertTrue('retcode' in result)
# self.assertTrue('timeStarted' in result)
# self.assertTrue('timeFinished' in result)
# self.assertTrue('build_id' in result)
# self.assertTrue('stdout' in result)
# self.assertTrue('stderr' in result)
# self.assertTrue('job' in result)
# self.assertEquals(result['job'], 'tests.TestRunnerTestCase')
# self.assertEquals(result['stdout'], 'tests.TestRunnerTestCase')
# self.assertGreater(result['timeFinished'], result['timeStarted'])
class RunTestTestCase(TestCase):
def test_subprocess(self):
result = run_test('build_id', 'echo $TEST', 'job')
self.assertTrue('retcode' in result)
self.assertTrue('timeStarted' in result)
self.assertTrue('timeFinished' in result)
self.assertTrue('build_id' in result)
self.assertTrue('stdout' in result)
self.assertTrue('stderr' in result)
self.assertTrue('job' in result)
self.assertEquals(result['job'], 'job')
self.assertEquals(result['stdout'], 'job')
self.assertGreater(result['timeFinished'], result['timeStarted'])
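    # Editor's note (inferred from the assertions above): run_test appears to
    # export the job label as $TEST before invoking the shell command, which
    # is why 'echo $TEST' is expected to echo the job name back on stdout.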
def test_callback(self):
bar = []
def foo(result):
bar.append(result)
result = run_test('build_id', 'echo $TEST', 'job', foo)
self.assertEquals(len(bar), 1)
result = bar[0]
self.assertTrue('retcode' in result)
self.assertTrue('timeStarted' in result)
self.assertTrue('timeFinished' in result)
self.assertTrue('build_id' in result)
self.assertTrue('stdout' in result)
self.assertTrue('stderr' in result)
self.assertTrue('job' in result)
self.assertEquals(result['job'], 'job')
self.assertEquals(result['stdout'], 'job')
self.assertGreater(result['timeFinished'], result['timeStarted'])
class PanelTestCase(TestCase):
def test_provision(self):
panel = Dingus('Panel')
result = mule_setup(panel, 1)
self.assertEquals(result, {
"status": "fail",
"reason": "worker is already in use"
})
# Ensure we're now in the default queue
queue = Dingus('Queue')
queue.name = conf.DEFAULT_QUEUE
panel.consumer.task_consumer.queues = [queue]
result = mule_setup(panel, 1)
self.assertTrue('build_id' in result)
self.assertEquals(result['build_id'], 1)
self.assertTrue('status' in result)
self.assertEquals(result['status'], 'ok')
calls = dingus_calls_to_dict(panel.consumer.task_consumer.calls)
self.assertTrue('cancel_by_queue' in calls)
        self.assertEquals(len(calls['cancel_by_queue']), 1)
        call = calls['cancel_by_queue'][0]
        self.assertEquals(len(call[0]), 1)
        self.assertEquals(call[0][0], conf.DEFAULT_QUEUE)
        self.assertTrue('consume' in calls)
        self.assertEquals(len(calls['consume']), 1)
        self.assertTrue('add_consumer_from_dict' in calls)
        self.assertEquals(len(calls['add_consumer_from_dict']), 1)
call = calls['add_consumer_from_dict'][0]
self.assertTrue('queue' in call[1])
self.assertEquals(call[1]['queue'], '%s-1' % conf.BUILD_QUEUE_PREFIX)
def test_teardown(self):
panel = Dingus('Panel')
result = mule_teardown(panel, 1)
self.assertTrue('build_id' in result)
self.assertEquals(result['build_id'], 1)
self.assertTrue('status' in result)
self.assertEquals(result['status'], 'ok')
calls = dingus_calls_to_dict(panel.consumer.task_consumer.calls)
self.assertTrue('cancel_by_queue' in calls)
        self.assertEquals(len(calls['cancel_by_queue']), 1)
        call = calls['cancel_by_queue'][0]
        self.assertEquals(len(call[0]), 1)
        self.assertEquals(call[0][0], '%s-1' % conf.BUILD_QUEUE_PREFIX)
        self.assertTrue('consume' in calls)
        self.assertEquals(len(calls['consume']), 1)
        self.assertTrue('add_consumer_from_dict' in calls)
        self.assertEquals(len(calls['add_consumer_from_dict']), 1)
call = calls['add_consumer_from_dict'][0]
self.assertTrue('queue' in call[1])
self.assertEquals(call[1]['queue'], conf.DEFAULT_QUEUE)
|
disqus/mule
|
mule/tests.py
|
Python
|
apache-2.0
| 5,367
|
import importlib
import glob
import os
import logging
def load_all_codecs():
# Loading all codecs in this folder
for module in glob.glob(os.path.join(os.path.dirname(__file__), "*.py")):
module = os.path.basename(module)
if not module.startswith("_") and module != "utils.py" \
and module != "base_codec.py":
logging.info("utils.py(codec_interface): Loading \"{}\"".format(module))
importlib.import_module("."+module[:-3], "batman.codec_interface")
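# Usage sketch (editor's illustration): a package would typically trigger the
# scan from its __init__, e.g.:
#   from batman.codec_interface import utils
#   utils.load_all_codecs()  # imports every sibling codec module once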
|
ElegantMonkey/Bat-man
|
batman/codec_interface/utils.py
|
Python
|
gpl-3.0
| 511
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Moodle Development Kit
Copyright (c) 2012 Frédéric Massart - FMCorz.net
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
http://github.com/FMCorz/mdk
"""
import os
import re
import logging
import shutil
import subprocess
import json
from tempfile import gettempdir
from .tools import getMDLFromCommitMessage, mkdir, process, parseBranch
from .db import DB
from .config import Conf
from .git import Git, GitException
from .exceptions import InstallException, UpgradeNotAllowed
from .jira import Jira, JiraException
from .scripts import Scripts
C = Conf()
class Moodle(object):
identifier = None
path = None
installed = False
version = None
config = None
_dbo = None
_git = None
_loaded = False
_cos_hasstash = False
_cos_oldbranch = None
_reservedKeywords = [
'branch',
'identifier',
'installed',
'integration',
'maturity',
'path',
'release',
'stablebranch',
'version'
]
def __init__(self, path, identifier=None):
self.path = path
self.identifier = identifier
self.version = {}
self.config = {}
self._load()
def addConfig(self, name, value):
"""Add a parameter to the config file
Will attempt to write them before the inclusion of lib/setup.php"""
configFile = os.path.join(self.path, 'config.php')
if not os.path.isfile(configFile):
return None
if name in self._reservedKeywords:
raise Exception('Cannot use reserved keywords for settings in config.php')
if type(value) == bool:
value = 'true' if value else 'false'
elif type(value) in (dict, list):
value = "json_decode('" + json.dumps(value) + "', true)"
elif type(value) != int:
value = "'" + str(value) + "'"
value = str(value)
try:
f = open(configFile, 'r')
lines = f.readlines()
f.close()
for i, line in enumerate(lines):
if re.search(r'^// MDK Edit\.$', line.rstrip()):
break
elif re.search(r'require_once.*/lib/setup\.php', line):
lines.insert(i, '// MDK Edit.\n')
lines.insert(i + 1, '\n')
# As we've added lines, let's move the index
break
i += 1
if i > len(lines):
i = len(lines)
lines.insert(i, '$CFG->%s = %s;\n' % (name, value))
f = open(configFile, 'w')
f.writelines(lines)
f.close()
except:
raise Exception('Error while writing to config file')
self.reload()
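    # Editor's illustration: addConfig('behat_prefix', 'zbehat_') writes a line
    # like the following above the require_once(...'/lib/setup.php') call:
    #   $CFG->behat_prefix = 'zbehat_';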
def branch_compare(self, branch, compare='>='):
"""Compare the branch of the current instance with the one passed"""
try:
branch = int(branch)
except:
raise Exception('Could not convert branch to int, got %s' % branch)
b = self.get('branch')
if b == None:
raise Exception('Error while reading the branch')
elif b == 'master':
b = C.get('masterBranch')
b = int(b)
if compare == '>=':
return b >= branch
elif compare == '>':
return b > branch
elif compare == '=' or compare == '==':
return b == branch
if compare == '<=':
return b <= branch
elif compare == '<':
return b < branch
return False
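    # Editor's illustration: on an instance whose $branch is '39',
    # branch_compare(36) is True ('>=' is the default comparison) while
    # branch_compare(310, '>=') is False; 'master' is first mapped to the
    # configured masterBranch number before comparing.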
def checkout_stable(self, checkout=True):
"""Checkout the stable branch, do a stash if required. Needs to be called again to pop the stash!"""
# Checkout the branch
if checkout:
stablebranch = self.get('stablebranch')
if self.currentBranch() == stablebranch:
self._cos_oldbranch = None
return True
self._cos_oldbranch = self.currentBranch()
self._cos_hasstash = False
# Stash
stash = self.git().stash(untracked=True)
if stash[0] != 0:
raise Exception('Error while stashing your changes')
if not stash[1].startswith('No local changes'):
self._cos_hasstash = True
# Checkout STABLE
if not self.git().checkout(stablebranch):
raise Exception('Could not checkout %s' % stablebranch)
# Checkout the previous branch
elif self._cos_oldbranch != None:
if not self.git().checkout(self._cos_oldbranch):
raise Exception('Could not checkout working branch %s' % self._cos_oldbranch)
# Unstash
if self._cos_hasstash:
pop = self.git().stash('pop')
if pop[0] != 0:
raise Exception('Error while popping the stash. Probably got conflicts.')
self._cos_hasstash = False
def cli(self, cli, args='', **kwargs):
"""Executes a command line tool script"""
cli = os.path.join(self.get('path'), cli.lstrip('/'))
if not os.path.isfile(cli):
raise Exception('Could not find script to call')
if type(args) == 'list':
args = ' '.join(args)
cmd = '%s %s %s' % (C.get('php'), cli, args)
return process(cmd, cwd=self.get('path'), **kwargs)
def currentBranch(self):
"""Returns the current branch on the git repository"""
return self.git().currentBranch()
def dbo(self):
"""Returns a Database object"""
if self._dbo == None:
engine = self.get('dbtype')
db = self.get('dbname')
if engine != None and db != None:
try:
self._dbo = DB(engine, C.get('db.%s' % engine))
except:
pass
return self._dbo
def generateBranchName(self, issue, suffix='', version=''):
"""Generates a branch name"""
mdl = re.sub(r'(MDL|mdl)(-|_)?', '', issue)
if version == '':
version = self.get('branch')
args = {
'issue': mdl,
'version': version
}
branch = C.get('wording.branchFormat') % args
if suffix != None and suffix != '':
branch += C.get('wording.branchSuffixSeparator') + suffix
return branch
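    # Editor's illustration (assumes a wording.branchFormat such as
    # 'MDL-%(issue)s-%(version)s' and a '-' suffix separator in the config):
    #   generateBranchName('MDL-12345', suffix='try2', version='39')
    # would then return something like 'MDL-12345-39-try2'.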
def get(self, param, default=None):
"""Returns a property of this instance"""
info = self.info()
try:
return info[param]
except:
return default
def git(self):
"""Returns a Git object"""
if self._git == None:
self._git = Git(self.path, C.get('git'))
if not self._git.isRepository():
raise Exception('Could not find the Git repository')
return self._git
def headcommit(self, branch=None):
"""Try to resolve the head commit of branch of this instance"""
if branch == None:
branch = self.currentBranch()
if branch == 'HEAD':
raise Exception('Cannot update the tracker when on detached branch')
smartSearch = C.get('smartHeadCommitSearch')
# Parsing the branch
parsedbranch = parseBranch(branch)
if parsedbranch:
issue = 'MDL-%s' % (parsedbranch['issue'])
else:
logging.debug('Cannot smart resolve using the branch %s' % (branch))
smartSearch = False
headcommit = None
try:
# Trying to smart guess the last commit needed
if smartSearch:
commits = self.git().log(since=branch, count=C.get('smartHeadCommitLimit'), format='%s_____%H').split('\n')[:-1]
# Looping over the last commits to find the commit messages that match the MDL-12345.
candidate = None
for commit in commits:
match = getMDLFromCommitMessage(commit) == issue
if not candidate and not match:
                        # The first commit does not match the issue, so ignore this method.
break
candidate = commit.split('_____')[-1]
if not match:
# The commit does not match any more, we found it!
headcommit = candidate
break
# We could not smart find the last commit, let's use the default mechanism.
if not headcommit:
upstreamremote = C.get('upstreamRemote')
stablebranch = self.get('stablebranch')
headcommit = self.git().hashes(ref='%s/%s' % (upstreamremote, stablebranch), limit=1, format='%H')[0]
except GitException:
logging.warning('Could not resolve the head commit')
headcommit = False
return headcommit
def initPHPUnit(self, force=False, prefix=None):
"""Initialise the PHPUnit environment"""
raise Exception('This method is deprecated, use phpunit.PHPUnit.init() instead.')
def initBehat(self, switchcompletely=False, force=False, prefix=None, faildumppath=None):
"""Initialise the Behat environment"""
if self.branch_compare(25, '<'):
raise Exception('Behat is only available from Moodle 2.5')
# Force switch completely for PHP < 5.4
(none, phpVersion, none) = process('%s -r "echo version_compare(phpversion(), \'5.4\');"' % (C.get('php')))
if int(phpVersion) <= 0:
switchcompletely = True
# Set Behat data root
behat_dataroot = self.get('dataroot') + '_behat'
self.updateConfig('behat_dataroot', behat_dataroot)
# Set Behat DB prefix
currentPrefix = self.get('behat_prefix')
behat_prefix = prefix or 'zbehat_'
# Set behat_faildump_path
currentFailDumpPath = self.get('behat_faildump_path')
if faildumppath and currentFailDumpPath != faildumppath:
self.updateConfig('behat_faildump_path', faildumppath)
elif (not faildumppath and currentFailDumpPath):
self.removeConfig('behat_faildump_path')
if not currentPrefix or force:
self.updateConfig('behat_prefix', behat_prefix)
elif currentPrefix != behat_prefix and self.get('dbtype') != 'oci':
# Warn that a prefix is already set and we did not change it.
# No warning for Oracle as we need to set it to something else.
logging.warning('Behat prefix not changed, already set to \'%s\', expected \'%s\'.' % (currentPrefix, behat_prefix))
# Switch completely?
if self.branch_compare(26, '<'):
if switchcompletely:
self.updateConfig('behat_switchcompletely', switchcompletely)
self.updateConfig('behat_wwwroot', self.get('wwwroot'))
else:
self.removeConfig('behat_switchcompletely')
self.removeConfig('behat_wwwroot')
else:
# Defining wwwroot.
wwwroot = '%s://%s/' % (C.get('scheme'), C.get('behat.host'))
if C.get('path') != '' and C.get('path') != None:
wwwroot = wwwroot + C.get('path') + '/'
wwwroot = wwwroot + self.identifier
currentWwwroot = self.get('behat_wwwroot')
if not currentWwwroot or force:
self.updateConfig('behat_wwwroot', wwwroot)
elif currentWwwroot != wwwroot:
logging.warning('Behat wwwroot not changed, already set to \'%s\', expected \'%s\'.' % (currentWwwroot, wwwroot))
# Force a cache purge
self.purge()
# Force dropping the tables if there are any.
if force:
result = self.cli('admin/tool/behat/cli/util.php', args='--drop', stdout=None, stderr=None)
if result[0] != 0:
raise Exception('Error while initialising Behat. Please try manually.')
# Run the init script.
result = self.cli('admin/tool/behat/cli/init.php', stdout=None, stderr=None)
if result[0] != 0:
raise Exception('Error while initialising Behat. Please try manually.')
# Force a cache purge
self.purge()
def info(self):
"""Returns a dictionary of information about this instance"""
self._load()
info = {
'path': self.path,
'installed': self.isInstalled(),
'identifier': self.identifier
}
for (k, v) in list(self.config.items()):
info[k] = v
for (k, v) in list(self.version.items()):
info[k] = v
return info
def install(self, dbname=None, engine=None, dataDir=None, fullname=None, dropDb=False, wwwroot=None):
"""Launch the install script of an Instance"""
if self.isInstalled():
raise InstallException('Instance already installed!')
if not wwwroot:
raise InstallException('Cannot install without a value for wwwroot')
if dataDir == None or not os.path.isdir(dataDir):
raise InstallException('Cannot install instance without knowing where the data directory is')
if dbname == None:
dbname = re.sub(r'[^a-zA-Z0-9]', '', self.identifier).lower()
prefixDbname = C.get('db.namePrefix')
if prefixDbname:
dbname = prefixDbname + dbname
dbname = dbname[:28]
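        # Editor's illustration: an identifier such as 'stable_3.9' collapses
        # to 'stable39' (optionally prefixed via db.namePrefix) and is capped
        # at 28 characters to stay within common database name limits.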
if engine == None:
engine = C.get('defaultEngine')
if fullname == None:
fullname = self.identifier.replace('-', ' ').replace('_', ' ').title()
fullname = fullname + ' ' + C.get('wording.%s' % engine)
dboptions = C.get('db.%s' % engine)
if engine in ('mysqli', 'mariadb') and self.branch_compare(31):
dboptions['charset'] = 'utf8mb4'
logging.info('Creating database...')
db = DB(engine, dboptions)
if db.dbexists(dbname):
if dropDb:
db.dropdb(dbname)
db.createdb(dbname)
else:
raise InstallException('Cannot install an instance on an existing database (%s)' % dbname)
else:
db.createdb(dbname)
db.selectdb(dbname)
logging.info('Installing %s...' % self.identifier)
cli = 'admin/cli/install.php'
params = (wwwroot, dataDir, engine, dbname, C.get('db.%s.user' % engine), C.get('db.%s.passwd' % engine), C.get('db.%s.host' % engine), C.get('db.%s.port' % engine), C.get('db.tablePrefix'), fullname, self.identifier, C.get('login'), C.get('passwd'))
args = '--wwwroot="%s" --dataroot="%s" --dbtype="%s" --dbname="%s" --dbuser="%s" --dbpass="%s" --dbhost="%s" --dbport="%s" --prefix="%s" --fullname="%s" --shortname="%s" --adminuser="%s" --adminpass="%s" --allow-unstable --agree-license --non-interactive' % params
result = self.cli(cli, args, stdout=None, stderr=None)
if result[0] != 0:
raise InstallException('Error while running the install, please manually fix the problem.\n- Command was: %s %s %s' % (C.get('php'), cli, args))
configFile = os.path.join(self.path, 'config.php')
os.chmod(configFile, 0o666)
try:
if C.get('path') != '' and C.get('path') != None:
self.addConfig('sessioncookiepath', '/%s/%s/' % (C.get('path'), self.identifier))
else:
self.addConfig('sessioncookiepath', '/%s/' % self.identifier)
except Exception:
logging.warning('Could not append $CFG->sessioncookiepath to config.php')
# Add forced $CFG to the config.php if some are globally defined.
forceCfg = C.get('forceCfg')
if isinstance(forceCfg, dict):
for cfgKey, cfgValue in forceCfg.items():
try:
logging.info('Setting up forced $CFG->%s to \'%s\' in config.php', cfgKey, cfgValue)
self.addConfig(cfgKey, cfgValue)
except Exception:
logging.warning('Could not append $CFG->%s to config.php', cfgKey)
self.reload()
def isInstalled(self):
"""Returns whether this instance is installed or not"""
# Reload the configuration if necessary.
self._load()
return self.installed == True
@staticmethod
def isInstance(path):
"""Check whether the path is a Moodle web directory"""
version = os.path.join(path, 'version.php')
try:
f = open(version, 'r')
lines = f.readlines()
f.close()
except:
return False
found = False
for line in lines:
if line.find('MOODLE VERSION INFORMATION') > -1:
found = True
break
if not found:
return False
return True
def isIntegration(self):
"""Returns whether an instance is an integration one or not"""
r = C.get('upstreamRemote') or 'upstream'
if not self.git().getRemote(r):
r = 'origin'
remote = self.git().getConfig('remote.%s.url' % r)
if remote != None and remote.endswith('integration.git'):
return True
return False
def isStable(self):
"""Assume an instance is stable if not integration"""
return not self.isIntegration()
def _load(self):
"""Loads the information"""
if not self.isInstance(self.path):
return False
if self._loaded:
return True
# Extracts information from version.php
self.version = {}
version = os.path.join(self.path, 'version.php')
if os.path.isfile(version):
reVersion = re.compile(r'^\s*\$version\s*=\s*([0-9.]+)\s*;')
reRelease = re.compile(r'^\s*\$release\s*=\s*(?P<brackets>[\'"])?(.+)(?P=brackets)\s*;')
reMaturity = re.compile(r'^\s*\$maturity\s*=\s*([a-zA-Z0-9_]+)\s*;')
reBranch = re.compile(r'^\s*\$branch\s*=\s*(?P<brackets>[\'"])?([0-9]+)(?P=brackets)\s*;')
f = open(version, 'r')
for line in f:
if reVersion.search(line):
self.version['version'] = reVersion.search(line).group(1)
elif reRelease.search(line):
self.version['release'] = reRelease.search(line).group(2)
elif reMaturity.search(line):
self.version['maturity'] = reMaturity.search(line).group(1).replace('MATURITY_', '').lower()
elif reBranch.search(line):
self.version['branch'] = reBranch.search(line).group(2)
# Several checks about the branch
try:
# Has it been set?
branch = self.version['branch']
except:
self.version['branch'] = self.version['release'].replace('.', '')[0:2]
branch = self.version['branch']
if int(branch) >= int(C.get('masterBranch')):
self.version['branch'] = 'master'
# Stable branch
if self.version['branch'] == 'master':
self.version['stablebranch'] = 'master'
else:
self.version['stablebranch'] = 'MOODLE_%s_STABLE' % self.version['branch']
# Integration or stable?
self.version['integration'] = self.isIntegration()
f.close()
else:
# Should never happen
raise Exception('This does not appear to be a Moodle instance')
# Extracts parameters from config.php, does not handle params over multiple lines
self.config = {}
config = os.path.join(self.path, 'config.php')
if os.path.isfile(config):
self.installed = True
prog = re.compile(r'^\s*\$CFG->([a-z_]+)\s*=\s*((?P<brackets>[\'"])?(.+)(?P=brackets)|([0-9.]+)|(true|false|null))\s*;$', re.I)
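            # Editor's illustration of what the pattern captures:
            #   $CFG->dbtype = 'mysqli';      -> ('dbtype', 'mysqli')
            #   $CFG->debug = 32767;          -> ('debug', 32767)
            #   $CFG->debugdisplay = false;   -> ('debugdisplay', False)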
try:
f = open(config, 'r')
for line in f:
match = prog.search(line)
if match == None:
continue
if match.group(5) != None:
# Number
value = float(match.group(5)) if '.' in str(match.group(5)) else int(match.group(5))
elif match.group(6) != None:
# Boolean or null
value = str(match.group(6)).lower()
if value == 'true':
value = True
elif value == 'false':
value = False
else:
value = None
else:
# Likely to be a string
value = match.group(4)
self.config[match.group(1)] = value
f.close()
except IOError:
self.installed = False
logging.error('Could not read config file')
else:
self.installed = False
self._loaded = True
return True
def purge(self, manual=False):
"""Purge the cache of an instance"""
if not self.isInstalled():
raise Exception('Instance not installed, cannot purge.')
elif self.branch_compare('22', '<'):
raise Exception('Instance does not support cache purging.')
try:
dataroot = self.get('dataroot', False)
if manual and dataroot != False:
logging.debug('Removing directories [dataroot]/cache and [dataroot]/localcache')
shutil.rmtree(os.path.join(dataroot, 'cache'), True)
shutil.rmtree(os.path.join(dataroot, 'localcache'), True)
self.cli('admin/cli/purge_caches.php', stderr=None, stdout=None)
except Exception:
raise Exception('Error while purging cache!')
def pushPatch(self, branch=None):
"""Push a patch on the tracker, and remove the previous one"""
if branch == None:
branch = self.currentBranch()
if branch == 'HEAD':
raise Exception('Cannot create a patch from a detached branch')
# Parsing the branch
parsedbranch = parseBranch(branch)
if not parsedbranch:
raise Exception('Could not extract issue number from %s' % branch)
issue = 'MDL-%s' % (parsedbranch['issue'])
headcommit = self.headcommit(branch)
# Creating a patch file.
fileName = branch + '.mdk.patch'
tmpPatchFile = os.path.join(gettempdir(), fileName)
if self.git().createPatch('%s...%s' % (headcommit, branch), saveTo=tmpPatchFile):
J = Jira()
# Checking if file with same name exists.
existingAttachmentId = None
existingAttachments = J.getIssue(issue, fields='attachment')
for existingAttachment in existingAttachments.get('fields', {}).get('attachment', {}):
if existingAttachment.get('filename') == fileName:
# Found an existing attachment with the same name, we keep track of it.
existingAttachmentId = existingAttachment.get('id')
break
# Pushing patch to the tracker.
try:
logging.info('Uploading %s to the tracker' % (fileName))
J.upload(issue, tmpPatchFile)
except JiraException:
logging.error('Error while uploading the patch to the tracker')
return False
else:
if existingAttachmentId != None:
# On success, deleting file that was there before.
try:
logging.info('Deleting older patch...')
J.deleteAttachment(existingAttachmentId)
except JiraException:
logging.info('Could not delete older attachment')
else:
logging.error('Could not create a patch file')
return False
return True
def reload(self):
"""Sets the value to be reloaded"""
self._loaded = False
def removeConfig(self, name):
"""Remove a configuration setting from the config file."""
configFile = os.path.join(self.path, 'config.php')
if not os.path.isfile(configFile):
return None
try:
f = open(configFile, 'r')
lines = f.readlines()
f.close()
for line in lines:
if re.search(r'\$CFG->%s\s*=.*;' % (name), line):
lines.remove(line)
break
f = open(configFile, 'w')
f.writelines(lines)
f.close()
except:
raise Exception('Error while writing to config file')
self.reload()
def runScript(self, scriptname, arguments=None, **kwargs):
"""Runs a script on the instance"""
return Scripts.run(scriptname, self.get('path'), arguments=arguments, cmdkwargs=kwargs)
def update(self, remote=None):
"""Update the instance from the remote"""
if remote == None:
remote = C.get('upstreamRemote')
# Fetch
if not self.git().fetch(remote):
raise Exception('Could not fetch remote %s' % remote)
# Checkout stable
self.checkout_stable(True)
# Reset HARD
upstream = '%s/%s' % (remote, self.get('stablebranch'))
if not self.git().reset(to=upstream, hard=True):
raise Exception('Error while executing git reset.')
# Return to previous branch
self.checkout_stable(False)
def updateConfig(self, name, value):
"""Update a setting in the config file."""
self.removeConfig(name)
self.addConfig(name, value)
def uninstall(self):
"""Uninstall the instance"""
if not self.isInstalled():
raise Exception('The instance is not installed')
# Delete the content in moodledata
dataroot = self.get('dataroot')
if os.path.isdir(dataroot):
logging.debug('Deleting dataroot content (%s)' % (dataroot))
shutil.rmtree(dataroot)
mkdir(dataroot, 0o777)
# Drop the database
dbname = self.get('dbname')
if self.dbo().dbexists(dbname):
            logging.debug('Dropping database (%s)' % (dbname))
self.dbo().dropdb(dbname)
# Remove the config file
configFile = os.path.join(self.get('path'), 'config.php')
if os.path.isfile(configFile):
logging.debug('Deleting config.php')
os.remove(configFile)
def updateTrackerGitInfo(self, branch=None, ref=None):
"""Updates the git info on the tracker issue"""
if branch == None:
branch = self.currentBranch()
if branch == 'HEAD':
raise Exception('Cannot update the tracker when on detached branch')
# Parsing the branch
parsedbranch = parseBranch(branch)
if not parsedbranch:
raise Exception('Could not extract issue number from %s' % branch)
issue = 'MDL-%s' % (parsedbranch['issue'])
version = parsedbranch['version']
# Get the jira config
repositoryurl = self.git().updateUnauthenticatedGithub(C.get('repositoryUrl'))
diffurltemplate = C.get('diffUrlTemplate')
stablebranch = self.get('stablebranch')
# Get the hash of the last upstream commit
headcommit = None
logging.info('Searching for the head commit...')
if ref:
try:
headcommit = self.git().hashes(ref=ref, limit=1, format='%H')[0]
except GitException:
logging.warning('Could not resolve a head commit using the reference: %s' % (ref))
headcommit = None
# No reference was passed, or it was invalid.
if not headcommit:
headcommit = self.headcommit(branch)
# Head commit not resolved
if not headcommit:
logging.error('Head commit not resolved, aborting update of tracker fields')
return False
headcommit = headcommit[:10]
logging.debug('Head commit resolved to %s' % (headcommit))
J = Jira()
diffurl = diffurltemplate.replace('%branch%', branch).replace('%stablebranch%', stablebranch).replace('%headcommit%', headcommit)
fieldrepositoryurl = C.get('tracker.fieldnames.repositoryurl')
fieldbranch = C.get('tracker.fieldnames.%s.branch' % version)
fielddiffurl = C.get('tracker.fieldnames.%s.diffurl' % version)
if not fieldrepositoryurl or not fieldbranch or not fielddiffurl:
logging.error('Cannot set tracker fields for this version (%s). The field names are not set in the config file.', version)
else:
logging.info('Setting tracker fields: \n %s: %s \n %s: %s \n %s: %s' %
(fieldrepositoryurl, repositoryurl, fieldbranch, branch, fielddiffurl, diffurl))
J.setCustomFields(issue, {fieldrepositoryurl: repositoryurl, fieldbranch: branch, fielddiffurl: diffurl})
def upgrade(self, nocheckout=False):
"""Calls the upgrade script"""
if not self.isInstalled():
raise Exception('Cannot upgrade an instance which is not installed.')
elif not self.branch_compare(20):
raise Exception('Upgrade command line tool not supported by this version.')
elif os.path.isfile(os.path.join(self.get('path'), '.noupgrade')):
raise UpgradeNotAllowed('Upgrade not allowed, found .noupgrade.')
# Checkout stable
if not nocheckout:
self.checkout_stable(True)
cli = '/admin/cli/upgrade.php'
args = '--non-interactive --allow-unstable'
result = self.cli(cli, args, stdout=None, stderr=None)
if result[0] != 0:
raise Exception('Error while running the upgrade.')
# Return to previous branch
if not nocheckout:
self.checkout_stable(False)
def uninstallPlugins(self, name):
"""Calls the CLI to uninstall a plugin"""
if not self.branch_compare(37):
raise Exception('Uninstalling plugins is only available from Moodle 3.7.')
cli = '/admin/cli/uninstall_plugins.php'
args = '--plugins=' + name + ' --run'
result = self.cli(cli, args, stdout=subprocess.PIPE, stderr=None)
try:
resultstring = result[1].split('\n', 1)
cannotuninstall = resultstring[0].rfind('Can not be uninstalled')
if cannotuninstall != -1:
raise Exception('The plugin could not be uninstalled: %s' % result[1])
except IndexError as e:
# We should always have some text returned and so should not end up here. And we
# raise the exception as we're unsure whether the plugin was uninstalled properly
# so it's better to halt the process.
logging.error('The plugin uninstall cli code has changed and I need to be updated.')
raise e
|
FMCorz/mdk
|
mdk/moodle.py
|
Python
|
gpl-3.0
| 32,035
|
import os
import unittest
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import ImageField, ValidationError
from django.test import SimpleTestCase
try:
from PIL import Image
except ImportError:
Image = None
def get_img_path(path):
return os.path.join(os.path.abspath(os.path.join(__file__, '..', '..')), 'tests', path)
@unittest.skipUnless(Image, "Pillow is required to test ImageField")
class ImageFieldTest(SimpleTestCase):
def test_imagefield_annotate_with_image_after_clean(self):
f = ImageField()
img_path = get_img_path('filepath_test_files/1x1.png')
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.png', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('PNG', uploaded_file.image.format)
self.assertEqual('image/png', uploaded_file.content_type)
def test_imagefield_annotate_with_bitmap_image_after_clean(self):
"""
This also tests the situation when Pillow doesn't detect the MIME type
of the image (#24948).
"""
from PIL.BmpImagePlugin import BmpImageFile
try:
Image.register_mime(BmpImageFile.format, None)
f = ImageField()
img_path = get_img_path('filepath_test_files/1x1.bmp')
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.bmp', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('BMP', uploaded_file.image.format)
self.assertIsNone(uploaded_file.content_type)
finally:
Image.register_mime(BmpImageFile.format, 'image/bmp')
def test_file_extension_validation(self):
f = ImageField()
img_path = get_img_path('filepath_test_files/1x1.png')
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.txt', img_data)
with self.assertRaisesMessage(ValidationError, "File extension 'txt' is not allowed."):
f.clean(img_file)
|
edmorley/django
|
tests/forms_tests/field_tests/test_imagefield.py
|
Python
|
bsd-3-clause
| 2,282
|
#
# key.py - OpenSSL wrapper
# Source: git://github.com/joric/brutus.git
# which was forked from git://github.com/samrushing/caesure.git
#
import ctypes
import ctypes.util
ssl = ctypes.cdll.LoadLibrary (ctypes.util.find_library ('ssl') or 'libeay32')
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def check_result (val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = check_result
class CKey:
def __init__(self):
self.POINT_CONVERSION_COMPRESSED = 2
self.POINT_CONVERSION_UNCOMPRESSED = 4
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def generate(self, secret=None):
if secret:
self.prikey = secret
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx)
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
else:
return ssl.EC_KEY_generate_key(self.k)
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def sign(self, hash):
sig_size = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size)
sig_size0 = ctypes.POINTER(ctypes.c_int)()
assert 1 == ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
return mb_sig.raw
def verify(self, hash, sig):
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k)
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
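# Usage sketch (editor's illustration, Python 2 as in this module):
#   k = CKey()
#   k.generate()                       # fresh random keypair
#   sig = k.sign(some_32_byte_digest)  # placeholder message digest
#   assert k.verify(some_32_byte_digest, sig) == 1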
if __name__ == '__main__':
# ethalone keys
ec_secret = '' + \
'a0dc65ffca799873cbea0ac274015b9526505daaaed385155425f7337704883e'
ec_private = '308201130201010420' + \
'a0dc65ffca799873cbea0ac274015b9526505daaaed385155425f7337704883e' + \
'a081a53081a2020101302c06072a8648ce3d0101022100' + \
'fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f' + \
'300604010004010704410479be667ef9dcbbac55a06295ce870b07029bfcdb2d' + \
'ce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a6' + \
'8554199c47d08ffb10d4b8022100' + \
'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141' + \
'020101a14403420004' + \
'0791dc70b75aa995213244ad3f4886d74d61ccd3ef658243fcad14c9ccee2b0a' + \
'a762fbc6ac0921b8f17025bb8458b92794ae87a133894d70d7995fc0b6b5ab90'
k = CKey()
k.generate(ec_secret.decode('hex'))
k.set_compressed(True)
print k.get_privkey().encode('hex')
print k.get_pubkey().encode('hex')
# CKey defines no get_secret(); print the raw secret stored by generate().
print k.prikey.encode('hex')
|
0dayZh/python-bitcoinlib
|
bitcoin/key.py
|
Python
|
mit
| 4,073
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import threading
import sys
import os
from typing import TYPE_CHECKING
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import (QTextEdit, QVBoxLayout, QLabel, QGridLayout, QHBoxLayout,
QRadioButton, QCheckBox, QLineEdit)
from electrum_grs.gui.qt.util import (read_QIcon, WindowModalDialog, WaitingDialog, OkButton,
CancelButton, Buttons, icon_path, WWLabel, CloseButton)
from electrum_grs.gui.qt.qrcodewidget import QRCodeWidget
from electrum_grs.gui.qt.amountedit import AmountEdit
from electrum_grs.gui.qt.main_window import StatusBarButton
from electrum_grs.gui.qt.installwizard import InstallWizard
from electrum_grs.i18n import _
from electrum_grs.plugin import hook
from electrum_grs.util import is_valid_email
from electrum_grs.logging import Logger
from electrum_grs.base_wizard import GoBack, UserCancelled
from .trustedcoin import TrustedCoinPlugin, server
if TYPE_CHECKING:
from electrum_grs.gui.qt.main_window import ElectrumWindow
from electrum_grs.wallet import Abstract_Wallet
class TOS(QTextEdit):
tos_signal = pyqtSignal()
error_signal = pyqtSignal(object)
class HandlerTwoFactor(QObject, Logger):
def __init__(self, plugin, window):
QObject.__init__(self)
self.plugin = plugin
self.window = window
Logger.__init__(self)
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.plugin.wallet_class):
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
window = self.window.top_level_window()
auth_code = self.plugin.auth_dialog(window)
WaitingDialog(parent=window,
message=_('Waiting for TrustedCoin server to sign transaction...'),
task=lambda: wallet.on_otp(tx, auth_code),
on_success=lambda *args: on_success(tx),
on_error=on_failure)
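# Editorial note: prompt_user_for_otp() above asks for an OTP only when the
# TrustedCoin server's co-signature (the 'x3/' keystore) is actually required;
# wallets that can sign locally skip the dialog entirely.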
class Plugin(TrustedCoinPlugin):
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def load_wallet(self, wallet: 'Abstract_Wallet', window: 'ElectrumWindow'):
if not isinstance(wallet, self.wallet_class):
return
wallet.handler_2fa = HandlerTwoFactor(self, window)
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(read_QIcon("trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)
def waiting_dialog_for_billing_info(self, window, *, on_finished=None):
def task():
return self.request_billing_info(window.wallet, suppress_connection_error=False)
def on_error(exc_info):
e = exc_info[1]
window.show_error("{header}\n{exc}\n\n{tor}"
.format(header=_('Error getting TrustedCoin account info.'),
exc=repr(e),
tor=_('If you keep experiencing network problems, try using a Tor proxy.')))
return WaitingDialog(parent=window,
message=_('Requesting account info from TrustedCoin server...'),
task=task,
on_success=on_finished,
on_error=on_error)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.waiting_dialog_for_billing_info(window)
return True
return False
def settings_dialog(self, window):
self.waiting_dialog_for_billing_info(window,
on_finished=partial(self.show_settings_dialog, window))
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(icon_path("trustedcoin-status.png")))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(True)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
n_prepay = wallet.num_prepay()
i = 0
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
b = QRadioButton()
b.setChecked(k == n_prepay)
b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def go_online_dialog(self, wizard: InstallWizard):
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum-GRS.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.reset_stack()
try:
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('accept_terms_of_use'))
except (GoBack, UserCancelled):
# user clicked 'Cancel' and decided to move wallet file manually
storage, db = wizard.create_storage(wizard.path)
raise
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = TOS()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
tos_received = False
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
def request_TOS():
try:
tos = server.get_terms_of_service()
except Exception as e:
self.logger.exception('Could not retrieve Terms of Service')
tos_e.error_signal.emit(_('Could not retrieve Terms of Service:')
+ '\n' + repr(e))
return
self.TOS = tos
tos_e.tos_signal.emit()
def on_result():
tos_e.setText(self.TOS)
nonlocal tos_received
tos_received = True
set_enabled()
def on_error(msg):
window.show_error(str(msg))
window.terminate()
def set_enabled():
next_button.setEnabled(tos_received and is_valid_email(email_e.text()))
tos_e.tos_signal.connect(on_result)
tos_e.error_signal.connect(on_error)
t = threading.Thread(target=request_TOS)
t.daemon = True
t.start()
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.exec_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
email = str(email_e.text())
self.create_remote_key(email, window)
def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
vbox = QVBoxLayout()
if otp_secret is not None:
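# Build an otpauth:// TOTP provisioning URI; Google Authenticator and
# compatible apps can scan it as a QR code.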
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with TrustedCoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(True)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
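# Enable 'Next' once the lost-2FA box is ticked, or once a 6-digit OTP has been typed.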
b = cb_lost.isChecked() or len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
|
GroestlCoin/electrum-grs
|
electrum_grs/plugins/trustedcoin/qt.py
|
Python
|
gpl-3.0
| 13,381
|
#pylint: disable=invalid-name
"""
Classes for each reduction step. They are kept separate
from the interface class so that the DgsReduction class can
be used independently of the interface implementation.
"""
from __future__ import (absolute_import, division, print_function)
import os
import xml.dom.minidom
from reduction_gui.reduction.scripter import BaseScriptElement
class SampleSetupScript(BaseScriptElement):
sample_file = ""
live_button = False
output_wsname = ""
detcal_file = ""
relocate_dets = False
incident_energy_guess = ""
use_ei_guess = False
tzero_guess = 0.0
monitor1_specid = ""
monitor2_specid = ""
rebin_et = False
et_range_low = ""
et_range_width = ""
et_range_high = ""
et_is_distribution = True
hardmask_file = ""
grouping_file = ""
show_workspaces = False
savedir = ""
def __init__(self, inst_name):
super(SampleSetupScript, self).__init__()
self.set_default_pars(inst_name)
self.reset()
def set_default_pars(self, inst_name):
from . import dgs_utils
ip = dgs_utils.InstrumentParameters(inst_name)
SampleSetupScript.monitor1_specid = str(int(ip.get_parameter("ei-mon1-spec")))
SampleSetupScript.monitor2_specid = str(int(ip.get_parameter("ei-mon2-spec")))
def to_script(self):
script = ""
if not self.live_button:
script += "SampleInputFile=\"%s\",\n" % self.sample_file
else:
script += "SampleInputWorkspace=input,\n"
tmp_wsname = ""
if self.output_wsname == SampleSetupScript.output_wsname:
# Make a default name from the incoming file
tmp = os.path.split(os.path.splitext(str(self.sample_file))[0])[-1]
tmp_wsname = tmp + "_spe"
else:
tmp_wsname = self.output_wsname
script += "OutputWorkspace=\"%s\",\n" % tmp_wsname
if self.detcal_file != SampleSetupScript.detcal_file:
script += "DetCalFilename=\"%s\",\n" % self.detcal_file
if self.relocate_dets != SampleSetupScript.relocate_dets:
script += "RelocateDetectors=%s,\n" % self.relocate_dets
if self.incident_energy_guess != SampleSetupScript.incident_energy_guess:
script += "IncidentEnergyGuess=%s,\n" % float(self.incident_energy_guess)
if self.use_ei_guess != SampleSetupScript.use_ei_guess:
script += "UseIncidentEnergyGuess=%s,\n" % self.use_ei_guess
if self.tzero_guess != SampleSetupScript.tzero_guess:
script += "TimeZeroGuess=%s,\n" % str(self.tzero_guess)
if self.monitor1_specid != SampleSetupScript.monitor1_specid:
try:
temp1 = int(self.monitor1_specid)
script += "Monitor1SpecId=%s,\n" % temp1
except ValueError:
pass
if self.monitor2_specid != SampleSetupScript.monitor2_specid:
try:
temp2 = int(self.monitor2_specid)
script += "Monitor2SpecId=%s,\n" % temp2
except ValueError:
pass
if self.et_range_low != SampleSetupScript.et_range_low or \
self.et_range_width != SampleSetupScript.et_range_width or \
self.et_range_high != SampleSetupScript.et_range_high:
script += "EnergyTransferRange=\"%s,%s,%s\",\n" % (self.et_range_low,
self.et_range_width,
self.et_range_high)
if self.et_is_distribution != SampleSetupScript.et_is_distribution:
script += "SofPhiEIsDistribution=%s,\n" % self.et_is_distribution
if self.hardmask_file != SampleSetupScript.hardmask_file:
script += "HardMaskFile=\"%s\",\n" % self.hardmask_file
if self.grouping_file != SampleSetupScript.grouping_file:
script += "GroupingFile=\"%s\",\n" % self.grouping_file
if self.show_workspaces:
script += "ShowIntermediateWorkspaces=%s,\n" % self.show_workspaces
if self.savedir != SampleSetupScript.savedir:
script += "OutputDirectory=\"%s\",\n" % self.savedir
return script
def to_xml(self):
"""
Create XML from the current data.
"""
xml_str = "<SampleSetup>\n"
xml_str += " <sample_input_file>%s</sample_input_file>\n" % self.sample_file
xml_str += " <live_button>%s</live_button>\n" % self.live_button
xml_str += " <output_wsname>%s</output_wsname>\n" % self.output_wsname
xml_str += " <detcal_file>%s</detcal_file>\n" % self.detcal_file
xml_str += " <relocate_dets>%s</relocate_dets>\n" % self.relocate_dets
xml_str += " <incident_energy_guess>%s</incident_energy_guess>\n" % self.incident_energy_guess
xml_str += " <use_ei_guess>%s</use_ei_guess>\n" % str(self.use_ei_guess)
xml_str += " <tzero_guess>%s</tzero_guess>\n" % str(self.tzero_guess)
xml_str += " <monitor1_specid>%s</monitor1_specid>\n" % self.monitor1_specid
xml_str += " <monitor2_specid>%s</monitor2_specid>\n" % self.monitor2_specid
xml_str += " <et_range>\n"
xml_str += " <low>%s</low>\n" % self.et_range_low
xml_str += " <width>%s</width>\n" % self.et_range_width
xml_str += " <high>%s</high>\n" % self.et_range_high
xml_str += " </et_range>\n"
xml_str += " <sofphie_is_distribution>%s</sofphie_is_distribution>\n" % str(self.et_is_distribution)
xml_str += " <hardmask_file>%s</hardmask_file>\n" % self.hardmask_file
xml_str += " <grouping_file>%s</grouping_file>\n" % self.grouping_file
xml_str += " <show_workspaces>%s</show_workspaces>\n" % self.show_workspaces
xml_str += " <savedir>%s</savedir>\n" % self.savedir
xml_str += "</SampleSetup>\n"
return xml_str
def from_xml(self, xml_str):
"""
Read in data from XML
@param xml_str: text to read the data from
"""
dom = xml.dom.minidom.parseString(xml_str)
element_list = dom.getElementsByTagName("SampleSetup")
if len(element_list) > 0:
instrument_dom = element_list[0]
self.sample_file = BaseScriptElement.getStringElement(instrument_dom,
"sample_input_file",
default=SampleSetupScript.sample_file)
self.live_button = BaseScriptElement.getBoolElement(instrument_dom,
"live_button",
default=SampleSetupScript.live_button)
self.output_wsname = BaseScriptElement.getStringElement(instrument_dom,
"output_wsname",
default=SampleSetupScript.output_wsname)
self.detcal_file = BaseScriptElement.getStringElement(instrument_dom,
"detcal_file",
default=SampleSetupScript.detcal_file)
self.relocate_dets = BaseScriptElement.getBoolElement(instrument_dom,
"relocate_dets",
default=SampleSetupScript.relocate_dets)
self.incident_energy_guess = BaseScriptElement.getStringElement(instrument_dom,
"incident_energy_guess",
default=SampleSetupScript.incident_energy_guess)
self.use_ei_guess = BaseScriptElement.getBoolElement(instrument_dom,
"use_ei_guess",
default=SampleSetupScript.use_ei_guess)
self.tzero_guess = BaseScriptElement.getFloatElement(instrument_dom,
"tzero_guess",
default=SampleSetupScript.tzero_guess)
self.monitor1_specid = BaseScriptElement.getStringElement(instrument_dom,
"monitor1_specid",
default=SampleSetupScript.monitor1_specid)
self.monitor2_specid = BaseScriptElement.getStringElement(instrument_dom,
"monitor2_specid",
default=SampleSetupScript.monitor2_specid)
self.et_range_low = BaseScriptElement.getStringElement(instrument_dom,
"et_range/low",
default=SampleSetupScript.et_range_low)
self.et_range_width = BaseScriptElement.getStringElement(instrument_dom,
"et_range/width",
default=SampleSetupScript.et_range_width)
self.et_range_high = BaseScriptElement.getStringElement(instrument_dom,
"et_range/high",
default=SampleSetupScript.et_range_high)
self.et_is_distribution = BaseScriptElement.getBoolElement(instrument_dom,
"sofphie_is_distribution",
default=SampleSetupScript.et_is_distribution)
self.hardmask_file = BaseScriptElement.getStringElement(instrument_dom,
"hardmask_file",
default=SampleSetupScript.hardmask_file)
self.grouping_file = BaseScriptElement.getStringElement(instrument_dom,
"grouping_file",
default=SampleSetupScript.grouping_file)
self.show_workspaces = BaseScriptElement.getBoolElement(instrument_dom,
"show_workspaces",
default=SampleSetupScript.show_workspaces)
self.savedir = BaseScriptElement.getStringElement(instrument_dom,
"savedir",
default=SampleSetupScript.savedir)
def reset(self):
"""
Reset state
"""
self.sample_file = SampleSetupScript.sample_file
self.live_button = SampleSetupScript.live_button
self.output_wsname = SampleSetupScript.output_wsname
self.detcal_file = SampleSetupScript.detcal_file
self.relocate_dets = SampleSetupScript.relocate_dets
self.incident_energy_guess = SampleSetupScript.incident_energy_guess
self.use_ei_guess = SampleSetupScript.use_ei_guess
self.tzero_guess = SampleSetupScript.tzero_guess
self.monitor1_specid = SampleSetupScript.monitor1_specid
self.monitor2_specid = SampleSetupScript.monitor2_specid
self.rebin_et = SampleSetupScript.rebin_et
self.et_range_low = SampleSetupScript.et_range_low
self.et_range_width = SampleSetupScript.et_range_width
self.et_range_high = SampleSetupScript.et_range_high
self.et_is_distribution = SampleSetupScript.et_is_distribution
self.hardmask_file = SampleSetupScript.hardmask_file
self.grouping_file = SampleSetupScript.grouping_file
self.show_workspaces = SampleSetupScript.show_workspaces
self.savedir = SampleSetupScript.savedir
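# Editorial usage sketch (hedged): requires the reduction_gui package and a
# Mantid instrument definition to be importable; "ARCS" and the file path below
# are illustrative only.
# s = SampleSetupScript("ARCS")
# s.sample_file = "/tmp/sample_run.nxs"
# xml_repr = s.to_xml()     # serialize the current state
# s.from_xml(xml_repr)      # round-trip the state back from XML
# print(s.to_script())      # keyword arguments for the DgsReduction algorithm call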
|
ScreamingUdder/mantid
|
scripts/Interface/reduction_gui/reduction/inelastic/dgs_sample_data_setup_script.py
|
Python
|
gpl-3.0
| 12,549
|
# -*- coding: utf-8 -*-
"""
Example that fails to execute
=============================
This example demonstrates a code block that raises an error and how any code
blocks that follow are not executed.
When scripts fail, their gallery thumbnail is replaced with the broken
image stamp. This allows easy identification in the gallery display.
You will also get the python traceback of the failed code block.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
# sphinx_gallery_line_numbers = True
import numpy as np
import matplotlib.pyplot as plt
plt.pcolormesh(np.random.randn(100, 100))
# %%
# This next block will raise a NameError
iae
# %%
# Sphinx-Gallery will stop executing the remaining code blocks after
# the exception has occurred in the example script. Nevertheless, the
# HTML will still render all of the example's annotated text and code
# blocks, but no output will be shown.
# %%
# Here is another error raising block but will not be executed
plt.plot('Strings are not a valid argument for the plot function')
|
Eric89GXL/sphinx-gallery
|
examples/no_output/plot_raise.py
|
Python
|
bsd-3-clause
| 1,038
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib
# TODO(termie): abstract away app engine specifics
from google.appengine.api import urlfetch
from django.conf import settings
from common import exception
from common.protocol import base
class _DevRpc(object):
def get_result(self):
pass
class PshbConnection(base.Connection):
def __init__(self, endpoint):
self.endpoint = endpoint
def publish_async(self, urls):
if settings.MANAGE_PY:
logging.info('pshb.publish(%s, %s)', self.endpoint, urls)
return _DevRpc()
rpc = urlfetch.create_rpc()
def _callback():
result = rpc.get_result()
if result.status_code == 204:
return
raise exception.ServiceError(result.content)
rpc.callback = _callback
data = urllib.urlencode(
{'hub.url': urls, 'hub.mode': 'publish'}, doseq=True)
urlfetch.make_fetch_call(rpc, self.endpoint, method='POST', payload=data)
return rpc
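# Editorial usage sketch (hedged): the hub endpoint and feed URL below are
# illustrative placeholders.
# conn = PshbConnection('http://pubsubhubbub.appspot.com/')
# rpc = conn.publish_async(['http://example.com/feed.atom'])
# rpc.get_result()  # the callback raises exception.ServiceError unless the hub answered 204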
|
termie/jaikuengine
|
common/protocol/pshb.py
|
Python
|
apache-2.0
| 1,524
|
import time
import collections
from django.core.exceptions import ImproperlyConfigured
from rest_framework.parsers import JSONParser
from rest_framework.exceptions import ParseError, NotAuthenticated
from framework.auth import signing
from api.base.utils import is_bulk_request
from api.base.renderers import JSONAPIRenderer
from api.base.exceptions import JSONAPIException
NO_RELATIONSHIPS_ERROR = 'Request must include /data/relationships.'
NO_DATA_ERROR = 'Request must include /data.'
NO_TYPE_ERROR = 'Request must include /type.'
NO_ID_ERROR = 'Request must include /data/id.'
class JSONAPIParser(JSONParser):
"""
Parses JSON-serialized data. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
renderer_class = JSONAPIRenderer
@staticmethod
def get_relationship(data, related_resource):
target_type = data.get('type')
if not target_type:
raise JSONAPIException(
source={'pointer': 'data/relationships/{}/data/type'.format(related_resource)},
detail=NO_TYPE_ERROR,
)
id = data.get('id')
return {'id': id, 'target_type': target_type}
# Helper used by parse(); subclasses override this to support multiple relationships.
def flatten_relationships(self, relationships):
"""
Flattens relationships dictionary which has information needed to create related resource objects.
Validates that formatting of relationships dictionary is correct.
"""
if not isinstance(relationships, dict):
raise ParseError()
# Can only create one type of relationship.
related_resource = list(relationships.keys())[0]
if not isinstance(relationships[related_resource], dict) or related_resource == 'data':
raise ParseError()
data = relationships[related_resource].get('data')
if not data:
raise JSONAPIException(source={'pointer': 'data/relationships/{}/data'.format(related_resource)}, detail=NO_DATA_ERROR)
if isinstance(data, list):
return [self.get_relationship(item, related_resource) for item in data]
else:
return self.get_relationship(data, related_resource)
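# Editorial note: for a to-one relationship this returns
# {'id': <id>, 'target_type': <type>}; for a to-many relationship, a list of
# such dicts. JSONAPIMultipleRelationshipsParser re-keys these by resource name.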
def flatten_data(self, resource_object, parser_context, is_list):
"""
Flattens data objects, making attributes and relationships fields the same level as id and type.
"""
relationships = resource_object.get('relationships')
is_relationship = parser_context.get('is_relationship')
# allow skip type check for legacy api version
legacy_type_allowed = parser_context.get('legacy_type_allowed', False)
request_method = parser_context['request'].method
if is_relationship and request_method == 'POST':
if not relationships:
raise JSONAPIException(source={'pointer': '/data/relationships'}, detail=NO_RELATIONSHIPS_ERROR)
object_id = resource_object.get('id')
object_type = resource_object.get('type')
type_required = not (
legacy_type_allowed and parser_context['request'].version < 2.7 and request_method == 'PATCH'
)
# For validating type and id for bulk delete:
if is_list and request_method == 'DELETE':
if object_id is None:
raise JSONAPIException(source={'pointer': '/data/id'}, detail=NO_ID_ERROR)
if type_required and object_type is None:
raise JSONAPIException(source={'pointer': '/data/type'}, detail=NO_TYPE_ERROR)
attributes = resource_object.get('attributes')
parsed = {'id': object_id, 'type': object_type}
if attributes:
parsed.update(attributes)
if relationships:
relationships = self.flatten_relationships(relationships)
if isinstance(relationships, list):
relationship_values = []
relationship_key = None
for relationship in relationships:
for key, value in relationship.items():
relationship_values.append(value)
relationship_key = key
relationship = {relationship_key: relationship_values}
parsed.update(relationship)
else:
parsed.update(relationships)
return parsed
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
result = super(JSONAPIParser, self).parse(stream, media_type=media_type, parser_context=parser_context)
if not isinstance(result, dict):
raise ParseError()
data = result.get('data', {})
if data:
if is_bulk_request(parser_context['request']):
if not isinstance(data, list):
raise ParseError('Expected a list of items but got type "dict".')
data_collection = []
data_collection.extend([self.flatten_data(data_object, parser_context, is_list=True) for data_object in data])
return data_collection
else:
if not isinstance(data, collections.Mapping):
raise ParseError('Expected a dictionary of items.')
return self.flatten_data(data, parser_context, is_list=False)
else:
raise JSONAPIException(source={'pointer': '/data'}, detail=NO_DATA_ERROR)
def flatten_multiple_relationships(self, parser, relationships):
rel = {}
for resource in relationships:
ret = super(parser, self).flatten_relationships({resource: relationships[resource]})
if isinstance(ret, list):
rel[resource] = []
for item in ret:
if item.get('target_type') and item.get('id'):
rel[resource].append(item['id'])
else:
if ret.get('target_type') and ret.get('id'):
rel[resource] = ret['id']
return rel
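# Illustrative (editorial) example, assuming two relationships in the payload:
#   {"parent":   {"data": {"id": "p1", "type": "nodes"}},
#    "children": {"data": [{"id": "c1", "type": "nodes"},
#                          {"id": "c2", "type": "nodes"}]}}
# flattens to {"parent": "p1", "children": ["c1", "c2"]}.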
class JSONAPIParserForRegularJSON(JSONAPIParser):
"""
Allows same processing as JSONAPIParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIRelationshipParser(JSONParser):
"""
Parses JSON-serialized data for relationship endpoints. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
def parse(self, stream, media_type=None, parser_context=None):
res = super(JSONAPIRelationshipParser, self).parse(stream, media_type, parser_context)
if not isinstance(res, dict):
raise ParseError('Request body must be dictionary')
data = res.get('data')
if data:
if not isinstance(data, list):
raise ParseError('Data must be an array')
for i, datum in enumerate(data):
if datum.get('id') is None:
raise JSONAPIException(source={'pointer': '/data/{}/id'.format(str(i))}, detail=NO_ID_ERROR)
if datum.get('type') is None:
raise JSONAPIException(source={'pointer': '/data/{}/type'.format(str(i))}, detail=NO_TYPE_ERROR)
return {'data': data}
return {'data': []}
class JSONAPIRelationshipParserForRegularJSON(JSONAPIRelationshipParser):
"""
Allows same processing as JSONAPIRelationshipParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIOnetoOneRelationshipParser(JSONParser):
"""
Parses JSON-serialized data for relationship endpoints. Overrides media_type.
"""
media_type = 'application/vnd.api+json'
def parse(self, stream, media_type=None, parser_context=None):
res = super(JSONAPIOnetoOneRelationshipParser, self).parse(stream, media_type, parser_context)
if not isinstance(res, dict):
raise ParseError('Request body must be dictionary')
data = res.get('data')
# allow skip type check for legacy api version
legacy_type_allowed = parser_context.get('legacy_type_allowed', True)
type_required = not (
legacy_type_allowed and
parser_context['request'].version < 2.7 and
parser_context['request'].method == 'PATCH'
)
if data:
id_ = data.get('id')
type_ = data.get('type')
if id_ is None:
raise JSONAPIException(source={'pointer': '/data/id'}, detail=NO_ID_ERROR)
if type_required and type_ is None:
raise JSONAPIException(source={'pointer': '/data/type'}, detail=NO_TYPE_ERROR)
return data
return {'type': None, 'id': None}
class JSONAPIOnetoOneRelationshipParserForRegularJSON(JSONAPIOnetoOneRelationshipParser):
"""
Allows same processing as JSONAPIRelationshipParser to occur for requests with application/json media type.
"""
media_type = 'application/json'
class JSONAPIMultipleRelationshipsParser(JSONAPIParser):
"""
If edits are made to this class, be sure to check JSONAPIMultipleRelationshipsParserForRegularJSON to see if corresponding
edits should be made there.
"""
def flatten_relationships(self, relationships):
return self.flatten_multiple_relationships(JSONAPIMultipleRelationshipsParser, relationships)
class JSONAPIMultipleRelationshipsParserForRegularJSON(JSONAPIParserForRegularJSON):
"""
Allows same processing as JSONAPIMultipleRelationshipsParser to occur for requests with application/json media type.
"""
def flatten_relationships(self, relationships):
return self.flatten_multiple_relationships(JSONAPIMultipleRelationshipsParserForRegularJSON, relationships)
class HMACSignedParser(JSONParser):
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON. Validates the 'signature' in the payload then returns the resulting data.
"""
data = super(HMACSignedParser, self).parse(stream, media_type=media_type, parser_context=parser_context)
try:
sig = data['signature']
payload = signing.unserialize_payload(data['payload'])
exp_time = payload['time']
except (KeyError, ValueError):
raise JSONAPIException(detail='Invalid Payload')
if not signing.default_signer.verify_payload(sig, payload):
raise NotAuthenticated
if time.time() > exp_time:
raise JSONAPIException(detail='Signature has expired')
return payload
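# Illustrative (editorial) request body, assuming it was produced by the
# framework's signing helpers; field values are placeholders:
#   {"payload": "<serialized JSON containing a 'time' expiry>", "signature": "<HMAC digest>"}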
class SearchParser(JSONAPIParser):
def parse(self, stream, media_type=None, parser_context=None):
try:
view = parser_context['view']
except KeyError:
raise ImproperlyConfigured('SearchParser requires "view" context.')
data = super(SearchParser, self).parse(stream, media_type=media_type, parser_context=parser_context)
if not data:
raise JSONAPIException(detail='Invalid Payload')
res = {
'query': {
'bool': {},
},
}
sort = parser_context['request'].query_params.get('sort')
if sort:
res['sort'] = [{
sort.lstrip('-'): {
'order': 'desc' if sort.startswith('-') else 'asc',
},
}]
try:
q = data.pop('q')
except KeyError:
pass
else:
res['query']['bool'].update({
'must': {
'query_string': {
'query': q,
'fields': view.search_fields,
},
},
})
if any(data.values()):
res['query']['bool'].update({'filter': []})
for key, val in data.items():
if val is not None:
if isinstance(val, list):
res['query']['bool']['filter'].append({'terms': {key: val}})
else:
res['query']['bool']['filter'].append({'term': {key: val}})
return res
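# Illustrative (editorial) example: for the body {"q": "reef", "tags": ["a", "b"]}
# and the query string ?sort=-date, parse() yields an Elasticsearch-style dict:
#   {"sort": [{"date": {"order": "desc"}}],
#    "query": {"bool": {
#        "must": {"query_string": {"query": "reef", "fields": view.search_fields}},
#        "filter": [{"terms": {"tags": ["a", "b"]}}]}}}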
|
mattclark/osf.io
|
api/base/parsers.py
|
Python
|
apache-2.0
| 12,275
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from boxbranding import getImageVersion, getMachineBrand, getMachineName
from os import system, access, R_OK
from os.path import ismount
import re
import six
from enigma import eConsoleAppContainer, eTimer
from twisted.web import client
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.Sources.List import List
from Components.FileList import FileList
from Components.ScrollLabel import ScrollLabel
from Components.Harddisk import harddiskmanager
from Components.Task import Task, Job, job_manager, Condition
from Tools.Directories import resolveFilename, SCOPE_HDD, SCOPE_MEDIA
from Tools.HardwareInfo import HardwareInfo
from Tools.Downloader import downloadWithProgress
class ImageDownloadJob(Job):
def __init__(self, url, filename, device=None, mountpoint="/"):
Job.__init__(self, _("Download .NFI-files for USB-flasher"))
if device:
if ismount(mountpoint):
UmountTask(self, mountpoint)
MountTask(self, device, mountpoint)
ImageDownloadTask(self, url, mountpoint + filename)
ImageDownloadTask(self, url[:-4] + ".nfo", mountpoint + filename[:-4] + ".nfo")
#if device:
#UmountTask(self, mountpoint)
def retry(self):
self.tasks[0].args += self.tasks[0].retryargs
Job.retry(self)
class MountTask(Task):
def __init__(self, job, device, mountpoint):
Task.__init__(self, job, "mount")
self.setTool("mount")
options = "rw,sync"
self.mountpoint = mountpoint
self.args += [device, mountpoint, "-o" + options]
self.weighting = 1
def processOutput(self, data):
print("[MountTask] output:", data)
class UmountTask(Task):
def __init__(self, job, mountpoint):
Task.__init__(self, job, "mount")
self.setTool("umount")
self.args += [mountpoint]
self.weighting = 1
class DownloaderPostcondition(Condition):
def __init__(self):
pass
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return task.error_message
class ImageDownloadTask(Task):
def __init__(self, job, url, path):
Task.__init__(self, job, _("Downloading"))
self.postconditions.append(DownloaderPostcondition())
self.job = job
self.url = url
self.path = path
self.error_message = ""
self.last_recvbytes = 0
self.error_message = None
self.download = None
self.aborted = False
def run(self, callback):
self.callback = callback
self.download = downloadWithProgress(self.url, self.path)
self.download.addProgress(self.download_progress)
self.download.start().addCallback(self.download_finished).addErrback(self.download_failed)
print("[ImageDownloadTask] downloading", self.url, "to", self.path)
def abort(self):
print("[ImageDownloadTask] aborting", self.url)
if self.download:
self.download.stop()
self.aborted = True
def download_progress(self, recvbytes, totalbytes):
#print "[update_progress] recvbytes=%d, totalbytes=%d" % (recvbytes, totalbytes)
if (recvbytes - self.last_recvbytes) > 10000: # anti-flicker
self.progress = int(100 * (float(recvbytes) / float(totalbytes)))
self.name = _("Downloading") + ' ' + "%d of %d kBytes" % (recvbytes / 1024, totalbytes / 1024)
self.last_recvbytes = recvbytes
def download_failed(self, failure_instance=None, error_message=""):
self.error_message = error_message
if error_message == "" and failure_instance is not None:
self.error_message = failure_instance.getErrorMessage()
Task.processFinished(self, 1)
def download_finished(self, string=""):
if self.aborted:
self.finish(aborted=True)
else:
Task.processFinished(self, 0)
class StickWizardJob(Job):
def __init__(self, path):
Job.__init__(self, _("USB stick wizard"))
self.path = path
self.device = path
while self.device[-1:] == "/" or self.device[-1:].isdigit():
self.device = self.device[:-1]
box = HardwareInfo().get_device_name()
url = "https://www.dreamboxupdate.com/download/opendreambox/dreambox-nfiflasher-%s.tar.bz2" % box
self.downloadfilename = "/tmp/dreambox-nfiflasher-%s.tar.bz2" % box
self.imagefilename = "/tmp/nfiflash_%s.img" % box
#UmountTask(self, device)
PartitionTask(self)
ImageDownloadTask(self, url, self.downloadfilename)
UnpackTask(self)
CopyTask(self)
class PartitionTaskPostcondition(Condition):
def __init__(self):
pass
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return {
task.ERROR_BLKRRPART: "Device or resource busy",
task.ERROR_UNKNOWN: task.errormsg
}[task.error]
class PartitionTask(Task):
ERROR_UNKNOWN, ERROR_BLKRRPART = list(range(2))
def __init__(self, job):
Task.__init__(self, job, "partitioning")
self.postconditions.append(PartitionTaskPostcondition())
self.job = job
self.setTool("sfdisk")
self.args += [self.job.device]
self.weighting = 10
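# sfdisk input script: one primary partition (start 0, size = rest of the
# device), type 0x6 (FAT16), marked bootable ('*'); the three ';' lines leave
# the remaining partition slots empty, and 'y' confirms writing the table.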
self.initial_input = "0 - 0x6 *\n;\n;\n;\ny"
self.errormsg = ""
def run(self, callback):
Task.run(self, callback)
def processOutput(self, data):
print("[PartitionTask] output:", data)
if data.startswith("BLKRRPART:"):
self.error = self.ERROR_BLKRRPART
else:
self.error = self.ERROR_UNKNOWN
self.errormsg = data
class UnpackTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Unpacking USB flasher image...")
self.job = job
self.setTool("tar")
self.args += ["-xjvf", self.job.downloadfilename]
self.weighting = 80
self.end = 80
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.progress_increment)
def run(self, callback):
Task.run(self, callback)
self.delayTimer.start(950, False)
def progress_increment(self):
self.progress += 1
def processOutput(self, data):
print("[UnpackTask] output: \'%s\'" % data)
self.job.imagefilename = data
def afterRun(self):
self.delayTimer.callback.remove(self.progress_increment)
class CopyTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Copying USB flasher boot image to stick...")
self.job = job
self.setTool("dd")
self.args += ["if=%s" % self.job.imagefilename, "of=%s1" % self.job.device]
self.weighting = 20
self.end = 20
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.progress_increment)
def run(self, callback):
Task.run(self, callback)
self.delayTimer.start(100, False)
def progress_increment(self):
self.progress += 1
def processOutput(self, data):
print("[CopyTask] output:", data)
def afterRun(self):
self.delayTimer.callback.remove(self.progress_increment)
class NFOViewer(Screen):
skin = """
<screen name="NFOViewer" position="center,center" size="610,410" title="Changelog" >
<widget name="changelog" position="10,10" size="590,380" font="Regular;16" />
</screen>"""
def __init__(self, session, nfo):
Screen.__init__(self, session)
self["changelog"] = ScrollLabel(nfo)
self["ViewerActions"] = ActionMap(["SetupActions", "ColorActions", "DirectionActions"],
{
"green": self.exit,
"red": self.exit,
"ok": self.exit,
"cancel": self.exit,
"down": self.pageDown,
"up": self.pageUp
})
def pageUp(self):
self["changelog"].pageUp()
def pageDown(self):
self["changelog"].pageDown()
def exit(self):
self.close(False)
class feedDownloader:
def __init__(self, feed_base, box, OE_vers):
print("[feedDownloader::init] feed_base=%s, box=%s" % (feed_base, box))
self.feed_base = feed_base
self.OE_vers = OE_vers
self.box = box
def getList(self, callback, errback):
self.urlbase = "%s/%s/%s/images/" % (self.feed_base, self.OE_vers, self.box)
print("[getList]", self.urlbase)
self.callback = callback
self.errback = errback
client.getPage(self.urlbase).addCallback(self.feed_finished).addErrback(self.feed_failed)
def feed_failed(self, failure_instance):
print("[feed_failed]", str(failure_instance))
self.errback(failure_instance.getErrorMessage())
def feed_finished(self, feedhtml):
print("[feed_finished]")
fileresultmask = re.compile("<a class=[\'\"]nfi[\'\"] href=[\'\"](?P<url>.*?)[\'\"]>(?P<name>.*?.nfi)</a>", re.DOTALL)
searchresults = fileresultmask.finditer(feedhtml)
fileresultlist = []
if searchresults:
for x in searchresults:
url = x.group("url")
if url[0:7] != "http://":
url = self.urlbase + x.group("url")
name = x.group("name")
entry = (name, url)
fileresultlist.append(entry)
self.callback(fileresultlist, self.OE_vers)
class DeviceBrowser(Screen, HelpableScreen):
skin = """
<screen name="DeviceBrowser" position="center,center" size="520,430" title="Please select target medium" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="message" render="Label" position="5,50" size="510,150" font="Regular;16" />
<widget name="filelist" position="5,210" size="510,220" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, startdir, message="", showDirectories=True, showFiles=True, showMountpoints=True, matchingPattern="", useServiceRef=False, inhibitDirs=False, inhibitMounts=False, isTop=False, enableWrapAround=False, additionalExtensions=None):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText()
self["message"] = StaticText(message)
self.filelist = FileList(startdir, showDirectories=showDirectories, showFiles=showFiles, showMountpoints=showMountpoints, matchingPattern=matchingPattern, useServiceRef=useServiceRef, inhibitDirs=inhibitDirs, inhibitMounts=inhibitMounts, isTop=isTop, enableWrapAround=enableWrapAround, additionalExtensions=additionalExtensions)
self["filelist"] = self.filelist
self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.use,
"red": self.exit,
"ok": self.ok,
"cancel": self.exit
})
hotplugNotifier.append(self.hotplugCB)
self.onShown.append(self.updateButton)
self.onClose.append(self.removeHotplug)
def hotplugCB(self, dev, action):
print("[hotplugCB]", dev, action)
self.updateButton()
def updateButton(self):
if self["filelist"].getFilename() or self["filelist"].getCurrentDirectory():
self["key_green"].text = _("Use")
else:
self["key_green"].text = ""
def removeHotplug(self):
print("[removeHotplug]")
hotplugNotifier.remove(self.hotplugCB)
def ok(self):
if self.filelist.canDescent():
if self["filelist"].showMountpoints == True and self["filelist"].showDirectories == False:
self.use()
else:
self.filelist.descent()
def use(self):
print("[use]", self["filelist"].getCurrentDirectory(), self["filelist"].getFilename())
if self["filelist"].getCurrentDirectory() is not None:
if self.filelist.canDescent() and self["filelist"].getFilename() and len(self["filelist"].getFilename()) > len(self["filelist"].getCurrentDirectory()):
self.filelist.descent()
self.close(self["filelist"].getCurrentDirectory())
elif self["filelist"].getFilename():
self.close(self["filelist"].getFilename())
def exit(self):
self.close(False)
(ALLIMAGES, RELEASE, EXPERIMENTAL, STICK_WIZARD, START) = list(range(5))
class NFIDownload(Screen):
skin = """
<screen name="NFIDownload" position="center,center" size="610,410" title="NFIDownload" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#18188b" transparent="1" />
<ePixmap pixmap="border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" />
<widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (25, [
MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
], True, "showOnDemand")
},
"fonts": [gFont("Regular", 22)],
"itemHeight": 25
}
</convert>
</widget>
<widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (300, [
MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
], False, "showNever")
},
"fonts": [gFont("Regular", 22)],
"itemHeight": 300
}
</convert>
</widget>
<widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, destdir=None):
Screen.__init__(self, session)
#self.skin_path = plugin_path
#self.menu = args
self.box = HardwareInfo().get_device_name()
self.feed_base = "http://www.dreamboxupdate.com/opendreambox" #/1.5/%s/images/" % self.box
self.usbmountpoint = resolveFilename(SCOPE_MEDIA) + "usb/"
self.menulist = []
self["menu"] = List(self.menulist)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["status"] = StaticText(_("Please wait... Loading list..."))
self["shortcuts"] = ActionMap(["OkCancelActions", "ColorActions", "ShortcutActions", "DirectionActions"],
{
"ok": self.keyOk,
"green": self.keyOk,
"red": self.keyRed,
"blue": self.keyBlue,
"up": self.keyUp,
"upRepeated": self.keyUp,
"downRepeated": self.keyDown,
"down": self.keyDown,
"cancel": self.close,
}, -1)
self.onShown.append(self.go)
self.feedlists = [[], [], []]
self.branch = START
self.container = eConsoleAppContainer()
self.container.dataAvail.append(self.tool_avail)
self.taskstring = ""
self.image_idx = 0
self.nfofilename = ""
self.nfo = ""
self.target_dir = None
def tool_avail(self, string):
string = six.ensure_str(string)
print("[tool_avail]" + string)
self.taskstring += string
def go(self):
self.onShown.remove(self.go)
self.umountCallback = self.getMD5
self.umount()
def getMD5(self):
url = "https://www.dreamboxupdate.com/download/opendreambox/dreambox-nfiflasher-%s-md5sums" % self.box
client.getPage(url).addCallback(self.md5sums_finished).addErrback(self.feed_failed)
def md5sums_finished(self, data):
print("[md5sums_finished]", data)
self.stickimage_md5 = data
self.checkUSBStick()
def keyRed(self):
if self.branch == START:
self.close()
else:
self.branch = START
self["menu"].setList(self.menulist)
#elif self.branch == ALLIMAGES or self.branch == STICK_WIZARD:
def keyBlue(self):
if self.nfo != "":
self.session.open(NFOViewer, self.nfo)
def keyOk(self):
print("[keyOk]", self["menu"].getCurrent())
current = self["menu"].getCurrent()
if current:
if self.branch == START:
currentEntry = current[0]
if currentEntry == RELEASE:
self.image_idx = 0
self.branch = RELEASE
self.askDestination()
elif currentEntry == EXPERIMENTAL:
self.image_idx = 0
self.branch = EXPERIMENTAL
self.askDestination()
elif currentEntry == ALLIMAGES:
self.branch = ALLIMAGES
self.listImages()
elif currentEntry == STICK_WIZARD:
self.askStartWizard()
elif self.branch == ALLIMAGES:
self.image_idx = self["menu"].getIndex()
self.askDestination()
self.updateButtons()
def keyUp(self):
self["menu"].selectPrevious()
self.updateButtons()
def keyDown(self):
self["menu"].selectNext()
self.updateButtons()
def updateButtons(self):
current = self["menu"].getCurrent()
if current:
if self.branch == START:
self["key_red"].text = _("Close")
currentEntry = current[0]
if currentEntry in (RELEASE, EXPERIMENTAL):
self.nfo_download(currentEntry, 0)
self["key_green"].text = _("Download")
else:
self.nfofilename = ""
self.nfo = ""
self["key_blue"].text = ""
self["key_green"].text = _("continue")
elif self.branch == ALLIMAGES:
self["key_red"].text = _("Back")
self["key_green"].text = _("Download")
self.nfo_download(ALLIMAGES, self["menu"].getIndex())
def listImages(self):
print("[listImages]")
imagelist = []
mask = re.compile("%s/(?P<OE_vers>1\.\d)/%s/images/(?P<branch>.*?)-%s_(?P<version>.*?).nfi" % (self.feed_base, self.box, self.box), re.DOTALL)
for name, url in self.feedlists[ALLIMAGES]:
result = mask.match(url)
if result:
if result.group("version").startswith("20"):
version = (result.group("version")[:4] + '-' + result.group("version")[4:6] + '-' + result.group("version")[6:8])
else:
version = result.group("version")
description = "\nOpendreambox %s\n%s image\n%s\n" % (result.group("OE_vers"), result.group("branch"), version)
imagelist.append((url, name, _("Download %s from server") % description, None))
self["menu"].setList(imagelist)
def getUSBPartitions(self):
allpartitions = [(r.description, r.mountpoint) for r in harddiskmanager.getMountedPartitions(onlyhotplug=True)]
print("[getUSBPartitions]", allpartitions)
usbpartition = []
for x in allpartitions:
print(x, x[1] == '/', x[0].find("USB"), access(x[1], R_OK))
if x[1] != '/' and x[0].find("USB") > -1: # and access(x[1], R_OK) is True:
usbpartition.append(x)
return usbpartition
def askDestination(self):
usbpartition = self.getUSBPartitions()
if len(usbpartition) == 1:
self.target_dir = usbpartition[0][1]
self.ackDestinationDevice(device_description=usbpartition[0][0])
else:
self.openDeviceBrowser()
def openDeviceBrowser(self):
self.session.openWithCallback(self.DeviceBrowserClosed, DeviceBrowser, None, showDirectories=True, showMountpoints=True, inhibitMounts=["/autofs/sr0/"])
def DeviceBrowserClosed(self, path):
print("[DeviceBrowserClosed]", str(path))
self.target_dir = path
if path:
self.ackDestinationDevice()
else:
self.keyRed()
def ackDestinationDevice(self, device_description=None):
if device_description is None:
dev = self.target_dir
else:
dev = device_description
message = _("Do you want to download the image to %s ?") % dev
choices = [(_("Yes"), self.ackedDestination), (_("List of storage devices"), self.openDeviceBrowser), (_("Cancel"), self.keyRed)]
self.session.openWithCallback(self.ackDestination_query, ChoiceBox, title=message, list=choices)
def ackDestination_query(self, choice):
print("[ackDestination_query]", choice)
if isinstance(choice, tuple):
choice[1]()
else:
self.keyRed()
def ackedDestination(self):
print("[ackedDestination]", self.branch, self.target_dir)
self.container.setCWD(resolveFilename(SCOPE_MEDIA) + "usb/")
if self.target_dir[:8] == "/autofs/":
self.target_dir = "/dev/" + self.target_dir[8:-1]
if self.branch == STICK_WIZARD:
job = StickWizardJob(self.target_dir)
job.afterEvent = "close"
job_manager.AddJob(job)
job_manager.failed_jobs = []
self.session.openWithCallback(self.StickWizardCB, JobView, job, afterEventChangeable=False)
elif self.branch != STICK_WIZARD:
url = self.feedlists[self.branch][self.image_idx][1]
filename = self.feedlists[self.branch][self.image_idx][0]
print("[getImage] start downloading %s to %s" % (url, filename))
if self.target_dir.startswith("/dev/"):
job = ImageDownloadJob(url, filename, self.target_dir, self.usbmountpoint)
else:
job = ImageDownloadJob(url, filename, None, self.target_dir)
job.afterEvent = "close"
job_manager.AddJob(job)
job_manager.failed_jobs = []
self.session.openWithCallback(self.ImageDownloadCB, JobView, job, afterEventChangeable=False)
def StickWizardCB(self, ret=None):
print("[StickWizardCB]", ret)
# print job_manager.active_jobs, job_manager.failed_jobs, job_manager.job_classes, job_manager.in_background, job_manager.active_job
if len(job_manager.failed_jobs) == 0:
self.session.open(MessageBox, _("The USB stick was prepared to be bootable.\nNow you can download an NFI image file!"), type=MessageBox.TYPE_INFO)
if len(self.feedlists[ALLIMAGES]) == 0:
self.getFeed()
else:
self.setMenu()
else:
self.umountCallback = self.checkUSBStick
self.umount()
def ImageDownloadCB(self, ret):
print("[ImageDownloadCB]", ret)
# print job_manager.active_jobs, job_manager.failed_jobs, job_manager.job_classes, job_manager.in_background, job_manager.active_job
if len(job_manager.failed_jobs) == 0:
self.session.openWithCallback(self.askBackupCB, MessageBox, _("The wizard can backup your current settings. Do you want to do a backup now?"), MessageBox.TYPE_YESNO)
else:
self.umountCallback = self.keyRed
self.umount()
def askBackupCB(self, ret):
if ret:
from Plugins.SystemPlugins.SoftwareManager.BackupRestore import BackupScreen
class USBBackupScreen(BackupScreen):
def __init__(self, session, usbmountpoint):
BackupScreen.__init__(self, session, runBackup=True)
self.backuppath = usbmountpoint
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.session.openWithCallback(self.showHint, USBBackupScreen, self.usbmountpoint)
else:
self.showHint()
def showHint(self, ret=None):
self.session.open(MessageBox, _("To update your %s %s firmware, please follow these steps:\n1) Turn off your box with the rear power switch and make sure the bootable USB stick is plugged in.\n2) Turn mains back on and hold the DOWN button on the front panel pressed for 10 seconds.\n3) Wait for bootup and follow instructions of the wizard.") % (getMachineBrand(), getMachineName()), type=MessageBox.TYPE_INFO)
self.umountCallback = self.keyRed
self.umount()
def getFeed(self):
self.feedDownloader15 = feedDownloader(self.feed_base, self.box, OE_vers="1.5")
self.feedDownloader16 = feedDownloader(self.feed_base, self.box, OE_vers="1.6")
self.feedlists = [[], [], []]
self.feedDownloader15.getList(self.gotFeed, self.feed_failed)
self.feedDownloader16.getList(self.gotFeed, self.feed_failed)
def feed_failed(self, message=""):
self["status"].text = _("Could not connect to %s %s .NFI image feed server:") % (getMachineBrand(), getMachineName()) + "\n" + str(message) + "\n" + _("Please check your network settings!")
def gotFeed(self, feedlist, OE_vers):
print("[gotFeed]", OE_vers)
releaselist = []
experimentallist = []
for name, url in feedlist:
if name.find("release") > -1:
releaselist.append((name, url))
if name.find("experimental") > -1:
experimentallist.append((name, url))
self.feedlists[ALLIMAGES].append((name, url))
if OE_vers == "1.6":
self.feedlists[RELEASE] = releaselist + self.feedlists[RELEASE]
self.feedlists[EXPERIMENTAL] = experimentallist + self.feedlists[EXPERIMENTAL]
elif OE_vers == "1.5":
self.feedlists[RELEASE] = self.feedlists[RELEASE] + releaselist
self.feedlists[EXPERIMENTAL] = self.feedlists[EXPERIMENTAL] + experimentallist
self.setMenu()
def checkUSBStick(self):
self.target_dir = None
usbpartition = self.getUSBPartitions()
print("[checkUSBStick] found USB partitions:", usbpartition)
if len(usbpartition) == 1:
self.target_dir = usbpartition[0][1]
self.md5_passback = self.getFeed
self.md5_failback = self.askStartWizard
self.md5verify(self.stickimage_md5, self.target_dir)
elif not usbpartition:
print("[NFIFlash] needs to create usb flasher stick first!")
self.askStartWizard()
else:
self.askStartWizard()
def askStartWizard(self):
self.branch = STICK_WIZARD
message = _("""This plugin creates a USB stick which can be used to update the firmware of your %s %s without the need for a network or WLAN connection.
First, a USB stick needs to be prepared so that it becomes bootable.
In the next step, an NFI image file can be downloaded from the update server and saved on the USB stick.
If you already have a prepared bootable USB stick, please insert it now. Otherwise plug in a USB stick with a minimum size of 64 MB!""") % (getMachineBrand(), getMachineName())
self.session.openWithCallback(self.wizardDeviceBrowserClosed, DeviceBrowser, None, message, showDirectories=True, showMountpoints=True, inhibitMounts=["/", "/autofs/sr0/", "/autofs/sda1/", "/media/hdd/", "/media/net/", self.usbmountpoint, "/media/dvd/"])
def wizardDeviceBrowserClosed(self, path):
print("[wizardDeviceBrowserClosed]", path)
self.target_dir = path
if path:
self.md5_passback = self.getFeed
self.md5_failback = self.wizardQuery
self.md5verify(self.stickimage_md5, self.target_dir)
else:
self.close()
def wizardQuery(self):
print("[wizardQuery]")
description = self.target_dir
for name, dev in self.getUSBPartitions():
if dev == self.target_dir:
description = name
message = _("You have chosen to create a new .NFI flasher bootable USB stick. This will repartition the USB stick and therefore all data on it will be erased.") + "\n"
message += _("The following device was found:\n\n%s\n\nDo you want to write the USB flasher to this stick?") % description
choices = [(_("Yes"), self.ackedDestination), (_("List of storage devices"), self.askStartWizard), (_("Cancel"), self.close)]
self.session.openWithCallback(self.ackDestination_query, ChoiceBox, title=message, list=choices)
def setMenu(self):
self.menulist = []
try:
latest_release = "Release %s (Opendreambox 1.5)" % self.feedlists[RELEASE][0][0][-9:-4]
self.menulist.append((RELEASE, _("Get latest release image"), _("Download %s from server") % latest_release, None))
except IndexError:
pass
try:
dat = self.feedlists[EXPERIMENTAL][0][0][-12:-4]
latest_experimental = "Experimental %s-%s-%s (Opendreambox 1.6)" % (dat[:4], dat[4:6], dat[6:])
self.menulist.append((EXPERIMENTAL, _("Get latest experimental image"), _("Download %s from server") % latest_experimental, None))
except IndexError:
pass
self.menulist.append((ALLIMAGES, _("Select an image to be downloaded"), _("Select desired image from feed list"), None))
self.menulist.append((STICK_WIZARD, _("USB stick wizard"), _("Prepare another USB stick for image flashing"), None))
self["menu"].setList(self.menulist)
self["status"].text = _("Currently installed image") + ": %s" % (getImageVersion())
self.branch = START
self.updateButtons()
def nfo_download(self, branch, idx):
nfourl = (self.feedlists[branch][idx][1])[:-4] + ".nfo"
self.nfofilename = (self.feedlists[branch][idx][0])[:-4] + ".nfo"
print("[check_for_NFO]", nfourl)
client.getPage(nfourl).addCallback(self.nfo_finished).addErrback(self.nfo_failed)
def nfo_failed(self, failure_instance):
print("[nfo_failed] " + str(failure_instance))
self["key_blue"].text = ""
self.nfofilename = ""
self.nfo = ""
def nfo_finished(self, nfodata=""):
print("[nfo_finished] " + str(nfodata))
self["key_blue"].text = _("Changelog")
self.nfo = nfodata
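    # md5verify() pipes the stored md5 line into "md5sum -c -s" running in the
    # target directory: exit status 0 means the stick is already prepared and
    # md5_passback fires, anything else falls back to md5_failback.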
def md5verify(self, md5, path):
cmd = "md5sum -c -s"
print("[verify_md5]", md5, path, cmd)
self.container.setCWD(path)
self.container.appClosed.append(self.md5finished)
self.container.execute(cmd)
self.container.write(md5)
self.container.dataSent.append(self.md5ready)
def md5ready(self, retval):
self.container.sendEOF()
def md5finished(self, retval):
print("[md5finished]", str(retval))
self.container.appClosed.remove(self.md5finished)
self.container.dataSent.remove(self.md5ready)
if retval == 0:
print("check passed! calling", repr(self.md5_passback))
self.md5_passback()
else:
print("check failed! calling", repr(self.md5_failback))
self.md5_failback()
def umount(self):
cmd = "umount " + self.usbmountpoint
print("[umount]", cmd)
self.container.setCWD('/')
self.container.appClosed.append(self.umountFinished)
self.container.execute(cmd)
def umountFinished(self, retval):
print("[umountFinished]", str(retval))
self.container.appClosed.remove(self.umountFinished)
self.umountCallback()
def main(session, **kwargs):
session.open(NFIDownload, resolveFilename(SCOPE_HDD))
def filescan_open(list, session, **kwargs):
dev = "/dev/" + list[0].path.rsplit('/', 1)[0][7:]
print("mounting device " + dev + " to /media/usb...")
usbmountpoint = resolveFilename(SCOPE_MEDIA) + "usb/"
system("mount %s %s -o rw,sync" % (dev, usbmountpoint))
session.open(NFIDownload, usbmountpoint)
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
return \
Scanner(mimetypes=["application/x-dream-image"],
paths_to_scan=[
ScanPath(path="", with_subdirs=False),
],
name="NFI",
            description=(_("Download .NFI-files for USB-flasher") + "..."),
            openfnc=filescan_open)
|
openatv/enigma2
|
lib/python/Plugins/SystemPlugins/NFIFlash/downloader.py
|
Python
|
gpl-2.0
| 30,623
|
from django.contrib import admin
from mailer.models import Message, DontSendEntry, MessageLog
class MessageAdmin(admin.ModelAdmin):
list_display = ["id", "to_addresses", "subject", "when_added", "priority"]
class DontSendEntryAdmin(admin.ModelAdmin):
list_display = ["to_address", "when_added"]
class MessageLogAdmin(admin.ModelAdmin):
list_display = ["id", "to_addresses", "subject", "when_attempted", "result"]
admin.site.register(Message, MessageAdmin)
admin.site.register(DontSendEntry, DontSendEntryAdmin)
admin.site.register(MessageLog, MessageLogAdmin)
|
maweis1981/hey001
|
mayversion/mayversion/mailer/admin.py
|
Python
|
lgpl-3.0
| 580
|
import re
from functools import reduce  # reduce is a builtin in Python 2 but lives in functools on Python 3
from parsimonious.grammar import Grammar
import parsimonious.exceptions
class ParseError(Exception):
pass
def is_bdl_scenario(contents):
return contents.startswith("#!benchDL\n")
def convert(contents, env):
ast = transform(lex(contents))
return substitute(ast, env)
def get_includes(ir):
if isinstance(ir, list):
return reduce(lambda a, x: a + get_includes(x), ir, [])
if isinstance(ir, dict):
if ir["function"] == "include_resource" and isinstance(ir["args"][1], str):
return [ir["args"][0:2]]
return []
def get_num_of_workers(ir):
if isinstance(ir, list):
return reduce(lambda a, x: a + get_num_of_workers(x), ir, 0)
if isinstance(ir, dict):
if ir["function"] == "pool" and isinstance(ir["args"]["size"], int):
return ir["args"]["size"]
return 0
def add_indents(text):
brackets = []
indents = [0]
result = []
comma = None
for line in text.splitlines():
old_len = len(brackets)
tline = line
while len(tline) > 0:
if tline.startswith("\\\\"):
tline = tline[2:]
continue
if tline.startswith("\\\""):
tline = tline[2:]
continue
char = tline[0]
tline = tline[1:]
            if comma is not None and char == comma:
                comma = None
            elif char in ['"', "'"] and comma is None:
                comma = char
            if comma is None:
if (char == "]" and brackets[-1] == "[") or (char == ")" and brackets[-1] == "("):
brackets.pop()
if char == "(" or char == "[":
brackets.append(char)
if char == "#":
break
if old_len == 0:
i = get_indent(line)
if (i == -1) or (i == indents[-1]):
result.append(line)
continue
if i > indents[-1]:
result.append("_INDENT_" + line)
indents.append(i)
continue
while i < indents[-1]:
indents.pop()
line = "_DEDENT_ " + line
if i != indents[-1]:
raise ParseError("Incorrect indentation")
result.append(line)
line = ""
while 0 < indents[-1]:
indents.pop()
line = "_DEDENT_ " + line
if line != "":
result.append(line)
return "\n".join(result)
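# A minimal sketch (hypothetical scenario text) of what add_indents() produces:
# indentation is rewritten into explicit _INDENT_/_DEDENT_ markers so the
# grammar in lex() never has to reason about whitespace itself, e.g.
#
#   pool(size = 1):          ->  pool(size = 1):
#       loop(time = 1):          _INDENT_    loop(time = 1):
#           wait(1)              _INDENT_        wait(1)
#                                _DEDENT_ _DEDENT_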
def get_indent(line):
spaces = len(re.match(r"\s*", line).group())
return -1 if (len(line) <= spaces) or (line[spaces] == "#") else spaces
def substitute(ir, env):
if isinstance(ir, list):
        return [substitute(a, env) for a in ir]
if isinstance(ir, dict):
ir = {k: substitute(v, env) for k, v in ir.items()}
if "function" in ir:
if ir["function"] in ["var", "numvar"] and ir["args"][0] in env:
return env[ir["args"][0]]
elif ir["function"] in ["var", "numvar"] and len(ir["args"]) > 1:
return ir["args"][1]
return ir
def transform(ast):
if ast.expr_name == "number":
if ast.text[-1] in "KMGT":
num = ast.text[0:-1]
if ast.text[-1] == "K":
mult = 1000
elif ast.text[-1] == "M":
mult = 1000000
elif ast.text[-1] == "G":
mult = 1000000000
else:
mult = 1000000000000
else:
num = ast.text
mult = 1
return [float(num)*mult] if "." in num else [int(num)*mult]
elif ast.expr_name == "boolean":
return [ast.text=="true"]
elif ast.expr_name == "string":
return [ast.text[1:-1].replace(r'\"', '"').replace(r'\\\\', '\\\\')] # "something"
elif ast.expr_name == "atom":
return [ast.text]
else:
lis = reduce(lambda a, x: a + transform(x), ast.children, [])
if ast.expr_name == "list":
return [lis]
if ast.expr_name == "multiline":
return [{"function": lis[0], "args":lis[1], "children": lis[2:]}]
if ast.expr_name == "single":
return [{"function": lis[0], "args":lis[1:]}]
elif ast.expr_name == "map":
return [dict(lis)]
elif ast.expr_name in ["tuple", "kv"]:
if len(lis) == 2:
return [tuple(lis)]
else:
return [(lis[0], lis[1:])]
return lis
def lex(text):
grammar = Grammar("""\
entry = _ (statement _)* _
statement = multiline / single
multiline = atom _ args _ ":" _ "_INDENT_" _ (statement _)+ "_DEDENT_"
single = atom _ args
atom = ~"[a-z][0-9a-zA-Z_]*" / ("'" ~"[^']*" "'")
_ = ~"\s*" (~"#[^\\r\\n]*\s*")*
args = ( _ map ) / ( _ "(" _ term (_ "," _ term)* _ ")" ) / (_ "(" _ ")")
map = "(" _ kv (_ "," _ kv)* _ ")"
list = ( _ "[" _ term (_ "," _ term)* _ "]" ) / ( _ "[" _ "]")
kv = term _ "=" _ term _
term = unumber / logic_exp / single / list / string / atom / number
logic_exp = logic_priority / logic_unary / logic_plain
logic_priority = "(" _ logic_exp _ ")" _ (logic_binary _ logic_exp _)*
logic_unary = "not" _ logic_exp _
logic_binary = "and" / "or"
logic_plain = logic_op _ (logic_binary _ logic_exp _)*
logic_op = (single / string / number) _ ("<=" / ">=" / "<" / ">" / "==" / "!=" / "<>" / "/=") _ (single / string / number)
string = '"' ~r'(\\\\.|[^\\\\"])*' '"'
number = ~"[0-9]+(\.[0-9]+)?(e\-?[0-9]+)?[GKMT]?"
unumber = (number / single) _ atom
""")
try:
return grammar.parse(add_indents(text))
except parsimonious.exceptions.ParseError as e:
raise ParseError(e)
|
timofey-barmin/mzbench
|
lib/bdl_utils.py
|
Python
|
bsd-3-clause
| 5,783
|
import numpy as np
import pytest
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["np"] = np
|
jpn--/larch
|
larch/conftest.py
|
Python
|
gpl-3.0
| 129
|
"""
Server Technology Power Strips
"""
from basicpowerstrip import BasicPowerStrip
from clusto.drivers.devices.common import IPMixin, SNMPMixin
from clusto.drivers.resourcemanagers import IPManager
from clusto.exceptions import ConnectionException, DriverException
import re
class PowerTowerXM(BasicPowerStrip, IPMixin, SNMPMixin):
"""
Provides support for Power Tower XL/XM
Power Port designations start with 1 at the upper left (.aa1) down to 32
at the bottom right (.bb8).
"""
_driver_name = "powertowerxm"
_properties = {'withslave':0}
_portmeta = { 'pwr-nema-L5': { 'numports':2 },
'pwr-nema-5' : { 'numports':16, },
'nic-eth' : { 'numports':1, },
'console-serial' : { 'numports':1, },
}
_portmap = {'aa1':1,'aa2':2,'aa3':3,'aa4':4,'aa5':5,'aa6':6,'aa7':7,'aa8':8,
'ab1':9,'ab2':10,'ab3':11,'ab4':12,'ab5':13,'ab6':14,'ab7':15,
'ab8':16,'ba1':17,'ba2':18,'ba3':19,'ba4':20,'ba5':21,'ba6':22,
'ba7':23,'ba8':24,'bb1':25,'bb2':26,'bb3':27,'bb4':28,'bb5':29,
'bb6':30,'bb7':31,'bb8':32}
_outlet_states = ['idleOff', 'idleOn', 'wakeOff', 'wakeOn', 'off', 'on', 'lockedOff', 'reboot', 'shutdown', 'pendOn', 'pendOff', 'minimumOff', 'minimumOn', 'eventOff', 'eventOn', 'eventReboot', 'eventShutdown']
def _ensure_portnum(self, porttype, portnum):
"""map powertower port names to clusto port numbers"""
        if porttype not in self._portmeta:
            msg = "No port %s:%s exists on %s." % (porttype, str(portnum), self.name)
            raise ConnectionException(msg)
if isinstance(portnum, int):
num = portnum
else:
if portnum.startswith('.'):
portnum = portnum[1:]
            if portnum in self._portmap:
                num = self._portmap[portnum]
            else:
                msg = "No port %s:%s exists on %s." % (porttype, str(portnum),
                                                       self.name)
                raise ConnectionException(msg)
        numports = self._portmeta[porttype]['numports']
if self.withslave:
if porttype in ['mains', 'pwr']:
numports *= 2
if num < 0 or num >= numports:
msg = "No port %s:%s exists on %s." % (porttype, str(num),
self.name)
raise ConnectionException(msg)
return num
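    # Sentry MIB columns assumed by the SNMP helpers below (inferred from the
    # OIDs and values used in this driver, not from vendor documentation):
    #   ...1718.3.2.3.1.2   outlet name   (walked to locate an outlet's row)
    #   ...1718.3.2.3.1.10  outlet state  (read; index into _outlet_states)
    #   ...1718.3.2.3.1.11  outlet action (write; 1=on, 2=off, 3=reboot)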
def _get_port_oid(self, outlet):
for oid, value in self._snmp_walk('1.3.6.1.4.1.1718.3.2.3.1.2'):
if value.lower() == outlet:
return oid
def get_outlet_state(self, outlet):
oid = self._get_port_oid(outlet)
oid = oid.replace('1.3.6.1.4.1.1718.3.2.3.1.2', '1.3.6.1.4.1.1718.3.2.3.1.10')
state = self._snmp_get(oid)
return self._outlet_states[int(state)]
def set_outlet_state(self, outlet, state, session=None):
oid = self._get_port_oid(outlet)
oid = oid.replace('1.3.6.1.4.1.1718.3.2.3.1.2', '1.3.6.1.4.1.1718.3.2.3.1.11')
r = self._snmp_set(oid, state)
if r.PDU.varbindlist[0].value.val != state:
raise DriverException('Unable to set SNMP state')
def set_power_off(self, porttype, portnum):
if porttype != 'pwr-nema-5':
raise DriverException('Cannot turn off ports of type: %s' % str(porttype))
portnum = portnum.lstrip('.').lower()
state = self.set_outlet_state(portnum, 2)
def set_power_on(self, porttype, portnum):
if porttype != 'pwr-nema-5':
raise DriverException('Cannot turn off ports of type: %s' % str(porttype))
portnum = portnum.lstrip('.').lower()
state = self.set_outlet_state(portnum, 1)
def reboot(self, porttype, portnum):
if porttype != 'pwr-nema-5':
raise DriverException('Cannot reboot ports of type: %s' % str(porttype))
portnum = portnum.lstrip('.').lower()
state = self.get_outlet_state(portnum)
nextstate = None
if state == 'off':
nextstate = 1
if state in ('idleOn', 'on', 'wakeOn'):
nextstate = 3
if not nextstate:
raise DriverException('Outlet in unexpected state: %s' % state)
self.set_outlet_state(portnum, nextstate)
|
rongoro/clusto
|
src/clusto/drivers/devices/powerstrips/servertech.py
|
Python
|
bsd-3-clause
| 4,454
|
# Copyright 2017 Ben Walsh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
from . import _cfastcsv
def _check_args(sep, headers, nthreads):
if not isinstance(sep, str):
raise ValueError('Invalid separator %r' % (sep,))
if nthreads is None:
nthreads = multiprocessing.cpu_count()
elif nthreads <= 0:
raise ValueError('Invalid nthreads %s' % nthreads)
nheaders = 1 if headers else 0
return nthreads, nheaders
def load(filename, sep=',', headers=True, nthreads=None, flags=0, col_to_type=None,
missing_int_val=0, missing_float_val=0.0):
if not isinstance(filename, str):
raise ValueError('Invalid filename %r' % (filename,))
nthreads, nheaders = _check_args(sep, headers, nthreads)
return _cfastcsv.parse_file(filename, sep, nthreads, flags,
nheaders, missing_int_val, missing_float_val,
col_to_type)
def loads(s, sep=',', headers=True, nthreads=None, flags=0, col_to_type=None,
missing_int_val=0, missing_float_val=0.0):
nthreads, nheaders = _check_args(sep, headers, nthreads)
return _cfastcsv.parse_csv(s, sep, nthreads, flags,
nheaders, missing_int_val, missing_float_val,
col_to_type)
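# Hedged usage sketch (illustration only): the parsed result comes straight
# from the _cfastcsv C extension, so the exact return structure shown below is
# an assumption, not documented API.
#
#   import camog
#   headers, columns = camog.loads("a,b\n1,2\n3,4\n")   # assumed (headers, columns) pair
#   data = camog.load("data.csv", sep=";", nthreads=4)  # "data.csv" is a placeholder path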
|
walshb/camog
|
camog/_csv.py
|
Python
|
apache-2.0
| 1,839
|
"""
Redis Backends
------------------
Provides backends for talking to `Redis <http://redis.io>`_.
"""
from __future__ import absolute_import
from dogpile.cache.api import CacheBackend, NO_VALUE
from dogpile.cache.compat import pickle, u
redis = None
__all__ = ('RedisBackend',)
class RedisBackend(CacheBackend):
"""A `Redis <http://redis.io/>`_ backend, using the
`redis-py <http://pypi.python.org/pypi/redis/>`_ backend.
Example configuration::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.redis',
arguments = {
'host': 'localhost',
'port': 6379,
'db': 0,
'redis_expiration_time': 60*60*2, # 2 hours
'distributed_lock':True
}
)
Arguments accepted in the arguments dictionary:
:param url: string. If provided, will override separate host/port/db
params. The format is that accepted by ``StrictRedis.from_url()``.
.. versionadded:: 0.4.1
:param host: string, default is ``localhost``.
:param password: string, default is no password.
.. versionadded:: 0.4.1
:param port: integer, default is ``6379``.
:param db: integer, default is ``0``.
:param redis_expiration_time: integer, number of seconds after setting
a value that Redis should expire it. This should be larger than dogpile's
cache expiration. By default no expiration is set.
:param distributed_lock: boolean, when True, will use a
redis-lock as the dogpile lock.
Use this when multiple
processes will be talking to the same redis instance.
When left at False, dogpile will coordinate on a regular
threading mutex.
:param lock_timeout: integer, number of seconds after acquiring a lock that
Redis should expire it. This argument is only valid when
``distributed_lock`` is ``True``.
.. versionadded:: 0.5.0
:param socket_timeout: float, seconds for socket timeout.
Default is None (no timeout).
.. versionadded:: 0.5.4
:param lock_sleep: integer, number of seconds to sleep when failed to
acquire a lock. This argument is only valid when
``distributed_lock`` is ``True``.
.. versionadded:: 0.5.0
:param connection_pool: ``redis.ConnectionPool`` object. If provided,
this object supersedes other connection arguments passed to the
``redis.StrictRedis`` instance, including url and/or host as well as
socket_timeout, and will be passed to ``redis.StrictRedis`` as the
source of connectivity.
.. versionadded:: 0.5.4
"""
def __init__(self, arguments):
self._imports()
self.url = arguments.pop('url', None)
self.host = arguments.pop('host', 'localhost')
self.password = arguments.pop('password', None)
self.port = arguments.pop('port', 6379)
self.db = arguments.pop('db', 0)
self.distributed_lock = arguments.get('distributed_lock', False)
self.socket_timeout = arguments.pop('socket_timeout', None)
self.lock_timeout = arguments.get('lock_timeout', None)
self.lock_sleep = arguments.get('lock_sleep', 0.1)
self.redis_expiration_time = arguments.pop('redis_expiration_time', 0)
self.connection_pool = arguments.get('connection_pool', None)
self.client = self._create_client()
def _imports(self):
# defer imports until backend is used
global redis
import redis
def _create_client(self):
if self.connection_pool is not None:
# the connection pool already has all other connection
# options present within, so here we disregard socket_timeout
# and others.
return redis.StrictRedis(connection_pool=self.connection_pool)
args = {}
if self.socket_timeout:
args['socket_timeout'] = self.socket_timeout
if self.url is not None:
args.update(url=self.url)
return redis.StrictRedis.from_url(**args)
else:
args.update(
host=self.host, password=self.password,
port=self.port, db=self.db
)
return redis.StrictRedis(**args)
def get_mutex(self, key):
if self.distributed_lock:
return self.client.lock(u('_lock{0}').format(key),
self.lock_timeout, self.lock_sleep)
else:
return None
def get(self, key):
value = self.client.get(key)
if value is None:
return NO_VALUE
return pickle.loads(value)
def get_multi(self, keys):
values = self.client.mget(keys)
return [pickle.loads(v) if v is not None else NO_VALUE
for v in values]
def set(self, key, value):
if self.redis_expiration_time:
self.client.setex(key, self.redis_expiration_time,
pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
else:
self.client.set(key, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
def set_multi(self, mapping):
mapping = dict(
(k, pickle.dumps(v, pickle.HIGHEST_PROTOCOL))
for k, v in mapping.items()
)
if not self.redis_expiration_time:
self.client.mset(mapping)
else:
pipe = self.client.pipeline()
for key, value in mapping.items():
pipe.setex(key, self.redis_expiration_time, value)
pipe.execute()
def delete(self, key):
self.client.delete(key)
def delete_multi(self, keys):
self.client.delete(*keys)
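# A minimal usage sketch (assumes a Redis server reachable on localhost:6379):
#
#     from dogpile.cache import make_region
#
#     region = make_region().configure(
#         'dogpile.cache.redis',
#         arguments={'host': 'localhost', 'port': 6379, 'db': 0},
#     )
#     region.set('answer', 42)           # pickled by RedisBackend.set()
#     assert region.get('answer') == 42  # unpickled by RedisBackend.get()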
|
dantebarba/docker-media-server
|
plex/Subliminal.bundle/Contents/Libraries/Shared/dogpile/cache/backends/redis.py
|
Python
|
gpl-3.0
| 5,743
|
import os
import platform
import subprocess
import datetime as dt
import time
import calendar
import sys
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# General Parameters - Tools - Proxy Network - Output Directory
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Path declaration to the motu-client.py opensource-TOOLS to connect to MOTU CopernicusMarineHub.
# Input the 'motu-client.py' absolute path. By default, usually in "Downloads" dir. after having followed the article on "python basic requirements":
# http://marine.copernicus.eu/faq/what-are-the-motu-and-python-requirements/?idpage=169
motu_cl = 'C:/Users/Sam/Downloads/motu-client-python-1.5.00-20180223190259664-bin.tar/motu-client-python/motu-client.py'
# File to log unsuccessful data extraction request(s)
logfile = 'logfile.txt'
# Copernicus Marine API Key - Login Credentials
username_cmems = 'XXX'
password_cmems = 'XXX'
# Proxy Configuration
# Please replace "False" by "True" if you use a proxy to connect to internet and fill in the below variables.
proxy_flag = False
proxy_server_url = "http://your_proxy_address"
proxy_server_port = "8080"
proxy_user_login = "your_proxy_login"
proxy_user_password = "your_proxy_password"
# Output directory name to store the Copernicus Marine data - (do not use whitespace character)
# If only 'copernicus' is given (not in absolute path), then it will be converted automatically into '$HOME/copernicus/'
local_storage_directory_name = 'glorys_data'
# - - - - - - - - - - - - - - - - - - - - - - - - -
# Product(s), Dataset(s) and MOTU server Parameters
# - - - - - - - - - - - - - - - - - - - - - - - - -
# CMEMS MOTU server ID & Service ID
# /!\ To find the information about the motu server name, you can simply rely on the "VIEW SCRIPT" button of the Copernicus Marine Online Catalogue (http://marine.copernicus.eu), using its DataExtraction WebInterface (also called GUI). It will generate the parameters based on your selection/extraction.
# Please refer to this article to understand how to call/trigger this webservice/feature to generate the right parameters : http://marine.copernicus.eu/faq/how-to-write-and-run-the-script-to-download-cmems-products-through-subset-or-direct-download-mechanisms/?idpage=169
# -m MOTU, --motu=MOTU the motu server to use (url)
# -s SERVICE_ID, --service-id=SERVICE_ID
# The service identifier (string)
motu_serv_id = "http://nrtcmems.mercator-ocean.fr/motu-web/Motu"
service_prod_id = "GLOBAL_ANALYSIS_FORECAST_PHY_001_025-TDS"
# CMEMS Dataset ID and Variables
# Define a dict to get {file name(Type_): [variable(-v), dataset(-d)]}
# (more details on how to get these parameters here http://bit.ly/2cUe9dT) - dead link
# /!\ Same comment as above. Please check this article for other examples : http://marine.copernicus.eu/faq/can-you-give-a-few-examples-of-command-lines-to-download/?idpage=169
# I would also highly recommend you to check this one to get an in-depth understanding of how it works
# (/!\ all CMEMS products are NOT hosted by a single server - they are grouped by product family, and you can always rely on the "VIEW SCRIPT" button to get the right parameters)
# -v VARIABLE, --variable=VARIABLE
# The variable (list of strings)
# -d PRODUCT_ID, --product-id=PRODUCT_ID
# The product (data set) to download (string)
dict_id = {"Northward-Velocity_dailymean": \
["-v vo", "-d global-analysis-forecast-phy-001-025"],\
"Temperature_hourly": \
["-v sea_water_potential_temperature", "-d global-analysis-forecast-phy-001-025-hourly-t-u-v-ssh"]
}
# And I can already imagine your next question: What if I'd like to download several variables from different datasets?
# Well, Let's take an example then !
# Let's say that you want to download from the daily dataset global-analysis-forecast-phy-001-024, the salinity, the Sea Surface Height, and the Temperature.
# And you also want to download the same variables (except salinity which is not available) for the hourly dataset global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh.
# Then it will give us the following dictionary :
# (to use it, just uncomment the following lines - deleting the "#" at the beginning)
# dict_id = {"Salinity_daily_": \
# ["-v so", "-d global-analysis-forecast-phy-001-024"], \
# "SeaSurfaceHeight_daily_": \
# ["-v zos", "-d global-analysis-forecast-phy-001-024"], \
# "Temperature_daily_": \
# ["-v thetao", "-d global-analysis-forecast-phy-001-024"], \
# "SeaSurfaceHeight_hourly_": \
# ["-v zos", "-d global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh"], \
# "Temperature_hourly_": \
# ["-v thetao", "-d global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh"], \
# "Eastward-Velocity_dailymean_": \
# ["-v uo", "-d global-analysis-forecast-phy-001-024"]
# }
# - - - - - - - - - - - - - - - - - - - - - -
# Geographical Area Parameters and Timerange
# - - - - - - - - - - - - - - - - - - - - - -
# -y LATITUDE_MIN, --latitude-min=LATITUDE_MIN
# The min latitude (float in the interval [-90 ; 90])
# -Y LATITUDE_MAX, --latitude-max=LATITUDE_MAX
# The max latitude (float in the interval [-90 ; 90])
# -x LONGITUDE_MIN, --longitude-min=LONGITUDE_MIN
# The min longitude (float in the interval [-180 ; 180])
# -X LONGITUDE_MAX, --longitude-max=LONGITUDE_MAX
# The max longitude (float in the interval [-180 ; 180])
# -z DEPTH_MIN, --depth-min=DEPTH_MIN
# The min depth (float in the interval [0 ; 2e31] or
# string 'Surface')
# -Z DEPTH_MAX, --depth-max=DEPTH_MAX
# The max depth (float in the interval [0 ; 2e31] or
# string 'Surface')
# Area : x east-west longitude, y north-south latitude, z depth
xmin_longitude = "-45"
xmax_longitude = "-20"
ymin_latitude = "57"
ymax_latitude = "61"
zmin_depth = "0.494"
zmax_depth = "0.4942"
# Date - Timerange
yyyystart = 2007
mmstart = 1
yyyyend = 2007
mmend = 12
hhstart = " 12:00:00"
hhend = " 12:00:00"
dd = 1
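# Worked example of the monthly chunking performed by the download loop below:
# with yyyystart=2007, mmstart=1 and dd=1, the first request covers
# 2007-01-01 12:00:00 through 2007-01-31 12:00:00; the next one starts on
# 2007-02-01, and so on until yyyyend/mmend.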
# Output files
out_path = r"C:\Users\Sam\Downloads\glorys_data"  # note: unused below; downloads go to local_storage_directory_name
pre_name= "TestPythonExtr_"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Main Program
#
# Motu Client Call through Python Loop
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Specific comment For WINDOWS USER:
# If you're using this script for the first time, you
# shouldn't be worried by the following. Just save your
# script (ctrl + s), quit (alt + F4) and launch it
# (WinKey + R then input cmd then ENTER) by typing
# "C:\Python27\python script_name.py"
#
# For users, be careful if you have to modify the lines below.
# CMEMS Central Service Desk will be happy to help you
# either via email (servicedesk.cmems@mercator-ocean.eu)
# or via the CMEMS Forum (http://bit.ly/1L1Iy5f)
# Get PYTHON PATH depending on OS
if platform.system() == "Windows":
PYTHON = "C:/Python27/ArcGIS10.2/python.exe"
else:
PYTHON = "/usr/bin/python"
# Check motu-client.py file exists
if not os.path.exists(motu_cl):
print "\n[ERROR] Path to motu-client.py cannot be found: %s\n\n[INFO] Please correct value of 'motu_cl' variable."%motu_cl
print "\n\n[INFO] If you haven't downloaded the motu-client-python yet, get the latest version here:\nhttps://github.com/clstoulouse/motu-client-python/releases/latest\n"
sys.exit()
# Check if output directory is well formated and if it exists, otherwise create it
absolute_path_substring = ['/home/', 'C:\\']
if local_storage_directory_name[-1] != '/':
local_storage_directory_name = local_storage_directory_name + "/"
if not any(x in local_storage_directory_name for x in absolute_path_substring):
local_storage_directory_name = os.path.expanduser('~') + "/" + local_storage_directory_name
if not os.path.exists(local_storage_directory_name):
os.makedirs(local_storage_directory_name)
# Flags to let the server clear the buffer - better to be respectful when retrieving OPEN data
buffer_flag = False
cmd_flag = False
# Error handling on dates (to illustrate an if statement)
if yyyystart > yyyyend:
    print "[ERROR] in [Date Parameters]"
    print """Please double check your date parameters, specifically "yyyystart" which is currently greater than "yyyyend"."""
    print """End of data extraction service."""
    sys.exit()
# Other variable definitions to be compatible with deprecated script versions still available on the Internet
pre_name = "CMEMS_" + service_prod_id.split("-")[0] + "_"  # overrides the pre_name placeholder set above
log_cmems = "-u " + username_cmems
pwd_cmems = "-p " + password_cmems
motu_id = "-m " + motu_serv_id
serv_id = "-s " + service_prod_id
pre_fic_cmd = "-f "+ pre_name
out_cmd = "-o " + local_storage_directory_name
proxy_user = "--proxy-user " + proxy_user_login
proxy_pwd = "--proxy-pwd " + proxy_user_password
proxy_server = "--proxy-server " + proxy_server_url + ":" + proxy_server_port
xmin = "-x " + xmin_longitude
xmax = "-X " + xmax_longitude
ymin = "-y " + ymin_latitude
ymax = "-Y " + ymax_latitude
zmin = "-z " + zmin_depth
zmax = "-Z " + zmax_depth
# To illustrate a simple Error Handle to delete a file when desired
#try:
# os.remove(out_cmd.split()[1] + logfile)
#except OSError:
# print ""
print "\n+-----------------------------+\n| ! - CONNECTION TO CMEMS HUB |\n+-----------------------------+\n\n"
# For loop over the datasets of the product, generating one series of download requests per dataset
for key, value in dict_id.iteritems():
    if buffer_flag:
        print "Little pause to let the server clear the buffer, it will automatically resume once it's completed.\nNot mandatory but server-friendly :-)\n"
        time.sleep(2)
        buffer_flag = False
    # Date declaration
    date_start = dt.datetime(yyyystart, mmstart, dd, 0, 0)
    date_end = dt.datetime(yyyyend, mmend, dd, 0, 0)
    # While loop to extract dailymean data, packed by month (Jan., Feb., Mar. etc...),
    # for as many download requests as number of months available in the timerange.
    while date_start <= date_end:
        date_end_cmd = (dt.datetime(date_start.year, date_start.month,
                        calendar.monthrange(date_start.year, date_start.month)[1]))
        date_cmd = ' -t \"' + date_start.strftime("%Y-%m-%d") + hhstart + '\"'\
            + ' -T \"' + date_end_cmd.strftime("%Y-%m-%d") + hhend + '\"'
        fic_cmd = pre_fic_cmd + key + "_" + date_end_cmd.strftime("%Y-%m") + ".nc"
        ficout = pre_name + key + "_" + date_end_cmd.strftime("%Y-%m") + ".nc"
        print "----------------------------------\n- ! - Processing dataset request : %s" % ficout
        print "----------------------------------\n"
        if not os.path.exists(out_cmd.split()[1] + ficout):
            if proxy_flag:
                if not zmin_depth:
                    cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,
                                    motu_id, serv_id, value[1],
                                    xmin, xmax, ymin, ymax,
                                    date_cmd, value[0], out_cmd, fic_cmd,
                                    proxy_server, proxy_user, proxy_pwd, "-q"])
                else:
                    cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,
                                    motu_id, serv_id, value[1],
                                    xmin, xmax, ymin, ymax, zmin, zmax,
                                    date_cmd, value[0], out_cmd, fic_cmd,
                                    proxy_server, proxy_user, proxy_pwd, "-q"])
            else:
                if not zmin_depth:
                    cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,
                                    motu_id, serv_id, value[1],
                                    xmin, xmax, ymin, ymax,
                                    date_cmd, value[0], out_cmd, fic_cmd, "-q"])
                else:
                    cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,
                                    motu_id, serv_id, value[1],
                                    xmin, xmax, ymin, ymax, zmin, zmax,
                                    date_cmd, value[0], out_cmd, fic_cmd, "-q"])
            print "## MOTU API COMMAND ##"
            print cmd
            print "\n[INFO] CMEMS server is checking both your credentials and command syntax. If successful, it will extract the data and create your dataset on the fly. Please wait. \n"
            subpro = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            message, erreur = subpro.communicate()
            stat = subpro.returncode
            if stat != 0:
                print "-- ERROR Download request failed :\n %s" % message
                with open(out_cmd.split()[1] + logfile, 'a') as mylog:
                    mylog.write("Error : %s NOK\nDue to : %s" % (ficout, message))
                if 'HTTP Error 400' in message:
                    print '''[INFO] Copernicus Marine USERNAME ('username_cmems') and/or PASSWORD ('password_cmems') are incorrect.\n\n[INFO] To execute the MOTU API COMMAND from your shell/terminal, please note the following rules:\n
                    On *nix OS, you must use the single quote, otherwise it may expand specific characters.
                    [...] -u 'string' or --user='string' [...]\n
                    On Windows OS, you must use the double quote, because single quotes are treated literally.
                    [...] -p "string" or --pwd="string" [...]\n'''
                    sys.exit()
                if 'HTTP Error 407' in message:
                    print '''[INFO] Proxy Authentication Required to connect to the Central Authentication System https://cmems-cas.cls.fr/cas/login\n\n[INFO] Check the value of proxy_flag (it should be True).\n\n[INFO] Double check your proxy settings:\n  --proxy-server=PROXY_SERVER\n    the proxy server (url)\n  --proxy-user=PROXY_USER\n    the proxy user (string)\n  --proxy-pwd=PROXY_PWD\n    the proxy password (string)\n\n[INFO] If your proxy credentials are correct but your proxy password (string) contains a '@' then replace it by '%40' '''
                    print '''[INFO] This issue is raised due to either a misconfiguration in proxy settings or a network issue. If it persists, please contact your network administrator.'''
                    sys.exit()
                print """[INFO] Failed data extraction has been logged.\n"""
            else:
                if "[ERROR]" in message:
                    print "-- ERROR Downloading command :\n %s" % message
                    with open(out_cmd.split()[1] + logfile, 'a') as mylog:
                        mylog.write("Error : %s NOK\nDue to : %s" % (ficout, message))
                    print """[INFO] Failed data extraction has been logged.\n"""
                else:
                    print "-- MOTU Download successful :\n %s OK\n" % fic_cmd.split()[1]
                    cmd_flag = True
        else:
            print "-- This data for %s has already been downloaded in %s --\n" % (fic_cmd.split()[1], out_cmd.split()[1])
            cmd_flag = False
        date_start = date_end_cmd + dt.timedelta(days=1)
    if cmd_flag:
        buffer_flag = True
        cmd_flag = False
if not os.path.exists(out_cmd.split()[1]+logfile):
print "\n------------------------------------------------\n - ! - Your Copernicus Dataset(s) are located in %s\n------------------------------------------------\n"%(out_cmd.split()[1])
else :
print "## [ERROR] ##"
print "/!\\ Some download requests failed. Please see recommendation in %s%s"%(out_cmd.split()[1], logfile)
print "+---------------------------------------------+\n| ! - CONNECTION TO CMEMS HUB HAS BEEN CLOSED |\n+---------------------------------------------+\n"
#------------------------------------------------- End of Script -----------------------------------------------------
|
HoboSci/OBIS-Capelin
|
Loop_on_date_python_script.py
|
Python
|
mit
| 16,260
|
from __future__ import absolute_import
from Components.VariableValue import VariableValue
from Components.Renderer.Renderer import Renderer
from enigma import eSlider
class Progress(VariableValue, Renderer):
def __init__(self):
Renderer.__init__(self)
VariableValue.__init__(self)
self.__start = 0
self.__end = 100
GUI_WIDGET = eSlider
def changed(self, what):
if what[0] == self.CHANGED_CLEAR:
(self.range, self.value) = ((0, 1), 0)
return
range = self.source.range or 100
value = self.source.value
if value is None:
value = 0
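        # eSlider works on signed 32-bit values, hence the clamp below
        # (assumption inferred from the 2**31-1 limit, not from eSlider docs)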
if range > 2**31-1:
range = 2**31-1
if value > range:
value = range
if value < 0:
value = 0
(self.range, self.value) = ((0, range), value)
def postWidgetCreate(self, instance):
instance.setRange(self.__start, self.__end)
def setRange(self, range):
(self.__start, self.__end) = range
if self.instance is not None:
self.instance.setRange(self.__start, self.__end)
def getRange(self):
return self.__start, self.__end
range = property(getRange, setRange)
|
atvcaptain/enigma2
|
lib/python/Components/Renderer/Progress.py
|
Python
|
gpl-2.0
| 1,048
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.bayesflow.python.ops import monte_carlo_impl as monte_carlo_lib
from tensorflow.contrib.bayesflow.python.ops.monte_carlo_impl import _get_samples
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_diag_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
layers = layers_lib
mc = monte_carlo_lib
class ExpectationImportanceSampleTest(test.TestCase):
def test_normal_integral_mean_and_var_correctly_estimated(self):
n = int(1e6)
with self.test_session():
mu_p = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
mu_q = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
sigma_p = constant_op.constant([0.5, 0.5], dtype=dtypes.float64)
sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
p = normal_lib.Normal(loc=mu_p, scale=sigma_p)
q = normal_lib.Normal(loc=mu_q, scale=sigma_q)
# Compute E_p[X].
e_x = mc.expectation_importance_sampler(
f=lambda x: x, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
# Compute E_p[X^2].
e_x2 = mc.expectation_importance_sampler(
f=math_ops.square, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
stddev = math_ops.sqrt(e_x2 - math_ops.square(e_x))
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
# pass.
# Convergence of mean is +- 0.003 if n = 100M
# Convergence of stddev is +- 0.00001 if n = 100M
self.assertEqual(p.batch_shape, e_x.get_shape())
self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
self.assertAllClose(p.stddev().eval(), stddev.eval(), rtol=0.02)
def test_multivariate_normal_prob_positive_product_of_components(self):
# Test that importance sampling can correctly estimate the probability that
# the product of components in a MultivariateNormal are > 0.
n = 1000
with self.test_session():
p = mvn_diag_lib.MultivariateNormalDiag(
loc=[0.], scale_diag=[1.0, 1.0])
q = mvn_diag_lib.MultivariateNormalDiag(
loc=[0.5], scale_diag=[3., 3.])
# Compute E_p[X_1 * X_2 > 0], with X_i the ith component of X ~ p(x).
# Should equal 1/2 because p is a spherical Gaussian centered at (0, 0).
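      # (By symmetry each quadrant sign pattern of (X_1, X_2) has probability
      # 1/4, and X_1 * X_2 > 0 in exactly two of them: +,+ and -,-.)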
def indicator(x):
x1_times_x2 = math_ops.reduce_prod(x, reduction_indices=[-1])
return 0.5 * (math_ops.sign(x1_times_x2) + 1.0)
prob = mc.expectation_importance_sampler(
f=indicator, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
# pass.
# Convergence is +- 0.004 if n = 100k.
self.assertEqual(p.batch_shape, prob.get_shape())
self.assertAllClose(0.5, prob.eval(), rtol=0.05)
class ExpectationImportanceSampleLogspaceTest(test.TestCase):
def test_normal_distribution_second_moment_estimated_correctly(self):
# Test the importance sampled estimate against an analytical result.
n = int(1e6)
with self.test_session():
mu_p = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
mu_q = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
sigma_p = constant_op.constant([1.0, 2 / 3.], dtype=dtypes.float64)
sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
p = normal_lib.Normal(loc=mu_p, scale=sigma_p)
q = normal_lib.Normal(loc=mu_q, scale=sigma_q)
# Compute E_p[X^2].
# Should equal [1, (2/3)^2]
log_e_x2 = mc.expectation_importance_sampler_logspace(
log_f=lambda x: math_ops.log(math_ops.square(x)),
log_p=p.log_prob,
sampling_dist_q=q,
n=n,
seed=42)
e_x2 = math_ops.exp(log_e_x2)
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
# pass.
self.assertEqual(p.batch_shape, e_x2.get_shape())
self.assertAllClose([1., (2 / 3.)**2], e_x2.eval(), rtol=0.02)
class GetSamplesTest(test.TestCase):
"""Test the private method 'get_samples'."""
def test_raises_if_both_z_and_n_are_none(self):
with self.test_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = None
n = None
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
_get_samples(dist, z, n, seed)
def test_raises_if_both_z_and_n_are_not_none(self):
with self.test_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = dist.sample(seed=42)
n = 1
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
_get_samples(dist, z, n, seed)
def test_returns_n_samples_if_n_provided(self):
with self.test_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = None
n = 10
seed = None
z = _get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
def test_returns_z_if_z_provided(self):
with self.test_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = dist.sample(10, seed=42)
n = None
seed = None
z = _get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
class ExpectationTest(test.TestCase):
def test_works_correctly(self):
with self.test_session() as sess:
x = constant_op.constant([-1e6, -100, -10, -1, 1, 10, 100, 1e6])
p = normal_lib.Normal(loc=x, scale=1.)
      # We use the prefix "efx" to mean "E_p[f(X)]".
f = lambda u: u
efx_true = x
samples = p.sample(int(1e5), seed=1)
efx_reparam = mc.expectation(f, samples, p.log_prob)
efx_score = mc.expectation(f, samples, p.log_prob,
use_reparametrization=False)
[
efx_true_,
efx_reparam_,
efx_score_,
efx_true_grad_,
efx_reparam_grad_,
efx_score_grad_,
] = sess.run([
efx_true,
efx_reparam,
efx_score,
gradients_impl.gradients(efx_true, x)[0],
gradients_impl.gradients(efx_reparam, x)[0],
gradients_impl.gradients(efx_score, x)[0],
])
self.assertAllEqual(np.ones_like(efx_true_grad_), efx_true_grad_)
self.assertAllClose(efx_true_, efx_reparam_, rtol=0.005, atol=0.)
self.assertAllClose(efx_true_, efx_score_, rtol=0.005, atol=0.)
self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),
np.isfinite(efx_reparam_grad_))
self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),
np.isfinite(efx_score_grad_))
self.assertAllClose(efx_true_grad_, efx_reparam_grad_,
rtol=0.03, atol=0.)
# Variance is too high to be meaningful, so we'll only check those which
# converge.
self.assertAllClose(efx_true_grad_[2:-2],
efx_score_grad_[2:-2],
rtol=0.05, atol=0.)
if __name__ == '__main__':
test.main()
|
mixturemodel-flow/tensorflow
|
tensorflow/contrib/bayesflow/python/kernel_tests/monte_carlo_test.py
|
Python
|
apache-2.0
| 8,131
|
import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from ParamSklearn.components.base import \
ParamSklearnPreprocessingAlgorithm
from ParamSklearn.constants import *
class Balancing(ParamSklearnPreprocessingAlgorithm):
def __init__(self, strategy, random_state=None):
self.strategy = strategy
def fit(self, X, y=None):
return self
def transform(self, X):
return X
def get_weights(self, Y, classifier, preprocessor, init_params, fit_params):
if init_params is None:
init_params = {}
if fit_params is None:
fit_params = {}
# Classifiers which require sample weights:
# We can have adaboost in here, because in the fit method,
# the sample weights are normalized:
# https://github.com/scikit-learn/scikit-learn/blob/0.15.X/sklearn/ensemble/weight_boosting.py#L121
clf_ = ['adaboost', 'gradient_boosting']
pre_ = []
if classifier in clf_ or preprocessor in pre_:
if len(Y.shape) > 1:
offsets = [2 ** i for i in range(Y.shape[1])]
Y_ = np.sum(Y * offsets, axis=1)
else:
Y_ = Y
unique, counts = np.unique(Y_, return_counts=True)
cw = 1. / counts
cw = cw / np.mean(cw)
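            # Worked example of this weighting (illustration only):
            # Y_ = [0, 0, 0, 1] -> counts = [3, 1], cw = [1/3, 1];
            # normalising by mean(cw) = 2/3 gives cw = [0.5, 1.5], so each
            # minority-class sample weighs 3x a majority-class sample.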
sample_weights = np.ones(Y_.shape)
for i, ue in enumerate(unique):
mask = Y_ == ue
sample_weights[mask] *= cw[i]
if classifier in clf_:
fit_params['classifier:sample_weight'] = sample_weights
if preprocessor in pre_:
fit_params['preprocessor:sample_weight'] = sample_weights
# Classifiers which can adjust sample weights themselves via the
# argument `class_weight`
clf_ = ['decision_tree', 'extra_trees', 'liblinear_svc',
'libsvm_svc', 'random_forest', 'sgd']
pre_ = ['liblinear_svc_preprocessor',
'extra_trees_preproc_for_classification']
if classifier in clf_:
init_params['classifier:class_weight'] = 'auto'
if preprocessor in pre_:
init_params['preprocessor:class_weight'] = 'auto'
clf_ = ['ridge']
if classifier in clf_:
class_weights = {}
unique, counts = np.unique(Y, return_counts=True)
cw = 1. / counts
cw = cw / np.mean(cw)
for i, ue in enumerate(unique):
class_weights[ue] = cw[i]
if classifier in clf_:
init_params['classifier:class_weight'] = class_weights
return init_params, fit_params
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'Balancing',
'name': 'Balancing Imbalanced Class Distributions',
'handles_missing_values': True,
'handles_nominal_values': True,
'handles_numerical_features': True,
'prefers_data_scaled': False,
'prefers_data_normalized': False,
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA, SIGNED_DATA),
'output': (INPUT,),
'preferred_dtype': None}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
# TODO add replace by zero!
strategy = CategoricalHyperparameter(
"strategy", ["none", "weighting"], default="none")
cs = ConfigurationSpace()
cs.add_hyperparameter(strategy)
return cs
def __str__(self):
name = self.get_properties()['name']
return "ParamSklearn %s" % name
|
automl/paramsklearn
|
ParamSklearn/components/data_preprocessing/balancing.py
|
Python
|
bsd-3-clause
| 4,086
|
"""
Support for the worldtides.info API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.worldtidesinfo/
"""
from datetime import timedelta
import logging
import time
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_ATTRIBUTION = "Data provided by WorldTides"
DEFAULT_NAME = 'WorldTidesInfo'
SCAN_INTERVAL = timedelta(seconds=3600)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the WorldTidesInfo sensor."""
name = config.get(CONF_NAME)
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
key = config.get(CONF_API_KEY)
    if None in (lat, lon):
        _LOGGER.error("Latitude or longitude not set in Home Assistant config")
        return
tides = WorldTidesInfoSensor(name, lat, lon, key)
tides.update()
if tides.data.get('error') == 'No location found':
_LOGGER.error("Location not available")
return
add_entities([tides])
class WorldTidesInfoSensor(Entity):
"""Representation of a WorldTidesInfo sensor."""
def __init__(self, name, lat, lon, key):
"""Initialize the sensor."""
self._name = name
self._lat = lat
self._lon = lon
self._key = key
self.data = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of this device."""
        attr = {ATTR_ATTRIBUTION: CONF_ATTRIBUTION}
        if not self.data or 'extremes' not in self.data:
            return attr
        if 'High' in str(self.data['extremes'][0]['type']):
attr['high_tide_time_utc'] = self.data['extremes'][0]['date']
attr['high_tide_height'] = self.data['extremes'][0]['height']
attr['low_tide_time_utc'] = self.data['extremes'][1]['date']
attr['low_tide_height'] = self.data['extremes'][1]['height']
elif 'Low' in str(self.data['extremes'][0]['type']):
attr['high_tide_time_utc'] = self.data['extremes'][1]['date']
attr['high_tide_height'] = self.data['extremes'][1]['height']
attr['low_tide_time_utc'] = self.data['extremes'][0]['date']
attr['low_tide_height'] = self.data['extremes'][0]['height']
return attr
@property
def state(self):
"""Return the state of the device."""
if self.data:
if 'High' in str(self.data['extremes'][0]['type']):
tidetime = time.strftime('%I:%M %p', time.localtime(
self.data['extremes'][0]['dt']))
return "High tide at {}".format(tidetime)
if 'Low' in str(self.data['extremes'][0]['type']):
tidetime = time.strftime('%I:%M %p', time.localtime(
self.data['extremes'][0]['dt']))
return "Low tide at {}".format(tidetime)
return None
return None
def update(self):
"""Get the latest data from WorldTidesInfo API."""
start = int(time.time())
resource = ('https://www.worldtides.info/api?extremes&length=86400'
'&key={}&lat={}&lon={}&start={}').format(
self._key, self._lat, self._lon, start)
try:
self.data = requests.get(resource, timeout=10).json()
_LOGGER.debug("Data: %s", self.data)
_LOGGER.debug(
"Tide data queried with start time set to: %s", start)
except ValueError as err:
_LOGGER.error(
"Error retrieving data from WorldTidesInfo: %s", err.args)
self.data = None
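# Hedged example configuration.yaml entry for this platform (all values are
# placeholders; the keys follow the PLATFORM_SCHEMA defined above):
#
#   sensor:
#     - platform: worldtidesinfo
#       api_key: YOUR_API_KEY
#       latitude: 48.86
#       longitude: 2.35
#       name: WorldTidesInfo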
|
tinloaf/home-assistant
|
homeassistant/components/sensor/worldtidesinfo.py
|
Python
|
apache-2.0
| 4,233
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_account(osv.Model):
_inherit = "account.analytic.account"
_columns = {
"salesman_id" : fields.many2one("res.users", "Salesman")
}
|
funkring/fdoo
|
addons-funkring/commission/account_analytic.py
|
Python
|
agpl-3.0
| 1,146
|
#!/usr/bin/env python
"""
fs.tests: testcases for the fs module
"""
from __future__ import with_statement
# Send any output from the logging module to stdout, so it will
# be captured by nose and reported appropriately
import sys
import logging
logging.basicConfig(level=logging.ERROR, stream=sys.stdout)
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.filelike import StringIO
import datetime
import unittest
import os
import os.path
import pickle
import random
import copy
import time
try:
import threading
except ImportError:
import dummy_threading as threading
import six
from six import PY3, b
class FSTestCases(object):
"""Base suite of testcases for filesystem implementations.
Any FS subclass should be capable of passing all of these tests.
To apply the tests to your own FS implementation, simply use FSTestCase
as a mixin for your own unittest.TestCase subclass and have the setUp
method set self.fs to an instance of your FS implementation.
NB. The Filesystem being tested must have a capacity of at least 3MB.
This class is designed as a mixin so that it's not detected by test
loading tools such as nose.
"""
def check(self, p):
"""Check that a file exists within self.fs"""
return self.fs.exists(p)
def test_invalid_chars(self):
"""Check paths validate ok"""
        # Will have to be overridden selectively for custom validatepath methods
self.assertEqual(self.fs.validatepath(''), None)
self.assertEqual(self.fs.validatepath('.foo'), None)
self.assertEqual(self.fs.validatepath('foo'), None)
self.assertEqual(self.fs.validatepath('foo/bar'), None)
self.assert_(self.fs.isvalidpath('foo/bar'))
def test_meta(self):
"""Checks getmeta / hasmeta are functioning"""
# getmeta / hasmeta are hard to test, since there is no way to validate
# the implementation's response
meta_names = ["read_only",
"network",
"unicode_paths"]
stupid_meta = 'thismetashouldnotexist!"r$$%^&&*()_+'
self.assertRaises(NoMetaError, self.fs.getmeta, stupid_meta)
self.assertFalse(self.fs.hasmeta(stupid_meta))
self.assertEquals(None, self.fs.getmeta(stupid_meta, None))
self.assertEquals(3.14, self.fs.getmeta(stupid_meta, 3.14))
for meta_name in meta_names:
try:
meta = self.fs.getmeta(meta_name)
self.assertTrue(self.fs.hasmeta(meta_name))
except NoMetaError:
self.assertFalse(self.fs.hasmeta(meta_name))
def test_root_dir(self):
self.assertTrue(self.fs.isdir(""))
self.assertTrue(self.fs.isdir("/"))
# These may be false (e.g. empty dict) but mustn't raise errors
self.fs.getinfo("")
self.assertTrue(self.fs.getinfo("/") is not None)
def test_getsyspath(self):
try:
syspath = self.fs.getsyspath("/")
except NoSysPathError:
pass
else:
self.assertTrue(isinstance(syspath, unicode))
syspath = self.fs.getsyspath("/", allow_none=True)
if syspath is not None:
self.assertTrue(isinstance(syspath, unicode))
def test_debug(self):
str(self.fs)
repr(self.fs)
self.assert_(hasattr(self.fs, 'desc'))
def test_open_on_directory(self):
self.fs.makedir("testdir")
try:
f = self.fs.open("testdir")
except ResourceInvalidError:
pass
        except Exception:
            ecls = sys.exc_info()[0]
            assert False, "%s raised instead of ResourceInvalidError" % (ecls,)
else:
f.close()
assert False, "ResourceInvalidError was not raised"
def test_writefile(self):
self.assertRaises(ResourceNotFoundError, self.fs.open, "test1.txt")
f = self.fs.open("test1.txt", "wb")
f.write(b("testing"))
f.close()
self.assertTrue(self.check("test1.txt"))
f = self.fs.open("test1.txt", "rb")
self.assertEquals(f.read(), b("testing"))
f.close()
f = self.fs.open("test1.txt", "wb")
f.write(b("test file overwrite"))
f.close()
self.assertTrue(self.check("test1.txt"))
f = self.fs.open("test1.txt", "rb")
self.assertEquals(f.read(), b("test file overwrite"))
f.close()
def test_createfile(self):
test = b('now with content')
self.fs.createfile("test.txt")
self.assert_(self.fs.exists("test.txt"))
self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
self.fs.setcontents("test.txt", test)
self.fs.createfile("test.txt")
self.assertEqual(self.fs.getcontents("test.txt", "rb"), test)
self.fs.createfile("test.txt", wipe=True)
self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
def test_setcontents(self):
# setcontents() should accept both a string...
self.fs.setcontents("hello", b("world"))
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents("hello", StringIO(b("to you, good sir!")))
self.assertEquals(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
# setcontents() should accept both a string...
self.fs.setcontents("hello", b("world"), chunk_size=2)
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents("hello", StringIO(
b("to you, good sir!")), chunk_size=2)
self.assertEquals(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
self.fs.setcontents("hello", b(""))
self.assertEquals(self.fs.getcontents("hello", "rb"), b(""))
def test_setcontents_async(self):
# setcontents() should accept both a string...
self.fs.setcontents_async("hello", b("world")).wait()
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents_async("hello", StringIO(
b("to you, good sir!"))).wait()
self.assertEquals(self.fs.getcontents("hello"), b("to you, good sir!"))
self.fs.setcontents_async("hello", b("world"), chunk_size=2).wait()
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents_async("hello", StringIO(
b("to you, good sir!")), chunk_size=2).wait()
self.assertEquals(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
def test_isdir_isfile(self):
self.assertFalse(self.fs.exists("dir1"))
self.assertFalse(self.fs.isdir("dir1"))
self.assertFalse(self.fs.isfile("a.txt"))
self.fs.setcontents("a.txt", b(''))
self.assertFalse(self.fs.isdir("dir1"))
self.assertTrue(self.fs.exists("a.txt"))
self.assertTrue(self.fs.isfile("a.txt"))
self.assertFalse(self.fs.exists("a.txt/thatsnotadir"))
self.fs.makedir("dir1")
self.assertTrue(self.fs.isdir("dir1"))
self.assertTrue(self.fs.exists("dir1"))
self.assertTrue(self.fs.exists("a.txt"))
self.fs.remove("a.txt")
self.assertFalse(self.fs.exists("a.txt"))
def test_listdir(self):
def check_unicode(items):
for item in items:
self.assertTrue(isinstance(item, unicode))
self.fs.setcontents(u"a", b(''))
self.fs.setcontents("b", b(''))
self.fs.setcontents("foo", b(''))
self.fs.setcontents("bar", b(''))
# Test listing of the root directory
d1 = self.fs.listdir()
self.assertEqual(len(d1), 4)
self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdir("")
self.assertEqual(len(d1), 4)
self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdir("/")
self.assertEqual(len(d1), 4)
check_unicode(d1)
# Test listing absolute paths
d2 = self.fs.listdir(absolute=True)
self.assertEqual(len(d2), 4)
self.assertEqual(sorted(d2), [u"/a", u"/b", u"/bar", u"/foo"])
check_unicode(d2)
# Create some deeper subdirectories, to make sure their
        # contents are not inadvertently included
self.fs.makedir("p/1/2/3", recursive=True)
self.fs.setcontents("p/1/2/3/a", b(''))
self.fs.setcontents("p/1/2/3/b", b(''))
self.fs.setcontents("p/1/2/3/foo", b(''))
self.fs.setcontents("p/1/2/3/bar", b(''))
self.fs.makedir("q")
# Test listing just files, just dirs, and wildcards
dirs_only = self.fs.listdir(dirs_only=True)
files_only = self.fs.listdir(files_only=True)
contains_a = self.fs.listdir(wildcard="*a*")
self.assertEqual(sorted(dirs_only), [u"p", u"q"])
self.assertEqual(sorted(files_only), [u"a", u"b", u"bar", u"foo"])
self.assertEqual(sorted(contains_a), [u"a", u"bar"])
check_unicode(dirs_only)
check_unicode(files_only)
check_unicode(contains_a)
# Test listing a subdirectory
d3 = self.fs.listdir("p/1/2/3")
self.assertEqual(len(d3), 4)
self.assertEqual(sorted(d3), [u"a", u"b", u"bar", u"foo"])
check_unicode(d3)
        # Test listing a subdirectory with absolute and full paths
d4 = self.fs.listdir("p/1/2/3", absolute=True)
self.assertEqual(len(d4), 4)
self.assertEqual(sorted(d4), [u"/p/1/2/3/a", u"/p/1/2/3/b", u"/p/1/2/3/bar", u"/p/1/2/3/foo"])
check_unicode(d4)
d4 = self.fs.listdir("p/1/2/3", full=True)
self.assertEqual(len(d4), 4)
self.assertEqual(sorted(d4), [u"p/1/2/3/a", u"p/1/2/3/b", u"p/1/2/3/bar", u"p/1/2/3/foo"])
check_unicode(d4)
# Test that appropriate errors are raised
self.assertRaises(ResourceNotFoundError, self.fs.listdir, "zebra")
self.assertRaises(ResourceInvalidError, self.fs.listdir, "foo")
def test_listdirinfo(self):
def check_unicode(items):
for (nm, info) in items:
self.assertTrue(isinstance(nm, unicode))
def check_equal(items, target):
names = [nm for (nm, info) in items]
self.assertEqual(sorted(names), sorted(target))
self.fs.setcontents(u"a", b(''))
self.fs.setcontents("b", b(''))
self.fs.setcontents("foo", b(''))
self.fs.setcontents("bar", b(''))
# Test listing of the root directory
d1 = self.fs.listdirinfo()
self.assertEqual(len(d1), 4)
check_equal(d1, [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdirinfo("")
self.assertEqual(len(d1), 4)
check_equal(d1, [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdirinfo("/")
self.assertEqual(len(d1), 4)
check_equal(d1, [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
# Test listing absolute paths
d2 = self.fs.listdirinfo(absolute=True)
self.assertEqual(len(d2), 4)
check_equal(d2, [u"/a", u"/b", u"/bar", u"/foo"])
check_unicode(d2)
# Create some deeper subdirectories, to make sure their
        # contents are not inadvertently included
self.fs.makedir("p/1/2/3", recursive=True)
self.fs.setcontents("p/1/2/3/a", b(''))
self.fs.setcontents("p/1/2/3/b", b(''))
self.fs.setcontents("p/1/2/3/foo", b(''))
self.fs.setcontents("p/1/2/3/bar", b(''))
self.fs.makedir("q")
# Test listing just files, just dirs, and wildcards
dirs_only = self.fs.listdirinfo(dirs_only=True)
files_only = self.fs.listdirinfo(files_only=True)
contains_a = self.fs.listdirinfo(wildcard="*a*")
check_equal(dirs_only, [u"p", u"q"])
check_equal(files_only, [u"a", u"b", u"bar", u"foo"])
check_equal(contains_a, [u"a", u"bar"])
check_unicode(dirs_only)
check_unicode(files_only)
check_unicode(contains_a)
# Test listing a subdirectory
d3 = self.fs.listdirinfo("p/1/2/3")
self.assertEqual(len(d3), 4)
check_equal(d3, [u"a", u"b", u"bar", u"foo"])
check_unicode(d3)
        # Test listing a subdirectory with absolute and full paths
d4 = self.fs.listdirinfo("p/1/2/3", absolute=True)
self.assertEqual(len(d4), 4)
check_equal(d4, [u"/p/1/2/3/a", u"/p/1/2/3/b", u"/p/1/2/3/bar", u"/p/1/2/3/foo"])
check_unicode(d4)
d4 = self.fs.listdirinfo("p/1/2/3", full=True)
self.assertEqual(len(d4), 4)
check_equal(d4, [u"p/1/2/3/a", u"p/1/2/3/b", u"p/1/2/3/bar", u"p/1/2/3/foo"])
check_unicode(d4)
# Test that appropriate errors are raised
self.assertRaises(ResourceNotFoundError, self.fs.listdirinfo, "zebra")
self.assertRaises(ResourceInvalidError, self.fs.listdirinfo, "foo")
def test_walk(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
sorted_walk = sorted([(d, sorted(fs)) for (d, fs) in self.fs.walk()])
self.assertEquals(sorted_walk,
[("/", ["a.txt", "b.txt"]),
("/foo", ["c"])])
# When searching breadth-first, shallow entries come first
found_a = False
for _, files in self.fs.walk(search="breadth"):
if "a.txt" in files:
found_a = True
if "c" in files:
break
assert found_a, "breadth search order was wrong"
# When searching depth-first, deep entries come first
found_c = False
for _, files in self.fs.walk(search="depth"):
if "c" in files:
found_c = True
if "a.txt" in files:
break
assert found_c, "depth search order was wrong: " + \
str(list(self.fs.walk(search="depth")))
def test_walk_wildcard(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
self.fs.makeopendir('.svn').setcontents('ignored', b(''))
for dir_path, paths in self.fs.walk(wildcard='*.txt'):
for path in paths:
self.assert_(path.endswith('.txt'))
for dir_path, paths in self.fs.walk(wildcard=lambda fn: fn.endswith('.txt')):
for path in paths:
self.assert_(path.endswith('.txt'))
def test_walk_dir_wildcard(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
self.fs.makeopendir('.svn').setcontents('ignored', b(''))
for dir_path, paths in self.fs.walk(dir_wildcard=lambda fn: not fn.endswith('.svn')):
for path in paths:
self.assert_('.svn' not in path)
def test_walkfiles(self):
self.fs.makeopendir('bar').setcontents('a.txt', b('123'))
self.fs.makeopendir('foo').setcontents('b', b('123'))
self.assertEquals(sorted(
self.fs.walkfiles()), ["/bar/a.txt", "/foo/b"])
self.assertEquals(sorted(self.fs.walkfiles(
dir_wildcard="*foo*")), ["/foo/b"])
self.assertEquals(sorted(self.fs.walkfiles(
wildcard="*.txt")), ["/bar/a.txt"])
def test_walkdirs(self):
self.fs.makeopendir('bar').setcontents('a.txt', b('123'))
self.fs.makeopendir('foo').makeopendir(
"baz").setcontents('b', b('123'))
self.assertEquals(sorted(self.fs.walkdirs()), [
"/", "/bar", "/foo", "/foo/baz"])
self.assertEquals(sorted(self.fs.walkdirs(
wildcard="*foo*")), ["/", "/foo", "/foo/baz"])
def test_unicode(self):
alpha = u"\N{GREEK SMALL LETTER ALPHA}"
beta = u"\N{GREEK SMALL LETTER BETA}"
self.fs.makedir(alpha)
self.fs.setcontents(alpha + "/a", b(''))
self.fs.setcontents(alpha + "/" + beta, b(''))
self.assertTrue(self.check(alpha))
self.assertEquals(sorted(self.fs.listdir(alpha)), ["a", beta])
def test_makedir(self):
check = self.check
self.fs.makedir("a")
self.assertTrue(check("a"))
self.assertRaises(
ParentDirectoryMissingError, self.fs.makedir, "a/b/c")
self.fs.makedir("a/b/c", recursive=True)
self.assert_(check("a/b/c"))
self.fs.makedir("foo/bar/baz", recursive=True)
self.assert_(check("foo/bar/baz"))
self.fs.makedir("a/b/child")
self.assert_(check("a/b/child"))
self.assertRaises(DestinationExistsError, self.fs.makedir, "/a/b")
self.fs.makedir("/a/b", allow_recreate=True)
self.fs.setcontents("/a/file", b(''))
self.assertRaises(ResourceInvalidError, self.fs.makedir, "a/file")
def test_remove(self):
self.fs.setcontents("a.txt", b(''))
self.assertTrue(self.check("a.txt"))
self.fs.remove("a.txt")
self.assertFalse(self.check("a.txt"))
self.assertRaises(ResourceNotFoundError, self.fs.remove, "a.txt")
self.fs.makedir("dir1")
self.assertRaises(ResourceInvalidError, self.fs.remove, "dir1")
self.fs.setcontents("/dir1/a.txt", b(''))
self.assertTrue(self.check("dir1/a.txt"))
self.fs.remove("dir1/a.txt")
self.assertFalse(self.check("/dir1/a.txt"))
def test_removedir(self):
check = self.check
self.fs.makedir("a")
self.assert_(check("a"))
self.fs.removedir("a")
self.assertRaises(ResourceNotFoundError, self.fs.removedir, "a")
self.assert_(not check("a"))
self.fs.makedir("a/b/c/d", recursive=True)
self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "a/b")
self.fs.removedir("a/b/c/d")
self.assert_(not check("a/b/c/d"))
self.fs.removedir("a/b/c")
self.assert_(not check("a/b/c"))
self.fs.removedir("a/b")
self.assert_(not check("a/b"))
# Test recursive removal of empty parent dirs
self.fs.makedir("foo/bar/baz", recursive=True)
self.fs.removedir("foo/bar/baz", recursive=True)
self.assert_(not check("foo/bar/baz"))
self.assert_(not check("foo/bar"))
self.assert_(not check("foo"))
self.fs.makedir("foo/bar/baz", recursive=True)
self.fs.setcontents("foo/file.txt", b("please don't delete me"))
self.fs.removedir("foo/bar/baz", recursive=True)
self.assert_(not check("foo/bar/baz"))
self.assert_(not check("foo/bar"))
self.assert_(check("foo/file.txt"))
# Ensure that force=True works as expected
self.fs.makedir("frollic/waggle", recursive=True)
self.fs.setcontents("frollic/waddle.txt", b("waddlewaddlewaddle"))
self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "frollic")
self.assertRaises(
ResourceInvalidError, self.fs.removedir, "frollic/waddle.txt")
self.fs.removedir("frollic", force=True)
self.assert_(not check("frollic"))
# Test removing unicode dirs
kappa = u"\N{GREEK CAPITAL LETTER KAPPA}"
self.fs.makedir(kappa)
self.assert_(self.fs.isdir(kappa))
self.fs.removedir(kappa)
self.assertRaises(ResourceNotFoundError, self.fs.removedir, kappa)
self.assert_(not self.fs.isdir(kappa))
self.fs.makedir(pathjoin("test", kappa), recursive=True)
self.assert_(check(pathjoin("test", kappa)))
self.fs.removedir("test", force=True)
self.assert_(not check("test"))
def test_rename(self):
check = self.check
# test renaming a file in the same directory
self.fs.setcontents("foo.txt", b("Hello, World!"))
self.assert_(check("foo.txt"))
self.fs.rename("foo.txt", "bar.txt")
self.assert_(check("bar.txt"))
self.assert_(not check("foo.txt"))
# test renaming a directory in the same directory
self.fs.makedir("dir_a")
self.fs.setcontents("dir_a/test.txt", b("testerific"))
self.assert_(check("dir_a"))
self.fs.rename("dir_a", "dir_b")
self.assert_(check("dir_b"))
self.assert_(check("dir_b/test.txt"))
self.assert_(not check("dir_a/test.txt"))
self.assert_(not check("dir_a"))
# test renaming a file into a different directory
self.fs.makedir("dir_a")
self.fs.rename("dir_b/test.txt", "dir_a/test.txt")
self.assert_(not check("dir_b/test.txt"))
self.assert_(check("dir_a/test.txt"))
# test renaming a file into a non-existent directory
self.assertRaises(ParentDirectoryMissingError,
self.fs.rename, "dir_a/test.txt", "nonexistent/test.txt")
def test_info(self):
test_str = b("Hello, World!")
self.fs.setcontents("info.txt", test_str)
info = self.fs.getinfo("info.txt")
self.assertEqual(info['size'], len(test_str))
self.fs.desc("info.txt")
self.assertRaises(ResourceNotFoundError, self.fs.getinfo, "notafile")
self.assertRaises(
ResourceNotFoundError, self.fs.getinfo, "info.txt/inval")
def test_infokeys(self):
test_str = b("Hello, World!")
self.fs.setcontents("info.txt", test_str)
info = self.fs.getinfo("info.txt")
for k, v in info.iteritems():
self.assertEqual(self.fs.getinfokeys('info.txt', k), {k: v})
test_info = {}
if 'modified_time' in info:
test_info['modified_time'] = info['modified_time']
if 'size' in info:
test_info['size'] = info['size']
self.assertEqual(self.fs.getinfokeys('info.txt', 'size', 'modified_time'), test_info)
self.assertEqual(self.fs.getinfokeys('info.txt', 'thiscantpossiblyexistininfo'), {})
def test_getsize(self):
test_str = b("*") * 23
self.fs.setcontents("info.txt", test_str)
size = self.fs.getsize("info.txt")
self.assertEqual(size, len(test_str))
def test_movefile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
def checkcontents(path):
check_contents = self.fs.getcontents(path, "rb")
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("foo/bar", recursive=True)
makefile("foo/bar/a.txt")
self.assert_(check("foo/bar/a.txt"))
self.assert_(checkcontents("foo/bar/a.txt"))
self.fs.move("foo/bar/a.txt", "foo/b.txt")
self.assert_(not check("foo/bar/a.txt"))
self.assert_(check("foo/b.txt"))
self.assert_(checkcontents("foo/b.txt"))
self.fs.move("foo/b.txt", "c.txt")
self.assert_(not check("foo/b.txt"))
self.assert_(check("/c.txt"))
self.assert_(checkcontents("/c.txt"))
makefile("foo/bar/a.txt")
self.assertRaises(
DestinationExistsError, self.fs.move, "foo/bar/a.txt", "/c.txt")
self.assert_(check("foo/bar/a.txt"))
self.assert_(check("/c.txt"))
self.fs.move("foo/bar/a.txt", "/c.txt", overwrite=True)
self.assert_(not check("foo/bar/a.txt"))
self.assert_(check("/c.txt"))
def test_movedir(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
self.assertRaises(ResourceNotFoundError, self.fs.movedir, "a", "b")
self.fs.makedir("a")
self.fs.makedir("b")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/3.txt")
self.fs.makedir("a/foo/bar", recursive=True)
makefile("a/foo/bar/baz.txt")
self.fs.movedir("a", "copy of a")
self.assert_(self.fs.isdir("copy of a"))
self.assert_(check("copy of a/1.txt"))
self.assert_(check("copy of a/2.txt"))
self.assert_(check("copy of a/3.txt"))
self.assert_(check("copy of a/foo/bar/baz.txt"))
self.assert_(not check("a/1.txt"))
self.assert_(not check("a/2.txt"))
self.assert_(not check("a/3.txt"))
self.assert_(not check("a/foo/bar/baz.txt"))
self.assert_(not check("a/foo/bar"))
self.assert_(not check("a/foo"))
self.assert_(not check("a"))
self.fs.makedir("a")
self.assertRaises(
DestinationExistsError, self.fs.movedir, "copy of a", "a")
self.fs.movedir("copy of a", "a", overwrite=True)
self.assert_(not check("copy of a"))
self.assert_(check("a/1.txt"))
self.assert_(check("a/2.txt"))
self.assert_(check("a/3.txt"))
self.assert_(check("a/foo/bar/baz.txt"))
def test_cant_copy_from_os(self):
sys_executable = os.path.abspath(os.path.realpath(sys.executable))
self.assertRaises(FSError, self.fs.copy, sys_executable, "py.exe")
def test_copyfile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path, contents=contents):
self.fs.setcontents(path, contents)
def checkcontents(path, contents=contents):
check_contents = self.fs.getcontents(path, "rb")
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("foo/bar", recursive=True)
makefile("foo/bar/a.txt")
self.assert_(check("foo/bar/a.txt"))
self.assert_(checkcontents("foo/bar/a.txt"))
# import rpdb2; rpdb2.start_embedded_debugger('password');
self.fs.copy("foo/bar/a.txt", "foo/b.txt")
self.assert_(check("foo/bar/a.txt"))
self.assert_(check("foo/b.txt"))
self.assert_(checkcontents("foo/bar/a.txt"))
self.assert_(checkcontents("foo/b.txt"))
self.fs.copy("foo/b.txt", "c.txt")
self.assert_(check("foo/b.txt"))
self.assert_(check("/c.txt"))
self.assert_(checkcontents("/c.txt"))
makefile("foo/bar/a.txt", b("different contents"))
self.assert_(checkcontents("foo/bar/a.txt", b("different contents")))
self.assertRaises(
DestinationExistsError, self.fs.copy, "foo/bar/a.txt", "/c.txt")
self.assert_(checkcontents("/c.txt"))
self.fs.copy("foo/bar/a.txt", "/c.txt", overwrite=True)
self.assert_(checkcontents("foo/bar/a.txt", b("different contents")))
self.assert_(checkcontents("/c.txt", b("different contents")))
def test_copydir(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
def checkcontents(path):
check_contents = self.fs.getcontents(path)
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("a")
self.fs.makedir("b")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/3.txt")
self.fs.makedir("a/foo/bar", recursive=True)
makefile("a/foo/bar/baz.txt")
self.fs.copydir("a", "copy of a")
self.assert_(check("copy of a/1.txt"))
self.assert_(check("copy of a/2.txt"))
self.assert_(check("copy of a/3.txt"))
self.assert_(check("copy of a/foo/bar/baz.txt"))
checkcontents("copy of a/1.txt")
self.assert_(check("a/1.txt"))
self.assert_(check("a/2.txt"))
self.assert_(check("a/3.txt"))
self.assert_(check("a/foo/bar/baz.txt"))
checkcontents("a/1.txt")
self.assertRaises(DestinationExistsError, self.fs.copydir, "a", "b")
self.fs.copydir("a", "b", overwrite=True)
self.assert_(check("b/1.txt"))
self.assert_(check("b/2.txt"))
self.assert_(check("b/3.txt"))
self.assert_(check("b/foo/bar/baz.txt"))
checkcontents("b/1.txt")
def test_copydir_with_dotfile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
self.fs.makedir("a")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/.hidden.txt")
self.fs.copydir("a", "copy of a")
self.assert_(check("copy of a/1.txt"))
self.assert_(check("copy of a/2.txt"))
self.assert_(check("copy of a/.hidden.txt"))
self.assert_(check("a/1.txt"))
self.assert_(check("a/2.txt"))
self.assert_(check("a/.hidden.txt"))
def test_readwriteappendseek(self):
def checkcontents(path, check_contents):
read_contents = self.fs.getcontents(path, "rb")
self.assertEqual(read_contents, check_contents)
return read_contents == check_contents
test_strings = [b("Beautiful is better than ugly."),
b("Explicit is better than implicit."),
b("Simple is better than complex.")]
all_strings = b("").join(test_strings)
self.assertRaises(ResourceNotFoundError, self.fs.open, "a.txt", "r")
self.assert_(not self.fs.exists("a.txt"))
f1 = self.fs.open("a.txt", "wb")
pos = 0
for s in test_strings:
f1.write(s)
pos += len(s)
self.assertEqual(pos, f1.tell())
f1.close()
self.assert_(self.fs.exists("a.txt"))
self.assert_(checkcontents("a.txt", all_strings))
f2 = self.fs.open("b.txt", "wb")
f2.write(test_strings[0])
f2.close()
self.assert_(checkcontents("b.txt", test_strings[0]))
f3 = self.fs.open("b.txt", "ab")
# On win32, tell() gives zero until you actually write to the file
# self.assertEquals(f3.tell(),len(test_strings[0]))
f3.write(test_strings[1])
self.assertEquals(f3.tell(), len(test_strings[0])+len(test_strings[1]))
f3.write(test_strings[2])
self.assertEquals(f3.tell(), len(all_strings))
f3.close()
self.assert_(checkcontents("b.txt", all_strings))
f4 = self.fs.open("b.txt", "wb")
f4.write(test_strings[2])
f4.close()
self.assert_(checkcontents("b.txt", test_strings[2]))
f5 = self.fs.open("c.txt", "wb")
for s in test_strings:
f5.write(s+b("\n"))
f5.close()
f6 = self.fs.open("c.txt", "rb")
for s, t in zip(f6, test_strings):
self.assertEqual(s, t+b("\n"))
f6.close()
f7 = self.fs.open("c.txt", "rb")
f7.seek(13)
word = f7.read(6)
self.assertEqual(word, b("better"))
f7.seek(1, os.SEEK_CUR)
word = f7.read(4)
self.assertEqual(word, b("than"))
f7.seek(-9, os.SEEK_END)
word = f7.read(7)
self.assertEqual(word, b("complex"))
f7.close()
self.assertEqual(self.fs.getcontents("a.txt", "rb"), all_strings)
def test_truncate(self):
def checkcontents(path, check_contents):
read_contents = self.fs.getcontents(path, "rb")
self.assertEqual(read_contents, check_contents)
return read_contents == check_contents
self.fs.setcontents("hello", b("world"))
checkcontents("hello", b("world"))
self.fs.setcontents("hello", b("hi"))
checkcontents("hello", b("hi"))
self.fs.setcontents("hello", b("1234567890"))
checkcontents("hello", b("1234567890"))
with self.fs.open("hello", "rb+") as f:
f.truncate(7)
checkcontents("hello", b("1234567"))
with self.fs.open("hello", "rb+") as f:
f.seek(5)
f.truncate()
checkcontents("hello", b("12345"))
def test_truncate_to_larger_size(self):
with self.fs.open("hello", "wb") as f:
f.truncate(30)
self.assertEquals(self.fs.getsize("hello"), 30)
# Some file systems (FTPFS) don't support both reading and writing
if self.fs.getmeta('file.read_and_write', True):
with self.fs.open("hello", "rb+") as f:
f.seek(25)
f.write(b("123456"))
with self.fs.open("hello", "rb") as f:
f.seek(25)
self.assertEquals(f.read(), b("123456"))
def test_write_past_end_of_file(self):
if self.fs.getmeta('file.read_and_write', True):
with self.fs.open("write_at_end", "wb") as f:
f.seek(25)
f.write(b("EOF"))
with self.fs.open("write_at_end", "rb") as f:
self.assertEquals(f.read(), b("\x00")*25 + b("EOF"))
def test_with_statement(self):
# This is a little tricky since 'with' is actually new syntax.
# We use eval() to make this method safe for old python versions.
import sys
        if sys.version_info >= (2, 5):
# A successful 'with' statement
contents = "testing the with statement"
code = "from __future__ import with_statement\n"
code += "with self.fs.open('f.txt','wb-') as testfile:\n"
code += " testfile.write(contents)\n"
code += "self.assertEquals(self.fs.getcontents('f.txt', 'rb'),contents)"
code = compile(code, "<string>", 'exec')
eval(code)
# A 'with' statement raising an error
contents = "testing the with statement"
code = "from __future__ import with_statement\n"
code += "with self.fs.open('f.txt','wb-') as testfile:\n"
code += " testfile.write(contents)\n"
code += " raise ValueError\n"
code = compile(code, "<string>", 'exec')
self.assertRaises(ValueError, eval, code, globals(), locals())
self.assertEquals(self.fs.getcontents('f.txt', 'rb'), contents)
def test_pickling(self):
if self.fs.getmeta('pickle_contents', True):
self.fs.setcontents("test1", b("hello world"))
fs2 = pickle.loads(pickle.dumps(self.fs))
self.assert_(fs2.isfile("test1"))
fs3 = pickle.loads(pickle.dumps(self.fs, -1))
self.assert_(fs3.isfile("test1"))
else:
# Just make sure it doesn't throw an exception
fs2 = pickle.loads(pickle.dumps(self.fs))
def test_big_file(self):
"""Test handling of a big file (1MB)"""
chunk_size = 1024 * 256
num_chunks = 4
def chunk_stream():
"""Generate predictable-but-randomy binary content."""
r = random.Random(0)
randint = r.randint
int2byte = six.int2byte
for _i in xrange(num_chunks):
c = b("").join(int2byte(randint(
0, 255)) for _j in xrange(chunk_size//8))
yield c * 8
f = self.fs.open("bigfile", "wb")
try:
for chunk in chunk_stream():
f.write(chunk)
finally:
f.close()
chunks = chunk_stream()
f = self.fs.open("bigfile", "rb")
try:
try:
while True:
if chunks.next() != f.read(chunk_size):
assert False, "bigfile was corrupted"
except StopIteration:
if f.read() != b(""):
assert False, "bigfile was corrupted"
finally:
f.close()
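    # Note on the check above (an illustrative aside, not from the original
    # file): chunk_stream() seeds random.Random(0), so a second call yields
    # byte-identical chunks; the read-back loop compares the file against a
    # fresh stream instead of buffering the whole megabyte in memory.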
def test_settimes(self):
def cmp_datetimes(d1, d2):
"""Test datetime objects are the same to within the timestamp accuracy"""
dts1 = time.mktime(d1.timetuple())
dts2 = time.mktime(d2.timetuple())
return int(dts1) == int(dts2)
d1 = datetime.datetime(2010, 6, 20, 11, 0, 9, 987699)
d2 = datetime.datetime(2010, 7, 5, 11, 0, 9, 500000)
self.fs.setcontents('/dates.txt', b('check dates'))
# If the implementation supports settimes, check that the times
# can be set and then retrieved
try:
self.fs.settimes('/dates.txt', d1, d2)
except UnsupportedError:
pass
else:
info = self.fs.getinfo('/dates.txt')
self.assertTrue(cmp_datetimes(d1, info['accessed_time']))
self.assertTrue(cmp_datetimes(d2, info['modified_time']))
def test_removeroot(self):
self.assertRaises(RemoveRootError, self.fs.removedir, "/")
def test_zero_read(self):
"""Test read(0) returns empty string"""
self.fs.setcontents('foo.txt', b('Hello, World'))
with self.fs.open('foo.txt', 'rb') as f:
self.assert_(len(f.read(0)) == 0)
with self.fs.open('foo.txt', 'rt') as f:
self.assert_(len(f.read(0)) == 0)
# May be disabled - see end of file
class ThreadingTestCases(object):
"""Testcases for thread-safety of FS implementations."""
# These are either too slow to be worth repeating,
# or cannot possibly break cross-thread.
_dont_retest = ("test_pickling", "test_multiple_overwrite",)
__lock = threading.RLock()
def _yield(self):
# time.sleep(0.001)
# Yields without a delay
time.sleep(0)
def _lock(self):
self.__lock.acquire()
def _unlock(self):
self.__lock.release()
def _makeThread(self, func, errors):
def runThread():
try:
func()
except Exception:
errors.append(sys.exc_info())
thread = threading.Thread(target=runThread)
thread.daemon = True
return thread
def _runThreads(self, *funcs):
check_interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
errors = []
threads = [self._makeThread(f, errors) for f in funcs]
for t in threads:
t.start()
for t in threads:
t.join()
for (c, e, t) in errors:
raise e, None, t
finally:
sys.setcheckinterval(check_interval)
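    # Illustrative sketch of the helpers above (a restatement, not part of
    # the original suite): _runThreads collects exc_info from every worker
    # and re-raises the first one in the main thread, so a failure in any
    # thread fails the enclosing test, e.g.:
    #
    #     def boom():
    #         raise ValueError("from a worker thread")
    #     self._runThreads(boom)  # re-raises ValueError with its traceback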
def test_setcontents_threaded(self):
def setcontents(name, contents):
f = self.fs.open(name, "wb")
self._yield()
try:
f.write(contents)
self._yield()
finally:
f.close()
def thread1():
c = b("thread1 was 'ere")
setcontents("thread1.txt", c)
self.assertEquals(self.fs.getcontents("thread1.txt", 'rb'), c)
def thread2():
c = b("thread2 was 'ere")
setcontents("thread2.txt", c)
self.assertEquals(self.fs.getcontents("thread2.txt", 'rb'), c)
self._runThreads(thread1, thread2)
def test_setcontents_threaded_samefile(self):
def setcontents(name, contents):
f = self.fs.open(name, "wb")
self._yield()
try:
f.write(contents)
self._yield()
finally:
f.close()
def thread1():
c = b("thread1 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
def thread2():
c = b("thread2 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
def thread3():
c = b("thread3 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
try:
self._runThreads(thread1, thread2, thread3)
except ResourceLockedError:
# that's ok, some implementations don't support concurrent writes
pass
def test_cases_in_separate_dirs(self):
class TestCases_in_subdir(self.__class__, unittest.TestCase):
"""Run all testcases against a subdir of self.fs"""
def __init__(this, subdir):
super(TestCases_in_subdir, this).__init__("test_listdir")
this.subdir = subdir
for meth in dir(this):
if not meth.startswith("test_"):
continue
if meth in self._dont_retest:
continue
if not hasattr(FSTestCases, meth):
continue
if self.fs.exists(subdir):
self.fs.removedir(subdir, force=True)
self.assertFalse(self.fs.isdir(subdir))
self.assertTrue(self.fs.isdir("/"))
self.fs.makedir(subdir)
self._yield()
getattr(this, meth)()
@property
def fs(this):
return self.fs.opendir(this.subdir)
def check(this, p):
return self.check(pathjoin(this.subdir, relpath(p)))
def thread1():
TestCases_in_subdir("thread1")
def thread2():
TestCases_in_subdir("thread2")
def thread3():
TestCases_in_subdir("thread3")
self._runThreads(thread1, thread2, thread3)
def test_makedir_winner(self):
errors = []
def makedir():
try:
self.fs.makedir("testdir")
except DestinationExistsError, e:
errors.append(e)
def makedir_noerror():
try:
self.fs.makedir("testdir", allow_recreate=True)
except DestinationExistsError, e:
errors.append(e)
def removedir():
try:
self.fs.removedir("testdir")
except (ResourceNotFoundError, ResourceLockedError), e:
errors.append(e)
# One thread should succeed, one should error
self._runThreads(makedir, makedir)
self.assertEquals(len(errors), 1)
self.fs.removedir("testdir")
# One thread should succeed, two should error
errors = []
self._runThreads(makedir, makedir, makedir)
if len(errors) != 2:
raise AssertionError(errors)
self.fs.removedir("testdir")
# All threads should succeed
errors = []
self._runThreads(makedir_noerror, makedir_noerror, makedir_noerror)
self.assertEquals(len(errors), 0)
self.assertTrue(self.fs.isdir("testdir"))
self.fs.removedir("testdir")
# makedir() can beat removedir() and vice-versa
errors = []
self._runThreads(makedir, removedir)
if self.fs.isdir("testdir"):
self.assertEquals(len(errors), 1)
self.assertFalse(isinstance(errors[0], DestinationExistsError))
self.fs.removedir("testdir")
else:
self.assertEquals(len(errors), 0)
def test_concurrent_copydir(self):
self.fs.makedir("a")
self.fs.makedir("a/b")
self.fs.setcontents("a/hello.txt", b("hello world"))
self.fs.setcontents("a/guido.txt", b("is a space alien"))
self.fs.setcontents("a/b/parrot.txt", b("pining for the fiords"))
def copydir():
self._yield()
self.fs.copydir("a", "copy of a")
def copydir_overwrite():
self._yield()
self.fs.copydir("a", "copy of a", overwrite=True)
# This should error out since we're not overwriting
self.assertRaises(
DestinationExistsError, self._runThreads, copydir, copydir)
        self.assert_(self.fs.isdir('a'))
copydir_overwrite()
self.assert_(self.fs.isdir('a'))
# This should run to completion and give a valid state, unless
# files get locked when written to.
try:
self._runThreads(copydir_overwrite, copydir_overwrite)
except ResourceLockedError:
pass
self.assertTrue(self.fs.isdir("copy of a"))
self.assertTrue(self.fs.isdir("copy of a/b"))
self.assertEqual(self.fs.getcontents(
"copy of a/b/parrot.txt", 'rb'), b("pining for the fiords"))
self.assertEqual(self.fs.getcontents(
"copy of a/hello.txt", 'rb'), b("hello world"))
self.assertEqual(self.fs.getcontents(
"copy of a/guido.txt", 'rb'), b("is a space alien"))
def test_multiple_overwrite(self):
contents = [b("contents one"), b(
"contents the second"), b("number three")]
def thread1():
for i in xrange(30):
for c in contents:
self.fs.setcontents("thread1.txt", c)
self.assertEquals(self.fs.getsize("thread1.txt"), len(c))
self.assertEquals(self.fs.getcontents(
"thread1.txt", 'rb'), c)
def thread2():
for i in xrange(30):
for c in contents:
self.fs.setcontents("thread2.txt", c)
self.assertEquals(self.fs.getsize("thread2.txt"), len(c))
self.assertEquals(self.fs.getcontents(
"thread2.txt", 'rb'), c)
self._runThreads(thread1, thread2)
# Uncomment to temporarily disable threading tests
# class ThreadingTestCases(object):
# _dont_retest = ()
|
duedil-ltd/pyfilesystem
|
fs/tests/__init__.py
|
Python
|
bsd-3-clause
| 46,382
|
import json
from django.test import TestCase
from django.utils.http import urlquote
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Permission
from django.core.files.uploadedfile import SimpleUploadedFile
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailimages.utils import generate_signature
from .utils import Image, get_test_image_file
class TestImageIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages_index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
def test_ordering(self):
orderings = ['title', '-created_at']
for ordering in orderings:
response = self.get({'ordering': ordering})
self.assertEqual(response.status_code, 200)
class TestImageAddView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages_add_image'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages_add_image'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
def test_add(self):
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages_index'))
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that size was populated correctly
image = images.first()
self.assertEqual(image.width, 640)
self.assertEqual(image.height, 480)
def test_add_no_file_selected(self):
response = self.post({
'title': "Test image",
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# The form should have an error
self.assertFormError(response, 'form', 'file', "This field is required.")
class TestImageEditView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_edit_image', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages_edit_image', args=(self.image.id,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
def test_edit(self):
response = self.post({
'title': "Edited",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages_index'))
# Check that the image was edited
image = Image.objects.get(id=self.image.id)
self.assertEqual(image.title, "Edited")
def test_with_missing_image_file(self):
self.image.file.delete(False)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
class TestImageDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_delete_image', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages_delete_image', args=(self.image.id,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/confirm_delete.html')
def test_delete(self):
response = self.post({
'hello': 'world'
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages_index'))
# Check that the image was deleted
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 0)
class TestImageChooserView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages_chooser'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.js')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestImageChooserChosenView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_image_chosen', args=(self.image.id,)), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/image_chosen.js')
# TODO: Test posting
class TestImageChooserUploadView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages_chooser_upload'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.js')
def test_upload(self):
response = self.client.post(reverse('wagtailimages_chooser_upload'), {
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Check response
self.assertEqual(response.status_code, 200)
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that size was populated correctly
image = images.first()
self.assertEqual(image.width, 640)
self.assertEqual(image.height, 480)
def test_upload_no_file_selected(self):
response = self.client.post(reverse('wagtailimages_chooser_upload'), {
'title': "Test image",
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# The form should have an error
self.assertFormError(response, 'uploadform', 'file', "This field is required.")
class TestMultipleImageUploader(TestCase, WagtailTestUtils):
"""
This tests the multiple image upload views located in wagtailimages/views/multiple.py
"""
def setUp(self):
self.login()
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def test_add(self):
"""
This tests that the add view responds correctly on a GET request
"""
# Send request
response = self.client.get(reverse('wagtailimages_add_multiple'))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
def test_add_post(self):
"""
This tests that a POST request to the add view saves the image and returns an edit form
"""
response = self.client.post(reverse('wagtailimages_add_multiple'), {
'files[]': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check image
self.assertIn('image', response.context)
self.assertEqual(response.context['image'].title, 'test.png')
# Check form
self.assertIn('form', response.context)
self.assertEqual(response.context['form'].initial['title'], 'test.png')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], response.context['image'].id)
self.assertTrue(response_json['success'])
def test_add_post_noajax(self):
"""
This tests that only AJAX requests are allowed to POST to the add view
"""
response = self.client.post(reverse('wagtailimages_add_multiple'), {})
# Check response
self.assertEqual(response.status_code, 400)
def test_add_post_nofile(self):
"""
This tests that the add view checks for a file when a user POSTs to it
"""
response = self.client.post(reverse('wagtailimages_add_multiple'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 400)
def test_add_post_badfile(self):
"""
        This tests that the add view rejects an upload that is not a valid image
"""
response = self.client.post(reverse('wagtailimages_add_multiple'), {
'files[]': SimpleUploadedFile('test.png', b"This is not an image!"),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertNotIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertIn('error_message', response_json)
self.assertFalse(response_json['success'])
self.assertEqual(response_json['error_message'], "Not a supported image format. Supported formats: GIF, JPEG, PNG.")
def test_edit_get(self):
"""
This tests that a GET request to the edit view returns a 405 "METHOD NOT ALLOWED" response
"""
# Send request
response = self.client.get(reverse('wagtailimages_edit_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 405)
def test_edit_post(self):
"""
This tests that a POST request to the edit view edits the image
"""
# Send request
response = self.client.post(reverse('wagtailimages_edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
def test_edit_post_noajax(self):
"""
This tests that a POST request to the edit view without AJAX returns a 400 response
"""
# Send request
response = self.client.post(reverse('wagtailimages_edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
})
# Check response
self.assertEqual(response.status_code, 400)
def test_edit_post_validation_error(self):
"""
This tests that a POST request to the edit page returns a json document with "success=False"
and a form with the validation error indicated
"""
# Send request
response = self.client.post(reverse('wagtailimages_edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "", # Required
('image-%d-tags' % self.image.id): "",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check that a form error was raised
self.assertFormError(response, 'form', 'title', "This field is required.")
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertFalse(response_json['success'])
def test_delete_get(self):
"""
This tests that a GET request to the delete view returns a 405 "METHOD NOT ALLOWED" response
"""
# Send request
response = self.client.get(reverse('wagtailimages_delete_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 405)
def test_delete_post(self):
"""
This tests that a POST request to the delete view deletes the image
"""
# Send request
response = self.client.post(reverse('wagtailimages_delete_multiple', args=(self.image.id, )), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Make sure the image is deleted
self.assertFalse(Image.objects.filter(id=self.image.id).exists())
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
    def test_delete_post_noajax(self):
"""
This tests that a POST request to the delete view without AJAX returns a 400 response
"""
# Send request
response = self.client.post(reverse('wagtailimages_delete_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 400)
class TestURLGeneratorView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
This tests that the view responds correctly for a user with edit permissions on this image
"""
# Get
response = self.client.get(reverse('wagtailimages_url_generator', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/url_generator.html')
def test_get_bad_permissions(self):
"""
        This tests that the view gives a 403 if a user without correct permissions attempts to access it
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get
response = self.client.get(reverse('wagtailimages_url_generator', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 403)
class TestGenerateURLView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
This tests that the view responds correctly for a user with edit permissions on this image
"""
# Get
response = self.client.get(reverse('wagtailimages_generate_url', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
content_json = json.loads(response.content.decode())
self.assertEqual(set(content_json.keys()), set(['url', 'preview_url']))
expected_url = 'http://localhost/images/%(signature)s/%(image_id)d/fill-800x600/' % {
'signature': urlquote(generate_signature(self.image.id, 'fill-800x600').decode()),
'image_id': self.image.id,
}
self.assertEqual(content_json['url'], expected_url)
expected_preview_url = reverse('wagtailimages_preview', args=(self.image.id, 'fill-800x600'))
self.assertEqual(content_json['preview_url'], expected_preview_url)
def test_get_bad_permissions(self):
"""
        This tests that the view gives a 403 if a user without correct permissions attempts to access it
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get
response = self.client.get(reverse('wagtailimages_generate_url', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 403)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'You do not have permission to generate a URL for this image.',
}))
def test_get_bad_image(self):
"""
This tests that the view gives a 404 response if a user attempts to use it with an image which doesn't exist
"""
# Get
response = self.client.get(reverse('wagtailimages_generate_url', args=(self.image.id + 1, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 404)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'Cannot find image.',
}))
def test_get_bad_filter_spec(self):
"""
This tests that the view gives a 400 response if the user attempts to use it with an invalid filter spec
"""
# Get
response = self.client.get(reverse('wagtailimages_generate_url', args=(self.image.id, 'bad-filter-spec')))
# Check response
self.assertEqual(response.status_code, 400)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'Invalid filter spec.',
}))
class TestPreviewView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
Test a valid GET request to the view
"""
# Get the image
response = self.client.get(reverse('wagtailimages_preview', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'image/jpeg')
def test_get_invalid_filter_spec(self):
"""
Test that an invalid filter spec returns a 400 response
        This is very unlikely to happen in reality. A user would have
        to create a signature for the invalid filter spec, which can't be
        done with Wagtail's built-in URL generator. We should test it
        anyway though.
"""
# Get the image
response = self.client.get(reverse('wagtailimages_preview', args=(self.image.id, 'bad-filter-spec')))
# Check response
self.assertEqual(response.status_code, 400)
|
jorge-marques/wagtail
|
wagtail/wagtailimages/tests/test_admin_views.py
|
Python
|
bsd-3-clause
| 22,610
|
from __future__ import absolute_import
import unittest
class TestRange1d(unittest.TestCase):
def setUp(self):
from bokeh.models.ranges import Range1d
self.range1d = Range1d
def test_init(self):
self.assertRaises(ValueError, self.range1d, 1, 2, start=1, end=2)
self.assertRaises(ValueError, self.range1d, 1, 2, 3)
range1d = self.range1d(1, 2)
assert range1d
range1d = self.range1d(start=1, end=2)
assert range1d
class TestFactorRange(unittest.TestCase):
def setUp(self):
from bokeh.models.ranges import FactorRange
self.factorRange = FactorRange
def test_init(self):
self.assertRaises(ValueError, self.factorRange, [1, 2, 3], factors=[1, 2, 3])
self.assertRaises(ValueError, self.factorRange, [1, 2, 3, 4])
factorRange = self.factorRange(1, 2)
assert factorRange
factorRange = self.factorRange(factors=[1, 2, 3, 4, 5])
assert factorRange
if __name__ == "__main__":
unittest.main()
|
htygithub/bokeh
|
bokeh/tests/test_models_ranges.py
|
Python
|
bsd-3-clause
| 1,038
|
from djangoappengine.main.application import main, application, make_profileable
if __name__ == '__main__':
main()
|
purpleidea/macaronic-net
|
djangoappengine/main/main.py
|
Python
|
agpl-3.0
| 120
|
from ._base import FunctionBase
class Function(FunctionBase):
name = 'clear'
doc = 'Clears TerminalWidgetSystem data'
methods_subclass = {}
def handle_input(self, term_system, term_globals, exec_locals, text):
term_system.data = []
|
Bakterija/mmplayer
|
mmplayer/kivy_soil/terminal_widget/functions/clear.py
|
Python
|
mit
| 258
|
#!/usr/bin/python
import mraa
import time
import sys
import os
# '~' is not expanded automatically in sys.path entries; expand it explicitly.
sys.path.append(os.path.expanduser('~/programy/python/'))
from robot import *
forward(1)
lrotate(1)
forward(1)
lrotate(1)
back(1)
rrotate(1)
back(1)
rrotate(1)
quit()
|
jipech/SparkFun_H-Bridge_Block_for_Edison_Python_Library
|
example.py
|
Python
|
gpl-2.0
| 205
|
from copy import copy
import numpy as np
from nipy.core.image.image import Image
class ImageList(object):
''' Class to contain ND image as list of (N-1)D images '''
def __init__(self, images=None):
"""
A lightweight implementation of a list of images.
Parameters
----------
images : iterable
            an iterable and sliceable object whose items are meant to be
            images; this is checked by asserting that each has a
`coordmap` attribute
>>> import numpy as np
>>> from nipy.testing import funcfile
>>> from nipy.core.api import Image, ImageList
>>> from nipy.io.api import load_image
>>> funcim = load_image(funcfile)
>>> ilist = ImageList(funcim)
>>> sublist = ilist[2:5]
Slicing an ImageList returns a new ImageList
>>> isinstance(sublist, ImageList)
True
Indexing an ImageList returns a new Image
>>> newimg = ilist[2]
>>> isinstance(newimg, Image)
True
>>> isinstance(newimg, ImageList)
False
>>> np.asarray(sublist).shape
(3, 2, 20, 20)
>>> np.asarray(newimg).shape
(2, 20, 20)
"""
if images is None:
self.list = []
return
for im in images:
if not hasattr(im, "coordmap"):
raise ValueError("expecting each element of images "
" to have a 'coordmap' attribute")
self.list = images
@classmethod
def from_image(klass, image, axis=-1):
if axis is None:
            raise ValueError('axis must be an array axis number or -1')
coordmap = image.coordmap
data = np.asarray(image)
data = np.rollaxis(data, axis)
imlist = [Image(dataslice, copy(coordmap))
for dataslice in data]
return klass(imlist)
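    # Illustrative usage of from_image (not in the original file; mirrors
    # the doctest conventions above):
    #
    #     >>> ilist = ImageList.from_image(funcim, axis=-1)
    #     >>> len(ilist.list) == np.asarray(funcim).shape[-1]
    #     True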
def __setitem__(self, index, value):
"""
self.list[index] = value
"""
self.list[index] = value
def __getitem__(self, index):
"""
self.list[index]
"""
        if isinstance(index, int):
return self.list[index]
else:
return ImageList(images=self.list[index])
def __getslice__(self, i, j):
"""
Return another ImageList instance consisting with
images self.list[i:j]
"""
return ImageList(images=self.list[i:j])
def __array__(self):
"""Return data in ndarray. Called through numpy.array.
Examples
--------
>>> import numpy as np
>>> from nipy.testing import funcfile
>>> from nipy.core.api import ImageList
>>> from nipy.io.api import load_image
>>> funcim = load_image(funcfile)
>>> ilist = ImageList(funcim)
>>> np.asarray(ilist).shape
(20, 2, 20, 20)
"""
return np.asarray([np.asarray(im) for im in self.list])
def __iter__(self):
self._iter = iter(self.list)
return self
def next(self):
return self._iter.next()
|
yarikoptic/NiPy-OLD
|
nipy/core/image/image_list.py
|
Python
|
bsd-3-clause
| 3,183
|
from .. import gen_or_fn
import unittest
from unittest.mock import Mock, patch
class RunGenOrFn(unittest.TestCase):
""" Test cases of RunGenOrFn """
def test_fn(self):
""" Test that a normal function is run properly """
fn = Mock()
args = range(3)
kwargs = {'one': 1, 'two': 2, 'three': 3}
generator = gen_or_fn.RunGenOrFn(fn, *args, **kwargs)
self.assertRaises(StopIteration, next, generator)
fn.assert_called_once_with(*args, **kwargs)
def test_generatorFn(self):
""" Test that a generator function is run properly """
args = range(3)
kwargs = {'one': 1, 'two': 2, 'three': 3}
responses = ['a', 'b', 'c']
generator = gen_or_fn.RunGenOrFn(self.dummy_generator, *args, **kwargs)
try:
i = 0
yieldedValue = next(generator)
self.assertEqual(args[i], yieldedValue)
while True:
yieldedValue = generator.send(responses[i])
i += 1
self.assertEqual(args[i], yieldedValue)
except StopIteration:
pass
self.assertEqual(self.args, tuple(args))
self.assertEqual(self.kwargs, kwargs)
self.assertEqual(responses, self.responses)
def dummy_generator(self, *args, **kwargs):
""" Helper class that acts as a dummy generator """
self.args = args
self.kwargs = kwargs
self.responses = []
for value in args:
response = yield value
self.responses.append(response)
class GenOrFn(unittest.TestCase):
""" Test cases of GenOrFn """
@patch('kao_generator.gen_or_fn.KaoGenerator')
def test_generatorBuilt(self, KaoGeneratorMock):
""" Test that the generator is built properly """
expected = Mock()
KaoGeneratorMock.return_value = expected
fn = Mock()
args = range(3)
kwargs = {'one': 1, 'two': 2, 'three': 3}
actual = gen_or_fn.GenOrFn(fn, *args, **kwargs)
KaoGeneratorMock.assert_called_once_with(gen_or_fn.RunGenOrFn, fn, *args, **kwargs)
self.assertEqual(actual, expected)
|
cloew/KaoGenerator
|
kao_generator/Test/test_gen_or_fn.py
|
Python
|
mit
| 2,262
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _DepthwiseConv2dNumpyBasic(x1, x2, strides):
"""Compute depthwise_conv2d using Numpy.
  This allows us to test TensorFlow's depthwise_conv2d by comparing to the
  Numpy version.
Args:
x1: The input Numpy array, in NHWC format.
x2: The filter Numpy array.
strides: A Python list of 4 elements representing the strides.
Returns:
The depthwise conv2d output as a Numpy array.
"""
n, h, w, c = x1.shape
fh, fw, c2, o = x2.shape
assert c == c2
_, sh, sw, _ = strides
out_rows = (h - fh + sh) // sh
out_cols = (w - fw + sw) // sw
out = np.zeros([n, out_rows, out_cols, c * o])
for i in range(out_rows):
for j in range(out_cols):
for k in range(c):
start_height = i * sh
end_height = start_height + fh
start_width = j * sw
end_width = start_width + fw
# multiplied_slice.shape: (b, fh, fw, o)
multiplied_slice = (
x1[:, start_height:end_height, start_width:end_width, k, np.newaxis]
* x2[:, :, k, :])
# Set a slice of b * o elements of 'out'.
out[:, i, j, k * o:(k + 1) * o] = np.sum(multiplied_slice, axis=(1, 2))
return out
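# Illustrative sanity check for the helper above (an added sketch, not part
# of the original file): with a 1x1 filter and unit strides, depthwise conv
# reduces to a per-channel scaling, so the function should act elementwise:
#
#   x = np.ones([1, 2, 2, 3])
#   f = 2.0 * np.ones([1, 1, 3, 1])  # fh = fw = 1, c = 3, o = 1
#   y = _DepthwiseConv2dNumpyBasic(x, f, [1, 1, 1, 1])
#   assert y.shape == (1, 2, 2, 3) and np.all(y == 2.0)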
def _DepthwiseConv2dNumpy(x1, x2, strides, padding, data_format, dilations):
"""Compute depthwise_conv2d using Numpy.
  This allows us to test TensorFlow's depthwise_conv2d by comparing to the
  Numpy version.
Unlike `_DepthwiseConv2dNumpyBasic`, this supports more advanced features
like padding.
Args:
x1: The input Numpy array.
x2: The filter Numpy array.
strides: A Python list of 4 elements representing the strides.
padding: The padding. "SAME", "VALID", or a list of explicit paddings.
data_format: "NHWC" or "NCHW".
dilations: A list of 2 elements, representing the dilations.
Returns:
The depthwise conv2d as a Numpy array.
"""
if data_format == "NCHW":
# Transpose arguments to NHWC format.
x1 = np.transpose(x1, (0, 3, 1, 2))
strides = [strides[0], strides[3], strides[1], strides[2]]
if dilations:
dilations = [dilations[0], dilations[3], dilations[1], dilations[2]]
if dilations:
# Dilate the filter so _DepthwiseConv2dNumpyBasic doesn't have to deal with
# dilations.
fh, fw, c, o = x2.shape
new_fh = (fh - 1) * dilations[0] + 1
new_fw = (fw - 1) * dilations[1] + 1
new_x2 = np.zeros((new_fh, new_fw, c, o))
for i in range(fh):
for j in range(fw):
        new_x2[i * dilations[0], j * dilations[1], :, :] = x2[i, j, :, :]
x2 = new_x2
# Pad input so _DepthwiseConv2dNumpyBasic doesn't have to deal with padding.
if padding == "SAME":
def PaddingsForDim(input_dim, filter_dim, stride):
"""Computes paddings for a single dimension."""
if input_dim % stride == 0:
total_padding = max(filter_dim - stride, 0)
else:
total_padding = max(filter_dim - (input_dim % stride), 0)
pad_before = total_padding // 2
pad_after = total_padding - pad_before
return pad_before, pad_after
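    # Worked example (illustrative): input_dim=5, filter_dim=3, stride=2 gives
    # 5 % 2 == 1, so total_padding = max(3 - 1, 0) = 2 and the pads are (1, 1),
    # mirroring TensorFlow's SAME padding rule.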
padding = [(0, 0),
PaddingsForDim(x1.shape[1], x2.shape[0], strides[1]),
PaddingsForDim(x1.shape[2], x2.shape[1], strides[2]),
(0, 0)]
elif padding == "VALID":
padding = [(0, 0)] * 4
x1 = np.pad(x1, padding, "constant")
y = _DepthwiseConv2dNumpyBasic(x1, x2, strides)
  if data_format == "NCHW":
    # Transpose the result back from NHWC to NCHW format.
    y = np.transpose(y, (0, 3, 1, 2))
return y
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Returns:
List of tuples (input_size, filter_size, out_size, stride, padding,
dilations), the depthwise convolution parameters.
"""
def Config(input_size, filter_size, out_size, stride=1, padding="SAME",
dilations=None):
return input_size, filter_size, out_size, stride, padding, dilations
return [
Config([4, 5, 5, 48], [1, 1, 48, 2], [4, 5, 5, 96]),
Config([4, 8, 8, 84], [1, 3, 84, 1], [4, 8, 8, 84]),
Config([4, 17, 17, 48], [3, 1, 48, 4], [4, 17, 17, 192]),
Config([4, 9, 27, 8], [3, 3, 8, 1], [4, 9, 27, 8]),
Config([4, 31, 31, 7], [3, 3, 7, 1], [4, 31, 31, 7]),
Config([4, 35, 35, 2], [5, 5, 2, 1], [4, 35, 35, 2]),
Config([4, 147, 147, 2], [3, 3, 2, 8], [4, 49, 49, 16], 3,
padding="VALID"),
Config([3, 299, 299, 3], [3, 2, 3, 8], [3, 150, 150, 24], 2),
Config([5, 183, 183, 1], [5, 5, 1, 2], [5, 92, 92, 2], 2),
Config([5, 183, 183, 1], [5, 5, 1, 2], [5, 183, 183, 2],
dilations=[2, 2]),
Config([5, 41, 35, 2], [4, 7, 2, 2], [5, 32, 23, 4], padding="VALID",
dilations=[3, 2]),
]
def ConfigsToTestExplicit():
"""Iterator for different convolution shapes, strides and explicit paddings.
Returns:
List of tuples (input_size, filter_size, out_size, stride, padding,
dilations), the depthwise convolution parameters.
"""
def Config(input_size, filter_size, out_size, stride=1, padding=None,
dilations=None):
return input_size, filter_size, out_size, stride, padding, dilations
return [
Config([4, 5, 5, 48], [1, 1, 48, 2], [4, 8, 12, 96],
padding=[[1, 2], [3, 4]]),
Config([4, 1, 1, 3], [3, 3, 3, 2], [4, 29, 39, 6],
padding=[[10, 20], [15, 25]]),
Config([4, 9, 27, 8], [3, 3, 8, 1], [4, 14, 31, 8],
padding=[[3, 4], [4, 2]]),
Config([4, 31, 31, 7], [3, 3, 7, 1], [4, 29, 29, 7],
padding=[[0, 0], [0, 0]]),
Config([3, 299, 299, 3], [3, 2, 3, 8], [3, 150, 153, 24], 2,
padding=[[1, 2], [3, 5]]),
Config([5, 183, 183, 1], [5, 5, 1, 2], [5, 62, 60, 2], 3,
padding=[[3, 2], [1, 0]]),
Config([5, 29, 31, 1], [5, 4, 1, 2], [5, 26, 23, 2],
padding=[[3, 2], [1, 0]], dilations=[2, 3]),
# These cases test the kernels in depthwise_conv_op_gpu.h which are used
# if the input size is small.
Config([4, 5, 5, 48], [3, 3, 48, 1], [4, 5, 5, 48],
padding=[[0, 2], [0, 2]]),
Config([1, 8, 7, 2], [8, 7, 2, 1], [1, 8, 7, 2],
padding=[[0, 7], [3, 3]]),
Config([2, 4, 3, 2], [3, 2, 2, 1], [2, 4, 3, 2],
padding=[[2, 0], [1, 0]]),
]
def CheckGradConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
  compute_gradient_error() is very expensive, so the configs should be
  relatively small.
Returns:
List of tuples (input_size, filter_size, out_size, stride, padding,
dilations), the depthwise convolution parameters.
"""
def Config(input_size, filter_size, out_size, stride=1, padding="SAME",
dilations=None):
return input_size, filter_size, out_size, stride, padding, dilations
return [
Config([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2]),
Config([4, 5, 5, 1], [2, 2, 1, 2], [4, 2, 2, 2], 2, padding="VALID"),
Config([2, 4, 4, 2], [3, 1, 2, 2], [2, 4, 4, 4]),
Config([1, 15, 15, 2], [1, 3, 2, 1], [1, 15, 15, 2]),
Config([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 5, 2], 3, padding="VALID"),
Config([2, 5, 8, 1], [4, 3, 1, 2], [2, 5, 8, 2], dilations=[1, 2]),
# These cases test the kernels in depthwise_conv_op_gpu.h which are used
# if the input size is small.
Config([1, 3, 1, 2], [2, 1, 2, 1], [1, 3, 1, 2]),
Config([2, 2, 3, 2], [2, 1, 2, 1], [2, 2, 3, 2]),
Config([2, 2, 3, 1], [2, 2, 1, 1], [2, 2, 3, 1]),
]
def CheckGradConfigsToTestExplicit():
"""Iterator for different convolution shapes, strides and explicit paddings.
  compute_gradient_error() is very expensive, so the configs should be
  relatively small.
Returns:
List of tuples (input_size, filter_size, out_size, stride, padding,
dilations), the depthwise convolution parameters.
"""
def Config(input_size, filter_size, out_size, stride=1, padding=None,
dilations=None):
return input_size, filter_size, out_size, stride, padding, dilations
return [
Config([2, 5, 8, 1], [4, 4, 1, 2], [2, 3, 10, 2],
padding=[[0, 1], [2, 3]]),
Config([4, 5, 5, 1], [2, 2, 1, 2], [4, 4, 5, 2], 2,
padding=[[3, 1], [5, 0]]),
Config([2, 4, 4, 2], [3, 1, 2, 2], [2, 7, 11, 4],
padding=[[4, 1], [3, 4]]),
Config([1, 15, 15, 2], [1, 3, 2, 1], [1, 18, 23, 2],
padding=[[3, 0], [2, 8]]),
Config([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 8, 2], 3,
padding=[[0, 0], [10, 0]]),
Config([2, 5, 8, 1], [3, 4, 1, 2], [2, 5, 10, 2],
padding=[[3, 1], [2, 3]], dilations=[2, 1]),
# These cases test the kernels in depthwise_conv_op_gpu.h which are used
# if the input size is small.
Config([2, 4, 3, 2], [3, 2, 2, 1], [2, 4, 3, 2],
padding=[[2, 0], [1, 0]]),
]
class DepthwiseConv2DTest(test.TestCase):
# This tests depthwise_conv2d and depthwise_conv2d_native
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_type,
use_gpu,
grouped_conv=False,
data_format="NHWC",
dilations=None):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
use_gpu: Whether to use GPU.
grouped_conv: Whether to use cuDNN 7's grouped convolution.
data_format: The data_format of the input. "NHWC" or "NCHW".
dilations: A list of 2 elements, representing the dilations.
"""
input_size = 1
filter_size = 1
for s in tensor_in_sizes:
input_size *= s
for s in filter_in_sizes:
filter_size *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = [f * 1.0 / input_size for f in range(1, input_size + 1)]
x1 = np.array(x1).reshape(tensor_in_sizes)
x2 = [f * 1.0 / filter_size for f in range(1, filter_size + 1)]
x2 = np.array(x2).reshape(filter_in_sizes)
# Compute reference result
strides = [1, stride, stride, 1]
if isinstance(padding, list):
padding = [(0, 0)] + padding + [(0, 0)]
np_result = _DepthwiseConv2dNumpy(x1, x2, strides, padding, "NHWC",
dilations)
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = {
dtypes.float16: 4e-2,
dtypes.float32: 1e-5,
dtypes.float64: 1e-12,
}[data_type]
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
if isinstance(padding, list):
padding = [padding[0], padding[3], padding[1], padding[2]]
# depthwise_conv2d_native does not support dilations except on TPUs.
if dilations is None:
with sess.graph._kernel_label_map({
"DepthwiseConv2dNative": "cudnn_grouped_convolution"
} if grouped_conv else {}):
conv_native = nn_ops.depthwise_conv2d_native(
t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
try:
# The Numpy array from calling depthwise_conv2d_native
native_result = self.evaluate(conv_native)
except errors.InvalidArgumentError as e:
# Grouped convolution kernel is only registered for cuDNN 7. Silently
# return when we are running on an earlier version or without GPU.
if e.message.startswith(
"No OpKernel was registered to support Op "
"'DepthwiseConv2dNative'"):
tf_logging.warn("Skipping grouped convolution test")
return
raise e
conv_interface = nn_impl.depthwise_conv2d(
t1, t2, strides=strides, padding=padding,
data_format=data_format, dilations=dilations)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_interface = array_ops.transpose(conv_interface, [0, 2, 3, 1])
# The Numpy array from calling depthwise_conv2d
interface_result = self.evaluate(conv_interface)
if dilations is None:
self.assertAllClose(native_result, np_result, atol=tolerance, rtol=0.)
self.assertAllClose(interface_result, np_result, atol=tolerance, rtol=0.)
@test_util.run_v1_only("b/120545219")
@test_util.run_cuda_only
def testDepthwiseConv2DCudnn(self):
for index, (input_size, filter_size, _, stride,
padding, dilations) in enumerate(ConfigsToTest()):
# The CuDNN depthwise conv is turned on only when input/output is NCHW and
# float16(half). See cudnn release note 7.6.3.
tf_logging.info(
"Testing DepthwiseConv2DCudnn, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
data_type = dtypes.float16
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
data_format="NCHW",
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding, dilations) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
"%s", index, input_size, filter_size, stride, padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
for data_type in ([dtypes.float32] + optional_float64):
tf_logging.info("Testing without grouped_conv")
self._VerifyValues(
input_size, filter_size, stride, padding, data_type, use_gpu=True,
dilations=dilations)
tf_logging.info("Testing with grouped_conv")
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
grouped_conv=True,
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DWithUnknownShape(self):
# GitHub issue 22110.
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32)
f = np.ones([1, 1, 1, 1], np.float32)
v = nn_impl.depthwise_conv2d(
x, f, [1, 1, 1, 1], "VALID", rate=[2, 1], data_format="NCHW")
self.assertAllEqual(
np.ones([1, 1, 1, 1], np.float32),
v.eval(feed_dict={x: np.ones([1, 1, 1, 1], np.float32)}))
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, _, stride,
padding, dilations) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFormat, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
for data_type in ([dtypes.float32] + optional_float64):
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
data_format="NCHW",
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DExplicit(self):
for index, (input_size, filter_size, _, stride,
padding, dilations) in enumerate(ConfigsToTestExplicit()):
tf_logging.info(
"Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
"%s", index, input_size, filter_size, stride, padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
data_formats = ["NHWC", "NCHW"] if test.is_gpu_available() else ["NHWC"]
for data_type in [dtypes.float16, dtypes.float32] + optional_float64:
for data_format in data_formats:
self._VerifyValues(
input_size, filter_size, stride, padding, data_type, use_gpu=True,
data_format=data_format, dilations=dilations)
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected, use_gpu):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether to use GPU.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.cached_session(use_gpu=use_gpu) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = self.evaluate(conv)
tf_logging.info("value = %r", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
    # The inputs look like this (it's a 2 x 3 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=False)
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=True)
# Gradient checkers. This tests depthwise gradient computations for both
# BackpropFilter and BackpropInput by comparing gradients computed by the
# depthwise gradient ops with the gradients computed numerically (details can
  # be found in compute_gradient_error()).
  # Note this check is very expensive, so the input should not be too big.
def _ConstructAndTestGradient(self,
input_shape,
filter_shape,
output_shape,
stride,
padding,
data_type,
test_input,
use_gpu,
grouped_conv=False,
data_format="NHWC",
dilations=None):
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
input_np = np.array(input_data).reshape(input_shape)
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
filter_np = np.array(filter_data).reshape(filter_shape)
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = {
dtypes.float16: 4e-0,
dtypes.float32: 8e-4,
dtypes.float64: 1e-12,
}[data_type]
input_tensor = constant_op.constant(
input_np, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_np, shape=filter_shape, dtype=data_type, name="filter")
native_input = input_tensor
strides = [1, stride, stride, 1]
if isinstance(padding, list):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_input = array_ops.transpose(input_tensor, [0, 3, 1, 2])
input_shape = [
input_shape[0], input_shape[3], input_shape[1], input_shape[2]
]
output_shape = [
output_shape[0], output_shape[3], output_shape[1], output_shape[2]
]
strides = [1, 1, stride, stride]
if isinstance(padding, list):
padding = [padding[0], padding[3], padding[1], padding[2]]
with sess.graph._kernel_label_map({
"DepthwiseConv2dNative": "cudnn_grouped_convolution",
"DepthwiseConv2dNativeBackpropInput": "cudnn_grouped_convolution",
"DepthwiseConv2dNativeBackpropFilter": "cudnn_grouped_convolution",
} if grouped_conv else {}):
depthwise_conv2d = nn_impl.depthwise_conv2d(
native_input,
filter_tensor,
strides,
padding,
data_format=data_format,
dilations=dilations,
name="depthwise_conv2d")
self.assertEqual(output_shape, depthwise_conv2d.get_shape())
try:
if test_input:
err = gradient_checker.compute_gradient_error(
native_input, input_shape, depthwise_conv2d, output_shape)
else:
err = gradient_checker.compute_gradient_error(
filter_tensor, filter_shape, depthwise_conv2d, output_shape)
except errors.InvalidArgumentError as e:
# Grouped convolution kernel is only registered for cuDNN 7. Silently
# return when we are running on an earlier version or without GPU.
if grouped_conv and e.message.startswith(
"No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
tf_logging.warn("Skipping grouped convolution test")
return
raise e
tf_logging.info(
"data_type: %r, use_gpu: %r, grouped_conv: %r, error = %f", data_type,
use_gpu, grouped_conv, err)
self.assertLess(err, tolerance)
@test_util.run_v1_only("b/120545219")
@test_util.run_cuda_only
def testDepthwiseConv2DInputGradCudnn(self):
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(CheckGradConfigsToTest()):
# The CuDNN depthwise conv (input gradient) is turned on only when
# stride = 1, input/output is NCHW and float16(half). See cudnn release
# note 7.6.3.
if stride != 1:
continue
tf_logging.info(
"Testing DepthwiseConv2DInputGradCudnn, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
data_type = dtypes.float16
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
data_format="NCHW",
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DInputGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGrad, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
for data_type in ([dtypes.float32] + optional_float64):
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
dilations=dilations)
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
grouped_conv=True,
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DInputGradFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGradFormat, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
for data_type in ([dtypes.float32] + optional_float64):
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
data_format="NCHW",
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DInputGradExplicit(self):
for index, (input_size, filter_size, output_size, stride, padding,
dilations) in enumerate(CheckGradConfigsToTestExplicit()):
tf_logging.info(
"Testing DepthwiseConv2DInputGradExplicit, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
data_formats = ["NHWC", "NCHW"] if test.is_gpu_available() else ["NHWC"]
for data_type in [dtypes.float16, dtypes.float32] + optional_float64:
for data_format in data_formats:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
data_format=data_format,
dilations=dilations)
@test_util.run_v1_only("b/120545219")
@test_util.run_cuda_only
def testDepthwiseConv2DFilterGradCudnn(self):
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(CheckGradConfigsToTest()):
# The CuDNN depthwise conv (filter gradient) is turned on only when
# input/output is float16(half). See cudnn release note 7.6.3.
tf_logging.info(
"Testing DepthwiseConv2DFilterGradCudnn, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
data_type = dtypes.float16
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True,
data_format="NCHW",
dilations=dilations)
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True,
data_format="NHWC",
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DFilterGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGrad, %dth config: %r * %r, stride: "
"%d, padding: %s", index, input_size, filter_size, stride, padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
for data_type in ([dtypes.float32] + optional_float64):
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True,
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DFilterGradFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGradFormat, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
for data_type in ([dtypes.float32] + optional_float64):
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True,
data_format="NCHW",
dilations=dilations)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DFilterGradExplicit(self):
for index, (input_size, filter_size, output_size, stride, padding,
dilations) in enumerate(CheckGradConfigsToTestExplicit()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGradExplicit, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
data_formats = ["NHWC", "NCHW"] if test.is_gpu_available() else ["NHWC"]
for data_type in [dtypes.float16, dtypes.float32] + optional_float64:
for data_format in data_formats:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True,
data_format=data_format,
dilations=dilations)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
stride, padding, dtype):
x1 = np.random.rand(*filter_sizes).astype(dtype)
x2 = np.random.rand(*output_sizes).astype(dtype)
if isinstance(padding, list):
padding = [(0, 0)] + padding + [(0, 0)]
def _GetVal(use_gpu):
with self.cached_session(use_gpu=use_gpu):
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(ConfigsToTest()):
if dilations:
continue
tf_logging.info(
"Testing DepthwiseConv2DInputGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding, "float32")
# double datatype is currently not supported for convolution ops
# on the ROCm platform
if test.is_built_with_rocm():
continue
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding, "float64")
def testDepthwiseConv2DInputGradExplicitCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(ConfigsToTestExplicit()):
if dilations:
continue
tf_logging.info(
"Testing DepthwiseConv2DInputGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding, "float32")
# double datatype is currently not supported for convolution ops
# on the ROCm platform
if test.is_built_with_rocm():
continue
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding, "float64")
def _CompareBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
stride, padding, dtype):
x0 = np.random.rand(*input_sizes).astype(dtype)
x2 = np.random.rand(*output_sizes).astype(dtype)
if isinstance(padding, list):
padding = [(0, 0)] + padding + [(0, 0)]
def _GetVal(use_gpu):
with self.cached_session(use_gpu=use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(ConfigsToTest()):
if dilations:
continue
tf_logging.info(
"Testing DepthwiseConv2DFilterGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropFilter(input_size, filter_size, output_size, stride,
padding, "float32")
# double datatype is currently not supported for convolution ops
# on the ROCm platform
if test.is_built_with_rocm():
continue
self._CompareBackpropFilter(input_size, filter_size, output_size, stride,
padding, "float64")
def testDepthwiseConv2DFilterGradExplicitCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding, dilations) in enumerate(ConfigsToTestExplicit()):
if dilations:
continue
tf_logging.info(
"Testing DepthwiseConv2DFilterGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropFilter(input_size, filter_size, output_size, stride,
padding, "float32")
# double datatype is currently not supported for convolution ops
# on the ROCm platform
if test.is_built_with_rocm():
continue
self._CompareBackpropFilter(input_size, filter_size, output_size, stride,
padding, "float64")
if __name__ == "__main__":
test.main()
|
gunan/tensorflow
|
tensorflow/python/kernel_tests/depthwise_conv_op_test.py
|
Python
|
apache-2.0
| 40,520
|
## PETRglobals.py [module]
##
# Global variable initializations for the PETRARCH event coder
#
# SYSTEM REQUIREMENTS
# This program has been successfully run under Mac OS 10.10; it is standard Python 2.7
# so it should also run in Unix or Windows.
#
# INITIAL PROVENANCE:
# Programmer: Philip A. Schrodt
# Parus Analytics
# Charlottesville, VA, 22901 U.S.A.
# http://eventdata.parusanalytics.com
#
# GitHub repository: https://github.com/openeventdata/petrarch
#
# Copyright (c) 2014 Philip A. Schrodt. All rights reserved.
#
# This project is part of the Open Event Data Alliance tool set; earlier developments
# were funded in part by National Science Foundation grant SES-1259190
#
# This code is covered under the MIT license
#
# REVISION HISTORY:
# 22-Nov-13: Initial version -- ptab.verbsonly.py
# 28-Apr-14: Latest version
# 20-Nov-14: WriteActorRoot/Text added
# ------------------------------------------------------------------------
# Global variables are listed below: additional details on their structure can
# be found in various function definitions. The various options are described
# in more detail in the config.ini file.
VerbDict = {'verbs': {}, 'phrases': {}, 'transformations': {}}  # verb dictionary
P1VerbDict = {'verbs': {}, 'phrases': {}}  # PETRARCH 1 style verb dictionary
ActorDict = {} # actor dictionary
ActorCodes = [] # actor code list
AgentDict = {} # agent dictionary
DiscardList = {} # discard list
InternalCodingOntology = {}
IssueList = []
IssueCodes = []
ConfigFileName = ""
VerbFileName = "" # verb dictionary
P1VerbFileName = "" # PETRARCH 1 style verb dictionary
ActorFileList = [] # actor dictionary
AgentFileList = [] # agent dictionary
DiscardFileName = "" # discard list
TextFileList = [] # current text or validation file
EventFileName = "" # event output file
IssueFileName = "" # issues list
InternalCodingOntologyFileName = ""  # internal coding ontology mapping file
# element followed by attribute and content pairs for XML line
AttributeList = []
# NULL CODING OPTIONS
NullVerbs = False # Only get verb phrases that are not in the dictionary but are associated with coded noun phrases
NullActors = False # Only get actor phrases that are not in the dictionary but associated with coded verb phrases
NewActorLength = 0 # Maximum length for new actors extracted from noun phrases
# CODING OPTIONS
# Defaults are more or less equivalent to TABARI
RequireDyad = False # Events require a non-null source and target
StoponError = False # Raise stop exception on errors rather than recovering
# OUTPUT OPTIONS
WriteActorRoot = False # Include actor root in event record
WriteActorText = False # Include actor text in event record
WriteEventText = False # Include event text in event record
RunTimeString = '' # used in error and debugging files -- just set it once
# INTERFACE OPTIONS: these can be changed in config.ini
# The default -- all false -- is equivalent to an A)utocode in TABARI
CodeBySentence = False
PauseBySentence = False
PauseByStory = False
# COMMA OPTION : These adjust the length (in words) of comma-delimited clauses
# that are eliminated from the parse. To deactivate, set the max to zero.
# Defaults, based on TABARI, are in ()
# comma_min : internal clause minimum length [2]
# comma_max : internal clause maximum length [8]
# comma_bmin : initial ("begin") clause minimum length [0]
# comma_bmax : initial clause maximum length [0 : deactivated by default]
# comma_emin : terminal ("end") clause minimum length [2]
# comma_emax : terminal clause maximum length [8]
CommaMin = 2
CommaMax = 8
CommaBMin = 0
CommaBMax = 0
CommaEMin = 2
CommaEMax = 8
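# Illustrative example (not part of the shipped defaults): to deactivate the
# elimination of internal comma-delimited clauses entirely, per the note
# above, one would set
#   CommaMax = 0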
stanfordnlp = ''
udpipemodel = ''
CodeWithPetrarch1 = True  # whether to code with PETRARCH 1 verb dictionaries
CodeWithPetrarch2 = True  # whether to code with PETRARCH 2 verb dictionaries
# TEMPORARY VARIABLES
# <14.11.20> Temporary in the sense that these won't be needed when we eventually
# refactor so that codes are some sort of structure other than a string
CodePrimer = '=#=' # separates actor code from root and text strings
RootPrimer = CodePrimer + ':' # start of root string
TextPrimer = CodePrimer + '+' # start of text string
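# Illustrative example (hypothetical actor code, not taken from any
# dictionary): with the primers above, an actor code stored together with its
# root and source text would look like
#   'USAGOV' + RootPrimer + 'WASHINGTON' + TextPrimer + 'White House'
# i.e. the single string 'USAGOV=#=:WASHINGTON=#=+White House'.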
|
openeventdata/UniversalPetrarch
|
UniversalPetrarch/PETRglobals.py
|
Python
|
mit
| 4,242
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
from starcluster import static
config_template = """\
####################################
## StarCluster Configuration File ##
####################################
[global]
# Configure the default cluster template to use when starting a cluster
# defaults to 'smallcluster' defined below. This template should be usable
# out-of-the-box provided you've configured your keypair correctly
DEFAULT_TEMPLATE=smallcluster
# enable experimental features for this release
#ENABLE_EXPERIMENTAL=True
# number of seconds to wait when polling instances (default: 30s)
#REFRESH_INTERVAL=15
# specify a web browser to launch when viewing spot history plots
#WEB_BROWSER=chromium
# split the config into multiple files
#INCLUDE=~/.starcluster/aws, ~/.starcluster/keys, ~/.starcluster/vols
#############################################
## AWS Credentials and Connection Settings ##
#############################################
[aws info]
# This is the AWS credentials section (required).
# These settings apply to all clusters
# replace these with your AWS keys
AWS_ACCESS_KEY_ID = #your_aws_access_key_id
AWS_SECRET_ACCESS_KEY = #your_secret_access_key
# replace this with your account number
AWS_USER_ID= #your userid
# Uncomment to specify a different Amazon AWS region (OPTIONAL)
# (defaults to us-east-1 if not specified)
# NOTE: AMIs have to be migrated!
#AWS_REGION_NAME = eu-west-1
#AWS_REGION_HOST = ec2.eu-west-1.amazonaws.com
# Uncomment these settings when creating an instance-store (S3) AMI (OPTIONAL)
#EC2_CERT = /path/to/your/cert-asdf0as9df092039asdfi02089.pem
#EC2_PRIVATE_KEY = /path/to/your/pk-asdfasd890f200909.pem
# Uncomment these settings to use a proxy host when connecting to AWS
#AWS_PROXY = your.proxyhost.com
#AWS_PROXY_PORT = 8080
#AWS_PROXY_USER = yourproxyuser
#AWS_PROXY_PASS = yourproxypass
###########################
## Defining EC2 Keypairs ##
###########################
# Sections starting with "key" define your keypairs. See "starcluster createkey
# --help" for instructions on how to create a new keypair. Section name should
# match your key name e.g.:
[key mykey]
KEY_LOCATION=~/.ssh/mykey.rsa
# You can of course have multiple keypair sections
# [key myotherkey]
# KEY_LOCATION=~/.ssh/myotherkey.rsa
################################
## Defining Cluster Templates ##
################################
# Sections starting with "cluster" represent a cluster template. These
# "templates" are a collection of settings that define a single cluster
# configuration and are used when creating and configuring a cluster. You can
# change which template to use when creating your cluster using the -c option
# to the start command:
#
# $ starcluster start -c mediumcluster mycluster
#
# If a template is not specified then the template defined by DEFAULT_TEMPLATE
# in the [global] section above is used. Below is the "default" template named
# "smallcluster". You can rename it but dont forget to update the
# DEFAULT_TEMPLATE setting in the [global] section above. See the next section
# on defining multiple templates.
[cluster smallcluster]
# change this to the name of one of the keypair sections defined above
KEYNAME = mykey
# number of ec2 instances to launch
CLUSTER_SIZE = 2
# create the following user on the cluster
CLUSTER_USER = sgeadmin
# optionally specify shell (defaults to bash)
# (options: %(shells)s)
CLUSTER_SHELL = bash
# Uncomment to prepend the cluster tag to the dns name of all nodes created
# using this cluster config, e.g. mycluster-master and mycluster-node001.
# If you choose to enable this option, it's recommended that you enable it in
# the DEFAULT_TEMPLATE so all nodes will automatically have the prefix
# DNS_PREFIX = True
# AMI to use for cluster nodes. These AMIs are for the us-east-1 region.
# Use the 'listpublic' command to list StarCluster AMIs in other regions
# The base i386 StarCluster AMI is %(x86_ami)s
# The base x86_64 StarCluster AMI is %(x86_64_ami)s
# The base HVM StarCluster AMI is %(hvm_ami)s
NODE_IMAGE_ID = %(x86_64_ami)s
# instance type for all cluster nodes
# (options: %(instance_types)s)
NODE_INSTANCE_TYPE = m1.small
# Launch cluster in a VPC subnet (OPTIONAL)
#SUBNET_IDS=subnet-99999999
# Uncomment to assign public IPs to cluster nodes (VPC-ONLY) (OPTIONAL)
# WARNING: Using public IPs with a VPC requires:
# 1. An internet gateway attached to the VPC
# 2. A route table entry linked to the VPC's internet gateway and associated
# with the VPC subnet with a destination CIDR block of 0.0.0.0/0
# WARNING: Public IPs allow direct access to your VPC nodes from the internet
#PUBLIC_IPS=True
# Uncomment to disable installing/configuring a queueing system on the
# cluster (SGE)
#DISABLE_QUEUE=True
# Uncomment to specify a different instance type for the master node (OPTIONAL)
# (defaults to NODE_INSTANCE_TYPE if not specified)
#MASTER_INSTANCE_TYPE = m1.small
# Uncomment to specify a separate AMI to use for the master node. (OPTIONAL)
# (defaults to NODE_IMAGE_ID if not specified)
#MASTER_IMAGE_ID = %(x86_64_ami)s (OPTIONAL)
# availability zone to launch the cluster in (OPTIONAL)
# (automatically determined based on volumes (if any) or
# selected by Amazon if not specified)
#AVAILABILITY_ZONE = us-east-1c
# list of volumes to attach to the master node (OPTIONAL)
# these volumes, if any, will be NFS shared to the worker nodes
# see "Configuring EBS Volumes" below on how to define volume sections
#VOLUMES = oceandata, biodata
# list of plugins to load after StarCluster's default setup routines (OPTIONAL)
# see "Configuring StarCluster Plugins" below on how to define plugin sections
#PLUGINS = myplugin, myplugin2
# list of permissions (or firewall rules) to apply to the cluster's security
# group (OPTIONAL).
#PERMISSIONS = ssh, http
# Uncomment to always create a spot cluster when creating a new cluster from
# this template. The following example will place a $0.50 bid for each spot
# request.
#SPOT_BID = 0.50
# Uncomment to specify one or more userdata scripts to use when launching
# cluster instances. Supports cloudinit. All scripts combined must be less than
# 16KB
#USERDATA_SCRIPTS = /path/to/script1, /path/to/script2
###########################################
## Defining Additional Cluster Templates ##
###########################################
# You can also define multiple cluster templates. You can either supply all
# configuration options as with smallcluster above, or create an
# EXTENDS=<cluster_name> variable in the new cluster section to use all
# settings from <cluster_name> as defaults. Below are example templates that
# use the EXTENDS feature:
# [cluster mediumcluster]
# Declares that this cluster uses smallcluster as defaults
# EXTENDS=smallcluster
# This section is the same as smallcluster except for the following settings:
# KEYNAME=myotherkey
# NODE_INSTANCE_TYPE = c1.xlarge
# CLUSTER_SIZE=8
# VOLUMES = biodata2
# [cluster largecluster]
# Declares that this cluster uses mediumcluster as defaults
# EXTENDS=mediumcluster
# This section is the same as mediumcluster except for the following variables:
# CLUSTER_SIZE=16
#############################
## Configuring EBS Volumes ##
#############################
# StarCluster can attach one or more EBS volumes to the master and then
# NFS-share these volumes to all of the worker nodes. A new [volume] section
# must be created for each EBS volume you wish to use with StarCluster. The
# section name is a tag for your volume. This tag is used in the VOLUMES
# setting of a cluster template to declare that an EBS volume is to be mounted
# and NFS-shared on the cluster (see the commented VOLUMES setting in the
# example 'smallcluster' template above). Below are some examples of defining
# and configuring EBS volumes to be used with StarCluster:
# Sections starting with "volume" define your EBS volumes
# [volume biodata]
# attach vol-c999999 to /home on master node and NFS-share it to worker nodes
# VOLUME_ID = vol-c999999
# MOUNT_PATH = /home
# Same volume as above, but mounts to different location
# [volume biodata2]
# VOLUME_ID = vol-c999999
# MOUNT_PATH = /opt/
# Another volume example
# [volume oceandata]
# VOLUME_ID = vol-d7777777
# MOUNT_PATH = /mydata
# By default StarCluster will first attempt to mount the entire volume device;
# failing that, it will try the first partition. If you have more than one
# partition you will need to set the PARTITION number, e.g.:
# [volume oceandata]
# VOLUME_ID = vol-d7777777
# MOUNT_PATH = /mydata
# PARTITION = 2
############################################
## Configuring Security Group Permissions ##
############################################
# Sections starting with "permission" define security group rules to
# automatically apply to newly created clusters. IP_PROTOCOL in the following
# examples can be: tcp, udp, or icmp. CIDR_IP defaults to 0.0.0.0/0 or
# "open to the world"
# open port 80 on the cluster to the world
# [permission http]
# IP_PROTOCOL = tcp
# FROM_PORT = 80
# TO_PORT = 80
# open https on the cluster to the world
# [permission https]
# IP_PROTOCOL = tcp
# FROM_PORT = 443
# TO_PORT = 443
# open port 80 on the cluster to an ip range using CIDR_IP
# [permission http]
# IP_PROTOCOL = tcp
# FROM_PORT = 80
# TO_PORT = 80
# CIDR_IP = 18.0.0.0/8
# restrict ssh access to a single ip address (<your_ip>)
# [permission ssh]
# IP_PROTOCOL = tcp
# FROM_PORT = 22
# TO_PORT = 22
# CIDR_IP = <your_ip>/32
#####################################
## Configuring StarCluster Plugins ##
#####################################
# Sections starting with "plugin" define a custom python class which perform
# additional configurations to StarCluster's default routines. These plugins
# can be assigned to a cluster template to customize the setup procedure when
# starting a cluster from this template (see the commented PLUGINS setting in
# the 'smallcluster' template above). Below is an example of defining a user
# plugin called 'myplugin':
# [plugin myplugin]
# NOTE: myplugin module must either live in ~/.starcluster/plugins or be
# on your PYTHONPATH
# SETUP_CLASS = myplugin.SetupClass
# extra settings are passed as __init__ arguments to your plugin:
# SOME_PARAM_FOR_MY_PLUGIN = 1
# SOME_OTHER_PARAM = 2
######################
## Built-in Plugins ##
######################
# The following plugins ship with StarCluster and should work out-of-the-box.
# Uncomment as needed. Don't forget to update your PLUGINS list!
# See http://star.mit.edu/cluster/docs/latest/plugins for plugin details.
#
# Use this plugin to install one or more packages on all nodes
# [plugin pkginstaller]
# SETUP_CLASS = starcluster.plugins.pkginstaller.PackageInstaller
# # list of apt-get installable packages
# PACKAGES = mongodb, python-pymongo
#
# Use this plugin to create one or more cluster users and download all user ssh
# keys to $HOME/.starcluster/user_keys/<cluster>-<region>.tar.gz
# [plugin createusers]
# SETUP_CLASS = starcluster.plugins.users.CreateUsers
# NUM_USERS = 30
# # you can also comment out NUM_USERS and specify exact usernames, e.g.
# # usernames = linus, tux, larry
# DOWNLOAD_KEYS = True
#
# Use this plugin to configure the Condor queueing system
# [plugin condor]
# SETUP_CLASS = starcluster.plugins.condor.CondorPlugin
#
# The SGE plugin is enabled by default and not strictly required. Only use this
# if you want to tweak advanced settings in which case you should also set
# DISABLE_QUEUE=TRUE in your cluster template. See the plugin doc for more
# details.
# [plugin sge]
# SETUP_CLASS = starcluster.plugins.sge.SGEPlugin
# MASTER_IS_EXEC_HOST = False
#
# The IPCluster plugin configures a parallel IPython cluster with optional
# web notebook support. This allows you to run Python code in parallel with low
# latency message passing via ZeroMQ.
# [plugin ipcluster]
# SETUP_CLASS = starcluster.plugins.ipcluster.IPCluster
# # Enable the IPython notebook server (optional)
# ENABLE_NOTEBOOK = True
# # Set a password for the notebook for increased security
# # This is optional but *highly* recommended
# NOTEBOOK_PASSWD = a-secret-password
# # Set a custom directory for storing/loading notebooks (optional)
# NOTEBOOK_DIRECTORY = /path/to/notebook/dir
# # Set a custom packer. Must be one of 'json', 'pickle', or 'msgpack'
# # This is optional.
# PACKER = pickle
#
# Use this plugin to create a cluster SSH "dashboard" using tmux. The plugin
# creates a tmux session on the master node that automatically connects to all
# the worker nodes over SSH. Attaching to the session shows a separate window
# for each node and each window is logged into the node via SSH.
# [plugin tmux]
# SETUP_CLASS = starcluster.plugins.tmux.TmuxControlCenter
#
# Use this plugin to change the default MPI implementation on the
# cluster from OpenMPI to MPICH2.
# [plugin mpich2]
# SETUP_CLASS = starcluster.plugins.mpich2.MPICH2Setup
#
# Configure a hadoop cluster. (includes dumbo setup)
# [plugin hadoop]
# SETUP_CLASS = starcluster.plugins.hadoop.Hadoop
#
# Configure a distributed MySQL Cluster
# [plugin mysqlcluster]
# SETUP_CLASS = starcluster.plugins.mysql.MysqlCluster
# NUM_REPLICAS = 2
# DATA_MEMORY = 80M
# INDEX_MEMORY = 18M
# DUMP_FILE = test.sql
# DUMP_INTERVAL = 60
# DEDICATED_QUERY = True
# NUM_DATA_NODES = 2
#
# Install and setup an Xvfb server on each cluster node
# [plugin xvfb]
# SETUP_CLASS = starcluster.plugins.xvfb.XvfbSetup
#
""" % {
'x86_ami': static.BASE_AMI_32,
'x86_64_ami': static.BASE_AMI_64,
'hvm_ami': static.BASE_AMI_HVM,
'instance_types': ', '.join(static.INSTANCE_TYPES.keys()),
'shells': ', '.join(static.AVAILABLE_SHELLS.keys()),
}
DASHES = '-' * 10
copy_below = ' '.join([DASHES, 'COPY BELOW THIS LINE', DASHES])
end_copy = ' '.join([DASHES, 'END COPY', DASHES])
copy_paste_template = '\n'.join([copy_below, config_template, end_copy]) + '\n'
|
cancan101/StarCluster
|
starcluster/templates/config.py
|
Python
|
lgpl-3.0
| 14,546
|
import os
from datetime import timedelta
from environs import Env
from marshmallow.validate import OneOf, Email
# Read .env if any
env = Env(expand_vars=True)
env.read_env(env.path("DOTENV", ".env"), override=False)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def make_path(rel: str) -> str:
"""Helper to build paths from the project's root"""
return os.path.join(BASE_DIR, rel.replace('/', os.path.sep))
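# For example (illustrative): make_path('exports/2020') resolves to
# os.path.join(BASE_DIR, 'exports', '2020'), with '/' swapped for the
# platform's path separator.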
# --------------------------------------------------------------------------
# Application Configuration
# --------------------------------------------------------------------------
EXPORTS_DIR = make_path('exports')
MAX_ONGOING_TIME = timedelta(minutes=15)
MAX_PAYMENT_TIME = timedelta(hours=1)
MAX_VALIDATION_TIME = timedelta(days=30)
API_MODEL_CACHE_TIMEOUT = timedelta(minutes=30)
VALID_TVA = {0, 5.5, 10, 20}
# --------------------------------------------------------------------------
# Services Configuration
# --------------------------------------------------------------------------
# PayUTC Payment services
PAYUTC = {
'app_key': env.str("PAYUTC_APP_KEY"),
'mail': env.str("PAYUTC_MAIL", validate=[Email()]),
'password': env.str("PAYUTC_PASSWORD"),
}
# Portail des Assos OAuth config
OAUTH = {
'portal': {
'client_id': env.str("PORTAL_ID"),
'client_secret': env.str("PORTAL_KEY"),
'redirect_uri': env.str("PORTAL_CALLBACK"),
'base_url': 'https://assos.utc.fr/api/v1/',
'authorize_url': 'https://assos.utc.fr/oauth/authorize',
'access_token_url': 'https://assos.utc.fr/oauth/token',
'login_url': 'https://assos.utc.fr/login',
'logout_url': 'https://assos.utc.fr/logout',
        # TODO: Set scope to 'user-get-assos-members-joined-now'
'scope': 'user-get-assos user-get-info user-get-roles',
},
}
# Database server
DATABASES = {
'default': env.dj_db_url("DATABASE_URL"),
}
# Email server
email = env.dj_email_url("EMAIL_URL")
EMAIL_HOST = email["EMAIL_HOST"]
EMAIL_PORT = email["EMAIL_PORT"]
EMAIL_HOST_USER = email["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = email["EMAIL_HOST_PASSWORD"]
EMAIL_USE_SSL = email["EMAIL_USE_SSL"]
EMAIL_USE_TLS = email["EMAIL_USE_TLS"]
# --------------------------------------------------------------------------
# Debug & Security
# --------------------------------------------------------------------------
STAGE = env.str("STAGE", "prod", validate=[OneOf(["prod", "test", "dev"])])
DEBUG = STAGE in {"dev", "test"}
SECRET_KEY = env.str("SECRET_KEY")
# Base url
BASE_URL = env.str("BASE_URL", None)
FORCE_SCRIPT_NAME = BASE_URL
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", ["assos.utc.fr"])
HTTPS_ENABLED = env.bool("HTTPS_ENABLED", STAGE == "prod")
SECURE_SSL_REDIRECT = HTTPS_ENABLED
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = HTTPS_ENABLED
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# Cross Site Request Foregery protection
CSRF_COOKIE_SECURE = HTTPS_ENABLED
CSRF_TRUSTED_ORIGINS = ALLOWED_HOSTS
CSRF_COOKIE_HTTPONLY = False # False to enable the use of cookies in ajax requests
CSRF_USE_SESSIONS = False # False to enable the use of cookies in ajax requests
# Cross-Origin Resource Sharing protection
CORS_ORIGIN_ALLOW_ALL = DEBUG
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = env.list("CORS_ORIGIN_WHITELIST", [f"https://{url}" for url in ALLOWED_HOSTS])
if env.bool("RUN_THROUGH_PROXY", False):
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# --------------------------------------------------------------------------
# Django REST Configuration
# --------------------------------------------------------------------------
BROWSABLE_API_WITH_FORMS = env.bool("BROWSABLE_API_WITH_FORMS", DEBUG)
APPEND_SLASH = False
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'authentication.oauth.OAuthAuthentication',
),
'EXCEPTION_HANDLER': 'core.exceptions.exception_handler',
'PAGE_SIZE': 10,
'DEFAULT_PAGINATION_CLASS': 'core.views.Pagination',
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'core.utils.BrowsableAPIRenderer',
),
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.TemplateHTMLRenderer'
),
}
# --------------------------------------------------------------------------
# Django Configuration
# --------------------------------------------------------------------------
INSTALLED_APPS = [
# Django
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django_extensions',
# Django REST
'rest_framework',
'corsheaders',
# Woolly
'woolly_api.admin.AdminConfig',
'core',
'authentication',
'sales',
'payment',
]
# Urls & WSGI
ROOT_URLCONF = 'woolly_api.urls'
WSGI_APPLICATION = 'woolly_api.wsgi.application'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Authentication
LOGIN_URL = 'login'
AUTH_USER_MODEL = 'authentication.User'
# Only to access web admin panel
AUTHENTICATION_BACKENDS = (
'authentication.oauth.OAuthBackend',
)
AUTH_PASSWORD_VALIDATORS = (
{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },
{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },
{ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },
)
# Internationalization
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGE_CODE = 'fr'
TIME_ZONE = 'Europe/Paris'
# --------------------------------------------------------------------------
# Static Files & Templates
# --------------------------------------------------------------------------
STATIC_URL = f'{BASE_URL}/static/' if BASE_URL else '/static/'
STATIC_ROOT = make_path('static/')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (
make_path('templates/'),
),
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.debug',
'django.template.context_processors.request',
],
},
},
]
# --------------------------------------------------------------------------
# Logging
# --------------------------------------------------------------------------
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[{asctime}] {levelname} - {message}',
'style': '{',
},
'verbose': {
'format': '[{asctime}] {levelname} in {filename}@{lineno} - {message}',
'style': '{',
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file': {
'level': 'WARNING',
'filters': ['require_debug_false'],
'class': 'logging.handlers.RotatingFileHandler',
'filename': make_path('woolly.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 5,
'formatter': 'verbose',
},
},
'loggers': {
'': {
'handlers': ['console', 'file'],
'level': 'WARNING',
},
'woolly': {
'handlers': ['console', 'file'],
'level': env.log_level("LOG_LEVEL", "INFO"),
'propagate': True,
}
},
}
|
simde-utc/woolly-api
|
woolly_api/settings.py
|
Python
|
gpl-3.0
| 8,899
|
import struct
import GLWindow
import ModernGL
# Window & Context
wnd = GLWindow.create_window()
ctx = ModernGL.create_context()
# Shaders & Program
prog = ctx.program([
ctx.vertex_shader('''
#version 330
in vec2 vert;
in vec3 vert_color;
out vec3 frag_color;
uniform vec2 scale;
uniform float rotation;
void main() {
frag_color = vert_color;
mat2 rot = mat2(
cos(rotation), sin(rotation),
-sin(rotation), cos(rotation)
);
gl_Position = vec4((rot * vert) * scale, 0.0, 1.0);
}
'''),
ctx.fragment_shader('''
#version 330
in vec3 frag_color;
out vec4 color;
void main() {
color = vec4(frag_color, 1.0);
}
'''),
])
# Uniforms
scale = prog.uniforms['scale']
rotation = prog.uniforms['rotation']
width, height = wnd.size
scale.value = (height / width * 0.75, 0.75)
# Buffer
vbo = ctx.buffer(struct.pack(
'15f',
1.0, 0.0,
1.0, 0.0, 0.0,
-0.5, 0.86,
0.0, 1.0, 0.0,
-0.5, -0.86,
0.0, 0.0, 1.0,
))
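# Editor's note: each vertex above interleaves 2 position floats (vert) with
# 3 color floats (vert_color); 3 vertices x 5 floats each gives the '15f' format.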
# Put everything together
vao = ctx.simple_vertex_array(prog, vbo, ['vert', 'vert_color'])
# Main loop
while wnd.update():
ctx.viewport = wnd.viewport
ctx.clear(0.9, 0.9, 0.9)
rotation.value = wnd.time
vao.render()
|
cprogrammer1994/ModernGL
|
examples/old-examples/GLWindow/02_uniforms_and_attributes.py
|
Python
|
mit
| 1,381
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0002_task_jplag_up_to_date'),
('checker', '0003_isabellechecker_trusted_theories'),
]
operations = [
migrations.CreateModel(
name='ScalaBuilder',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
                ('order', models.IntegerField(help_text='Determines the order in which the checker will start. The values need not be consecutive!')),
('public', models.BooleanField(default=True, help_text='Test results are displayed to the submitter.')),
('required', models.BooleanField(default=False, help_text='The test must be passed to submit the solution.')),
('always', models.BooleanField(default=True, help_text='The test will run on submission time.')),
('critical', models.BooleanField(default=False, help_text='If this test fails, do not display further test results.')),
('_flags', models.CharField(default=b'-Wall', help_text='Compiler flags', max_length=1000, blank=True)),
('_output_flags', models.CharField(default=b'-o %s', help_text="Output flags. '%s' will be replaced by the program name.", max_length=1000, blank=True)),
('_libs', models.CharField(default=b'', help_text='Compiler libraries', max_length=1000, blank=True)),
('_file_pattern', models.CharField(default=b'^[a-zA-Z0-9_]*$', help_text='Regular expression describing all source files to be passed to the compiler.', max_length=1000)),
('_main_required', models.BooleanField(default=True, help_text='Is a submission required to provide a main method?')),
('task', models.ForeignKey(to='tasks.Task')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='textchecker',
name='choices',
field=models.IntegerField(default=1, verbose_name=b'Select:', choices=[(0, b'The text must not be in the solution'), (1, b'The text has to be in the solution')]),
),
migrations.AlterField(
model_name='textchecker',
name='text',
field=models.TextField(),
),
]
|
lohner/Praktomat
|
src/checker/migrations/0004_textchecker_and_scalabuilder.py
|
Python
|
gpl-2.0
| 2,540
|
# from fractions import gcd
def gcd(a, b):
while b:
a, b = b, a % b
return a
def lcm(a, b):
return a / gcd(a, b) * b
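# Editor's sketch: a quick self-check of the helpers above.
if __name__ == '__main__':
    assert gcd(12, 18) == 6
    assert lcm(4, 6) == 12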
|
warmsea/tc-srm
|
snippets/fractions.py
|
Python
|
mit
| 139
|
"""Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
lines = getlines(filename, module_globals)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
return ''
# The cache
cache = {} # The cache
def clearcache():
"""Clear the cache entirely."""
global cache
cache = {}
def getlines(filename, module_globals=None):
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
return cache[filename][2]
else:
return updatecache(filename, module_globals)
def checkcache(filename=None):
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
if filename is None:
filenames = cache.keys()
else:
if filename in cache:
filenames = [filename]
else:
return
for filename in filenames:
size, mtime, lines, fullname = cache[filename]
if mtime is None:
continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size != stat.st_size or mtime != stat.st_mtime:
del cache[filename]
def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if filename in cache:
del cache[filename]
if not filename or filename[0] + filename[-1] == '<>':
return []
fullname = filename
try:
stat = os.stat(fullname)
except os.error, msg:
basename = filename
# Try for a __loader__, if available
if module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return []
cache[filename] = (
len(data), None,
[line+'\n' for line in data.splitlines()], fullname
)
return cache[filename][2]
# Try looking through the module search path, which is only useful
# when handling a relative filename.
if os.path.isabs(filename):
return []
for dirname in sys.path:
# When using imputil, sys.path may contain things other than
# strings; ignore them when it happens.
try:
fullname = os.path.join(dirname, basename)
except (TypeError, AttributeError):
# Not sufficiently string-like to do anything useful with.
pass
else:
try:
stat = os.stat(fullname)
break
except os.error:
pass
else:
# No luck
## print '*** Cannot stat', filename, ':', msg
return []
try:
fp = open(fullname, 'rU')
lines = fp.readlines()
fp.close()
except IOError, msg:
## print '*** Cannot open', fullname, ':', msg
return []
size, mtime = stat.st_size, stat.st_mtime
cache[filename] = size, mtime, lines, fullname
return lines
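# Editor's sketch (not part of the original module): exercise the cache
# against this file's own source; guarded so importing stays side-effect free.
if __name__ == '__main__':
    print getline(__file__, 1).rstrip()
    checkcache()
    clearcache()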
|
MicroTrustRepos/microkernel
|
src/l4/pkg/python/contrib/Lib/linecache.py
|
Python
|
gpl-2.0
| 4,055
|
# Components.py - Abstract class for any Lustre filesystem components.
# Copyright (C) 2010-2015 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import sys
from itertools import ifilter, groupby
from operator import attrgetter, itemgetter
from ClusterShell.NodeSet import NodeSet
# Constants for component states.
# Error codes should have the largest values, see FileSystem._check_errors()
MOUNTED = 0
EXTERNAL = 1
RECOVERING = 2
OFFLINE = 3
INPROGRESS = 4
CLIENT_ERROR = 5
TARGET_ERROR = 6
RUNTIME_ERROR = 7
INACTIVE = 8
MIGRATED = 9
NO_DEVICE = 10
from Shine.Lustre import ComponentError
from Shine.Lustre.Server import ServerGroup
from Shine.Lustre.Actions.Status import Status
from Shine.Lustre.Actions.Execute import Execute
class Component(object):
"""
Abstract class for all common part of all Lustre filesystem components.
"""
# Text name for this component
TYPE = "(should be overridden)"
# Each component knows which component it depends on.
# Its start order should be this component start order + 1.
    # This value will be used to sort the components when starting.
START_ORDER = 0
# Order used when displaying a list of components
DISPLAY_ORDER = 0
# Text mapping for each possible states
STATE_TEXT_MAP = {}
def __init__(self, fs, server, enabled = True, mode = 'managed',
active = 'manual'):
# File system
self.fs = fs
# Each component resides on one server
self.server = server
# Status
self.state = None
# Enabled or not
self.action_enabled = enabled
# List of running action
self._running_actions = []
# Component behaviour change depending on its mode.
self._mode = mode
# Component active state
self.active = active
@property
def label(self):
"""
Return the component label.
It contains the filesystem name and component type.
"""
return "%s-%s" % (self.fs.fs_name, self.TYPE)
def allservers(self):
"""
Return all servers this target can run on. On standard component
there is only one server.
"""
return ServerGroup([self.server])
def uniqueid(self):
"""Return a unique string representing this component."""
return "%s-%s" % (self.label, ','.join(self.server.nids))
def longtext(self):
"""
Return a string describing this component, for output purposes.
"""
return self.label
def update_server(self):
"""
Compute the server to display for the component.
This method does nothing on all components except for Target ones.
"""
pass
def update(self, other):
"""
Update my serializable fields from other/distant object.
"""
self.state = other.state
def sanitize_state(self, nodes=None):
"""
Clean component state if it is wrong.
"""
if self.state is None:
self.state = RUNTIME_ERROR
        # At this step, there should be no INPROGRESS component left.
        # If there is, it is a bug: change its state to RUNTIME_ERROR.
        # INPROGRESS management could be changed to use the running
        # action list.
        # Starting with v1.3, no code sets INPROGRESS anymore; this
        # branch is kept for compatibility with older clients.
elif self.state == INPROGRESS:
actions = ""
if len(self._list_action()):
actions = "actions: " + ", ".join(self._list_action())
print >> sys.stderr, "ERROR: bad state for %s: %d %s" % \
(self.label, self.state, actions)
self.state = RUNTIME_ERROR
def __getstate__(self):
odict = self.__dict__.copy()
del odict['fs']
return odict
def __setstate__(self, state):
self.__dict__.update(state)
self.fs = None
#
# Component behaviour
#
def capable(self, action):
# Do I implement this method?
        # XXX: Presently, this does not check that the attribute is callable;
        # it is also used to test plain attributes such as 'label'.
return hasattr(self, action)
def is_external(self):
return self._mode == 'external'
def is_active(self):
return self.active != 'no'
#
# Component printing methods.
#
def text_statusonly(self):
"""
Return a string version of the component state, only.
"""
return Component.text_status(self)
def text_status(self):
"""
Return a human text form for the component state.
"""
return self.STATE_TEXT_MAP.get(self.state, "BUG STATE %s" % self.state)
#
# State checking methods.
#
def lustre_check(self):
"""
Check component health at Lustre level.
"""
raise NotImplementedError("Component must implement this.")
def full_check(self, mountdata=True):
"""
Check component states, at Lustre level, and any other required ones.
"""
self.lustre_check()
#
# Inprogress action methods
#
def _add_action(self, act):
"""
Add the named action to the running action list.
"""
self._running_actions.append(act)
def _del_action(self, act):
"""
Remove the named action from the running action list.
"""
self._running_actions.remove(act)
def _list_action(self):
"""
Return the running action list.
"""
return self._running_actions
#
# Event raising method
#
def action_event(self, act, status, result=None):
"""Send an event."""
if status == 'start':
self._add_action(act.NAME)
elif status in ('done', 'timeout', 'failed'):
self._del_action(act.NAME)
self.fs.local_event('comp', info=act.info(), status=status,
result=result)
#
# Helper methods to check component state in Actions.
#
def is_started(self):
"""Return True if the component is started."""
return self.state == MOUNTED
def is_stopped(self):
"""Return True if the component is stopped."""
return self.state == OFFLINE
#
# Component common actions
#
def status(self, **kwargs):
"""Check component status."""
return Status(self, **kwargs)
def execute(self, **kwargs):
"""Exec a custom command."""
return Execute(self, **kwargs)
class ComponentGroup(object):
"""
Gather and efficiently manipulate list of Components.
"""
def __init__(self, iterable=None):
self._elems = {}
if iterable:
self._elems = dict([(comp.uniqueid(), comp) for comp in iterable])
def __len__(self):
return len(self._elems)
def __iter__(self):
return self._elems.itervalues()
def __contains__(self, comp):
return comp.uniqueid() in self._elems
def __getitem__(self, key):
return self._elems[key]
def __str__(self):
return str(self.labels())
def add(self, component):
"""
Add a new component to the group.
Raises a KeyError if a component
with the same uniqueid() is already added.
"""
if component in self:
raise KeyError("A component with id %s already exists." %
component.uniqueid())
self._elems[component.uniqueid()] = component
def update(self, iterable):
"""
Insert all components from iterable.
"""
for comp in iterable:
self.add(comp)
def __or__(self, other):
"""
Implements the | operator. So s | t returns a new group with
elements from both s and t.
"""
if not isinstance(other, ComponentGroup):
return NotImplemented
grp = ComponentGroup()
grp.update(iter(self))
grp.update(iter(other))
return grp
#
# Useful getters
#
def labels(self):
"""Return a NodeSet containing all component label."""
return NodeSet.fromlist((comp.label for comp in self))
def servers(self):
"""Return a NodeSet containing all component servers."""
return NodeSet.fromlist((comp.server.hostname for comp in self))
def allservers(self):
"""Return a NodeSet containing all component servers and fail
servers."""
servers = self.servers()
for comp in self.filter(supports='failservers'):
servers.update(comp.failservers.nodeset())
return servers
#
# Filtering methods
#
def filter(self, supports=None, key=None):
"""
        Returns a new ComponentGroup instance containing only the components
        that match the filtering rules.
Your own filtering rule could be defined using the key argument.
Example: Return only the OST from the group
>>> group.filter(key=lambda t: t.TYPE == OST.TYPE)
"""
if supports and not key:
filter_key = lambda x: x.capable(supports)
elif supports and key:
filter_key = lambda x: key(x) and x.capable(supports)
else:
filter_key = key
return ComponentGroup(ifilter(filter_key, iter(self)))
def enabled(self):
"""Uses filter() to return only the enabled components."""
key = attrgetter('action_enabled')
return self.filter(key=key)
def managed(self, supports=None, inactive=False):
"""Uses filter() to return only the enabled and managed components."""
if inactive == True:
# targets that are inactive _and_ external are also selected
key = lambda comp: comp.action_enabled and \
((not comp.is_external()) or \
(comp.is_external() and not comp.is_active()))
else:
key = lambda comp: comp.action_enabled and \
not comp.is_external() and \
comp.is_active()
return self.filter(supports, key=key)
#
# Grouping methods
#
def groupby(self, attr=None, key=None, reverse=False):
"""Return an iterator over the group components.
The component will be grouped using one of their attribute or using a
custom key.
Example #1: Group component by type
>>> for comp_type, comp_list in group.groupby(attr='TYPE'):
...
Example #2: Group component first by type, then by server
>>> key = lambda t: (t.TYPE, t.server)
>>> for comp_type, comp_list in group.groupby(key=key):
...
"""
        assert (not (attr and key)), "Unsupported: cannot use both attr and key"
if key is None and attr is not None:
key = attrgetter(attr)
# Sort the components using the key, and then group results
# using the same key.
sortlist = sorted(iter(self), key=key, reverse=reverse)
grouped = groupby(sortlist, key)
return ((grpkey, ComponentGroup(comps)) for grpkey, comps in grouped)
def groupbyallservers(self):
"""
Group components per server taking into account
all possible servers for each component.
"""
# Create a list of (server, component) tuples
srvcomps = []
for comp in self:
for srv in comp.allservers():
srvcomps.append((srv, comp))
# Sort the components using the server name in each tuple as key,
# and then, group results using the same key.
sortlist = sorted(srvcomps, key=itemgetter(0))
grouped = groupby(sortlist, key=itemgetter(0))
return ((grpkey, ComponentGroup(map(itemgetter(1), tpl)))
for grpkey, tpl in grouped)
def groupbyserver(self, allservers=False):
"""Uses groupby() to group component per server."""
if allservers is False:
return self.groupby(attr='server')
else:
return self.groupbyallservers()
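# Editor's usage sketch (illustrative, not part of shine): ComponentGroup only
# needs objects exposing uniqueid(), label and capable() for the calls below.
if __name__ == '__main__':
    class _FakeComp(object):
        def __init__(self, label):
            self.label = label
            self.action_enabled = True
        def uniqueid(self):
            return self.label
        def capable(self, action):
            return hasattr(self, action)
    grp = ComponentGroup([_FakeComp('demo1'), _FakeComp('demo2')])
    print len(grp), str(grp.filter(key=lambda c: c.label.endswith('1')))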
|
stanford-rc/shine
|
lib/Shine/Lustre/Component.py
|
Python
|
gpl-2.0
| 12,978
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.core.plotseds Plot SEDs for the output of a SKIRT simulation.
#
# This script plots SEDs listed in SKIRT \c prefix_instr_sed.dat output files. All SEDs resulting from
# a particular simulation are plotted on the same axes. The result is saved as a PDF file placed next
# to the original files, and with a similar name constructed as \c prefix_sed.pdf (i.e. leaving out the
# instrument names).
#
# The script expects the complete output of a SKIRT simulation to be present (including log file etc.).
# If there are no arguments, the script processes all simulation output sets residing in the current directory.
# If the first argument contains a slash, the script processes all simulation output sets in the indicated directory.
# If the first argument does not contain a slash, the script processes just the simulation in the current directory
# with the indicated prefix.
#
# By default both axes are autoscaled; you can hardcode specific ranges in the script.
# -----------------------------------------------------------------
# Import standard modules
import sys
# Import the relevant PTS classes and modules
from pts.core.simulation.simulation import createsimulations
from pts.core.plot.seds import plotseds
# -----------------------------------------------------------------
# a value of None means that the axis is autoscaled;
# alternatively specify a range through a tuple with min and max values
xlim = None
ylim = None
#xlim = ( 5e-2, 1e3 )
#ylim = ( 1e-13, 1e-9 )
# -----------------------------------------------------------------
print "Starting plotseds..."
# get the command-line argument specifying the simulation(s)
argument = sys.argv[1] if len(sys.argv) > 1 else ""
# construct the list of simulation objects and make the plots
for simulation in createsimulations(argument):
plotseds(simulation, xlim=xlim, ylim=ylim)
print "Finished plotseds"
# -----------------------------------------------------------------
|
SKIRT/PTS
|
do/core/plotseds.py
|
Python
|
agpl-3.0
| 2,288
|
"""
Studio Index, home and dashboard pages. These are the starting pages for users.
"""
from bok_choy.page_object import PageObject
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from common.test.acceptance.pages.studio import BASE_URL
from common.test.acceptance.pages.studio.login import LoginPage
from common.test.acceptance.pages.studio.signup import SignupPage
from common.test.acceptance.pages.studio.utils import HelpMixin
class HeaderMixin(object):
"""
    Mixin class used for pressing the buttons in the header.
"""
def click_sign_up(self):
"""
Press the Sign Up button in the header.
"""
next_page = SignupPage(self.browser)
self.q(css='.action-signup')[0].click()
return next_page.wait_for_page()
def click_sign_in(self):
"""
Press the Sign In button in the header.
"""
next_page = LoginPage(self.browser)
self.q(css='.action-signin')[0].click()
return next_page.wait_for_page()
class IndexPage(PageObject, HeaderMixin, HelpMixin):
"""
Home page for Studio when not logged in.
"""
url = BASE_URL + "/"
def is_browser_on_page(self):
return self.q(css='.wrapper-text-welcome').visible
class DashboardPage(PageObject, HelpMixin):
"""
Studio Dashboard page with courses.
The user must be logged in to access this page.
"""
url = BASE_URL + "/course/"
def is_browser_on_page(self):
return self.q(css='.content-primary').visible
@property
def course_runs(self):
"""
The list of course run metadata for all displayed courses
        Returns an empty list if there are none
"""
return self.q(css='.course-run>.value').text
@property
def has_processing_courses(self):
return self.q(css='.courses-processing').present
def create_rerun(self, course_key):
"""
Clicks the create rerun link of the course specified by course_key
'Re-run course' link doesn't show up until you mouse over that course in the course listing
"""
actions = ActionChains(self.browser)
button_name = self.browser.find_element_by_css_selector('.rerun-button[href$="' + course_key + '"]')
actions.move_to_element(button_name)
actions.click(button_name)
actions.perform()
def click_course_run(self, run):
"""
Clicks on the course with run given by run.
"""
self.q(css='.course-run .value').filter(lambda el: el.text == run)[0].click()
# Clicking on course with run will trigger an ajax event
self.wait_for_ajax()
def scroll_to_course(self, course_key):
"""
Scroll down to the course element
"""
element = '[data-course-key*="{}"]'.format(course_key)
self.scroll_to_element(element)
def has_new_library_button(self):
"""
(bool) is the "New Library" button present?
"""
return self.q(css='.new-library-button').present
def click_new_library(self):
"""
Click on the "New Library" button
"""
self.q(css='.new-library-button').first.click()
self.wait_for_ajax()
def is_new_library_form_visible(self):
"""
        Is the new library form visible?
"""
return self.q(css='.wrapper-create-library').visible
def fill_new_library_form(self, display_name, org, number):
"""
Fill out the form to create a new library.
Must have called click_new_library() first.
"""
field = lambda fn: self.q(css=u'.wrapper-create-library #new-library-{}'.format(fn))
field('name').fill(display_name)
field('org').fill(org)
field('number').fill(number)
def is_new_library_form_valid(self):
"""
Is the new library form ready to submit?
"""
return (
self.q(css='.wrapper-create-library .new-library-save:not(.is-disabled)').present and
not self.q(css='.wrapper-create-library .wrap-error.is-shown').present
)
def submit_new_library_form(self):
"""
Submit the new library form.
"""
self.q(css='.wrapper-create-library .new-library-save').click()
@property
def new_course_button(self):
"""
Returns "New Course" button.
"""
return self.q(css='.new-course-button')
def is_new_course_form_visible(self):
"""
Is the new course form visible?
"""
return self.q(css='.wrapper-create-course').visible
def click_new_course_button(self):
"""
Click "New Course" button
"""
self.q(css='.new-course-button').first.click()
self.wait_for_ajax()
def fill_new_course_form(self, display_name, org, number, run):
"""
Fill out the form to create a new course.
"""
field = lambda fn: self.q(css=u'.wrapper-create-course #new-course-{}'.format(fn))
field('name').fill(display_name)
field('org').fill(org)
field('number').fill(number)
field('run').fill(run)
def is_new_course_form_valid(self):
"""
Returns `True` if new course form is valid otherwise `False`.
"""
return (
self.q(css='.wrapper-create-course .new-course-save:not(.is-disabled)').present and
not self.q(css='.wrapper-create-course .wrap-error.is-shown').present
)
def submit_new_course_form(self):
"""
Submit the new course form.
"""
self.q(css='.wrapper-create-course .new-course-save').first.click()
self.wait_for_ajax()
@property
def error_notification(self):
"""
Returns error notification element.
"""
return self.q(css='.wrapper-notification-error.is-shown')
@property
def error_notification_message(self):
"""
Returns text of error message.
"""
self.wait_for_element_visibility(
".wrapper-notification-error.is-shown .message", "Error message is visible"
)
return self.error_notification.results[0].find_element_by_css_selector('.message').text
@property
def course_org_field(self):
"""
Returns course organization input.
"""
return self.q(css='.wrapper-create-course #new-course-org')
def select_item_in_autocomplete_widget(self, item_text):
"""
Selects item in autocomplete where text of item matches item_text.
"""
self.wait_for_element_visibility(
".ui-autocomplete .ui-menu-item", "Autocomplete widget is visible"
)
self.q(css='.ui-autocomplete .ui-menu-item a').filter(lambda el: el.text == item_text)[0].click()
def list_courses(self, archived=False):
"""
List all the courses found on the page's list of courses.
"""
# Workaround Selenium/Firefox bug: `.text` property is broken on invisible elements
tab_selector = u'#course-index-tabs .{} a'.format('archived-courses-tab' if archived else 'courses-tab')
self.wait_for_element_presence(tab_selector, "Courses Tab")
self.q(css=tab_selector).click()
div2info = lambda element: {
'name': element.find_element_by_css_selector('.course-title').text,
'org': element.find_element_by_css_selector('.course-org .value').text,
'number': element.find_element_by_css_selector('.course-num .value').text,
'run': element.find_element_by_css_selector('.course-run .value').text,
'url': element.find_element_by_css_selector('a.course-link').get_attribute('href'),
}
course_list_selector = u'.{} li.course-item'.format('archived-courses' if archived else 'courses')
return self.q(css=course_list_selector).map(div2info).results
def has_course(self, org, number, run, archived=False):
"""
Returns `True` if course for given org, number and run exists on the page otherwise `False`
"""
for course in self.list_courses(archived):
if course['org'] == org and course['number'] == number and course['run'] == run:
return True
return False
def list_libraries(self):
"""
Click the tab to display the available libraries, and return detail of them.
"""
# Workaround Selenium/Firefox bug: `.text` property is broken on invisible elements
library_tab_css = '#course-index-tabs .libraries-tab'
self.wait_for_element_presence(library_tab_css, "Libraries tab")
self.q(css=library_tab_css).click()
if self.q(css='.list-notices.libraries-tab').present:
# No libraries are available.
self.wait_for_element_presence('.libraries-tab .new-library-button', "new library tab")
return []
div2info = lambda element: {
'name': element.find_element_by_css_selector('.course-title').text,
'link_element': element.find_element_by_css_selector('.course-title'),
'org': element.find_element_by_css_selector('.course-org .value').text,
'number': element.find_element_by_css_selector('.course-num .value').text,
'url': element.find_element_by_css_selector('a.library-link').get_attribute('href'),
}
self.wait_for_element_visibility('.libraries li.course-item', "Switch to library tab")
return self.q(css='.libraries li.course-item').map(div2info).results
def has_library(self, **kwargs):
"""
Does the page's list of libraries include a library matching kwargs?
"""
for lib in self.list_libraries():
if all([lib[key] == kwargs[key] for key in kwargs]):
return True
return False
def click_library(self, name):
"""
Click on the library with the given name.
"""
for lib in self.list_libraries():
if lib['name'] == name:
lib['link_element'].click()
@property
def language_selector(self):
"""
return language selector
"""
self.wait_for_element_visibility(
'#settings-language-value',
'Language selector element is available'
)
return self.q(css='#settings-language-value')
@property
def course_creation_error_message(self):
"""
Returns the course creation error
"""
self.wait_for_element_visibility(
'#course_creation_error>p',
'Length error is present'
)
return self.q(css='#course_creation_error>p').text[0]
def is_create_button_disabled(self):
"""
        Returns: True if Create button is disabled
"""
self.wait_for_element_presence(
'.action.action-primary.new-course-save.is-disabled',
"Create button is disabled"
)
return True
class HomePage(DashboardPage):
"""
Home page for Studio when logged in.
"""
url = BASE_URL + "/home/"
class AccessibilityPage(IndexPage):
"""
    Accessibility feedback page for Studio.
"""
url = BASE_URL + "/accessibility"
def is_browser_on_page(self):
"""
Is the page header visible?
"""
return self.q(css='#root h2').visible
def header_text_on_page(self):
"""
Check that the page header has the right text.
"""
return 'Individualized Accessibility Process for Course Creators' in self.q(css='#root h2').text
def fill_form(self, email, name, message):
"""
Fill the accessibility feedback form out.
"""
email_input = self.q(css='#root input#email')
name_input = self.q(css='#root input#fullName')
message_input = self.q(css='#root textarea#message')
email_input.fill(email)
name_input.fill(name)
message_input.fill(message)
# Tab off the message textarea to trigger any error messages
message_input[0].send_keys(Keys.TAB)
def submit_form(self):
"""
        Click the submit button on the accessibility feedback form.
"""
button = self.q(css='#root section button')[0]
button.click()
self.wait_for_element_visibility('#root div.alert-dialog', 'Form submission alert is visible')
def leave_field_blank(self, field_id, field_type='input'):
"""
        To simulate leaving a field blank, click on the field, then press TAB to move focus off the field.
"""
field = self.q(css=u'#root {}#{}'.format(field_type, field_id))[0]
field.click()
field.send_keys(Keys.TAB)
def alert_has_text(self, text=''):
"""
Check that the alert dialog contains the specified text.
"""
return text in self.q(css='#root div.alert-dialog').text
def error_message_is_shown_with_text(self, field_id, text=''):
"""
Check that at least one error message is shown and at least one contains the specified text.
"""
selector = u'#root div#error-{}'.format(field_id)
self.wait_for_element_visibility(selector, 'An error message is visible')
error_messages = self.q(css=selector)
for message in error_messages:
if text in message.text:
return True
return False
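# Editor's sketch (illustrative, not part of the suite): a typical
# create-course flow against DashboardPage; `browser` is an assumed live
# bok-choy/Selenium browser and the form values are hypothetical.
def _example_create_course(browser):
    """Drive the dashboard through the new-course form (sketch only)."""
    dashboard = DashboardPage(browser)
    dashboard.visit()
    dashboard.click_new_course_button()
    dashboard.fill_new_course_form('Demo Course', 'DemoX', 'DM101', '2017')
    if dashboard.is_new_course_form_valid():
        dashboard.submit_new_course_form()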
|
cpennington/edx-platform
|
common/test/acceptance/pages/studio/index.py
|
Python
|
agpl-3.0
| 13,539
|
from PyQt4 import QtCore, QtGui
import shared
import re
import sys
import inspect
from helper_sql import *
from addresses import decodeAddress
from foldertree import AccountMixin
from pyelliptic.openssl import OpenSSL
from utils import str_broadcast_subscribers
import time
def getSortedAccounts():
configSections = filter(lambda x: x != 'bitmessagesettings', shared.config.sections())
configSections.sort(cmp =
lambda x,y: cmp(unicode(shared.config.get(x, 'label'), 'utf-8').lower(), unicode(shared.config.get(y, 'label'), 'utf-8').lower())
)
return configSections
def accountClass(address):
if not shared.config.has_section(address):
if address == str_broadcast_subscribers:
subscription = BroadcastAccount(address)
if subscription.type != 'broadcast':
return None
else:
subscription = SubscriptionAccount(address)
if subscription.type != 'subscription':
return None
return subscription
try:
gateway = shared.config.get(address, "gateway")
for name, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
# obj = g(address)
if issubclass(cls, GatewayAccount) and cls.gatewayName == gateway:
return cls(address)
# general gateway
return GatewayAccount(address)
except:
pass
# no gateway
return BMAccount(address)
class AccountColor(AccountMixin):
def __init__(self, address, type = None):
self.isEnabled = True
self.address = address
if type is None:
if shared.safeConfigGetBoolean(self.address, 'mailinglist'):
self.type = "mailinglist"
elif shared.safeConfigGetBoolean(self.address, 'chan'):
self.type = "chan"
elif sqlQuery(
'''select label from subscriptions where address=?''', self.address):
self.type = 'subscription'
else:
self.type = "normal"
else:
self.type = type
class BMAccount(object):
def __init__(self, address = None):
self.address = address
self.type = 'normal'
if shared.config.has_section(address):
if shared.safeConfigGetBoolean(self.address, 'chan'):
self.type = "chan"
elif shared.safeConfigGetBoolean(self.address, 'mailinglist'):
self.type = "mailinglist"
elif self.address == str_broadcast_subscribers:
self.type = 'broadcast'
else:
queryreturn = sqlQuery(
'''select label from subscriptions where address=?''', self.address)
if queryreturn:
self.type = 'subscription'
def getLabel(self, address = None):
if address is None:
address = self.address
label = address
if shared.config.has_section(address):
label = shared.config.get(address, 'label')
queryreturn = sqlQuery(
'''select label from addressbook where address=?''', address)
if queryreturn != []:
for row in queryreturn:
label, = row
else:
queryreturn = sqlQuery(
'''select label from subscriptions where address=?''', address)
if queryreturn != []:
for row in queryreturn:
label, = row
return label
def parseMessage(self, toAddress, fromAddress, subject, message):
self.toAddress = toAddress
self.fromAddress = fromAddress
self.subject = subject
self.message = message
self.fromLabel = self.getLabel(fromAddress)
self.toLabel = self.getLabel(toAddress)
class SubscriptionAccount(BMAccount):
pass
class BroadcastAccount(BMAccount):
pass
class GatewayAccount(BMAccount):
gatewayName = None
def __init__(self, address):
        super(GatewayAccount, self).__init__(address)
def send(self):
status, addressVersionNumber, streamNumber, ripe = decodeAddress(self.toAddress)
ackdata = OpenSSL.rand(32)
t = ()
sqlExecute(
'''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
'',
self.toAddress,
ripe,
self.fromAddress,
self.subject,
self.message,
ackdata,
int(time.time()), # sentTime (this will never change)
int(time.time()), # lastActionTime
0, # sleepTill time. This will get set when the POW gets done.
'msgqueued',
0, # retryNumber
'sent', # folder
2, # encodingtype
shared.config.getint('bitmessagesettings', 'ttl')
)
shared.workerQueue.put(('sendmessage', self.toAddress))
def parseMessage(self, toAddress, fromAddress, subject, message):
        super(GatewayAccount, self).parseMessage(toAddress, fromAddress, subject, message)
class MailchuckAccount(GatewayAccount):
# set "gateway" in keys.dat to this
gatewayName = "mailchuck"
registrationAddress = "BM-2cVYYrhaY5Gbi3KqrX9Eae2NRNrkfrhCSA"
unregistrationAddress = "BM-2cVMAHTRjZHCTPMue75XBK5Tco175DtJ9J"
relayAddress = "BM-2cWim8aZwUNqxzjMxstnUMtVEUQJeezstf"
regExpIncoming = re.compile("(.*)MAILCHUCK-FROM::(\S+) \| (.*)")
regExpOutgoing = re.compile("(\S+) (.*)")
def __init__(self, address):
        super(MailchuckAccount, self).__init__(address)
def createMessage(self, toAddress, fromAddress, subject, message):
self.subject = toAddress + " " + subject
self.toAddress = self.relayAddress
self.fromAddress = fromAddress
self.message = message
def register(self, email):
self.toAddress = self.registrationAddress
self.subject = email
self.message = ""
self.fromAddress = self.address
self.send()
def unregister(self):
self.toAddress = self.unregistrationAddress
self.subject = ""
self.message = ""
self.fromAddress = self.address
self.send()
    def parseMessage(self, toAddress, fromAddress, subject, message):
        super(MailchuckAccount, self).parseMessage(toAddress, fromAddress, subject, message)
        if fromAddress == self.relayAddress:
            matches = self.regExpIncoming.search(subject)
            if matches is not None:
                self.subject = ""
                if matches.group(1) is not None:
                    self.subject += matches.group(1)
                if matches.group(3) is not None:
                    self.subject += matches.group(3)
                if matches.group(2) is not None:
                    self.fromLabel = matches.group(2)
                    self.fromAddress = matches.group(2)
        if toAddress == self.relayAddress:
            matches = self.regExpOutgoing.search(subject)
            if matches is not None:
                if matches.group(2) is not None:
                    self.subject = matches.group(2)
                if matches.group(1) is not None:
                    self.toLabel = matches.group(1)
                    self.toAddress = matches.group(1)
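# Editor's sketch: how the incoming-subject pattern above splits a relayed
# subject line (addresses and text are illustrative only).
if __name__ == '__main__':
    m = MailchuckAccount.regExpIncoming.search(
        "Re: MAILCHUCK-FROM::alice@example.com | hello")
    print m.group(1), m.group(2), m.group(3)  # 'Re: ', sender, real subject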
|
Atheros1/PyBitmessage
|
src/bitmessageqt/account.py
|
Python
|
mit
| 7,343
|
import csv
from django.conf import settings
from ._utils import AstaporCommand, validate_number_cols
from specimens.models import TaxonRank, Taxon, TaxonStatus, SPECIES_RANK_NAME, SUBGENUS_RANK_NAME
MODELS_TO_TRUNCATE = [Taxon, TaxonRank, TaxonStatus]
def create_initial_ranks():
TaxonRank.objects.bulk_create([
TaxonRank(name='Kingdom'),
TaxonRank(name='Phylum'),
TaxonRank(name='Class'),
TaxonRank(name='Order'),
TaxonRank(name='Family'),
TaxonRank(name='Genus'),
TaxonRank(name=SUBGENUS_RANK_NAME),
TaxonRank(name=SPECIES_RANK_NAME)
])
class Command(AstaporCommand):
help = 'Import taxonomy from a CSV file.'
def add_arguments(self, parser):
parser.add_argument('csv_file')
parser.add_argument(
'--truncate',
action='store_true',
dest='truncate',
default=False,
help='Truncate all tables prior to import',
)
def handle(self, *args, **options):
self.w('Importing data from file...')
with open(options['csv_file']) as csv_file:
if options['truncate']:
for model in MODELS_TO_TRUNCATE:
self.w('Truncate model {name} ...'.format(name=model.__name__), ending='')
model.objects.all().delete()
self.w(self.style.SUCCESS('OK'))
self.w('Creating initial ranks...')
create_initial_ranks()
for i, row in enumerate(csv.DictReader(csv_file, delimiter=',')):
validate_number_cols(row, settings.EXPECTED_NUMBER_COLS_SCIENTIFICNAMES)
self.w('Processing row #{i}...'.format(i=i), ending='')
species_status, _ = TaxonStatus.objects.get_or_create(name=row['Status'])
# Starting from the higher ranks
kingdom, _ = Taxon.objects.get_or_create(name=row['Kingdom'].strip(),
rank=TaxonRank.objects.get(name="Kingdom"))
phylum, _ = Taxon.objects.get_or_create(name=row['Phylum'].strip(),
rank=TaxonRank.objects.get(name="Phylum"),
parent=kingdom)
class_taxon, _ = Taxon.objects.get_or_create(name=row['Class'].strip(),
rank=TaxonRank.objects.get(name="Class"),
parent=phylum)
order_taxon, _ = Taxon.objects.get_or_create(name=row['Order'].strip(),
rank=TaxonRank.objects.get(name="Order"),
parent=class_taxon)
family, _ = Taxon.objects.get_or_create(name=row['Family'].strip(),
rank=TaxonRank.objects.get(name="Family"),
parent=order_taxon)
genus, _ = Taxon.objects.get_or_create(name=row['Genus'].strip(),
rank=TaxonRank.objects.get(name="Genus"),
parent=family)
# Subgenus rank is optional
subgenus_source = row['Subgenus'].strip()
if subgenus_source:
subgenus, _ = Taxon.objects.get_or_create(name=subgenus_source,
rank=TaxonRank.objects.get(name=SUBGENUS_RANK_NAME),
parent=genus)
species_parent = subgenus if subgenus_source else genus
species, _ = Taxon.objects.get_or_create(name=row['Species'].strip(),
rank=TaxonRank.objects.get(name=SPECIES_RANK_NAME),
parent=species_parent,
status=species_status,
aphia_id=row['Aphia_ID'].strip(),
authority=row['Authority'].strip())
self.w(self.style.SUCCESS('OK'))
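# Editor's note: the dict lookups above imply a CSV header along the lines of
# Kingdom,Phylum,Class,Order,Family,Genus,Subgenus,Species,Status,Aphia_ID,Authority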
|
BelgianBiodiversityPlatform/Astapor
|
website/specimens/management/commands/import_taxonomy.py
|
Python
|
bsd-2-clause
| 4,434
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
import adhocracy4.images.fields
class Migration(migrations.Migration):
dependencies = [
('meinberlin_organisations', '0003_logo-for-newsletter'),
]
operations = [
migrations.AlterField(
model_name='organisation',
name='logo',
field=adhocracy4.images.fields.ConfiguredImageField('logo', upload_to='organisation/logos', help_prefix='The image will be shown in the newsletter in the banner.', blank=True, verbose_name='Logo'),
),
]
|
liqd/a4-meinberlin
|
meinberlin/apps/organisations/migrations/0004_changed_upload_path.py
|
Python
|
agpl-3.0
| 637
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import six
import sys
import unittest
from io import StringIO
from itertools import dropwhile
from mock import patch, call
from airflow import configuration, models
from airflow.utils import db
from airflow.contrib.hooks.spark_sql_hook import SparkSqlHook
def get_after(sentinel, iterable):
"""Get the value after `sentinel` in an `iterable`"""
truncated = dropwhile(lambda el: el != sentinel, iterable)
next(truncated)
return next(truncated)
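# Editor's note: e.g. get_after('-f', ['spark-sql', '-f', 'q.sql']) returns 'q.sql'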
class TestSparkSqlHook(unittest.TestCase):
_config = {
'conn_id': 'spark_default',
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'name': 'spark-job',
'num_executors': 10,
'verbose': True,
'sql': ' /path/to/sql/file.sql ',
'conf': 'key=value,PROP=VALUE'
}
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='spark_default', conn_type='spark',
host='yarn://yarn-master')
)
def test_build_command(self):
hook = SparkSqlHook(**self._config)
# The subprocess requires an array but we build the cmd by joining on a space
cmd = ' '.join(hook._prepare_command(""))
# Check all the parameters
assert "--executor-cores {}".format(self._config['executor_cores']) in cmd
assert "--executor-memory {}".format(self._config['executor_memory']) in cmd
assert "--keytab {}".format(self._config['keytab']) in cmd
assert "--name {}".format(self._config['name']) in cmd
assert "--num-executors {}".format(self._config['num_executors']) in cmd
sql_path = get_after('-f', hook._prepare_command(""))
assert self._config['sql'].strip() == sql_path
# Check if all config settings are there
for kv in self._config['conf'].split(","):
k, v = kv.split('=')
assert "--conf {0}={1}".format(k, v) in cmd
if self._config['verbose']:
assert "--verbose" in cmd
@patch('airflow.contrib.hooks.spark_sql_hook.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('Spark-sql communicates using stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSqlHook(
conn_id='spark_default',
sql='SELECT 1'
)
with patch.object(hook.log, 'debug') as mock_debug:
with patch.object(hook.log, 'info') as mock_info:
hook.run_query()
mock_debug.assert_called_with(
'Spark-Sql cmd: %s',
['spark-sql', '-e', 'SELECT 1', '--master', 'yarn', '--name', 'default-name', '--verbose', '--queue', 'default']
)
mock_info.assert_called_with(
'Spark-sql communicates using stdout'
)
# Then
self.assertEqual(
mock_popen.mock_calls[0],
call(['spark-sql', '-e', 'SELECT 1', '--master', 'yarn', '--name', 'default-name', '--verbose', '--queue', 'default'], stderr=-2, stdout=-1)
)
if __name__ == '__main__':
unittest.main()
|
danielvdende/incubator-airflow
|
tests/contrib/hooks/test_spark_sql_hook.py
|
Python
|
apache-2.0
| 4,171
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2017 The OctoPrint Project - Released under terms of the AGPLv3 License"
import requests
import logging
from ..exceptions import ConfigurationInvalid
BRANCH_HEAD_URL = "https://api.bitbucket.org/2.0/repositories/{user}/{repo}/commit/{branch}"
logger = logging.getLogger("octoprint.plugins.softwareupdate.version_checks.bitbucket_commit")
def _get_latest_commit(user, repo, branch):
r = requests.get(BRANCH_HEAD_URL.format(user=user, repo=repo, branch=branch))
if not r.status_code == requests.codes.ok:
return None
reference = r.json()
if not "hash" in reference:
return None
return reference["hash"]
def get_latest(target, check):
if "user" not in check or "repo" not in check:
raise ConfigurationInvalid("Update configuration for %s of type bitbucket_commit needs all of user and repo" % target)
branch = "master"
if "branch" in check:
branch = check["branch"]
current = None
if "current" in check:
current = check["current"]
remote_commit = _get_latest_commit(check["user"], check["repo"], branch)
information = dict(
local=dict(name="Commit {commit}".format(commit=current if current is not None else "unknown"), value=current),
remote=dict(name="Commit {commit}".format(commit=remote_commit if remote_commit is not None else "unknown"), value=remote_commit)
)
is_current = (current is not None and current == remote_commit) or remote_commit is None
logger.debug("Target: %s, local: %s, remote: %s" % (target, current, remote_commit))
return information, is_current
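# Editor's sketch (illustrative values): the `check` dict consumed by
# get_latest() needs at least `user` and `repo`; `branch` and `current`
# are optional:
#
#   get_latest("octoprint", {"user": "someuser", "repo": "somerepo",
#                            "branch": "master", "current": "abc123"})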
|
beeverycreative/BEEweb
|
src/octoprint/plugins/softwareupdate/version_checks/bitbucket_commit.py
|
Python
|
agpl-3.0
| 1,717
|
# Copyright (C) 2015-2019 Magenta ApS, https://magenta.dk.
# Contact: info@magenta.dk.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import shutil
import os
import tempfile
from base64 import b64encode
import gzip
# Outputs the Authorization headers for the given SAML assertion token
if len(sys.argv) > 1:
assertion_file = sys.argv[1]
else:
assertion_file = 'test_auth_data/sample-saml2-assertion.xml'
(handle, tmpfilename) = tempfile.mkstemp('.gz')
with open(assertion_file, 'rb') as f_in, gzip.open(tmpfilename, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
with open(tmpfilename, 'rb') as f:
zipped_data = f.read()
print("Authorization: saml-gzipped %s" % b64encode(zipped_data))
os.remove(tmpfilename)
|
magenta-aps/mox
|
oio_rest/oio_rest/utils/encode_token.py
|
Python
|
mpl-2.0
| 893
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
STYLE = {
'fore': {
'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
'blue': 34, 'purple': 35, 'cyan': 36, 'white': 37,
},
'back': {
'black': 40, 'red': 41, 'green': 42, 'yellow': 43,
'blue': 44, 'purple': 45, 'cyan': 46, 'white': 47,
},
'mode': {
'bold': 1, 'underline': 4, 'blink': 5, 'invert': 7,
},
'default': {
'end': 0,
}
}
def use_style(string, mode='', fore='', back=''):
mode = '%s' % STYLE['mode'][mode] if STYLE['mode'].has_key(mode) else ''
fore = '%s' % STYLE['fore'][fore] if STYLE['fore'].has_key(fore) else ''
back = '%s' % STYLE['back'][back] if STYLE['back'].has_key(back) else ''
style = ';'.join([s for s in [mode, fore, back] if s])
style = '\033[%sm' % style if style else ''
end = '\033[%sm' % STYLE['default']['end'] if style else ''
return '%s%s%s' % (style, string, end)
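# Editor's note: e.g. use_style('hi', mode='bold', fore='red') returns
# '\033[1;31mhi\033[0m'.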
def test():
print use_style('Normal')
print use_style('Bold', mode='bold')
print use_style('Underline & red text', mode='underline', fore='red')
print use_style('Invert & green back', mode='reverse', back='green')
print use_style('Black text & White back', fore='black', back='white')
if __name__ == '__main__':
test()
|
doudounannan/python-crawl
|
style.py
|
Python
|
mit
| 1,372
|
# -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2008 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
from ._volume import *
import avango.nodefactory
nodes = avango.nodefactory.NodeFactory('av::gua::volume::')
|
jakobharlan/avango
|
avango-volume/python/volume/__init__.py
|
Python
|
lgpl-3.0
| 1,634
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('member_guid', models.CharField(max_length=200, null=True, blank=True)),
('user_token', models.CharField(max_length=200, null=True, blank=True)),
('member', models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True)),
],
options={
},
bases=(models.Model,),
),
]
|
ekivemark/my_device
|
bbp/bbp/member/migrations/0001_initial.py
|
Python
|
apache-2.0
| 862
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
BatchPanel.py
---------------------
Date : November 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'November 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import json
from qgis.PyQt import uic
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QTableWidgetItem, QComboBox, QLineEdit, QHeaderView, QFileDialog, QMessageBox
from qgis.core import QgsApplication
from processing.gui.FileSelectionPanel import FileSelectionPanel
from processing.gui.CrsSelectionPanel import CrsSelectionPanel
from processing.gui.ExtentSelectionPanel import ExtentSelectionPanel
from processing.gui.FixedTablePanel import FixedTablePanel
from processing.gui.PointSelectionPanel import PointSelectionPanel
from processing.gui.BatchInputSelectionPanel import BatchInputSelectionPanel
from processing.gui.BatchOutputSelectionPanel import BatchOutputSelectionPanel
from processing.gui.GeometryPredicateSelectionPanel import GeometryPredicateSelectionPanel
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterExtent
from processing.core.parameters import ParameterCrs
from processing.core.parameters import ParameterPoint
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterFixedTable
from processing.core.parameters import ParameterMultipleInput
from processing.core.parameters import ParameterGeometryPredicate
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBatchPanel.ui'))
class BatchPanel(BASE, WIDGET):
PARAMETERS = "PARAMETERS"
OUTPUTS = "OUTPUTS"
def __init__(self, parent, alg):
super(BatchPanel, self).__init__(None)
self.setupUi(self)
self.btnAdvanced.hide()
# Set icons
self.btnAdd.setIcon(QgsApplication.getThemeIcon('/symbologyAdd.svg'))
self.btnRemove.setIcon(QgsApplication.getThemeIcon('/symbologyRemove.svg'))
self.btnOpen.setIcon(QgsApplication.getThemeIcon('/mActionFileOpen.svg'))
self.btnSave.setIcon(QgsApplication.getThemeIcon('/mActionFileSave.svg'))
self.btnAdvanced.setIcon(QIcon(os.path.join(pluginPath, 'images', 'alg.png')))
self.alg = alg
self.parent = parent
self.btnAdd.clicked.connect(self.addRow)
self.btnRemove.clicked.connect(self.removeRows)
self.btnOpen.clicked.connect(self.load)
self.btnSave.clicked.connect(self.save)
self.btnAdvanced.toggled.connect(self.toggleAdvancedMode)
self.tblParameters.horizontalHeader().sectionDoubleClicked.connect(
self.fillParameterValues)
self.initWidgets()
def initWidgets(self):
# If there are advanced parameters — show corresponding button
for param in self.alg.parameters:
if param.isAdvanced:
self.btnAdvanced.show()
break
# Determine column count
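        # (Editor's note: the extra column added below is reserved for the
        # 'Load in QGIS' checkbox; with no visible outputs, none are added.)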
nOutputs = self.alg.getVisibleOutputsCount() + 1
if nOutputs == 1:
nOutputs = 0
self.tblParameters.setColumnCount(
self.alg.getVisibleParametersCount() + nOutputs)
# Table headers
column = 0
for param in self.alg.parameters:
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(param.description))
if param.isAdvanced:
self.tblParameters.setColumnHidden(column, True)
column += 1
for out in self.alg.outputs:
if not out.hidden:
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(out.description))
column += 1
# Last column for indicating if output will be added to canvas
if self.alg.getVisibleOutputsCount():
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(self.tr('Load in QGIS')))
# Add three empty rows by default
for i in xrange(3):
self.addRow()
self.tblParameters.horizontalHeader().setResizeMode(QHeaderView.Interactive)
self.tblParameters.horizontalHeader().setDefaultSectionSize(250)
self.tblParameters.horizontalHeader().setMinimumSectionSize(150)
self.tblParameters.horizontalHeader().setResizeMode(QHeaderView.ResizeToContents)
self.tblParameters.verticalHeader().setResizeMode(QHeaderView.ResizeToContents)
self.tblParameters.horizontalHeader().setStretchLastSection(True)
def getWidgetFromParameter(self, param, row, col):
if isinstance(param, (ParameterRaster, ParameterVector, ParameterTable,
ParameterMultipleInput)):
item = BatchInputSelectionPanel(param, row, col, self)
elif isinstance(param, ParameterBoolean):
item = QComboBox()
item.addItem(self.tr('Yes'))
item.addItem(self.tr('No'))
if param.default:
item.setCurrentIndex(0)
else:
item.setCurrentIndex(1)
elif isinstance(param, ParameterSelection):
item = QComboBox()
item.addItems(param.options)
elif isinstance(param, ParameterFixedTable):
item = FixedTablePanel(param)
elif isinstance(param, ParameterExtent):
item = ExtentSelectionPanel(self.parent, self.alg, param.default)
elif isinstance(param, ParameterPoint):
item = PointSelectionPanel(self.parent, param.default)
elif isinstance(param, ParameterCrs):
item = CrsSelectionPanel(param.default)
elif isinstance(param, ParameterFile):
item = FileSelectionPanel(param.isFolder)
elif isinstance(param, ParameterGeometryPredicate):
item = GeometryPredicateSelectionPanel(param.enabledPredicates, rows=1)
width = max(self.tblParameters.columnWidth(col),
item.sizeHint().width())
self.tblParameters.setColumnWidth(col, width)
else:
item = QLineEdit()
try:
item.setText(unicode(param.default))
except:
pass
return item
def load(self):
filename = unicode(QFileDialog.getOpenFileName(self,
self.tr('Open batch'), None,
self.tr('JSON files (*.json)')))
if filename:
with open(filename) as f:
values = json.load(f)
else:
# If the user clicked on the cancel button.
return
self.tblParameters.setRowCount(0)
try:
for row, alg in enumerate(values):
self.addRow()
params = alg[self.PARAMETERS]
outputs = alg[self.OUTPUTS]
column = 0
for param in self.alg.parameters:
if param.hidden:
continue
widget = self.tblParameters.cellWidget(row, column)
if param.name in params:
value = params[param.name]
self.setValueInWidget(widget, value)
column += 1
for out in self.alg.outputs:
if out.hidden:
continue
widget = self.tblParameters.cellWidget(row, column)
if out.name in outputs:
value = outputs[out.name]
self.setValueInWidget(widget, value)
column += 1
except TypeError:
QMessageBox.critical(
self,
self.tr('Error'),
self.tr('An error occurred while reading your file.'))
def setValueInWidget(self, widget, value):
if isinstance(widget, (BatchInputSelectionPanel, QLineEdit, FileSelectionPanel)):
widget.setText(unicode(value))
elif isinstance(widget, (BatchOutputSelectionPanel, GeometryPredicateSelectionPanel)):
widget.setValue(unicode(value))
elif isinstance(widget, QComboBox):
idx = widget.findText(unicode(value))
if idx != -1:
widget.setCurrentIndex(idx)
elif isinstance(widget, ExtentSelectionPanel):
if value is not None:
widget.setExtentFromString(value)
else:
widget.setExtentFromString('')
elif isinstance(widget, CrsSelectionPanel):
widget.setAuthId(value)
def save(self):
toSave = []
for row in range(self.tblParameters.rowCount()):
algParams = {}
algOutputs = {}
col = 0
alg = self.alg.getCopy()
for param in alg.parameters:
if param.hidden:
continue
if isinstance(param, ParameterExtent):
col += 1
continue
widget = self.tblParameters.cellWidget(row, col)
if not self.setParamValue(param, widget, alg):
self.parent.lblProgress.setText(
self.tr('<b>Missing parameter value: %s (row %d)</b>') % (param.description, row + 1))
return
algParams[param.name] = param.getValueAsCommandLineParameter()
col += 1
col = 0
for param in alg.parameters:
if param.hidden:
continue
if isinstance(param, ParameterExtent):
widget = self.tblParameters.cellWidget(row, col)
if not self.setParamValue(param, widget, alg):
self.parent.lblProgress.setText(
self.tr('<b>Missing parameter value: %s (row %d)</b>') % (param.description, row + 1))
return
algParams[param.name] = unicode(param.value())
col += 1
for out in alg.outputs:
if out.hidden:
continue
widget = self.tblParameters.cellWidget(row, col)
text = widget.getValue()
if text.strip() != '':
algOutputs[out.name] = text.strip()
col += 1
else:
self.parent.lblProgress.setText(
self.tr('<b>Wrong or missing parameter value: %s (row %d)</b>') % (out.description, row + 1))
return
toSave.append({self.PARAMETERS: algParams, self.OUTPUTS: algOutputs})
filename = unicode(QFileDialog.getSaveFileName(self,
self.tr('Save batch'),
None,
self.tr('JSON files (*.json)')))
if filename:
if not filename.endswith('.json'):
filename += '.json'
with open(filename, 'w') as f:
json.dump(toSave, f)
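    # Illustrative sketch (not generated output) of the JSON document that
    # save()/load() above exchange, assuming self.PARAMETERS and self.OUTPUTS
    # are the 'PARAMETERS'/'OUTPUTS' keys; paths and values are made up.
    # Each list entry describes one batch row:
    #
    # [
    #     {"PARAMETERS": {"INPUT": "/data/lakes.shp", "DISTANCE": "250"},
    #      "OUTPUTS": {"OUTPUT": "/tmp/lakes_buffer.shp"}},
    #     {"PARAMETERS": {"INPUT": "/data/rivers.shp", "DISTANCE": "500"},
    #      "OUTPUTS": {"OUTPUT": "/tmp/rivers_buffer.shp"}}
    # ]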
def setParamValue(self, param, widget, alg=None):
if isinstance(param, (ParameterRaster, ParameterVector, ParameterTable,
ParameterMultipleInput)):
value = widget.getText()
if unicode(value).strip() == '':
value = None
return param.setValue(value)
elif isinstance(param, ParameterBoolean):
return param.setValue(widget.currentIndex() == 0)
elif isinstance(param, ParameterSelection):
return param.setValue(widget.currentIndex())
elif isinstance(param, ParameterFixedTable):
return param.setValue(widget.table)
elif isinstance(param, ParameterExtent):
if alg is not None:
widget.useNewAlg(alg)
return param.setValue(widget.getValue())
elif isinstance(param, (ParameterCrs, ParameterFile)):
return param.setValue(widget.getValue())
elif isinstance(param, ParameterGeometryPredicate):
return param.setValue(widget.value())
else:
return param.setValue(widget.text())
def addRow(self):
self.tblParameters.setRowCount(self.tblParameters.rowCount() + 1)
row = self.tblParameters.rowCount() - 1
column = 0
for param in self.alg.parameters:
if param.hidden:
continue
self.tblParameters.setCellWidget(
row, column, self.getWidgetFromParameter(param, row, column))
column += 1
for out in self.alg.outputs:
if out.hidden:
continue
self.tblParameters.setCellWidget(
row, column, BatchOutputSelectionPanel(
out, self.alg, row, column, self))
column += 1
if self.alg.getVisibleOutputsCount():
item = QComboBox()
item.addItem(self.tr('Yes'))
item.addItem(self.tr('No'))
item.setCurrentIndex(0)
self.tblParameters.setCellWidget(row, column, item)
def removeRows(self):
#~ self.tblParameters.setUpdatesEnabled(False)
#~ indexes = self.tblParameters.selectionModel().selectedIndexes()
#~ indexes.sort()
#~ for i in reversed(indexes):
#~ self.tblParameters.model().removeRow(i.row())
#~ self.tblParameters.setUpdatesEnabled(True)
if self.tblParameters.rowCount() > 2:
self.tblParameters.setRowCount(self.tblParameters.rowCount() - 1)
def fillParameterValues(self, column):
widget = self.tblParameters.cellWidget(0, column)
if isinstance(widget, QComboBox):
widgetValue = widget.currentIndex()
for row in range(1, self.tblParameters.rowCount()):
self.tblParameters.cellWidget(row, column).setCurrentIndex(widgetValue)
elif isinstance(widget, ExtentSelectionPanel):
widgetValue = widget.getValue()
for row in range(1, self.tblParameters.rowCount()):
if widgetValue is not None:
self.tblParameters.cellWidget(row, column).setExtentFromString(widgetValue)
else:
self.tblParameters.cellWidget(row, column).setExtentFromString('')
elif isinstance(widget, CrsSelectionPanel):
widgetValue = widget.getValue()
for row in range(1, self.tblParameters.rowCount()):
self.tblParameters.cellWidget(row, column).setAuthId(widgetValue)
elif isinstance(widget, FileSelectionPanel):
widgetValue = widget.getValue()
for row in range(1, self.tblParameters.rowCount()):
self.tblParameters.cellWidget(row, column).setText(widgetValue)
elif isinstance(widget, QLineEdit):
widgetValue = widget.text()
for row in range(1, self.tblParameters.rowCount()):
self.tblParameters.cellWidget(row, column).setText(widgetValue)
elif isinstance(widget, BatchInputSelectionPanel):
widgetValue = widget.getText()
for row in range(1, self.tblParameters.rowCount()):
self.tblParameters.cellWidget(row, column).setText(widgetValue)
elif isinstance(widget, GeometryPredicateSelectionPanel):
widgetValue = widget.value()
for row in range(1, self.tblParameters.rowCount()):
self.tblParameters.cellWidget(row, column).setValue(widgetValue)
else:
pass
def toggleAdvancedMode(self, checked):
for column, param in enumerate(self.alg.parameters):
if param.isAdvanced:
self.tblParameters.setColumnHidden(column, not checked)
|
AsgerPetersen/QGIS
|
python/plugins/processing/gui/BatchPanel.py
|
Python
|
gpl-2.0
| 17,132
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2019 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Shared objects of the various writer classes.
For now, this includes enhancement configuration utilities.
"""
import logging
import os
import warnings
from typing import Optional
import dask.array as da
import numpy as np
import xarray as xr
import yaml
try:
from yaml import UnsafeLoader
except ImportError:
from yaml import Loader as UnsafeLoader # type: ignore
from trollimage.xrimage import XRImage
from trollsift import parser
from satpy import CHUNK_SIZE
from satpy._config import config_search_paths, glob_config
from satpy.aux_download import DataDownloadMixin
from satpy.plugin_base import Plugin
from satpy.resample import get_area_def
from satpy.utils import recursive_dict_update
LOG = logging.getLogger(__name__)
def read_writer_config(config_files, loader=UnsafeLoader):
"""Read the writer `config_files` and return the info extracted."""
conf = {}
LOG.debug('Reading %s', str(config_files))
for config_file in config_files:
with open(config_file) as fd:
conf.update(yaml.load(fd.read(), Loader=loader))
try:
writer_info = conf['writer']
except KeyError:
        raise KeyError(
            "Malformed config file {}: missing 'writer' section".format(
                config_files))
writer_info['config_files'] = config_files
return writer_info
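# Hedged sketch of a minimal writer YAML of the shape read_writer_config()
# parses; the file name and writer class below are illustrative, not a real
# satpy config:
#
#     # writers/my_writer.yaml
#     writer:
#       name: my_writer
#       writer: !!python/name:satpy.writers.ImageWriter
#
# read_writer_config(['writers/my_writer.yaml']) would then return that
# 'writer' mapping with a 'config_files' key added.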
def load_writer_configs(writer_configs, **writer_kwargs):
"""Load the writer from the provided `writer_configs`."""
try:
writer_info = read_writer_config(writer_configs)
writer_class = writer_info['writer']
except (ValueError, KeyError, yaml.YAMLError):
raise ValueError("Invalid writer configs: "
"'{}'".format(writer_configs))
init_kwargs, kwargs = writer_class.separate_init_kwargs(writer_kwargs)
writer = writer_class(config_files=writer_configs,
**init_kwargs)
return writer, kwargs
def load_writer(writer, **writer_kwargs):
"""Find and load writer `writer` in the available configuration files."""
config_fn = writer + ".yaml" if "." not in writer else writer
config_files = config_search_paths(os.path.join("writers", config_fn))
writer_kwargs.setdefault("config_files", config_files)
if not writer_kwargs['config_files']:
raise ValueError("Unknown writer '{}'".format(writer))
try:
return load_writer_configs(writer_kwargs['config_files'],
**writer_kwargs)
except ValueError:
raise ValueError("Writer '{}' does not exist or could not be "
"loaded".format(writer))
def configs_for_writer(writer=None):
"""Generate writer configuration files for one or more writers.
Args:
writer (Optional[str]): Yield configs only for this writer
Returns: Generator of lists of configuration files
"""
if writer is not None:
if not isinstance(writer, (list, tuple)):
writer = [writer]
# given a config filename or writer name
config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer]
else:
writer_configs = glob_config(os.path.join('writers', '*.yaml'))
config_files = set(writer_configs)
for config_file in config_files:
config_basename = os.path.basename(config_file)
writer_configs = config_search_paths(
os.path.join("writers", config_basename))
if not writer_configs:
LOG.warning("No writer configs found for '%s'", writer)
continue
yield writer_configs
def available_writers(as_dict=False):
"""Available writers based on current configuration.
Args:
as_dict (bool): Optionally return writer information as a dictionary.
Default: False
    Returns: List of available writer names. If `as_dict` is `True` then
        a list of dictionaries with additional writer information
        is returned.
"""
writers = []
for writer_configs in configs_for_writer():
try:
writer_info = read_writer_config(writer_configs)
except (KeyError, IOError, yaml.YAMLError):
LOG.warning("Could not import writer config from: %s", writer_configs)
LOG.debug("Error loading YAML", exc_info=True)
continue
writers.append(writer_info if as_dict else writer_info['name'])
return writers
def _determine_mode(dataset):
if "mode" in dataset.attrs:
return dataset.attrs["mode"]
if dataset.ndim == 2:
return "L"
if dataset.shape[0] == 2:
return "LA"
if dataset.shape[0] == 3:
return "RGB"
if dataset.shape[0] == 4:
return "RGBA"
raise RuntimeError("Can't determine 'mode' of dataset: %s" %
str(dataset))
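# Quick sketch of the fallback logic above with a made-up band-first array:
#
#     >>> import numpy as np, xarray as xr
#     >>> arr = xr.DataArray(np.zeros((3, 4, 5)), dims=('bands', 'y', 'x'))
#     >>> _determine_mode(arr)
#     'RGB'
#
# A 2D array yields 'L', and an explicit arr.attrs['mode'] always wins.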
def _burn_overlay(img, image_metadata, area, cw_, overlays):
"""Burn the overlay in the image array."""
del image_metadata
cw_.add_overlay_from_dict(overlays, area, background=img)
return img
def add_overlay(orig_img, area, coast_dir, color=None, width=None, resolution=None,
level_coast=None, level_borders=None, fill_value=None,
grid=None, overlays=None):
"""Add coastline, political borders and grid(graticules) to image.
Uses ``color`` for feature colors where ``color`` is a 3-element tuple
of integers between 0 and 255 representing (R, G, B).
.. warning::
This function currently loses the data mask (alpha band).
``resolution`` is chosen automatically if None (default),
otherwise it should be one of:
+-----+-------------------------+---------+
| 'f' | Full resolution | 0.04 km |
+-----+-------------------------+---------+
| 'h' | High resolution | 0.2 km |
+-----+-------------------------+---------+
| 'i' | Intermediate resolution | 1.0 km |
+-----+-------------------------+---------+
| 'l' | Low resolution | 5.0 km |
+-----+-------------------------+---------+
| 'c' | Crude resolution | 25 km |
+-----+-------------------------+---------+
    ``grid`` is a dictionary with key values as documented in detail in pycoast,
    e.g. overlay={'grid': {'major_lonlat': (10, 10),
    'write_text': False,
    'outline': (224, 224, 224),
    'width': 0.5}}
    Here major_lonlat is plotted every 10 degrees for both longitude and latitude,
    no labels are drawn for the grid lines, the color used for the grid lines
    is light gray, and the width of the graticules is 0.5 pixels.
    For grids, if aggdraw is used, the font option is mandatory; otherwise
    ``write_text`` is set to False::
font = aggdraw.Font('black', '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf',
opacity=127, size=16)
"""
if area is None:
raise ValueError("Area of image is None, can't add overlay.")
from pycoast import ContourWriterAGG
if isinstance(area, str):
area = get_area_def(area)
LOG.info("Add coastlines and political borders to image.")
old_args = [color, width, resolution, grid, level_coast, level_borders]
if any(arg is not None for arg in old_args):
warnings.warn("'color', 'width', 'resolution', 'grid', 'level_coast', 'level_borders'"
" arguments will be deprecated soon. Please use 'overlays' instead.", DeprecationWarning)
if hasattr(orig_img, 'convert'):
# image must be in RGB space to work with pycoast/pydecorate
res_mode = ('RGBA' if orig_img.final_mode(fill_value).endswith('A') else 'RGB')
orig_img = orig_img.convert(res_mode)
elif not orig_img.mode.startswith('RGB'):
raise RuntimeError("'trollimage' 1.6+ required to support adding "
"overlays/decorations to non-RGB data.")
if overlays is None:
overlays = _create_overlays_dict(color, width, grid, level_coast, level_borders)
cw_ = ContourWriterAGG(coast_dir)
new_image = orig_img.apply_pil(_burn_overlay, res_mode,
None, {'fill_value': fill_value},
(area, cw_, overlays), None)
return new_image
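# Hedged usage sketch for add_overlay(); the area name, coastline directory
# and colors are placeholders, not values defined in this module:
#
#     >>> overlays = {'coasts': {'outline': (255, 255, 0), 'level': 1},
#     ...             'borders': {'outline': (255, 0, 0)},
#     ...             'grid': {'major_lonlat': (10, 10), 'write_text': False}}
#     >>> img = add_overlay(img, 'euro4', '/path/to/gshhg', overlays=overlays)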
def _create_overlays_dict(color, width, grid, level_coast, level_borders):
"""Fill in the overlays dict."""
overlays = dict()
# fill with sensible defaults
general_params = {'outline': color or (0, 0, 0),
'width': width or 0.5}
for key, val in general_params.items():
if val is not None:
overlays.setdefault('coasts', {}).setdefault(key, val)
overlays.setdefault('borders', {}).setdefault(key, val)
if level_coast is None:
level_coast = 1
overlays.setdefault('coasts', {}).setdefault('level', level_coast)
if level_borders is None:
level_borders = 1
overlays.setdefault('borders', {}).setdefault('level', level_borders)
if grid is not None:
if 'major_lonlat' in grid and grid['major_lonlat']:
major_lonlat = grid.pop('major_lonlat')
minor_lonlat = grid.pop('minor_lonlat', major_lonlat)
grid.update({'Dlonlat': major_lonlat, 'dlonlat': minor_lonlat})
for key, val in grid.items():
overlays.setdefault('grid', {}).setdefault(key, val)
return overlays
def add_text(orig, dc, img, text):
"""Add text to an image using the pydecorate package.
All the features of pydecorate's ``add_text`` are available.
See documentation of :doc:`pydecorate:index` for more info.
"""
LOG.info("Add text to image.")
dc.add_text(**text)
arr = da.from_array(np.array(img) / 255.0, chunks=CHUNK_SIZE)
new_data = xr.DataArray(arr, dims=['y', 'x', 'bands'],
coords={'y': orig.data.coords['y'],
'x': orig.data.coords['x'],
'bands': list(img.mode)},
attrs=orig.data.attrs)
return XRImage(new_data)
def add_logo(orig, dc, img, logo):
"""Add logos or other images to an image using the pydecorate package.
All the features of pydecorate's ``add_logo`` are available.
See documentation of :doc:`pydecorate:index` for more info.
"""
LOG.info("Add logo to image.")
dc.add_logo(**logo)
arr = da.from_array(np.array(img) / 255.0, chunks=CHUNK_SIZE)
new_data = xr.DataArray(arr, dims=['y', 'x', 'bands'],
coords={'y': orig.data.coords['y'],
'x': orig.data.coords['x'],
'bands': list(img.mode)},
attrs=orig.data.attrs)
return XRImage(new_data)
def add_scale(orig, dc, img, scale):
"""Add scale to an image using the pydecorate package.
All the features of pydecorate's ``add_scale`` are available.
See documentation of :doc:`pydecorate:index` for more info.
"""
LOG.info("Add scale to image.")
dc.add_scale(**scale)
arr = da.from_array(np.array(img) / 255.0, chunks=CHUNK_SIZE)
new_data = xr.DataArray(arr, dims=['y', 'x', 'bands'],
coords={'y': orig.data.coords['y'],
'x': orig.data.coords['x'],
'bands': list(img.mode)},
attrs=orig.data.attrs)
return XRImage(new_data)
def add_decorate(orig, fill_value=None, **decorate):
"""Decorate an image with text and/or logos/images.
This call adds text/logos in order as given in the input to keep the
alignment features available in pydecorate.
An example of the decorate config::
decorate = {
'decorate': [
{'logo': {'logo_path': <path to a logo>, 'height': 143, 'bg': 'white', 'bg_opacity': 255}},
{'text': {'txt': start_time_txt,
'align': {'top_bottom': 'bottom', 'left_right': 'right'},
'font': <path to ttf font>,
'font_size': 22,
'height': 30,
'bg': 'black',
'bg_opacity': 255,
'line': 'white'}}
]
}
Any numbers of text/logo in any order can be added to the decorate list,
but the order of the list is kept as described above.
    Note that a feature given in one element, e.g. bg (the background color),
    will also apply to the following elements unless a new value is given.
    align is a special keyword telling where in the image to start adding
    features: top_bottom is either top or bottom, and left_right is either
    left or right.
"""
LOG.info("Decorate image.")
    # Need to create this here to possibly keep the alignment
    # when adding text and/or logo with pydecorate
if hasattr(orig, 'convert'):
# image must be in RGB space to work with pycoast/pydecorate
orig = orig.convert('RGBA' if orig.mode.endswith('A') else 'RGB')
elif not orig.mode.startswith('RGB'):
raise RuntimeError("'trollimage' 1.6+ required to support adding "
"overlays/decorations to non-RGB data.")
img_orig = orig.pil_image(fill_value=fill_value)
from pydecorate import DecoratorAGG
dc = DecoratorAGG(img_orig)
# decorate need to be a list to maintain the alignment
# as ordered in the list
img = orig
if 'decorate' in decorate:
for dec in decorate['decorate']:
if 'logo' in dec:
img = add_logo(img, dc, img_orig, logo=dec['logo'])
elif 'text' in dec:
img = add_text(img, dc, img_orig, text=dec['text'])
elif 'scale' in dec:
img = add_scale(img, dc, img_orig, scale=dec['scale'])
return img
def get_enhanced_image(dataset, enhance=None, overlay=None, decorate=None,
fill_value=None):
"""Get an enhanced version of `dataset` as an :class:`~trollimage.xrimage.XRImage` instance.
Args:
dataset (xarray.DataArray): Data to be enhanced and converted to an image.
enhance (bool or Enhancer): Whether to automatically enhance
data to be more visually useful and to fit inside the file
            format being saved to. By default, the enhancement
            configuration files found by the default
            :class:`~satpy.writers.Enhancer` class are used. This can be
            set to `False` so that no enhancements are performed, or to an
            instance of the :class:`~satpy.writers.Enhancer` class if
            further custom enhancement is needed.
overlay (dict): Options for image overlays. See :func:`add_overlay`
for available options.
decorate (dict): Options for decorating the image. See
:func:`add_decorate` for available options.
fill_value (int or float): Value to use when pixels are masked or
invalid. Default of `None` means to create an alpha channel.
See :meth:`~trollimage.xrimage.XRImage.finalize` for more
details. Only used when adding overlays or decorations. Otherwise
it is up to the caller to "finalize" the image before using it
except if calling ``img.show()`` or providing the image to
a writer as these will finalize the image.
.. versionchanged:: 0.10
Deprecated `enhancement_config_file` and 'enhancer' in favor of
`enhance`. Pass an instance of the `Enhancer` class to `enhance`
instead.
"""
if enhance is False:
# no enhancement
enhancer = None
elif enhance is None or enhance is True:
# default enhancement
enhancer = Enhancer()
else:
# custom enhancer
enhancer = enhance
# Create an image for enhancement
img = to_image(dataset)
if enhancer is None or enhancer.enhancement_tree is None:
LOG.debug("No enhancement being applied to dataset")
else:
if dataset.attrs.get("sensor", None):
enhancer.add_sensor_enhancements(dataset.attrs["sensor"])
enhancer.apply(img, **dataset.attrs)
if overlay is not None:
img = add_overlay(img, dataset.attrs['area'], fill_value=fill_value, **overlay)
if decorate is not None:
img = add_decorate(img, fill_value=fill_value, **decorate)
return img
def show(dataset, **kwargs):
"""Display the dataset as an image."""
img = get_enhanced_image(dataset.squeeze(), **kwargs)
img.show()
return img
def to_image(dataset):
"""Convert ``dataset`` into a :class:`~trollimage.xrimage.XRImage` instance.
Convert the ``dataset`` into an instance of the
:class:`~trollimage.xrimage.XRImage` class. This function makes no other
changes. To get an enhanced image, possibly with overlays and decoration,
see :func:`~get_enhanced_image`.
Args:
dataset (xarray.DataArray): Data to be converted to an image.
Returns:
Instance of :class:`~trollimage.xrimage.XRImage`.
"""
dataset = dataset.squeeze()
if dataset.ndim < 2:
raise ValueError("Need at least a 2D array to make an image.")
return XRImage(dataset)
def split_results(results):
"""Split results.
Get sources, targets and delayed objects to separate lists from a list of
results collected from (multiple) writer(s).
"""
from dask.delayed import Delayed
def flatten(results):
out = []
if isinstance(results, (list, tuple)):
for itm in results:
out.extend(flatten(itm))
return out
return [results]
sources = []
targets = []
delayeds = []
for res in flatten(results):
if isinstance(res, da.Array):
sources.append(res)
elif isinstance(res, Delayed):
delayeds.append(res)
else:
targets.append(res)
return sources, targets, delayeds
def compute_writer_results(results):
"""Compute all the given dask graphs `results` so that the files are saved.
Args:
results (iterable): Iterable of dask graphs resulting from calls to
`scn.save_datasets(..., compute=False)`
"""
if not results:
return
sources, targets, delayeds = split_results(results)
# one or more writers have targets that we need to close in the future
if targets:
delayeds.append(da.store(sources, targets, compute=False))
if delayeds:
da.compute(delayeds)
if targets:
for target in targets:
if hasattr(target, 'close'):
target.close()
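# A small sketch of the deferred-write workflow these helpers support; the
# Scene object `scn` is assumed context, not defined in this module:
#
#     >>> results = scn.save_datasets(writer='geotiff', compute=False)
#     >>> # ...queue up more saves, then run everything in one dask pass:
#     >>> compute_writer_results([results])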
class Writer(Plugin, DataDownloadMixin):
"""Base Writer class for all other writers.
A minimal writer subclass should implement the `save_dataset` method.
"""
def __init__(self, name=None, filename=None, base_dir=None, **kwargs):
"""Initialize the writer object.
Args:
name (str): A name for this writer for log and error messages.
If this writer is configured in a YAML file its name should
match the name of the YAML file. Writer names may also appear
in output file attributes.
filename (str): Filename to save data to. This filename can and
should specify certain python string formatting fields to
differentiate between data written to the files. Any
attributes provided by the ``.attrs`` of a DataArray object
may be included. Format and conversion specifiers provided by
the :class:`trollsift <trollsift.parser.StringFormatter>`
package may also be used. Any directories in the provided
pattern will be created if they do not exist. Example::
{platform_name}_{sensor}_{name}_{start_time:%Y%m%d_%H%M%S}.tif
base_dir (str):
Base destination directories for all created files.
kwargs (dict): Additional keyword arguments to pass to the
:class:`~satpy.plugin_base.Plugin` class.
"""
# Load the config
Plugin.__init__(self, **kwargs)
self.info = self.config.get('writer', {})
if 'file_pattern' in self.info:
warnings.warn("Writer YAML config is using 'file_pattern' which "
"has been deprecated, use 'filename' instead.")
self.info['filename'] = self.info.pop('file_pattern')
if 'file_pattern' in kwargs:
warnings.warn("'file_pattern' has been deprecated, use 'filename' instead.", DeprecationWarning)
filename = kwargs.pop('file_pattern')
# Use options from the config file if they weren't passed as arguments
self.name = self.info.get("name", None) if name is None else name
self.file_pattern = self.info.get("filename", None) if filename is None else filename
if self.name is None:
raise ValueError("Writer 'name' not provided")
self.filename_parser = self.create_filename_parser(base_dir)
self.register_data_files()
@classmethod
def separate_init_kwargs(cls, kwargs):
"""Help separating arguments between init and save methods.
Currently the :class:`~satpy.scene.Scene` is passed one set of
arguments to represent the Writer creation and saving steps. This is
not preferred for Writer structure, but provides a simpler interface
to users. This method splits the provided keyword arguments between
those needed for initialization and those needed for the ``save_dataset``
and ``save_datasets`` method calls.
        Writer subclasses should try to keep keyword arguments for the
        save methods only and leave the init keyword arguments to the base
        classes when possible.
"""
# FUTURE: Don't pass Scene.save_datasets kwargs to init and here
init_kwargs = {}
kwargs = kwargs.copy()
for kw in ['base_dir', 'filename', 'file_pattern']:
if kw in kwargs:
init_kwargs[kw] = kwargs.pop(kw)
return init_kwargs, kwargs
def create_filename_parser(self, base_dir):
"""Create a :class:`trollsift.parser.Parser` object for later use."""
# just in case a writer needs more complex file patterns
# Set a way to create filenames if we were given a pattern
if base_dir and self.file_pattern:
file_pattern = os.path.join(base_dir, self.file_pattern)
else:
file_pattern = self.file_pattern
return parser.Parser(file_pattern) if file_pattern else None
@staticmethod
def _prepare_metadata_for_filename_formatting(attrs):
if isinstance(attrs.get('sensor'), set):
attrs['sensor'] = '-'.join(sorted(attrs['sensor']))
def get_filename(self, **kwargs):
"""Create a filename where output data will be saved.
Args:
kwargs (dict): Attributes and other metadata to use for formatting
the previously provided `filename`.
"""
if self.filename_parser is None:
raise RuntimeError("No filename pattern or specific filename provided")
self._prepare_metadata_for_filename_formatting(kwargs)
output_filename = self.filename_parser.compose(kwargs)
dirname = os.path.dirname(output_filename)
if dirname and not os.path.isdir(dirname):
LOG.info("Creating output directory: {}".format(dirname))
os.makedirs(dirname)
return output_filename
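    # Hedged example of the trollsift composition performed above, with
    # made-up metadata (and ignoring the YAML config plumbing of __init__):
    #
    #     >>> from datetime import datetime
    #     >>> w = Writer(name='demo',
    #     ...            filename='{platform_name}_{name}_{start_time:%Y%m%d_%H%M%S}.tif')
    #     >>> w.get_filename(platform_name='NOAA-20', name='overview',
    #     ...                start_time=datetime(2019, 1, 1, 12, 30))
    #     'NOAA-20_overview_20190101_123000.tif'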
def save_datasets(self, datasets, compute=True, **kwargs):
"""Save all datasets to one or more files.
Subclasses can use this method to save all datasets to one single
file or optimize the writing of individual datasets. By default
this simply calls `save_dataset` for each dataset provided.
Args:
datasets (iterable): Iterable of `xarray.DataArray` objects to
save using this writer.
compute (bool): If `True` (default), compute all of the saves to
disk. If `False` then the return value is either
a :doc:`dask:delayed` object or two lists to
be passed to a :func:`dask.array.store` call.
See return values below for more details.
**kwargs: Keyword arguments to pass to `save_dataset`. See that
documentation for more details.
Returns:
Value returned depends on `compute` keyword argument. If
            `compute` is `True` the value is the result of either a
:func:`dask.array.store` operation or a :doc:`dask:delayed`
compute, typically this is `None`. If `compute` is `False` then
the result is either a :doc:`dask:delayed` object that can be
computed with `delayed.compute()` or a two element tuple of
sources and targets to be passed to :func:`dask.array.store`. If
`targets` is provided then it is the caller's responsibility to
close any objects that have a "close" method.
"""
results = []
for ds in datasets:
results.append(self.save_dataset(ds, compute=False, **kwargs))
if compute:
LOG.info("Computing and writing results...")
return compute_writer_results([results])
        sources, targets, delayeds = split_results([results])
        if delayeds:
            # This writer had only delayed writes
            return delayeds
        else:
            return sources, targets
def save_dataset(self, dataset, filename=None, fill_value=None,
compute=True, **kwargs):
"""Save the ``dataset`` to a given ``filename``.
This method must be overloaded by the subclass.
Args:
dataset (xarray.DataArray): Dataset to save using this writer.
filename (str): Optionally specify the filename to save this
dataset to. If not provided then `filename`
which can be provided to the init method will be
used and formatted by dataset attributes.
fill_value (int or float): Replace invalid values in the dataset
with this fill value if applicable to
this writer.
compute (bool): If `True` (default), compute and save the dataset.
If `False` return either a :doc:`dask:delayed`
object or tuple of (source, target). See the
return values below for more information.
**kwargs: Other keyword arguments for this particular writer.
Returns:
Value returned depends on `compute`. If `compute` is `True` then
the return value is the result of computing a
:doc:`dask:delayed` object or running :func:`dask.array.store`.
If `compute` is `False` then the returned value is either a
:doc:`dask:delayed` object that can be computed using
`delayed.compute()` or a tuple of (source, target) that should be
            passed to :func:`dask.array.store`. If a target is provided, the
caller is responsible for calling `target.close()` if the target
has this method.
"""
raise NotImplementedError(
"Writer '%s' has not implemented dataset saving" % (self.name, ))
class ImageWriter(Writer):
"""Base writer for image file formats."""
def __init__(self, name=None, filename=None, base_dir=None, enhance=None, **kwargs):
"""Initialize image writer object.
Args:
name (str): A name for this writer for log and error messages.
If this writer is configured in a YAML file its name should
match the name of the YAML file. Writer names may also appear
in output file attributes.
filename (str): Filename to save data to. This filename can and
should specify certain python string formatting fields to
differentiate between data written to the files. Any
attributes provided by the ``.attrs`` of a DataArray object
may be included. Format and conversion specifiers provided by
the :class:`trollsift <trollsift.parser.StringFormatter>`
package may also be used. Any directories in the provided
pattern will be created if they do not exist. Example::
{platform_name}_{sensor}_{name}_{start_time:%Y%m%d_%H%M%S}.tif
base_dir (str):
Base destination directories for all created files.
enhance (bool or Enhancer): Whether to automatically enhance
data to be more visually useful and to fit inside the file
                format being saved to. By default, the enhancement
                configuration files found by the default
                :class:`~satpy.writers.Enhancer` class are used. This can be
                set to `False` so that no enhancements are performed, or to an
                instance of the :class:`~satpy.writers.Enhancer` class if
                further custom enhancement is needed.
kwargs (dict): Additional keyword arguments to pass to the
:class:`~satpy.writer.Writer` base class.
.. versionchanged:: 0.10
Deprecated `enhancement_config_file` and 'enhancer' in favor of
`enhance`. Pass an instance of the `Enhancer` class to `enhance`
instead.
"""
super(ImageWriter, self).__init__(name, filename, base_dir, **kwargs)
if enhance is False:
# No enhancement
self.enhancer = False
elif enhance is None or enhance is True:
# default enhancement
enhancement_config = self.info.get("enhancement_config", None)
self.enhancer = Enhancer(enhancement_config_file=enhancement_config)
else:
# custom enhancer
self.enhancer = enhance
@classmethod
def separate_init_kwargs(cls, kwargs):
"""Separate the init kwargs."""
# FUTURE: Don't pass Scene.save_datasets kwargs to init and here
init_kwargs, kwargs = super(ImageWriter, cls).separate_init_kwargs(kwargs)
for kw in ['enhancement_config', 'enhance']:
if kw in kwargs:
init_kwargs[kw] = kwargs.pop(kw)
return init_kwargs, kwargs
def save_dataset(self, dataset, filename=None, fill_value=None,
overlay=None, decorate=None, compute=True, **kwargs):
"""Save the ``dataset`` to a given ``filename``.
This method creates an enhanced image using :func:`get_enhanced_image`.
The image is then passed to :meth:`save_image`. See both of these
functions for more details on the arguments passed to this method.
"""
img = get_enhanced_image(dataset.squeeze(), enhance=self.enhancer, overlay=overlay,
decorate=decorate, fill_value=fill_value)
return self.save_image(img, filename=filename, compute=compute, fill_value=fill_value, **kwargs)
def save_image(
self,
img: XRImage,
filename: Optional[str] = None,
compute: bool = True,
**kwargs
):
"""Save Image object to a given ``filename``.
Args:
img (trollimage.xrimage.XRImage): Image object to save to disk.
filename (str): Optionally specify the filename to save this
dataset to. It may include string formatting
patterns that will be filled in by dataset
attributes.
compute (bool): If `True` (default), compute and save the dataset.
If `False` return either a :doc:`dask:delayed`
object or tuple of (source, target). See the
return values below for more information.
**kwargs: Other keyword arguments to pass to this writer.
Returns:
Value returned depends on `compute`. If `compute` is `True` then
the return value is the result of computing a
:doc:`dask:delayed` object or running :func:`dask.array.store`.
If `compute` is `False` then the returned value is either a
:doc:`dask:delayed` object that can be computed using
`delayed.compute()` or a tuple of (source, target) that should be
            passed to :func:`dask.array.store`. If a target is provided, the
caller is responsible for calling `target.close()` if the target
has this method.
"""
raise NotImplementedError("Writer '%s' has not implemented image saving" % (self.name,))
class DecisionTree(object):
"""Structure to search for nearest match from a set of parameters.
This class is used to find the best configuration section by matching
a set of attributes. The provided dictionary contains a mapping of
"section name" to "decision" dictionaries. Each decision dictionary
contains the attributes that will be used for matching plus any
additional keys that could be useful when matched. This class will
search these decisions and return the one with the most matching
parameters to the attributes passed to the
:meth:`~satpy.writers.DecisionTree.find_match` method.
Note that decision sections are provided as a dict instead of a list
so that they can be overwritten or updated by doing the equivalent
of a ``current_dicts.update(new_dicts)``.
Examples:
Decision sections are provided as a dictionary of dictionaries.
The returned match will be the first result found by searching
provided `match_keys` in order.
::
decisions = {
'first_section': {
'a': 1,
'b': 2,
'useful_key': 'useful_value',
},
'second_section': {
'a': 5,
'useful_key': 'other_useful_value1',
},
'third_section': {
'b': 4,
'useful_key': 'other_useful_value2',
},
}
tree = DecisionTree(decisions, ('a', 'b'))
tree.find_match(a=5, b=2) # second_section dict
tree.find_match(a=1, b=2) # first_section dict
tree.find_match(a=5, b=4) # second_section dict
tree.find_match(a=3, b=2) # no match
"""
any_key = None
def __init__(self, decision_dicts, match_keys, multival_keys=None):
"""Init the decision tree.
Args:
decision_dicts (dict): Dictionary of dictionaries. Each
sub-dictionary contains key/value pairs that can be
matched from the `find_match` method. Sub-dictionaries
can include additional keys outside of the ``match_keys``
provided to act as the "result" of a query. The keys of
the root dict are arbitrary.
match_keys (list): Keys of the provided dictionary to use for
matching.
multival_keys (list): Keys of `match_keys` that can be provided
as multiple values.
A multi-value key can be specified as a single value
(typically a string) or a set. If a set, it will be sorted
and converted to a tuple and then used for matching.
When querying the tree, these keys will
be searched for exact multi-value results (the sorted tuple)
and if not found then each of the values will be searched
individually in alphabetical order.
"""
self._match_keys = match_keys
self._multival_keys = multival_keys or []
self._tree = {}
if not isinstance(decision_dicts, (list, tuple)):
decision_dicts = [decision_dicts]
self.add_config_to_tree(*decision_dicts)
def add_config_to_tree(self, *decision_dicts):
"""Add a configuration to the tree."""
conf = {}
for decision_dict in decision_dicts:
conf = recursive_dict_update(conf, decision_dict)
self._build_tree(conf)
def _build_tree(self, conf):
"""Build the tree.
Create a tree structure of dicts where each level represents the
possible matches for a specific ``match_key``. When finding matches
we will iterate through the tree matching each key that we know about.
The last dict in the "tree" will contain the configure section whose
match values led down that path in the tree.
See :meth:`DecisionTree.find_match` for more information.
"""
for _section_name, sect_attrs in conf.items():
# Set a path in the tree for each section in the config files
curr_level = self._tree
for match_key in self._match_keys:
# or None is necessary if they have empty strings
this_attr_val = sect_attrs.get(match_key, self.any_key) or None
if match_key in self._multival_keys and isinstance(this_attr_val, list):
this_attr_val = tuple(sorted(this_attr_val))
is_last_key = match_key == self._match_keys[-1]
level_needs_init = this_attr_val not in curr_level
if is_last_key:
# if we are at the last attribute, then assign the value
# set the dictionary of attributes because the config is
# not persistent
curr_level[this_attr_val] = sect_attrs
elif level_needs_init:
curr_level[this_attr_val] = {}
curr_level = curr_level[this_attr_val]
@staticmethod
def _convert_query_val_to_hashable(query_val):
_sorted_query_val = sorted(query_val)
query_vals = [tuple(_sorted_query_val)] + _sorted_query_val
query_vals += query_val
return query_vals
def _get_query_values(self, query_dict, curr_match_key):
query_val = query_dict[curr_match_key]
if curr_match_key in self._multival_keys and isinstance(query_val, set):
query_vals = self._convert_query_val_to_hashable(query_val)
else:
query_vals = [query_val]
return query_vals
def _find_match_if_known(self, curr_level, remaining_match_keys, query_dict):
match = None
curr_match_key = remaining_match_keys[0]
if curr_match_key not in query_dict:
return match
query_vals = self._get_query_values(query_dict, curr_match_key)
for query_val in query_vals:
if query_val not in curr_level:
continue
match = self._find_match(curr_level[query_val],
remaining_match_keys[1:],
query_dict)
if match is not None:
break
return match
def _find_match(self, curr_level, remaining_match_keys, query_dict):
"""Find a match."""
if len(remaining_match_keys) == 0:
# we're at the bottom level, we must have found something
return curr_level
match = self._find_match_if_known(
curr_level, remaining_match_keys, query_dict)
if match is None and self.any_key in curr_level:
# if we couldn't find it using the attribute then continue with
# the other attributes down the 'any' path
match = self._find_match(
curr_level[self.any_key],
remaining_match_keys[1:],
query_dict)
return match
def find_match(self, **query_dict):
"""Find a match.
Recursively search through the tree structure for a path that matches
the provided match parameters.
"""
try:
match = self._find_match(self._tree, self._match_keys, query_dict)
except (KeyError, IndexError, ValueError):
LOG.debug("Match exception:", exc_info=True)
LOG.error("Error when finding matching decision section")
if match is None:
# only possible if no default section was provided
raise KeyError("No decision section found for %s" %
(query_dict.get("uid", None),))
return match
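    # Doctest-style restatement of the class docstring example above (no new
    # behavior claimed beyond what that example already states):
    #
    #     >>> tree = DecisionTree(decisions, ('a', 'b'))
    #     >>> tree.find_match(a=5, b=2)['useful_key']
    #     'other_useful_value1'
    #     >>> tree.find_match(a=3, b=2)  # no match
    #     Traceback (most recent call last):
    #     KeyError: ...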
class EnhancementDecisionTree(DecisionTree):
"""The enhancement decision tree."""
def __init__(self, *decision_dicts, **kwargs):
"""Init the decision tree."""
match_keys = kwargs.pop("match_keys",
("name",
"reader",
"platform_name",
"sensor",
"standard_name",
"units",
))
self.prefix = kwargs.pop("config_section", "enhancements")
multival_keys = kwargs.pop("multival_keys", ["sensor"])
super(EnhancementDecisionTree, self).__init__(
decision_dicts, match_keys, multival_keys)
def add_config_to_tree(self, *decision_dict):
"""Add configuration to tree."""
conf = {}
for config_file in decision_dict:
if os.path.isfile(config_file):
with open(config_file) as fd:
enhancement_config = yaml.load(fd, Loader=UnsafeLoader)
if enhancement_config is None:
# empty file
continue
enhancement_section = enhancement_config.get(
self.prefix, {})
if not enhancement_section:
LOG.debug("Config '{}' has no '{}' section or it is empty".format(config_file, self.prefix))
continue
conf = recursive_dict_update(conf, enhancement_section)
elif isinstance(config_file, dict):
conf = recursive_dict_update(conf, config_file)
else:
LOG.debug("Loading enhancement config string")
d = yaml.load(config_file, Loader=UnsafeLoader)
if not isinstance(d, dict):
raise ValueError(
"YAML file doesn't exist or string is not YAML dict: {}".format(config_file))
conf = recursive_dict_update(conf, d)
self._build_tree(conf)
def find_match(self, **query_dict):
"""Find a match."""
try:
return super(EnhancementDecisionTree, self).find_match(**query_dict)
except KeyError:
# give a more understandable error message
raise KeyError("No enhancement configuration found for %s" %
(query_dict.get("uid", None),))
class Enhancer(object):
"""Helper class to get enhancement information for images."""
def __init__(self, enhancement_config_file=None):
"""Initialize an Enhancer instance.
Args:
enhancement_config_file: The enhancement configuration to apply, False to leave as is.
"""
self.enhancement_config_file = enhancement_config_file
# Set enhancement_config_file to False for no enhancements
if self.enhancement_config_file is None:
# it wasn't specified in the config or in the kwargs, we should
# provide a default
config_fn = os.path.join("enhancements", "generic.yaml")
self.enhancement_config_file = config_search_paths(config_fn)
if not self.enhancement_config_file:
# They don't want any automatic enhancements
self.enhancement_tree = None
else:
if not isinstance(self.enhancement_config_file, (list, tuple)):
self.enhancement_config_file = [self.enhancement_config_file]
self.enhancement_tree = EnhancementDecisionTree(*self.enhancement_config_file)
self.sensor_enhancement_configs = []
def get_sensor_enhancement_config(self, sensor):
"""Get the sensor-specific config."""
if isinstance(sensor, str):
# one single sensor
sensor = [sensor]
for sensor_name in sensor:
config_fn = os.path.join("enhancements", sensor_name + ".yaml")
config_files = config_search_paths(config_fn)
# Note: Enhancement configuration files can't overwrite individual
# options, only entire sections are overwritten
for config_file in config_files:
yield config_file
def add_sensor_enhancements(self, sensor):
"""Add sensor-specific enhancements."""
# XXX: Should we just load all enhancements from the base directory?
new_configs = []
for config_file in self.get_sensor_enhancement_config(sensor):
if config_file not in self.sensor_enhancement_configs:
self.sensor_enhancement_configs.append(config_file)
new_configs.append(config_file)
if new_configs:
self.enhancement_tree.add_config_to_tree(*new_configs)
def apply(self, img, **info):
"""Apply the enhancements."""
enh_kwargs = self.enhancement_tree.find_match(**info)
LOG.debug("Enhancement configuration options: %s" %
(str(enh_kwargs['operations']), ))
for operation in enh_kwargs['operations']:
fun = operation['method']
args = operation.get('args', [])
kwargs = operation.get('kwargs', {})
fun(img, *args, **kwargs)
# img.enhance(**enh_kwargs)
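# Hedged sketch of an enhancement YAML section of the shape consumed by
# EnhancementDecisionTree and Enhancer.apply() above; the section name,
# standard_name and stretch values are illustrative:
#
#     enhancements:
#       overview_default:
#         standard_name: overview
#         operations:
#           - name: stretch
#             method: !!python/name:satpy.enhancements.stretch
#             kwargs:
#               stretch: crude
#               min_stretch: [0.0, 0.0, 0.0]
#               max_stretch: [1.0, 1.0, 1.0]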
|
pytroll/satpy
|
satpy/writers/__init__.py
|
Python
|
gpl-3.0
| 47,315
|
name = input("Insert your name: ")
print("Ciao , name")
|
lstorchi/teaching
|
helloworld/name.py
|
Python
|
gpl-3.0
| 57
|
import os
os.chdir("../resources") # Change the current directory
cwd = os.getcwd() # Get the current working directory (cwd)
files = os.listdir(cwd) # Get all the files in that directory
file_day1 = open("day1", "r")
where_is_santa = 0
class walker:
    direction = 90  # degrees: 0 = east, 90 = north, 180 = west, 270 = south
    pos_list = []   # every (x, y) position visited so far
    x = 0
    y = 0
    part2_foundit = False
    part2 = 0
    # Change the current direction of the walker
def turn(self, heading):
if heading == "L":
self.direction += 90
elif heading == "R":
self.direction -= 90
self.direction = self.direction % 360
return self.direction
def walk(self, distance):
for i in range(distance):
if self.direction == 0:
self.x += 1
elif self.direction == 90:
self.y += 1
elif self.direction == 180:
self.x -= 1
elif self.direction == 270:
self.y -= 1
            if (self.x, self.y) not in self.pos_list:  # tuples keep (x, y) ordered; a set would make (1, 2) collide with (2, 1)
                self.pos_list.append((self.x, self.y))
            elif self.part2_foundit is False:
                # If I had more time this should be made cleaner lol ...
                self.part2 = self.shortest_path()
                self.part2_foundit = True
                print "part2 answer is ", self.part2
                # no break here: keep walking so the final position (part 1) stays correct
def shortest_path(self):
        return abs(0 - self.x) + abs(0 - self.y)
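    # shortest_path() is the Manhattan (taxicab) distance from the origin,
    # e.g. ending at (3, -4) gives |0-3| + |0-(-4)| = 7 blocks.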
    def readfile(self, file):
        data = file.read()  # read from the argument instead of the global handle
        for i in data.split(", "):
            dist = int(i[1:])
            dir = i[0]
            self.turn(dir)
            self.walk(dist)
        return self.shortest_path()
human = walker()
where_is_bunny = human.readfile(file_day1)
file_day1.close()
|
Mymoza/advent-of-code-2016
|
days/day1.py
|
Python
|
mit
| 1,811
|
import inspect
import example
print(inspect.getsource(example.A))
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_language_tools/inspect_getsource_class.py
|
Python
|
apache-2.0
| 67
|
"""Tests of openedx.features.discounts.applicability"""
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import ddt
import pytest
import pytz
from django.contrib.sites.models import Site
from django.utils.timezone import now
from edx_toggles.toggles.testutils import override_waffle_flag
from enterprise.models import EnterpriseCustomer, EnterpriseCustomerUser
from mock import Mock, patch
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.entitlements.tests.factories import CourseEntitlementFactory
from lms.djangoapps.experiments.models import ExperimentData
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.features.discounts.models import DiscountRestrictionConfig
from openedx.features.discounts.utils import REV1008_EXPERIMENT_ID
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..applicability import DISCOUNT_APPLICABILITY_FLAG, _is_in_holdback_and_bucket, can_receive_discount
@ddt.ddt
class TestApplicability(ModuleStoreTestCase):
"""
Applicability determines if this combination of user and course can receive a discount. Make
sure that all of the business conditions work.
"""
def setUp(self):
super(TestApplicability, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.site, _ = Site.objects.get_or_create(domain='example.com')
self.user = UserFactory.create()
self.course = CourseFactory.create(run='test', display_name='test')
CourseModeFactory.create(course_id=self.course.id, mode_slug='verified')
now_time = datetime.now(tz=pytz.UTC).strftime(u"%Y-%m-%d %H:%M:%S%z")
ExperimentData.objects.create(
user=self.user, experiment_id=REV1008_EXPERIMENT_ID, key=str(self.course.id), value=now_time
)
holdback_patcher = patch(
'openedx.features.discounts.applicability._is_in_holdback_and_bucket', return_value=False
)
self.mock_holdback = holdback_patcher.start()
self.addCleanup(holdback_patcher.stop)
def test_can_receive_discount(self):
# Right now, no one should be able to receive the discount
applicability = can_receive_discount(user=self.user, course=self.course)
assert applicability is False
@override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
def test_can_receive_discount_course_requirements(self):
"""
Ensure first purchase offer banner only displays for courses with a non-expired verified mode
"""
CourseEnrollmentFactory(
is_active=True,
course_id=self.course.id,
user=self.user
)
applicability = can_receive_discount(user=self.user, course=self.course)
assert applicability is True
no_verified_mode_course = CourseFactory(end=now() + timedelta(days=30))
applicability = can_receive_discount(user=self.user, course=no_verified_mode_course)
assert applicability is False
course_that_has_ended = CourseFactory(end=now() - timedelta(days=30))
applicability = can_receive_discount(user=self.user, course=course_that_has_ended)
assert applicability is False
disabled_course = CourseFactory()
CourseModeFactory.create(course_id=disabled_course.id, mode_slug='verified') # lint-amnesty, pylint: disable=no-member
disabled_course_overview = CourseOverview.get_from_id(disabled_course.id) # lint-amnesty, pylint: disable=no-member
DiscountRestrictionConfig.objects.create(disabled=True, course=disabled_course_overview)
applicability = can_receive_discount(user=self.user, course=disabled_course)
assert applicability is False
@ddt.data(*(
[[]] +
[[mode] for mode in CourseMode.ALL_MODES] +
[
[mode1, mode2]
for mode1 in CourseMode.ALL_MODES
for mode2 in CourseMode.ALL_MODES
if mode1 != mode2
]
))
@override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
def test_can_receive_discount_previous_verified_enrollment(self, existing_enrollments):
"""
Ensure that only users who have not already purchased courses receive the discount.
"""
CourseEnrollmentFactory(
is_active=True,
course_id=self.course.id,
user=self.user
)
for mode in existing_enrollments:
CourseEnrollmentFactory.create(mode=mode, user=self.user)
applicability = can_receive_discount(user=self.user, course=self.course)
assert applicability == all(mode in CourseMode.UPSELL_TO_VERIFIED_MODES for mode in existing_enrollments)
@ddt.data(
None,
CourseMode.VERIFIED,
CourseMode.PROFESSIONAL,
)
@override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
def test_can_receive_discount_entitlement(self, entitlement_mode):
"""
Ensure that only users who have not already purchased courses receive the discount.
"""
CourseEnrollmentFactory(
is_active=True,
course_id=self.course.id,
user=self.user
)
if entitlement_mode is not None:
CourseEntitlementFactory.create(mode=entitlement_mode, user=self.user)
applicability = can_receive_discount(user=self.user, course=self.course)
assert applicability == (entitlement_mode is None)
@override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
def test_can_receive_discount_false_enterprise(self):
"""
Ensure that enterprise users do not receive the discount.
"""
enterprise_customer = EnterpriseCustomer.objects.create(
name='Test EnterpriseCustomer',
site=self.site
)
EnterpriseCustomerUser.objects.create(
user_id=self.user.id,
enterprise_customer=enterprise_customer
)
applicability = can_receive_discount(user=self.user, course=self.course)
assert applicability is False
@override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
def test_holdback_denies_discount(self):
"""
Ensure that users in the holdback do not receive the discount.
"""
self.mock_holdback.return_value = True
applicability = can_receive_discount(user=self.user, course=self.course)
assert not applicability
@ddt.data(
(0, True),
(1, False),
)
@ddt.unpack
@pytest.mark.skip(reason="fix under work by revenue team")
def test_holdback_group_ids(self, group_number, in_holdback):
with patch('openedx.features.discounts.applicability.stable_bucketing_hash_group', return_value=group_number):
assert _is_in_holdback_and_bucket(self.user) == in_holdback
@pytest.mark.skip(reason="fix under work by revenue team")
def test_holdback_expiry(self):
with patch('openedx.features.discounts.applicability.stable_bucketing_hash_group', return_value=0):
with patch(
'openedx.features.discounts.applicability.datetime',
Mock(now=Mock(return_value=datetime(2020, 8, 1, 0, 1, tzinfo=pytz.UTC)), wraps=datetime),
):
assert not _is_in_holdback_and_bucket(self.user)
|
stvstnfrd/edx-platform
|
openedx/features/discounts/tests/test_applicability.py
|
Python
|
agpl-3.0
| 7,633
|
from flask_appbuilder.security.sqla.models import User
from sqlalchemy import Column, String
class MyUser(User):
extra = Column(String(256))
|
rpiotti/Flask-AppBuilder
|
examples/quickhowto2/app/sec_models.py
|
Python
|
bsd-3-clause
| 147
|
"""SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/07/05 09:42:21 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
underscores, periods. Each Id must begin with a letter or with a
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
     * filter out forbidden characters. Check for a collision with the help
       of the id_set. Add the number of the collision at the end of the
       created string. Also make sure the string starts with a valid
       character.
"""
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
    if s[0] in '0123456789.':
        s = '_' + s  # prepend so the Id actually starts with an underscore
    id = ''.join(c for c in s if c in charset)  # join to a string: a list is unhashable as a dict key
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s]
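# Hedged illustration of convert_to_id() with a throwaway id_set; the file
# names are made up:
#
#     >>> ids = {}
#     >>> convert_to_id('read-me.txt', ids)  # '-' is filtered out
#     'readme.txt'
#     >>> convert_to_id('read me.txt', ids)  # same filtered form -> numbered
#     'readme.txt1'
#     >>> convert_to_id('7zip.exe', ids)     # digit start gets a '_' prefix
#     '_7zip.exe'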
def is_dos_short_file_name(file):
""" examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
def gen_dos_short_file_name(file, filename_set):
""" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
        These are not complete 8.3 dos short names. The ~ char is missing and
replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
    # strip forbidden characters.
    forbidden = '."/[]:;=, '
    fname = ''.join(c for c in fname if c not in forbidden)  # keep a string so the slicing below yields text, not a list
    # check if we already generated a filename with the same number:
    # thisis1.txt, thisis2.txt etc.
    duplicate, num = True, 1
while duplicate:
shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
str(num))
if len(ext) >= 2:
shortname = "%s%s" % (shortname, ext[:4].upper())
duplicate, num = shortname in filename_set, num+1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
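# Hedged illustration of the short-name generation above; the file names are
# made up:
#
#     >>> seen = []
#     >>> gen_dos_short_file_name('very long installer name.exe', seen)
#     'VERYLON1.EXE'
#     >>> gen_dos_short_file_name('very long installer name 2.exe', seen)
#     'VERYLON2.EXE'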
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
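# Hedged sketch of the grouping create_feature_dict() produces, assuming
# source nodes tagged elsewhere by the packaging machinery (names invented):
#
#     { 'default':        [plain_file_node],
#       'PACKAGING_DOC':  [readme_node],
#       'editor':         [exe_node, icon_node] }  # X_MSI_FEATURE == 'editor'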
def generate_guids(root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
    To handle this requirement, the uuid is generated by md5-hashing the
    whole subtree of an xml node.
"""
from hashlib import md5
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
    # find all XML nodes matching the key, retrieve their attribute, hash their
    # subtree, convert the hash to a string and add it as an attribute to the xml node.
for (key,value) in needs_id.items():
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
hash = md5(node.toxml()).hexdigest()
hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
node.attributes[attribute] = hash_str
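# Illustrative sketch (not part of the original module): the 32-char md5 hex
# digest is sliced 8-4-4-4-12 to mimic GUID formatting. For example, the
# digest of the empty string, 'd41d8cd98f00b204e9800998ecf8427e', becomes:
#
#   'd41d8cd9-8f00-b204-e980-0998ecf8427e'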
def string_wxsfile(target, source, env):
return "building WiX file %s"%( target[0].path )
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError, e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
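# Illustrative sketch (not part of the original module): with VENDOR='Company',
# NAME='Product' and VERSION='1.2', the generated layout resembles the
# following (short Name values come from gen_dos_short_file_name and can vary
# with the current filename_set):
#
#   <Directory Id='TARGETDIR' Name='SourceDir'>
#     <Directory Id='ProgramFilesFolder' Name='PFiles'>
#       <Directory Id='VENDOR_folder' Name='COMPANY' LongName='Company'>
#         <Directory Id='MY_DEFAULT_FOLDER' Name='PRODUCT1.2'
#                    LongName='Product-1.2'/>
#       </Directory>
#     </Directory>
#   </Directory>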
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" builds the Component sections of the wxs file with their included files.
Files need to be specified in 8.3 format and in the long name format, long
filenames will be converted automatically.
Features are specficied with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created != []:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
It should be called after the tree has been built completly. We assume
that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
Furthermore a top-level with the name and VERSION of the software will be created.
An PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
        # build the componentrefs. As one of the design decisions is that every
        # file is also a component, we walk the list of files and create a
        # reference for each of them.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
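# Illustrative sketch (not part of the original module): for files tagged
# with X_MSI_FEATURE=('gui', 'The graphical frontend'), the generated
# fragment resembles:
#
#   <Feature Id='complete' ConfigurableDirectory='MY_DEFAULT_FOLDER'
#            Level='1' Title='Product 1.2' Description='...' Display='expand'>
#     <Feature Id='gui' Level='1' Title='gui' Description='The graphical frontend'>
#       <ComponentRef Id='...'/>
#     </Feature>
#   </Feature>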
def build_wxsfile_default_gui(root):
""" this function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )
file.write('{\\rtf')
if text!='':
file.write(text.replace('\n', '\\par '))
else:
file.write(name+'\\par\\par')
file.write('}')
file.close()
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
    # now the optional tags, for which we avoid the KeyError exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
    if 'X_MSI_UPGRADE_CODE' in spec:
        Product.attributes['UpgradeCode'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
# this builder is the entry point for the .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
    # gather the keywords for the specfile compiler. These are the arguments
    # given to the package function plus all optional ones stored in kw, minus
    # the source, target and env ones.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
    specfile = wxs_builder(env, target, source, **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
dezelin/scons
|
scons-local/SCons/Tool/packaging/msi.py
|
Python
|
mit
| 20,208
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Builds datasets for edge-supervised Python AST tasks.
This module is designed for tasks where we are given a dataset of ASTs, and a
set of ordered pairs of AST nodes representing the target directed edges. For
instance, the target edges may be syntactic traversals (where does this return
statement return from?) or dataflow analysis (where does this variable get
written to?).
Given a task of that form, this module handles encoding that graph into NDArrays
so that we can train an automaton layer to produce those directed edges as its
marginal distribution.
"""
from typing import List, Tuple, TypeVar, Optional
import dataclasses
import numpy as np
from gfsa import automaton_builder
from gfsa import graph_types
from gfsa import jax_util
from gfsa import sparse_operator
T = TypeVar("T")
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class GraphBundle:
"""A (possibly padded or batched) combination of automaton and regular graphs.
May or may not be padded and/or batched. When padded, we require that
everything is padded with zeros (for dense things, zero values; for sparse
things, both zero indices and zero values).
Attributes:
automaton_graph: The graph as encoded for an automaton.
graph_metadata: Metadata about the true size of this graph.
node_types: <int[num_nodes]> specifying the node type of each node.
edges: Sparse operator mapping from an array of per-edge-type values to a
[num_nodes, num_nodes] adjacency matrix.
"""
automaton_graph: automaton_builder.EncodedGraph
graph_metadata: automaton_builder.EncodedGraphMetadata
node_types: jax_util.NDArray
edges: sparse_operator.SparseCoordOperator
def convert_graph_with_edges(
graph,
edges,
builder,
):
"""Convert a graph with edges into an GraphBundle.
The order of nodes in the returned example is guaranteed to match the
order of the keys in `graph`.
Args:
graph: Graph to encode.
edges: List of (source, dest, edge_type) pairs to add to the non-automaton
graph representation (i.e. GNN edges or targets).
builder: Builder to use to convert the graph.
Returns:
Encoded example.
"""
# Encode the graph.
encoded_graph, encoded_metadata = builder.encode_graph(graph, as_jax=False)
# Look up node types.
node_types = []
for node_info in graph.values():
node_types.append(builder.node_type_to_index[node_info.node_type])
node_types = np.array(node_types, dtype=np.int32)
# Build the indices for the edges.
if edges:
src_dest_pairs = []
edge_types = []
id_to_index_map = {node_id: i for i, node_id in enumerate(graph)}
for src_id, dest_id, edge_type in edges:
src_idx = id_to_index_map[src_id]
dest_idx = id_to_index_map[dest_id]
src_dest_pairs.append((src_idx, dest_idx))
edge_types.append(edge_type)
edge_operator = sparse_operator.SparseCoordOperator(
input_indices=np.array(edge_types, dtype=np.int32).reshape([-1, 1]),
output_indices=np.array(src_dest_pairs, dtype=np.int32),
values=np.ones([len(edges)], dtype=np.int32),
)
else:
# Handle case where there are no edges.
edge_operator = sparse_operator.SparseCoordOperator(
input_indices=np.empty([0, 1], dtype=np.int32),
output_indices=np.empty([0, 2], dtype=np.int32),
values=np.empty([0], dtype=np.int32),
)
return GraphBundle(
automaton_graph=encoded_graph,
graph_metadata=encoded_metadata,
node_types=node_types,
edges=edge_operator)
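# Illustrative sketch (not part of the original module): for a two-node graph
# with node ids "a" and "b", the edge list [("a", "b", 0), ("b", "a", 1)]
# is encoded as a SparseCoordOperator with
#
#   input_indices  = [[0], [1]]        # per-edge edge types
#   output_indices = [[0, 1], [1, 0]]  # (source index, dest index) pairs
#   values         = [1, 1]
#
# i.e. an operator that scatters per-edge-type values into a
# [num_nodes, num_nodes] adjacency matrix.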
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class PaddingConfig:
"""Configuration specifying how examples get padded to a constant shape.
Attributes:
static_max_metadata: EncodedGraphMetadata for the padded graph size,
specifying the maximum number of nodes (input tagged and untagged) that
can appear. The transition matrix will be padded to these sizes in order
to batch together multiple graph solves.
max_initial_transitions: Maximum number of transitions from initial states
to in-tagged states; equivalently, the maximum number of nonzero entries
in initial_to_in_tagged.
max_in_tagged_transitions: Maximum number of transitions from in-tagged
states to in-tagged states; equivalently, the maximum number of nonzero
entries in in_tagged_to_in_tagged.
max_edges: Maximum number of edges in the graph.
"""
static_max_metadata: automaton_builder.EncodedGraphMetadata
max_initial_transitions: int
max_in_tagged_transitions: int
max_edges: int
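# Illustrative sketch (not part of the original module): a config that pads
# every example to at most 16 nodes and 4 input-tagged nodes, with fixed
# budgets for transitions and edges (all sizes here are made up):
#
#   config = PaddingConfig(
#       static_max_metadata=automaton_builder.EncodedGraphMetadata(
#           num_nodes=16, num_input_tagged_nodes=4),
#       max_initial_transitions=64,
#       max_in_tagged_transitions=128,
#       max_edges=32,
#   )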
def pad_example(example,
config,
allow_failure = False):
"""Pad an example so that it has a static shape determined by the config.
The shapes of all NDArrays in the output will be fully determined by the
  config. Note that we do not pad the graph_metadata field, since it is
  already of static shape; its values can be used to determine which
  elements of the other fields are padding and which are not.
Args:
example: The example to pad.
config: Configuration specifying the desired padding size.
allow_failure: If True, returns None instead of failing if example is too
large.
Returns:
A padded example with static shape.
Raises:
ValueError: If the graph is too big to pad to this size.
"""
# Check the size of the example.
if example.graph_metadata.num_nodes > config.static_max_metadata.num_nodes:
if allow_failure:
return None
raise ValueError("Example has too many nodes")
if (example.graph_metadata.num_input_tagged_nodes >
config.static_max_metadata.num_input_tagged_nodes):
if allow_failure:
return None
raise ValueError("Example has too many input-tagged nodes")
if (example.automaton_graph.initial_to_in_tagged.values.shape[0] >
config.max_initial_transitions):
if allow_failure:
return None
raise ValueError("Example has too many initial transitions")
if (example.automaton_graph.in_tagged_to_in_tagged.values.shape[0] >
config.max_in_tagged_transitions):
if allow_failure:
return None
raise ValueError("Example has too many in-tagged transitions")
if example.edges.values.shape[0] > config.max_edges:
if allow_failure:
return None
raise ValueError("Example has too many edges")
# Pad it out.
return GraphBundle(
automaton_graph=automaton_builder.EncodedGraph(
initial_to_in_tagged=example.automaton_graph.initial_to_in_tagged
.pad_nonzeros(config.max_initial_transitions),
initial_to_special=jax_util.pad_to(
example.automaton_graph.initial_to_special,
config.static_max_metadata.num_nodes),
in_tagged_to_in_tagged=(
example.automaton_graph.in_tagged_to_in_tagged.pad_nonzeros(
config.max_in_tagged_transitions)),
in_tagged_to_special=jax_util.pad_to(
example.automaton_graph.in_tagged_to_special,
config.static_max_metadata.num_input_tagged_nodes),
in_tagged_node_indices=jax_util.pad_to(
example.automaton_graph.in_tagged_node_indices,
config.static_max_metadata.num_input_tagged_nodes),
),
graph_metadata=example.graph_metadata,
node_types=jax_util.pad_to(example.node_types,
config.static_max_metadata.num_nodes),
edges=example.edges.pad_nonzeros(config.max_edges),
)
def zeros_like_padded_example(config):
"""Build an GraphBundle containing only zeros.
This can be useful to initialize model parameters, or do tests.
Args:
config: Configuration specifying the desired padding size.
Returns:
An "example" filled with zeros of the given size.
"""
return GraphBundle(
automaton_graph=automaton_builder.EncodedGraph(
initial_to_in_tagged=sparse_operator.SparseCoordOperator(
input_indices=np.zeros(
shape=(config.max_initial_transitions, 1), dtype=np.int32),
output_indices=np.zeros(
shape=(config.max_initial_transitions, 2), dtype=np.int32),
values=np.zeros(
shape=(config.max_initial_transitions,), dtype=np.float32),
),
initial_to_special=np.zeros(
shape=(config.static_max_metadata.num_nodes,), dtype=np.int32),
in_tagged_to_in_tagged=sparse_operator.SparseCoordOperator(
input_indices=np.zeros(
shape=(config.max_in_tagged_transitions, 1), dtype=np.int32),
output_indices=np.zeros(
shape=(config.max_in_tagged_transitions, 2), dtype=np.int32),
values=np.zeros(
shape=(config.max_in_tagged_transitions,), dtype=np.float32),
),
in_tagged_to_special=np.zeros(
shape=(config.static_max_metadata.num_input_tagged_nodes,),
dtype=np.int32),
in_tagged_node_indices=np.zeros(
shape=(config.static_max_metadata.num_input_tagged_nodes,),
dtype=np.int32),
),
graph_metadata=automaton_builder.EncodedGraphMetadata(
num_nodes=0, num_input_tagged_nodes=0),
node_types=np.zeros(
shape=(config.static_max_metadata.num_nodes,), dtype=np.int32),
edges=sparse_operator.SparseCoordOperator(
input_indices=np.zeros(shape=(config.max_edges, 1), dtype=np.int32),
output_indices=np.zeros(shape=(config.max_edges, 2), dtype=np.int32),
values=np.zeros(shape=(config.max_edges,), dtype=np.int32),
),
)
|
google-research/google-research
|
gfsa/datasets/graph_bundle.py
|
Python
|
apache-2.0
| 10,309
|
#!/usr/bin/env python
'''
Copyright (c) 2013-2014, Magnus Skjegstad (magnus@skjegstad.com) / FFI
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
from subprocess import Popen, PIPE
from iwtool import IW
from ucitool import UCI
from wifihelpers import WifiHelpers
from minstrel import Minstrel
from executor import Executor
class Radio(object):
""" Wrapper class for all radio object. Contains UCI and IW helper objects. """
def __init__(self, devid=0, sshclient=None):
""" Initiate class and sub-classes. Dev id is the device id counting from zero, e.g. wlan0 becomes 0. Sshclient is an optional paramiko.SSHClient() object used to execute commands. If sshclient==None, commands are execute """
self.executor = Executor(sshclient)
phy_dev = "radio" + str(devid)
wlan_dev = "wlan" + str(devid)
self.uci = UCI(devicename=phy_dev, executor=self.executor)
        self.minstrel_path = "/sys/kernel/debug/ieee80211/phy%s/netdev:wlan%s/stations/" % (devid, devid)
self.deviceid = int(devid)
self.iw = IW(devicename=wlan_dev, executor=self.executor)
self.minstrel = Minstrel(minstrel_path=self.minstrel_path, executor=self.executor)
def reconfigure(self, new_channel=None, new_tx_power=None):
""" Reconfigure the radio (channel, tx power) and restart the wifi driver. This takes the AP down for a few seconds. Parameters set to None are left unchanged. """
if new_tx_power != None:
self.uci.set_tx_power(new_tx_power)
if new_channel != None:
self.uci.set_channel(new_channel)
self.uci.commit_config()
self.uci.reload_wifi_config()
def get_summary(self):
""" Summarize radio settings (channel, tx_power, ssid etc) """
res = {}
res['channel'] = self.uci.get_channel() # channel
res['tx_power'] = self.minstrel.get_tx_power() # transmission power
        res['type'] = self.uci.get_operation_mode() # operation mode, e.g. ap
res['mode'] = self.uci.get_wifi_mode() # wifi mode, e.g. 11ag
info = self.iw.get_info()
res['addr'] = info['addr'] # mac addr
res['info'] = info['info'] # information string
res['interface'] = info['interface'] # wifi interface, e.g. wlan0
res['phy'] = info['wiphy']
res['ssid'] = self.uci.get_wifi_ssid()
return res
def get_stations(self):
""" Get info about users (stations) connected to the AP """
stations = self.iw.get_stations()
result = []
minstrel = self.minstrel.get_frame_error_rates()
for station in stations:
result.append({
'addr': station['addr'],
'tx_rate': WifiHelpers.get_numbers_from_str(station['tx bitrate'])[0],
'rx_rate': WifiHelpers.get_numbers_from_str(station['rx bitrate'])[0],
'signal': WifiHelpers.get_numbers_from_str(station['signal'])[0],
'signal_avg': WifiHelpers.get_numbers_from_str(station['signal avg'])[0],
'fer': minstrel[station['addr']]['current'],
'fer_avg' : minstrel[station['addr']]['avg']})
return result
def get_fer(self):
""" Get frame error rate for all stations """
return self.minstrel.get_frame_error_rates()
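# Illustrative usage sketch (not part of the original module); the calls
# execute commands on the access point, so they are shown as comments only.
# With sshclient=None the Executor runs commands locally:
#
#   radio = Radio(devid=0, sshclient=None)
#   radio.reconfigure(new_channel=6, new_tx_power=10)  # restarts the wifi driver
#   print radio.get_summary()['channel']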
|
MagnusS/pyradac
|
src/pyradac/openwrttoolbox.py
|
Python
|
bsd-2-clause
| 4,561
|
import fileinput
def str_to_int(s):
return([ int(x) for x in s.split() ])
# args = [ 'line 1', 'line 2', ... ]
def proc_input(args):
f = str_to_int(args[1])
s = str_to_int(args[3])
return (f, s)
def solve(args, verbose=False):
(f, s) = proc_input(args)
post = [ ('f', x) for x in f ] + [ ('s', x) for x in s ]
# sort by shot distance
post.sort(key=lambda x: x[1])
n_f = n_s = 0
max_a = 3 * len(f)
res_b = 3 * len(s)
max_diff = max_a - res_b
last = None
for x in xrange(len(post)):
t = post[x]
if t[0] == 'f':
n_f += 1
else:
n_s += 1
if t != last:
a = 2 * n_f + (3 * (len(f) - n_f))
b = 2 * n_s + (3 * (len(s) - n_s))
diff = a - b
if diff > max_diff:
max_a = a
res_b = b
max_diff = diff
elif diff == max_diff:
if a > max_a:
max_a = a
res_b = b
last = t
if verbose:
print '%s:%s' % (max_a, res_b)
return (max_a, res_b)
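# Worked example (matching the first test below): throws no longer than the
# chosen distance d score 2 points, longer throws score 3. For f = [1, 2, 3]
# and s = [5, 6], picking d below every throw makes all throws 3-pointers,
# giving the score 9:6 with the maximal difference of 3.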
def test():
assert(str_to_int('1 2 3') == [ 1, 2, 3 ])
assert(solve([ '3', '1 2 3', '2', '5 6' ]) == (9, 6))
assert(solve([ '5', '6 7 8 9 10', '5', '1 2 3 4 5' ]) == (15, 10))
if __name__ == '__main__':
from sys import argv
if argv.pop() == 'test':
test()
else:
solve(list(fileinput.input()), verbose=True)
|
cripplet/practice
|
codeforces/493/attempt/c_basketball.py
|
Python
|
mit
| 1,215
|
# -*- coding: utf-8 -*-
__version__ = '0.1.0'
__version_info__ = tuple([int(num) if num.isdigit() else num for num in __version__.replace('-', '.', 1).split('.')])
default_app_config = 'core.apps.CoreConfig'
|
jordij/menorkayak
|
core/__init__.py
|
Python
|
mit
| 209
|
#!/usr/bin/env python3
'''
This script is used if you want to filter a GFF3 file by IDs but also maintain any relationships
defined within the file. That is, if you have a list of mRNA identifiers and want to keep them,
but also wanted to keep any associated exons, CDS, etc.
Because common GFF3 usage doesn't guarantee that features are defined before they are referenced
as a parent, this creates a graph of objects for all features on a molecule first, then iterates
through to filter them. It doesn't matter on what feature level you pass IDs, the entire graph
of vertical relatives for that feature will be retained.
What happens to everything else? Obviously all the other feature graphs are omitted, but currently
so are any associated comments, sequence data, etc.
Follow the GFF3 specification!
Author: Joshua Orvis
'''
import argparse
import os
import biocodegff
from operator import itemgetter
def main():
parser = argparse.ArgumentParser( description='Filters the features of a GFF3 file by IDs while keeping related features.')
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to the input GFF3' )
parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
parser.add_argument('-l', '--list_file', type=str, required=True, help='A file with one identifier per line')
args = parser.parse_args()
molgraph = biocodegff.parse_gff3_by_relationship( args.input_file )
id_list = parse_id_list( args.list_file )
fout = open(args.output_file, mode='wt', encoding='utf-8')
for mol_id in molgraph:
mol = molgraph[mol_id]
        for (feat_id, feat) in sorted(mol.items(), key=lambda item: int(item[1]['fmin'])):
## used to filter whether this one is exported
buffer = list()
keep = False
buffer.append( feat['cols'] )
if feat_id in id_list:
keep = True
id_list[feat_id] = True
for child in feat['children']:
buffer.append( child['cols'] )
if child['id'] is not None and child['id'] in id_list:
keep = True
id_list[ child['id'] ] = True
if keep:
for cols in buffer:
row = '\t'.join(cols)
fout.write( row )
fout.write( '\n' )
def parse_id_list(file):
ids = dict()
for line in open(file):
## value initialized to False, later set True if the script actually finds it
ids[line.rstrip()] = False
return ids
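# Illustrative sketch (not part of the original script) of a --list_file;
# the identifiers below are hypothetical. One feature ID per line, at any
# level of the feature graph:
#
#   mRNA.000123
#   gene.000045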
if __name__ == '__main__':
main()
|
jonathancrabtree/biocode
|
gff/filter_gff3_by_id_list.py
|
Python
|
gpl-3.0
| 2,752
|
#!/usr/bin/env python
import re
data = re.compile(r'^([a-z]+_[a-z]+_[0-9]{3}): (.*) *$')
comment = re.compile(r'^#')
variable = re.compile(r'\$([a-z_]+)\$')
voted = re.compile(r'.*\$vote_tally\$.*')
passed = re.compile(r'.*(?<!not )(passed|adopted).*\$vote_tally\$.*', re.IGNORECASE)
failed = re.compile(r'.*(failed|not adopted|not passed).*\$vote_tally\$.*', re.IGNORECASE)
voted_codes = []
passed_codes = []
failed_codes = []
numbers = []
with open('action_codes') as action_codes:
action_codes_str = action_codes.read()
for line in action_codes_str.split('\n'):
if comment.match(line):
continue
if data.match(line):
match = data.match(line)
number = match.group(1)
numbers.append(number)
if voted.match(match.group(2)):
voted_codes.append(number)
if passed.match(match.group(2)):
passed_codes.append(number)
if failed.match(match.group(2)):
failed_codes.append(number)
print("voted = %s" % voted_codes)
print("passed = %s" % passed_codes)
print("failed = %s" % failed_codes)
|
votervoice/openstates
|
openstates/ks/action_codes_scrape.py
|
Python
|
gpl-3.0
| 1,139
|
"""
Convenience script triggering the P2.5 -> P3.0 migrations
XXX - NOTE - XXX
This is meant to migrate a P2.5 based Opencore site database to one
that will work on a P3.0 or better Opencore instance. THE 'unmake-sites'
SCRIPT MUST BE RUN BEFORE THIS SCRIPT, OR THIS SCRIPT WILL FAIL.
Sometimes even the 'unmake-sites' script will fail, in which case you'll
need to follow the directions in that script's doc string to get things
going.
"""
from AccessControl.SecurityManagement import newSecurityManager
from AccessControl.SpecialUsers import system
from Products.CMFCore.utils import getToolByName
import sys
import transaction
newSecurityManager(None, system)
try:
portal = sys.argv[1]
except IndexError:
portal = 'openplans'
from Testing.makerequest import makerequest
app=makerequest(app)
portal = app._getOb(portal)
def run_plone_migrations(context):
"""
Runs the migrations that are registered w/ Plone's
portal_migrations tool.
"""
migtool = getToolByName(context, 'portal_migration')
if not migtool.needUpgrading():
return
inst_version = migtool.getInstanceVersion()
if 'svn' in inst_version:
# it's an unreleased version, bump down the version number and
# use forced upgrade
inst_version = inst_version.split()[0]
from Products.CMFPlone.MigrationTool import _upgradePaths
for vfrom, vto in _upgradePaths.items():
if vto[0] == inst_version:
inst_version = vfrom
break
req = context.REQUEST
req.environ['REQUEST_METHOD'] = 'POST'
req.form = {'force_instance_version': inst_version}
req.force_instance_version = inst_version
result = migtool.upgrade(REQUEST=req)
else:
result = migtool.upgrade()
if not migtool.needUpgrading():
transaction.get().note('Plone migrations run')
transaction.commit()
else:
raise RuntimeError, "Plone migrations failed"
def run_opencore_upgrades(context):
"""
Runs any upgrades that have been registered with the portal_setup
tool for the opencore default profile.
"""
profile_id = 'opencore.configuration:default'
setuptool = getToolByName(context, 'portal_setup')
steps = setuptool.listUpgrades(profile_id)
if steps:
steps = steps[0]
step_ids = [step['id'] for step in steps]
req = context.REQUEST
req.form = {'profile_id': profile_id,
'upgrades': step_ids,
'show_old': 1}
setuptool.manage_doUpgrades(req)
transaction.get().note('OpenCore upgrade run')
transaction.commit()
run_plone_migrations(portal)
run_opencore_upgrades(portal)
print "OPENCORE MIGRATION SUCCESS"
|
socialplanning/opencore
|
migrations/migrate-p3.py
|
Python
|
gpl-3.0
| 2,761
|
# pylint: disable=protected-access
"""
Unit tests for SafeCookieData
"""
import itertools
from time import time
import ddt
import six
from django.test import TestCase
from mock import patch
from six.moves import range # pylint: disable=ungrouped-imports
from ..middleware import SafeCookieData, SafeCookieError
from .test_utils import TestSafeSessionsLogMixin
@ddt.ddt
class TestSafeCookieData(TestSafeSessionsLogMixin, TestCase):
"""
Test class for SafeCookieData
"""
def setUp(self):
super(TestSafeCookieData, self).setUp()
self.session_id = 'test_session_id'
self.user_id = 'test_user_id'
self.safe_cookie_data = SafeCookieData.create(self.session_id, self.user_id)
def assert_cookie_data_equal(self, cookie_data1, cookie_data2):
"""
        Asserts equivalence of the given cookie data objects by comparing
their member variables.
"""
self.assertDictEqual(cookie_data1.__dict__, cookie_data2.__dict__)
#---- Test Success ----#
@ddt.data(
*itertools.product(
['test_session_id', '1', '100'],
['test_user_id', None, 0, 1, 100],
)
)
@ddt.unpack
def test_success(self, session_id, user_id):
# create and verify
safe_cookie_data_1 = SafeCookieData.create(session_id, user_id)
self.assertTrue(safe_cookie_data_1.verify(user_id))
# serialize
serialized_value = six.text_type(safe_cookie_data_1)
# parse and verify
safe_cookie_data_2 = SafeCookieData.parse(serialized_value)
self.assertTrue(safe_cookie_data_2.verify(user_id))
# compare
self.assert_cookie_data_equal(safe_cookie_data_1, safe_cookie_data_2)
def test_version(self):
self.assertEqual(self.safe_cookie_data.version, SafeCookieData.CURRENT_VERSION)
def test_serialize(self):
serialized_value = six.text_type(self.safe_cookie_data)
for field_value in six.itervalues(self.safe_cookie_data.__dict__):
self.assertIn(six.text_type(field_value), serialized_value)
#---- Test Parse ----#
@ddt.data(['1', 'session_id', 'key_salt', 'signature'], ['1', 's', 'k', 'sig'])
def test_parse_success(self, cookie_data_fields):
self.assert_cookie_data_equal(
SafeCookieData.parse(SafeCookieData.SEPARATOR.join(cookie_data_fields)),
SafeCookieData(*cookie_data_fields),
)
def test_parse_success_serialized(self):
serialized_value = six.text_type(self.safe_cookie_data)
self.assert_cookie_data_equal(
SafeCookieData.parse(serialized_value),
self.safe_cookie_data,
)
@ddt.data('1', '1|s', '1|s|k', '1|s|k|sig|extra', '73453', 's90sfs')
def test_parse_error(self, serialized_value):
with self.assert_parse_error():
with self.assertRaises(SafeCookieError):
SafeCookieData.parse(serialized_value)
@ddt.data(0, 2, -1, 'invalid_version')
def test_parse_invalid_version(self, version):
serialized_value = '{}|session_id|key_salt|signature'.format(version)
with self.assert_logged(r"SafeCookieData version .* is not supported."):
with self.assertRaises(SafeCookieError):
SafeCookieData.parse(serialized_value)
#---- Test Create ----#
@ddt.data(None, '')
def test_create_invalid_session_id(self, session_id):
with self.assert_invalid_session_id():
with self.assertRaises(SafeCookieError):
SafeCookieData.create(session_id, self.user_id)
@ddt.data(None, '')
def test_create_no_user_id(self, user_id):
with self.assert_logged('SafeCookieData received empty user_id', 'debug'):
safe_cookie_data = SafeCookieData.create(self.session_id, user_id)
self.assertTrue(safe_cookie_data.verify(user_id))
#---- Test Verify ----#
def test_verify_success(self):
self.assertTrue(self.safe_cookie_data.verify(self.user_id))
#- Test verify: expiration -#
def test_verify_expired_signature(self):
three_weeks_from_now = time() + 60 * 60 * 24 * 7 * 3
with patch('time.time', return_value=three_weeks_from_now):
with self.assert_signature_error_logged('Signature age'):
self.assertFalse(self.safe_cookie_data.verify(self.user_id))
#- Test verify: incorrect user -#
@ddt.data(None, 'invalid_user_id', -1)
def test_verify_incorrect_user_id(self, user_id):
with self.assert_incorrect_user_logged():
self.assertFalse(self.safe_cookie_data.verify(user_id))
@ddt.data('version', 'session_id')
def test_verify_incorrect_field_value(self, field_name):
setattr(self.safe_cookie_data, field_name, 'incorrect_cookie_value')
with self.assert_incorrect_user_logged():
self.assertFalse(self.safe_cookie_data.verify(self.user_id))
#- Test verify: incorrect signature -#
def test_verify_another_signature(self):
another_cookie_data = SafeCookieData.create(self.session_id, self.user_id) # different key_salt and expiration
self.safe_cookie_data.signature = another_cookie_data.signature
with self.assert_incorrect_signature_logged():
self.assertFalse(self.safe_cookie_data.verify(self.user_id))
def test_verify_incorrect_key_salt(self):
self.safe_cookie_data.key_salt = 'incorrect_cookie_value'
with self.assert_incorrect_signature_logged():
self.assertFalse(self.safe_cookie_data.verify(self.user_id))
@ddt.data(
*itertools.product(
list(range(0, 100, 25)),
list(range(5, 20, 5)),
)
)
@ddt.unpack
def test_verify_corrupt_signed_data(self, start, length):
def make_corrupt(signature, start, end):
"""
Replaces characters in the given signature starting
at the start offset until the end offset.
"""
return signature[start:end] + 'x' * (end - start) + signature[end:]
self.safe_cookie_data.signature = make_corrupt(
self.safe_cookie_data.signature, start, start + length
)
with self.assert_incorrect_signature_logged():
self.assertFalse(self.safe_cookie_data.verify(self.user_id))
#- Test verify: corrupt signature -#
def test_verify_corrupt_signature(self):
self.safe_cookie_data.signature = 'corrupt_signature'
with self.assert_signature_error_logged('No .* found in value'):
self.assertFalse(self.safe_cookie_data.verify(self.user_id))
#---- Test Digest ----#
def test_digest_success(self):
# Should return the same digest twice.
self.assertEqual(
self.safe_cookie_data._compute_digest(self.user_id),
self.safe_cookie_data._compute_digest(self.user_id),
)
@ddt.data('another_user', 0, None)
def test_digest_incorrect_user(self, incorrect_user):
self.assertNotEqual(
self.safe_cookie_data._compute_digest(self.user_id),
self.safe_cookie_data._compute_digest(incorrect_user)
)
@ddt.data(
*itertools.product(
['version', 'session_id'],
['incorrect_value', 0, None],
)
)
@ddt.unpack
def test_digest_incorrect_field_value(self, field_name, incorrect_field_value):
digest = self.safe_cookie_data._compute_digest(self.user_id)
setattr(self.safe_cookie_data, field_name, incorrect_field_value)
self.assertNotEqual(
digest,
self.safe_cookie_data._compute_digest(self.user_id)
)
|
cpennington/edx-platform
|
openedx/core/djangoapps/safe_sessions/tests/test_safe_cookie_data.py
|
Python
|
agpl-3.0
| 7,693
|
# Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import platform
import traceback
import faulthandler
import tempfile
import os
import os.path
import uuid
import json
import locale
from typing import cast, Any
try:
from sentry_sdk.hub import Hub
from sentry_sdk.utils import event_from_exception
from sentry_sdk import configure_scope, add_breadcrumb
with_sentry_sdk = True
except ImportError:
with_sentry_sdk = False
from PyQt5.QtCore import QT_VERSION_STR, PYQT_VERSION_STR, QUrl
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QVBoxLayout, QLabel, QTextEdit, QGroupBox, QCheckBox, QPushButton
from PyQt5.QtGui import QDesktopServices
from UM.Application import Application
from UM.Logger import Logger
from UM.View.GL.OpenGL import OpenGL
from UM.i18n import i18nCatalog
from UM.Resources import Resources
from cura import ApplicationMetadata
catalog = i18nCatalog("cura")
home_dir = os.path.expanduser("~")
MYPY = False
if MYPY:
CuraDebugMode = False
else:
try:
from cura.CuraVersion import CuraDebugMode
except ImportError:
CuraDebugMode = False # [CodeStyle: Reflecting imported value]
# List of exceptions that should not be considered "fatal" and abort the program.
# These are primarily some exception types that we simply skip
skip_exception_types = [
SystemExit,
KeyboardInterrupt,
GeneratorExit
]
class CrashHandler:
def __init__(self, exception_type, value, tb, has_started = True):
self.exception_type = exception_type
self.value = value
self.traceback = tb
self.has_started = has_started
self.dialog = None # Don't create a QDialog before there is a QApplication
self.cura_version = None
self.cura_locale = None
Logger.log("c", "An uncaught error has occurred!")
for line in traceback.format_exception(exception_type, value, tb):
for part in line.rstrip("\n").split("\n"):
Logger.log("c", part)
self.data = {}
# If Cura has fully started, we only show fatal errors.
# If Cura has not fully started yet, we always show the early crash dialog. Otherwise, Cura will just crash
# without any information.
if has_started and exception_type in skip_exception_types:
return
if with_sentry_sdk:
with configure_scope() as scope:
scope.set_tag("during_startup", not has_started)
if not has_started:
self._send_report_checkbox = None
self.early_crash_dialog = self._createEarlyCrashDialog()
self.dialog = QDialog()
self._createDialog()
@staticmethod
def pruneSensitiveData(obj: Any) -> Any:
if isinstance(obj, str):
return obj.replace("\\\\", "\\").replace(home_dir, "<user_home>")
if isinstance(obj, list):
return [CrashHandler.pruneSensitiveData(item) for item in obj]
if isinstance(obj, dict):
return {k: CrashHandler.pruneSensitiveData(v) for k, v in obj.items()}
return obj
@staticmethod
def sentryBeforeSend(event, hint):
return CrashHandler.pruneSensitiveData(event)
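    # Illustrative sketch (not part of the original module):
    # pruneSensitiveData recursively scrubs the user's home directory from
    # strings, lists and dicts. Assuming home_dir == "/home/alice":
    #
    #   CrashHandler.pruneSensitiveData({"log": "/home/alice/.cura"})
    #   # -> {"log": "<user_home>/.cura"}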
def _createEarlyCrashDialog(self):
dialog = QDialog()
dialog.setMinimumWidth(500)
dialog.setMinimumHeight(170)
dialog.setWindowTitle(catalog.i18nc("@title:window", "Cura can't start"))
dialog.finished.connect(self._closeEarlyCrashDialog)
layout = QVBoxLayout(dialog)
label = QLabel()
label.setText(catalog.i18nc("@label crash message", """<p><b>Oops, Ultimaker Cura has encountered something that doesn't seem right.</p></b>
<p>We encountered an unrecoverable error during start up. It was possibly caused by some incorrect configuration files. We suggest to backup and reset your configuration.</p>
<p>Backups can be found in the configuration folder.</p>
<p>Please send us this Crash Report to fix the problem.</p>
"""))
label.setWordWrap(True)
layout.addWidget(label)
# "send report" check box and show details
self._send_report_checkbox = QCheckBox(catalog.i18nc("@action:button", "Send crash report to Ultimaker"), dialog)
self._send_report_checkbox.setChecked(True)
show_details_button = QPushButton(catalog.i18nc("@action:button", "Show detailed crash report"), dialog)
show_details_button.setMaximumWidth(200)
show_details_button.clicked.connect(self._showDetailedReport)
show_configuration_folder_button = QPushButton(catalog.i18nc("@action:button", "Show configuration folder"), dialog)
show_configuration_folder_button.setMaximumWidth(200)
show_configuration_folder_button.clicked.connect(self._showConfigurationFolder)
layout.addWidget(self._send_report_checkbox)
layout.addWidget(show_details_button)
layout.addWidget(show_configuration_folder_button)
# "backup and start clean" and "close" buttons
buttons = QDialogButtonBox()
buttons.addButton(QDialogButtonBox.Close)
buttons.addButton(catalog.i18nc("@action:button", "Backup and Reset Configuration"), QDialogButtonBox.AcceptRole)
buttons.rejected.connect(self._closeEarlyCrashDialog)
buttons.accepted.connect(self._backupAndStartClean)
layout.addWidget(buttons)
return dialog
def _closeEarlyCrashDialog(self):
if self._send_report_checkbox.isChecked():
self._sendCrashReport()
os._exit(1)
def _backupAndStartClean(self):
"""Backup the current resource directories and create clean ones."""
Resources.factoryReset()
self.early_crash_dialog.close()
def _showConfigurationFolder(self):
path = Resources.getConfigStoragePath()
QDesktopServices.openUrl(QUrl.fromLocalFile( path ))
def _showDetailedReport(self):
self.dialog.exec_()
def _createDialog(self):
"""Creates a modal dialog."""
self.dialog.setMinimumWidth(640)
self.dialog.setMinimumHeight(640)
self.dialog.setWindowTitle(catalog.i18nc("@title:window", "Crash Report"))
# if the application has not fully started, this will be a detailed report dialog which should not
# close the application when it's closed.
if self.has_started:
self.dialog.finished.connect(self._close)
layout = QVBoxLayout(self.dialog)
layout.addWidget(self._messageWidget())
layout.addWidget(self._informationWidget())
layout.addWidget(self._exceptionInfoWidget())
layout.addWidget(self._logInfoWidget())
layout.addWidget(self._buttonsWidget())
def _close(self):
os._exit(1)
def _messageWidget(self):
label = QLabel()
label.setText(catalog.i18nc("@label crash message", """<p><b>A fatal error has occurred in Cura. Please send us this Crash Report to fix the problem</p></b>
<p>Please use the "Send report" button to post a bug report automatically to our servers</p>
"""))
return label
def _informationWidget(self):
group = QGroupBox()
group.setTitle(catalog.i18nc("@title:groupbox", "System information"))
layout = QVBoxLayout()
label = QLabel()
try:
from UM.Application import Application
self.cura_version = Application.getInstance().getVersion()
self.cura_locale = Application.getInstance().getPreferences().getValue("general/language")
except:
self.cura_version = catalog.i18nc("@label unknown version of Cura", "Unknown")
self.cura_locale = "??_??"
self.data["cura_version"] = self.cura_version
self.data["os"] = {"type": platform.system(), "version": platform.version()}
self.data["qt_version"] = QT_VERSION_STR
self.data["pyqt_version"] = PYQT_VERSION_STR
self.data["locale_os"] = locale.getlocale(locale.LC_MESSAGES)[0] if hasattr(locale, "LC_MESSAGES") else \
locale.getdefaultlocale()[0]
self.data["locale_cura"] = self.cura_locale
try:
from cura.CuraApplication import CuraApplication
plugins = CuraApplication.getInstance().getPluginRegistry()
self.data["plugins"] = {
plugin_id: plugins.getMetaData(plugin_id)["plugin"]["version"]
for plugin_id in plugins.getInstalledPlugins() if not plugins.isBundledPlugin(plugin_id)
}
except:
self.data["plugins"] = {"[FAILED]": "0.0.0"}
crash_info = "<b>" + catalog.i18nc("@label Cura version number", "Cura version") + ":</b> " + str(self.cura_version) + "<br/>"
crash_info += "<b>" + catalog.i18nc("@label", "Cura language") + ":</b> " + str(self.cura_locale) + "<br/>"
crash_info += "<b>" + catalog.i18nc("@label", "OS language") + ":</b> " + str(self.data["locale_os"]) + "<br/>"
crash_info += "<b>" + catalog.i18nc("@label Type of platform", "Platform") + ":</b> " + str(platform.platform()) + "<br/>"
crash_info += "<b>" + catalog.i18nc("@label", "Qt version") + ":</b> " + str(QT_VERSION_STR) + "<br/>"
crash_info += "<b>" + catalog.i18nc("@label", "PyQt version") + ":</b> " + str(PYQT_VERSION_STR) + "<br/>"
crash_info += "<b>" + catalog.i18nc("@label OpenGL version", "OpenGL") + ":</b> " + str(self._getOpenGLInfo()) + "<br/>"
label.setText(crash_info)
layout.addWidget(label)
group.setLayout(layout)
if with_sentry_sdk:
with configure_scope() as scope:
scope.set_tag("qt_version", QT_VERSION_STR)
scope.set_tag("pyqt_version", PYQT_VERSION_STR)
scope.set_tag("os", platform.system())
scope.set_tag("os_version", platform.version())
scope.set_tag("locale_os", self.data["locale_os"])
scope.set_tag("locale_cura", self.cura_locale)
scope.set_tag("is_enterprise", ApplicationMetadata.IsEnterpriseVersion)
scope.set_context("plugins", self.data["plugins"])
user_id = uuid.getnode() # On all of Cura's supported platforms, this returns the MAC address which is pseudonymical information (!= anonymous).
user_id %= 2 ** 16 # So to make it anonymous, apply a bitmask selecting only the last 16 bits.
# This prevents it from being traceable to a specific user but still gives somewhat of an idea of whether it's just the same user hitting the same crash over and over again, or if it's widespread.
scope.set_user({"id": str(user_id)})
return group
def _getOpenGLInfo(self):
opengl_instance = OpenGL.getInstance()
if not opengl_instance:
self.data["opengl"] = {"version": "n/a", "vendor": "n/a", "type": "n/a"}
return catalog.i18nc("@label", "Not yet initialized<br/>")
info = "<ul>"
info += catalog.i18nc("@label OpenGL version", "<li>OpenGL Version: {version}</li>").format(version = opengl_instance.getOpenGLVersion())
info += catalog.i18nc("@label OpenGL vendor", "<li>OpenGL Vendor: {vendor}</li>").format(vendor = opengl_instance.getGPUVendorName())
info += catalog.i18nc("@label OpenGL renderer", "<li>OpenGL Renderer: {renderer}</li>").format(renderer = opengl_instance.getGPUType())
info += "</ul>"
self.data["opengl"] = {"version": opengl_instance.getOpenGLVersion(), "vendor": opengl_instance.getGPUVendorName(), "type": opengl_instance.getGPUType()}
active_machine_definition_id = "unknown"
active_machine_manufacturer = "unknown"
try:
from cura.CuraApplication import CuraApplication
application = cast(CuraApplication, Application.getInstance())
machine_manager = application.getMachineManager()
global_stack = machine_manager.activeMachine
if global_stack is None:
active_machine_definition_id = "empty"
active_machine_manufacturer = "empty"
else:
active_machine_definition_id = global_stack.definition.getId()
active_machine_manufacturer = global_stack.definition.getMetaDataEntry("manufacturer", "unknown")
except:
pass
if with_sentry_sdk:
with configure_scope() as scope:
scope.set_tag("opengl_version", opengl_instance.getOpenGLVersion())
scope.set_tag("gpu_vendor", opengl_instance.getGPUVendorName())
scope.set_tag("gpu_type", opengl_instance.getGPUType())
scope.set_tag("active_machine", active_machine_definition_id)
scope.set_tag("active_machine_manufacturer", active_machine_manufacturer)
return info
def _exceptionInfoWidget(self):
group = QGroupBox()
group.setTitle(catalog.i18nc("@title:groupbox", "Error traceback"))
layout = QVBoxLayout()
text_area = QTextEdit()
trace_list = traceback.format_exception(self.exception_type, self.value, self.traceback)
trace = "".join(trace_list)
text_area.setText(trace)
text_area.setReadOnly(True)
layout.addWidget(text_area)
group.setLayout(layout)
# Parsing all the information to fill the dictionary
summary = ""
if len(trace_list) >= 1:
summary = trace_list[len(trace_list)-1].rstrip("\n")
module = [""]
if len(trace_list) >= 2:
module = trace_list[len(trace_list)-2].rstrip("\n").split("\n")
module_split = module[0].split(", ")
filepath_directory_split = module_split[0].split("\"")
filepath = ""
if len(filepath_directory_split) > 1:
filepath = filepath_directory_split[1]
directory, filename = os.path.split(filepath)
line = ""
if len(module_split) > 1:
line = int(module_split[1].lstrip("line "))
function = ""
if len(module_split) > 2:
function = module_split[2].lstrip("in ")
code = ""
if len(module) > 1:
code = module[1].lstrip(" ")
# Using this workaround for a cross-platform path splitting
split_path = []
folder_name = ""
# Split until reach folder "cura"
while folder_name != "cura":
directory, folder_name = os.path.split(directory)
if not folder_name:
break
split_path.append(folder_name)
# Look for plugins. If it's not a plugin, the current cura version is set
isPlugin = False
module_version = self.cura_version
module_name = "Cura"
        if "plugins" in split_path:
isPlugin = True
# Look backwards until plugin.json is found
directory, name = os.path.split(filepath)
            while "plugin.json" not in os.listdir(directory):
directory, name = os.path.split(directory)
json_metadata_file = os.path.join(directory, "plugin.json")
try:
with open(json_metadata_file, "r", encoding = "utf-8") as f:
try:
metadata = json.loads(f.read())
module_version = metadata["version"]
module_name = metadata["name"]
except json.decoder.JSONDecodeError:
                        # don't raise new exceptions while building the crash report
Logger.logException("e", "Failed to parse plugin.json for plugin %s", name)
except:
                # don't raise new exceptions while building the crash report
pass
exception_dict = dict()
exception_dict["traceback"] = {"summary": summary, "full_trace": trace}
exception_dict["location"] = {"path": filepath, "file": filename, "function": function, "code": code, "line": line,
"module_name": module_name, "version": module_version, "is_plugin": isPlugin}
self.data["exception"] = exception_dict
if with_sentry_sdk:
with configure_scope() as scope:
scope.set_tag("is_plugin", isPlugin)
scope.set_tag("module", module_name)
return group
def _logInfoWidget(self):
group = QGroupBox()
group.setTitle(catalog.i18nc("@title:groupbox", "Logs"))
layout = QVBoxLayout()
text_area = QTextEdit()
tmp_file_fd, tmp_file_path = tempfile.mkstemp(prefix = "cura-crash", text = True)
os.close(tmp_file_fd)
with open(tmp_file_path, "w", encoding = "utf-8") as f:
faulthandler.dump_traceback(f, all_threads=True)
with open(tmp_file_path, "r", encoding = "utf-8") as f:
logdata = f.read()
text_area.setText(logdata)
text_area.setReadOnly(True)
layout.addWidget(text_area)
group.setLayout(layout)
self.data["log"] = logdata
return group
def _buttonsWidget(self):
buttons = QDialogButtonBox()
buttons.addButton(QDialogButtonBox.Close)
# Like above, this will be served as a separate detailed report dialog if the application has not yet been
# fully loaded. In this case, "send report" will be a check box in the early crash dialog, so there is no
# need for this extra button.
if self.has_started:
buttons.addButton(catalog.i18nc("@action:button", "Send report"), QDialogButtonBox.AcceptRole)
buttons.accepted.connect(self._sendCrashReport)
buttons.rejected.connect(self.dialog.close)
return buttons
def _sendCrashReport(self):
if with_sentry_sdk:
try:
hub = Hub.current
if not Logger.getLoggers():
# No loggers have been loaded yet, so we don't have any breadcrumbs :(
# So add them manually so we at least have some info...
add_breadcrumb(level = "info", message = "SentryLogging was not initialised yet")
for log_type, line in Logger.getUnloggedLines():
add_breadcrumb(message=line)
event, hint = event_from_exception((self.exception_type, self.value, self.traceback))
hub.capture_event(event, hint=hint)
hub.flush()
except Exception as e: # We don't want any exception to cause problems
Logger.logException("e", "An exception occurred while trying to send crash report")
if not self.has_started:
print("An exception occurred while trying to send crash report: %s" % e)
else:
msg = "SentrySDK is not available and the report could not be sent."
Logger.logException("e", msg)
if not self.has_started:
print(msg)
print("Exception type: {}".format(self.exception_type))
print("Value: {}".format(self.value))
print("Traceback: {}".format(self.traceback))
os._exit(1)
def show(self):
# must run the GUI code on the Qt thread, otherwise the widgets on the dialog won't react correctly.
Application.getInstance().callLater(self._show)
def _show(self):
# When the exception is in the skip_exception_types list, the dialog is not created, so we don't need to show it
if self.dialog:
self.dialog.exec_()
os._exit(1)
|
Ultimaker/Cura
|
cura/CrashHandler.py
|
Python
|
lgpl-3.0
| 19,761
|
import numpy as np
from .base import classifier
from .base import regressor
from .utils import toIndex, fromIndex, to1ofK, from1ofK
from numpy import asarray as arr
from numpy import atleast_2d as twod
from numpy import asmatrix as mat
################################################################################
## NNETCLASSIFY ################################################################
################################################################################
def _add1(X):
return np.hstack( (np.ones((X.shape[0],1)),X) )
class nnetClassify(classifier):
"""A simple neural network classifier
Attributes:
classes: list of class (target) identifiers for the classifier
layers : list of layer sizes [N,S1,S2,...,C], where N = # of input features, S1 = # of hidden nodes
in layer 1, ... , and C = the number of classes, or 1 for a binary classifier
      weights: list of numpy arrays containing each layer's weights, sized e.g. (S1,N+1), (S2,S1+1), etc. (the +1 columns hold the constant/bias terms)
"""
def __init__(self, *args, **kwargs):
"""Constructor for nnetClassify (neural net classifier).
Parameters: see the "train" function; calls "train" if arguments passed
Properties:
classes : list of identifiers for each class
wts : list of coefficients (weights) for each layer of the NN
activation : function for layer activation function & derivative
"""
self.classes = []
self.wts = []
#self.set_activation(activation.lower())
#self.init_weights(sizes, init.lower(), X, Y)
self.Sig = lambda Z: np.tanh(Z) ## TODO: make flexible
self.dSig= lambda Z: 1.0 - np.tanh(Z)**2 # (internal layers nonlinearity & derivative)
#self.Sig0 = self.Sig
#self.dSig0= self.dSig
self.Sig0 = lambda Z: 1.0/(1.0 + np.exp(-Z)) # final layer nonlinearity & derivative
self.dSig0= lambda Z: np.exp(-Z) / (1.0+np.exp(-Z))**2
if len(args) or len(kwargs): # if we were given optional arguments,
self.train(*args, **kwargs) # just pass them through to "train"
def __repr__(self):
to_return = 'Multi-layer perceptron (neural network) classifier\nLayers [{}]'.format(self.layers)
return to_return
def __str__(self):
to_return = 'Multi-layer perceptron (neural network) classifier\nLayers [{}]'.format(self.layers)
return to_return
def nLayers(self):
return len(self.wts)
@property
def layers(self):
"""Return list of layer sizes, [N,H1,H2,...,C]
N = # of input features
Hi = # of hidden nodes in layer i
C = # of output nodes (usually # of classes or 1)
"""
if len(self.wts):
layers = [self.wts[l].shape[1] for l in range(len(self.wts))]
layers.append( self.wts[-1].shape[0] )
else:
layers = []
return layers
@layers.setter
def layers(self, layers):
raise NotImplementedError
# adapt / change size of weight matrices (?)
## CORE METHODS ################################################################
def predictSoft(self, X):
"""Make 'soft' (per-class confidence) predictions of the neural network on data X.
Args:
X : MxN numpy array containing M data points with N features each
Returns:
P : MxC numpy array of C class probabilities for each of the M data
"""
X = arr(X) # convert to numpy if needed
L = self.nLayers() # get number of layers
Z = _add1(X) # initialize: input features + constant term
for l in range(L - 1): # for all *except output* layer:
Z = Z.dot( self.wts[l].T ) # compute linear response of next layer
Z = _add1( self.Sig(Z) ) # apply activation function & add constant term
Z = Z.dot( self.wts[L - 1].T ) # compute output layer linear response
Z = self.Sig0(Z) # apply output layer activation function
if Z.shape[1]==1: Z = np.hstack( (2.0*self.Sig0(0.0)-Z,Z) ) # if binary classifier, make Mx2
return Z
def train(self, X, Y, init='zeros', stepsize=.01, stopTol=1e-4, stopIter=5000):
"""Train the neural network.
Args:
X : MxN numpy array containing M data points with N features each
Y : Mx1 numpy array of targets (class labels) for each data point in X
sizes : [Nin, Nh1, ... , Nout]
Nin is the number of features, Nout is the number of outputs,
which is the number of classes. Member weights are {W1, ... , WL-1},
where W1 is Nh1 x Nin, etc.
init : str
'none', 'zeros', or 'random'. inits the neural net weights.
stepsize : scalar
The stepsize for gradient descent (decreases as 1 / iter).
stopTol : scalar
Tolerance for stopping criterion.
stopIter : int
The maximum number of steps before stopping.
activation : str
'logistic', 'htangent', or 'custom'. Sets the activation functions.
"""
if self.wts[0].shape[1] - 1 != len(X[0]):
raise ValueError('layer[0] must equal the number of columns of X (number of features)')
self.classes = self.classes if len(self.classes) else np.unique(Y)
if len(self.classes) != self.wts[-1].shape[0]: # and (self.wts[-1].shape[0]!=1 or len(self.classes)!=2):
raise ValueError('layers[-1] must equal the number of classes in Y, or 1 for binary Y')
M,N = mat(X).shape # M = number of data points, N = number of features
C = len(self.classes) # number of classes
L = len(self.wts) # get number of layers
Y_tr_k = to1ofK(Y,self.classes) # convert Y to 1-of-K format
# outer loop of stochastic gradient descent
it = 1 # iteration number
nextPrint = 1 # next time to print info
done = 0 # end of loop flag
J01, Jsur = [],[] # misclassification rate & surrogate loss values
while not done:
step_i = float(stepsize) / it # step size evolution; classic 1/t decrease
# stochastic gradient update (one pass)
for j in range(M):
A,Z = self.__responses(twod(X[j,:])) # compute all layers' responses, then backprop
delta = (Z[L] - Y_tr_k[j,:]) * arr(self.dSig0(Z[L])) # take derivative of output layer
for l in range(L - 1, -1, -1):
grad = delta.T.dot( Z[l] ) # compute gradient on current layer wts
delta = delta.dot(self.wts[l]) * arr(self.dSig(Z[l])) # propagate gradient down
delta = delta[:,1:] # discard constant feature
self.wts[l] -= step_i * grad # take gradient step on current layer wts
J01.append( self.err_k(X, Y_tr_k) ) # error rate (classification)
Jsur.append( self.mse_k(X, Y_tr_k) ) # surrogate (mse on output)
if it >= nextPrint:
print('it {} : Jsur = {}, J01 = {}'.format(it,Jsur[-1],J01[-1]))
nextPrint *= 2
# check if finished
done = ((it > 1) and (np.abs(Jsur[-1] - Jsur[-2]) < stopTol)) or (it >= stopIter) # converged, or hit iteration cap
it += 1
def err_k(self, X, Y):
"""Compute misclassification error rate. Assumes Y in 1-of-k form. """
return self.err(X, from1ofK(Y,self.classes).ravel())
def mse(self, X, Y):
"""Compute mean squared error of the predictor on test data (X,Y). """
return self.mse_k(X, to1ofK(Y, self.classes))
def mse_k(self, X, Y):
"""Compute mean squared error of predictor; assumes Y is in 1-of-k format. """
return np.power(Y - self.predictSoft(X), 2).sum(1).mean(0)
## MUTATORS ####################################################################
def setActivation(self, method, sig=None, d_sig=None, sig_0=None, d_sig_0=None):
# def setActivation(self, method, sig=None, sig0=None):
"""
This method sets the activation functions.
Parameters
----------
method : string, {'logistic' , 'htangent', 'custom'} -- which activation type
Optional arguments for "custom" activation:
sig, d_sig : function objects F(z) for the hidden-layer activation & its derivative
sig_0, d_sig_0 : function objects F(z) for the final-layer activation & its derivative
"""
method = method.lower()
if method == 'logistic':
self.Sig = lambda z: twod(1 / (1 + np.exp(-z)))
self.dSig = lambda z: twod(np.multiply(self.Sig(z), (1 - self.Sig(z))))
# self.sig_0 = self.sig
# self.d_sig_0 = self.d_sig
elif method == 'htangent':
self.Sig = lambda z: twod(np.tanh(z))
self.dSig = lambda z: twod(1 - np.power(np.tanh(z), 2))
# self.sig_0 = self.sig
# self.d_sig_0 = self.d_sig
elif method == 'custom':
self.Sig = sig
self.dSig = d_sig
if sig_0 is not None:
self.Sig0 = sig_0
if d_sig_0 is not None:
self.dSig0 = d_sig_0
else:
raise ValueError('NNetClassify.set_activation: ' + str(method) + ' is not a valid option for method')
self.activation = method
def set_layers(self, sizes, init='random'):
"""
Set layers sizes to sizes.
Parameters
----------
sizes : [int]
List containing sizes.
init : str (optional)
Weight initialization method.
"""
self.init_weights(sizes, init, None, None)
def init_weights(self, sizes, init, X, Y):
"""
This method sets layer sizes and initializes the weights of the neural network
sizes = [Ninput, N1, N2, ... , Noutput], where Ninput = # of input features, and Nouput = # classes
init = {'zeros', 'random'} : initialize to all zeros or small random values (breaks symmetry)
"""
init = init.lower()
if init == 'none':
pass
elif init == 'zeros':
self.wts = [np.zeros((sizes[i + 1],sizes[i] + 1)) for i in range(len(sizes) - 1)]
elif init == 'random':
self.wts = [.0025 * np.random.randn(sizes[i+1],sizes[i]+1) for i in range(len(sizes) - 1)]
else:
raise ValueError('NNetClassify.init_weights: ' + str(init) + ' is not a valid option for init')
## HELPERS #####################################################################
def __responses(self, Xin):
"""
Helper function that gets linear sum from previous layer (A) and
saturated activation responses (Z) for a data point. Used in:
train
"""
L = len(self.wts)
A = [arr([1.0])] # initialize (layer 0)
Z = [_add1(Xin)] # input to next layer: original features
for l in range(1, L):
A.append( Z[l - 1].dot(self.wts[l - 1].T) ) # linear response of previous layer
Z.append( _add1(self.Sig(A[l])) ) # apply activation & add constant feature
A.append( Z[L - 1].dot(self.wts[L - 1].T) ) # linear response, output layer
Z.append( self.Sig0(A[L]) ) # apply activation (saturate for classifier, not regressor)
return A,Z
################################################################################
################################################################################
################################################################################
class nnetRegress(regressor):
"""A simple neural network regressor
Attributes:
layers (list): layer sizes [N,S1,S2,...,C], where N = # of input features,
S1 = # of hidden nodes in layer 1, ... , and C = the number of
classes, or 1 for a binary classifier
weights (list): list of numpy arrays containing each layer's weights, sizes
(S1,N), (S2,S1), etc.
"""
def __init__(self, *args, **kwargs):
"""Constructor for nnetRegress (neural net regressor).
Parameters: see the "train" function; calls "train" if arguments passed
Properties:
wts : list of coefficients (weights) for each layer of the NN
activation : function for layer activation function & derivative
"""
self.wts = []
#self.set_activation(activation.lower())
#self.init_weights(sizes, init.lower(), X, Y)
self.Sig = lambda Z: np.tanh(Z) ## TODO: make flexible
self.dSig= lambda Z: 1.0 - np.tanh(Z)**2 # (internal layers nonlinearity & derivative)
#self.Sig0 = self.Sig
#self.dSig0= self.dSig
self.Sig0 = lambda Z: Z # final layer nonlinearity & derivative
self.dSig0= lambda Z: 1.0+0*Z #
if len(args) or len(kwargs): # if we were given optional arguments,
self.train(*args, **kwargs) # just pass them through to "train"
def __repr__(self):
to_return = 'Multi-layer perceptron (neural network) regressor\nLayers [{}]'.format(self.layers)
return to_return
def __str__(self):
to_return = 'Multi-layer perceptron (neural network) regressor\nLayers [{}]'.format(self.layers)
return to_return
def nLayers(self):
return len(self.wts)
@property
def layers(self):
"""Return list of layer sizes, [N,H1,H2,...,C]
N = # of input features
Hi = # of hidden nodes in layer i
C = # of output nodes (usually 1)
"""
if len(self.wts):
layers = [self.wts[l].shape[1] for l in range(len(self.wts))]
layers.append( self.wts[-1].shape[0] )
else:
layers = []
return layers
@layers.setter
def layers(self, layers):
raise NotImplementedError
# adapt / change size of weight matrices (?)
## CORE METHODS ################################################################
def predict(self, X):
"""Make predictions of the neural network on data X.
"""
X = arr(X) # convert to numpy if needed
L = self.nLayers() # get number of layers
Z = _add1(X) # initialize: input features + constant term
for l in range(L - 1): # for all *except output* layer:
Z = Z.dot( self.wts[l].T ) # compute linear response of next layer
Z = _add1( self.Sig(Z) ) # apply activation function & add constant term
Z = Z.dot( self.wts[L - 1].T ) # compute output layer linear response
Z = self.Sig0(Z) # apply output layer activation function
return Z
def train(self, X, Y, init='zeros', stepsize=.01, stopTol=1e-4, stopIter=5000):
"""Train the neural network.
Args:
X : MxN numpy array containing M data points with N features each
Y : Mx1 numpy array of targets for each data point in X
sizes (list of int): [Nin, Nh1, ... , Nout]
Nin is the number of features, Nout is the number of outputs,
which is the number of target dimensions (usually 1). Weights are {W1, ... , WL-1},
where W1 is Nh1 x Nin, etc.
init (str): 'none', 'zeros', or 'random'. inits the neural net weights.
stepsize (float): The stepsize for gradient descent (decreases as 1 / iter).
stopTol (float): Tolerance for stopping criterion.
stopIter (int): The maximum number of steps before stopping.
activation (str): 'logistic', 'htangent', or 'custom'. Sets the activation functions.
"""
if self.wts[0].shape[1] - 1 != len(X[0]):
raise ValueError('layer[0] must equal the number of columns of X (number of features)')
if self.wts[-1].shape[0] > 1 and self.wts[-1].shape[0] != Y.shape[1]:
raise ValueError('layers[-1] must equal the number of target dimensions in Y, or 1 for scalar Y')
M,N = arr(X).shape # M = number of data points, N = number of features
L = len(self.wts) # get number of layers
Y = arr(Y)
Y2d = Y if len(Y.shape)>1 else Y[:,np.newaxis]
# outer loop of stochastic gradient descent
it = 1 # iteration number
nextPrint = 1 # next time to print info
done = 0 # end of loop flag
Jsur = [] # surrogate (MSE) loss values
while not done:
step_i = (2.0*stepsize) / (2.0+it) # step size evolution; classic 1/t decrease
# stochastic gradient update (one pass)
for j in range(M):
A,Z = self.__responses(twod(X[j,:])) # compute all layers' responses, then backprop
delta = (Z[L] - Y2d[j,:]) * arr(self.dSig0(Z[L])) # take derivative of output layer
for l in range(L - 1, -1, -1):
grad = delta.T.dot( Z[l] ) # compute gradient on current layer wts
delta = delta.dot(self.wts[l]) * arr(self.dSig(Z[l])) # propagate gradient down
delta = delta[:,1:] # discard constant feature
self.wts[l] -= step_i * grad # take gradient step on current layer wts
Jsur.append( self.mse(X, Y2d) ) # surrogate (mse on output)
if it >= nextPrint:
print('it {} : J = {}'.format(it,Jsur[-1]))
nextPrint *= 2
# check if finished
done = ((it > 1) and (np.abs(Jsur[-1] - Jsur[-2]) < stopTol)) or (it >= stopIter) # converged, or hit iteration cap
it += 1
## MUTATORS ####################################################################
def setActivation(self, method, sig=None, d_sig=None, sig_0=None, d_sig_0=None):
""" This method sets the activation functions.
Args:
method : string, {'logistic' , 'htangent', 'custom'} -- which activation type
Optional arguments for "custom" activation:
sig, d_sig : function objects F(z) for the hidden-layer activation & its derivative
sig_0, d_sig_0 : function objects F(z) for the final-layer activation & its derivative
"""
raise NotImplementedError # unfinished / untested
method = method.lower()
if method == 'logistic':
self.sig = lambda z: twod(1 / (1 + np.exp(-z)))
self.d_sig = lambda z: twod(np.multiply(self.sig(z), (1 - self.sig(z))))
self.sig_0 = self.sig
self.d_sig_0 = self.d_sig
elif method == 'htangent':
self.sig = lambda z: twod(np.tanh(z))
self.d_sig = lambda z: twod(1 - np.power(np.tanh(z), 2))
self.sig_0 = self.sig
self.d_sig_0 = self.d_sig
elif method == 'custom':
self.sig = sig
self.d_sig = d_sig
self.sig_0 = sig_0
self.d_sig_0 = d_sig_0
else:
raise ValueError('nnetRegress.set_activation: ' + str(method) + ' is not a valid option for method')
self.activation = method
def set_layers(self, sizes, init='random'):
"""Set layers sizes to sizes.
Args:
sizes (int): List containing sizes.
init (str, optional): Weight initialization method.
"""
self.init_weights(sizes, init, None, None)
def init_weights(self, sizes, init, X, Y):
"""Set layer sizes and initialize the weights of the neural network
Args:
sizes (list of int): [Nin, N1, N2, ... , Nout], where Nin = # of input features, and Nout = # of outputs
init (str): {'zeros', 'random'} initialize to all zeros or small random values (breaks symmetry)
"""
init = init.lower()
if init == 'none':
pass
elif init == 'zeros':
self.wts = arr([np.zeros((sizes[i + 1],sizes[i] + 1)) for i in range(len(sizes) - 1)], dtype=object)
elif init == 'random':
self.wts = [.0025 * np.random.randn(sizes[i+1],sizes[i]+1) for i in range(len(sizes) - 1)]
else:
raise ValueError('nnetRegress.init_weights: ' + str(init) + ' is not a valid option for init')
## HELPERS #####################################################################
def __responses(self, Xin):
"""
Helper function that gets linear sum from previous layer (A) and
saturated activation responses (Z) for a data point. Used in:
train
"""
L = len(self.wts)
A = [arr([1.0])] # initialize (layer 0)
Z = [_add1(Xin)] # input to next layer: original features
for l in range(1, L):
A.append( Z[l - 1].dot(self.wts[l - 1].T) ) # linear response of previous layer
Z.append( _add1(self.Sig(A[l])) ) # apply activation & add constant feature
A.append( Z[L - 1].dot(self.wts[L - 1].T) ) # linear response, output layer
Z.append( self.Sig0(A[L]) ) # apply activation (saturate for classifier, not regressor)
return A,Z
################################################################################
################################################################################
################################################################################
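################################################################################
# A minimal smoke-test sketch, added for illustration only; it is not part of
# the original module. It assumes the surrounding mltools package (the base
# classifier class and utils) is importable, and trains a tiny 2-class net.
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.randn(40, 2)                  # 40 points, 2 features
    Y = (X[:, 0] + X[:, 1] > 0).astype(int)     # linearly separable labels
    nn = nnetClassify()
    nn.init_weights([2, 4, 2], 'random', X, Y)  # 2 inputs, 4 hidden, 2 classes
    nn.train(X, Y, stepsize=.1, stopTol=1e-6, stopIter=50)
    print('soft predictions:\n', nn.predictSoft(X[:3]))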
|
sameersingh/ml-discussions
|
week9/mltools/nnet.py
|
Python
|
apache-2.0
| 22,387
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0001_initial'),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AutomaticProductList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Title')),
('description', models.TextField(verbose_name='Description', blank=True)),
('link_url', oscar.models.fields.ExtendedURLField(verbose_name='Link URL', blank=True)),
('link_text', models.CharField(max_length=255, verbose_name='Link text', blank=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('method', models.CharField(max_length=128, verbose_name='Method', choices=[('Bestselling', 'Bestselling products'), ('RecentlyAdded', 'Recently added products')])),
('num_products', models.PositiveSmallIntegerField(default=4, verbose_name='Number of Products')),
],
options={
'verbose_name_plural': 'Automatic product lists',
'verbose_name': 'Automatic product list',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HandPickedProductList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Title')),
('description', models.TextField(verbose_name='Description', blank=True)),
('link_url', oscar.models.fields.ExtendedURLField(verbose_name='Link URL', blank=True)),
('link_text', models.CharField(max_length=255, verbose_name='Link text', blank=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Hand Picked Product Lists',
'verbose_name': 'Hand Picked Product List',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('link_url', oscar.models.fields.ExtendedURLField(verbose_name='Link URL', help_text='This is where this promotion links to', blank=True)),
('image', models.ImageField(upload_to='images/promotions/', max_length=255, verbose_name='Image')),
('date_created', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Image',
'verbose_name': 'Image',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='KeywordPromotion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('position', models.CharField(verbose_name='Position', max_length=100, help_text='Position on page')),
('display_order', models.PositiveIntegerField(default=0, verbose_name='Display Order')),
('clicks', models.PositiveIntegerField(default=0, verbose_name='Clicks')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('keyword', models.CharField(max_length=200, verbose_name='Keyword')),
('filter', models.CharField(max_length=200, verbose_name='Filter', blank=True)),
('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=models.CASCADE)),
],
options={
'ordering': ['-clicks'],
'verbose_name_plural': 'Keyword Promotions',
'verbose_name': 'Keyword Promotion',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MultiImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('date_created', models.DateTimeField(auto_now_add=True)),
('images', models.ManyToManyField(blank=True, help_text='Choose the Image content blocks that this block will use. (You may need to create some first).', to='promotions.Image', null=True)),
],
options={
'verbose_name_plural': 'Multi Images',
'verbose_name': 'Multi Image',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OrderedProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_order', models.PositiveIntegerField(default=0, verbose_name='Display Order')),
],
options={
'ordering': ('display_order',),
'verbose_name_plural': 'Ordered product',
'verbose_name': 'Ordered product',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OrderedProductList',
fields=[
('handpickedproductlist_ptr', models.OneToOneField(parent_link=True,
serialize=False,
auto_created=True,
primary_key=True,
to='promotions.HandPickedProductList',
on_delete=models.CASCADE)),
('display_order', models.PositiveIntegerField(default=0, verbose_name='Display Order')),
],
options={
'ordering': ('display_order',),
'verbose_name_plural': 'Ordered Product Lists',
'verbose_name': 'Ordered Product List',
},
bases=('promotions.handpickedproductlist',),
),
migrations.CreateModel(
name='PagePromotion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('position', models.CharField(verbose_name='Position', max_length=100, help_text='Position on page')),
('display_order', models.PositiveIntegerField(default=0, verbose_name='Display Order')),
('clicks', models.PositiveIntegerField(default=0, verbose_name='Clicks')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('page_url', oscar.models.fields.ExtendedURLField(max_length=128, verify_exists=True, db_index=True, verbose_name='Page URL')),
('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=models.CASCADE)),
],
options={
'ordering': ['-clicks'],
'verbose_name_plural': 'Page Promotions',
'verbose_name': 'Page Promotion',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RawHTML',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('display_type', models.CharField(verbose_name='Display type', max_length=128, help_text='This can be used to have different types of HTML blocks (eg different widths)', blank=True)),
('body', models.TextField(verbose_name='HTML')),
('date_created', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Raw HTML',
'verbose_name': 'Raw HTML',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SingleProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('description', models.TextField(verbose_name='Description', blank=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('product', models.ForeignKey(to='catalogue.Product', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Single product',
'verbose_name': 'Single product',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TabbedBlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Title')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
],
options={
'verbose_name_plural': 'Tabbed Blocks',
'verbose_name': 'Tabbed Block',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='orderedproductlist',
name='tabbed_block',
field=models.ForeignKey(verbose_name='Tabbed Block', related_name='tabs', to='promotions.TabbedBlock', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='orderedproduct',
name='list',
field=models.ForeignKey(verbose_name='List', to='promotions.HandPickedProductList', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='orderedproduct',
name='product',
field=models.ForeignKey(verbose_name='Product', to='catalogue.Product', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='orderedproduct',
unique_together=set([('list', 'product')]),
),
migrations.AddField(
model_name='handpickedproductlist',
name='products',
field=models.ManyToManyField(through='promotions.OrderedProduct', blank=True, verbose_name='Products', to='catalogue.Product', null=True),
preserve_default=True,
),
]
|
vicky2135/lucious
|
src/oscar/apps/promotions/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 11,439
|
# Returns a list of file paths contained within given parent folder
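# A minimal sketch of the behaviour the comment above describes, added for
# illustration; this is an assumption, not the repository's own code, and the
# name list_files is hypothetical.
import os

def list_files(parent):
    """Return the paths of all files contained within the given parent folder."""
    paths = []
    for root, _dirs, files in os.walk(parent):
        for name in files:
            paths.append(os.path.join(root, name))
    return paths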
|
anarkhede/Nark
|
File_org_preproc/filelist.py
|
Python
|
gpl-3.0
| 68
|
from __future__ import print_function
import warnings
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install
import numpy
# Because many people neglected to run the pylearn2/utils/setup.py script
# separately, we compile the necessary Cython extensions here but because
# Cython is not a strict dependency, we issue a warning when it is not
# available.
try:
from Cython.Distutils import build_ext
cython_available = True
except ImportError:
warnings.warn("Cython was not found and hence pylearn2.utils._window_flip "
"and pylearn2.utils._video and classes that depend on them "
"(e.g. pylearn2.train_extensions.window_flip) will not be "
"available")
cython_available = False
if cython_available:
cmdclass = {'build_ext': build_ext}
ext_modules = [Extension("pylearn2.utils._window_flip",
["pylearn2/utils/_window_flip.pyx"],
include_dirs=[numpy.get_include()]),
Extension("pylearn2.utils._video",
["pylearn2/utils/_video.pyx"],
include_dirs=[numpy.get_include()])]
else:
cmdclass = {}
ext_modules = []
setup(
cmdclass=cmdclass,
ext_modules=ext_modules,
name='pylearn2',
version='0.1dev',
packages=find_packages(),
description='A machine learning library built on top of Theano.',
license='BSD 3-clause license',
long_description=open('README.rst', 'rb').read().decode('utf8'),
install_requires=[
'numpy',
'pyyaml',
'argparse',
'six',
"Theano"],
scripts=['bin/pylearn2-plot-monitor', 'bin/pylearn2-print-monitor',
'bin/pylearn2-show-examples', 'bin/pylearn2-show-weights',
'bin/pylearn2-train'],
package_data={
'': ['*.cu', '*.cuh', '*.h'],
},
)
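# Usage sketch (assumption: the standard setuptools workflow, nothing specific
# to this repository):
#   python setup.py build_ext --inplace   # compile the optional Cython extensions
#   pip install -e .                      # editable install of pylearn2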
|
Refefer/pylearn2
|
setup.py
|
Python
|
bsd-3-clause
| 1,946
|
'''
Load options from the command line
Sam Geen, July 2013
'''
import sys
def Arg1(default=None):
'''
Read the first argument
default: Default value to return if no argument is found
Return: First argument in sys.argv (minus program name) or default if none
'''
if len(sys.argv) < 2:
return default
else:
return sys.argv[1]
if __name__=="__main__":
print "Test Arg1():"
print Arg1()
print Arg1("Bumface")
|
samgeen/Hamu
|
Utils/CommandLine.py
|
Python
|
mit
| 464
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import commah
# Here's an example run script, modify as preferred
commah.run('WMAP5',
zi=0,
Mi=[1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14],
z=[0, 0.5, 1, 1.5, 2],
filename='WMAP5_Test.txt')
|
astroduff/commah
|
run.py
|
Python
|
bsd-3-clause
| 320
|
from progressivis import Scheduler, Every
from progressivis.vis import ScatterPlot
from progressivis.io import CSVLoader
from progressivis.datasets import get_dataset
def filter_(df):
lon = df['pickup_longitude']
return df[(lon < -70) & (lon > -80)]
def print_len(x):
if x is not None:
print(len(x))
#log_level()
try:
s = scheduler # reuse a scheduler if the surrounding session already defines one
except NameError:
s = Scheduler()
csv = CSVLoader(get_dataset('bigfile'),header=None,index_col=False,force_valid_ids=True,scheduler=s)
pr = Every(scheduler=s)
pr.input.df = csv.output.table
scatterplot = ScatterPlot(x_column='_1', y_column='_2', scheduler=s)
scatterplot.create_dependent_modules(csv,'table')
if __name__=='__main__':
csv.start()
s.join()
print(len(csv.df()))
|
jdfekete/progressivis
|
examples/test_scatterplot.py
|
Python
|
bsd-2-clause
| 739
|
"""A generic client for creating and managing transformations.
See the information about transformation parameters below.
"""
import six
import json
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.JEncode import encode
from DIRAC.Core.Utilities.PromptUser import promptUser
from DIRAC.Core.Base.API import API
from DIRAC.TransformationSystem.Client.BodyPlugin.BaseBody import BaseBody
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.RequestManagementSystem.Client.Operation import Operation
COMPONENT_NAME = "Transformation"
class Transformation(API):
#############################################################################
def __init__(self, transID=0, transClient=None):
"""c'tor"""
super(Transformation, self).__init__()
self.paramTypes = {
"TransformationID": six.integer_types,
"TransformationName": six.string_types,
"Status": six.string_types,
"Description": six.string_types,
"LongDescription": six.string_types,
"Type": six.string_types,
"Plugin": six.string_types,
"AgentType": six.string_types,
"FileMask": six.string_types,
"TransformationGroup": six.string_types,
"GroupSize": six.integer_types + (float,),
"InheritedFrom": six.integer_types,
"Body": six.string_types,
"MaxNumberOfTasks": six.integer_types,
"EventsPerTask": six.integer_types,
}
self.paramValues = {
"TransformationID": 0,
"TransformationName": "",
"Status": "New",
"Description": "",
"LongDescription": "",
"Type": "",
"Plugin": "Standard",
"AgentType": "Manual",
"FileMask": "",
"TransformationGroup": "General",
"GroupSize": 1,
"InheritedFrom": 0,
"Body": "",
"MaxNumberOfTasks": 0,
"EventsPerTask": 0,
}
# the metaquery parameters are neither part of the transformation parameters nor the additional parameters, so
# special treatment is necessary
self.inputMetaQuery = None
self.outputMetaQuery = None
self.ops = Operations()
self.supportedPlugins = self.ops.getValue(
"Transformations/AllowedPlugins", ["Broadcast", "Standard", "BySize", "ByShare"]
)
if not transClient:
self.transClient = TransformationClient()
else:
self.transClient = transClient
self.serverURL = self.transClient.getServer()
self.exists = False
if transID:
self.paramValues["TransformationID"] = transID
res = self.getTransformation()
if res["OK"]:
self.exists = True
elif res["Message"] == "Transformation does not exist":
raise AttributeError("TransformationID %d does not exist" % transID)
else:
self.paramValues["TransformationID"] = 0
gLogger.fatal(
"Failed to get transformation from database", "%s @ %s" % (transID, self.transClient.serverURL)
)
def getServer(self):
return self.serverURL
def reset(self, transID=0):
self.__init__(transID)
self.transClient.setServer(self.serverURL)
return S_OK()
def setTargetSE(self, seList):
return self.__setSE("TargetSE", seList)
def setSourceSE(self, seList):
return self.__setSE("SourceSE", seList)
def setBody(self, body):
"""check that the body is a string, or using the proper syntax for multiple operations,
or is a BodyPlugin object
:param body: transformation body, for example
.. code :: python
body = [ ( "ReplicateAndRegister", { "SourceSE":"FOO-SRM", "TargetSE":"BAR-SRM" }),
( "RemoveReplica", { "TargetSE":"FOO-SRM" } ),
]
:type body: string or list of tuples (or lists) of string and dictionaries or a Body plugin (:py:class:`DIRAC.TransformationSystem.Client.BodyPlugin.BaseBody.BaseBody`)
:raises TypeError: If the structure is not as expected
:raises ValueError: If unknown attribute for the :class:`~DIRAC.RequestManagementSystem.Client.Operation.Operation`
is used
:returns: S_OK, S_ERROR
"""
self.item_called = "Body"
# Simple single operation body case
if isinstance(body, six.string_types):
return self.__setParam(body)
# BodyPlugin case
elif isinstance(body, BaseBody):
return self.__setParam(encode(body))
if not isinstance(body, (list, tuple)):
raise TypeError("Expected list or string, but %r is %s" % (body, type(body)))
# MultiOperation body case
for tup in body:
if not isinstance(tup, (tuple, list)):
raise TypeError("Expected tuple or list, but %r is %s" % (tup, type(tup)))
if len(tup) != 2:
raise TypeError("Expected 2-tuple, but %r is length %d" % (tup, len(tup)))
if not isinstance(tup[0], six.string_types):
raise TypeError("Expected string, but first entry in tuple %r is %s" % (tup, type(tup[0])))
if not isinstance(tup[1], dict):
raise TypeError("Expected dictionary, but second entry in tuple %r is %s" % (tup, type(tup[0])))
for par, val in tup[1].items():
if not isinstance(par, six.string_types):
raise TypeError("Expected string, but key in dictionary %r is %s" % (par, type(par)))
if par not in Operation.ATTRIBUTE_NAMES:
raise ValueError("Unknown attribute for Operation: %s" % par)
if not isinstance(val, six.string_types + six.integer_types + (float, list, tuple, dict)):
raise TypeError("Cannot encode %r, in json" % (val))
return self.__setParam(json.dumps(body))
def setInputMetaQuery(self, query):
"""Set the input meta query.
:param dict query: dictionary to use for input meta query
"""
self.inputMetaQuery = query
return S_OK()
def setOutputMetaQuery(self, query):
"""Set the output meta query.
:param dict query: dictionary to use for output meta query
"""
self.outputMetaQuery = query
return S_OK()
def __setSE(self, seParam, seList):
if isinstance(seList, six.string_types):
try:
seList = eval(seList)
except Exception:
seList = seList.split(",")
elif isinstance(seList, (list, dict, tuple)):
seList = list(seList)
else:
return S_ERROR("Bad argument type")
res = self.__checkSEs(seList)
if not res["OK"]:
return res
self.item_called = seParam
return self.__setParam(seList)
def __getattr__(self, name):
if name.find("get") == 0:
item = name[3:]
self.item_called = item
return self.__getParam
if name.find("set") == 0:
item = name[3:]
self.item_called = item
return self.__setParam
raise AttributeError(name)
def __getParam(self):
if self.item_called == "Available":
return S_OK(list(self.paramTypes))
if self.item_called == "Parameters":
return S_OK(self.paramValues)
if self.item_called in self.paramValues:
return S_OK(self.paramValues[self.item_called])
raise AttributeError("Unknown parameter for transformation: %s" % self.item_called)
def __setParam(self, value):
change = False
if self.item_called in self.paramTypes:
if self.paramValues[self.item_called] != value:
if isinstance(value, self.paramTypes[self.item_called]):
change = True
else:
raise TypeError(
"%s %s %s expected one of %s"
% (self.item_called, value, type(value), self.paramTypes[self.item_called])
)
else:
if self.item_called not in self.paramValues:
change = True
else:
if self.paramValues[self.item_called] != value:
change = True
if not change:
gLogger.verbose("No change of parameter %s required" % self.item_called)
else:
gLogger.verbose("Parameter %s to be changed" % self.item_called)
transID = self.paramValues["TransformationID"]
if self.exists and transID:
res = self.transClient.setTransformationParameter(transID, self.item_called, value)
if not res["OK"]:
return res
self.paramValues[self.item_called] = value
return S_OK()
def getTransformation(self, printOutput=False):
transID = self.paramValues["TransformationID"]
if not transID:
gLogger.fatal("No TransformationID known")
return S_ERROR()
res = self.transClient.getTransformation(transID, extraParams=True)
if not res["OK"]:
if printOutput:
self._prettyPrint(res)
return res
transParams = res["Value"]
for paramName, paramValue in transParams.items():
setter = None
setterName = "set%s" % paramName
if hasattr(self, setterName) and callable(getattr(self, setterName)):
setter = getattr(self, setterName)
if not setter:
gLogger.error("Unable to invoke setter %s, it isn't a member function" % setterName)
continue
setter(paramValue)
if printOutput:
gLogger.info("No printing available yet")
return S_OK(transParams)
def getTransformationLogging(self, printOutput=False):
transID = self.paramValues["TransformationID"]
if not transID:
gLogger.fatal("No TransformationID known")
return S_ERROR()
res = self.transClient.getTransformationLogging(transID)
if not res["OK"]:
if printOutput:
self._prettyPrint(res)
return res
loggingList = res["Value"]
if printOutput:
self._printFormattedDictList(
loggingList, ["Message", "MessageDate", "AuthorDN"], "MessageDate", "MessageDate"
)
return S_OK(loggingList)
def extendTransformation(self, nTasks, printOutput=False):
return self.__executeOperation("extendTransformation", nTasks, printOutput=printOutput)
def cleanTransformation(self, printOutput=False):
res = self.__executeOperation("cleanTransformation", printOutput=printOutput)
if res["OK"]:
self.paramValues["Status"] = "Cleaned"
return res
def deleteTransformation(self, printOutput=False):
res = self.__executeOperation("deleteTransformation", printOutput=printOutput)
if res["OK"]:
self.reset()
return res
def addFilesToTransformation(self, lfns, printOutput=False):
return self.__executeOperation("addFilesToTransformation", lfns, printOutput=printOutput)
def setFileStatusForTransformation(self, status, lfns, printOutput=False):
return self.__executeOperation("setFileStatusForTransformation", status, lfns, printOutput=printOutput)
def getTransformationTaskStats(self, printOutput=False):
return self.__executeOperation("getTransformationTaskStats", printOutput=printOutput)
def getTransformationStats(self, printOutput=False):
return self.__executeOperation("getTransformationStats", printOutput=printOutput)
def deleteTasks(self, taskMin, taskMax, printOutput=False):
return self.__executeOperation("deleteTasks", taskMin, taskMax, printOutput=printOutput)
def addTaskForTransformation(self, lfns=[], se="Unknown", printOutput=False):
return self.__executeOperation("addTaskForTransformation", lfns, se, printOutput=printOutput)
def setTaskStatus(self, taskID, status, printOutput=False):
return self.__executeOperation("setTaskStatus", taskID, status, printOutput=printOutput)
def __executeOperation(self, operation, *parms, **kwds):
transID = self.paramValues["TransformationID"]
if not transID:
gLogger.fatal("No TransformationID known")
return S_ERROR()
printOutput = kwds.pop("printOutput", False)
fcn = None
if hasattr(self.transClient, operation) and callable(getattr(self.transClient, operation)):
fcn = getattr(self.transClient, operation)
if not fcn:
return S_ERROR("Unable to invoke %s, it isn't a member funtion of TransformationClient")
res = fcn(transID, *parms, **kwds)
if printOutput:
self._prettyPrint(res)
return res
def getTransformationFiles(
self,
fileStatus=[],
lfns=[],
outputFields=[
"FileID",
"LFN",
"Status",
"TaskID",
"TargetSE",
"UsedSE",
"ErrorCount",
"InsertedTime",
"LastUpdate",
],
orderBy="FileID",
printOutput=False,
):
condDict = {"TransformationID": self.paramValues["TransformationID"]}
if fileStatus:
condDict["Status"] = fileStatus
if lfns:
condDict["LFN"] = lfns
res = self.transClient.getTransformationFiles(condDict=condDict)
if not res["OK"]:
if printOutput:
self._prettyPrint(res)
return res
if printOutput:
if not outputFields:
gLogger.info("Available fields are: %s" % res["ParameterNames"].join(" "))
elif not res["Value"]:
gLogger.info("No tasks found for selection")
else:
self._printFormattedDictList(res["Value"], outputFields, "FileID", orderBy)
return res
def getTransformationTasks(
self,
taskStatus=[],
taskIDs=[],
outputFields=[
"TransformationID",
"TaskID",
"ExternalStatus",
"ExternalID",
"TargetSE",
"CreationTime",
"LastUpdateTime",
],
orderBy="TaskID",
printOutput=False,
):
condDict = {"TransformationID": self.paramValues["TransformationID"]}
if taskStatus:
condDict["ExternalStatus"] = taskStatus
if taskIDs:
condDict["TaskID"] = taskIDs
res = self.transClient.getTransformationTasks(condDict=condDict)
if not res["OK"]:
if printOutput:
self._prettyPrint(res)
return res
if printOutput:
if not outputFields:
gLogger.info("Available fields are: %s" % res["ParameterNames"].join(" "))
elif not res["Value"]:
gLogger.info("No tasks found for selection")
else:
self._printFormattedDictList(res["Value"], outputFields, "TaskID", orderBy)
return res
#############################################################################
def getTransformations(
self,
transID=[],
transStatus=[],
outputFields=["TransformationID", "Status", "AgentType", "TransformationName", "CreationDate"],
orderBy="TransformationID",
printOutput=False,
):
condDict = {}
if transID:
condDict["TransformationID"] = transID
if transStatus:
condDict["Status"] = transStatus
res = self.transClient.getTransformations(condDict=condDict)
if not res["OK"]:
if printOutput:
self._prettyPrint(res)
return res
if printOutput:
if not outputFields:
gLogger.info("Available fields are: %s" % res["ParameterNames"].join(" "))
elif not res["Value"]:
gLogger.info("No tasks found for selection")
else:
self._printFormattedDictList(res["Value"], outputFields, "TransformationID", orderBy)
return res
#############################################################################
def getAuthorDNfromProxy(self):
"""gets the AuthorDN and username of the transformation from the uploaded proxy"""
username = ""
author = ""
res = getProxyInfo()
if res["OK"]:
author = res["Value"]["identity"]
username = res["Value"]["username"]
else:
gLogger.error("Unable to get uploaded proxy Info %s " % res["Message"])
return S_ERROR(res["Message"])
res = {"username": username, "authorDN": author}
return S_OK(res)
#############################################################################
def getTransformationsByUser(
self,
authorDN="",
userName="",
transID=[],
transStatus=[],
outputFields=["TransformationID", "Status", "AgentType", "TransformationName", "CreationDate", "AuthorDN"],
orderBy="TransformationID",
printOutput=False,
):
condDict = {}
if authorDN == "":
res = self.getAuthorDNfromProxy()
if not res["OK"]:
gLogger.error(res["Message"])
return S_ERROR(res["Message"])
else:
foundUserName = res["Value"]["username"]
foundAuthor = res["Value"]["authorDN"]
# If the user who created the uploaded proxy differs from the provided username, report an error and exit
if not (userName == "" or userName == foundUserName):
gLogger.error(
"Couldn't resolve the authorDN for user '%s' from the uploaded proxy (proxy created by '%s')"
% (userName, foundUserName)
)
return S_ERROR(
"Couldn't resolve the authorDN for user '%s' from the uploaded proxy (proxy created by '%s')"
% (userName, foundUserName)
)
userName = foundUserName
authorDN = foundAuthor
gLogger.info(
"Will list transformations created by user '%s' with status '%s'"
% (userName, ", ".join(transStatus))
)
else:
gLogger.info(
"Will list transformations created by '%s' with status '%s'" % (authorDN, ", ".join(transStatus))
)
condDict["AuthorDN"] = authorDN
if transID:
condDict["TransformationID"] = transID
if transStatus:
condDict["Status"] = transStatus
res = self.transClient.getTransformations(condDict=condDict)
if not res["OK"]:
if printOutput:
self._prettyPrint(res)
return res
if printOutput:
if not outputFields:
gLogger.info("Available fields are: %s" % res["ParameterNames"].join(" "))
elif not res["Value"]:
gLogger.info("No tasks found for selection")
else:
self._printFormattedDictList(res["Value"], outputFields, "TransformationID", orderBy)
return res
#############################################################################
def getSummaryTransformations(self, transID=[]):
"""Show the summary for a list of Transformations
Fields starting with 'F' ('J') refers to files (jobs).
Proc. stand for processed.
"""
condDict = {"TransformationID": transID}
orderby = []
start = 0
maxitems = len(transID)
paramShowNames = [
"TransformationID",
"Type",
"Status",
"Files_Total",
"Files_PercentProcessed",
"Files_Processed",
"Files_Unused",
"Jobs_TotalCreated",
"Jobs_Waiting",
"Jobs_Running",
"Jobs_Done",
"Jobs_Failed",
"Jobs_Stalled",
]
# Below, the header used for each field in the printing: short to fit in one line
paramShowNamesShort = [
"TransID",
"Type",
"Status",
"F_Total",
"F_Proc.(%)",
"F_Proc.",
"F_Unused",
"J_Created",
"J_Wait",
"J_Run",
"J_Done",
"J_Fail",
"J_Stalled",
]
dictList = []
result = self.transClient.getTransformationSummaryWeb(condDict, orderby, start, maxitems)
if not result["OK"]:
self._prettyPrint(result)
return result
if result["Value"]["TotalRecords"] > 0:
try:
paramNames = result["Value"]["ParameterNames"]
for paramValues in result["Value"]["Records"]:
paramShowValues = map(lambda pname: paramValues[paramNames.index(pname)], paramShowNames)
showDict = dict(zip(paramShowNamesShort, paramShowValues))
dictList.append(showDict)
except Exception as x:
print("Exception %s " % str(x))
if not dictList:
gLogger.error("Found no transformations satisfying input condition")
return S_ERROR("Found no transformations satisfying input condition")
else:
print(
self._printFormattedDictList(
dictList, paramShowNamesShort, paramShowNamesShort[0], paramShowNamesShort[0]
)
)
return S_OK(dictList)
#############################################################################
def addTransformation(self, addFiles=True, printOutput=False):
"""Add transformation to the transformation system.
Sets all parameters currently assigned to the transformation.
:param bool addFiles: if True, immediately perform input data query
:param bool printOutput: if True, print information about transformation
"""
res = self._checkCreation()
if not res["OK"]:
return self._errorReport(res, "Failed transformation sanity check")
if printOutput:
gLogger.info("Will attempt to create transformation with the following parameters")
self._prettyPrint(self.paramValues)
res = self.transClient.addTransformation(
self.paramValues["TransformationName"],
self.paramValues["Description"],
self.paramValues["LongDescription"],
self.paramValues["Type"],
self.paramValues["Plugin"],
self.paramValues["AgentType"],
self.paramValues["FileMask"],
transformationGroup=self.paramValues["TransformationGroup"],
groupSize=self.paramValues["GroupSize"],
inheritedFrom=self.paramValues["InheritedFrom"],
body=self.paramValues["Body"],
maxTasks=self.paramValues["MaxNumberOfTasks"],
eventsPerTask=self.paramValues["EventsPerTask"],
addFiles=addFiles,
inputMetaQuery=self.inputMetaQuery,
outputMetaQuery=self.outputMetaQuery,
)
if not res["OK"]:
if printOutput:
self._prettyPrint(res)
return res
transID = res["Value"]
self.exists = True
self.setTransformationID(transID)
gLogger.notice("Created transformation %d" % transID)
for paramName, paramValue in self.paramValues.items():
if paramName not in self.paramTypes:
res = self.transClient.setTransformationParameter(transID, paramName, paramValue)
if not res["OK"]:
gLogger.error("Failed to add parameter", "%s %s" % (paramName, res["Message"]))
gLogger.notice("To add this parameter later please execute the following.")
gLogger.notice("oTransformation = Transformation(%d)" % transID)
gLogger.notice("oTransformation.set%s(...)" % paramName)
return S_OK(transID)
def _checkCreation(self):
"""Few checks"""
if self.paramValues["TransformationID"]:
gLogger.info("You are currently working with an active transformation definition.")
gLogger.info("If you wish to create a new transformation reset the TransformationID.")
gLogger.info("oTransformation.reset()")
return S_ERROR()
requiredParameters = ["TransformationName", "Description", "LongDescription", "Type"]
for parameter in requiredParameters:
if not self.paramValues[parameter]:
gLogger.info("%s is not defined for this transformation. This is required..." % parameter)
self.paramValues[parameter] = input("Please enter the value of " + parameter + " ")
plugin = self.paramValues["Plugin"]
if plugin:
if plugin not in self.supportedPlugins:
gLogger.info("The selected Plugin (%s) is not known to the transformation agent." % plugin)
res = self.__promptForParameter("Plugin", choices=self.supportedPlugins, default="Standard")
if not res["OK"]:
return res
self.paramValues["Plugin"] = res["Value"]
plugin = self.paramValues["Plugin"]
return S_OK()
def _checkBySizePlugin(self):
return self._checkStandardPlugin()
def _checkBySharePlugin(self):
return self._checkStandardPlugin()
def _checkStandardPlugin(self):
groupSize = self.paramValues["GroupSize"]
if groupSize <= 0:
gLogger.info("The GroupSize was found to be less than zero. It has been set to 1.")
res = self.setGroupSize(1)
if not res["OK"]:
return res
return S_OK()
def _checkBroadcastPlugin(self):
gLogger.info(
"The Broadcast plugin requires the following parameters be set: %s" % (", ".join(["SourceSE", "TargetSE"]))
)
requiredParams = ["SourceSE", "TargetSE"]
for requiredParam in requiredParams:
if not self.paramValues.get(requiredParam):
paramValue = input("Please enter " + requiredParam + " ")
setter = None
setterName = "set%s" % requiredParam
if hasattr(self, setterName) and callable(getattr(self, setterName)):
setter = getattr(self, setterName)
if not setter:
return S_ERROR("Unable to invoke %s, this function hasn't been implemented." % setterName)
ses = paramValue.replace(",", " ").split()
res = setter(ses)
if not res["OK"]:
return res
return S_OK()
def __checkSEs(self, seList):
res = gConfig.getSections("/Resources/StorageElements")
if not res["OK"]:
return self._errorReport(res, "Failed to get possible StorageElements")
missing = set(seList) - set(res["Value"])
if missing:
for se in missing:
gLogger.error("StorageElement %s is not known" % se)
return S_ERROR("%d StorageElements not known" % len(missing))
return S_OK()
def __promptForParameter(self, parameter, choices=[], default="", insert=True):
res = promptUser("Please enter %s" % parameter, choices=choices, default=default)
if not res["OK"]:
return self._errorReport(res)
gLogger.notice("%s will be set to '%s'" % (parameter, res["Value"]))
paramValue = res["Value"]
if insert:
setter = None
setterName = "set%s" % parameter
if hasattr(self, setterName) and callable(getattr(self, setterName)):
setter = getattr(self, setterName)
if not setter:
return S_ERROR("Unable to invoke %s, it isn't a member function of Transformation!")
res = setter(paramValue)
if not res["OK"]:
return res
return S_OK(paramValue)
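#############################################################################
# A hedged usage sketch, added for illustration only; it is not part of the
# original module and needs an initialised DIRAC environment (configuration
# and a valid proxy). The transformation name and SE names are placeholders.
if __name__ == "__main__":
    t = Transformation()
    t.setTransformationName("ExampleReplication")
    t.setDescription("Replicate data between two storage elements")
    t.setLongDescription("Created from the usage sketch above")
    t.setType("Replication")
    t.setBody([("ReplicateAndRegister", {"SourceSE": "FOO-SRM", "TargetSE": "BAR-SRM"}),
               ("RemoveReplica", {"TargetSE": "FOO-SRM"})])
    res = t.addTransformation(addFiles=False, printOutput=True)
    if not res["OK"]:
        gLogger.error(res["Message"])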
|
DIRACGrid/DIRAC
|
src/DIRAC/TransformationSystem/Client/Transformation.py
|
Python
|
gpl-3.0
| 28,989
|