repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
sencha/chromium-spacewalk | build/android/install_emulator_deps.py | 43 | 10192 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Installs deps for using SDK emulator for testing.
The script will download the SDK and system images, if they are not present, and
install and enable KVM, if virtualization has been enabled in the BIOS.
"""
import logging
import optparse
import os
import re
import shutil
import sys
from pylib import cmd_helper
from pylib import constants
from pylib import pexpect
from pylib.utils import run_tests_helper
# Android API level
DEFAULT_ANDROID_API_LEVEL = constants.ANDROID_SDK_VERSION
# From the Android Developer's website.
# Keep this up to date; the user can install older API levels as necessary.
SDK_BASE_URL = 'http://dl.google.com/android/adt'
SDK_ZIP = 'adt-bundle-linux-x86_64-20131030.zip'
# pylint: disable=C0301
# Android x86 system image from the Intel website:
# http://software.intel.com/en-us/articles/intel-eula-x86-android-4-2-jelly-bean-bin
# These don't exist prior to Android-15.
# As of 08 Nov 2013, Android-19 is not yet available either.
X86_IMG_URLS = {
15: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-15_r01.zip',
16: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-16_r01.zip',
17: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-17_r01.zip',
18: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-18_r01.zip',
19: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-19_r01.zip'}
#pylint: enable=C0301
def CheckSDK():
"""Check if SDK is already installed.
Returns:
True if the emulator SDK directory (src/android_emulator_sdk/) exists.
"""
return os.path.exists(constants.EMULATOR_SDK_ROOT)
def CheckSDKPlatform(api_level=DEFAULT_ANDROID_API_LEVEL):
"""Check if the "SDK Platform" for the specified API level is installed.
This is necessary in order for the emulator to run when the target
is specified.
Args:
api_level: the Android API level to check; defaults to the latest API.
Returns:
True if the platform is already installed.
"""
android_binary = os.path.join(constants.EMULATOR_SDK_ROOT,
'sdk', 'tools', 'android')
pattern = re.compile('id: [0-9]+ or "android-%d"' % api_level)
try:
exit_code, stdout = cmd_helper.GetCmdStatusAndOutput(
[android_binary, 'list'])
if exit_code != 0:
raise Exception('\'android list\' command failed')
for line in stdout.split('\n'):
if pattern.match(line):
return True
return False
except OSError:
logging.exception('Unable to execute \'android list\'')
return False
def CheckX86Image(api_level=DEFAULT_ANDROID_API_LEVEL):
"""Check if Android system images have been installed.
Args:
api_level: the Android API level to check for; defaults to the latest API.
Returns:
True if sdk/system-images/android-<api_level>/x86 exists inside
EMULATOR_SDK_ROOT.
"""
api_target = 'android-%d' % api_level
return os.path.exists(os.path.join(constants.EMULATOR_SDK_ROOT,
'sdk', 'system-images',
api_target, 'x86'))
def CheckKVM():
"""Quickly check whether KVM is enabled.
Returns:
True iff /dev/kvm exists (Linux only).
"""
return os.path.exists('/dev/kvm')
def RunKvmOk():
"""Run kvm-ok as root to check that KVM is properly enabled after installation
of the required packages.
Returns:
True iff KVM is enabled (/dev/kvm exists). On failure, returns False
but also prints detailed information explaining why KVM isn't enabled
(e.g. CPU doesn't support it, or BIOS disabled it).
"""
try:
# Note: kvm-ok is in /usr/sbin, so always use 'sudo' to run it.
return not cmd_helper.RunCmd(['sudo', 'kvm-ok'])
except OSError:
logging.info('kvm-ok not installed')
return False
def GetSDK():
"""Download the SDK and unzip it into EMULATOR_SDK_ROOT."""
logging.info('Download Android SDK.')
sdk_url = '%s/%s' % (SDK_BASE_URL, SDK_ZIP)
try:
cmd_helper.RunCmd(['curl', '-o', '/tmp/sdk.zip', sdk_url])
print 'curled unzipping...'
rc = cmd_helper.RunCmd(['unzip', '-o', '/tmp/sdk.zip', '-d', '/tmp/'])
if rc:
raise Exception('ERROR: could not download/unzip Android SDK.')
# Get the name of the sub-directory that everything will be extracted to.
dirname, _ = os.path.splitext(SDK_ZIP)
zip_dir = '/tmp/%s' % dirname
# Move the extracted directory to EMULATOR_SDK_ROOT
shutil.move(zip_dir, constants.EMULATOR_SDK_ROOT)
finally:
os.unlink('/tmp/sdk.zip')
def InstallKVM():
"""Installs KVM packages."""
rc = cmd_helper.RunCmd(['sudo', 'apt-get', 'install', 'kvm'])
if rc:
logging.critical('ERROR: Did not install KVM. Make sure hardware '
'virtualization is enabled in BIOS (i.e. Intel VT-x or '
'AMD SVM).')
# TODO(navabi): Use modprobe kvm-amd on AMD processors.
rc = cmd_helper.RunCmd(['sudo', 'modprobe', 'kvm-intel'])
if rc:
logging.critical('ERROR: Did not add KVM module to Linux Kernel. Make sure '
'hardware virtualization is enabled in BIOS.')
# Now check to ensure KVM acceleration can be used.
if not RunKvmOk():
logging.critical('ERROR: Can not use KVM acceleration. Make sure hardware '
'virtualization is enabled in BIOS (i.e. Intel VT-x or '
'AMD SVM).')
def GetX86Image(api_level=DEFAULT_ANDROID_API_LEVEL):
"""Download x86 system image from Intel's website.
Args:
api_level: the Android API level to download for.
"""
logging.info('Download x86 system image directory into sdk directory.')
# TODO(andrewhayden): Use python tempfile lib instead
temp_file = '/tmp/x86_img_android-%d.zip' % api_level
if api_level not in X86_IMG_URLS:
raise Exception('ERROR: no URL known for x86 image for android-%s' %
api_level)
try:
cmd_helper.RunCmd(['curl', '-o', temp_file, X86_IMG_URLS[api_level]])
rc = cmd_helper.RunCmd(['unzip', '-o', temp_file, '-d', '/tmp/'])
if rc:
raise Exception('ERROR: Could not download/unzip image zip.')
api_target = 'android-%d' % api_level
sys_imgs = os.path.join(constants.EMULATOR_SDK_ROOT, 'sdk',
'system-images', api_target, 'x86')
logging.info('Deploying system image to %s' % sys_imgs)
shutil.move('/tmp/x86', sys_imgs)
finally:
os.unlink(temp_file)
def GetSDKPlatform(api_level=DEFAULT_ANDROID_API_LEVEL):
"""Update the SDK to include the platform specified.
Args:
api_level: the Android API level to download
"""
android_binary = os.path.join(constants.EMULATOR_SDK_ROOT,
'sdk', 'tools', 'android')
pattern = re.compile(r'\s*([0-9]+)- SDK Platform Android [\.,0-9]+, API %d.*' %
api_level)
# Example:
# 2- SDK Platform Android 4.3, API 18, revision 2
exit_code, stdout = cmd_helper.GetCmdStatusAndOutput(
[android_binary, 'list', 'sdk'])
if exit_code != 0:
raise Exception('\'android list sdk\' command returned %d' % exit_code)
for line in stdout.split('\n'):
match = pattern.match(line)
if match:
index = match.group(1)
print('package %s corresponds to platform level %d' % (index, api_level))
# update sdk --no-ui --filter $INDEX
update_command = [android_binary,
'update', 'sdk', '--no-ui', '--filter', index]
update_command_str = ' '.join(update_command)
logging.info('running update command: %s' % update_command_str)
update_process = pexpect.spawn(update_command_str)
# TODO(andrewhayden): Do we need to bug the user about this?
if update_process.expect('Do you accept the license') != 0:
raise Exception('License agreement check failed')
update_process.sendline('y')
if update_process.expect('Done. 1 package installed.') == 0:
print('Successfully installed platform for API level %d' % api_level)
return
else:
raise Exception('Failed to install platform update')
raise Exception('Could not find android-%d update for the SDK!' % api_level)
def main(argv):
opt_parser = optparse.OptionParser(
description='Install dependencies for running the Android emulator')
opt_parser.add_option('--api-level', dest='api_level',
help='The API level (e.g., 19 for Android 4.4) to ensure is available',
type='int', default=DEFAULT_ANDROID_API_LEVEL)
opt_parser.add_option('-v', dest='verbose', action='store_true',
help='enable verbose logging')
options, _ = opt_parser.parse_args(argv[1:])
# run_tests_helper will set logging to INFO or DEBUG
# We achieve verbose output by configuring it with 2 (==DEBUG)
verbosity = 1
if options.verbose:
verbosity = 2
logging.basicConfig(level=logging.INFO,
format='# %(asctime)-15s: %(message)s')
run_tests_helper.SetLogLevel(verbose_count=verbosity)
# Calls below will download emulator SDK and/or system images only if needed.
if CheckSDK():
logging.info('android_emulator_sdk/ already exists, skipping download.')
else:
GetSDK()
# Check target. The target has to be installed in order to run the emulator.
if CheckSDKPlatform(options.api_level):
logging.info('SDK platform android-%d already present, skipping.' %
options.api_level)
else:
logging.info('SDK platform android-%d not present, installing.' %
options.api_level)
GetSDKPlatform(options.api_level)
# Download the x86 system image only if needed.
if CheckX86Image(options.api_level):
logging.info('x86 image for android-%d already present, skipping.' %
options.api_level)
else:
GetX86Image(options.api_level)
# Make sure KVM packages are installed and enabled.
if CheckKVM():
logging.info('KVM already installed and enabled.')
else:
InstallKVM()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
Habasari/sms-tools | lectures/03-Fourier-properties/plots-code/convolution-2.py | 24 | 1259 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
plt.figure(1, figsize=(9.5, 7))
M = 64
N = 64
x1 = np.hanning(M)
x2 = np.cos(2*np.pi*2/M*np.arange(M))
y1 = x1*x2
mY1 = 20 * np.log10(np.abs(fftshift(fft(y1, N))))
plt.subplot(3,2,1)
plt.title('x1 (hanning)')
plt.plot(np.arange(-M/2, M/2), x1, 'b', lw=1.5)
plt.axis([-M/2,M/2,0,1])
plt.subplot(3,2,2)
plt.title('x2 (cosine)')
plt.plot(np.arange(-M/2, M/2),x2, 'b', lw=1.5)
plt.axis([-M/2,M/2,-1,1])
mX1 = 20 * np.log10(np.abs(fftshift(fft(x1, M)))/M)
plt.subplot(3,2,3)
plt.title('X1')
plt.plot(np.arange(-N/2, N/2),mX1, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mX1)])
mX2 = 20 * np.log10(np.abs(fftshift(fft(x2, M)))/M)
plt.subplot(3,2,4)
plt.title('X2')
plt.plot(np.arange(-N/2, N/2),mX2, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mX2)])
plt.subplot(3,2,5)
plt.title('DFT(x1 x x2)')
plt.plot(np.arange(-N/2, N/2),mY1, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mY1)])
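# Hedged sanity check (a sketch, not in the original script): by the
# convolution theorem, the DFT of the product x1*x2 equals the circular
# convolution of X1 and X2 scaled by 1/M, which is why the two bottom
# panels agree.
Yt = fft(x1 * x2, M)                       # DFT of the time-domain product
Yf = np.convolve(fft(x1, M), fft(x2, M))   # linear convolution, length 2M-1
# Wrapping Yf circularly (folding the tail back onto the head) and dividing
# by M reproduces Yt up to numerical error:
Yc = (Yf[:M] + np.append(Yf[M:], 0)) / M
assert np.allclose(Yt, Yc)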
Y2 = np.convolve(fftshift(fft(x1, M)), fftshift(fft(x2, M)))
mY2 = 20 * np.log10(np.abs(Y2)) - 40
plt.subplot(3,2,6)
plt.title('X1 * X2')
plt.plot(np.arange(-N/2, N/2),mY2[M/2:M+M/2], 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mY2)])
plt.tight_layout()
plt.savefig('convolution-2.png')
plt.show()
| agpl-3.0 |
carnell69/kuma | vendor/packages/translate/convert/accesskey.py | 25 | 8025 | # -*- coding: utf-8 -*-
#
# Copyright 2002-2009,2011 Zuza Software Foundation
#
# This file is part of The Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""functions used to manipulate access keys in strings"""
from translate.storage.placeables.general import XMLEntityPlaceable
DEFAULT_ACCESSKEY_MARKER = u"&"
class UnitMixer(object):
"""Helper to mix separately defined labels and accesskeys into one unit."""
def __init__(self, labelsuffixes, accesskeysuffixes):
self.labelsuffixes = labelsuffixes
self.accesskeysuffixes = accesskeysuffixes
def match_entities(self, index):
"""Populates mixedentities from the index."""
#: Entities which have a .label/.title and .accesskey combined
mixedentities = {}
for entity in index:
for labelsuffix in self.labelsuffixes:
if entity.endswith(labelsuffix):
entitybase = entity[:entity.rfind(labelsuffix)]
# see if there is a matching accesskey in this line,
# making this a mixed entity
for akeytype in self.accesskeysuffixes:
if (entitybase + akeytype) in index:
# add both versions to the list of mixed entities
mixedentities[entity] = {}
mixedentities[entitybase+akeytype] = {}
# check if this could be a mixed entity (labelsuffix and
# ".accesskey")
return mixedentities
def mix_units(self, label_unit, accesskey_unit, target_unit):
"""Mix the given units into the given target_unit if possible.
Might return None if no match is possible.
"""
target_unit.addlocations(label_unit.getlocations())
target_unit.addlocations(accesskey_unit.getlocations())
target_unit.msgidcomment = target_unit._extract_msgidcomments() + \
label_unit._extract_msgidcomments()
target_unit.msgidcomment = target_unit._extract_msgidcomments() + \
accesskey_unit._extract_msgidcomments()
target_unit.addnote(label_unit.getnotes("developer"), "developer")
target_unit.addnote(accesskey_unit.getnotes("developer"), "developer")
target_unit.addnote(label_unit.getnotes("translator"), "translator")
target_unit.addnote(accesskey_unit.getnotes("translator"), "translator")
label = label_unit.source
accesskey = accesskey_unit.source
label = combine(label, accesskey)
if label is None:
return None
target_unit.source = label
target_unit.target = ""
return target_unit
def find_mixed_pair(self, mixedentities, store, unit):
entity = unit.getid()
if entity not in mixedentities:
return None, None
# depending on what we come across first, work out the label
# and the accesskey
labelentity, accesskeyentity = None, None
for labelsuffix in self.labelsuffixes:
if entity.endswith(labelsuffix):
entitybase = entity[:entity.rfind(labelsuffix)]
for akeytype in self.accesskeysuffixes:
if (entitybase + akeytype) in store.id_index:
labelentity = entity
accesskeyentity = labelentity[:labelentity.rfind(labelsuffix)] + akeytype
break
else:
for akeytype in self.accesskeysuffixes:
if entity.endswith(akeytype):
accesskeyentity = entity
for labelsuffix in self.labelsuffixes:
labelentity = accesskeyentity[:accesskeyentity.rfind(akeytype)] + labelsuffix
if labelentity in store.id_index:
break
else:
labelentity = None
accesskeyentity = None
return (labelentity, accesskeyentity)
def extract(string, accesskey_marker=DEFAULT_ACCESSKEY_MARKER):
"""Extract the label and accesskey from a label+accesskey string
The function will also try to ignore &entities; which would obviously not
contain accesskeys.
:type string: Unicode
:param string: A string that might contain a label with accesskey marker
:type accesskey_marker: Char
:param accesskey_marker: The character that is used to prefix an access key
"""
assert isinstance(string, unicode)
assert isinstance(accesskey_marker, unicode)
assert len(accesskey_marker) == 1
if string == u"":
return u"", u""
accesskey = u""
label = string
marker_pos = 0
while marker_pos >= 0:
marker_pos = string.find(accesskey_marker, marker_pos)
if marker_pos != -1:
marker_pos += 1
if marker_pos == len(string):
break
if (accesskey_marker == '&' and
XMLEntityPlaceable.regex.match(string[marker_pos-1:])):
continue
label = string[:marker_pos-1] + string[marker_pos:]
if string[marker_pos] != " ": # FIXME weak filtering
accesskey = string[marker_pos]
return label, accesskey
def combine(label, accesskey,
accesskey_marker=DEFAULT_ACCESSKEY_MARKER):
"""Combine a label and and accesskey to form a label+accesskey string
We place an accesskey marker before the accesskey in the label and this
creates a string with the two combined e.g. "File" + "F" = "&File"
The case of the accesskey is preferred unless no match is found, in which
case the alternate case is used.
:type label: unicode
:param label: a label
:type accesskey: unicode char
:param accesskey: The accesskey
:rtype: unicode or None
:return: label+accesskey string or None if uncombineable
"""
assert isinstance(label, unicode)
assert isinstance(accesskey, unicode)
if len(accesskey) == 0:
return None
searchpos = 0
accesskeypos = -1
in_entity = False
accesskeyaltcasepos = -1
if accesskey.isupper():
accesskey_alt_case = accesskey.lower()
else:
accesskey_alt_case = accesskey.upper()
while (accesskeypos < 0) and searchpos < len(label):
searchchar = label[searchpos]
if searchchar == '&':
in_entity = True
elif searchchar == ';' or searchchar == " ":
in_entity = False
if not in_entity:
if searchchar == accesskey: # Prefer supplied case
accesskeypos = searchpos
elif searchchar == accesskey_alt_case: # Other case otherwise
if accesskeyaltcasepos == -1:
# only want to remember first altcasepos
accesskeyaltcasepos = searchpos
# note: we keep on looping through in hope
# of exact match
searchpos += 1
# if we didn't find an exact case match, use an alternate one if available
if accesskeypos == -1:
accesskeypos = accesskeyaltcasepos
# now we want to handle whatever we found...
if accesskeypos >= 0:
return label[:accesskeypos] + accesskey_marker + label[accesskeypos:]
# can't currently mix accesskey if it's not in label
return None
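# A hedged round-trip sketch (not part of the original module; the values
# follow from the definitions above, assuming XMLEntityPlaceable's regex
# recognises standard entities such as &amp;):
#   extract(u"&File")         # -> (u"File", u"F")
#   combine(u"File", u"F")    # -> u"&File"
#   extract(u"Foo &amp; bar") # entity left alone, so no accesskey is found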
| mpl-2.0 |
alfredoavanzosc/odoomrp-wip-1 | product_variant_cost/__openerp__.py | 16 | 1350 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Product Variant Cost",
"version": "1.0",
"depends": [
"product",
"stock_account"
],
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"category": "Product",
"website": "http://www.odoomrp.com",
"summary": "",
"data": [
"views/product_view.xml",
"views/stock_quant_view.xml"
],
"installable": True,
"post_init_hook": "load_cost_price_on_variant",
}
| agpl-3.0 |
jbbskinny/sympy | sympy/assumptions/handlers/ntheory.py | 52 | 7347 | """
Handlers for keys related to number theory: prime, even, odd, etc.
"""
from __future__ import print_function, division
from sympy.assumptions import Q, ask
from sympy.assumptions.handlers import CommonHandler
from sympy.ntheory import isprime
from sympy.core import S, Float
class AskPrimeHandler(CommonHandler):
"""
Handler for key 'prime'
Test that an expression represents a prime number. When the
expression is a number, the result (when True) is subject to
the limitations of isprime(), which is used to compute it.
"""
@staticmethod
def Expr(expr, assumptions):
return expr.is_prime
@staticmethod
def _number(expr, assumptions):
# helper method
try:
i = int(expr.round())
if not (expr - i).equals(0):
raise TypeError
except TypeError:
return False
return isprime(expr)
@staticmethod
def Basic(expr, assumptions):
# Just use int(expr) once
# https://github.com/sympy/sympy/issues/4561
# is solved
if expr.is_number:
return AskPrimeHandler._number(expr, assumptions)
@staticmethod
def Mul(expr, assumptions):
if expr.is_number:
return AskPrimeHandler._number(expr, assumptions)
for arg in expr.args:
if ask(Q.integer(arg), assumptions):
pass
else:
break
else:
# a product of integers can't be a prime
return False
@staticmethod
def Pow(expr, assumptions):
"""
Integer**Integer -> !Prime
"""
if expr.is_number:
return AskPrimeHandler._number(expr, assumptions)
if ask(Q.integer(expr.exp), assumptions) and \
ask(Q.integer(expr.base), assumptions):
return False
@staticmethod
def Integer(expr, assumptions):
return isprime(expr)
Rational, Infinity, NegativeInfinity, ImaginaryUnit = [staticmethod(CommonHandler.AlwaysFalse)]*4
@staticmethod
def Float(expr, assumptions):
return AskPrimeHandler._number(expr, assumptions)
@staticmethod
def NumberSymbol(expr, assumptions):
return AskPrimeHandler._number(expr, assumptions)
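# A hedged usage sketch (not part of this module): these static methods are
# dispatched by the ask() machinery, e.g.
#   from sympy import Q, ask
#   ask(Q.prime(7))    # -> True, via AskPrimeHandler.Integer
#   ask(Q.prime(8))    # -> False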
class AskCompositeHandler(CommonHandler):
@staticmethod
def Expr(expr, assumptions):
return expr.is_composite
@staticmethod
def Basic(expr, assumptions):
_positive = ask(Q.positive(expr), assumptions)
if _positive:
_integer = ask(Q.integer(expr), assumptions)
if _integer:
_prime = ask(Q.prime(expr), assumptions)
if _prime is None:
return
# Positive integer which is not prime is not
# necessarily composite
if expr.equals(1):
return False
return not _prime
else:
return _integer
else:
return _positive
class AskEvenHandler(CommonHandler):
@staticmethod
def Expr(expr, assumptions):
return expr.is_even
@staticmethod
def _number(expr, assumptions):
# helper method
try:
i = int(expr.round())
if not (expr - i).equals(0):
raise TypeError
except TypeError:
return False
if isinstance(expr, (float, Float)):
return False
return i % 2 == 0
@staticmethod
def Basic(expr, assumptions):
if expr.is_number:
return AskEvenHandler._number(expr, assumptions)
@staticmethod
def Mul(expr, assumptions):
"""
Even * Integer -> Even
Even * Odd -> Even
Integer * Odd -> ?
Odd * Odd -> Odd
Even * Even -> Even
Integer * Integer -> Even if Integer + Integer = Odd
-> ? otherwise
"""
if expr.is_number:
return AskEvenHandler._number(expr, assumptions)
even, odd, irrational, acc = False, 0, False, 1
for arg in expr.args:
# check for all integers and at least one even
if ask(Q.integer(arg), assumptions):
if ask(Q.even(arg), assumptions):
even = True
elif ask(Q.odd(arg), assumptions):
odd += 1
elif not even and acc != 1:
if ask(Q.odd(acc + arg), assumptions):
even = True
elif ask(Q.irrational(arg), assumptions):
# one irrational makes the result False
# two makes it undefined
if irrational:
break
irrational = True
else:
break
acc = arg
else:
if irrational:
return False
if even:
return True
if odd == len(expr.args):
return False
@staticmethod
def Add(expr, assumptions):
"""
Even + Odd -> Odd
Even + Even -> Even
Odd + Odd -> Even
"""
if expr.is_number:
return AskEvenHandler._number(expr, assumptions)
_result = True
for arg in expr.args:
if ask(Q.even(arg), assumptions):
pass
elif ask(Q.odd(arg), assumptions):
_result = not _result
else:
break
else:
return _result
@staticmethod
def Pow(expr, assumptions):
if expr.is_number:
return AskEvenHandler._number(expr, assumptions)
if ask(Q.integer(expr.exp), assumptions):
if ask(Q.positive(expr.exp), assumptions):
return ask(Q.even(expr.base), assumptions)
elif ask(~Q.negative(expr.exp) & Q.odd(expr.base), assumptions):
return False
elif expr.base is S.NegativeOne:
return False
@staticmethod
def Integer(expr, assumptions):
return not bool(expr.p & 1)
Rational, Infinity, NegativeInfinity, ImaginaryUnit = [staticmethod(CommonHandler.AlwaysFalse)]*4
@staticmethod
def NumberSymbol(expr, assumptions):
return AskEvenHandler._number(expr, assumptions)
@staticmethod
def Abs(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
return ask(Q.even(expr.args[0]), assumptions)
@staticmethod
def re(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
return ask(Q.even(expr.args[0]), assumptions)
@staticmethod
def im(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
return True
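# A hedged usage sketch (not part of this module):
#   from sympy import Symbol, Q, ask
#   n = Symbol('n', integer=True)
#   ask(Q.even(2*n))       # -> True, via AskEvenHandler.Mul
#   ask(Q.even(2*n + 1))   # -> False, via AskEvenHandler.Add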
class AskOddHandler(CommonHandler):
"""
Handler for key 'odd'
Test that an expression represents an odd number
"""
@staticmethod
def Expr(expr, assumptions):
return expr.is_odd
@staticmethod
def Basic(expr, assumptions):
_integer = ask(Q.integer(expr), assumptions)
if _integer:
_even = ask(Q.even(expr), assumptions)
if _even is None:
return None
return not _even
return _integer
| bsd-3-clause |
rahul67/hue | desktop/core/ext-py/python-dateutil-2.4.2/dateutil/rrule.py | 103 | 52785 | # -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast, implementation of
the recurrence rules documented in the
`iCalendar RFC <http://www.ietf.org/rfc/rfc2445.txt>`_,
including support for caching of results.
"""
import itertools
import datetime
import calendar
import sys
from fractions import gcd
from six import advance_iterator, integer_types
from six.moves import _thread
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
parser = None
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
if n == 0:
raise ValueError("Can't create weekday with n == 0")
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._cache_gen = self._iter()
self._cache_complete = False
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
# __len__() introduces a large performance penalty.
def count(self):
""" Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. """
if self._len is None:
for x in self:
pass
return self._len
def before(self, dt, inc=False):
""" Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
""" Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def between(self, after, before, inc=False):
""" Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. """
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week start is retrieved
from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
How many occurrences will be generated.
:param until:
If given, this must be a datetime instance, that will specify the
limit of the recurrence. If a recurrence instance happens to be the
same as the datetime instance given in the until keyword, this will
be the last occurrence.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
"""
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if (byweekno is None and byyearday is None and bymonthday is None and
byweekday is None and byeaster is None):
if freq == YEARLY:
if bymonth is None:
bymonth = dtstart.month
bymonthday = dtstart.day
elif freq == MONTHLY:
bymonthday = dtstart.day
elif freq == WEEKLY:
byweekday = dtstart.weekday()
# bymonth
if bymonth is None:
self._bymonth = None
else:
if isinstance(bymonth, integer_types):
bymonth = (bymonth,)
self._bymonth = tuple(sorted(set(bymonth)))
# byyearday
if byyearday is None:
self._byyearday = None
else:
if isinstance(byyearday, integer_types):
byyearday = (byyearday,)
self._byyearday = tuple(sorted(set(byyearday)))
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(sorted(byeaster))
else:
self._byeaster = None
# bymonthday
if bymonthday is None:
self._bymonthday = ()
self._bynmonthday = ()
else:
if isinstance(bymonthday, integer_types):
bymonthday = (bymonthday,)
self._bymonthday = tuple(sorted(set([x for x in bymonthday if x > 0])))
self._bynmonthday = tuple(sorted(set([x for x in bymonthday if x < 0])))
# byweekno
if byweekno is None:
self._byweekno = None
else:
if isinstance(byweekno, integer_types):
byweekno = (byweekno,)
self._byweekno = tuple(sorted(set(byweekno)))
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
else:
# If it's one of the valid non-sequence types, convert to a
# single-element sequence before the iterator that builds the
# byweekday set.
if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
byweekday = (byweekday,)
self._byweekday = set()
self._bynweekday = set()
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.add(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.add(wday.weekday)
else:
self._bynweekday.add((wday.weekday, wday.n))
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
if self._byweekday is not None:
self._byweekday = tuple(sorted(self._byweekday))
if self._bynweekday is not None:
self._bynweekday = tuple(sorted(self._bynweekday))
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = set((dtstart.hour,))
else:
self._byhour = None
else:
if isinstance(byhour, integer_types):
byhour = (byhour,)
if freq == HOURLY:
self._byhour = self.__construct_byset(start=dtstart.hour,
byxxx=byhour,
base=24)
else:
self._byhour = set(byhour)
self._byhour = tuple(sorted(self._byhour))
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = set((dtstart.minute,))
else:
self._byminute = None
else:
if isinstance(byminute, integer_types):
byminute = (byminute,)
if freq == MINUTELY:
self._byminute = self.__construct_byset(start=dtstart.minute,
byxxx=byminute,
base=60)
else:
self._byminute = set(byminute)
self._byminute = tuple(sorted(self._byminute))
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = ((dtstart.second,))
else:
self._bysecond = None
else:
if isinstance(bysecond, integer_types):
bysecond = (bysecond,)
self._bysecond = set(bysecond)
if freq == SECONDLY:
self._bysecond = self.__construct_byset(start=dtstart.second,
byxxx=bysecond,
base=60)
else:
self._bysecond = set(bysecond)
self._bysecond = tuple(sorted(self._bysecond))
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY: ii.ydayset,
MONTHLY: ii.mdayset,
WEEKLY: ii.wdayset,
DAILY: ii.ddayset,
HOURLY: ii.ddayset,
MINUTELY: ii.ddayset,
SECONDLY: ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY: ii.htimeset,
MINUTELY: ii.mtimeset,
SECONDLY: ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday and
-ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
-ii.nextyearlen+i-ii.yearlen not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal+i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
if byhour:
ndays, hour = self.__mod_distance(value=hour,
byxxx=self._byhour,
base=24)
else:
ndays, hour = divmod(hour+interval, 24)
if ndays:
day += ndays
fixday = True
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
valid = False
rep_rate = (24*60)
for j in range(rep_rate // gcd(interval, rep_rate)):
if byminute:
nhours, minute = \
self.__mod_distance(value=minute,
byxxx=self._byminute,
base=60)
else:
nhours, minute = divmod(minute+interval, 60)
div, hour = divmod(hour+nhours, 24)
if div:
day += div
fixday = True
filtered = False
if not byhour or hour in byhour:
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval and ' +
'byhour resulting in empty rule.')
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399-(hour*3600+minute*60+second))
// interval)*interval)
rep_rate = (24*3600)
valid = False
for j in range(0, rep_rate // gcd(interval, rep_rate)):
if bysecond:
nminutes, second = \
self.__mod_distance(value=second,
byxxx=self._bysecond,
base=60)
else:
nminutes, second = divmod(second+interval, 60)
div, minute = divmod(minute+nminutes, 60)
if div:
hour += div
div, hour = divmod(hour, 24)
if div:
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval, ' +
'byhour and byminute resulting in empty' +
' rule.')
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
def __construct_byset(self, start, byxxx, base):
"""
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
This occurs whenever the interval is not coprime with the base of a
given unit and the difference between the starting position and the
ending position is not coprime with the greatest common divisor of
the interval and the base. For example, with a FREQ of hourly
starting at 17:00 and an interval of 4, the only valid values for
BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
This does not preserve the type of the iterable, returning a set, since
the values should be unique and the order is irrelevant, this will
speed up later lookups.
In the event of an empty set, raises a :exc:`ValueError`, as this
results in an empty rrule.
"""
cset = set()
# Support a single byxxx value.
if isinstance(byxxx, integer_types):
byxxx = (byxxx, )
for num in byxxx:
i_gcd = gcd(self._interval, base)
# Use divmod rather than % because we need to wrap negative nums.
if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
cset.add(num)
if len(cset) == 0:
raise ValueError("Invalid rrule byxxx generates an empty set.")
return cset
def __mod_distance(self, value, byxxx, base):
"""
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
If a valid value is not found after `base` iterations (the maximum
number before the sequence would start to repeat), this raises a
:exc:`ValueError`, as no valid values were found.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
"""
accumulator = 0
for ii in range(1, base + 1):
# Using divmod() over % to account for negative intervals
div, value = divmod(value + self._interval, base)
accumulator += div
if value in byxxx:
return (accumulator, value)
class _iterinfo(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365+calendar.isleap(year)
self.nextyearlen = 365+calendar.isleap(year+1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
# no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1, 1, 1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst) % 7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen +
(lyearweekday-rr._wkst) % 7) % 7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and (month != self.lastmonth or
year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday) % 7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday) % 7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return list(range(self.yearlen)), 0, self.yearlen
def mdayset(self, year, month, day):
dset = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
dset[i] = i
return dset, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
dset = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
dset[i] = i
i += 1
# if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return dset, start, i
def ddayset(self, year, month, day):
dset = [None]*self.yearlen
i = datetime.date(year, month, day).toordinal()-self.yearordinal
dset[i] = i
return dset, i, i+1
def htimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
tset.sort()
return tset
def mtimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
tset.sort()
return tset
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
""" The rruleset type allows more complex recurrence setups, mixing
multiple rules, dates, exclusion rules, and exclusion dates. The type
constructor takes the following keyword arguments:
:param cache: If True, caching of results will be enabled, improving
performance of multiple queries considerably. """
class _genitem(object):
def __init__(self, genlist, gen):
try:
self.dt = advance_iterator(gen)
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def __next__(self):
try:
self.dt = advance_iterator(self.gen)
except StopIteration:
self.genlist.remove(self)
next = __next__
def __lt__(self, other):
return self.dt < other.dt
def __gt__(self, other):
return self.dt > other.dt
def __eq__(self, other):
return self.dt == other.dt
def __ne__(self, other):
return self.dt != other.dt
def __init__(self, cache=False):
super(rruleset, self).__init__(cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
def rrule(self, rrule):
""" Include the given :py:class:`rrule` instance in the recurrence set
generation. """
self._rrule.append(rrule)
def rdate(self, rdate):
""" Include the given :py:class:`datetime` instance in the recurrence
set generation. """
self._rdate.append(rdate)
def exrule(self, exrule):
""" Include the given rrule instance in the recurrence set exclusion
list. Dates which are part of the given recurrence rules will not
be generated, even if some inclusive rrule or rdate matches them.
"""
self._exrule.append(exrule)
def exdate(self, exdate):
""" Include the given datetime instance in the recurrence set
exclusion list. Dates included that way will not be generated,
even if some inclusive rrule or rdate matches them. """
self._exdate.append(exdate)
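# A hedged usage sketch (not part of the original class): combining an
# inclusion rule with an exclusion date, where `start` is any
# timezone-naive datetime:
#   rset = rruleset()
#   rset.rrule(rrule(DAILY, count=5, dtstart=start))
#   rset.exdate(start + datetime.timedelta(days=1))
#   list(rset)   # 4 datetimes: the 5 daily ones minus the excluded day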
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate))
for gen in [iter(x) for x in self._rrule]:
self._genitem(rlist, gen)
rlist.sort()
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate))
for gen in [iter(x) for x in self._exrule]:
self._genitem(exlist, gen)
exlist.sort()
lastdt = None
total = 0
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
advance_iterator(exlist[0])
exlist.sort()
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
advance_iterator(ritem)
rlist.sort()
self._len = total
class _rrulestr(object):
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
"FR": 4, "SA": 5, "SU": 6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError("invalid until date")
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwarsg):
l = []
for wday in value.split(','):
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n:
n = int(n)
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError("unknown parameter name")
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError("unknown parameter '%s'" % name)
except (KeyError, ValueError):
raise ValueError("invalid '%s': %s" % (name, value))
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
s = s.upper()
if not s.strip():
raise ValueError("empty string")
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError("unsupported RRULE parm: "+parm)
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError("unsupported EXRULE parm: "+parm)
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
exdatevals.append(value)
elif name == "DTSTART":
for parm in parms:
raise ValueError("unsupported DTSTART parm: "+parm)
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
raise ValueError("unsupported property: "+name)
if (forceset or len(rrulevals) > 1 or rdatevals
or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
rset = rruleset(cache=cache)
for value in rrulevals:
rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
rset.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
rset.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
rset.rdate(dtstart)
return rset
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
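# A minimal usage sketch, assuming the usual layout where this module is
# importable as dateutil.rrule (illustrative only):
#
#   rule = rrulestr("DTSTART:19970902T090000\n"
#                   "RRULE:FREQ=DAILY;COUNT=3", unfold=True)
#   list(rule)  # three consecutive daily 09:00 occurrences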
# vim:ts=4:sw=4:et
| apache-2.0 |
insiderr/insiderr-app | ios-patches/basemodules/twisted/internet/test/test_iocp.py | 28 | 5185 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.iocpreactor}.
"""
import errno
from array import array
from struct import pack
from socket import AF_INET6, AF_INET, SOCK_STREAM, SOL_SOCKET, error, socket
from zope.interface.verify import verifyClass
from twisted.trial import unittest
from twisted.python.log import msg
from twisted.internet.interfaces import IPushProducer
try:
from twisted.internet.iocpreactor import iocpsupport as _iocp, tcp, udp
from twisted.internet.iocpreactor.reactor import IOCPReactor, EVENTS_PER_LOOP, KEY_NORMAL
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
from twisted.internet.iocpreactor.abstract import FileHandle
except ImportError:
skip = 'This test only applies to IOCPReactor'
try:
socket(AF_INET6, SOCK_STREAM).close()
except error, e:
ipv6Skip = str(e)
else:
ipv6Skip = None
class SupportTests(unittest.TestCase):
"""
Tests for L{twisted.internet.iocpreactor.iocpsupport}, low-level reactor
implementation helpers.
"""
def _acceptAddressTest(self, family, localhost):
"""
Create a C{SOCK_STREAM} connection to localhost using a socket with an
address family of C{family} and assert that the result of
L{iocpsupport.get_accept_addrs} is consistent with the result of
C{socket.getsockname} and C{socket.getpeername}.
"""
msg("family = %r" % (family,))
port = socket(family, SOCK_STREAM)
self.addCleanup(port.close)
port.bind(('', 0))
port.listen(1)
client = socket(family, SOCK_STREAM)
self.addCleanup(client.close)
client.setblocking(False)
try:
client.connect((localhost, port.getsockname()[1]))
except error, (errnum, message):
self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
server = socket(family, SOCK_STREAM)
self.addCleanup(server.close)
buff = array('c', '\0' * 256)
self.assertEqual(
0, _iocp.accept(port.fileno(), server.fileno(), buff, None))
server.setsockopt(
SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, pack('P', server.fileno()))
self.assertEqual(
(family, client.getpeername()[:2], client.getsockname()[:2]),
_iocp.get_accept_addrs(server.fileno(), buff))
def test_ipv4AcceptAddress(self):
"""
L{iocpsupport.get_accept_addrs} returns a three-tuple of address
information about the socket associated with the file descriptor passed
to it. For a connection using IPv4:
- the first element is C{AF_INET}
- the second element is a two-tuple of a dotted decimal notation IPv4
address and a port number giving the peer address of the connection
- the third element is the same type giving the host address of the
connection
"""
self._acceptAddressTest(AF_INET, '127.0.0.1')
def test_ipv6AcceptAddress(self):
"""
Like L{test_ipv4AcceptAddress}, but for IPv6 connections. In this case:
- the first element is C{AF_INET6}
- the second element is a two-tuple of a hexadecimal IPv6 address
literal and a port number giving the peer address of the connection
- the third element is the same type giving the host address of the
connection
"""
self._acceptAddressTest(AF_INET6, '::1')
if ipv6Skip is not None:
test_ipv6AcceptAddress.skip = ipv6Skip
class IOCPReactorTestCase(unittest.TestCase):
def test_noPendingTimerEvents(self):
"""
Test reactor behavior (doIteration) when there are no pending timed
events.
"""
ir = IOCPReactor()
ir.wakeUp()
self.assertFalse(ir.doIteration(None))
def test_reactorInterfaces(self):
"""
Verify that IOCP socket-representing classes implement IReadWriteHandle
"""
self.assertTrue(verifyClass(IReadWriteHandle, tcp.Connection))
self.assertTrue(verifyClass(IReadWriteHandle, udp.Port))
def test_fileHandleInterfaces(self):
"""
Verify that L{FileHandle} implements L{IPushProducer}.
"""
self.assertTrue(verifyClass(IPushProducer, FileHandle))
def test_maxEventsPerIteration(self):
"""
Verify that we don't lose an event when more than EVENTS_PER_LOOP
events occur in the same reactor iteration
"""
class FakeFD:
counter = 0
def logPrefix(self):
return 'FakeFD'
def cb(self, rc, bytes, evt):
self.counter += 1
ir = IOCPReactor()
fd = FakeFD()
event = _iocp.Event(fd.cb, fd)
for _ in range(EVENTS_PER_LOOP + 1):
ir.port.postEvent(0, KEY_NORMAL, event)
ir.doIteration(None)
self.assertEqual(fd.counter, EVENTS_PER_LOOP)
ir.doIteration(0)
self.assertEqual(fd.counter, EVENTS_PER_LOOP + 1)
| gpl-3.0 |
georgyberdyshev/ascend | pygtk/help.py | 1 | 1206 | import webbrowser
import os.path
import platform
import config
class Help:
def __init__(self, helproot=None, url=None):
print "HELPROOT =", config.HELPROOT
self.goonline=False
if url is not None:
self.webhelproot = url
self.goonline = True
else:
self.webhelproot = config.WEBHELPROOT
if helproot is None:
if platform.system()=="Windows":
import _winreg
x=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
y= _winreg.OpenKey(x,r"SOFTWARE\ASCEND")
_regpath,t = _winreg.QueryValueEx(y,"Install_Dir")
_winreg.CloseKey(y)
_winreg.CloseKey(x)
self.helproot = os.path.join(_regpath,"book.pdf")
else:
self.helproot = os.path.expanduser(config.HELPROOT)
else:
self.helproot = helproot
if not os.path.exists(self.helproot):
print "LOCAL HELP FILES NOT FOUND, WILL USE ONLINE COPY"
self.goonline = True
def run(self, topic=None):
_b = webbrowser.get()
if self.goonline:
_u = self.webhelproot
else:
_p = os.path.join(self.helproot)
_u = "file://"+_p
print "OPENING WEB PAGE: %s..." % _u
_b.open(_u, autoraise=1)
print "BACK FROM WEB CALL"
if __name__ == "__main__":
_h = Help()
_h.run()
| gpl-2.0 |
captiosus/treadmill | tests/cli/ssh_test.py | 1 | 3164 | """Unit test for treadmill.cli.ssh
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import click
import click.testing
from gevent import queue as g_queue
import mock
from treadmill import plugin_manager
class BadExit(Exception):
"""Test exception"""
pass
# W0212: don't complain about protected member access
# C0103: don't complain about invalid variable name 'q'
# pylint: disable=W0212,C0103
class SshTest(unittest.TestCase):
"""Mock test for treadmill.cli.ssh"""
def setUp(self):
"""Setup common test variables"""
self.runner = click.testing.CliRunner()
self.ssh_mod = plugin_manager.load('treadmill.cli', 'ssh')
self.ssh_cli = self.ssh_mod.init()
@mock.patch('treadmill.cli.ssh.run_ssh', mock.Mock())
@mock.patch('treadmill.checkout.connect')
def test_wait_for_ssh(self, mock_conn):
"""Test the _wait_for_ssh() helper func."""
# if the connection attempts fail
mock_conn.return_value = False
q = g_queue.JoinableQueue(items=[('dummy.host', 1234), ('host', 1234)])
self.ssh_mod._wait_for_ssh(q, 'ssh', 'cmd', timeout=0, attempts=10)
self.assertEqual(mock_conn.call_count, 10)
# check that "task_done()" has been invoked as many times as elements
# have been read from the queue
with self.assertRaises(ValueError):
q.task_done()
# if the connection attempt is successful
mock_conn.reset_mock()
mock_conn.return_value = True
q = g_queue.JoinableQueue(items=[('dummy.host', 1234), ('host', 1234)])
self.ssh_mod._wait_for_ssh(q, 'ssh', 'cmd', timeout=0, attempts=5)
self.assertEqual(mock_conn.call_count, 1)
# first two attempts fail, then new endpoint info is taken from the
# queue and the new attempt is successful
mock_conn.reset_mock()
# connection attempt successful only at the third (host_B) attempt
mock_conn.side_effect = lambda host, port: host == 'host_B'
q = g_queue.JoinableQueue(items=[('dummy.host', 1234),
('host_A', 1234),
('host_B', 1234)])
self.ssh_mod._wait_for_ssh(q, 'ssh', 'cmd', timeout=0, attempts=5)
self.assertEqual(mock_conn.call_count, 3)
with self.assertRaises(ValueError):
q.task_done()
@mock.patch('treadmill.cli.bad_exit', side_effect=BadExit())
def test_run_unix(self, bad_exit):
"""Test run_unix()."""
with self.assertRaises(BadExit):
self.ssh_mod.run_unix('host', 'port', 'no_such_ssh_cmd', 'cmd')
self.assertTrue(bad_exit.called)
@mock.patch('treadmill.cli.bad_exit', side_effect=BadExit())
def test_run_putty(self, bad_exit):
"""Test run_putty()."""
with self.assertRaises(BadExit):
self.ssh_mod.run_putty('host', 'port', 'no_such_putty_cmd', 'cmd')
self.assertTrue(bad_exit.called)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/internet/_signals.py | 62 | 2718 | # -*- test-case-name: twisted.internet.test.test_sigchld -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module is used to integrate child process termination into a
reactor event loop. This is a challenging feature to provide because
most platforms indicate process termination via SIGCHLD and do not
provide a way to wait for that signal and arbitrary I/O events at the
same time. The naive implementation involves installing a Python
SIGCHLD handler; unfortunately this leads to other syscalls being
interrupted (whenever SIGCHLD is received) and failing with EINTR
(which almost no one is prepared to handle). This interruption can be
disabled via siginterrupt(2) (or one of the equivalent mechanisms);
however, if the SIGCHLD is delivered by the platform to a non-main
thread (not a common occurrence, but difficult to prove impossible),
the main thread (waiting on select() or another event notification
API) may not wake up leading to an arbitrary delay before the child
termination is noticed.
The basic solution to all these issues involves enabling SA_RESTART
(ie, disabling system call interruption) and registering a C signal
handler which writes a byte to a pipe. The other end of the pipe is
registered with the event loop, allowing it to wake up shortly after
SIGCHLD is received. See L{twisted.internet.posixbase._SIGCHLDWaker}
for the implementation of the event loop side of this solution. The
use of a pipe this way is known as the U{self-pipe
trick<http://cr.yp.to/docs/selfpipe.html>}.
From Python version 2.6, C{signal.siginterrupt} and C{signal.set_wakeup_fd}
provide the necessary C signal handler which writes to the pipe to be
registered with C{SA_RESTART}.
"""
from __future__ import division, absolute_import
import signal
def installHandler(fd):
"""
Install a signal handler which will write a byte to C{fd} when
I{SIGCHLD} is received.
This is implemented by installing a SIGCHLD handler that does nothing,
setting the I{SIGCHLD} handler as not allowed to interrupt system calls,
and using L{signal.set_wakeup_fd} to do the actual writing.
@param fd: The file descriptor to which to write when I{SIGCHLD} is
received.
@type fd: C{int}
"""
if fd == -1:
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
else:
def noopSignalHandler(*args):
pass
signal.signal(signal.SIGCHLD, noopSignalHandler)
signal.siginterrupt(signal.SIGCHLD, False)
return signal.set_wakeup_fd(fd)
def isDefaultHandler():
"""
Determine whether the I{SIGCHLD} handler is the default or not.
"""
return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL
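# A minimal, self-contained sketch of the self-pipe trick described above
# (illustrative only; assumes a POSIX platform with os.fork and SIGCHLD):
if __name__ == '__main__':
    import fcntl
    import os

    readFD, writeFD = os.pipe()
    # signal.set_wakeup_fd requires a non-blocking descriptor on recent
    # Python versions, so mark the write end accordingly.
    flags = fcntl.fcntl(writeFD, fcntl.F_GETFL)
    fcntl.fcntl(writeFD, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    installHandler(writeFD)
    if os.fork() == 0:
        os._exit(0)  # the child exits immediately, raising SIGCHLD
    # An event loop would select() on readFD; reading the wakeup byte here
    # shows that child termination was turned into ordinary pipe I/O.
    os.read(readFD, 1)
    os.waitpid(-1, 0)  # reap the child
    installHandler(-1)  # restore the default SIGCHLD disposition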
| mit |
b0ri5/fssw-googlecode | scons/scons-local-2.0.1/SCons/Scanner/Dir.py | 61 | 3804 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Dir.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
is_Dir = lambda n: isinstance(n.disambiguate(), SCons.Node.FS.Dir)
return list(filter(is_Dir, nodes))
def DirScanner(**kw):
"""Return a prototype Scanner instance for scanning
directories for on-disk files"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = only_dirs
return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
"""Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = None
return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
skip_entry = {}
skip_entry_list = [
'.',
'..',
'.sconsign',
# Used by the native dblite.py module.
'.sconsign.dblite',
# Used by dbm and dumbdbm.
'.sconsign.dir',
# Used by dbm.
'.sconsign.pag',
# Used by dumbdbm.
'.sconsign.dat',
'.sconsign.bak',
# Used by some dbm emulations using Berkeley DB.
'.sconsign.db',
]
for skip in skip_entry_list:
skip_entry[skip] = 1
skip_entry[SCons.Node.FS._my_normcase(skip)] = 1
do_not_scan = lambda k: k not in skip_entry
def scan_on_disk(node, env, path=()):
"""
Scans a directory for on-disk files and directories therein.
Looking up the entries will add these to the in-memory Node tree
representation of the file system, so all we have to do is just
that and then call the in-memory scanning function.
"""
try:
flist = node.fs.listdir(node.abspath)
except (IOError, OSError):
return []
e = node.Entry
for f in filter(do_not_scan, flist):
# Add ./ to the beginning of the file name so if it begins with a
# '#' we don't look it up relative to the top-level directory.
e('./' + f)
return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
"""
"Scans" a Node.FS.Dir for its in-memory entries.
"""
try:
entries = node.entries
except AttributeError:
# It's not a Node.FS.Dir (or doesn't look enough like one for
# our purposes), which can happen if a target list containing
# mixed Node types (Dirs and Files, for example) has a Dir as
# the first entry.
return []
entry_list = sorted(filter(do_not_scan, list(entries.keys())))
return [entries[n] for n in entry_list]
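# A short usage sketch (illustrative; assumes a configured SCons build
# environment in an SConstruct file):
#
#   env = Environment()
#   env.Append(SCANNERS=DirScanner())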
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-3.0 |
bsipocz/astropy | astropy/io/votable/util.py | 1 | 6198 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Various utilities and cookbook-like things.
"""
# STDLIB
import codecs
import contextlib
import io
import re
import gzip
from distutils import version
__all__ = [
'convert_to_writable_filelike',
'stc_reference_frames',
'coerce_range_list_param',
]
@contextlib.contextmanager
def convert_to_writable_filelike(fd, compressed=False):
"""
Returns a writable file-like object suitable for streaming output.
Parameters
----------
fd : file path string or writable file-like object
May be:
- a file path, in which case it is opened, and the file
object is returned.
- an object with a :meth:`write` method, in which case that
object is returned.
compressed : bool, optional
If `True`, create a gzip-compressed file. (Default is `False`).
Returns
-------
fd : writable file-like object
"""
if isinstance(fd, str):
if fd.endswith('.gz') or compressed:
with gzip.GzipFile(fd, 'wb') as real_fd:
encoded_fd = io.TextIOWrapper(real_fd, encoding='utf8')
yield encoded_fd
encoded_fd.flush()
real_fd.flush()
return
else:
with open(fd, 'wt', encoding='utf8') as real_fd:
yield real_fd
return
elif hasattr(fd, 'write'):
assert callable(fd.write)
if compressed:
fd = gzip.GzipFile(fileobj=fd)
# If we can't write Unicode strings, use a codecs.StreamWriter
# object
needs_wrapper = False
try:
fd.write('')
except TypeError:
needs_wrapper = True
if not hasattr(fd, 'encoding') or fd.encoding is None:
needs_wrapper = True
if needs_wrapper:
yield codecs.getwriter('utf-8')(fd)
fd.flush()
else:
yield fd
fd.flush()
return
else:
raise TypeError("Can not be coerced to writable file-like object")
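# A short usage sketch for the context manager above (illustrative;
# 'votable.xml.gz' is a hypothetical output path):
#
#   with convert_to_writable_filelike('votable.xml.gz', compressed=True) as fd:
#       fd.write('<VOTABLE/>')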
# <http://www.ivoa.net/Documents/REC/DM/STC-20071030.html>
stc_reference_frames = set([
'FK4', 'FK5', 'ECLIPTIC', 'ICRS', 'GALACTIC', 'GALACTIC_I', 'GALACTIC_II',
'SUPER_GALACTIC', 'AZ_EL', 'BODY', 'GEO_C', 'GEO_D', 'MAG', 'GSE', 'GSM',
'SM', 'HGC', 'HGS', 'HEEQ', 'HRTN', 'HPC', 'HPR', 'HCC', 'HGI',
'MERCURY_C', 'VENUS_C', 'LUNA_C', 'MARS_C', 'JUPITER_C_III',
'SATURN_C_III', 'URANUS_C_III', 'NEPTUNE_C_III', 'PLUTO_C', 'MERCURY_G',
'VENUS_G', 'LUNA_G', 'MARS_G', 'JUPITER_G_III', 'SATURN_G_III',
'URANUS_G_III', 'NEPTUNE_G_III', 'PLUTO_G', 'UNKNOWNFrame'])
def coerce_range_list_param(p, frames=None, numeric=True):
"""
Coerces and/or verifies the object *p* into a valid range-list-format parameter.
As defined in `Section 8.7.2 of Simple
Spectral Access Protocol
<http://www.ivoa.net/Documents/REC/DAL/SSA-20080201.html>`_.
Parameters
----------
p : str or sequence
May be a string as passed verbatim to the service expecting a
range-list, or a sequence. If a sequence, each item must be
either:
- a numeric value
- a named value, such as, for example, 'J' for named
spectrum (if the *numeric* kwarg is False)
- a 2-tuple indicating a range
- the last item may be a string indicating the frame of
reference
frames : sequence of str, optional
A sequence of acceptable frame of reference keywords. If not
provided, the default set in ``set_reference_frames`` will be
used.
numeric : bool, optional
If `True` (the default), all values are coerced to floats; if
`False`, named (non-numeric) values such as 'J' are also accepted.
Returns
-------
parts : tuple
The result is a tuple:
- a string suitable for passing to a service as a range-list
argument
- an integer counting the number of elements
"""
def str_or_none(x):
if x is None:
return ''
if numeric:
x = float(x)
return str(x)
def numeric_or_range(x):
if isinstance(x, tuple) and len(x) == 2:
return '{}/{}'.format(str_or_none(x[0]), str_or_none(x[1]))
else:
return str_or_none(x)
def is_frame_of_reference(x):
return isinstance(x, str)
if p is None:
return None, 0
elif isinstance(p, (tuple, list)):
has_frame_of_reference = len(p) > 1 and is_frame_of_reference(p[-1])
if has_frame_of_reference:
points = p[:-1]
else:
points = p[:]
out = ','.join([numeric_or_range(x) for x in points])
length = len(points)
if has_frame_of_reference:
if frames is not None and p[-1] not in frames:
raise ValueError(
"'{}' is not a valid frame of reference".format(p[-1]))
out += ';' + p[-1]
length += 1
return out, length
elif isinstance(p, str):
number = r'([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)?'
if not numeric:
number = r'(' + number + ')|([A-Z_]+)'
match = re.match(
'^' + number + r'([,/]' + number +
r')+(;(?P<frame>[<A-Za-z_0-9]+))?$',
p)
if match is None:
raise ValueError(f"'{p}' is not a valid range list")
frame = match.groupdict()['frame']
if frames is not None and frame is not None and frame not in frames:
raise ValueError(
f"'{frame}' is not a valid frame of reference")
return p, p.count(',') + p.count(';') + 1
try:
float(p)
return str(p), 1
except TypeError:
raise ValueError(f"'{p}' is not a valid range list")
def version_compare(a, b):
"""
Compare two VOTable version identifiers.
"""
def version_to_tuple(v):
if v[0].lower() == 'v':
v = v[1:]
return version.StrictVersion(v)
av = version_to_tuple(a)
bv = version_to_tuple(b)
# Can't use cmp because it was removed from Python 3.x
return (av > bv) - (av < bv)
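# For example (illustrative): version_compare('v1.1', '1.2') returns -1,
# and version_compare('1.2', '1.2') returns 0.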
| bsd-3-clause |
valkyriesavage/invenio | modules/bibauthorid/lib/bibauthorid_tables_utils.py | 1 | 54551 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
bibauthorid_tables_utils
Bibauthorid's DB handler
"""
import sys
import re
import random
import bibauthorid_config as bconfig
import bibauthorid_structs as dat
from search_engine import get_record
from bibrank_citation_searcher import get_citation_dict
from dbquery import run_sql
from dbquery import OperationalError, ProgrammingError
from bibauthorid_utils import split_name_parts, create_normalized_name
from bibauthorid_utils import clean_name_string
from bibauthorid_authorname_utils import update_doclist
def get_papers_recently_modified(date=''):
'''
Returns the bibrecs with modification date more recent than date, or all
the bibrecs if no date is specified.
@param date: date
'''
papers = run_sql("select id from bibrec where modification_date > %s",
(str(date),))
if papers:
bibrecs = [i[0] for i in papers]
bibrecs.append(-1)
min_date = run_sql("select max(modification_date) from bibrec where "
"id in %s", (tuple(bibrecs),))
else:
min_date = run_sql("select now()")
return papers, min_date
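# Illustrative usage (the date string is hypothetical):
#   papers, min_date = get_papers_recently_modified('2011-01-01')
#   # 'papers' holds (id,) rows; 'min_date' wraps the newest modification
#   # date seen, suitable for passing to the next incremental run.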
def populate_authornames_bibrefs_from_authornames():
'''
Populates aidAUTHORNAMESBIBREFS.
For each entry in aidAUTHORNAMES, creates a corresponding entry in aidAUTHORNAMESBIBREFS
so it is possible to search by bibrec/bibref at a reasonable speed as well, and not only by name.
'''
nids = run_sql("select id,bibrefs from aidAUTHORNAMES")
for nid in nids:
for bibref in nid[1].split(','):
if bconfig.TABLES_UTILS_DEBUG:
print ('populate_authornames_bibrefs_from_authornames: Adding: '
' %s %s' % (str(nid[0]), str(bibref)))
run_sql("insert into aidAUTHORNAMESBIBREFS (Name_id, bibref) "
"values (%s,%s)", (str(nid[0]), str(bibref)))
def authornames_tables_gc(bunch_size=50):
'''
Performs garbage collection on the authornames tables.
Potentially very slow.
'''
bunch_start = run_sql("select min(id) from aidAUTHORNAMESBIBREFS")
if len(bunch_start) >= 1:
bunch_start = int(bunch_start[0][0])
else:
return
abfs_ids_bunch = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS limit %s, %s"
, (str(bunch_start - 1), str(bunch_size)))
bunch_start += bunch_size
while len(abfs_ids_bunch) >= 1:
bib100list = []
bib700list = []
for i in abfs_ids_bunch:
if i[2].split(':')[0] == '100':
bib100list.append(i[2].split(':')[1])
elif i[2].split(':')[0] == '700':
bib700list.append(i[2].split(':')[1])
bib100liststr = '( '
for i in bib100list:
bib100liststr += "'" + str(i) + "',"
bib100liststr = bib100liststr[0:len(bib100liststr) - 1] + " )"
bib700liststr = '( '
for i in bib700list:
bib700liststr += "'" + str(i) + "',"
bib700liststr = bib700liststr[0:len(bib700liststr) - 1] + " )"
if len(bib100list) >= 1:
bib10xids = run_sql("select id from bib10x where id in %s"
% bib100liststr)
else:
bib10xids = []
if len(bib700list) >= 1:
bib70xids = run_sql("select id from bib70x where id in %s"
% bib700liststr)
else:
bib70xids = []
bib10xlist = []
bib70xlist = []
for i in bib10xids:
bib10xlist.append(str(i[0]))
for i in bib70xids:
bib70xlist.append(str(i[0]))
bib100junk = set(bib100list).difference(set(bib10xlist))
bib700junk = set(bib700list).difference(set(bib70xlist))
idsdict = {}
for i in abfs_ids_bunch:
idsdict[i[2]] = [i[0], i[1]]
junklist = []
for i in bib100junk:
junklist.append('100:' + i)
for i in bib700junk:
junklist.append('700:' + i)
for junkref in junklist:
try:
id_to_remove = idsdict[junkref]
run_sql("delete from aidAUTHORNAMESBIBREFS where id=%s",
(str(id_to_remove[0]),))
if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: idAUTHORNAMESBIBREFS deleting row " + str(id_to_remove)
authrow = run_sql("select id,Name,bibrefs,db_name from aidAUTHORNAMES where id=%s", (str(id_to_remove[1]),))
if len(authrow[0][2].split(',')) == 1:
run_sql("delete from aidAUTHORNAMES where id=%s", (str(id_to_remove[1]),))
if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: aidAUTHORNAMES deleting " + str(authrow)
else:
bibreflist = ''
for ref in authrow[0][2].split(','):
if ref != junkref:
bibreflist += ref + ','
bibreflist = bibreflist[0:len(bibreflist) - 1]
run_sql("update aidAUTHORNAMES set bibrefs=%s where id=%s",
(bibreflist, id_to_remove[1]))
if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: aidAUTHORNAMES updating " + str(authrow) + ' with ' + str(bibreflist)
except:
pass
abfs_ids_bunch = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS limit %s,%s" ,
(str(bunch_start - 1), str(bunch_size)))
bunch_start += bunch_size
def update_authornames_tables_from_paper(papers_list=[]):
"""
Updates the authornames tables with the names on the given papers list.
@param papers_list: list of the papers which have been updated (bibrecs), e.g. ((1,),)
For each paper in the list, gathers all names, bibrefs and bibrecs to be added to the
aidAUTHORNAMES table, taking care of updating aidAUTHORNAMESBIBREFS as well.
NOTE: just a reminder: get_record would be faster, but we don't have the bibref
there. Maybe there is a way to rethink everything so as not to use bibrefs -- but
how would we address authors then?
"""
def update_authornames_tables(name, bibref):
'''
Update the tables for one (bibref, name) tuple.
'''
authornames_row = run_sql("select id,Name,bibrefs,db_name from aidAUTHORNAMES where db_name like %s",
(str(name),))
authornames_bibrefs_row = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS "
"where bibref like %s", (str(bibref),))
#@XXX: update_authornames_tables: if I'm not wrong, there should always be only one result; this is checked further on
if ((len(authornames_row) > 1) or (len(authornames_bibrefs_row) > 1) or
(len(authornames_row) < len(authornames_bibrefs_row))):
if bconfig.TABLES_UTILS_DEBUG:
print "update_authornames_tables: More then one result or missing authornames?? Something is wrong, not updating" + str(authornames_row) + str(authornames_bibrefs_row)
return
if len(authornames_row) == 1:
# we have a hit for the name string; check if the 'new' bibref is already associated:
# if yes, there is nothing to do; otherwise we should add it here and in the aidAUTHORNAMESBIBREFS table
if authornames_row[0][2].count(bibref) < 1:
if bconfig.TABLES_UTILS_DEBUG:
print 'update_authornames_tables: Adding new bibref to ' + str(authornames_row) + ' ' + str(name) + ' ' + str(bibref)
run_sql("update aidAUTHORNAMES set bibrefs=%s where id=%s",
(authornames_row[0][2] + ',' + str(bibref), authornames_row[0][0]))
if len(authornames_bibrefs_row) < 1:
# we have to add the bibref to the name; it would be strange if it were already there
run_sql("insert into aidAUTHORNAMESBIBREFS (Name_id,bibref) values (%s,%s)",
(authornames_row[0][0], str(bibref)))
else:
if bconfig.TABLES_UTILS_DEBUG:
print 'update_authornames_tables: Nothing to add to ' + str(authornames_row) + ' ' + str(name) + ' ' + str(bibref)
else:
#@NOTE: update_authornames_tables: we don't have the name string in the db: either the name
# associated with the bibref has changed, or this is a new name. Experimenting with bibupload,
# it looks like a new bibref is created if a name on a paper changes.
if len(authornames_bibrefs_row) == 1:
# If len(authornames_row) is zero but we have a row in authornames_bibrefs_row, it means
# that the name string has changed, somehow!
# @FIXME: update_authornames_tables: this case should really be considered?
if bconfig.TABLES_UTILS_DEBUG:
print 'update_authornames_tables: The name associated with the bibref has changed?? ' + str(name) + ' ' + str(bibref)
else:
artifact_removal = re.compile("[^a-zA-Z0-9]")
raw_name = artifact_removal.sub("", name)
if len(raw_name) > 1:
dbname = name
else:
dbname = 'Error in name parsing!'
clean_name = create_normalized_name(split_name_parts(name))
authornamesid = run_sql("insert into aidAUTHORNAMES (Name,bibrefs,db_name) values (%s,%s,%s)",
(clean_name, str(bibref), dbname))
run_sql("insert into aidAUTHORNAMESBIBREFS (Name_id,bibref) values (%s,%s)",
(authornamesid, str(bibref)))
if bconfig.TABLES_UTILS_DEBUG:
print 'update_authornames_tables: Created new name ' + str(authornamesid) + ' ' + str(name) + ' ' + str(bibref)
tables = [['bibrec_bib10x', 'bib10x', '100__a', '100'], ['bibrec_bib70x', 'bib70x', '700__a', '700']]
for paper in papers_list:
for table in tables:
sqlstr = "select id_bibxxx from %s where id_bibrec=" % table[0]
bibrefs = run_sql(sqlstr+"%s", (str(paper[0]),))
for ref in bibrefs:
sqlstr = "select value from %s where tag='%s' and id=" % (table[1], table[2])
name = run_sql(sqlstr+"%s", (str(ref[0]),))
if len(name) >= 1:
update_authornames_tables(name[0][0], table[3] + ':' + str(ref[0]))
def populate_authornames():
"""
Author names table population from bib10x and bib70x
Average Runtime: 376.61 sec (6.27 min) for 327k entries
Should be called only with an empty table; afterwards, use
update_authornames_tables_from_paper for the new papers which
are coming in or are modified.
"""
max_rows_per_run = bconfig.TABLE_POPULATION_BUNCH_SIZE
if max_rows_per_run == -1:
max_rows_per_run = 5000
max100 = run_sql("SELECT COUNT(id) FROM bib10x WHERE tag = '100__a'")
max700 = run_sql("SELECT COUNT(id) FROM bib70x WHERE tag = '700__a'")
tables = "bib10x", "bib70x"
authornames_is_empty_checked = 0
authornames_is_empty = 1
# Bring author names from bib10x and bib70x to authornames table
for table in tables:
if table == "bib10x":
table_number = "100"
else:
table_number = "700"
querylimiter_start = 0
querylimiter_max = eval('max' + str(table_number) + '[0][0]')
if bconfig.TABLES_UTILS_DEBUG:
print "\nProcessing %s (%s entries):" % (table, querylimiter_max)
sys.stdout.write("0% ")
sys.stdout.flush()
while querylimiter_start <= querylimiter_max:
sys.stdout.write(".")
sys.stdout.flush()
percentage = int(((querylimiter_start + max_rows_per_run) * 100)
/ querylimiter_max)
sys.stdout.write(".%s%%." % (percentage))
sys.stdout.flush()
# Query the Database for a list of authors from the correspondent
# tables--several thousands at a time
bib = run_sql("SELECT id, value FROM %s WHERE tag = '%s__a' "
"LIMIT %s, %s" % (table, table_number,
querylimiter_start, max_rows_per_run))
authorexists = None
querylimiter_start += max_rows_per_run
for i in bib:
# For mental sanity, exclude things that are not names...
# Yes, I know that there are strange names out there!
# Yes, I read the 40 misconceptions about names.
# Yes, I know!
# However, these statistical outlaws are harmful.
artifact_removal = re.compile("[^a-zA-Z0-9]")
authorname = ""
if not i[1]:
continue
raw_name = artifact_removal.sub("", i[1])
if len(raw_name) > 1:
authorname = i[1]
if not authorname:
continue
if not authornames_is_empty_checked:
authornames_is_empty = run_sql("SELECT COUNT(id) "
"FROM aidAUTHORNAMES")
if authornames_is_empty[0][0] == 0:
authornames_is_empty_checked = 1
authornames_is_empty = 1
if not authornames_is_empty:
# Find duplicates in the database and append id if
# duplicate is found
authorexists = run_sql("SELECT id,Name,bibrefs,db_name FROM aidAUTHORNAMES "
"WHERE db_name = %s", (authorname,))
bibrefs = "%s:%s" % (table_number, i[0])
if not authorexists:
insert_name = ""
if len(authorname) > 240:
bconfig.LOGGER.warn("\nName too long, truncated to 254"
" chars: %s" % (authorname))
insert_name = authorname[0:254]
else:
insert_name = authorname
run_sql("INSERT INTO aidAUTHORNAMES VALUES"
" (NULL, %s, %s, %s)",
(create_normalized_name(
split_name_parts(insert_name)),
bibrefs, insert_name))
if authornames_is_empty:
authornames_is_empty = 0
else:
if authorexists[0][2].count(bibrefs) == 0:
upd_bibrefs = "%s,%s" % (authorexists[0][2], bibrefs)
run_sql("UPDATE aidAUTHORNAMES SET bibrefs = "
"%s WHERE id = %s",
(upd_bibrefs, authorexists[0][0]))
if bconfig.TABLES_UTILS_DEBUG:
sys.stdout.write(" Done.")
sys.stdout.flush()
def get_diff_marc10x70x_to_anames():
'''
Determines the difference between the union of bib10x and bib70x and the
aidAUTHORNAMES table.
It will return the entries which are present in bib10x and bib70x but not
in aidAUTHORNAMES. Meant to be run periodically.
@todo: get_diff_marc10x70x_to_anames: find meaningful use for the
returned results.
@return: a list of the author names not contained in the authornames table
@rtype: list
'''
run_sql("DROP VIEW authors")
run_sql("create view authors AS \
(SELECT value FROM bib10x WHERE tag =\"100__a\") \
UNION \
(SELECT value FROM bib70x WHERE tag =\"700__a\")")
diff = run_sql("SELECT value from authors LEFT JOIN aidAUTHORNAMES as b"
+ " ON (authors.value = b.Name) WHERE b.name IS NULL")
return diff
def populate_doclist_for_author_surname(surname):
"""
Searches for all the documents containing a given surname and processes
them: creates the virtual author for each author on a document.
@param surname: The search is based on this last name.
@type surname: string
"""
if not dat.CITES_DICT:
cites = get_citation_dict("citationdict")
for key in cites:
dat.CITES_DICT[key] = cites[key]
if not dat.CITED_BY_DICT:
cited_by = get_citation_dict("reversedict")
for key in cited_by:
dat.CITED_BY_DICT[key] = cited_by[key]
bconfig.LOGGER.log(25, "Populating document list for %s" % (surname))
init_authornames(surname)
authors = [row for row in dat.AUTHOR_NAMES if not row['processed']]
for author in authors:
marc_100 = []
marc_700 = []
temp_marc = author['bibrefs'].split(',')
for j in temp_marc:
marcfield, internalid = j.split(':')
if marcfield == '100':
marc_100.append(internalid)
elif marcfield == '700':
marc_700.append(internalid)
else:
bconfig.LOGGER.error("Wrong MARC field. How did you do"
" that?!--This should never happen! boo!")
bibrecs = []
if marc_100:
bibrecs_100 = run_sql("SELECT id_bibrec FROM bibrec_bib10x"
+ " WHERE id_bibxxx = %s"
% (" OR id_bibxxx = ".join(marc_100)))
for j in bibrecs_100:
bibrecs.append(j[0])
if marc_700:
bibrecs_700 = run_sql("SELECT id_bibrec FROM bibrec_bib70x"
+ " WHERE id_bibxxx = %s"
% (" OR id_bibxxx = ".join(marc_700)))
for j in bibrecs_700:
bibrecs.append(j[0])
if load_records_to_mem_cache(bibrecs):
for bibrec in bibrecs:
update_doclist(bibrec, authorname_id=author['id'])
def load_records_to_mem_cache(bibrec_ids):
'''
Loads all the records specified in the list into the memory storage
facility. It will try to attach citation information to each record in
the process.
@param bibrec_ids: list of bibrec IDs to load to memory
@type bibrec_ids: list
@return: Success (True) or failure (False) of the process
@rtype: boolean
'''
if not bibrec_ids:
return False
for bibrec in bibrec_ids:
if not bibrec in dat.RELEVANT_RECORDS:
rec = get_record(bibrec)
if bconfig.LIMIT_AUTHORS_PER_DOCUMENT:
is_collaboration = False
authors = 0
try:
for field in rec['710'][0][0]:
if field[0] == 'g':
is_collaboration = True
break
except KeyError:
pass
if is_collaboration:
# If experimentalists shall be excluded uncomment
# the following line
#continue
pass
else:
try:
for field in rec['100'][0][0]:
if field[0] == 'a':
authors += 1
break
except KeyError:
pass
try:
for coauthor in rec['700']:
if coauthor[0][0][0] == 'a':
authors += 1
except KeyError:
pass
if authors > bconfig.MAX_AUTHORS_PER_DOCUMENT:
continue
dat.RELEVANT_RECORDS[bibrec] = rec
cites = []
cited_by = []
try:
cites = dat.CITES_DICT[bibrec]
except KeyError:
pass
try:
cited_by = dat.CITED_BY_DICT[bibrec]
except KeyError:
pass
dat.RELEVANT_RECORDS[bibrec]['cites'] = cites
dat.RELEVANT_RECORDS[bibrec]['cited_by'] = cited_by
return True
def init_authornames(surname):
'''
Initializes the AUTHOR_NAMES memory storage
@param surname: The surname to search for
@type surname: string
'''
if len(dat.AUTHOR_NAMES) > 0:
existing = [row for row in dat.AUTHOR_NAMES
if row['name'].split(",")[0] == surname]
if existing:
bconfig.LOGGER.log(25, "AUTHOR_NAMES already holds the "
"correct data.")
else:
bconfig.LOGGER.debug("AUTHOR_NAMES will have additional content")
for updated in [row for row in dat.AUTHOR_NAMES
if not row['processed']]:
updated['processed'] = True
_perform_authornames_init(surname)
else:
_perform_authornames_init(surname)
def _perform_authornames_init(surname):
'''
Performs the actual AUTHOR_NAMES memory storage init by reading values
from the database
@param surname: The surname to search for
@type surname: string
'''
# instead of replacing with ' ', this will construct the regex for the
# SQL query as well as the next if statement.
surname = clean_name_string(surname,
replacement=".{0,3}",
keep_whitespace=False)
if not surname.startswith(".{0,3}"):
surname = "^['`-]*%s" % (surname)
sql_query = ("SELECT id, name, bibrefs, db_name "
"FROM aidAUTHORNAMES WHERE name REGEXP \"%s\""
% (surname))
for author in run_sql(sql_query):
dat.AUTHOR_NAMES.append({'id': author[0],
'name': author[1],
'bibrefs': author[2],
'db_name': author[3],
'processed': False})
def find_all_last_names():
'''
Filters out all last names from all names in the database.
@return: a list of last names
@rtype: list of strings
'''
all_names = run_sql("SELECT Name FROM aidAUTHORNAMES")
last_names = set()
for name in all_names:
last_name = split_name_parts(name[0])[0]
# For mental sanity, exclude things that are not names...
# - Single letter names
# - Single number names
# - Names containing only numbers and/or symbols
# Yes, I know that there are strange names out there!
# Yes, I read the 40 misconceptions about names.
# Yes, I know!
# However, these statistical outlaws are harmful to the data set.
artifact_removal = re.compile("[^a-zA-Z0-9]")
last_name_test = artifact_removal.sub("", last_name)
if len(last_name_test) > 1:
last_names.add("%s," % (last_name,))
# for name in all_names:
# last_names.add([split_name_parts(name[0]), name[0]])
return list(last_names)
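# Illustrative result shape: ['Ellis,', 'Smith,'] -- each last name keeps the
# trailing comma added by the "%s," formatting above.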
def write_mem_cache_to_tables(sanity_checks=False):
'''
Reads every memory cache and writes its contents to the appropriate
table in the database.
@param sanity_checks: Perform sanity checks before inserting (i.e. is the
data already present in the db?) and after the insertion (i.e. is the
data entered correctly?)
@type sanity_checks: boolean
'''
ra_id_offset = run_sql("SELECT max(realauthorID) FROM"
+ " aidREALAUTHORS")[0][0]
va_id_offset = run_sql("SELECT max(virtualauthorID) FROM"
+ " aidVIRTUALAUTHORS")[0][0]
cluster_id_offset = run_sql("SELECT max(id) FROM"
" aidVIRTUALAUTHORSCLUSTERS")[0][0]
if not ra_id_offset:
ra_id_offset = 0
if not va_id_offset:
va_id_offset = 0
if not cluster_id_offset:
cluster_id_offset = 0
max_va_id = dat.ID_TRACKER["va_id_counter"]
if max_va_id <= 1:
max_va_id = 2
random_va_id = random.randint(1, max_va_id - 1)
va_mem_data = [row['value'] for row in dat.VIRTUALAUTHOR_DATA
if (row["virtualauthorid"] == random_va_id
and row['tag'] == "orig_authorname_id")][0]
if sanity_checks:
if va_mem_data:
check_on_va = run_sql("SELECT id,virtualauthorID,tag,value FROM aidVIRTUALAUTHORSDATA "
"WHERE tag='orig_authorname_id' AND "
"value=%s" , (va_mem_data,))
if check_on_va:
bconfig.LOGGER.error("Sanity check reported that the data "
"exists. We'll skip this record for now. "
"Please check the data set manually.")
return False
bconfig.LOGGER.log(25, "Writing to persistence layer")
bconfig.LOGGER.log(25, "Offsets...RA: %s; VA: %s; CL: %s" % (ra_id_offset,
va_id_offset,
cluster_id_offset))
batch_max = bconfig.TABLE_POPULATION_BUNCH_SIZE
query = []
for va_cluster in dat.VIRTUALAUTHOR_CLUSTERS:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"cluster table failed")
return False
query = []
query.append("INSERT INTO aidVIRTUALAUTHORSCLUSTERS (cluster_name) "
"VALUES (\"%s\"); "
% (va_cluster['clustername'],))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"cluster table failed")
return False
query = []
for va_data in dat.VIRTUALAUTHOR_DATA:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"data table failed")
return False
query = []
query.append("INSERT INTO aidVIRTUALAUTHORSDATA "
"(virtualauthorID, tag, value) VALUES "
"(%d, \"%s\", \"%s\"); "
% (va_data['virtualauthorid'] + va_id_offset,
va_data['tag'], va_data['value']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"data table failed")
return False
query = []
for va_entry in dat.VIRTUALAUTHORS:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"table failed")
return False
query = []
query.append("INSERT INTO aidVIRTUALAUTHORS "
"(virtualauthorID, authornamesID, p, clusterID) VALUES "
"(%d, %d, \"%s\", %d); "
% (va_entry['virtualauthorid'] + va_id_offset,
va_entry['authornamesid'], va_entry['p'],
va_entry['clusterid'] + cluster_id_offset))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"table failed")
return False
query = []
for ra_data in dat.REALAUTHOR_DATA:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"data table failed")
return False
query = []
if not ra_data['tag'] == 'outgoing_citation':
query.append("INSERT INTO aidREALAUTHORDATA "
"(realauthorID, tag, value, va_count, "
"va_names_p, va_p) VALUES "
"(%d, \"%s\", \"%s\", %d, "
"%f, %f); "
% (ra_data['realauthorid'] + ra_id_offset,
ra_data['tag'], ra_data['value'],
ra_data['va_count'], ra_data['va_np'],
ra_data['va_p']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"data table failed")
return False
query = []
for ra_entry in dat.REALAUTHORS:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"table failed")
return False
query = []
query.append("INSERT INTO aidREALAUTHORS "
"(realauthorID, virtualauthorID, p) VALUES "
"(%d, %d, %f); "
% (ra_entry['realauthorid'] + ra_id_offset,
ra_entry['virtualauthorid'] + va_id_offset,
ra_entry['p']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"table failed")
return False
query = []
for doc in dat.DOC_LIST:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into doc list "
"table failed")
return False
query = []
for processed_author in doc['authornameids']:
query.append("INSERT INTO aidDOCLIST "
"(bibrecID, processed_author) VALUES "
"(%d, %d); "
% (doc['bibrecid'], processed_author))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into doc list "
"table failed")
return False
query = []
if sanity_checks:
if va_mem_data:
check_on_va = run_sql("SELECT id,virtualauthorID,tag,value FROM aidVIRTUALAUTHORSDATA "
"WHERE tag='orig_authorname_id' AND "
"value=%s" , (va_mem_data,))
if not check_on_va:
bconfig.LOGGER.error("Sanity check reported that no data "
" exists in the database after writing "
" to it.")
return False
bconfig.LOGGER.log(25, "Everything is now written to the database. "
"Thanks. Bye.")
return True
def get_existing_last_names():
'''
Find all authors that have been processed and written to the database.
Extract all last names from this list and return these last names.
Especially helpful to exclude these clusters (last names) from a run.
@return: list of last names
@rtype: list of strings
'''
bconfig.LOGGER.log(25, "Reading info about existing authors from database")
db_lnames = set()
db_names = run_sql("select value from aidVIRTUALAUTHORSDATA where"
+ " tag='orig_name_string'")
for i in db_names:
db_lnames.add(i[0].split(',')[0])
return list(db_lnames)
def get_len_authornames_bibrefs():
'''
Reads the lengths of authornames and bibrefs.
Used to determine if essential tables already exist.
@return: dict({'names': -1, 'bibrefs': -1})
@rtype: dict
'''
lengths = {'names':-1,
'bibrefs':-1}
if check_and_create_aid_tables():
authornames_len = run_sql("SELECT count(id) from aidAUTHORNAMES")
bibrefs_len = run_sql("SELECT count(id) from aidAUTHORNAMESBIBREFS")
try:
lengths['names'] = int(authornames_len[0][0])
lengths['bibrefs'] = int(bibrefs_len[0][0])
except (ValueError, TypeError):
lengths['names'] = -1
lengths['bibrefs'] = -1
return lengths
def check_and_create_aid_tables():
'''
Checks if the database tables for Bibauthorid exist. If not, creates them
@return: True if tables exist, False if there was an error
@rtype: boolean
'''
try:
if not run_sql("show tables like 'aidAUTHORNAMES';"):
return False
except (ProgrammingError, OperationalError):
return False
return True
def load_mem_cache_from_tables():
'''
Loads database content for an author's last name cluster into the
memory storage facility.
@precondition: memory storage facility needs to be loaded with respective
authornames data (init_authornames(lastname))
@return: Success (True) or failure (False) of the loading process
@rtype: boolean
'''
# print "check for authornames mem table"
if not dat.AUTHOR_NAMES:
return False
authornames_ids = [row['id'] for row in dat.AUTHOR_NAMES]
if not authornames_ids:
return False
# print "Building offsets"
ra_id_offset = run_sql("SELECT max(realauthorID) FROM"
" aidREALAUTHORS")[0][0]
va_id_offset = run_sql("SELECT max(virtualauthorID) FROM"
" aidVIRTUALAUTHORS")[0][0]
cluster_id_offset = run_sql("SELECT max(id) FROM"
" aidVIRTUALAUTHORSCLUSTERS")[0][0]
dat.set_tracker("raid_counter", ra_id_offset + 1)
dat.set_tracker("va_id_counter", va_id_offset + 1)
dat.set_tracker("cluster_id", cluster_id_offset + 1)
# print "working on authornames ids..."
for authornames_id in authornames_ids:
db_vas = run_sql("SELECT virtualauthorid, authornamesid, p, clusterid "
"from aidVIRTUALAUTHORS WHERE authornamesid = %s",
(authornames_id,))
# print "loading VAs for authid %s" % authornames_id
db_vas_set = set([row[0] for row in db_vas])
if not db_vas_set:
db_vas_set = (-1, -1)
else:
db_vas_set.add(-1)
db_vas_tuple = tuple(db_vas_set)
db_ras = run_sql("SELECT realauthorid FROM "
"aidREALAUTHORS WHERE virtualauthorid in %s"
, (tuple(db_vas_tuple),))
if db_ras:
db_ras_set = set([row[0] for row in db_ras])
db_ras_set.add(-1)
db_ras_tuple = tuple(db_ras_set)
db_ra_vas = run_sql("SELECT virtualauthorid FROM aidREALAUTHORS "
"WHERE realauthorid in %s", (db_ras_tuple,))
db_ra_vas_set = set([row[0] for row in db_ra_vas])
db_ra_vas_set.add(-1)
db_ras_tuple = tuple(db_ra_vas_set)
db_vas_all = run_sql("SELECT virtualauthorid, authornamesid, p, "
"clusterid FROM aidVIRTUALAUTHORS WHERE "
"virtualauthorid in %s",
(db_ras_tuple,))
else:
db_vas_all = db_vas
for db_va in db_vas_all:
dat.VIRTUALAUTHORS.append({'virtualauthorid': db_va[0],
'authornamesid': db_va[1],
'p': db_va[2],
'clusterid': db_va[3]})
if not dat.VIRTUALAUTHORS:
# print "No Virtual Authors loaded. None created before."
return True
# print "Loading clusters"
cluster_ids = set([row['clusterid'] for row in dat.VIRTUALAUTHORS])
if not cluster_ids:
cluster_ids = (-1, -1)
else:
cluster_ids.add(-1)
db_va_clusters = run_sql("SELECT id, cluster_name FROM "
"aidVIRTUALAUTHORSCLUSTERS WHERE id in %s"
, (tuple(cluster_ids),))
# print "Storing clusters"
for db_va_cluster in db_va_clusters:
dat.VIRTUALAUTHOR_CLUSTERS.append({'clusterid': db_va_cluster[0],
'clustername': db_va_cluster[1]})
# print "Loading VA data"
va_ids = set([row['virtualauthorid'] for row in dat.VIRTUALAUTHORS])
if not va_ids:
va_ids = (-1, -1)
else:
va_ids.add(-1)
# print "Storing VA data"
db_va_data = run_sql("SELECT virtualauthorid, tag, value FROM "
"aidVIRTUALAUTHORSDATA WHERE virtualauthorid in %s"
, (tuple(va_ids),))
for db_va_dat in db_va_data:
dat.VIRTUALAUTHOR_DATA.append({'virtualauthorid' : db_va_dat[0],
'tag': db_va_dat[1],
'value': db_va_dat[2]})
# print "Loading RAs"
db_ras = run_sql("SELECT realauthorid, virtualauthorid, p FROM "
"aidREALAUTHORS WHERE virtualauthorid in %s"
, (tuple(va_ids),))
# print "Storing RAs"
for db_ra in db_ras:
dat.REALAUTHORS.append({'realauthorid': db_ra[0],
'virtualauthorid': db_ra[1],
'p': db_ra[2]})
# print "Loading RA data"
ra_ids = set([row['realauthorid'] for row in dat.REALAUTHORS])
if not ra_ids:
ra_ids = (-1, -1)
else:
ra_ids.add(-1)
db_ra_data = run_sql("SELECT realauthorid, tag, value, va_count, "
"va_names_p, va_p FROM aidREALAUTHORDATA WHERE "
"realauthorid in %s", (tuple(ra_ids),))
# print "Storing RA data"
for db_ra_dat in db_ra_data:
dat.REALAUTHOR_DATA.append({'realauthorid': db_ra_dat[0],
'tag': db_ra_dat[1],
'value': db_ra_dat[2],
'va_count': db_ra_dat[3],
'va_np': db_ra_dat[4],
'va_p': db_ra_dat[5]})
# print "Loading doclist entries"
bibrec_ids = set([int(row['value']) for row in dat.REALAUTHOR_DATA
if row['tag'] == "bibrec_id"])
if not bibrec_ids:
bibrec_ids = (-1, -1)
else:
bibrec_ids.add(-1)
db_doclist = run_sql("SELECT bibrecid, processed_author FROM aidDOCLIST "
"WHERE bibrecid in %s", (tuple(bibrec_ids),))
# print "Storing doclist entries"
for db_doc in db_doclist:
existing_item = [row for row in dat.DOC_LIST
if row['bibrecid'] == db_doc[0]]
if existing_item:
for update in [row for row in dat.DOC_LIST
if row['bibrecid'] == db_doc[0]]:
if not db_doc[1] in update['authornameids']:
update['authornameids'].append(db_doc[1])
else:
dat.DOC_LIST.append({'bibrecid': db_doc[0],
'authornameids': [db_doc[1]]})
remaining_ids = set(bibrec_ids) - set([-1])
if remaining_ids:
# print "will load recs"
if not load_records_to_mem_cache(list(remaining_ids)):
# print "FAILED loading records"
return False
return True
def update_tables_from_mem_cache(sanity_checks=False, return_ra_updates=False):
'''
Updates the tables in the database with the information in the memory
storage while taking into account only changed data to optimize the time
needed for the update.
@param sanity_checks: Perform sanity checks while updating--slows down the
process but might detect mistakes and prevent harm. Default: False
@type sanity_checks: boolean
@param return_ra_updates: Will force the method to return a list of real
author ids that have been updated. Default: False
@type return_ra_updates: boolean
@return: Either True if update went through without trouble or False if it
did not and a list of updated real authors or an empty list
@rtype: tuple of (boolean, list)
'''
del_ra_ids = set([-1])
del_va_ids = dat.UPDATES_LOG['deleted_vas'].union(
dat.UPDATES_LOG['touched_vas'])
if del_va_ids:
del_va_ids.add(-1)
del_ra_ids_db = run_sql("SELECT realauthorid FROM aidREALAUTHORS "
"WHERE virtualauthorid in %s"
, (tuple(del_va_ids),))
for ra_id in del_ra_ids_db:
del_ra_ids.add(ra_id[0])
if sanity_checks:
va_count_db = run_sql("SELECT COUNT(DISTINCT virtualauthorid) "
"FROM aidVIRTUALAUTHORS WHERE "
"virtualauthorid in %s"
, (tuple(del_va_ids),))
try:
va_count_db = int(va_count_db[0][0])
except (ValueError, IndexError, TypeError):
bconfig.LOGGER.exception("Error while reading number of "
"virtual authors in database")
va_count_db = -1
if not (va_count_db == len(del_va_ids)):
bconfig.LOGGER.error("Sanity checks reported that the number "
"of virtual authors in the memory "
"storage is not equal to the number of "
"virtual authors in the database. "
"Aborting update mission.")
return (False, [])
bconfig.LOGGER.log(25, "Removing updated entries from "
"persistence layer")
run_sql("DELETE FROM aidVIRTUALAUTHORSDATA "
"WHERE virtualauthorid in %s", (tuple(del_va_ids),))
run_sql("DELETE FROM aidVIRTUALAUTHORS "
"WHERE virtualauthorid in %s", (tuple(del_va_ids),))
if len(tuple(del_ra_ids)) > 1:
run_sql("DELETE FROM aidREALAUTHORDATA "
"WHERE realauthorid in %s", (tuple(del_ra_ids),))
run_sql("DELETE FROM aidREALAUTHORS "
"WHERE realauthorid in %s", (tuple(del_ra_ids),))
insert_ra_ids = dat.UPDATES_LOG['new_ras'].union(del_ra_ids)
insert_va_ids = dat.UPDATES_LOG['new_vas'].union(
dat.UPDATES_LOG['touched_vas'])
bconfig.LOGGER.log(25, "Writing to persistence layer")
batch_max = bconfig.TABLE_POPULATION_BUNCH_SIZE
ra_id_db_max = run_sql("SELECT max(realauthorID) FROM"
" aidREALAUTHORS")[0][0]
va_id_db_max = run_sql("SELECT max(virtualauthorID) FROM"
" aidVIRTUALAUTHORS")[0][0]
cluster_id_db_max = run_sql("SELECT max(id) FROM"
" aidVIRTUALAUTHORSCLUSTERS")[0][0]
if not ra_id_db_max or not va_id_db_max or not cluster_id_db_max:
return (False, [])
new_clusters = [row for row in dat.VIRTUALAUTHOR_CLUSTERS
if row['clusterid'] > cluster_id_db_max]
query = []
if not insert_ra_ids or not insert_va_ids:
bconfig.LOGGER.log(25, "Saving update to persistence layer finished "
"with success! (There was nothing to do.)")
return (True, [])
for va_cluster in new_clusters:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"cluster table failed")
return (False, [])
query = []
query.append("INSERT INTO aidVIRTUALAUTHORSCLUSTERS (cluster_name) "
"VALUES (\"%s\"); "
% (va_cluster['clustername'],))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"cluster table failed")
return (False, [])
query = []
va_data_to_insert = [row for row in dat.VIRTUALAUTHOR_DATA
if row['virtualauthorid'] in insert_va_ids]
if sanity_checks:
db_existing_va_ids = run_sql("SELECT COUNT(DISTINCT virtualauthorid) "
"FROM aidVIRTUALAUTHORS WHERE virtualauthorid in %s"
, (tuple(insert_va_ids),))
try:
db_existing_va_ids = int(db_existing_va_ids[0][0])
except (ValueError, IndexError, TypeError):
bconfig.LOGGER.exception("Error while reading number of "
"virtual authors in database")
db_existing_va_ids = -1
if not (db_existing_va_ids == 0):
bconfig.LOGGER.error("Sanity checks reported that the "
"virtual authors in the memory storage "
"that shall be inserted already exist "
"in the database. Aborting update mission.")
return (False, [])
for va_data in va_data_to_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"data table failed")
return (False, [])
query = []
query.append("INSERT INTO aidVIRTUALAUTHORSDATA "
"(virtualauthorID, tag, value) VALUES "
"(%d, \"%s\", \"%s\"); "
% (va_data['virtualauthorid'],
va_data['tag'], va_data['value']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"data table failed")
return (False, [])
query = []
vas_to_insert = [row for row in dat.VIRTUALAUTHORS
if row['virtualauthorid'] in insert_va_ids]
for va_entry in vas_to_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"table failed")
return (False, [])
query = []
query.append("INSERT INTO aidVIRTUALAUTHORS "
"(virtualauthorID, authornamesID, p, clusterID) VALUES "
"(%d, %d, \"%s\", %d); "
% (va_entry['virtualauthorid'],
va_entry['authornamesid'], va_entry['p'],
va_entry['clusterid']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"table failed")
return (False, [])
query = []
if sanity_checks:
db_existing_ra_ids = run_sql("SELECT COUNT(DISTINCT realauthorid) "
"WHERE realauthorid in %s"
, (tuple(insert_ra_ids),))
try:
db_existing_ra_ids = int(db_existing_ra_ids[0][0])
except (ValueError, IndexError, TypeError):
bconfig.LOGGER.exception("Error while reading number of "
"real authors in database")
db_existing_ra_ids = -1
if db_existing_ra_ids != 0:
bconfig.LOGGER.error("Sanity checks reported that the "
"real authors in the memory storage "
"that shall be inserted already exist "
"in the database. Aborting update mission.")
return (False, [])
ra_data_to_insert = [row for row in dat.REALAUTHOR_DATA
if row['realauthorid'] in insert_ra_ids]
for ra_data in ra_data_to_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"data table failed")
return (False, [])
query = []
if ra_data['tag'] != 'outgoing_citation':
query.append("INSERT INTO aidREALAUTHORDATA "
"(realauthorID, tag, value, va_count, "
"va_names_p, va_p) VALUES "
"(%d, \"%s\", \"%s\", %d, "
"%f, %f); "
% (ra_data['realauthorid'],
ra_data['tag'], ra_data['value'],
ra_data['va_count'], ra_data['va_np'],
ra_data['va_p']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"data table failed")
return (False, [])
query = []
ras_to_insert = [row for row in dat.REALAUTHORS
if row['realauthorid'] in insert_ra_ids]
for ra_entry in ras_to_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"table failed")
return (False, [])
query = []
query.append("INSERT INTO aidREALAUTHORS "
"(realauthorID, virtualauthorID, p) VALUES "
"(%d, %d, %f); "
% (ra_entry['realauthorid'],
ra_entry['virtualauthorid'],
ra_entry['p']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"table failed")
return (False, [])
query = []
if sanity_checks:
db_existing_ra_ids = run_sql("SELECT COUNT(DISTINCT realauthorid) "
"WHERE realauthorid in %s"
, (tuple(insert_ra_ids),))
try:
db_existing_ra_ids = int(db_existing_ra_ids[0][0])
except (ValueError, IndexError, TypeError):
bconfig.LOGGER.exception("Error while reading number of "
"real authors in database")
db_existing_ra_ids = -1
if db_existing_ra_ids != len(insert_ra_ids):
bconfig.LOGGER.error("Sanity checks reported that the number of "
"real authors in the memory storage "
"that shall be inserted is not equal to "
"the number of real authors now "
"in the database. Aborting update mission.")
return (False, [])
recid_updates = dat.UPDATES_LOG["rec_updates"]
if recid_updates:
recid_updates.add(-1)
run_sql("DELETE FROM aidDOCLIST WHERE bibrecid in %s"
, (tuple(recid_updates),))
doclist_insert = [row for row in dat.DOC_LIST
if row['bibrecid'] in dat.UPDATES_LOG["rec_updates"]]
for doc in doclist_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into doc list "
"table failed")
return (False, [])
query = []
for processed_author in doc['authornameids']:
query.append("INSERT INTO aidDOCLIST "
"(bibrecID, processed_author) VALUES "
"(%d, %d); "
% (doc['bibrecid'], processed_author))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into doc list "
"table failed")
return (False, [])
query = []
bconfig.LOGGER.log(25, "Saving update to persistence layer finished "
"with success!")
if return_ra_updates:
ra_ids = [[row['realauthorid']] for row in ras_to_insert]
return (True, ra_ids)
else:
return (True, [])
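# The function above repeats one batching pattern for every table: SQL
# statements are buffered in a list and flushed whenever the buffer reaches
# bconfig.TABLE_POPULATION_BUNCH_SIZE. A minimal, self-contained sketch of
# that pattern follows; `execute` stands in for run_sql and the default
# batch size here is a hypothetical value, not the configured one.
def flush_in_batches(statements, execute, batch_max=500):
    """Execute buffered SQL statements in bunches of at most batch_max."""
    query = []
    for statement in statements:
        if len(query) >= batch_max:
            # Flush a full bunch in a single round-trip.
            execute(''.join(query))
            query = []
        query.append(statement)
    if query:
        # Flush the remainder.
        execute(''.join(query))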
| gpl-2.0 |
mahak/neutron | neutron/agent/linux/openvswitch_firewall/firewall.py | 2 | 65149 | # Copyright 2015
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import copy
import eventlet
import netaddr
from neutron_lib.callbacks import events as callbacks_events
from neutron_lib.callbacks import registry as callbacks_registry
from neutron_lib.callbacks import resources as callbacks_resources
from neutron_lib import constants as lib_const
from neutron_lib.plugins import utils as p_utils
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from neutron._i18n import _
from neutron.agent.common import ovs_lib
from neutron.agent import firewall
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import exceptions
from neutron.agent.linux.openvswitch_firewall import iptables
from neutron.agent.linux.openvswitch_firewall import rules
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
LOG = logging.getLogger(__name__)
def _replace_register(flow_params, register_number, register_value):
"""Replace value from flows to given register number
'register_value' key in dictionary will be replaced by register number
given by 'register_number'
:param flow_params: Dictionary containing defined flows
:param register_number: The number of register where value will be stored
:param register_value: Key to be replaced by register number
"""
try:
reg_port = flow_params[register_value]
del flow_params[register_value]
flow_params['reg{:d}'.format(register_number)] = reg_port
except KeyError:
pass
def create_reg_numbers(flow_params):
"""Replace reg_(port|net) values with defined register numbers"""
_replace_register(flow_params, ovsfw_consts.REG_PORT, 'reg_port')
_replace_register(flow_params, ovsfw_consts.REG_NET, 'reg_net')
_replace_register(
flow_params, ovsfw_consts.REG_REMOTE_GROUP, 'reg_remote_group')
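# A minimal sketch of what the two helpers above do to a flow definition;
# the flow dictionary used here is hypothetical.
_example_flow = {'table': 71, 'reg_port': 1, 'actions': 'drop'}
create_reg_numbers(_example_flow)
# The symbolic 'reg_port' key is now stored under the concrete register
# name 'reg%d' % ovsfw_consts.REG_PORT; absent keys such as 'reg_net' are
# simply left untouched by the KeyError handler.
assert 'reg_port' not in _example_flow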
def get_segmentation_id_from_other_config(bridge, port_name):
"""Return segmentation_id stored in OVSDB other_config metadata.
:param bridge: OVSBridge instance where port is.
:param port_name: Name of the port.
"""
try:
other_config = bridge.db_get_val(
'Port', port_name, 'other_config')
network_type = other_config.get('network_type')
if lib_const.TYPE_VLAN == network_type:
return int(other_config.get('segmentation_id'))
except (TypeError, ValueError):
pass
def get_network_type_from_other_config(bridge, port_name):
"""Return network_type stored in OVSDB other_config metadata.
:param bridge: OVSBridge instance where port is.
:param port_name: Name of the port.
"""
other_config = bridge.db_get_val('Port', port_name, 'other_config')
return other_config.get('network_type')
def get_physical_network_from_other_config(bridge, port_name):
"""Return physical_network stored in OVSDB other_config metadata.
:param bridge: OVSBridge instance where port is.
:param port_name: Name of the port.
"""
other_config = bridge.db_get_val('Port', port_name, 'other_config')
return other_config.get('physical_network')
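# A minimal sketch of the three other_config helpers above, using a
# hypothetical stub in place of an OVSBridge; only db_get_val is assumed.
class _StubBridge(object):
    def db_get_val(self, table, port_name, column):
        return {'network_type': 'vlan', 'segmentation_id': '42',
                'physical_network': 'physnet1'}

_stub = _StubBridge()
assert get_segmentation_id_from_other_config(_stub, 'tap0') == 42
assert get_network_type_from_other_config(_stub, 'tap0') == 'vlan'
assert get_physical_network_from_other_config(_stub, 'tap0') == 'physnet1'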
def get_tag_from_other_config(bridge, port_name):
"""Return tag stored in OVSDB other_config metadata.
:param bridge: OVSBridge instance where port is.
:param port_name: Name of the port.
:raises OVSFWTagNotFound: In case tag cannot be found in OVSDB.
"""
other_config = None
try:
other_config = bridge.db_get_val(
'Port', port_name, 'other_config')
return int(other_config['tag'])
except (KeyError, TypeError, ValueError):
raise exceptions.OVSFWTagNotFound(
port_name=port_name, other_config=other_config)
class SecurityGroup(object):
def __init__(self, id_):
self.id = id_
self.raw_rules = []
self.remote_rules = []
self.members = {}
self.ports = set()
def update_rules(self, rules):
"""Separate raw and remote rules.
If a rule has a protocol field, it is normalized to a number
here in order to ease later processing.
"""
self.raw_rules = []
self.remote_rules = []
for rule in copy.deepcopy(rules):
protocol = rule.get('protocol')
if protocol is not None:
if protocol.isdigit():
rule['protocol'] = int(protocol)
elif (rule.get('ethertype') == lib_const.IPv6 and
protocol == lib_const.PROTO_NAME_ICMP):
rule['protocol'] = lib_const.PROTO_NUM_IPV6_ICMP
else:
rule['protocol'] = lib_const.IP_PROTOCOL_MAP.get(
protocol, protocol)
if 'remote_group_id' in rule or 'remote_address_group_id' in rule:
self.remote_rules.append(rule)
else:
self.raw_rules.append(rule)
def get_ethertype_filtered_addresses(self, ethertype):
return self.members.get(ethertype, [])
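# A minimal sketch of the rule splitting and protocol normalization done
# by update_rules above; the rule dictionaries are hypothetical.
_sg = SecurityGroup('example-sg-id')
_sg.update_rules([
    {'protocol': '6', 'direction': 'ingress'},
    {'protocol': 'icmp', 'ethertype': 'IPv6', 'direction': 'ingress'},
    {'protocol': 'udp', 'direction': 'egress',
     'remote_group_id': 'other-sg'},
])
# raw_rules now carries protocols 6 and 58 (ICMP is mapped to IPv6-ICMP
# because of the IPv6 ethertype); the rule referencing a remote group
# lands in remote_rules with protocol 17.
assert [r['protocol'] for r in _sg.raw_rules] == [6, 58]
assert _sg.remote_rules[0]['protocol'] == 17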
class OFPort(object):
def __init__(self, port_dict, ovs_port, vlan_tag, segment_id=None,
network_type=None, physical_network=None):
self.id = port_dict['device']
self.vlan_tag = vlan_tag
self.segment_id = segment_id
self.mac = ovs_port.vif_mac
self.lla_address = str(netutils.get_ipv6_addr_by_EUI64(
lib_const.IPv6_LLA_PREFIX, self.mac))
self.ofport = ovs_port.ofport
self.sec_groups = list()
self.fixed_ips = port_dict.get('fixed_ips', [])
self.neutron_port_dict = port_dict.copy()
self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict, version=4)
self.allowed_pairs_v6 = self._get_allowed_pairs(port_dict, version=6)
self.network_type = network_type
self.physical_network = physical_network
@staticmethod
def _get_allowed_pairs(port_dict, version):
aap_dict = port_dict.get('allowed_address_pairs', set())
return {(aap['mac_address'], aap['ip_address']) for aap in aap_dict
if netaddr.IPNetwork(aap['ip_address']).version == version}
@property
def all_allowed_macs(self):
macs = {item[0] for item in self.allowed_pairs_v4.union(
self.allowed_pairs_v6)}
macs.add(self.mac)
return macs
@property
def ipv4_addresses(self):
return [ip_addr for ip_addr in self.fixed_ips
if netaddr.IPAddress(ip_addr).version == 4]
@property
def ipv6_addresses(self):
return [ip_addr for ip_addr in self.fixed_ips
if netaddr.IPAddress(ip_addr).version == 6]
def update(self, port_dict):
self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict,
version=4)
self.allowed_pairs_v6 = self._get_allowed_pairs(port_dict,
version=6)
# Neighbour discovery uses LLA
self.allowed_pairs_v6.add((self.mac, self.lla_address))
self.fixed_ips = port_dict.get('fixed_ips', [])
self.neutron_port_dict = port_dict.copy()
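# A minimal sketch of the per-IP-version address-pair filtering used by
# OFPort above; the port dictionary is hypothetical.
_pairs_port = {'allowed_address_pairs': [
    {'mac_address': 'fa:16:3e:00:00:01', 'ip_address': '192.168.0.5'},
    {'mac_address': 'fa:16:3e:00:00:01', 'ip_address': '2001:db8::5'},
]}
assert OFPort._get_allowed_pairs(_pairs_port, version=4) == {
    ('fa:16:3e:00:00:01', '192.168.0.5')}
assert OFPort._get_allowed_pairs(_pairs_port, version=6) == {
    ('fa:16:3e:00:00:01', '2001:db8::5')}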
class SGPortMap(object):
def __init__(self):
self.ports = {}
self.sec_groups = {}
# Maps port_id to (ovs_port, vlan_tag) for trusted/unfiltered ports
self.unfiltered = {}
def get_sg(self, sg_id):
return self.sec_groups.get(sg_id, None)
def get_or_create_sg(self, sg_id):
try:
sec_group = self.sec_groups[sg_id]
except KeyError:
sec_group = SecurityGroup(sg_id)
self.sec_groups[sg_id] = sec_group
return sec_group
def delete_sg(self, sg_id):
del self.sec_groups[sg_id]
def create_port(self, port, port_dict):
self.ports[port.id] = port
self.update_port(port, port_dict)
def update_port(self, port, port_dict):
for sec_group in self.sec_groups.values():
sec_group.ports.discard(port)
port.sec_groups = [self.get_or_create_sg(sg_id)
for sg_id in port_dict['security_groups']]
for sec_group in port.sec_groups:
sec_group.ports.add(port)
port.update(port_dict)
def remove_port(self, port):
for sec_group in port.sec_groups:
sec_group.ports.discard(port)
del self.ports[port.id]
def update_rules(self, sg_id, rules):
sec_group = self.get_or_create_sg(sg_id)
sec_group.update_rules(rules)
def update_members(self, sg_id, members):
sec_group = self.get_or_create_sg(sg_id)
sec_group.members = members
class ConjIdMap(object):
"""Handle conjunction ID allocations and deallocations."""
def __new__(cls):
if not hasattr(cls, '_instance'):
cls._instance = super(ConjIdMap, cls).__new__(cls)
return cls._instance
def __init__(self):
self.id_map = collections.defaultdict(self._conj_id_factory)
# Stores the set of conjunction IDs used for each unique tuple
# (sg_id, remote_id, direction, ethertype). Each tuple
# can have up to 8 conjunction IDs (see ConjIPFlowManager.add()).
self.id_map_group = collections.defaultdict(set)
self.id_free = collections.deque()
self.max_id = 0
def _conj_id_factory(self):
# If there is any freed ID, use one.
if self.id_free:
return self.id_free.popleft()
# Allocate a new one. It must be divisible by 8. (See get_conj_id below.)
self.max_id += 8
return self.max_id
def get_conj_id(self, sg_id, remote_id, direction, ethertype):
"""Return a conjunction ID specified by the arguments.
Allocate one if necessary. The returned ID is divisible by 8,
as there are 4 priority levels (see rules.flow_priority_offset)
and 2 conjunction IDs are needed per priority.
"""
if direction not in [lib_const.EGRESS_DIRECTION,
lib_const.INGRESS_DIRECTION]:
raise ValueError(_("Invalid direction '%s'") % direction)
if ethertype not in [lib_const.IPv4, lib_const.IPv6]:
raise ValueError(_("Invalid ethertype '%s'") % ethertype)
return self.id_map[(sg_id, remote_id, direction, ethertype)]
def delete_sg(self, sg_id):
"""Free all conj_ids associated with the sg_id and
return a list of (remote_sg_id, conj_id), which are no longer
in use.
"""
result = set([])
for k in list(self.id_map.keys()):
if sg_id in k[0:2]:
conj_id = self.id_map.pop(k)
result.add((k[1], conj_id))
self.id_free.append(conj_id)
# If the remote_sg_id is removed, the tuple (sg_id, remote_sg_id,
# direction, ethertype) no longer exists; the conjunction IDs assigned
# to this tuple should be removed too.
for k in list(self.id_map_group.keys()):
if sg_id in k[0:2]:
conj_id_groups = self.id_map_group.pop(k, [])
for conj_id in conj_id_groups:
result.add((k[1], conj_id))
return result
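# A worked sketch of the allocation scheme above: the first ID handed out
# is 8 and subsequent ones grow in steps of 8, leaving room for the 4
# priority offsets x 2 conjunction IDs described in get_conj_id. The group
# identifiers used here are hypothetical; note that ConjIdMap is a
# singleton, so this shares state with any other instance.
_conj_map = ConjIdMap()
_base = _conj_map.get_conj_id('sg1', 'sg2', lib_const.EGRESS_DIRECTION,
                              lib_const.IPv4)
assert _base % 8 == 0
# A rule at priority offset 3 would then use conj IDs _base + 6 and
# _base + 7 (see ConjIPFlowManager.add below).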
class ConjIPFlowManager(object):
"""Manage conj_id allocation and remote securitygroups derived
conjunction flows.
Flows managed by this class is of form:
nw_src=10.2.3.4,reg_net=0xf00 actions=conjunction(123,1/2)
These flows are managed per network and are usually per remote_group_id,
but flows from different remote_group need to be merged on shared networks,
where the complexity arises and this manager is needed.
"""
def __init__(self, driver):
self.conj_id_map = ConjIdMap()
self.driver = driver
# The following two are dict of dicts and are indexed like:
# self.x[vlan_tag][(direction, ethertype)]
self.conj_ids = collections.defaultdict(dict)
self.flow_state = collections.defaultdict(
lambda: collections.defaultdict(dict))
def _build_addr_conj_id_map(self, ethertype, sg_ag_conj_id_map):
"""Build a map of addr -> list of conj_ids."""
addr_to_conj = collections.defaultdict(list)
for remote_id, conj_id_set in sg_ag_conj_id_map.items():
remote_group = self.driver.sg_port_map.get_sg(remote_id)
if not remote_group or not remote_group.members:
LOG.debug('No member for security group or '
'address group %s', remote_id)
continue
for addr in remote_group.get_ethertype_filtered_addresses(
ethertype):
addr_to_conj[addr].extend(conj_id_set)
return addr_to_conj
def _update_flows_for_vlan_subr(self, direction, ethertype, vlan_tag,
flow_state, addr_to_conj,
conj_id_to_remove):
"""Do the actual flow updates for given direction and ethertype."""
conj_id_to_remove = conj_id_to_remove or []
# Delete any current flow related to any deleted IP address, before
# creating the flows for the current IPs.
self.driver.delete_flows_for_flow_state(
flow_state, addr_to_conj, direction, ethertype, vlan_tag)
for conj_id_set in conj_id_to_remove:
# Remove any remaining flow with remote SG/AG ID conj_id_to_remove
for current_ip, conj_ids in flow_state.items():
conj_ids_to_remove = conj_id_set & set(conj_ids)
self.driver.delete_flow_for_ip(
current_ip, direction, ethertype, vlan_tag,
conj_ids_to_remove)
# NOTE(hangyang): Handle add/delete overlapped IPs among
# remote security groups and remote address groups
removed_ips = set([str(netaddr.IPNetwork(addr[0]).cidr) for addr in (
set(flow_state.keys()) - set(addr_to_conj.keys()))])
ip_to_conj = collections.defaultdict(set)
for addr, conj_ids in addr_to_conj.items():
# Addresses from remote security groups include MAC addresses;
# those from remote address groups do not.
ip_to_conj[str(netaddr.IPNetwork(addr[0]).cidr)].update(conj_ids)
for addr in addr_to_conj.keys():
ip_cidr = str(netaddr.IPNetwork(addr[0]).cidr)
# When the overlapped IP in remote security group and remote
# address group have different conjunction ids but with the
# same priority offset, we need to combine the conj_ids together
# before create flows otherwise flows will be overridden in the
# creation sequence.
conj_ids = list(ip_to_conj[ip_cidr])
conj_ids.sort()
if flow_state.get(addr) == conj_ids and ip_cidr not in removed_ips:
# When there are IP overlaps among remote security groups
# and remote address groups, removal of the overlapped ips
# from one remote group will also delete the flows for the
# other groups because the non-strict delete method cannot
# match flow priority or actions for different conjunction
# ids, therefore we need to recreate the affected flows.
continue
for flow in rules.create_flows_for_ip_address(
addr, direction, ethertype, vlan_tag, conj_ids):
self.driver._add_flow(**flow)
def update_flows_for_vlan(self, vlan_tag, conj_id_to_remove=None):
"""Install action=conjunction(conj_id, 1/2) flows,
which depend on IP addresses of remote_group_id or
remote_address_group_id.
"""
for (direction, ethertype), sg_ag_conj_id_map in (
self.conj_ids[vlan_tag].items()):
# TODO(toshii): optimize when remote_groups have
# no address overlaps.
addr_to_conj = self._build_addr_conj_id_map(
ethertype, sg_ag_conj_id_map)
self._update_flows_for_vlan_subr(
direction, ethertype, vlan_tag,
self.flow_state[vlan_tag][(direction, ethertype)],
addr_to_conj, conj_id_to_remove)
self.flow_state[vlan_tag][(direction, ethertype)] = addr_to_conj
def add(self, vlan_tag, sg_id, remote_id, direction, ethertype,
priority_offset):
"""Get conj_id specified by the arguments
and notify the manager that
(remote_id, direction, ethertype, conj_id) flows need
to be populated on the vlan_tag network.
A caller must call update_flows_for_vlan to have the change in effect.
"""
conj_id = self.conj_id_map.get_conj_id(
sg_id, remote_id, direction, ethertype) + priority_offset * 2
if (direction, ethertype) not in self.conj_ids[vlan_tag]:
self.conj_ids[vlan_tag][(direction, ethertype)] = (
collections.defaultdict(set))
self.conj_ids[vlan_tag][(direction, ethertype)][remote_id].add(
conj_id)
conj_id_tuple = (sg_id, remote_id, direction, ethertype)
self.conj_id_map.id_map_group[conj_id_tuple].add(conj_id)
return conj_id
def sg_removed(self, sg_id):
"""Handle SG removal events.
Free all conj_ids associated with the sg_id removed and clean up
obsolete entries from the self.conj_ids map. Unlike the add
method, it also updates flows.
If a SG is removed, both sg_id and remote_sg_id should be removed from
the "vlan_conj_id_map".
"""
id_set = self.conj_id_map.delete_sg(sg_id)
unused_dict = collections.defaultdict(set)
for remote_sg_id, conj_id in id_set:
unused_dict[remote_sg_id].add(conj_id)
for vlan_tag, vlan_conj_id_map in self.conj_ids.items():
update = False
conj_id_to_remove = []
for sg_conj_id_map in vlan_conj_id_map.values():
for remote_sg_id, unused in unused_dict.items():
if (remote_sg_id in sg_conj_id_map and
sg_conj_id_map[remote_sg_id] & unused):
if remote_sg_id == sg_id:
conj_id_to_remove.append(
sg_conj_id_map[remote_sg_id] & unused)
sg_conj_id_map[remote_sg_id] -= unused
if not sg_conj_id_map[remote_sg_id]:
del sg_conj_id_map[remote_sg_id]
update = True
if update:
self.update_flows_for_vlan(vlan_tag,
conj_id_to_remove=conj_id_to_remove)
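# A minimal sketch of how _build_addr_conj_id_map above expands remote
# group members into an address map; the driver, security group, and IDs
# below are hypothetical stubs, not real neutron objects.
class _FakeRemoteSG(object):
    members = {lib_const.IPv4: [('10.0.0.1/32', 'fa:16:3e:00:00:01')]}

    def get_ethertype_filtered_addresses(self, ethertype):
        return self.members.get(ethertype, [])

class _FakeDriver(object):
    class sg_port_map(object):
        @staticmethod
        def get_sg(remote_id):
            return _FakeRemoteSG()

_manager = ConjIPFlowManager(_FakeDriver())
_addr_map = _manager._build_addr_conj_id_map(lib_const.IPv4,
                                             {'remote-sg': {16, 17}})
# Each member address now maps to every conjunction ID of its remote
# group (list order may vary since the IDs come from a set):
assert sorted(_addr_map[('10.0.0.1/32', 'fa:16:3e:00:00:01')]) == [16, 17]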
class OVSFirewallDriver(firewall.FirewallDriver):
REQUIRED_PROTOCOLS = [
ovs_consts.OPENFLOW10,
ovs_consts.OPENFLOW11,
ovs_consts.OPENFLOW12,
ovs_consts.OPENFLOW13,
ovs_consts.OPENFLOW14,
]
provides_arp_spoofing_protection = True
def __init__(self, integration_bridge):
"""Initialize object
:param integration_bridge: Bridge on which openflow rules will be
applied
"""
self.permitted_ethertypes = cfg.CONF.SECURITYGROUP.permitted_ethertypes
self.int_br = self.initialize_bridge(integration_bridge)
self._initialize_sg()
self._update_cookie = None
self._deferred = False
self.iptables_helper = iptables.Helper(self.int_br.br)
self.iptables_helper.load_driver_if_needed()
self.ipconntrack = ip_conntrack.OvsIpConntrackManager()
self._initialize_firewall()
callbacks_registry.subscribe(
self._init_firewall_callback,
callbacks_resources.AGENT,
callbacks_events.OVS_RESTARTED)
def _init_firewall_callback(self, resource, event, trigger, payload=None):
LOG.info("Reinitialize Openvswitch firewall after OVS restart.")
self._initialize_sg()
self._initialize_firewall()
def _initialize_sg(self):
self.sg_port_map = SGPortMap()
self.conj_ip_manager = ConjIPFlowManager(self)
self.sg_to_delete = set()
def _initialize_firewall(self):
self._drop_all_unmatched_flows()
self._initialize_common_flows()
self._initialize_third_party_tables()
@contextlib.contextmanager
def update_cookie_context(self):
try:
self._update_cookie = self.int_br.br.request_cookie()
yield
finally:
self.int_br.br.unset_cookie(self._update_cookie)
self._update_cookie = None
def security_group_updated(self, action_type, sec_group_ids,
device_ids=None):
"""The current driver doesn't make use of this method.
It exists here to avoid NotImplementedError raised from the parent
class's method.
"""
def _accept_flow(self, **flow):
for f in rules.create_accept_flows(flow):
self._add_flow(**f)
def _add_flow(self, **kwargs):
dl_type = kwargs.get('dl_type')
create_reg_numbers(kwargs)
if isinstance(dl_type, int):
kwargs['dl_type'] = "0x{:04x}".format(dl_type)
if self._update_cookie:
kwargs['cookie'] = self._update_cookie
if self._deferred:
self.int_br.add_flow(**kwargs)
else:
self.int_br.br.add_flow(**kwargs)
def _delete_flows(self, **kwargs):
create_reg_numbers(kwargs)
deferred = kwargs.pop('deferred', self._deferred)
if deferred:
self.int_br.delete_flows(**kwargs)
else:
self.int_br.br.delete_flows(**kwargs)
def _strict_delete_flow(self, **kwargs):
"""Delete given flow right away even if bridge is deferred.
Delete command will use strict delete.
"""
create_reg_numbers(kwargs)
self.int_br.br.delete_flows(strict=True, **kwargs)
@staticmethod
def initialize_bridge(int_br):
int_br.add_protocols(*OVSFirewallDriver.REQUIRED_PROTOCOLS)
return int_br.deferred(full_ordered=True, use_bundle=True)
def _drop_all_unmatched_flows(self):
for table in ovs_consts.OVS_FIREWALL_TABLES:
self.int_br.br.add_flow(table=table, priority=0, actions='drop')
def _initialize_common_flows(self):
# Remove conntrack information from tracked packets
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=110,
ct_state=ovsfw_consts.OF_STATE_TRACKED,
actions='ct_clear,'
'resubmit(,%d)' % ovs_consts.BASE_EGRESS_TABLE,
)
def _initialize_third_party_tables(self):
self.int_br.br.add_flow(
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE,
priority=1,
actions='normal')
self.int_br.br.add_flow(
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
priority=1,
actions='resubmit(,%d)' % (
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
)
for table in (ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
ovs_consts.DROPPED_TRAFFIC_TABLE):
self.int_br.br.add_flow(
table=table, priority=0, actions='drop')
def get_ovs_port(self, port_id):
ovs_port = self.int_br.br.get_vif_port_by_id(port_id)
if not ovs_port or ovs_port.ofport in (ovs_lib.UNASSIGNED_OFPORT,
ovs_lib.INVALID_OFPORT):
raise exceptions.OVSFWPortNotFound(port_id=port_id)
return ovs_port
def get_ovs_ports(self, port_ids):
return self.int_br.br.get_vifs_by_ids(port_ids)
def _get_port_vlan_tag(self, port_name):
return get_tag_from_other_config(self.int_br.br, port_name)
def _get_port_segmentation_id(self, port_name):
return get_segmentation_id_from_other_config(
self.int_br.br, port_name)
def _get_port_network_type(self, port_name):
return get_network_type_from_other_config(
self.int_br.br, port_name)
def _get_port_physical_network(self, port_name):
return get_physical_network_from_other_config(
self.int_br.br, port_name)
def _delete_invalid_conntrack_entries_for_port(self, port, of_port):
port['of_port'] = of_port
for ethertype in [lib_const.IPv4, lib_const.IPv6]:
self.ipconntrack.delete_conntrack_state_by_remote_ips(
[port], ethertype, set(), mark=ovsfw_consts.CT_MARK_INVALID)
def get_ofport(self, port):
port_id = port['device']
return self.sg_port_map.ports.get(port_id)
def get_or_create_ofport(self, port):
"""Get ofport specified by port['device'], checking and reflecting
ofport changes.
If ofport is nonexistent, create and return one.
"""
port_id = port['device']
ovs_port = self.get_ovs_port(port_id)
try:
of_port = self.sg_port_map.ports[port_id]
except KeyError:
port_vlan_id = self._get_port_vlan_tag(ovs_port.port_name)
segment_id = self._get_port_segmentation_id(
ovs_port.port_name)
network_type = self._get_port_network_type(
ovs_port.port_name)
physical_network = self._get_port_physical_network(
ovs_port.port_name)
of_port = OFPort(port, ovs_port, port_vlan_id,
segment_id,
network_type, physical_network)
self.sg_port_map.create_port(of_port, port)
else:
if of_port.ofport != ovs_port.ofport:
self.sg_port_map.remove_port(of_port)
of_port = OFPort(port, ovs_port, of_port.vlan_tag,
of_port.segment_id)
self.sg_port_map.create_port(of_port, port)
else:
self.sg_port_map.update_port(of_port, port)
return of_port
def is_port_managed(self, port):
return port['device'] in self.sg_port_map.ports
def prepare_port_filter(self, port):
self.iptables_helper.cleanup_port(port)
if not firewall.port_sec_enabled(port):
self._initialize_egress_no_port_security(port['device'])
return
try:
old_of_port = self.get_ofport(port)
of_port = self.get_or_create_ofport(port)
if old_of_port:
LOG.info("Initializing port %s that was already initialized.",
port['device'])
self._update_flows_for_port(of_port, old_of_port)
else:
self._set_port_filters(of_port)
self._delete_invalid_conntrack_entries_for_port(port, of_port)
except exceptions.OVSFWPortNotFound as not_found_error:
LOG.info("port %(port_id)s does not exist in ovsdb: %(err)s.",
{'port_id': port['device'],
'err': not_found_error})
except exceptions.OVSFWTagNotFound as tag_not_found:
LOG.info("Tag was not found for port %(port_id)s: %(err)s.",
{'port_id': port['device'],
'err': tag_not_found})
def update_port_filter(self, port):
"""Update rules for given port
Current existing filtering rules are removed and new ones are generated
based on current loaded security group rules and members.
"""
if not firewall.port_sec_enabled(port):
self.remove_port_filter(port)
self._initialize_egress_no_port_security(port['device'])
return
elif not self.is_port_managed(port):
try:
self._remove_egress_no_port_security(port['device'])
except exceptions.OVSFWPortNotHandled as e:
LOG.debug(e)
else:
self.prepare_port_filter(port)
return
try:
# Make sure delete old allowed_address_pair MACs because
# allowed_address_pair MACs will be updated in
# self.get_or_create_ofport(port)
old_of_port = self.get_ofport(port)
of_port = self.get_or_create_ofport(port)
if old_of_port:
self._update_flows_for_port(of_port, old_of_port)
else:
self._set_port_filters(of_port)
self._delete_invalid_conntrack_entries_for_port(port, of_port)
except exceptions.OVSFWPortNotFound as not_found_error:
LOG.info("port %(port_id)s does not exist in ovsdb: %(err)s.",
{'port_id': port['device'],
'err': not_found_error})
# If port doesn't exist in ovsdb, lets ensure that there are no
# leftovers
self.remove_port_filter(port)
except exceptions.OVSFWTagNotFound as tag_not_found:
LOG.info("Tag was not found for port %(port_id)s: %(err)s.",
{'port_id': port['device'],
'err': tag_not_found})
def _set_port_filters(self, of_port):
self.initialize_port_flows(of_port)
self.add_flows_from_rules(of_port)
def _update_flows_for_port(self, of_port, old_of_port):
with self.update_cookie_context():
self._set_port_filters(of_port)
# Flush the flows caused by changes made to deferred bridge. The reason
# is that following delete_all_port_flows() call uses --strict
# parameter that cannot be combined with other non-strict rules, hence
# all parameters with --strict are applied right away. In order to
# avoid applying delete rules with --strict *before*
# _set_port_filters() we dump currently cached flows here.
self.int_br.apply_flows()
self.delete_all_port_flows(old_of_port)
# Rewrite update cookie with default cookie
self._set_port_filters(of_port)
def remove_port_filter(self, port):
"""Remove port from firewall
All flows related to this port are removed from ovs. Port is also
removed from ports managed by this firewall.
"""
if self.is_port_managed(port):
of_port = self.get_ofport(port)
self.delete_all_port_flows(of_port)
self.sg_port_map.remove_port(of_port)
for sec_group in of_port.sec_groups:
self._schedule_sg_deletion_maybe(sec_group.id)
def update_security_group_rules(self, sg_id, rules):
self.sg_port_map.update_rules(sg_id, rules)
def update_security_group_members(self, sg_id, member_ips):
self.sg_port_map.update_members(sg_id, member_ips)
if not member_ips:
self._schedule_sg_deletion_maybe(sg_id)
def _schedule_sg_deletion_maybe(self, sg_id):
"""Schedule possible deletion of the given SG.
This function must be called when the number of ports
associated to sg_id drops to zero, as it isn't possible
to know SG deletions from agents due to RPC API design.
"""
sec_group = self.sg_port_map.get_or_create_sg(sg_id)
if not sec_group.members or not sec_group.ports:
self.sg_to_delete.add(sg_id)
def _cleanup_stale_sg(self):
sg_to_delete = self.sg_to_delete
self.sg_to_delete = set()
for sg_id in sg_to_delete:
sec_group = self.sg_port_map.get_sg(sg_id)
if sec_group.members and sec_group.ports:
# sec_group is still in use
continue
self.conj_ip_manager.sg_removed(sg_id)
self.sg_port_map.delete_sg(sg_id)
def process_trusted_ports(self, port_ids):
"""Pass packets from these ports directly to ingress pipeline."""
ovs_ports = self.get_ovs_ports(port_ids)
for port_id in port_ids:
self._initialize_egress_no_port_security(port_id,
ovs_ports=ovs_ports)
# yield to let other greenthreads proceed
eventlet.sleep(0)
def remove_trusted_ports(self, port_ids):
for port_id in port_ids:
try:
self._remove_egress_no_port_security(port_id)
except exceptions.OVSFWPortNotHandled as e:
LOG.debug(e)
def filter_defer_apply_on(self):
self._deferred = True
def filter_defer_apply_off(self):
if self._deferred:
self._cleanup_stale_sg()
self.int_br.apply_flows()
self._deferred = False
@property
def ports(self):
return {id_: port.neutron_port_dict
for id_, port in self.sg_port_map.ports.items()}
def install_physical_direct_flow(self, mac, segment_id,
ofport, local_vlan, network_type):
actions = ('set_field:{:d}->reg{:d},'
'set_field:{:d}->reg{:d},').format(
ofport,
ovsfw_consts.REG_PORT,
# This always needs the local vlan.
local_vlan,
ovsfw_consts.REG_NET)
if network_type == lib_const.TYPE_VLAN:
actions += 'strip_vlan,resubmit(,{:d})'.format(
ovs_consts.BASE_INGRESS_TABLE)
self._add_flow(
table=ovs_consts.TRANSIENT_TABLE,
priority=90,
dl_dst=mac,
dl_vlan='0x%x' % segment_id,
actions=actions)
elif network_type == lib_const.TYPE_FLAT:
# If the port belongs to a flat network, we need to match on vlan_tci
# and needn't pop a VLAN tag
actions += 'resubmit(,{:d})'.format(
ovs_consts.BASE_INGRESS_TABLE)
self._add_flow(
table=ovs_consts.TRANSIENT_TABLE,
priority=90,
dl_dst=mac,
vlan_tci=ovs_consts.FLAT_VLAN_TCI,
actions=actions)
def delete_physical_direct_flow(self, mac, segment_id):
if segment_id:
self._strict_delete_flow(priority=90,
table=ovs_consts.TRANSIENT_TABLE,
dl_dst=mac,
dl_vlan=segment_id)
else:
self._strict_delete_flow(priority=90,
table=ovs_consts.TRANSIENT_TABLE,
dl_dst=mac,
vlan_tci=ovs_consts.FLAT_VLAN_TCI)
def initialize_port_flows(self, port):
"""Set base flows for port
:param port: OFPort instance
"""
# Identify egress flow
self._add_flow(
table=ovs_consts.TRANSIENT_TABLE,
priority=100,
in_port=port.ofport,
actions='set_field:{:d}->reg{:d},'
'set_field:{:d}->reg{:d},'
'resubmit(,{:d})'.format(
port.ofport,
ovsfw_consts.REG_PORT,
port.vlan_tag,
ovsfw_consts.REG_NET,
ovs_consts.BASE_EGRESS_TABLE)
)
# Identify ingress flows
for mac_addr in port.all_allowed_macs:
self.install_physical_direct_flow(
mac_addr, port.segment_id, port.ofport,
port.vlan_tag, port.network_type)
self._add_flow(
table=ovs_consts.TRANSIENT_TABLE,
priority=90,
dl_dst=mac_addr,
dl_vlan='0x%x' % port.vlan_tag,
actions='set_field:{:d}->reg{:d},'
'set_field:{:d}->reg{:d},'
'strip_vlan,resubmit(,{:d})'.format(
port.ofport,
ovsfw_consts.REG_PORT,
port.vlan_tag,
ovsfw_consts.REG_NET,
ovs_consts.BASE_INGRESS_TABLE),
)
self._initialize_egress(port)
self._initialize_ingress(port)
def _initialize_egress_ipv6_icmp(self, port, allowed_pairs):
allowed_pairs = allowed_pairs.union({(port.mac, port.lla_address)})
for mac_addr, ip_addr in allowed_pairs:
for icmp_type in firewall.ICMPV6_ALLOWED_EGRESS_TYPES:
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=95,
in_port=port.ofport,
reg_port=port.ofport,
dl_type=lib_const.ETHERTYPE_IPV6,
nw_proto=lib_const.PROTO_NUM_IPV6_ICMP,
icmp_type=icmp_type,
dl_src=mac_addr,
ipv6_src=ip_addr,
actions='resubmit(,%d)' % (
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
)
for icmp_type in firewall.ICMPV6_RESTRICTED_EGRESS_TYPES:
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=95,
in_port=port.ofport,
reg_port=port.ofport,
dl_type=lib_const.ETHERTYPE_IPV6,
nw_proto=lib_const.PROTO_NUM_IPV6_ICMP,
icmp_type=icmp_type,
nd_target=ip_addr,
actions='resubmit(,%d)' % (
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
)
def _initialize_egress_no_port_security(self, port_id, ovs_ports=None):
try:
if ovs_ports is not None:
ovs_port = ovs_ports.get(port_id)
if not ovs_port:
raise exceptions.OVSFWPortNotFound(port_id=port_id)
else:
ovs_port = self.get_ovs_port(port_id)
vlan_tag = self._get_port_vlan_tag(ovs_port.port_name)
except exceptions.OVSFWTagNotFound:
# It's a patch port, don't set anything
return
except exceptions.OVSFWPortNotFound as not_found_e:
LOG.error("Initializing unfiltered port %(port_id)s that does not "
"exist in ovsdb: %(err)s.",
{'port_id': port_id,
'err': not_found_e})
return
self.sg_port_map.unfiltered[port_id] = (ovs_port, vlan_tag)
self._add_flow(
table=ovs_consts.TRANSIENT_TABLE,
priority=100,
in_port=ovs_port.ofport,
actions='set_field:%d->reg%d,'
'set_field:%d->reg%d,'
'resubmit(,%d)' % (
ovs_port.ofport,
ovsfw_consts.REG_PORT,
vlan_tag,
ovsfw_consts.REG_NET,
ovs_consts.ACCEPT_OR_INGRESS_TABLE)
)
self._add_flow(
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
priority=80,
reg_port=ovs_port.ofport,
actions='resubmit(,%d)' % (
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
)
tunnel_direct_info = {
"network_type": self._get_port_network_type(ovs_port.port_name),
"physical_network": self._get_port_physical_network(
ovs_port.port_name)
}
self.install_accepted_egress_direct_flow(
ovs_port.vif_mac, vlan_tag, ovs_port.ofport,
tunnel_direct_info=tunnel_direct_info)
def _remove_egress_no_port_security(self, port_id):
try:
ovs_port, vlan_tag = self.sg_port_map.unfiltered[port_id]
except KeyError:
raise exceptions.OVSFWPortNotHandled(port_id=port_id)
self._delete_flows(
table=ovs_consts.TRANSIENT_TABLE,
in_port=ovs_port.ofport
)
self._delete_flows(
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
reg_port=ovs_port.ofport
)
self.delete_accepted_egress_direct_flow(
ovs_port.vif_mac, vlan_tag)
del self.sg_port_map.unfiltered[port_id]
def _initialize_egress(self, port):
"""Identify egress traffic and send it to egress base"""
# Apply mac/ip pairs for IPv4
allowed_mac_ipv4_pairs = port.allowed_pairs_v4.union(
{(port.mac, ip_addr) for ip_addr in port.ipv4_addresses})
for mac_addr, ip_addr in allowed_mac_ipv4_pairs:
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=95,
in_port=port.ofport,
reg_port=port.ofport,
dl_src=mac_addr,
dl_type=lib_const.ETHERTYPE_ARP,
arp_spa=ip_addr,
actions='resubmit(,%d)' % (
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
)
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=65,
reg_port=port.ofport,
dl_type=lib_const.ETHERTYPE_IP,
in_port=port.ofport,
dl_src=mac_addr,
nw_src=ip_addr,
actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format(
ovs_consts.RULES_EGRESS_TABLE,
ovsfw_consts.REG_NET)
)
# Apply mac/ip pairs for IPv6
allowed_mac_ipv6_pairs = port.allowed_pairs_v6.union(
{(port.mac, ip_addr) for ip_addr in port.ipv6_addresses})
self._initialize_egress_ipv6_icmp(port, allowed_mac_ipv6_pairs)
for mac_addr, ip_addr in allowed_mac_ipv6_pairs:
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=65,
reg_port=port.ofport,
in_port=port.ofport,
dl_type=lib_const.ETHERTYPE_IPV6,
dl_src=mac_addr,
ipv6_src=ip_addr,
actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format(
ovs_consts.RULES_EGRESS_TABLE,
ovsfw_consts.REG_NET)
)
# DHCP discovery
additional_ipv4_filters = [
{"dl_src": mac, "nw_src": ip}
for mac, ip in (*allowed_mac_ipv4_pairs,
(port.mac, '0.0.0.0'),)]
additional_ipv6_filters = [
{"dl_src": mac, "ipv6_src": ip}
for mac, ip in allowed_mac_ipv6_pairs]
for dl_type, src_port, dst_port, filters_list in (
(lib_const.ETHERTYPE_IP, 68, 67, additional_ipv4_filters),
(lib_const.ETHERTYPE_IPV6, 546, 547, additional_ipv6_filters)):
for additional_filters in filters_list:
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=80,
reg_port=port.ofport,
in_port=port.ofport,
dl_type=dl_type,
**additional_filters,
nw_proto=lib_const.PROTO_NUM_UDP,
tp_src=src_port,
tp_dst=dst_port,
actions='resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE)
)
# Ban dhcp service running on an instance
for dl_type, src_port, dst_port in (
(lib_const.ETHERTYPE_IP, 67, 68),
(lib_const.ETHERTYPE_IPV6, 547, 546)):
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=70,
in_port=port.ofport,
reg_port=port.ofport,
dl_type=dl_type,
nw_proto=lib_const.PROTO_NUM_UDP,
tp_src=src_port,
tp_dst=dst_port,
actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
# Drop Router Advertisements from instances
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=70,
in_port=port.ofport,
reg_port=port.ofport,
dl_type=lib_const.ETHERTYPE_IPV6,
nw_proto=lib_const.PROTO_NUM_IPV6_ICMP,
icmp_type=lib_const.ICMPV6_TYPE_RA,
actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
# Allow custom ethertypes
for permitted_ethertype in self.permitted_ethertypes:
if permitted_ethertype[:2] == '0x':
try:
hex_ethertype = hex(int(permitted_ethertype, base=16))
action = ('resubmit(,%d)' %
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=95,
dl_type=hex_ethertype,
reg_port=port.ofport,
actions=action
)
continue
except ValueError:
pass
LOG.warning("Custom ethertype %(permitted_ethertype)s is not "
"a hexadecimal number.",
{'permitted_ethertype': permitted_ethertype})
# Drop all remaining egress connections
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=10,
in_port=port.ofport,
reg_port=port.ofport,
actions='ct_clear,'
'resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
# Fill in accept_or_ingress table by checking that traffic is ingress
# and if not, accept it
for mac_addr in port.all_allowed_macs:
self._add_flow(
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
priority=100,
dl_dst=mac_addr,
reg_net=port.vlan_tag,
actions='set_field:{:d}->reg{:d},resubmit(,{:d})'.format(
port.ofport,
ovsfw_consts.REG_PORT,
ovs_consts.BASE_INGRESS_TABLE),
)
for ethertype in [lib_const.ETHERTYPE_IP, lib_const.ETHERTYPE_IPV6]:
self._add_flow(
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
priority=90,
dl_type=ethertype,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
actions='ct(commit,zone=NXM_NX_REG{:d}[0..15]),'
'resubmit(,{:d})'.format(
ovsfw_consts.REG_NET,
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE)
)
self._add_flow(
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
priority=80,
reg_port=port.ofport,
actions='resubmit(,%d)' % (
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
)
tunnel_direct_info = {"network_type": port.network_type,
"physical_network": port.physical_network}
self.install_accepted_egress_direct_flow(
port.mac, port.vlan_tag, port.ofport,
tunnel_direct_info=tunnel_direct_info)
def install_accepted_egress_direct_flow(self, mac, vlan_tag, dst_port,
tunnel_direct_info=None):
if not cfg.CONF.AGENT.explicitly_egress_direct:
return
# Prevent flood for accepted egress traffic
self._add_flow(
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE,
priority=12,
dl_dst=mac,
reg_net=vlan_tag,
actions='output:{:d}'.format(dst_port)
)
# The former flow may not match, which means the destination port is
# not on this host. In that case, direct the packet to the mapped
# bridge(s).
if tunnel_direct_info:
patch_ofport = ovs_lib.INVALID_OFPORT
if tunnel_direct_info["network_type"] in (
lib_const.TYPE_VXLAN, lib_const.TYPE_GRE,
lib_const.TYPE_GENEVE):
# Some ports, like a router's internal gateway, do not install
# the l2pop-related flows, so we transmit the ARP request
# packet to the tunnel bridge using the NORMAL action as usual.
port_name = cfg.CONF.OVS.int_peer_patch_port
patch_ofport = self.int_br.br.get_port_ofport(port_name)
elif tunnel_direct_info["network_type"] == lib_const.TYPE_VLAN:
physical_network = tunnel_direct_info["physical_network"]
if not physical_network:
return
bridge_mappings = helpers.parse_mappings(
cfg.CONF.OVS.bridge_mappings)
bridge = bridge_mappings.get(physical_network)
port_name = p_utils.get_interface_name(
bridge, prefix=ovs_consts.PEER_INTEGRATION_PREFIX)
patch_ofport = self.int_br.br.get_port_ofport(port_name)
if patch_ofport is not ovs_lib.INVALID_OFPORT:
self._add_flow(
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE,
priority=10,
dl_src=mac,
dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
reg_net=vlan_tag,
actions='mod_vlan_vid:{:d},output:{:d}'.format(
vlan_tag,
patch_ofport)
)
def delete_accepted_egress_direct_flow(self, mac, vlan_tag):
self._delete_flows(
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE,
dl_dst=mac,
reg_net=vlan_tag)
self._delete_flows(
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE,
dl_src=mac,
reg_net=vlan_tag)
def _initialize_tracked_egress(self, port):
# Drop invalid packets
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=50,
ct_state=ovsfw_consts.OF_STATE_INVALID,
actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
# Drop traffic for removed sg rules
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=50,
reg_port=port.ofport,
ct_mark=ovsfw_consts.CT_MARK_INVALID,
actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
for state in (
ovsfw_consts.OF_STATE_ESTABLISHED_REPLY,
ovsfw_consts.OF_STATE_RELATED,
):
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=50,
ct_state=state,
ct_mark=ovsfw_consts.CT_MARK_NORMAL,
reg_port=port.ofport,
ct_zone=port.vlan_tag,
actions='resubmit(,%d)' % (
ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
)
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=40,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_NOT_ESTABLISHED,
actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
for ethertype in [lib_const.ETHERTYPE_IP, lib_const.ETHERTYPE_IPV6]:
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=40,
dl_type=ethertype,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_ESTABLISHED,
actions="ct(commit,zone=NXM_NX_REG{:d}[0..15],"
"exec(set_field:{:s}->ct_mark))".format(
ovsfw_consts.REG_NET,
ovsfw_consts.CT_MARK_INVALID)
)
def _initialize_ingress_ipv6_icmp(self, port):
for icmp_type in firewall.ICMPV6_ALLOWED_INGRESS_TYPES:
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
priority=100,
reg_port=port.ofport,
dl_type=lib_const.ETHERTYPE_IPV6,
nw_proto=lib_const.PROTO_NUM_IPV6_ICMP,
icmp_type=icmp_type,
actions='output:{:d}'.format(port.ofport)
)
def _initialize_ingress(self, port):
# Allow incoming ARPs
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
priority=100,
dl_type=lib_const.ETHERTYPE_ARP,
reg_port=port.ofport,
actions='output:{:d}'.format(port.ofport)
)
self._initialize_ingress_ipv6_icmp(port)
# DHCP offers
for dl_type, src_port, dst_port in (
(lib_const.ETHERTYPE_IP, 67, 68),
(lib_const.ETHERTYPE_IPV6, 547, 546)):
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
priority=95,
reg_port=port.ofport,
dl_type=dl_type,
nw_proto=lib_const.PROTO_NUM_UDP,
tp_src=src_port,
tp_dst=dst_port,
actions='output:{:d}'.format(port.ofport)
)
# Track untracked
for dl_type in (lib_const.ETHERTYPE_IP, lib_const.ETHERTYPE_IPV6):
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
priority=90,
reg_port=port.ofport,
dl_type=dl_type,
ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED,
actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format(
ovs_consts.RULES_INGRESS_TABLE,
ovsfw_consts.REG_NET)
)
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
ct_state=ovsfw_consts.OF_STATE_TRACKED,
priority=80,
reg_port=port.ofport,
actions='resubmit(,{:d})'.format(ovs_consts.RULES_INGRESS_TABLE)
)
def _initialize_tracked_ingress(self, port):
# Drop invalid packets
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=50,
ct_state=ovsfw_consts.OF_STATE_INVALID,
actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
# Drop traffic for removed sg rules
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=50,
reg_port=port.ofport,
ct_mark=ovsfw_consts.CT_MARK_INVALID,
actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
# NOTE: The OUTPUT action is used instead of the NORMAL action to
# reduce CPU utilization, but it turns the datapath rule into a flood
# rule, because MAC learning does not happen on ingress traffic.
# While this is acceptable without offload, in OVS hardware offload a
# flood rule is not offloaded, so we change the action to NORMAL in
# the offload case. When explicitly_egress_direct is used, the
# pipeline contains no NORMAL action, so the flood-rule issue does
# not arise.
actions = 'output:{:d}'.format(port.ofport)
if (self.int_br.br.is_hw_offload_enabled and
not cfg.CONF.AGENT.explicitly_egress_direct):
actions = 'mod_vlan_vid:{:d},normal'.format(port.vlan_tag)
# Allow established and related connections
for state in (ovsfw_consts.OF_STATE_ESTABLISHED_REPLY,
ovsfw_consts.OF_STATE_RELATED):
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=50,
reg_port=port.ofport,
ct_state=state,
ct_mark=ovsfw_consts.CT_MARK_NORMAL,
ct_zone=port.vlan_tag,
actions=actions
)
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=40,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_NOT_ESTABLISHED,
actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE
)
for ethertype in [lib_const.ETHERTYPE_IP, lib_const.ETHERTYPE_IPV6]:
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=40,
dl_type=ethertype,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_ESTABLISHED,
actions="ct(commit,zone=NXM_NX_REG{:d}[0..15],"
"exec(set_field:{:s}->ct_mark))".format(
ovsfw_consts.REG_NET,
ovsfw_consts.CT_MARK_INVALID)
)
def _add_non_ip_conj_flows(self, port):
"""Install conjunction flows that don't depend on IP address of remote
groups, which consist of actions=conjunction(conj_id, 2/2) flows and
actions=accept flows.
The remaining part is done by ConjIPFlowManager.
"""
port_rules = collections.defaultdict(list)
for sec_group_id, rule in (
self._create_remote_rules_generator_for_port(port)):
direction = rule['direction']
ethertype = rule['ethertype']
protocol = rule.get('protocol')
priority_offset = rules.flow_priority_offset(rule)
if rule.get('remote_group_id'):
remote_type = 'security group'
remote_id = rule.get('remote_group_id')
else:
remote_type = 'address group'
remote_id = rule.get('remote_address_group_id')
conj_id = self.conj_ip_manager.add(port.vlan_tag, sec_group_id,
remote_id,
direction, ethertype,
priority_offset)
LOG.debug("Created conjunction %(conj_id)s for SG %(sg_id)s "
"referencing remote %(remote_type)s ID %(remote_id)s "
"on port %(port_id)s.",
{'conj_id': conj_id,
'sg_id': sec_group_id,
'remote_type': remote_type,
'remote_id': remote_id,
'port_id': port.id})
rule1 = rule.copy()
rule1.pop('remote_group_id', None)
rule1.pop('remote_address_group_id', None)
port_rules_key = (direction, ethertype, protocol)
port_rules[port_rules_key].append((rule1, conj_id))
for (direction, ethertype, protocol), rule_conj_list in (
port_rules.items()):
all_conj_ids = set()
for rule, conj_id in rule_conj_list:
all_conj_ids.add(conj_id)
if protocol in [lib_const.PROTO_NUM_SCTP,
lib_const.PROTO_NUM_TCP,
lib_const.PROTO_NUM_UDP]:
rule_conj_list = rules.merge_port_ranges(rule_conj_list)
else:
rule_conj_list = rules.merge_common_rules(rule_conj_list)
for rule, conj_ids in rule_conj_list:
flows = rules.create_flows_from_rule_and_port(
rule, port, conjunction=True)
for flow in rules.substitute_conjunction_actions(
flows, 2, conj_ids):
self._add_flow(**flow)
# Install accept flows and store conj_id to reg7 for future process
for conj_id in all_conj_ids:
for flow in rules.create_conj_flows(
port, conj_id, direction, ethertype):
flow['actions'] = "set_field:{:d}->reg{:d},{:s}".format(
flow['conj_id'],
ovsfw_consts.REG_REMOTE_GROUP,
flow['actions']
)
self._add_flow(**flow)
def add_flows_from_rules(self, port):
self._initialize_tracked_ingress(port)
self._initialize_tracked_egress(port)
LOG.debug('Creating flow rules for port %s that is port %d in OVS',
port.id, port.ofport)
for rule in self._create_rules_generator_for_port(port):
# NOTE(toshii): A better version of merge_common_rules and
# its friend should be applied here in order to avoid
# overlapping flows.
flows = rules.create_flows_from_rule_and_port(rule, port)
LOG.debug("RULGEN: Rules generated for flow %s are %s",
rule, flows)
for flow in flows:
self._accept_flow(**flow)
self._add_non_ip_conj_flows(port)
self.conj_ip_manager.update_flows_for_vlan(port.vlan_tag)
def _create_rules_generator_for_port(self, port):
for sec_group in port.sec_groups:
for rule in sec_group.raw_rules:
yield rule
def _create_remote_rules_generator_for_port(self, port):
for sec_group in port.sec_groups:
for rule in sec_group.remote_rules:
yield sec_group.id, rule
def delete_all_port_flows(self, port):
"""Delete all flows for given port"""
for mac_addr in port.all_allowed_macs:
self._strict_delete_flow(priority=90,
table=ovs_consts.TRANSIENT_TABLE,
dl_dst=mac_addr,
dl_vlan=port.vlan_tag)
self.delete_physical_direct_flow(mac_addr, port.segment_id)
self._delete_flows(table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
dl_dst=mac_addr, reg_net=port.vlan_tag)
self.delete_accepted_egress_direct_flow(
port.mac, port.vlan_tag)
self._strict_delete_flow(priority=100,
table=ovs_consts.TRANSIENT_TABLE,
in_port=port.ofport)
self._delete_flows(reg_port=port.ofport)
def delete_flows_for_flow_state(
self, flow_state, addr_to_conj, direction, ethertype, vlan_tag):
# Remove rules for deleted IPs and action=conjunction(conj_id, 1/2)
removed_ips = set(flow_state.keys()) - set(addr_to_conj.keys())
for removed_ip in removed_ips:
conj_ids = flow_state[removed_ip]
self.delete_flow_for_ip(removed_ip, direction, ethertype, vlan_tag,
conj_ids)
if not cfg.CONF.AGENT.explicitly_egress_direct:
return
for ip_addr in removed_ips:
# Generate deletion template with bogus conj_id.
self.delete_flow_for_ip(ip_addr, direction, ethertype, vlan_tag,
[0])
def delete_flow_for_ip(self, ip_address, direction, ethertype,
vlan_tag, conj_ids):
for flow in rules.create_flows_for_ip_address(
ip_address, direction, ethertype, vlan_tag, conj_ids):
# The following del statements are partly for
# complying the OpenFlow spec. It forbids the use of
# these field in non-strict delete flow messages, and
# the actions field is bogus anyway.
del flow['actions']
del flow['priority']
# NOTE(hangyang) If cookie is not set then _delete_flows will
# use the OVSBridge._default_cookie to filter the flows but that
# will not match with the ip flow's cookie so OVS won't actually
# delete the flow
flow['cookie'] = ovs_lib.COOKIE_ANY
self._delete_flows(deferred=False, **flow)
| apache-2.0 |
imankulov/sentry | src/sentry/buffer/redis.py | 2 | 5264 | """
sentry.buffer.redis
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from django.db import models
from django.utils.encoding import force_bytes
from time import time
from sentry.buffer import Buffer
from sentry.exceptions import InvalidConfiguration
from sentry.tasks.process_buffer import process_incr
from sentry.utils import metrics
from sentry.utils.compat import pickle
from sentry.utils.hashlib import md5
from sentry.utils.imports import import_string
from sentry.utils.redis import make_rb_cluster
class RedisBuffer(Buffer):
key_expire = 60 * 60 # 1 hour
pending_key = 'b:p'
def __init__(self, **options):
if not options:
# inherit default options from REDIS_OPTIONS
options = settings.SENTRY_REDIS_OPTIONS
options.setdefault('hosts', {
0: {},
})
self.cluster = make_rb_cluster(options['hosts'])
def validate(self):
try:
with self.cluster.all() as client:
client.ping()
except Exception as e:
raise InvalidConfiguration(unicode(e))
def _coerce_val(self, value):
if isinstance(value, models.Model):
value = value.pk
return force_bytes(value, errors='replace')
def _make_key(self, model, filters):
"""
Returns a Redis-compatible key for the model given filters.
"""
return 'b:k:%s:%s' % (
model._meta,
md5('&'.join('%s=%s' % (k, self._coerce_val(v))
for k, v in sorted(filters.iteritems()))).hexdigest(),
)
def _make_lock_key(self, key):
return 'l:%s' % (key,)
def incr(self, model, columns, filters, extra=None):
"""
Increment the key by doing the following:
- Insert/update a hashmap based on (model, columns)
- Perform an incrby on counters
- Perform a set (last write wins) on extra
- Add hashmap key to pending flushes
"""
# TODO(dcramer): longer term we'd rather not have to serialize values
# here (unless it's to JSON)
key = self._make_key(model, filters)
# We can't use conn.map() due to wanting to support multiple pending
# keys (one per Redis shard)
conn = self.cluster.get_local_client_for_key(key)
pipe = conn.pipeline()
pipe.hsetnx(key, 'm', '%s.%s' % (model.__module__, model.__name__))
pipe.hsetnx(key, 'f', pickle.dumps(filters))
for column, amount in columns.iteritems():
pipe.hincrby(key, 'i+' + column, amount)
if extra:
for column, value in extra.iteritems():
pipe.hset(key, 'e+' + column, pickle.dumps(value))
pipe.expire(key, self.key_expire)
pipe.zadd(self.pending_key, time(), key)
pipe.execute()
def process_pending(self):
client = self.cluster.get_routing_client()
lock_key = self._make_lock_key(self.pending_key)
# prevent a stampede due to celerybeat + periodic task
if not client.set(lock_key, '1', nx=True, ex=60):
return
try:
for host_id in self.cluster.hosts.iterkeys():
conn = self.cluster.get_local_client(host_id)
keys = conn.zrange(self.pending_key, 0, -1)
if not keys:
continue
keycount = 0
for key in keys:
keycount += 1
process_incr.apply_async(kwargs={
'key': key,
})
pipe = conn.pipeline()
pipe.zrem(self.pending_key, *keys)
pipe.execute()
metrics.timing('buffer.pending-size', keycount)
finally:
client.delete(lock_key)
def process(self, key):
client = self.cluster.get_routing_client()
lock_key = self._make_lock_key(key)
# prevent a stampede due to the way we use celery etas + duplicate
# tasks
if not client.set(lock_key, '1', nx=True, ex=10):
metrics.incr('buffer.revoked', tags={'reason': 'locked'})
self.logger.info('Skipped process on %s; unable to get lock', key)
return
conn = self.cluster.get_local_client_for_key(key)
pipe = conn.pipeline()
pipe.hgetall(key)
pipe.zrem(self.pending_key, key)
pipe.delete(key)
values = pipe.execute()[0]
if not values:
metrics.incr('buffer.revoked', tags={'reason': 'empty'})
self.logger.info('Skipped process on %s; no values found', key)
return
model = import_string(values['m'])
filters = pickle.loads(values['f'])
incr_values = {}
extra_values = {}
for k, v in values.iteritems():
if k.startswith('i+'):
incr_values[k[2:]] = int(v)
elif k.startswith('e+'):
extra_values[k[2:]] = pickle.loads(v)
super(RedisBuffer, self).process(model, incr_values, filters, extra_values)
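# A minimal sketch of the hash layout shared by incr() and process()
# above, with hypothetical values: 'm' holds the dotted model path, 'f'
# the pickled filters, 'i+'-prefixed fields the counters, and
# 'e+'-prefixed fields the pickled extras.
_example_hash = {
    'm': 'sentry.models.Group',
    'f': pickle.dumps({'pk': 42}),
    'i+times_seen': '3',
    'e+last_seen': pickle.dumps('2014-01-01'),
}
_incr = dict((k[2:], int(v)) for k, v in _example_hash.items()
             if k.startswith('i+'))
_extra = dict((k[2:], pickle.loads(v)) for k, v in _example_hash.items()
              if k.startswith('e+'))
assert _incr == {'times_seen': 3}
assert _extra == {'last_seen': '2014-01-01'}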
| bsd-3-clause |
yongminxia/elasticsearch | dev-tools/upload-s3.py | 255 | 2375 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import argparse
try:
import boto.s3
except ImportError:
raise RuntimeError("""
S3 upload requires boto to be installed
Use one of:
'pip install -U boto'
'apt-get install python-boto'
'easy_install boto'
""")
def list_buckets(conn):
return conn.get_all_buckets()
def upload_s3(conn, path, key, file, bucket):
print 'Uploading %s to Amazon S3 bucket %s/%s' % \
(file, bucket, os.path.join(path, key))
def percent_cb(complete, total):
sys.stdout.write('.')
sys.stdout.flush()
bucket = conn.create_bucket(bucket)
k = bucket.new_key(os.path.join(path, key))
k.set_contents_from_filename(file, cb=percent_cb, num_cb=100)
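# A hedged usage sketch (bucket and path values are illustrative only):
#
#   conn = boto.connect_s3()  # credentials come from the environment/~/.boto
#   upload_s3(conn, 'elasticsearch/elasticsearch', 'es-1.0.0.tar.gz',
#             '/tmp/es-1.0.0.tar.gz', 'download.elasticsearch.org')
#
# Note that conn.create_bucket() returns the existing bucket when the caller
# already owns it (region-dependent), so repeated uploads reuse the bucket.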
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Uploads files to Amazon S3')
parser.add_argument('--file', '-f', metavar='path to file',
                        help='the file to upload', required=True)
parser.add_argument('--bucket', '-b', metavar='B42', default='download.elasticsearch.org',
help='The S3 Bucket to upload to')
parser.add_argument('--path', '-p', metavar='elasticsearch/elasticsearch', default='elasticsearch/elasticsearch',
help='The key path to use')
parser.add_argument('--key', '-k', metavar='key', default=None,
help='The key - uses the file name as default key')
args = parser.parse_args()
if args.key:
key = args.key
else:
key = os.path.basename(args.file)
connection = boto.connect_s3()
    upload_s3(connection, args.path, key, args.file, args.bucket)
| apache-2.0 |
bayespy/bayespy | bayespy/inference/vmp/nodes/tests/test_multinomial.py | 1 | 7236 | ################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `multinomial` module.
"""
import numpy as np
import scipy
from bayespy.nodes import (Multinomial,
Dirichlet,
Mixture)
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestMultinomial(TestCase):
"""
Unit tests for Multinomial node
"""
def test_init(self):
"""
Test the creation of multinomial nodes.
"""
# Some simple initializations
X = Multinomial(10, [0.1, 0.3, 0.6])
X = Multinomial(10, Dirichlet([5,4,3]))
# Check that plates are correct
X = Multinomial(10, [0.1, 0.3, 0.6], plates=(3,4))
self.assertEqual(X.plates,
(3,4))
X = Multinomial(10, 0.25*np.ones((2,3,4)))
self.assertEqual(X.plates,
(2,3))
        n = 10 * np.ones((3,4), dtype=int)
X = Multinomial(n, [0.1, 0.3, 0.6])
self.assertEqual(X.plates,
(3,4))
X = Multinomial(n, Dirichlet([2,1,9], plates=(3,4)))
self.assertEqual(X.plates,
(3,4))
# Probabilities not a vector
self.assertRaises(ValueError,
Multinomial,
10,
0.5)
# Invalid probability
self.assertRaises(ValueError,
Multinomial,
10,
[-0.5, 1.5])
self.assertRaises(ValueError,
Multinomial,
10,
[0.5, 1.5])
# Invalid number of trials
self.assertRaises(ValueError,
Multinomial,
-1,
[0.5, 0.5])
self.assertRaises(ValueError,
Multinomial,
8.5,
[0.5, 0.5])
# Inconsistent plates
self.assertRaises(ValueError,
Multinomial,
10,
0.25*np.ones((2,4)),
plates=(3,))
# Explicit plates too small
self.assertRaises(ValueError,
Multinomial,
10,
0.25*np.ones((2,4)),
plates=(1,))
pass
def test_moments(self):
"""
Test the moments of multinomial nodes.
"""
# Simple test
X = Multinomial(1, [0.7,0.2,0.1])
u = X._message_to_child()
self.assertEqual(len(u), 1)
self.assertAllClose(u[0],
[0.7,0.2,0.1])
# Test n
X = Multinomial(10, [0.7,0.2,0.1])
u = X._message_to_child()
self.assertAllClose(u[0],
[7,2,1])
# Test plates in p
n = np.random.randint(1, 10)
p = np.random.dirichlet([1,1], size=3)
X = Multinomial(n, p)
u = X._message_to_child()
self.assertAllClose(u[0],
p*n)
# Test plates in n
n = np.random.randint(1, 10, size=(3,))
p = np.random.dirichlet([1,1,1,1])
X = Multinomial(n, p)
u = X._message_to_child()
self.assertAllClose(u[0],
p*n[:,None])
# Test plates in p and n
n = np.random.randint(1, 10, size=(4,1))
p = np.random.dirichlet([1,1], size=3)
X = Multinomial(n, p)
u = X._message_to_child()
self.assertAllClose(u[0],
p*n[...,None])
# Test with Dirichlet prior
P = Dirichlet([7, 3])
logp = P._message_to_child()[0]
p0 = np.exp(logp[0]) / (np.exp(logp[0]) + np.exp(logp[1]))
p1 = np.exp(logp[1]) / (np.exp(logp[0]) + np.exp(logp[1]))
X = Multinomial(1, P)
u = X._message_to_child()
p = np.array([p0, p1])
self.assertAllClose(u[0],
p)
# Test with broadcasted plates
P = Dirichlet([7, 3], plates=(10,))
X = Multinomial(5, P)
u = X._message_to_child()
self.assertAllClose(u[0] * np.ones(X.get_shape(0)),
5*p*np.ones((10,1)))
pass
def test_lower_bound(self):
"""
Test lower bound for multinomial node.
"""
# Test for a bug found in multinomial
X = Multinomial(10, [0.3, 0.5, 0.2])
l = X.lower_bound_contribution()
self.assertAllClose(l, 0.0)
pass
def test_mixture(self):
"""
Test multinomial mixture
"""
p0 = [0.1, 0.5, 0.2, 0.2]
p1 = [0.5, 0.1, 0.1, 0.3]
p2 = [0.3, 0.2, 0.1, 0.4]
X = Mixture(2, Multinomial, 10, [p0, p1, p2])
u = X._message_to_child()
self.assertAllClose(u[0],
10*np.array(p2))
pass
def test_mixture_with_count_array(self):
"""
Test multinomial mixture
"""
p0 = [0.1, 0.5, 0.2, 0.2]
p1 = [0.5, 0.1, 0.1, 0.3]
p2 = [0.3, 0.2, 0.1, 0.4]
counts = [[10], [5], [3]]
X = Mixture(2, Multinomial, counts, [p0, p1, p2])
u = X._message_to_child()
self.assertAllClose(
u[0],
np.array(counts)*np.array(p2)
)
# Multi-mixture and count array
# Shape(p) = (2, 1, 3) + (4,)
p = [
[[
[0.1, 0.5, 0.2, 0.2],
[0.5, 0.1, 0.1, 0.3],
[0.3, 0.2, 0.1, 0.4],
]],
[[
[0.3, 0.2, 0.1, 0.4],
[0.5, 0.1, 0.2, 0.2],
[0.4, 0.1, 0.2, 0.3],
]],
]
# Shape(Z1) = (1, 3) + (2,) -> () + (2,)
Z1 = 1
# Shape(Z2) = (1,) + (3,) -> () + (3,)
Z2 = 2
# Shape(counts) = (5, 1)
counts = [[10], [5], [3], [2], [4]]
# Shape(X) = (5,) + (4,)
X = Mixture(
Z1,
Mixture,
Z2,
Multinomial,
counts,
p,
            # NOTE: We mix over axes -3 and -1. Because we first mix over the
            # default (-1), the next mixing happens over -2 (one axis has
            # already been dropped).
cluster_plate=-2,
)
self.assertAllClose(
X._message_to_child()[0],
np.array(counts)[:,0,None] * np.array(p)[Z1,:,Z2]
)
# Can't have non-singleton axis in counts over the mixed axis
p0 = [0.1, 0.5, 0.2, 0.2]
p1 = [0.5, 0.1, 0.1, 0.3]
p2 = [0.3, 0.2, 0.1, 0.4]
counts = [10, 5, 3]
self.assertRaises(
ValueError,
Mixture,
2,
Multinomial,
counts,
[p0, p1, p2],
)
return
| mit |
cmap/cmapPy | cmapPy/clue_api_client/tests/test_clue_api_client.py | 1 | 5593 | import unittest
import cmapPy.clue_api_client.setup_logger as setup_logger
import logging
import cmapPy.clue_api_client.clue_api_client as clue_api_client
import os.path
import collections
import configparser
__authors__ = "David L. Lahr"
__email__ = "dlahr@broadinstitute.org"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
config_filepath = os.path.expanduser("~/.cmapPy.cfg")
config_section = "test"
cao = None
test_brew_prefix = "dlahr brew prefix 001"
test_status = "my fake status"
class TestClueApiClient(unittest.TestCase):
def test_run_query(self):
#get one gene
r = cao.run_filter_query("genes", {"where":{"entrez_id":5720}})
self.assertIsNotNone(r)
logger.debug("len(r): {}".format(len(r)))
logger.debug("r: {}".format(r))
self.assertEqual(1, len(r))
#get multiple genes
r = cao.run_filter_query("genes", {"where":{"entrez_id":{"inq":[5720,207]}}})
self.assertIsNotNone(r)
logger.debug("len(r): {}".format(len(r)))
logger.debug("r: {}".format(r))
self.assertEqual(2, len(r))
r = cao.run_filter_query("perts", {"where":{"pert_id":"BRD-K12345678"}})
self.assertIsNotNone(r)
logger.debug("len(r): {}".format(len(r)))
self.assertEqual(0, len(r))
def test_run_query_handle_fail(self):
with self.assertRaises(Exception) as context:
cao.run_filter_query("fakeresource", {})
self.assertIsNotNone(context.exception)
logger.debug("context.exception: {}".format(context.exception))
self.assertIn("ClueApiClient request failed", str(context.exception))
def test_run_where_query(self):
r = cao.run_count_query("cells", {"cell_id":"A375"})
self.assertIsNotNone(r)
logger.debug("r: {}".format(r))
self.assertIn("count", r)
self.assertEqual(1, r["count"])
def test__check_request_response(self):
FakeResponse = collections.namedtuple("FakeResponse", ["status_code", "reason"])
#happy path
fr = FakeResponse(200, None)
clue_api_client.ClueApiClient._check_request_response(fr)
#response status code that should cause failure
fr2 = FakeResponse(404, "I don't need a good reason!")
with self.assertRaises(Exception) as context:
clue_api_client.ClueApiClient._check_request_response(fr2)
logger.debug("context.exception: {}".format(context.exception))
self.assertIn(str(fr2.status_code), str(context.exception))
self.assertIn(fr2.reason, str(context.exception))
def test_run_post(self):
#check that the entry isn't already there, if it is delete it
check_result = cao.run_count_query("macchiato", {"brew_prefix":test_brew_prefix})
if check_result["count"] == 1:
lookup_result = cao.run_filter_query("macchiato", {"where":{"brew_prefix":test_brew_prefix}})[0]
cao.run_delete("macchiato", lookup_result["id"])
#happy path
data = {"brew_prefix":test_brew_prefix, "status":test_status}
r = cao.run_post("macchiato", data)
self.assertIsNotNone(r)
logger.debug("r: {}".format(r))
self.assertIn("brew_prefix", r)
self.assertEqual(data["brew_prefix"], r["brew_prefix"])
self.assertIn("id", r)
#check that user key has not been added to entry
self.assertNotIn("user_key", r)
#clean up
r = cao.run_delete("macchiato", r["id"])
def test_run_delete(self):
#check that there is an entry to delete, if not create it
lookup_result = add_entry_if_not_already_present(cao, "macchiato", {"brew_prefix":test_brew_prefix},
{"brew_prefix":test_brew_prefix, "status": test_status})
delete_id = lookup_result["id"]
#happy path
r = cao.run_delete("macchiato", delete_id)
self.assertIsNotNone(r)
logger.debug("r: {}".format(r))
self.assertTrue(r)
def test_run_put(self):
#check that there is an entry to update, if not create it
lookup_result = add_entry_if_not_already_present(cao, "macchiato", {"brew_prefix":test_brew_prefix},
{"brew_prefix":test_brew_prefix, "status": test_status})
put_id = lookup_result["id"]
expected_status = "test status for test_clue_api_client test_run_put"
r = cao.run_put("macchiato", put_id, {"status":expected_status})
self.assertIsNotNone(r)
logger.debug("r: {}".format(r))
self.assertIn("status", r)
self.assertEqual(expected_status, r["status"])
self.assertNotIn("user_key", r)
def build_clue_api_client_from_default_test_config():
cfg = configparser.RawConfigParser()
cfg.read(config_filepath)
cao = clue_api_client.ClueApiClient(base_url=cfg.get(config_section, "clue_api_url"),
user_key=cfg.get(config_section, "clue_api_user_key"))
return cao
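# The config file read above is expected to look roughly like this
# (values are illustrative; only the two keys used above are required):
#
#   [test]
#   clue_api_url = https://example.clue.host/api
#   clue_api_user_key = 0123456789abcdef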
def add_entry_if_not_already_present(my_clue_api_orm, resource_name, where_query, default_data):
check_result = my_clue_api_orm.run_count_query(resource_name, where_query)
if check_result["count"] == 0:
lookup_result = my_clue_api_orm.run_post(resource_name, default_data)
else:
lookup_result = my_clue_api_orm.run_filter_query(resource_name, {"where":where_query})[0]
return lookup_result
if __name__ == "__main__":
setup_logger.setup(verbose=True)
cao = build_clue_api_client_from_default_test_config()
unittest.main()
| bsd-3-clause |
jobiols/management-system | mgmtsystem_action/__openerp__.py | 2 | 1942 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Management System - Action",
"version": "8.0.1.2.2",
"author": "Savoir-faire Linux,Odoo Community Association (OCA)",
"website": "http://www.savoirfairelinux.com",
"license": "AGPL-3",
"category": "Management System",
"description": """\
This module enables you to manage the different actions of your management
system:
* immediate actions
* corrective actions
* preventive actions
* improvement opportunities.
""",
"depends": ['mgmtsystem', 'crm_claim'],
"data": [
'data/mgmtsystem_action_stage.xml',
'security/ir.model.access.csv',
'security/mgmtsystem_action_security.xml',
'action_sequence.xml',
'workflow_mgmtsystem_action.xml',
'views/menus.xml',
'mgmtsystem_action.xml',
'views/mgmtsystem_action_stage.xml',
'board_mgmtsystem_action.xml',
],
"demo": ['demo_action.xml'],
"installable": True,
}
| agpl-3.0 |
akulakov/pyquestions | playground/urls.py | 1 | 1122 | """proj1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from views import *
urlpatterns = [
url(r'^$', Home.as_view(), name='home'),
url(r'^question/$', RandomQuestionView.as_view(), name='random_question'),
url(r'^review-question/$', ReviewRandomQuestionView.as_view(), name='review_question'),
url(r'^new-question/$', NewQuestionView.as_view(), name='new_question'),
url(r'^update-username/$', UpdateUsernameView.as_view(), name='update_username'),
]
| gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/theano/sandbox/cuda/tests/test_opt.py | 3 | 33090 | from __future__ import print_function
import operator
import sys
import unittest
import numpy
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import theano
from six.moves import reduce
from theano.compile.pfunc import pfunc
from theano import config, tensor
import theano.tensor.tests.test_nlinalg
import theano.tensor.tests.test_opt as test_opt
from theano.tests.breakpoint import PdbBreakpoint
from theano.tests import unittest_tools as utt
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
raise SkipTest('Optional package cuda disabled')
import theano.sandbox.cuda.cula as cula
from theano.sandbox.cuda import basic_ops
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.scalar.basic_scipy import erfinv
from theano.tensor.nnet.blocksparse import sparse_block_dot
from theano.sandbox.cuda.blocksparse import GpuSparseBlockGemv, GpuSparseBlockOuter
imported_scipy_special = False
try:
import scipy.special
imported_scipy_special = True
# Importing scipy.special may raise ValueError.
# See http://projects.scipy.org/scipy/ticket/1739
except (ImportError, ValueError):
pass
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_no_shared_var_graph():
"""Test that the InputToGpuOptimizer optimizer make graph that don't have shared variable compiled too.
"""
a = tensor.fmatrix()
b = tensor.fmatrix()
f = theano.function([a, b], [a + b], mode=mode_with_gpu)
l = f.maker.fgraph.toposort()
assert len(l) == 4
    assert any(isinstance(x.op, cuda.GpuElemwise) for x in l)
    assert any(isinstance(x.op, cuda.GpuFromHost) for x in l)
    assert any(isinstance(x.op, cuda.HostFromGpu) for x in l)
def test_local_assert():
x = theano.tensor.fmatrix()
a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())
f = theano.function([x], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 1
assert isinstance(a_op[0].inputs[0].type, CudaNdarrayType)
def test_local_remove_all_assert():
x = theano.tensor.fmatrix()
a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())
# By default `unsafe` should not be there
f = theano.function([x], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 1
# Put `unsafe`
f = theano.function([x], a, mode=mode_with_gpu.including('unsafe'))
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 0
# Remove `unsafe`
f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 1
def test_local_gpu_contiguous_gpu_contiguous():
a = tensor.fmatrix()
o1 = basic_ops.gpu_contiguous(a)
o2 = basic_ops.gpu_contiguous(o1)
f1 = theano.function([a], o1, mode=mode_with_gpu)
f2 = theano.function([a], o2, mode=mode_with_gpu)
assert 1 == len([node for node in f1.maker.fgraph.toposort()
if isinstance(node.op, basic_ops.GpuContiguous)])
assert 1 == len([node for node in f2.maker.fgraph.toposort()
if isinstance(node.op, basic_ops.GpuContiguous)])
def test_local_assert_no_cpu_op():
numpy.random.seed(1)
m = numpy.random.uniform(-1, 1, (10, 10)).astype("float32")
ms = cuda.shared_constructor(m, name="m_shared")
out = theano.tensor.tanh(ms).dot(ms.T)
mode_local_assert = mode_with_gpu.including("assert_no_cpu_op")
mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_0")
mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_1")
old = config.assert_no_cpu_op
old2 = config.on_opt_error
# If the flag is raise
try:
config.assert_no_cpu_op = 'raise'
config.on_opt_error = 'ignore'
assert_raises(AssertionError, theano.function,
[], out, mode=mode_local_assert)
finally:
config.assert_no_cpu_op = old
config.on_opt_error = old2
# If the flag is ignore
try:
config.assert_no_cpu_op = 'ignore'
theano.function([], out, mode=mode_local_assert)
finally:
config.assert_no_cpu_op = old
def test_int_pow():
a = CudaNdarrayType([False])()
f = theano.function([a], (a*4).sum(), mode=mode_with_gpu)
op_names = [n.op.__class__.__name__ for n in f.maker.fgraph.toposort()]
assert op_names == ['GpuCAReduce', 'GpuElemwise', 'HostFromGpu']
f = theano.function([a], tensor.pow(a, 4).sum(), mode=mode_with_gpu)
op_names = [n.op.__class__.__name__ for n in f.maker.fgraph.toposort()]
assert op_names == ['GpuElemwise', 'GpuCAReduce', 'HostFromGpu']
def test_gpualloc():
'''
    This test tries to catch the scenario where, due to infer_shape,
    the input of the alloc changes from a tensor scalar to a constant
    1. In this case the originally constructed broadcastable pattern will
have a False for that dimension, but the new broadcastable pattern
that will be inserted by gpualloc will have a True since it knows the
dimension is 1 and therefore broadcastable.
'''
x = theano.shared(numpy.ones(3, dtype='float32'), 'x')
m = (x).dimshuffle(['x', 0])
v = tensor.alloc(1., *m.shape)
f = theano.function([], v + x,
mode=mode_with_gpu.excluding("local_elemwise_alloc"))
l = f.maker.fgraph.toposort()
assert numpy.any([isinstance(x.op, cuda.GpuAlloc) for x in l])
def test_gpuallocempty():
f_gpu = theano.function([], tensor.AllocEmpty('float32')(2,3),
mode=mode_with_gpu)
l_gpu = f_gpu.maker.fgraph.toposort()
assert numpy.any([isinstance(x.op, basic_ops.GpuAllocEmpty) for x in l_gpu])
f_cpu = theano.function([], tensor.AllocEmpty('int32')(2,3))
l_cpu = f_cpu.maker.fgraph.toposort()
assert not numpy.any([isinstance(x.op, basic_ops.GpuAllocEmpty) for x in l_cpu])
class Test_local_elemwise_alloc(test_opt.Test_local_elemwise_alloc):
dtype = 'float32'
def setUp(self):
super(Test_local_elemwise_alloc, self).setUp()
self.fast_run_mode = mode_with_gpu
# self.vec = tensor.vector('vec', dtype=dtype)
# self.mat = tensor.matrix('mat', dtype=dtype)
# self.tens = tensor.tensor3('tens', dtype=dtype)
# self.alloc_wo_dep = basic_ops.gpu_alloc(self.vec, 2, 2)
# self.alloc_w_dep = basic_ops.gpu_alloc(self.vec, *self.mat.shape)
self.alloc_wo_dep = basic_ops.gpu_alloc(self.vec, 2, 2)
self.alloc_w_dep = basic_ops.gpu_alloc(self.vec, *self.mat.shape)
self.alloc_w_dep_tens = basic_ops.gpu_alloc(
self.vec,
self.tens.shape[0],
self.tens.shape[1]
)
self.tv_wo_dep = basic_ops.gpu_alloc(self.vec, 5, 5)
self.tm_wo_dep = basic_ops.gpu_alloc(self.mat, 5, 5, 5)
self.s = tensor.iscalar('s')
self.tv_w_dep = basic_ops.gpu_alloc(self.vec, self.s, self.s)
self.tm_w_dep = basic_ops.gpu_alloc(self.mat, 5, 5, 5)
self.row = tensor.row(dtype=self.dtype)
self.o = basic_ops.gpu_alloc(self.row, 5, 5)
def _verify_alloc_count(self, f, count):
assert(
sum([isinstance(elem.op, basic_ops.GpuAlloc)
for elem in f.maker.fgraph.toposort()
if elem.op is not None]) == count
)
def test_alloc_memset_0():
i = tensor.iscalar()
z = numpy.zeros((1,), dtype='float32')
o = numpy.ones((1,), dtype='float32')
ones = numpy.ones((2,), dtype='float32')
# Test with 0
a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(z)), i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, basic_ops.GpuAlloc) and topo[0].op.memset_0
assert (numpy.asarray(f(6)) == 0).all()
# Test with 1
a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(o)), i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, basic_ops.GpuAlloc)
assert not topo[0].op.memset_0
assert (numpy.asarray(f(6)) == 1).all()
# Test with 1, 1
a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(ones)), i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, basic_ops.GpuAlloc)
assert not topo[0].op.memset_0
assert (numpy.asarray(f(2)) == 1).all()
def test_gpuspecifyshape():
x = cuda.shared_constructor(numpy.ones(3, dtype='float32'), 'x')
m = theano.tensor.specify_shape(x + numpy.float32(1), (3,))
f = theano.function([], updates=[(x, m * numpy.float32(2))],
mode=mode_with_gpu)
l = f.maker.fgraph.toposort()
assert not numpy.any([isinstance(x.op, cuda.HostFromGpu) for x in l])
def test_softmax():
x = tensor.fmatrix()
f = theano.function([x], tensor.nnet.nnet.Softmax()(x),
mode=mode_with_gpu.excluding('cudnn'))
f2 = theano.function([x], tensor.nnet.nnet.Softmax()(x),
mode=mode_without_gpu)
assert isinstance(f.maker.fgraph.toposort()[1].op, cuda.nnet.GpuSoftmax)
xv = numpy.random.rand(7, 8).astype('float32')
assert numpy.allclose(f(xv), f2(xv))
def test_softmax_with_bias():
x = tensor.fmatrix()
b = tensor.fvector()
f = theano.function([x, b], tensor.nnet.nnet.SoftmaxWithBias()(x, b),
mode=mode_with_gpu)
f2 = theano.function([x, b], tensor.nnet.nnet.SoftmaxWithBias()(x, b),
mode=mode_without_gpu)
assert isinstance(f.maker.fgraph.toposort()[2].op,
cuda.nnet.GpuSoftmaxWithBias)
xv = numpy.random.rand(7, 8).astype('float32')
bv = numpy.random.rand(8).astype('float32')
assert numpy.allclose(f(xv, bv), f2(xv, bv))
def test_opt_gpujoin_onlyajoin():
# from a bug in normal sampling
_a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
_b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
a = cuda.shared_constructor(_a)
b = cuda.shared_constructor(_b)
c = tensor.join(1, a, b)
f = theano.function([], c, mode=mode_with_gpu)
f()
graph_nodes = f.maker.fgraph.toposort()
assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
assert isinstance(graph_nodes[-2].op, cuda.GpuJoin)
assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
# test mixed dtype
_b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float64')
b = theano.tensor.constant(_b)
c = tensor.join(1, a, b)
f = theano.function([], c, mode=mode_with_gpu)
f()
graph_nodes = f.maker.fgraph.toposort()
assert isinstance(graph_nodes[-1].op, theano.tensor.Join)
assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
# from a bug in gpu normal sampling
_a = numpy.asarray([1, 2, 3, 4], dtype='float32')
_b = numpy.asarray([5, 6, 7, 8], dtype='float32')
a = cuda.shared_constructor(_a)
b = cuda.shared_constructor(_b)
a_prime = tensor.cos(a)
b_prime = tensor.sin(b)
c = tensor.join(0, a_prime, b_prime)
d = c[:-1]
f = theano.function([], d, mode=mode_with_gpu)
graph_nodes = f.maker.fgraph.toposort()
assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
assert isinstance(graph_nodes[-2].op, cuda.GpuSubtensor)
assert isinstance(graph_nodes[-3].op, cuda.GpuJoin)
concat = numpy.concatenate([numpy.cos(_a), numpy.sin(_b)], axis=0)
concat = concat[:-1]
assert numpy.allclose(numpy.asarray(f()), concat)
def test_opt_gpujoin_joinvectors_negativeaxes():
"""
Test that negative axis concatenation works as expected.
"""
# Test case for one-dimensional vectors
rng = numpy.random.RandomState(22)
x1 = rng.rand(5)
x2 = rng.rand(10)
t1 = cuda.shared_constructor(numpy.asarray(x1, "float32"))
t2 = cuda.shared_constructor(numpy.asarray(x2, "float32"))
t = tensor.concatenate([t1, t2], axis=-1)
f = theano.function(inputs=[], outputs=t)
assert(numpy.allclose(f(), numpy.concatenate([x1, x2], axis=-1)))
# Test case for two-dimensional vectors
x1 = rng.rand(5, 10)
x2 = rng.rand(10, 10)
t1 = cuda.shared_constructor(numpy.asarray(x1, "float32"))
t2 = cuda.shared_constructor(numpy.asarray(x2, "float32"))
t = tensor.concatenate([t1, t2], axis=-2)
f = theano.function(inputs=[], outputs=t)
assert(numpy.allclose(f(), numpy.concatenate([x1, x2], axis=-2)))
# Now check that a value error is raised when vectors don't match
# along the negative concatenation axis
try:
t = tensor.concatenate([t1, t2], axis=-1)
f = theano.function(inputs=[], outputs=t)
f()
assert(False)
except ValueError:
assert(True)
# Finally check that a value error is raised when negative
# axis is larger in absolute value than smallest number of dims
try:
t = tensor.concatenate([t1, t2], axis=-3)
f = theano.function(inputs=[], outputs=t)
f()
assert(False)
except IndexError:
assert(True)
def test_local_gpu_subtensor():
# Test shared forced on CPU.
t = tensor._shared(numpy.zeros(20, "float32"))
f = theano.function([], t[3:4], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert any([type(node.op) is tensor.Subtensor for node in topo])
assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
# Test graph input.
t = tensor.fmatrix()
f = theano.function([t], t[3:4], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert any([type(node.op) is tensor.Subtensor for node in topo])
assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
# Test multiple use of the input
# We want the subtensor to be on the GPU to prevent multiple transfer.
t = tensor.fmatrix()
f = theano.function([t], [t[3:4], t+1], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert not any([type(node.op) is tensor.Subtensor for node in topo])
assert any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
# Test multiple use of the input + input as output
# We want the subtensor to be on the GPU to prevent multiple transfer.
t = tensor.fmatrix()
f = theano.function([t], [t[3:4], t+1, t], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert not any([type(node.op) is tensor.Subtensor for node in topo])
assert any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
# Test shared forced on CPU end we do computation on the output of
# the subtensor.
t = tensor._shared(numpy.zeros(20, "float32"))
f = theano.function([], t[3:4]+1, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert any([type(node.op) is tensor.Subtensor for node in topo])
assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
def test_local_gpu_split():
""" Test that the GpuSplit op is being applied and works """
# Construct symbolic split
x = tensor.fvector()
splits = tensor.lvector()
ra, rb, rc = tensor.split(x, splits, n_splits=3, axis=0)
# Compile function to use CPU
f = theano.function([x, splits], [ra, rb, rc], mode=mode_without_gpu)
# Get values for CPU version
cpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
l = f.maker.fgraph.toposort()
# Ensure that one op is theano.tensor.Split
assert any([isinstance(o.op, theano.tensor.Split) for o in l])
# GPU version
f = theano.function([x, splits], [ra, rb, rc], mode=mode_with_gpu)
gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
l = f.maker.fgraph.toposort()
assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
# Check equality
assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
# Test the other path of the optimizer, when it is the output that
# is moved to the GPU.
ra = cuda.gpu_from_host(ra)
f = theano.function([x, splits], [ra, rb, rc],
mode=mode_with_gpu.excluding("InputToGpuOptimizer"))
gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
l = f.maker.fgraph.toposort()
assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
# Check equality
assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
# Test that split with only 1 output work
ra = tensor.split(x, splits, n_splits=1, axis=0)
f = theano.function([x, splits], [ra], mode=mode_without_gpu)
cpu_res = f([0, 1, 2, 3, 4, 5], [6])
l = f.maker.fgraph.toposort()
# Ensure that no op is theano.tensor.Split or GpuSplit, they get
# optimized away.
assert not any([isinstance(o.op, (theano.tensor.Split,
cuda.GpuSplit)) for o in l])
# GPU version
f = theano.function([x, splits], [ra], mode=mode_with_gpu)
gpu_res = f([0, 1, 2, 3, 4, 5], [6])
l = f.maker.fgraph.toposort()
assert not any([isinstance(o.op, (theano.tensor.Split,
cuda.GpuSplit)) for o in l])
# Check equality
assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
def test_print_op():
""" Test that print ops don't block gpu optimization"""
b = tensor.fmatrix()
f = theano.function([b], theano.printing.Print()(b)*2, mode=mode_with_gpu)
# theano.printing.debugprint(f)
# print f.maker.fgraph.toposort()
#[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]
topo = f.maker.fgraph.toposort()
assert topo[0].op == cuda.gpu_from_host
assert isinstance(topo[1].op, theano.printing.Print)
assert isinstance(topo[2].op, cuda.GpuElemwise)
assert topo[3].op == cuda.host_from_gpu
f(numpy.random.random((5, 5)).astype('float32'))
def test_pdbbreakpoint_op():
""" Test that PdbBreakpoint ops don't block gpu optimization"""
b = tensor.fmatrix()
# Create a function composed of a breakpoint followed by
# some computation
condition = tensor.gt(b.sum(), 0)
b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
output = b_monitored ** 2
f = theano.function([b], output, mode=mode_with_gpu)
# Ensure that, in the compiled function, the computation following the
# breakpoint has been moved to the gpu.
topo = f.maker.fgraph.toposort()
assert isinstance(topo[-2].op, cuda.GpuElemwise)
assert topo[-1].op == cuda.host_from_gpu
def test_local_gpu_elemwise_careduce():
x = theano.tensor.fmatrix()
o = (x * x).sum()
f = theano.function([x], o, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 3
assert topo[1].op.pre_scalar_op == theano.scalar.sqr
data = numpy.random.rand(3, 4).astype('float32')
utt.assert_allclose(f(data), (data * data).sum())
o = (x * x).sum(axis=1)
f = theano.function([x], o, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 3
assert topo[1].op.pre_scalar_op == theano.scalar.sqr
utt.assert_allclose(f(data), (data * data).sum(axis=1))
def test_huge_elemwise_fusion():
""" Test the the GpuElemwise fusion work correctly
We check that we fuse one node with part of its input
in case their is too many inputs and that would make it bust the 256
bytes limits.
"""
shape = (2, 3, 4, 5, 6)
ttype = tensor.tensor(dtype='float32', broadcastable=(False,) * len(shape))
gpu_ptr_size = theano.sandbox.cuda.opt.get_device_type_sizes()['gpu_ptr_size']
if gpu_ptr_size == 8:
nb_in = 7
len_topo = 10
elif gpu_ptr_size == 4:
nb_in = 8
len_topo = 11
else:
raise Exception("Unexpected value for gpu_ptr_size", gpu_ptr_size)
vars = [tensor.tanh(ttype) for x in range(nb_in)]
f = pfunc(vars, [reduce(operator.sub, vars)], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == len_topo
assert sum([isinstance(node.op, cuda.GpuElemwise) for node in topo]) == 2
assert isinstance(topo[-3].op.scalar_op, theano.scalar.basic.Sub)
assert isinstance(topo[-2].op.scalar_op, theano.scalar.basic.Composite)
# let debugmode catch errors
gen = lambda: theano._asarray(numpy.random.rand(*shape), dtype='float32')
f(*[gen() for i in range(nb_in)])
    # Test the case where we can't put the computation on the gpu! There are
    # too many dimensions in the input to have 2 inputs to the op!
shape = (1, 2, 3, 4, 5, 6, 7, 2, 2, 3, 2, 1, 2, 2, 2,)
ttype = tensor.tensor(dtype='float32', broadcastable=(False,) * len(shape))
vars = [tensor.tanh(ttype) for x in range(7)]
f = pfunc(vars, [vars[0] - vars[1] - vars[2] - vars[3] - vars[4] -
vars[5] - vars[6]], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert sum([isinstance(node.op, cuda.GpuElemwise) for node in topo]) == 0
assert sum([isinstance(node.op, tensor.Elemwise) for node in topo]) == 1
# let debugmode catch errors
gen = lambda: theano._asarray(numpy.random.rand(*shape), dtype='float32')
f(gen(), gen(), gen(), gen(), gen(), gen(), gen())
def gen(shape):
return theano._asarray(numpy.random.rand(*shape), dtype='float32')
max_var = 16 # excluded
for shape in [(2,),
(2, 2),
(2, 2, 2),
(2, 2, 2, 2),
(2, 2, 2, 2, 2), # 5d
(2, 2, 2, 2, 2, 2),
# (2, 2, 2, 2, 2, 2, 2),
# (2, 2, 2, 2, 2, 2, 2, 2),
# (2, 2, 2, 1, 1, 1, 1, 2, 2), # 9d
]:
vals = [cuda.shared_constructor(gen(shape)) for x in range(max_var)]
for use_tan in [True, False]:
if use_tan:
vars = [tensor.tanh(x) for x in vals]
else:
vars = vals
for nb_var in range(1, max_var):
out = reduce(lambda x, y: x + y, vars[:nb_var])
if not isinstance(out.type, CudaNdarrayType):
out = cuda.gpu_from_host(out)
f = pfunc([], [out], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
# print shape, nb_var, use_tan, len(topo)
assert (sum([isinstance(node.op, cuda.GpuElemwise)
for node in topo]) == len(topo) or
(nb_var == 1 and use_tan is False))
assert sum([isinstance(node.op, tensor.Elemwise)
for node in topo]) == 0
# let debugmode catch errors
f()
def test_local_gpu_elemwise_0():
"""
Test local_gpu_elemwise_0 when there is a dtype upcastable to float32
"""
a = tensor.bmatrix()
b = tensor.fmatrix()
c = tensor.fmatrix()
a_v = (numpy.random.rand(4, 5) * 10).astype("int8")
b_v = (numpy.random.rand(4, 5) * 10).astype("float32")
c_v = (numpy.random.rand(4, 5) * 10).astype("float32")
# Due to optimization order, this composite is created when all
# the op are on the gpu.
f = theano.function([a, b, c], a + b + c, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1
utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)
# Now test with the composite already on the cpu before we move it
# to the gpu
a_s = theano.scalar.int8()
b_s = theano.scalar.float32()
c_s = theano.scalar.float32()
out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s])
out_op = tensor.Elemwise(out_s)
f = theano.function([a, b, c], out_op(a, b, c), mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1
utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)
# Test multiple output
a_s = theano.scalar.float32()
a = tensor.fmatrix()
from theano.scalar.basic import identity
out_s = theano.scalar.Composite([a_s, b_s, c_s],
[identity(a_s), identity(c_s), identity(b_s)])
outs_op = tensor.Elemwise(out_s)
f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 0
out = f(a_v, b_v, c_v)
utt.assert_allclose(out[0], a_v)
utt.assert_allclose(out[1], c_v)
utt.assert_allclose(out[2], b_v)
# Test multiple output
out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s, a_s * c_s])
outs_op = tensor.Elemwise(out_s)
f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 0
out = f(a_v, b_v, c_v)
utt.assert_allclose(out[0], a_v + b_v)
utt.assert_allclose(out[1], a_v * c_v)
# Test non-contiguous input
c = cuda.shared_constructor(c_v)
f = theano.function([a, b], outs_op(a[::2], b[::2], c[::2]),
mode=mode_with_gpu)
out = f(a_v, b_v)
utt.assert_allclose(out[0], a_v[::2] + b_v[::2])
utt.assert_allclose(out[1], a_v[::2] * c_v[::2])
def test_elemwise_fusion():
""" Test the the GpuElemwise fusion work correctly"""
shape = (3, 4)
a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),
dtype='float32'), 'a')
b = tensor.fmatrix()
c = tensor.fmatrix()
f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
for i, node in enumerate(topo):
print(i, node, file=sys.stdout)
assert len(topo) == 4
assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)
# let debugmode catch errors
f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),
theano._asarray(numpy.random.rand(*shape), dtype='float32'))
import theano.tests.test_ifelse
class TestIfElse(theano.tests.test_ifelse.test_ifelse):
dtype = "float32"
mode = mode_with_gpu
cast_output = staticmethod(basic_ops.as_cuda_ndarray_variable)
shared = staticmethod(cuda.shared_constructor)
def get_ifelse(self, n):
return theano.ifelse.IfElse(n, gpu=True, as_view=True)
def test_incsubtensor_mixed():
# This catches a bug that occurred when incrementing
# a float32 tensor by a float64 tensor.
# The result is defined to be float32, so it is OK
# to downcast the float64 increment in order to
# transfer it to the GPU.
# The bug was that the optimization called GpuFromHost
# without casting first, causing the optimization to
# fail.
X = tensor.fmatrix()
Y = tensor.dmatrix()
Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
f = theano.function([X, Y], Z, mode=mode_with_gpu)
packed, = f.maker.fgraph.inputs[1].clients
client, idx = packed
print(client)
assert isinstance(client.op, tensor.Elemwise)
assert isinstance(client.op.scalar_op, theano.scalar.Cast)
packed, = client.outputs[0].clients
client, idx = packed
assert isinstance(client.op, cuda.GpuFromHost)
def test_erfinvgpu():
""" Test that local_gpu_elemwise_0 replaces Erfinv with ErfinvGPU """
x = tensor.fmatrix()
f = theano.function([x], tensor.Elemwise(erfinv)(x), mode=mode_with_gpu)
f2 = theano.function([x], tensor.Elemwise(erfinv)(x),
mode=mode_without_gpu)
assert isinstance(f.maker.fgraph.toposort()[1].op, cuda.GpuElemwise)
assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op,
cuda.elemwise.ErfinvGPU)
xv = numpy.random.rand(7, 8).astype('float32')
if imported_scipy_special:
assert numpy.allclose(f(xv), f2(xv))
def test_local_gpu_solve():
if not cula.cula_available:
raise SkipTest('Optional dependency CULA not available')
numpy.random.seed(1)
def cmp(a_shp, b_shp):
a0 = numpy.random.uniform(-0.4, 0.4,
a_shp).astype('float32')
a = cuda.shared_constructor(a0, 'a')
b0 = numpy.random.uniform(-0.4, 0.4,
b_shp).astype('float32')
b = cuda.shared_constructor(b0, 'b')
f = pfunc([], tensor.slinalg.solve(a, b), mode=mode_with_gpu)
assert isinstance(f.maker.fgraph.toposort()[1].inputs[0].owner.op,
cuda.cula.GpuSolve)
assert cuda.opt.local_gpu_solve.transform(
tensor.slinalg.solve(a, b).owner)
out = f()
assert numpy.allclose(numpy.dot(a0, out), b0)
cmp((6, 6), (6, 1))
cmp((5, 5), (5, 1))
def test_local_gpu_dot_to_dot22dot():
def cmp(a_shp, b_shp):
a0 = numpy.random.rand(*a_shp).astype('float32')
a = cuda.shared_constructor(a0, 'a')
b0 = numpy.random.rand(*b_shp).astype('float32')
b = cuda.shared_constructor(b0, 'b')
f = pfunc([], tensor.dot(a, b), mode=mode_with_gpu)
assert cuda.opt.local_gpu_dot_to_dot22.transform(
tensor.dot(a, b).owner)
out = f()
assert numpy.allclose(numpy.dot(a0, b0), out)
# Try with a matrix equal to a0, but with strides in both dims
a.set_value(a0)
a.set_value(
a.get_value(borrow=True,
return_internal_type=True)[::-1],
borrow=True)
f()
cmp((4,), (4, 5))
cmp((3, 4), (4,))
def test_blocksparse_gpu_gemv_opt():
b = tensor.fmatrix()
W = tensor.ftensor4()
h = tensor.ftensor3()
iIdx = tensor.lmatrix()
oIdx = tensor.lmatrix()
o = sparse_block_dot(W, h, iIdx, b, oIdx)
f = theano.function([W, h, iIdx, b, oIdx], o, mode=mode_with_gpu)
assert sum(1 for n in f.maker.fgraph.apply_nodes
if isinstance(n.op, GpuSparseBlockGemv)) == 1
def test_blocksparse_gpu_outer_opt():
b = tensor.fmatrix()
W = tensor.ftensor4()
h = tensor.ftensor3()
iIdx = tensor.lmatrix()
oIdx = tensor.lmatrix()
o = sparse_block_dot(W, h, iIdx, b, oIdx)
f = theano.function([W, h, iIdx, b, oIdx], [o, tensor.grad(o.sum(),
wrt=W)],
mode=mode_with_gpu)
assert sum(1 for n in f.maker.fgraph.apply_nodes
if isinstance(n.op, GpuSparseBlockOuter)) == 1
class test_diag(theano.tensor.tests.test_nlinalg.test_diag):
mode = mode_with_gpu
shared = staticmethod(cuda.shared_constructor)
floatX = 'float32'
type = CudaNdarrayType
def __init__(self, name):
super(theano.tensor.tests.test_nlinalg.test_diag,
self).__init__(name)
class Test_GpuReshape(test_opt.Test_Reshape):
def setUp(self):
self.mode = mode_with_gpu
self.op = basic_ops.GpuReshape
def test_local_abstractconv_gemm():
""" We test it here as this is the optimization only that we test.
This test gh-4036"""
image = tensor.ftensor4()
W = tensor.ftensor4()
conv = tensor.nnet.conv2d(image,
W,
input_shape=(1, 32, 32, 32),
filter_shape=(32, 32, 3, 3),
border_mode='half')
f = theano.function([image, W], [conv], mode=mode_with_gpu)
f(numpy.random.rand(1, 32, 32, 32).astype('float32'),
numpy.random.rand(32, 32, 3, 3).astype('float32'))
if __name__ == '__main__':
test_gpualloc()
test_opt_gpujoin_onlyajoin()
test_opt_gpujoin_joinvectors_elemwise_then_minusone()
test_opt_gpujoin_joinvectors_negativeaxes()
| mit |
Gravecorp/Gap | packages/IronPython.StdLib.2.7.3/content/Lib/sqlite3/dump.py | 247 | 2350 | # Mimic the sqlite3 console shell's .dump command
# Author: Paul Kippes <kippesp@gmail.com>
def _iterdump(connection):
"""
Returns an iterator to the dump of the database in an SQL text format.
Used to produce an SQL dump of the database. Useful to save an in-memory
database for later restoration. This function should not be called
directly but instead called from the Connection method, iterdump().
"""
cu = connection.cursor()
yield('BEGIN TRANSACTION;')
# sqlite_master table contains the SQL CREATE statements for the database.
q = """
SELECT name, type, sql
FROM sqlite_master
WHERE sql NOT NULL AND
type == 'table'
"""
schema_res = cu.execute(q)
for table_name, type, sql in schema_res.fetchall():
if table_name == 'sqlite_sequence':
yield('DELETE FROM sqlite_sequence;')
elif table_name == 'sqlite_stat1':
yield('ANALYZE sqlite_master;')
elif table_name.startswith('sqlite_'):
continue
# NOTE: Virtual table support not implemented
#elif sql.startswith('CREATE VIRTUAL TABLE'):
# qtable = table_name.replace("'", "''")
# yield("INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"\
# "VALUES('table','%s','%s',0,'%s');" %
# qtable,
# qtable,
# sql.replace("''"))
else:
yield('%s;' % sql)
# Build the insert statement for each row of the current table
res = cu.execute("PRAGMA table_info('%s')" % table_name)
column_names = [str(table_info[1]) for table_info in res.fetchall()]
q = "SELECT 'INSERT INTO \"%(tbl_name)s\" VALUES("
q += ",".join(["'||quote(" + col + ")||'" for col in column_names])
q += ")' FROM '%(tbl_name)s'"
query_res = cu.execute(q % {'tbl_name': table_name})
for row in query_res:
yield("%s;" % row[0])
# Now when the type is 'index', 'trigger', or 'view'
q = """
SELECT name, type, sql
FROM sqlite_master
WHERE sql NOT NULL AND
type IN ('index', 'trigger', 'view')
"""
schema_res = cu.execute(q)
for name, type, sql in schema_res.fetchall():
yield('%s;' % sql)
yield('COMMIT;')
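# A minimal sketch of driving _iterdump() through the public API (the
# in-memory database is illustrative):
#
#   import sqlite3
#   con = sqlite3.connect(':memory:')
#   con.execute("CREATE TABLE t (x)")
#   con.execute("INSERT INTO t VALUES (1)")
#   for line in con.iterdump():  # Connection.iterdump() delegates here
#       print line               # Python 2 print, matching this module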
| mpl-2.0 |
Distrotech/intellij-community | python/lib/Lib/site-packages/django/core/handlers/base.py | 71 | 11444 | import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
from django.utils.log import getLogger
logger = getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
mw_module, mw_classname = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
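    # For reference, a typical settings.MIDDLEWARE_CLASSES value consumed by
    # load_middleware() (illustrative; any dotted paths to middleware classes
    # providing the hooks above will work):
    #
    #   MIDDLEWARE_CLASSES = (
    #       'django.middleware.common.CommonMiddleware',
    #       'django.contrib.sessions.middleware.SessionMiddleware',
    #   )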
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
            # Set up the default url resolver for this thread. This code is
            # outside the try/except so we don't get a spurious "unbound local
            # variable" exception in the event an exception is raised before
            # resolver is set
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception, e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
# Complain if the view returned None (a common error).
if response is None:
try:
view_name = callback.func_name # If it's a function
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__' # If it's a class
raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
except http.Http404, e:
logger.warning('Not Found: %s' % request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
try:
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
logger.warning('Forbidden (Permission denied): %s' % request.path,
extra={
'status_code': 403,
'request': request
})
response = http.HttpResponseForbidden('<h1>Permission denied</h1>')
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
# Reset URLconf for this thread on the way out for complete
# isolation of request.urlconf
urlresolvers.set_urlconf(None)
try:
# If the response supports deferred rendering, apply template
# response middleware and the render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response.render()
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
logger.error('Internal Server Error: %s' % request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request':request
}
)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
raise exc_info[1], None, exc_info[2]
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
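    # A response fix is just a callable ``fix(request, response)`` returning
    # the (possibly modified) response. A sketch of a custom one
    # (hypothetical, not part of Django):
    #
    #   def add_example_header(request, response):
    #       response['X-Example'] = '1'
    #       return response
    #
    # Appending it to ``response_fixes`` would run it after the built-ins.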
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_unicode(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', u'')
if not script_url:
script_url = environ.get('REDIRECT_URL', u'')
if script_url:
return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_unicode(environ.get('SCRIPT_NAME', u''))
| apache-2.0 |
froch/kubernetes-py | kubernetes_py/models/v1/JobStatus.py | 3 | 4434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes_py.models.v1.JobCondition import JobCondition
from kubernetes_py.utils import is_valid_list, is_valid_string
class JobStatus(object):
"""
http://kubernetes.io/docs/api-reference/batch/v1/definitions/#_v1_jobstatus
"""
def __init__(self, model=None):
super(JobStatus, self).__init__()
self._conditions = []
self._start_time = None
self._completion_time = None
self._active = None
self._succeeded = None
self._failed = None
if model is not None:
self._build_with_model(model)
def _build_with_model(self, model=None):
if "conditions" in model:
conds = []
for c in model["conditions"]:
cond = JobCondition(c)
conds.append(cond)
self.conditions = conds
if "startTime" in model:
self.start_time = model["startTime"]
if "completionTime" in model:
self.completion_time = model["completionTime"]
if "active" in model:
self.active = model["active"]
if "succeeded" in model:
self.succeeded = model["succeeded"]
if "failed" in model:
self.failed = model["failed"]
# --------------------------------------------------------------------------------- conditions
@property
def conditions(self):
return self._conditions
@conditions.setter
def conditions(self, conds=None):
if not is_valid_list(conds, JobCondition):
raise SyntaxError("JobStatus: conditions: [ {} ] is invalid.".format(conds))
self._conditions = conds
# --------------------------------------------------------------------------------- startTime
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, time=None):
if not is_valid_string(time):
raise SyntaxError("JobStatus: start_time: [ {} ] is invalid.".format(time))
self._start_time = time
# --------------------------------------------------------------------------------- completionTime
@property
def completion_time(self):
return self._completion_time
@completion_time.setter
def completion_time(self, time=None):
if not is_valid_string(time):
raise SyntaxError("JobStatus: completion_time: [ {} ] is invalid.".format(time))
self._completion_time = time
# --------------------------------------------------------------------------------- active
@property
def active(self):
return self._active
@active.setter
def active(self, a=None):
if not isinstance(a, int):
raise SyntaxError("JobStatus: active: [ {} ] is invalid.".format(a))
self._active = a
# --------------------------------------------------------------------------------- succeeded
@property
def succeeded(self):
return self._succeeded
@succeeded.setter
def succeeded(self, s=None):
if not isinstance(s, int):
raise SyntaxError("JobStatus: succeeded: [ {} ] is invalid.".format(s))
self._succeeded = s
# --------------------------------------------------------------------------------- failed
@property
def failed(self):
return self._failed
@failed.setter
def failed(self, f=None):
if not isinstance(f, int):
raise SyntaxError("JobStatus: failed: [ {} ] is invalid.".format(f))
self._failed = f
# --------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.conditions is not None:
data["conditions"] = [x.serialize() for x in self.conditions]
if self.start_time is not None:
data["startTime"] = self.start_time
if self.completion_time is not None:
data["completionTime"] = self.completion_time
if self.active is not None:
data["active"] = self.active
if self.succeeded is not None:
data["succeeded"] = self.succeeded
if self.failed is not None:
data["failed"] = self.failed
return data
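# Minimal usage sketch (not part of the original module): build a status from
# a hypothetical API dict and round-trip it through serialize().
if __name__ == "__main__":
    status = JobStatus({
        "startTime": "2017-01-01T00:00:00Z",
        "completionTime": "2017-01-01T00:05:00Z",
        "active": 0,
        "succeeded": 1,
        "failed": 0,
    })
    assert status.serialize()["succeeded"] == 1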
| apache-2.0 |
kingctan/Misago | misago/threads/events.py | 8 | 2555 | from cgi import escape
from misago.acl import add_acl
from misago.threads.checksums import update_event_checksum
from misago.threads.models import Event
__all__ = ['record_event', 'add_events_to_posts']
LINK_TEMPLATE = '<a href="%s" class="event-%s">%s</a>'
NAME_TEMPLATE = '<strong class="event-%s">%s</strong>'
def format_message(message, links):
if links:
formats = {}
for name, value in links.items():
if isinstance(value, basestring):
formats[name] = NAME_TEMPLATE % (escape(name), escape(value))
else:
try:
replaces = (
escape(value.get_absolute_url()),
escape(name),
escape(unicode(value))
)
except AttributeError:
replaces = (
escape(value[1]),
escape(name),
escape(value[0])
)
formats[name] = LINK_TEMPLATE % replaces
return message % formats
else:
return message
def record_event(user, thread, icon, message, links=None):
event = Event.objects.create(
forum=thread.forum,
thread=thread,
author=user,
author_name=user.username,
author_slug=user.slug,
icon=icon,
message=format_message(message, links))
update_event_checksum(event)
event.save(update_fields=['checksum'])
thread.has_events = True
return event
def add_events_to_posts(user, thread, posts, delimeter=None):
if thread.has_events:
real_add_events_to_posts(user, thread, posts, delimeter)
else:
for post in posts:
post.events = []
def real_add_events_to_posts(user, thread, posts, delimeter=None):
start_date = posts[0].posted_on
events_queryset = thread.event_set.filter(occured_on__gte=start_date)
if delimeter:
events_queryset = events_queryset.filter(occured_on__lt=delimeter)
events_queryset = events_queryset.order_by('id')
acl = user.acl['forums'].get(thread.forum_id, {})
if not acl.get('can_hide_events'):
events_queryset = events_queryset.filter(is_hidden=False)
events = [e for e in events_queryset[:50]]
add_acl(user, events)
for i, post in enumerate(posts[:-1]):
post.events = []
while events and events[0].occured_on < posts[i + 1].posted_on:
post.events.append(events.pop(0))
posts[-1].events = events
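# Illustrative sketch (not part of Misago): link values may be plain strings
# or (label, url) pairs; both are HTML-escaped by format_message. The values
# below are hypothetical.
if __name__ == '__main__':
    print(format_message(u'changed title to %(title)s',
                         {'title': (u'New <title>', u'/thread/1/')}))
    # -> changed title to <a href="/thread/1/" class="event-title">New &lt;title&gt;</a>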
| gpl-2.0 |
ndchorley/scipy | scipy/signal/tests/test_windows.py | 74 | 9476 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import array
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
run_module_suite, assert_raises, assert_allclose)
from scipy import signal
window_funcs = [
('boxcar', ()),
('triang', ()),
('parzen', ()),
('bohman', ()),
('blackman', ()),
('nuttall', ()),
('blackmanharris', ()),
('flattop', ()),
('bartlett', ()),
('hanning', ()),
('barthann', ()),
('hamming', ()),
('kaiser', (1,)),
('gaussian', (0.5,)),
('general_gaussian', (1.5, 2)),
('chebwin', (1,)),
('slepian', (2,)),
('cosine', ()),
('hann', ()),
('exponential', ()),
('tukey', (0.5,)),
]
cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
0.198891, 0.235450, 0.274846, 0.316836,
0.361119, 0.407338, 0.455079, 0.503883,
0.553248, 0.602637, 0.651489, 0.699227,
0.745266, 0.789028, 0.829947, 0.867485,
0.901138, 0.930448, 0.955010, 0.974482,
0.988591, 0.997138, 1.000000, 0.997138,
0.988591, 0.974482, 0.955010, 0.930448,
0.901138, 0.867485, 0.829947, 0.789028,
0.745266, 0.699227, 0.651489, 0.602637,
0.553248, 0.503883, 0.455079, 0.407338,
0.361119, 0.316836, 0.274846, 0.235450,
0.198891, 0.165348, 0.134941, 0.107729,
0.200938])
cheb_even_true = array([0.203894, 0.107279, 0.133904,
0.163608, 0.196338, 0.231986,
0.270385, 0.311313, 0.354493,
0.399594, 0.446233, 0.493983,
0.542378, 0.590916, 0.639071,
0.686302, 0.732055, 0.775783,
0.816944, 0.855021, 0.889525,
0.920006, 0.946060, 0.967339,
0.983557, 0.994494, 1.000000,
1.000000, 0.994494, 0.983557,
0.967339, 0.946060, 0.920006,
0.889525, 0.855021, 0.816944,
0.775783, 0.732055, 0.686302,
0.639071, 0.590916, 0.542378,
0.493983, 0.446233, 0.399594,
0.354493, 0.311313, 0.270385,
0.231986, 0.196338, 0.163608,
0.133904, 0.107279, 0.203894])
class TestChebWin(object):
def test_cheb_odd_high_attenuation(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_odd = signal.chebwin(53, at=-40)
assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)
def test_cheb_even_high_attenuation(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_even = signal.chebwin(54, at=-40)
assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
def test_cheb_odd_low_attenuation(self):
cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
0.610151, 0.586405, 0.519052,
1.000000])
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_odd = signal.chebwin(7, at=-10)
assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
def test_cheb_even_low_attenuation(self):
cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
0.541338, 0.541338, 0.51027,
0.451924, 1.000000])
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_even = signal.chebwin(8, at=-10)
assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
exponential_data = {
(4, None, 0.2, False): array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03]),
(4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988,
0.0820849986238988, 0.00055308437014783]),
(4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342,
0.60653065971263342, 0.22313016014842982]),
(4, 2, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03,
1.00000000000000000e+00, 6.73794699908546700e-03]),
(4, 2, 0.2, True): None,
(4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, 2, 1.0, True): None,
(5, None, 0.2, False): array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03, 4.53999297624848542e-05]),
(5, None, 0.2, True): array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03, 4.53999297624848542e-05]),
(5, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, 2, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03,
1.00000000000000000e+00, 6.73794699908546700e-03,
4.53999297624848542e-05]),
(5, 2, 0.2, True): None,
(5, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, 2, 1.0, True): None
}
def test_exponential():
for k, v in exponential_data.items():
if v is None:
assert_raises(ValueError, signal.exponential, *k)
else:
win = signal.exponential(*k)
assert_allclose(win, v, rtol=1e-14)
tukey_data = {
(4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]),
(4, 0.9, True): array([0.0, 0.84312081893436686, 0.84312081893436686, 0.0]),
(4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]),
(4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]),
(4, 0.9, False): array([0.0, 0.58682408883346526, 1.0, 0.58682408883346526]),
(4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]),
(5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]),
(5, 0.8, True): array([0.0, 0.69134171618254492, 1.0, 0.69134171618254492, 0.0]),
(5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]),
}
def test_tukey():
# Test against hardcoded data
for k, v in tukey_data.items():
if v is None:
assert_raises(ValueError, signal.tukey, *k)
else:
win = signal.tukey(*k)
assert_allclose(win, v, rtol=1e-14)
# Test extremes of alpha correspond to boxcar and hann
tuk0 = signal.tukey(100,0)
tuk1 = signal.tukey(100,1)
box0 = signal.boxcar(100)
han1 = signal.hann(100)
assert_array_almost_equal(tuk0, box0)
assert_array_almost_equal(tuk1, han1)
class TestGetWindow(object):
def test_boxcar(self):
w = signal.get_window('boxcar', 12)
assert_array_equal(w, np.ones_like(w))
def test_cheb_odd(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
w = signal.get_window(('chebwin', -40), 53, fftbins=False)
assert_array_almost_equal(w, cheb_odd_true, decimal=4)
def test_cheb_even(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
w = signal.get_window(('chebwin', -40), 54, fftbins=False)
assert_array_almost_equal(w, cheb_even_true, decimal=4)
def test_array_as_window(self):
# github issue 3603
osfactor = 128
sig = np.arange(128)
win = signal.get_window(('kaiser', 8.0), osfactor // 2)
assert_raises(ValueError, signal.resample, (sig, len(sig) * osfactor), {'window': win})
def test_windowfunc_basics():
for window_name, params in window_funcs:
window = getattr(signal, window_name)
with warnings.catch_warnings(record=True): # window is not suitable...
w1 = window(7, *params, sym=True)
w2 = window(7, *params, sym=False)
assert_array_almost_equal(w1, w2)
# just check the below runs
window(6, *params, sym=True)
window(6, *params, sym=False)
def test_needs_params():
for winstr in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss', 'dpss',
'chebwin', 'cheb', 'exponential', 'poisson', 'tukey',
'tuk']:
assert_raises(ValueError, signal.get_window, winstr, 7)
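# Illustrative sketch (not part of the original suite): with this scipy
# version, a periodic (sym=False) window of even length equals the
# next-longer symmetric window with its final sample dropped.
def _example_even_periodic_window():
    assert_allclose(signal.hann(6, sym=False), signal.hann(7, sym=True)[:-1],
                    rtol=1e-14)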
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
JeremyMorgan/Raspberry_Pi_Weather_Station | sensortests/AM2302/AdafruitDHT.py | 9 | 2290 | #!/usr/bin/python
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import Adafruit_DHT
# Parse command line parameters.
sensor_args = { '11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
sensor = sensor_args[sys.argv[1]]
pin = sys.argv[2]
else:
print 'usage: sudo ./Adafruit_DHT.py [11|22|2302] GPIOpin#'
print 'example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO #4'
sys.exit(1)
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# Un-comment the line below to convert the temperature to Fahrenheit.
# temperature = temperature * 9/5.0 + 32
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
if humidity is not None and temperature is not None:
print 'Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity)
else:
print 'Failed to get reading. Try again!'
sys.exit(1)
| gpl-2.0 |
sajuptpm/neutron-ipam | neutron/extensions/allowedaddresspairs.py | 1 | 4938 | # Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as nexception
from oslo.config import cfg
allowed_address_pair_opts = [
#TODO(limao): use quota framework when it support quota for attributes
cfg.IntOpt('max_allowed_address_pair', default=10,
help=_("Maximum number of allowed address pairs")),
]
cfg.CONF.register_opts(allowed_address_pair_opts)
class AllowedAddressPairsMissingIP(nexception.InvalidInput):
message = _("AllowedAddressPair must contain ip_address")
class AddressPairAndPortSecurityRequired(nexception.Conflict):
message = _("Port Security must be enabled in order to have allowed "
"address pairs on a port.")
class DuplicateAddressPairInRequest(nexception.InvalidInput):
message = _("Request contains duplicate address pair: "
"mac_address %(mac_address)s ip_address %(ip_address)s.")
class AddressPairMatchesPortFixedIPAndMac(nexception.InvalidInput):
message = _("Port's Fixed IP and Mac Address match an address pair entry.")
class AllowedAddressPairExhausted(nexception.BadRequest):
message = _("The number of allowed address pair "
"exceeds the maximum %(quota)s.")
def _validate_allowed_address_pairs(address_pairs, valid_values=None):
unique_check = {}
if len(address_pairs) > cfg.CONF.max_allowed_address_pair:
raise AllowedAddressPairExhausted(
quota=cfg.CONF.max_allowed_address_pair)
for address_pair in address_pairs:
# mac_address is optional, if not set we use the mac on the port
if 'mac_address' in address_pair:
msg = attr._validate_mac_address(address_pair['mac_address'])
if msg:
raise webob.exc.HTTPBadRequest(msg)
if 'ip_address' not in address_pair:
raise AllowedAddressPairsMissingIP()
mac = address_pair.get('mac_address')
ip_address = address_pair['ip_address']
if (mac, ip_address) not in unique_check:
unique_check[(mac, ip_address)] = None
else:
raise DuplicateAddressPairInRequest(mac_address=mac,
ip_address=ip_address)
invalid_attrs = set(address_pair.keys()) - set(['mac_address',
'ip_address'])
if invalid_attrs:
msg = (_("Unrecognized attribute(s) '%s'") %
', '.join(set(address_pair.keys()) -
set(['mac_address', 'ip_address'])))
raise webob.exc.HTTPBadRequest(msg)
if '/' in ip_address:
msg = attr._validate_subnet(ip_address)
else:
msg = attr._validate_ip_address(ip_address)
if msg:
raise webob.exc.HTTPBadRequest(msg)
attr.validators['type:validate_allowed_address_pairs'] = (
_validate_allowed_address_pairs)
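# Illustrative sketch (not part of Neutron): the validator returns None for a
# well-formed list and raises on malformed input; the addresses below are
# hypothetical.
def _example_validate_pairs():
    pairs = [{'ip_address': '10.0.0.5'},
             {'mac_address': 'fa:16:3e:00:00:01', 'ip_address': '10.0.0.6'}]
    assert _validate_allowed_address_pairs(pairs) is None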
ADDRESS_PAIRS = 'allowed_address_pairs'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
ADDRESS_PAIRS: {'allow_post': True, 'allow_put': True,
'convert_list_to':
attr.convert_kvp_list_to_dict,
'validate': {'type:validate_allowed_address_pairs':
None},
'enforce_policy': True,
'default': attr.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
class Allowedaddresspairs(object):
"""Extension class supporting allowed address pairs."""
@classmethod
def get_name(cls):
return "Allowed Address Pairs"
@classmethod
def get_alias(cls):
return "allowed-address-pairs"
@classmethod
def get_description(cls):
return "Provides allowed address pairs"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/allowedaddresspairs/api/v2.0"
@classmethod
def get_updated(cls):
return "2013-07-23T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
attr.PLURALS.update({'allowed_address_pairs':
'allowed_address_pair'})
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apache-2.0 |
blueboxjesse/giftwrap | giftwrap/gerrit.py | 1 | 4684 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014, Craig Tracey <craigtracey@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
import re
import requests
from pygerrit.rest import GerritRestAPI
DEFAULT_GERRIT_URL = 'https://review.openstack.org'
class GerritReview(object):
def __init__(self, changeid, project, gerrit_url=DEFAULT_GERRIT_URL):
self.changeid = changeid
self.project = project
self._gerrit_url = gerrit_url
self._restclient = None
def build_pip_dependencies(self, py26=False, py27=True, string=False):
url = self._get_gate_build_log_url(py26, py27)
response = requests.get(url)
if response.status_code != 200:
raise Exception("Unable to get console log at %s. Error: %d" %
(url, response.status_code))
log = response.text.encode('utf-8')
freeze_found = False
dependencies = []
for line in log.split('\n'):
line = re.sub('.*\|\s*', '', line)
if not freeze_found:
if line.endswith("pip freeze") or line.endswith("pbr freeze"):
freeze_found = True
continue
elif re.match('[\w\-]+==.+', line) and not line.startswith('-e'):
dependencies.append(line)
short_name = self.project.split('/')[1]
dependencies = filter(lambda x: not x.startswith(short_name + "=="),
dependencies)
if string:
return (' ').join(dependencies)
return dependencies
def _get_rest_client(self):
if not self._restclient:
self._restclient = GerritRestAPI(url=self._gerrit_url)
return self._restclient
def _get_review_detail(self):
""" get review details for a given change ID """
restclient = self._get_rest_client()
url = "/changes/?q=%s" % self.changeid
changes = restclient.get(url)
change = None
for c in changes:
if c['project'] == self.project:
change = c
break
if not change:
raise Exception("could not find change with ID: %s" %
self.changeid)
detail = restclient.get("/changes/%s/detail" % change['id'])
return detail
    def _get_review_messages(self):
details = self._get_review_detail()
return details['messages']
def _get_gate_build_log_url(self, py26, py27):
        messages = self._get_review_messages()
messages.reverse()
mergemsg = None
for message in messages:
msgtext = message['message']
if re.search('Patch Set \d+: Verified', msgtext):
mergemsg = msgtext
break
gate_info = self._parse_merge_message(mergemsg)
url = None
for gate in gate_info:
if py26 and re.match('gate\-.+\-python26', gate['name']):
url = gate['url']
if py27 and re.match('gate\-.+\-python27', gate['name']):
url = gate['url']
# check if it is console.html or console.html.gz
resp = requests.get(url)
if resp.status_code != 200:
raise Exception("Unable to find the build's console log for %s" %
url)
build_log = None
if 'console.html.gz' in resp.text:
build_log = 'console.html.gz'
elif 'console.html' in resp.text:
build_log = 'console.html'
else:
raise Exception("Didn't find a build log. Does one exist?")
if url:
return "%s/%s" % (url, build_log)
return url
def _parse_merge_message(self, msg):
""" a function that parses a successful gate gerrit message """
gate_info = []
for line in msg.split('\n'):
parts = re.split('\s+', line)
if parts[0] == '-':
gate = {}
gate['name'] = parts[1]
gate['url'] = parts[2]
gate['result'] = parts[4]
gate_info.append(gate)
return gate_info
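# Illustrative sketch (not part of giftwrap): parse a hypothetical
# verified-gate message without touching the network.
if __name__ == '__main__':
    review = GerritReview('Iabc123', 'openstack/example')
    msg = ('Patch Set 1: Verified+1\n'
           '- gate-example-python27 http://logs.example.org/42/ : SUCCESS in 5m 02s')
    print(review._parse_merge_message(msg))
    # -> [{'name': 'gate-example-python27',
    #      'url': 'http://logs.example.org/42/', 'result': 'SUCCESS'}]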
| apache-2.0 |
eusi/MissionPlanerHM | Lib/site-packages/scipy/constants/constants.py | 55 | 8861 | """
Collection of physical constants and conversion factors.
Most constants are in SI units, so you can do
print '10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots'
The list is not meant to be comprehensive, but just a convenient list for everyday use.
"""
"""
BasSw 2006
physical constants: imported from CODATA
unit conversion: see e.g. NIST special publication 811
Use at own risk: double-check values before calculating your Mars orbit-insertion burn.
Some constants exist in a few variants, which are marked with suffixes.
The ones without any suffix should be the most common one.
"""
import math as _math
from codata import value as _cd
import numpy as np
#mathematical constants
pi = _math.pi
golden = golden_ratio = (1 + _math.sqrt(5)) / 2
#SI prefixes
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
#binary prefixes
kibi = 2**10
mebi = 2**20
gibi = 2**30
tebi = 2**40
pebi = 2**50
exbi = 2**60
zebi = 2**70
yobi = 2**80
#physical constants
c = speed_of_light = _cd('speed of light in vacuum')
mu_0 = 4e-7*pi
epsilon_0 = 1 / (mu_0*c*c)
h = Planck = _cd('Planck constant')
hbar = h / (2 * pi)
G = gravitational_constant = _cd('Newtonian constant of gravitation')
g = _cd('standard acceleration of gravity')
e = elementary_charge = _cd('elementary charge')
R = gas_constant = _cd('molar gas constant')
alpha = fine_structure = _cd('fine-structure constant')
N_A = Avogadro = _cd('Avogadro constant')
k = Bolzmann = _cd('Boltzmann constant')
sigma = Stefan_Bolzmann = _cd('Stefan-Boltzmann constant')
Wien = _cd('Wien wavelength displacement law constant')
Rydberg = _cd('Rydberg constant')
#weight in kg
gram = 1e-3
metric_ton = 1e3
grain = 64.79891e-6
lb = pound = 7000 * grain #avoirdupois
oz = ounce = pound / 16
stone = 14 * pound
long_ton = 2240 * pound
short_ton = 2000 * pound
troy_ounce = 480 * grain #only for metals / gems
troy_pound = 12 * troy_ounce
carat = 200e-6
m_e = electron_mass = _cd('electron mass')
m_p = proton_mass = _cd('proton mass')
m_n = neutron_mass = _cd('neutron mass')
m_u = u = atomic_mass = _cd('atomic mass constant')
#angle in rad
degree = pi / 180
arcmin = arcminute = degree / 60
arcsec = arcsecond = arcmin / 60
#time in second
minute = 60.0
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day
Julian_year = 365.25 * day
#length in meter
inch = 0.0254
foot = 12 * inch
yard = 3 * foot
mile = 1760 * yard
mil = inch / 1000
pt = point = inch / 72 #typography
survey_foot = 1200.0 / 3937
survey_mile = 5280 * survey_foot
nautical_mile = 1852.0
fermi = 1e-15
angstrom = 1e-10
micron = 1e-6
au = astronomical_unit = 149597870691.0
light_year = Julian_year * c
parsec = au / arcsec
#pressure in pascal
atm = atmosphere = _cd('standard atmosphere')
bar = 1e5
torr = mmHg = atm / 760
psi = pound * g / (inch * inch)
#area in meter**2
hectare = 1e4
acre = 43560 * foot**2
#volume in meter**3
litre = liter = 1e-3
gallon = gallon_US = 231 * inch**3 #US
#pint = gallon_US / 8
fluid_ounce = fluid_ounce_US = gallon_US / 128
bbl = barrel = 42 * gallon_US #for oil
gallon_imp = 4.54609e-3 #uk
fluid_ounce_imp = gallon_imp / 160
#speed in meter per second
kmh = 1e3 / hour
mph = mile / hour
mach = speed_of_sound = 340.5 #approx value at 15 degrees in 1 atm. is this a common value?
knot = nautical_mile / hour
#temperature in kelvin
zero_Celsius = 273.15
degree_Fahrenheit = 1/1.8 #only for differences
#energy in joule
eV = electron_volt = elementary_charge # * 1 Volt
calorie = calorie_th = 4.184
calorie_IT = 4.1868
erg = 1e-7
Btu_th = pound * degree_Fahrenheit * calorie_th / gram
Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
ton_TNT = 1e9 * calorie_th
#Wh = watt_hour
#power in watt
hp = horsepower = 550 * foot * pound * g
#force in newton
dyn = dyne = 1e-5
lbf = pound_force = pound * g
kgf = kilogram_force = g # * 1 kg
#functions for conversions that are not linear
def C2K(C):
"""
Convert Celsius to Kelvin
Parameters
----------
C : array_like
Celsius temperature(s) to be converted.
Returns
-------
K : float or array of floats
Equivalent Kelvin temperature(s).
Notes
-----
Computes ``K = C + zero_Celsius`` where `zero_Celsius` = 273.15, i.e.,
(the absolute value of) temperature "absolute zero" as measured in Celsius.
Examples
--------
>>> from scipy.constants.constants import C2K
>>> C2K(np.array([-40, 40.0]))
array([ 233.15, 313.15])
"""
return np.asanyarray(C) + zero_Celsius
def K2C(K):
"""
Convert Kelvin to Celsius
Parameters
----------
K : array_like
Kelvin temperature(s) to be converted.
Returns
-------
C : float or array of floats
Equivalent Celsius temperature(s).
Notes
-----
Computes ``C = K - zero_Celsius`` where `zero_Celsius` = 273.15, i.e.,
(the absolute value of) temperature "absolute zero" as measured in Celsius.
Examples
--------
>>> from scipy.constants.constants import K2C
>>> K2C(np.array([233.15, 313.15]))
array([-40., 40.])
"""
return np.asanyarray(K) - zero_Celsius
def F2C(F):
"""
Convert Fahrenheit to Celsius
Parameters
----------
F : array_like
Fahrenheit temperature(s) to be converted.
Returns
-------
C : float or array of floats
Equivalent Celsius temperature(s).
Notes
-----
Computes ``C = (F - 32) / 1.8``.
Examples
--------
>>> from scipy.constants.constants import F2C
>>> F2C(np.array([-40, 40.0]))
array([-40. , 4.44444444])
"""
return (np.asanyarray(F) - 32) / 1.8
def C2F(C):
"""
Convert Celsius to Fahrenheit
Parameters
----------
C : array_like
Celsius temperature(s) to be converted.
Returns
-------
F : float or array of floats
Equivalent Fahrenheit temperature(s).
Notes
-----
Computes ``F = 1.8 * C + 32``.
Examples
--------
>>> from scipy.constants.constants import C2F
>>> C2F(np.array([-40, 40.0]))
array([ -40., 104.])
"""
return 1.8 * np.asanyarray(C) + 32
def F2K(F):
"""
Convert Fahrenheit to Kelvin
Parameters
----------
F : array_like
Fahrenheit temperature(s) to be converted.
Returns
-------
K : float or array of floats
Equivalent Kelvin temperature(s).
Notes
-----
Computes ``K = (F - 32)/1.8 + zero_Celsius`` where `zero_Celsius` =
273.15, i.e., (the absolute value of) temperature "absolute zero" as
measured in Celsius.
Examples
--------
>>> from scipy.constants.constants import F2K
>>> F2K(np.array([-40, 104]))
array([ 233.15, 313.15])
"""
return C2K(F2C(np.asanyarray(F)))
def K2F(K):
"""
Convert Kelvin to Fahrenheit
Parameters
----------
K : array_like
Kelvin temperature(s) to be converted.
Returns
-------
F : float or array of floats
Equivalent Fahrenheit temperature(s).
Notes
-----
Computes ``F = 1.8 * (K - zero_Celsius) + 32`` where `zero_Celsius` =
273.15, i.e., (the absolute value of) temperature "absolute zero" as
measured in Celsius.
Examples
--------
>>> from scipy.constants.constants import K2F
>>> K2F(np.array([233.15, 313.15]))
array([ -40., 104.])
"""
return C2F(K2C(np.asanyarray(K)))
#optics
def lambda2nu(lambda_):
"""
Convert wavelength to optical frequency
Parameters
----------
lambda : array_like
Wavelength(s) to be converted.
Returns
-------
nu : float or array of floats
Equivalent optical frequency.
Notes
-----
Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the
(vacuum) speed of light in meters/second.
Examples
--------
>>> from scipy.constants.constants import lambda2nu
>>> lambda2nu(np.array((1, speed_of_light)))
array([ 2.99792458e+08, 1.00000000e+00])
"""
return np.asanyarray(c) / lambda_
def nu2lambda(nu):
"""
Convert optical frequency to wavelength.
Parameters
----------
nu : array_like
Optical frequency to be converted.
Returns
-------
lambda : float or array of floats
Equivalent wavelength(s).
Notes
-----
Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the
(vacuum) speed of light in meters/second.
Examples
--------
>>> from scipy.constants.constants import nu2lambda
>>> nu2lambda(np.array((1, speed_of_light)))
array([ 2.99792458e+08, 1.00000000e+00])
"""
return c / np.asanyarray(nu)
| gpl-3.0 |
zshipko/libtwombly | py/twombly/drawing.py | 1 | 25228 | from __future__ import absolute_import
from ctypes import *
from numpy import zeros, arange, ndarray, asarray
from twombly.colors import _colors
import platform, os
from functools import partial
if platform.system() == "Darwin":
ext = "dylib"
else:
ext = "so"
twombly = cdll.LoadLibrary("libtwombly." + ext)
PATH_CMD_STOP = 0
PATH_CMD_MOVE_TO = 1
PATH_CMD_LINE_TO = 2
PATH_CMD_CURVE3 = 3
PATH_CMD_CURVE4 = 4
PATH_CMD_CURVEN = 5
PATH_CMD_CATROM = 6
PATH_CMD_UBSPLINE = 7
PATH_CMD_END_POLY = 0x0f
PATH_CMD_MASK = 0x0f
class PathCommand(object):
stop = PATH_CMD_STOP
move_to = PATH_CMD_MOVE_TO
line_to = PATH_CMD_LINE_TO
curve3 = PATH_CMD_CURVE3
curve4 = PATH_CMD_CURVE4
curven = PATH_CMD_CURVEN
catrom = PATH_CMD_CATROM
ubspline = PATH_CMD_UBSPLINE
end_poly = PATH_CMD_END_POLY
mask = PATH_CMD_MASK
class flags(object):
none = 0
ccw = 0x10
cw = 0x20
close = 0x40
mask = 0xF0
GRADIENT_CIRCLE = 0
GRADIENT_RADIAL = 1
GRADIENT_RADIAL_D = 2
GRADIENT_RADIAL_FOCUS = 3
GRADIENT_X = 4
GRADIENT_Y = 5
GRADIENT_DIAMOND = 6
GRADIENT_XY = 7
GRADIENT_SQRT_XY = 8
GRADIENT_CONIC = 9
CAP_BUTT = 0
CAP_SQUARE = 1
CAP_ROUND = 2
JOIN_MITER = 0
JOIN_MITER_REVERT = 1
JOIN_ROUND = 2
JOIN_BEVEL = 3
JOIN_MITER_ROUND = 4
class DrawingStyle(object):
class cap(object):
butt = CAP_BUTT
square = CAP_SQUARE
round = CAP_ROUND
class join(object):
miter = JOIN_MITER
miter_revert = JOIN_MITER_REVERT
round = JOIN_ROUND
bevel = JOIN_BEVEL
miter_round = JOIN_MITER_ROUND
class DrawingType(Structure):
''' C struct for drawing type'''
_fields_ = [
("handle", c_void_p),
("channels", c_int),
("bits_per_channel", c_int),
("is_bgr", c_bool)
]
class TransformType(Structure):
''' C struct for transformation matrix '''
_fields_ = [
("handle", c_void_p),
("free_handle", c_int)
]
class GradientType(Structure):
''' C struct for gradient '''
_fields_ = [
("handle", c_void_p)
]
def _method_decl(c_fn, res=None, args=[POINTER(DrawingType)]):
''' declare a ctypes function '''
tmp = c_fn
tmp.argtypes = args
tmp.restype = res
return tmp
_METHODS = dict(
empty=_method_decl(twombly.draw_empty, POINTER(DrawingType), args=[]),
create=_method_decl(twombly.draw_create,
POINTER(DrawingType), [c_int, c_int, c_int, c_void_p]),
create16=_method_decl(twombly.draw_create16,
POINTER(DrawingType), [c_int, c_int, c_int, c_void_p]),
create_bgr=_method_decl(twombly.draw_create_bgr,
POINTER(DrawingType), [c_int, c_int, c_int, c_void_p]),
create16_bgr=_method_decl(twombly.draw_create16_bgr,
POINTER(DrawingType), [c_int, c_int, c_int, c_void_p]),
free=_method_decl(twombly.draw_free, args=[POINTER(POINTER(DrawingType))]),
get_antialias=_method_decl(twombly.draw_get_antialias, c_bool),
set_antialias=_method_decl(twombly.draw_set_antialias, args=[POINTER(DrawingType), c_bool]),
get_preserve=_method_decl(twombly.draw_get_preserve, c_bool),
set_preserve=_method_decl(twombly.draw_set_preserve, args=[POINTER(DrawingType), c_bool]),
set_line_width=_method_decl(twombly.draw_set_line_width,
args=[POINTER(DrawingType), c_double]),
get_line_width=_method_decl(twombly.draw_get_line_width, c_double),
set_miter_limit=_method_decl(twombly.draw_set_miter_limit,
args=[POINTER(DrawingType), c_double]),
get_miter_limit=_method_decl(twombly.draw_get_miter_limit, c_double),
set_line_join=_method_decl(twombly.draw_set_line_join,
args=[POINTER(DrawingType), c_int]),
get_line_join=_method_decl(twombly.draw_get_line_join, c_int),
set_line_cap=_method_decl(twombly.draw_set_line_cap,
args=[POINTER(DrawingType), c_int]),
get_line_cap=_method_decl(twombly.draw_get_line_cap, c_int),
set_active_path=_method_decl(twombly.draw_set_active_path,
args=[POINTER(DrawingType), c_uint]),
get_active_path=_method_decl(twombly.draw_get_active_path, c_uint),
new_path=_method_decl(twombly.draw_new_path, c_uint),
rotate=_method_decl(twombly.draw_rotate, args=[POINTER(DrawingType), c_double]),
translate=_method_decl(twombly.draw_translate,
args=[POINTER(DrawingType), c_double, c_double]),
scale=_method_decl(twombly.draw_scale,
args=[POINTER(DrawingType), c_double]),
scale2=_method_decl(twombly.draw_scale2,
args=[POINTER(DrawingType), c_double, c_double]),
skew=_method_decl(twombly.draw_skew,
args=[POINTER(DrawingType), c_double, c_double]),
clear_transforms=_method_decl(twombly.draw_clear_transforms),
close_polygon=_method_decl(twombly.draw_close_polygon),
end_polygon=_method_decl(twombly.draw_end_polygon),
invert_polygon=_method_decl(twombly.draw_invert_polygon),
reset=_method_decl(twombly.draw_reset),
clear=_method_decl(twombly.draw_clear,
args=[POINTER(DrawingType), c_uint8, c_uint8, c_uint8, c_uint8]),
remove_all=_method_decl(twombly.draw_remove_all),
ellipse=_method_decl(twombly.draw_ellipse,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double]),
rect=_method_decl(twombly.draw_rect,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double]),
clip=_method_decl(twombly.draw_clip,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double]),
reset_clip=_method_decl(twombly.draw_reset_clip),
last_x=_method_decl(twombly.draw_last_x, c_double),
last_y=_method_decl(twombly.draw_last_y, c_double),
rel_to_abs=_method_decl(twombly.draw_rel_to_abs,
args=[POINTER(DrawingType), POINTER(c_double), POINTER(c_double)]),
move_to=_method_decl(twombly.draw_move_to,
args=[POINTER(DrawingType), c_double, c_double]),
move_rel=_method_decl(twombly.draw_move_rel,
args=[POINTER(DrawingType), c_double, c_double]),
line_to=_method_decl(twombly.draw_line_to,
args=[POINTER(DrawingType), c_double, c_double]),
vline_to=_method_decl(twombly.draw_vline_to,
args=[POINTER(DrawingType), c_double]),
hline_to=_method_decl(twombly.draw_hline_to,
args=[POINTER(DrawingType), c_double]),
vline_rel=_method_decl(twombly.draw_vline_rel,
args=[POINTER(DrawingType), c_double]),
hline_rel=_method_decl(twombly.draw_hline_rel,
args=[POINTER(DrawingType), c_double]),
line_rel=_method_decl(twombly.draw_line_rel,
args=[POINTER(DrawingType), c_double, c_double]),
curve_to2=_method_decl(twombly.draw_curve_to2,
args=[POINTER(DrawingType), c_double, c_double]),
curve_rel2=_method_decl(twombly.draw_curve_rel2,
args=[POINTER(DrawingType), c_double, c_double]),
curve_to4=_method_decl(twombly.draw_curve_to4,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double]),
curve_rel4=_method_decl(twombly.draw_curve_rel4,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double]),
curve_to6=_method_decl(twombly.draw_curve_to6,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double, c_double, c_double]),
curve_rel6=_method_decl(twombly.draw_curve_rel6,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double, c_double, c_double]),
arc_to=_method_decl(twombly.draw_arc_to,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double, c_double]),
arc_rel=_method_decl(twombly.draw_arc_rel,
args=[POINTER(DrawingType), c_double, c_double, c_double, c_double, c_double]),
text_simple=_method_decl(twombly.draw_text_simple, c_double,
args=[POINTER(DrawingType), c_double, c_double, c_char_p, c_int, c_double, c_char_p]),
set_color=_method_decl(twombly.draw_set_color,
args=[POINTER(DrawingType), c_uint8, c_uint8, c_uint8, c_uint8]),
fill=_method_decl(twombly.draw_fill),
stroke=_method_decl(twombly.draw_stroke),
stroke_color=_method_decl(twombly.draw_stroke_color, args=[POINTER(DrawingType), POINTER(c_float)]),
fill_color=_method_decl(twombly.draw_fill_color, args=[POINTER(DrawingType), POINTER(c_float)]),
dash_color=_method_decl(twombly.draw_stroke_color, args=[POINTER(DrawingType), POINTER(c_float), c_double, c_double]),
dash=_method_decl(
twombly.draw_dash, args=[POINTER(DrawingType), c_double, c_double]),
paint=_method_decl(twombly.draw_paint),
auto_close=_method_decl(twombly.draw_auto_close,
args=[POINTER(DrawingType), c_bool]),
in_path=_method_decl(twombly.draw_in_path, c_bool,
args=[POINTER(DrawingType), c_double, c_double]),
is_drawn=_method_decl(twombly.draw_in_path, c_bool,
args=[POINTER(DrawingType), c_double, c_double]),
get_vertex=_method_decl(twombly.draw_get_vertex, c_uint,
args=[POINTER(DrawingType), c_uint, POINTER(c_double), POINTER(c_double)]),
next_vertex=_method_decl(twombly.draw_next_vertex, c_uint,
args=[POINTER(DrawingType), POINTER(c_double), POINTER(c_double)]),
get_command=_method_decl(twombly.draw_get_command, c_uint,
args=[POINTER(DrawingType), c_uint]),
last_vertex=_method_decl(twombly.draw_last_vertex, c_uint,
args=[POINTER(DrawingType), POINTER(c_double), POINTER(c_double)]),
prev_vertex=_method_decl(twombly.draw_prev_vertex, c_uint,
args=[POINTER(DrawingType), POINTER(c_double), POINTER(c_double)]),
modify_vertex=_method_decl(twombly.draw_modify_vertex,
args=[POINTER(DrawingType), c_uint, c_double, c_double, c_uint]),
total_vertices=_method_decl(twombly.draw_total_vertices, c_uint),
join=_method_decl(twombly.draw_join,
args=[POINTER(DrawingType), POINTER(DrawingType)]),
concat=_method_decl(twombly.draw_concat,
args=[POINTER(DrawingType), POINTER(DrawingType)]),
# alpha mask
alpha_mask_init=_method_decl(twombly.draw_alpha_mask_init),
alpha_mask_free=_method_decl(twombly.draw_alpha_mask_free),
alpha_mask_fill=_method_decl(twombly.draw_alpha_mask_fill,
args=[POINTER(DrawingType), c_uint8]),
alpha_mask_get=_method_decl(twombly.draw_alpha_mask_get, c_uint8,
args=[POINTER(DrawingType), c_int32, c_int32]),
alpha_mask_set=_method_decl(twombly.draw_alpha_mask_set,
args=[POINTER(DrawingType), c_int32, c_int32, c_uint8]),
alpha_mask_ptr_offs=_method_decl(twombly.draw_alpha_mask_ptr_offs, POINTER(c_uint8),
args=[POINTER(DrawingType), c_int32, c_int32]),
alpha_mask_ptr=_method_decl(twombly.draw_alpha_mask_ptr, POINTER(c_uint8)),
# gradient
fill_gradient=_method_decl(twombly.draw_fill_gradient,
args=[POINTER(DrawingType), POINTER(GradientType), c_int, c_int, c_int]),
stroke_gradient=_method_decl(twombly.draw_stroke_gradient,
args=[POINTER(DrawingType), POINTER(GradientType), c_int, c_int, c_int]),
)
try:
    _METHODS['text'] = _method_decl(twombly.draw_text, c_double,
args=[POINTER(DrawingType), c_double, c_double, c_char_p, c_char_p, c_double, c_double])
except:
pass
_transform_matrix_create = _method_decl(twombly.draw_transform_matrix_create, POINTER(TransformType), args=[])
_transform_matrix_free = _method_decl(twombly.draw_transform_matrix_free, args=[POINTER(POINTER(TransformType))])
_transform_matrix_get = _method_decl(twombly.draw_get_transform_matrix, POINTER(TransformType), args=[POINTER(DrawingType)])
_transform_matrix_to_double = _method_decl(twombly.draw_transform_matrix_to_double, args=[POINTER(TransformType), POINTER(c_double)])
_transform_matrix_from_double = _method_decl(twombly.draw_transform_matrix_from_double, args=[POINTER(TransformType), POINTER(c_double)])
_transform_matrix_inverse_transform = _method_decl(twombly.draw_transform_matrix_inverse_transform, args=[POINTER(TransformType), POINTER(c_double), POINTER(c_double)])
_transform_matrix_transform = _method_decl(twombly.draw_transform_matrix_transform, args=[POINTER(TransformType), POINTER(c_double), POINTER(c_double)])
_transform_matrix_translate = _method_decl(twombly.draw_transform_matrix_translate, args=[POINTER(TransformType), c_double, c_double])
_transform_matrix_rotate = _method_decl(twombly.draw_transform_matrix_rotate, args=[POINTER(TransformType), c_double])
_transform_matrix_reset = _method_decl(twombly.draw_transform_matrix_reset, args=[POINTER(TransformType)])
_transform_matrix_scale = _method_decl(twombly.draw_transform_matrix_scale, args=[POINTER(TransformType), c_double, c_double])
class TransformMatrix(object):
'''
transforms points and drawings
'''
def __init__(self, m=None):
if m is None:
m = _transform_matrix_create()
self._free = True
        else:
            self._free = False
self._mtx = m
        self._as_parameter_ = self._mtx  # expose the handle so ctypes can pass it
def scale(self, x, y):
_transform_matrix_scale(self._mtx, x, y)
def translate(self, x, y):
_transform_matrix_translate(self._mtx, x, y)
def rotate(self, a):
_transform_matrix_rotate(self._mtx, a)
def reset(self):
''' clear all transformations'''
_transform_matrix_reset(self._mtx)
def __del__(self):
if self._free and self._mtx is not None:
_transform_matrix_free(pointer(self._mtx))
self._free = False
self._mtx = None
def transform(self, x, y, inverse=False):
''' transform a point '''
x_ptr = pointer(c_double(x))
y_ptr = pointer(c_double(y))
if inverse:
_transform_matrix_inverse_transform(self._mtx, x_ptr, y_ptr)
else:
_transform_matrix_transform(self._mtx, x_ptr, y_ptr)
return (x_ptr[0], y_ptr[0])
@property
def array(self):
''' Get and set transformation matrix data using numpy arrays'''
arr = zeros(6, dtype="double")
_transform_matrix_to_double(self._mtx, cast(arr.ctypes.data, POINTER(c_double)))
return arr
@array.setter
def array(self, arr):
_transform_matrix_from_double(self._mtx, cast(asarray(arr, dtype='double').ctypes.data,
POINTER(c_double)))
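# Illustrative sketch (not part of the module): round-trip a point through a
# translate+scale matrix and its inverse; requires libtwombly to be loadable.
def _example_transform_round_trip():
    m = TransformMatrix()
    m.translate(10.0, 0.0)
    m.scale(2.0, 2.0)
    x, y = m.transform(1.0, 1.0)
    x0, y0 = m.transform(x, y, inverse=True)
    assert abs(x0 - 1.0) < 1e-9 and abs(y0 - 1.0) < 1e-9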
_gradient_create = _method_decl(twombly.draw_gradient_create, POINTER(GradientType), args=[])
_gradient_create16 = _method_decl(twombly.draw_gradient_create16, POINTER(GradientType), args=[])
_gradient_free = _method_decl(twombly.draw_gradient_free, args=[POINTER(POINTER(GradientType))])
_gradient_add_stop = _method_decl(twombly.draw_gradient_add_stop, args=[POINTER(GradientType), POINTER(c_float)])
_gradient_add_stop16 = _method_decl(twombly.draw_gradient_add_stop16, args=[POINTER(GradientType), POINTER(c_float)])
_gradient_get_matrix = _method_decl(twombly.draw_gradient_get_matrix, POINTER(TransformType), args=[POINTER(GradientType)])
class Gradient(object):
''' Gradient Class '''
circle = GRADIENT_CIRCLE
radial = GRADIENT_RADIAL
radial_d = GRADIENT_RADIAL_D
radial_focus = GRADIENT_RADIAL_FOCUS
x = GRADIENT_X
y = GRADIENT_Y
diamond = GRADIENT_DIAMOND
xy = GRADIENT_XY
sqrt_xy = GRADIENT_SQRT_XY
conic = GRADIENT_CONIC
def __init__(self, *args, **kw):
self.depth = kw.get('depth', 8)
        self._gradient = None
if self.depth == 16:
self._gradient = _gradient_create16()
else:
self._gradient = _gradient_create()
for arg in args:
self.add_stop(Color(arg))
self._as_parameter_ = self._gradient
def add_stop(self, c):
if self.depth == 8:
_gradient_add_stop(self._gradient, (c_float * 4)(*c))
elif self.depth == 16:
_gradient_add_stop16(self._gradient, (c_float * 4)(*c))
@property
def matrix(self):
return TransformMatrix(_gradient_get_matrix(self._gradient))
def __del__(self):
if hasattr(self, "_gradient") and self._gradient is not None:
_gradient_free(pointer(self._gradient))
self._gradient = None
def as_uint8(arr):
return [c_uint8(int(i)) for i in arr]
def as_uint16(arr):
return [c_uint16(int(i)) for i in arr]
class Color(ndarray):
    ''' a numpy array of 4 floats '''
def __new__(cls, red, green=None, blue=None, alpha=255):
return ndarray.__new__(cls, 4, 'float')
def __init__(self, red, green=None, blue=None, alpha=255):
        if isinstance(red, str):
            red, green, blue, alpha = _colors.get(red.lower().replace(' ', ''), [0, 0, 0, alpha])
        elif hasattr(red, '__getitem__') and hasattr(red, '__len__'):
            # handle sequences before the scalar fallback so Color([r, g, b])
            # works; copying also avoids mutating the caller's sequence
            arr = list(red)
            if len(arr) == 3:
                arr.append(alpha)
            elif len(arr) != 4:
                raise Exception("bad color")
            red, green, blue, alpha = arr[0], arr[1], arr[2], arr[3]
        elif green is None or blue is None:
            green = blue = red
self[0] = red
self[1] = green
self[2] = blue
self[3] = alpha
        self._as_parameter_ = (c_float * 4)(*self)  # expose values to ctypes calls
def as_uint8(self):
''' convert red, green, blue, alpha to chars 0-255 '''
return as_uint8(self)
def as_uint16(self):
''' convert red, green, blue, alpha to uint16s 0-65535 '''
return as_uint16(self)
class Vertex(list):
def __init__(self, iterable, update_fn=None):
list.__init__(self, iterable)
self.update_fn = update_fn
@property
def x(self):
return self[0]
@property
def y(self):
return self[1]
@property
def command(self):
return self[2]
@command.setter
def command(self, cmd):
self[2] = cmd
if self.update_fn is not None:
self.update_fn(*self)
@x.setter
def x(self, val):
self[0] = val
if self.update_fn is not None:
self.update_fn(*self)
@y.setter
def y(self, val):
self[1] = val
if self.update_fn is not None:
self.update_fn(*self)
class Drawing(object):
''' python wrapper for libtwombly Drawing class '''
def __init__(self, arr, bgr=False, width=None, height=None):
self._free = _METHODS["free"]
self._color = Color(0, 0, 0, 0)
self.array = arr
bgr_str = ""
if bgr:
bgr_str = "_bgr"
if not width:
width = arr.shape[1]
if not height:
height = arr.shape[0]
if arr.dtype == 'uint8':
self._drawing = _METHODS["create" + bgr_str](width, height,
arr.shape[2], arr.ravel().ctypes.data)
self._is_16 = False
elif arr.dtype == 'uint16':
self._drawing = _METHODS["create16" + bgr_str](width, height,
arr.shape[2], arr.ravel().ctypes.data)
self._is_16 = True
else:
self._drawing = None
raise ValueError("bad image type")
self._as_parameter_ = self._drawing
def __getattr__(self, key):
if key in _METHODS:
def wrapper(*args):
''' get method by name'''
return _METHODS[key](self._drawing, *args)
return wrapper
raise AttributeError
@property
def total_vertices(self):
return _METHODS["total_vertices"](self)
@property
def vertices(self):
class Vertices(object):
def __init__(_self):
_self._index = 0
def __setitem__(_self, index, val):
self.modify_vertex(index, val[0], val[1], val[2])
def __getitem__(_self, index):
return self.get_vertex(index)
def __len__(_self):
return self.total_vertices
def __iter__(_self):
return _self
def __next__(_self):
                if _self._index >= self.total_vertices:
                    raise StopIteration
                result = self.get_vertex(_self._index)
                _self._index += 1
return result
def __str__(_self):
return 'Vertices{%d}' % self.total_vertices
return Vertices()
@vertices.setter
def vertices(self, arr):
for index, vertex in enumerate(arr):
self.set_vertex(index, vertex[0], vertex[1], vertex[2])
def get_vertex(self, index):
if index >= self.total_vertices:
raise Exception("out of bounds")
x_ptr = pointer(c_double(0))
y_ptr = pointer(c_double(0))
_METHODS["get_vertex"](self, index, x_ptr, y_ptr)
cmd = _METHODS["get_command"](self, index)
return Vertex([x_ptr[0], y_ptr[0], cmd], update_fn=partial(self.set_vertex, index))
def set_vertex(self, index, x, y, cmd=None):
if cmd is None:
cmd = self.get_command(index)
self.modify_vertex(index, x, y, cmd)
def clear(self, r, g=None, b=None, a=255):
if isinstance(r, Color):
_METHODS["clear"](self._drawing, *r.as_uint8())
else:
_METHODS["clear"](self._drawing, *Color(r, g, b, a).as_uint8())
@property
def color(self):
return self._color
@color.setter
def color(self, r, g=None, b=None, a=255):
if isinstance(r, str):
r = Color(r)
elif isinstance(r, (tuple, list)) and len(r) >= 3:
g = r[1]
b = r[2]
if len(r) > 3:
a = r[3]
r = r[0]
if isinstance(r, Color):
_METHODS["set_color"](self._drawing, *r.as_uint8())
self._color = r.as_uint8()
else:
_METHODS["set_color"](self._drawing, *Color(r, g, b, a).as_uint8())
self._color = Color(r, g, b, a).as_uint8()
def curve_to(self, a, b, c=None, d=None, e=None, f=None):
if c is None or d is None:
return self.curve_to2(a, b)
elif e is None or f is None:
return self.curve_to4(a, b, c, d)
return self.curve_to6(a, b, c, d, e, f)
def curve_rel(self, a, b, c=None, d=None, e=None, f=None):
if c is None or d is None:
return self.curve_rel2(a, b)
elif e is None or f is None:
return self.curve_rel4(a, b, c, d)
return self.curve_rel6(a, b, c, d, e, f)
def __del__(self):
if self._drawing is not None:
self._free(self._drawing)
self._drawing = None
@property
def matrix(self):
return TransformMatrix(_transform_matrix_get(self))
@property
def antialias(self):
return self.get_antialias()
@antialias.setter
def antialias(self, aa):
self.set_antialias(aa)
@property
def preserve(self):
return self.get_preserve()
@preserve.setter
def preserve(self, p):
self.set_preserve(p)
@property
def line_width(self):
return self.get_line_width()
@line_width.setter
def line_width(self, w):
self.set_line_width(w)
@property
def miter_limit(self):
return self.get_miter_limit()
@miter_limit.setter
def miter_limit(self, limit):
self.set_miter_limit(limit)
@property
def line_join(self):
return self.get_line_join()
@line_join.setter
def line_join(self, j):
return self.set_line_join(j)
@property
def line_cap(self):
return self.get_line_cap()
@line_cap.setter
def line_cap(self, cap):
self.set_line_cap(cap)
@property
def path(self):
return self.get_active_path()
@path.setter
def path(self, pth):
self.set_active_path(pth)
def alpha(self, x=0, y=0, val=None):
self.alpha_mask_init()
if val is None:
return self.alpha_mask_ptr_offs(x, y)[0]
else:
self.alpha_mask_set(x, y, val)
def draw(arr, *args, **kwargs):
return Drawing(arr, *args, **kwargs)
def new_image(width, height, channels=3, depth='uint8'):
return zeros((height, width, channels), depth)
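# Illustrative sketch (not part of the module): render a filled circle into a
# fresh RGB buffer. Assumes libtwombly is loadable and that "red" exists in
# the bundled color table.
if __name__ == "__main__":
    img = new_image(200, 200, channels=3)
    d = draw(img)
    d.clear(255, 255, 255)
    d.ellipse(100, 100, 50, 50)
    d.color = Color("red")
    d.fill()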
| mit |
paweljasinski/ironpython3 | Src/StdLib/Lib/dis.py | 88 | 17160 | """Disassembler of Python byte code into mnemonics."""
import sys
import types
import collections
import io
from opcode import *
from opcode import __all__ as _opcodes_all
__all__ = ["code_info", "dis", "disassemble", "distb", "disco",
"findlinestarts", "findlabels", "show_code",
"get_instructions", "Instruction", "Bytecode"] + _opcodes_all
del _opcodes_all
_have_code = (types.MethodType, types.FunctionType, types.CodeType, type)
def _try_compile(source, name):
"""Attempts to compile the given source, first as an expression and
then as a statement if the first approach fails.
Utility function to accept strings in functions that otherwise
expect code objects
"""
try:
c = compile(source, name, 'eval')
except SyntaxError:
c = compile(source, name, 'exec')
return c
def dis(x=None, *, file=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb(file=file)
return
if hasattr(x, '__func__'): # Method
x = x.__func__
if hasattr(x, '__code__'): # Function
x = x.__code__
if hasattr(x, '__dict__'): # Class or module
items = sorted(x.__dict__.items())
for name, x1 in items:
if isinstance(x1, _have_code):
print("Disassembly of %s:" % name, file=file)
try:
dis(x1, file=file)
except TypeError as msg:
print("Sorry:", msg, file=file)
print(file=file)
elif hasattr(x, 'co_code'): # Code object
disassemble(x, file=file)
elif isinstance(x, (bytes, bytearray)): # Raw bytecode
_disassemble_bytes(x, file=file)
elif isinstance(x, str): # Source code
_disassemble_str(x, file=file)
else:
raise TypeError("don't know how to disassemble %s objects" %
type(x).__name__)
def distb(tb=None, *, file=None):
"""Disassemble a traceback (default: last traceback)."""
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError("no last traceback to disassemble")
while tb.tb_next: tb = tb.tb_next
disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file)
# The inspect module interrogates this dictionary to build its
# list of CO_* constants. It is also used by pretty_flags to
# turn the co_flags field into a human readable list.
COMPILER_FLAG_NAMES = {
1: "OPTIMIZED",
2: "NEWLOCALS",
4: "VARARGS",
8: "VARKEYWORDS",
16: "NESTED",
32: "GENERATOR",
64: "NOFREE",
}
def pretty_flags(flags):
"""Return pretty representation of code flags."""
names = []
for i in range(32):
flag = 1<<i
if flags & flag:
names.append(COMPILER_FLAG_NAMES.get(flag, hex(flag)))
flags ^= flag
if not flags:
break
else:
names.append(hex(flags))
return ", ".join(names)
def _get_code_object(x):
"""Helper to handle methods, functions, strings and raw code objects"""
if hasattr(x, '__func__'): # Method
x = x.__func__
if hasattr(x, '__code__'): # Function
x = x.__code__
if isinstance(x, str): # Source code
x = _try_compile(x, "<disassembly>")
if hasattr(x, 'co_code'): # Code object
return x
raise TypeError("don't know how to disassemble %s objects" %
type(x).__name__)
def code_info(x):
"""Formatted details of methods, functions, or code."""
return _format_code_info(_get_code_object(x))
def _format_code_info(co):
lines = []
lines.append("Name: %s" % co.co_name)
lines.append("Filename: %s" % co.co_filename)
lines.append("Argument count: %s" % co.co_argcount)
lines.append("Kw-only arguments: %s" % co.co_kwonlyargcount)
lines.append("Number of locals: %s" % co.co_nlocals)
lines.append("Stack size: %s" % co.co_stacksize)
lines.append("Flags: %s" % pretty_flags(co.co_flags))
if co.co_consts:
lines.append("Constants:")
for i_c in enumerate(co.co_consts):
lines.append("%4d: %r" % i_c)
if co.co_names:
lines.append("Names:")
for i_n in enumerate(co.co_names):
lines.append("%4d: %s" % i_n)
if co.co_varnames:
lines.append("Variable names:")
for i_n in enumerate(co.co_varnames):
lines.append("%4d: %s" % i_n)
if co.co_freevars:
lines.append("Free variables:")
for i_n in enumerate(co.co_freevars):
lines.append("%4d: %s" % i_n)
if co.co_cellvars:
lines.append("Cell variables:")
for i_n in enumerate(co.co_cellvars):
lines.append("%4d: %s" % i_n)
return "\n".join(lines)
def show_code(co, *, file=None):
"""Print details of methods, functions, or code to *file*.
If *file* is not provided, the output is printed on stdout.
"""
print(code_info(co), file=file)
_Instruction = collections.namedtuple("_Instruction",
"opname opcode arg argval argrepr offset starts_line is_jump_target")
class Instruction(_Instruction):
"""Details for a bytecode operation
Defined fields:
opname - human readable name for operation
opcode - numeric code for operation
arg - numeric argument to operation (if any), otherwise None
argval - resolved arg value (if known), otherwise same as arg
argrepr - human readable description of operation argument
offset - start index of operation within bytecode sequence
starts_line - line started by this opcode (if any), otherwise None
is_jump_target - True if other code jumps to here, otherwise False
"""
def _disassemble(self, lineno_width=3, mark_as_current=False):
"""Format instruction details for inclusion in disassembly output
*lineno_width* sets the width of the line number field (0 omits it)
*mark_as_current* inserts a '-->' marker arrow as part of the line
"""
fields = []
# Column: Source code line number
if lineno_width:
if self.starts_line is not None:
lineno_fmt = "%%%dd" % lineno_width
fields.append(lineno_fmt % self.starts_line)
else:
fields.append(' ' * lineno_width)
# Column: Current instruction indicator
if mark_as_current:
fields.append('-->')
else:
fields.append(' ')
# Column: Jump target marker
if self.is_jump_target:
fields.append('>>')
else:
fields.append(' ')
# Column: Instruction offset from start of code sequence
fields.append(repr(self.offset).rjust(4))
# Column: Opcode name
fields.append(self.opname.ljust(20))
# Column: Opcode argument
if self.arg is not None:
fields.append(repr(self.arg).rjust(5))
# Column: Opcode argument details
if self.argrepr:
fields.append('(' + self.argrepr + ')')
return ' '.join(fields).rstrip()
def get_instructions(x, *, first_line=None):
"""Iterator for the opcodes in methods, functions or code
Generates a series of Instruction named tuples giving the details of
    each operation in the supplied code.
If *first_line* is not None, it indicates the line number that should
be reported for the first source line in the disassembled code.
Otherwise, the source line information (if any) is taken directly from
the disassembled code object.
"""
co = _get_code_object(x)
cell_names = co.co_cellvars + co.co_freevars
linestarts = dict(findlinestarts(co))
if first_line is not None:
line_offset = first_line - co.co_firstlineno
else:
line_offset = 0
return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
co.co_consts, cell_names, linestarts,
line_offset)
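# Illustrative sketch (not part of the module): the opname sequence produced
# for a trivial function under CPython 3.4 bytecode.
def _example_get_instructions():
    def add(a, b):
        return a + b
    return [instr.opname for instr in get_instructions(add)]
    # -> ['LOAD_FAST', 'LOAD_FAST', 'BINARY_ADD', 'RETURN_VALUE']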
def _get_const_info(const_index, const_list):
"""Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr().
"""
argval = const_index
if const_list is not None:
argval = const_list[const_index]
return argval, repr(argval)
def _get_name_info(name_index, name_list):
"""Helper to get optional details about named references
Returns the dereferenced name as both value and repr if the name
list is defined.
Otherwise returns the name index and its repr().
"""
argval = name_index
if name_list is not None:
argval = name_list[name_index]
argrepr = argval
else:
argrepr = repr(argval)
return argval, argrepr
def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
cells=None, linestarts=None, line_offset=0):
"""Iterate over the instructions in a bytecode string.
Generates a sequence of Instruction namedtuples giving the details of each
opcode. Additional information about the code's runtime environment
(e.g. variable names, constants) can be specified using optional
arguments.
"""
labels = findlabels(code)
extended_arg = 0
starts_line = None
free = None
# enumerate() is not an option, since we sometimes process
# multiple elements on a single pass through the loop
n = len(code)
i = 0
while i < n:
op = code[i]
offset = i
if linestarts is not None:
starts_line = linestarts.get(i, None)
if starts_line is not None:
starts_line += line_offset
is_jump_target = i in labels
i = i+1
arg = None
argval = None
argrepr = ''
if op >= HAVE_ARGUMENT:
arg = code[i] + code[i+1]*256 + extended_arg
extended_arg = 0
i = i+2
if op == EXTENDED_ARG:
extended_arg = arg*65536
# Set argval to the dereferenced value of the argument when
            # available, and argrepr to the string representation of argval.
# _disassemble_bytes needs the string repr of the
# raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
argval = arg
if op in hasconst:
argval, argrepr = _get_const_info(arg, constants)
elif op in hasname:
argval, argrepr = _get_name_info(arg, names)
elif op in hasjrel:
argval = i + arg
argrepr = "to " + repr(argval)
elif op in haslocal:
argval, argrepr = _get_name_info(arg, varnames)
elif op in hascompare:
argval = cmp_op[arg]
argrepr = argval
elif op in hasfree:
argval, argrepr = _get_name_info(arg, cells)
elif op in hasnargs:
argrepr = "%d positional, %d keyword pair" % (code[i-2], code[i-1])
yield Instruction(opname[op], op,
arg, argval, argrepr,
offset, starts_line, is_jump_target)
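# --- Hedged usage sketch (illustrative; not part of the original module) ---
# get_instructions() is a generator of Instruction namedtuples, so it can be
# consumed lazily; `_toy` is an illustrative throwaway function.
def _example_get_instructions_usage():
    def _toy(x):
        return x + 1
    for ins in get_instructions(_toy):
        print(ins.offset, ins.opname, ins.argrepr)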
def disassemble(co, lasti=-1, *, file=None):
"""Disassemble a code object."""
cell_names = co.co_cellvars + co.co_freevars
linestarts = dict(findlinestarts(co))
_disassemble_bytes(co.co_code, lasti, co.co_varnames, co.co_names,
co.co_consts, cell_names, linestarts, file=file)
def _disassemble_bytes(code, lasti=-1, varnames=None, names=None,
constants=None, cells=None, linestarts=None,
*, file=None, line_offset=0):
# Omit the line number column entirely if we have no line number info
show_lineno = linestarts is not None
# TODO?: Adjust width upwards if max(linestarts.values()) >= 1000?
lineno_width = 3 if show_lineno else 0
for instr in _get_instructions_bytes(code, varnames, names,
constants, cells, linestarts,
line_offset=line_offset):
new_source_line = (show_lineno and
instr.starts_line is not None and
instr.offset > 0)
if new_source_line:
print(file=file)
is_current_instr = instr.offset == lasti
print(instr._disassemble(lineno_width, is_current_instr), file=file)
def _disassemble_str(source, *, file=None):
"""Compile the source string, then disassemble the code object."""
disassemble(_try_compile(source, '<dis>'), file=file)
disco = disassemble # XXX For backwards compatibility
def findlabels(code):
"""Detect all offsets in a byte code which are jump targets.
Return the list of offsets.
"""
labels = []
# enumerate() is not an option, since we sometimes process
# multiple elements on a single pass through the loop
n = len(code)
i = 0
while i < n:
op = code[i]
i = i+1
if op >= HAVE_ARGUMENT:
arg = code[i] + code[i+1]*256
i = i+2
label = -1
if op in hasjrel:
label = i+arg
elif op in hasjabs:
label = arg
if label >= 0:
if label not in labels:
labels.append(label)
return labels
def findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
"""
byte_increments = list(code.co_lnotab[0::2])
line_increments = list(code.co_lnotab[1::2])
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno)
class Bytecode:
"""The bytecode operations of a piece of code
Instantiate this with a function, method, string of code, or a code object
(as returned by compile()).
Iterating over this yields the bytecode operations as Instruction instances.
"""
def __init__(self, x, *, first_line=None, current_offset=None):
self.codeobj = co = _get_code_object(x)
if first_line is None:
self.first_line = co.co_firstlineno
self._line_offset = 0
else:
self.first_line = first_line
self._line_offset = first_line - co.co_firstlineno
self._cell_names = co.co_cellvars + co.co_freevars
self._linestarts = dict(findlinestarts(co))
self._original_object = x
self.current_offset = current_offset
def __iter__(self):
co = self.codeobj
return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
co.co_consts, self._cell_names,
self._linestarts,
line_offset=self._line_offset)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__,
self._original_object)
@classmethod
def from_traceback(cls, tb):
""" Construct a Bytecode from the given traceback """
while tb.tb_next:
tb = tb.tb_next
return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)
def info(self):
"""Return formatted information about the code object."""
return _format_code_info(self.codeobj)
def dis(self):
"""Return a formatted view of the bytecode operations."""
co = self.codeobj
if self.current_offset is not None:
offset = self.current_offset
else:
offset = -1
with io.StringIO() as output:
_disassemble_bytes(co.co_code, varnames=co.co_varnames,
names=co.co_names, constants=co.co_consts,
cells=self._cell_names,
linestarts=self._linestarts,
line_offset=self._line_offset,
file=output,
lasti=offset)
return output.getvalue()
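# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Bytecode accepts the same inputs as get_instructions(); its dis() method
# returns the disassembly as a string instead of printing it.
def _example_bytecode_usage():
    bc = Bytecode("a = 1\nb = a + 2")
    print(bc.info())    # formatted code-object metadata
    print(bc.dis())     # disassembly text, built via an io.StringIO buffer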
def _test():
"""Simple test program to disassemble a file."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-')
args = parser.parse_args()
with args.infile as infile:
source = infile.read()
code = compile(source, args.infile.name, "exec")
dis(code)
if __name__ == "__main__":
_test()
| apache-2.0 |
maciejkula/spotlight | examples/bloom_embeddings/performance.py | 1 | 5801 | import os
import pickle
import time
import numpy as np
import torch
from spotlight.layers import BloomEmbedding, ScaledEmbedding
from spotlight.factorization.implicit import ImplicitFactorizationModel
from spotlight.factorization.representations import BilinearNet
from spotlight.sequence.implicit import ImplicitSequenceModel
from spotlight.sequence.representations import LSTMNet
from spotlight.datasets.movielens import get_movielens_dataset
CUDA = torch.cuda.is_available()
EMBEDDING_DIM = 64
N_ITER = 2
NUM_HASH_FUNCTIONS = 4
def time_fitting(model, data, repetitions=2):
timings = []
# Warm-up epoch
model.fit(data)
for _ in range(repetitions):
start_time = time.time()
model.fit(data)
timings.append(time.time() - start_time)
print(min(timings))
return min(timings)
def factorization_model(num_embeddings, bloom):
if bloom:
user_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
num_hash_functions=NUM_HASH_FUNCTIONS)
item_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
num_hash_functions=NUM_HASH_FUNCTIONS)
else:
user_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)
item_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)
network = BilinearNet(num_embeddings,
num_embeddings,
user_embedding_layer=user_embeddings,
item_embedding_layer=item_embeddings)
model = ImplicitFactorizationModel(loss='adaptive_hinge',
n_iter=N_ITER,
embedding_dim=EMBEDDING_DIM,
batch_size=2048,
learning_rate=1e-2,
l2=1e-6,
representation=network,
use_cuda=CUDA)
return model
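# --- Hedged usage sketch (illustrative; assumes '100K' is an available
# MovieLens variant and that network access is available for the download) ---
def _example_time_fitting():
    data = get_movielens_dataset('100K')
    model = factorization_model(num_embeddings=int(1e4), bloom=True)
    print('Best epoch time: {:.2f}s'.format(time_fitting(model, data)))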
def sequence_model(num_embeddings, bloom):
if bloom:
item_embeddings = BloomEmbedding(num_embeddings, EMBEDDING_DIM,
num_hash_functions=NUM_HASH_FUNCTIONS)
else:
item_embeddings = ScaledEmbedding(num_embeddings, EMBEDDING_DIM)
network = LSTMNet(num_embeddings, EMBEDDING_DIM,
item_embedding_layer=item_embeddings)
model = ImplicitSequenceModel(loss='adaptive_hinge',
n_iter=N_ITER,
batch_size=512,
learning_rate=1e-3,
l2=1e-2,
representation=network,
use_cuda=CUDA)
return model
def get_sequence_data():
dataset = get_movielens_dataset('1M')
max_sequence_length = 200
min_sequence_length = 20
data = dataset.to_sequence(max_sequence_length=max_sequence_length,
min_sequence_length=min_sequence_length,
step_size=max_sequence_length)
print(data.sequences.shape)
return data
def get_factorization_data():
dataset = get_movielens_dataset('1M')
return dataset
def embedding_size_scalability():
sequence_data = get_sequence_data()
factorization_data = get_factorization_data()
embedding_dims = (1e4,
1e4 * 5,
1e5,
1e5 * 5,
1e6,
1e6 * 5)
bloom_sequence = np.array([time_fitting(sequence_model(int(dim), True),
sequence_data)
for dim in embedding_dims])
baseline_sequence = np.array([time_fitting(sequence_model(int(dim), False),
sequence_data)
for dim in embedding_dims])
sequence_ratio = bloom_sequence / baseline_sequence
print('Sequence ratio {}'.format(sequence_ratio))
bloom_factorization = np.array([time_fitting(factorization_model(int(dim), True),
factorization_data)
for dim in embedding_dims])
baseline_factorization = np.array([time_fitting(factorization_model(int(dim), False),
factorization_data)
for dim in embedding_dims])
factorization_ratio = bloom_factorization / baseline_factorization
print('Factorization ratio {}'.format(factorization_ratio))
return np.array(embedding_dims), sequence_ratio, factorization_ratio
def plot(dims, sequence, factorization):
import matplotlib
matplotlib.use('Agg') # NOQA
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
plt.ylabel("Speed improvement")
plt.xlabel("Size of embedding layers")
plt.title("Fitting speed (1.0 = no change)")
plt.xscale('log')
plt.plot(dims,
1.0 / sequence,
label='Sequence model')
plt.plot(dims,
1.0 / factorization,
label='Factorization model')
plt.legend(loc='lower right')
plt.savefig('speed.png')
plt.close()
if __name__ == '__main__':
fname = 'performance.pickle'
if not os.path.exists(fname):
dims, sequence, factorization = embedding_size_scalability()
with open(fname, 'wb') as fle:
pickle.dump((dims, sequence, factorization), fle)
with open(fname, 'rb') as fle:
(dims, sequence, factorization) = pickle.load(fle)
plot(dims, sequence, factorization)
| mit |
laperry1/android_external_chromium_org | build/android/pylib/linker/test_runner.py | 45 | 3288 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs linker tests on a particular device."""
import logging
import os.path
import sys
import traceback
from pylib import constants
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.linker import test_case
from pylib.utils import apk_helper
# Name of the Android package to install for this to work.
_PACKAGE_NAME = 'ChromiumLinkerTest'
class LinkerExceptionTestResult(base_test_result.BaseTestResult):
"""Test result corresponding to a python exception in a host-custom test."""
def __init__(self, test_name, exc_info):
"""Constructs a LinkerExceptionTestResult object.
Args:
test_name: name of the test which raised an exception.
exc_info: exception info, ostensibly from sys.exc_info().
"""
exc_type, exc_value, exc_traceback = exc_info
trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
log_msg = 'Exception:\n' + trace_info
super(LinkerExceptionTestResult, self).__init__(
test_name,
base_test_result.ResultType.FAIL,
log = "%s %s" % (exc_type, log_msg))
class LinkerTestRunner(base_test_runner.BaseTestRunner):
"""Orchestrates running a set of linker tests.
Any Python exceptions in the tests are caught and translated into a failed
result, rather than being re-raised on the main thread.
"""
#override
def __init__(self, device, tool, push_deps, cleanup_test_files):
"""Creates a new LinkerTestRunner.
Args:
device: Attached android device.
tool: Name of the Valgrind tool.
push_deps: If True, push all dependencies to the device.
cleanup_test_files: Whether or not to cleanup test files on device.
"""
super(LinkerTestRunner, self).__init__(device, tool, push_deps,
cleanup_test_files)
#override
def InstallTestPackage(self):
apk_path = os.path.join(
constants.GetOutDirectory(), 'apks', '%s.apk' % _PACKAGE_NAME)
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
package_name = apk_helper.GetPackageName(apk_path)
self.device.old_interface.ManagedInstall(apk_path, package_name)
#override
def RunTest(self, test):
"""Sets up and runs a test case.
Args:
test: An object which is ostensibly a subclass of LinkerTestCaseBase.
Returns:
A TestRunResults object which contains the result produced by the test
and, in the case of a failure, the test that should be retried.
"""
assert isinstance(test, test_case.LinkerTestCaseBase)
try:
results = test.Run(self.device)
except Exception:
logging.exception('Caught exception while trying to run test: ' +
test.tagged_name)
exc_info = sys.exc_info()
results = base_test_result.TestRunResults()
results.AddResult(LinkerExceptionTestResult(
test.tagged_name, exc_info))
if not results.DidRunPass():
return results, test
else:
return results, None
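# --- Hedged usage sketch (illustrative; `device` and `test` are assumed to
# come from the surrounding test harness) ---
#
#   runner = LinkerTestRunner(device, tool=None, push_deps=True,
#                             cleanup_test_files=True)
#   runner.InstallTestPackage()
#   results, retry = runner.RunTest(test)  # retry is None when the test passed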
| bsd-3-clause |
peerster/CouchPotatoServer | libs/pyutil/dictutil.py | 106 | 20151 | """
Tools to mess with dicts.
"""
import warnings
import copy, operator
from bisect import bisect_left, insort_left
from pyutil.assertutil import _assert, precondition
def move(k, d1, d2, strict=False):
"""
Move item with key k from d1 to d2.
"""
warnings.warn("deprecated", DeprecationWarning)
if strict and not d1.has_key(k):
raise KeyError, k
d2[k] = d1[k]
del d1[k]
def subtract(d1, d2):
"""
Remove all items from d1 whose key occurs in d2.
@returns d1
"""
warnings.warn("deprecated", DeprecationWarning)
if len(d1) > len(d2):
for k in d2.keys():
if d1.has_key(k):
del d1[k]
else:
for k in d1.keys():
if d2.has_key(k):
del d1[k]
return d1
class DictOfSets(dict):
def add(self, key, value):
warnings.warn("deprecated", DeprecationWarning)
if key in self:
self[key].add(value)
else:
self[key] = set([value])
def discard(self, key, value):
warnings.warn("deprecated", DeprecationWarning)
if not key in self:
return
self[key].discard(value)
if not self[key]:
del self[key]
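# --- Hedged usage sketch (illustrative; Python 2, like the module) ---
def _example_dictofsets_usage():
    d = DictOfSets()
    d.add('evens', 2); d.add('evens', 4)   # 'evens' -> set([2, 4])
    d.discard('evens', 2)                  # 'evens' -> set([4])
    d.discard('evens', 4)                  # empty set, so the key is dropped
    assert 'evens' not in d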
class UtilDict:
def __init__(self, initialdata={}):
warnings.warn("deprecated", DeprecationWarning)
self.d = {}
self.update(initialdata)
def del_if_present(self, key):
if self.has_key(key):
del self[key]
def items_sorted_by_value(self):
"""
@return a sequence of (key, value,) pairs sorted according to value
"""
l = [(x[1], x[0],) for x in self.d.iteritems()]
l.sort()
return [(x[1], x[0],) for x in l]
def items_sorted_by_key(self):
"""
@return a sequence of (key, value,) pairs sorted according to key
"""
l = self.d.items()
l.sort()
return l
def __repr__(self, *args, **kwargs):
return self.d.__repr__(*args, **kwargs)
def __str__(self, *args, **kwargs):
return self.d.__str__(*args, **kwargs)
def __contains__(self, *args, **kwargs):
return self.d.__contains__(*args, **kwargs)
def __len__(self, *args, **kwargs):
return self.d.__len__(*args, **kwargs)
def __cmp__(self, other):
try:
return self.d.__cmp__(other)
except TypeError, le:
# maybe we should look for a .d member in other. I know this is insanely kludgey, but the Right Way To Do It is for dict.__cmp__ to use structural typing ("duck typing")
try:
return self.d.__cmp__(other.d)
except:
raise le
def __eq__(self, *args, **kwargs):
return self.d.__eq__(*args, **kwargs)
def __ne__(self, *args, **kwargs):
return self.d.__ne__(*args, **kwargs)
def __gt__(self, *args, **kwargs):
return self.d.__gt__(*args, **kwargs)
def __ge__(self, *args, **kwargs):
return self.d.__ge__(*args, **kwargs)
def __le__(self, *args, **kwargs):
return self.d.__le__(*args, **kwargs)
def __lt__(self, *args, **kwargs):
return self.d.__lt__(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
return self.d.__getitem__(*args, **kwargs)
def __setitem__(self, *args, **kwargs):
return self.d.__setitem__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
return self.d.__delitem__(*args, **kwargs)
def __iter__(self, *args, **kwargs):
return self.d.__iter__(*args, **kwargs)
def clear(self, *args, **kwargs):
return self.d.clear(*args, **kwargs)
def copy(self, *args, **kwargs):
return self.__class__(self.d.copy(*args, **kwargs))
def fromkeys(self, *args, **kwargs):
return self.__class__(self.d.fromkeys(*args, **kwargs))
def get(self, key, default=None):
return self.d.get(key, default)
def has_key(self, *args, **kwargs):
return self.d.has_key(*args, **kwargs)
def items(self, *args, **kwargs):
return self.d.items(*args, **kwargs)
def iteritems(self, *args, **kwargs):
return self.d.iteritems(*args, **kwargs)
def iterkeys(self, *args, **kwargs):
return self.d.iterkeys(*args, **kwargs)
def itervalues(self, *args, **kwargs):
return self.d.itervalues(*args, **kwargs)
def keys(self, *args, **kwargs):
return self.d.keys(*args, **kwargs)
def pop(self, *args, **kwargs):
return self.d.pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
return self.d.popitem(*args, **kwargs)
def setdefault(self, *args, **kwargs):
return self.d.setdefault(*args, **kwargs)
def update(self, *args, **kwargs):
self.d.update(*args, **kwargs)
def values(self, *args, **kwargs):
return self.d.values(*args, **kwargs)
class NumDict:
def __init__(self, initialdict={}):
warnings.warn("deprecated", DeprecationWarning)
self.d = copy.deepcopy(initialdict)
def add_num(self, key, val, default=0):
"""
If the key doesn't appear in self then it is created with value default
(before addition).
"""
self.d[key] = self.d.get(key, default) + val
def subtract_num(self, key, val, default=0):
self.d[key] = self.d.get(key, default) - val
def sum(self):
"""
@return: the sum of all values
"""
return reduce(operator.__add__, self.d.values())
def inc(self, key, default=0):
"""
Increment the value associated with key in dict. If there is no such
key, then one will be created with initial value 0 (before inc() --
therefore value 1 after inc).
"""
self.add_num(key, 1, default)
def dec(self, key, default=0):
"""
Decrement the value associated with key in dict. If there is no such
key, then one will be created with initial value 0 (before dec() --
therefore value -1 after dec).
"""
self.subtract_num(key, 1, default)
def items_sorted_by_value(self):
"""
@return a sequence of (key, value,) pairs sorted according to value
"""
l = [(x[1], x[0],) for x in self.d.iteritems()]
l.sort()
return [(x[1], x[0],) for x in l]
def item_with_largest_value(self):
it = self.d.iteritems()
(winner, winnerval,) = it.next()
try:
while True:
n, nv = it.next()
if nv > winnerval:
winner = n
winnerval = nv
except StopIteration:
pass
return (winner, winnerval,)
def items_sorted_by_key(self):
"""
@return a sequence of (key, value,) pairs sorted according to key
"""
l = self.d.items()
l.sort()
return l
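# --- Hedged usage sketch (illustrative; Python 2, like the module) ---
def _example_numdict_usage():
    counts = NumDict()
    for word in ['a', 'b', 'a']:
        counts.inc(word)                   # missing keys start at 0
    counts.dec('b')                        # 'b' back down to 0
    assert counts.sum() == 2               # reduce(operator.__add__, values)
    assert counts.item_with_largest_value() == ('a', 2)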
def __repr__(self, *args, **kwargs):
return self.d.__repr__(*args, **kwargs)
def __str__(self, *args, **kwargs):
return self.d.__str__(*args, **kwargs)
def __contains__(self, *args, **kwargs):
return self.d.__contains__(*args, **kwargs)
def __len__(self, *args, **kwargs):
return self.d.__len__(*args, **kwargs)
def __cmp__(self, other):
try:
return self.d.__cmp__(other)
except TypeError, le:
# maybe we should look for a .d member in other. I know this is insanely kludgey, but the Right Way To Do It is for dict.__cmp__ to use structural typing ("duck typing")
try:
return self.d.__cmp__(other.d)
except:
raise le
def __eq__(self, *args, **kwargs):
return self.d.__eq__(*args, **kwargs)
def __ne__(self, *args, **kwargs):
return self.d.__ne__(*args, **kwargs)
def __gt__(self, *args, **kwargs):
return self.d.__gt__(*args, **kwargs)
def __ge__(self, *args, **kwargs):
return self.d.__ge__(*args, **kwargs)
def __le__(self, *args, **kwargs):
return self.d.__le__(*args, **kwargs)
def __lt__(self, *args, **kwargs):
return self.d.__lt__(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
return self.d.__getitem__(*args, **kwargs)
def __setitem__(self, *args, **kwargs):
return self.d.__setitem__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
return self.d.__delitem__(*args, **kwargs)
def __iter__(self, *args, **kwargs):
return self.d.__iter__(*args, **kwargs)
def clear(self, *args, **kwargs):
return self.d.clear(*args, **kwargs)
def copy(self, *args, **kwargs):
return self.__class__(self.d.copy(*args, **kwargs))
def fromkeys(self, *args, **kwargs):
return self.__class__(self.d.fromkeys(*args, **kwargs))
def get(self, key, default=0):
return self.d.get(key, default)
def has_key(self, *args, **kwargs):
return self.d.has_key(*args, **kwargs)
def items(self, *args, **kwargs):
return self.d.items(*args, **kwargs)
def iteritems(self, *args, **kwargs):
return self.d.iteritems(*args, **kwargs)
def iterkeys(self, *args, **kwargs):
return self.d.iterkeys(*args, **kwargs)
def itervalues(self, *args, **kwargs):
return self.d.itervalues(*args, **kwargs)
def keys(self, *args, **kwargs):
return self.d.keys(*args, **kwargs)
def pop(self, *args, **kwargs):
return self.d.pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
return self.d.popitem(*args, **kwargs)
def setdefault(self, *args, **kwargs):
return self.d.setdefault(*args, **kwargs)
def update(self, *args, **kwargs):
return self.d.update(*args, **kwargs)
def values(self, *args, **kwargs):
return self.d.values(*args, **kwargs)
def del_if_present(d, k):
if d.has_key(k):
del d[k]
class ValueOrderedDict:
"""
Note: this implementation assumes that the values do not mutate and change
their sort order. That is, it stores the values in a sorted list and
as items are added and removed from the dict, it makes updates to the list
which will keep the list sorted. But if a value that is currently sitting
in the list changes its sort order, then the internal consistency of this
object will be lost.
If that happens, and if assertion checking is turned on, then you will get
an assertion failure the very next time you try to do anything with this
ValueOrderedDict. However, those internal consistency checks are very slow
and almost certainly unacceptable to leave turned on in production code.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
self.i += 1
return (le[1], le[0],)
def iteritems(self):
return ValueOrderedDict.ItemIterator(self)
def items(self):
return zip(map(operator.__getitem__, self.l, [1]*len(self.l)), map(operator.__getitem__, self.l, [0]*len(self.l)))
def values(self):
return map(operator.__getitem__, self.l, [0]*len(self.l))
def keys(self):
return map(operator.__getitem__, self.l, [1]*len(self.l))
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
self.i += 1
return le[1]
def iterkeys(self):
return ValueOrderedDict.KeyIterator(self)
class ValueIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
self.i += 1
return le[0]
def itervalues(self):
return ValueOrderedDict.ValueIterator(self)
def __init__(self, initialdata={}):
warnings.warn("deprecated", DeprecationWarning)
self.d = {} # k: key, v: val
self.l = [] # sorted list of tuples of (val, key,)
self.update(initialdata)
assert self._assert_invariants()
def __len__(self):
return len(self.l)
def __repr_n__(self, n=None):
s = ["{",]
try:
iter = self.iteritems()
x = iter.next()
s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
i = 1
while (n is None) or (i < n):
x = iter.next()
s.append(", "); s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
except StopIteration:
pass
s.append("}")
return ''.join(s)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(),)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(16),)
def __eq__(self, other):
for (k, v,) in other.iteritems():
if not self.d.has_key(k) or self.d[k] != v:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _assert_invariants(self):
iter = self.l.__iter__()
try:
oldx = iter.next()
while True:
x = iter.next()
# self.l is required to be sorted
_assert(x >= oldx, x, oldx)
# every element of self.l is required to appear in self.d
_assert(self.d.has_key(x[1]), x)
                oldx = x
except StopIteration:
pass
for (k, v,) in self.d.iteritems():
i = bisect_left(self.l, (v, k,))
while (self.l[i][0] is not v) or (self.l[i][1] is not k):
i += 1
_assert(i < len(self.l), i, len(self.l), k, v, self.l)
_assert(self.l[i][0] is v, i, v, l=self.l, d=self.d)
_assert(self.l[i][1] is k, i, k, l=self.l, d=self.d)
return True
def insert(self, key, val=None):
assert self._assert_invariants()
result = self.__setitem__(key, val)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def __setitem__(self, key, val=None):
assert self._assert_invariants()
if self.d.has_key(key):
oldval = self.d[key]
if oldval != val:
# re-sort
i = bisect_left(self.l, (oldval, key,))
while (self.l[i][0] is not oldval) or (self.l[i][1] is not key):
i += 1
self.l.pop(i)
insort_left(self.l, (val, key,))
elif oldval is not val:
# replace
i = bisect_left(self.l, (oldval, key,))
while (self.l[i][0] is not oldval) or (self.l[i][1] is not key):
i += 1
self.l[i] = (val, key,)
else:
insort_left(self.l, (val, key,))
self.d[key] = val
assert self._assert_invariants()
return val
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
def __getitem__(self, key, default=None, strictkey=True):
if not self.d.has_key(key):
if strictkey:
raise KeyError, key
else:
return default
return self.d[key]
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
@return: the object removed or default if there is not item by
that key and strictkey is False
"""
assert self._assert_invariants()
if self.d.has_key(key):
val = self.d.pop(key)
i = bisect_left(self.l, (val, key,))
while (self.l[i][0] is not val) or (self.l[i][1] is not key):
i += 1
self.l.pop(i)
assert self._assert_invariants()
return val
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def clear(self):
assert self._assert_invariants()
self.d.clear()
del self.l[:]
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
for (k, v,) in otherdict.iteritems():
self.insert(k, v)
assert self._assert_invariants()
return self
def has_key(self, key):
assert self._assert_invariants()
return self.d.has_key(key)
def popitem(self):
if not self.l:
raise KeyError, 'popitem(): dictionary is empty'
le = self.l.pop(0)
del self.d[le[1]]
return (le[1], le[0],)
def pop(self, k, default=None, strictkey=False):
if not self.d.has_key(k):
if strictkey:
raise KeyError, k
else:
return default
v = self.d.pop(k)
i = bisect_left(self.l, (v, k,))
while (self.l[i][0] is not v) or (self.l[i][1] is not k):
i += 1
self.l.pop(i)
return v
def pop_from_list(self, i=0):
le = self.l.pop(i)
del self.d[le[1]]
return le[1]
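# --- Hedged usage sketch (illustrative; Python 2, like the module) ---
def _example_valueordereddict_usage():
    vod = ValueOrderedDict({'a': 3, 'b': 1, 'c': 2})
    assert vod.values() == [1, 2, 3]       # kept sorted by value, ascending
    assert vod.popitem() == ('b', 1)       # smallest value comes off first
    vod['a'] = 0                           # updates re-sort the backing list
    assert vod.keys() == ['a', 'c']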
| gpl-3.0 |
abdullah-radwan/pray-times | PrayTimesEn.py | 1 | 27138 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
--------------------- Copyright Block ----------------------
Prayer Times Program (ver 5.0)
Copyright (C) 2016 Abdullah Radwan
License: GNU GPL v3.0
TERMS OF USE:
Permission is granted to use this code, with or
without modification, in any website or application
provided that credit is given to the original work
with a link back to Abdullah Radwan.
This program is distributed in the hope that it will
be useful, but WITHOUT ANY WARRANTY.
PLEASE DO NOT REMOVE THIS COPYRIGHT BLOCK.'''
# Import main libs
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("Notify", "0.7")
from gi.repository import Gtk, GObject, Notify
from umalqurra.hijri_date import HijriDate
import time, PrayTimesLib, datetime, pyglet, configparser
# Import Glade File
builder = Gtk.Builder()
builder.add_from_file("PrayTimesEn.glade")
# Main class
class SalatTimes:
# Initialization
def __init__(self):
# Set main objects
self.builder = builder
self.lb = builder.get_object("lb_cities")
self.sa = builder.get_object("sa_cities")
self.ae = builder.get_object("ae_cities")
self.ma = builder.get_object("ma_cities")
self.ps = builder.get_object("ps_cities")
self.iq = builder.get_object("iq_cities")
self.ye = builder.get_object("ye_cities")
self.sy = builder.get_object("sy_cities")
self.dz = builder.get_object("dz_cities")
self.ly = builder.get_object("ly_cities")
self.kw = builder.get_object("kw_cities")
self.jo = builder.get_object("jo_cities")
self.sd = builder.get_object("sd_cities")
self.tn = builder.get_object("tn_cities")
self.bh = builder.get_object("bh_cities")
self.qa = builder.get_object("qa_cities")
self.om = builder.get_object("om_cities")
self.eg = builder.get_object("eg_cities")
self.countries = {
"Egypt": [self.eg, "Egypt"],
"Saudi Arabia": [self.sa, "Makkah"],
"United Arabic Emirates": [self.ae, "Makkah"],
"Oman": [self.om, "Makkah"],
"Yemen": [self.ye, "Makkah"],
"Qatar": [self.qa, "Makkah"],
"Bahrain": [self.bh, "Makkah"],
"Kuwait": [self.kw, "Makkah"],
"Iraq": [self.iq, "MWL"],
"Jordan": [self.jo, "Makkah"],
"Syria": [self.sy, "Makkah"],
"Lebanon": [self.lb, "MWL"],
"Palestine": [self.ps, "MWL"],
"Sudan": [self.sd, "Egypt"],
"Libya": [self.ly, "Egypt"],
"Tunisia": [self.tn, "MWL"],
"Algeria": [self.dz, "Egypt"],
"Morocco": [self.ma, "MWL"]
}
self.cities = {
# Egypt
"Cairo": [30.0500, 31.2500, +2],
"Alexandria": [31.1981, 29.9192, +2],
"Asyut": [27.1828, 31.1828, +2],
"Port Said": [31.2667, 32.3000, +2],
"Suez": [29.9667, 32.5500, +2],
"Tanta": [30.7911, 30.9981, +2],
# Saudi Arabia
"Riyadh": [24.6700, 46.6900, +3],
"Onaizah": [26.085478, 43.9768123, +3],
"Madinah": [24.4527, 39.6667, +3],
"Jeddah": [21.5169, 39.219, +3],
"Dammam": [26.4400, 50.1000, +3],
"Makkah": [21.4200, 39.8300, +3],
"Hail": [27.5258717, 41.6748334, +3],
"Abha": [18.2200, 42.5100, +3],
"Al Jouf": [29.9029354, 40.1848983, +3],
"Al Qurayyat": [31.316667, 37.366667, +3],
"Najran": [17.48333, 44.11667, +3],
"Taif": [21.2703, 40.4158, +3],
"Al Baha": [20, 41.45, +3],
"Buraidah": [26.3317, 43.9717, +3],
"Jazan": [16.883333, 42.55, +3],
"Tabuk": [28.3833, 36.5833, +3],
"Hafr Al Batin": [28.4342, 45.9636, +3],
"Al Khafji": [28.4403, 48.4844, +3],
# United Arabic Emirates
"Dubai": [25.2522, 55.2800, +4],
"Abu Dhabi": [24.4500, 54.3833, +4],
"Ajman": [25.4061, 55.4428, +4],
"Ras al Khaima": [24.4500, 54.3833, +4],
"Sharjah": [25.0000, 55.7500, +4],
# Oman
"Muscat": [23.6133, 58.5933, +4],
"Salala": [17.0175, 54.0828, +4],
# Yemen
"Aden": [12.7667, 45.0167, +3],
"Dhamar": [14.5500, 44.4017, +3],
"Mukalla": [14.5300, 49.1314, +3],
"Sanaa": [15.3547, 44.2067, +3],
"Taiz": [13.5000, 44.0000],
# Qatar
"Doha": [25.2867, 51.5333, +3],
# Bahrain
"Manama": [26.2361, 50.5831, +3],
# Kuwait
"Kuwait": [29.5000, 47.7500, +3],
"Al Jahara": [29.3375, 47.6581, +3],
# Iraq
"An Najaf": [31.9922, 44.3514, +3],
"Baghdad": [33.3386, 44.3939, +3],
"Basra": [30.5000, 47.8500, +3],
"Erbil": [36.1900, 44.0089, +3],
"Kufa": [32.0347, 44.4033, +3],
"Mosul": [36.3350, 43.1189, +3],
# Jordan
"Amman": [31.9500, 35.9333, +3],
"Irbid": [32.5556, 35.8500, +3],
"Madaba": [31.7167, 35.8000, +3],
# Syria
"Al Ladhiqiyah": [35.5167, 35.7833, +3],
"Aleppo": [36.2028, 37.1586, +3],
"Damascus": [33.5000, 36.3000, +3],
"Hama": [35.1333, 36.7500, +3],
"Hasakeh": [36.5000, 41.0000, +3],
"Homs": [34.7333, 36.7167, +3],
"Rakka": [35.9500, 39.0167, +3],
"Tartous": [34.8833, 35.8833, +3],
# Lebanon
"Akka": [34.4167, 36.2167, +3],
"Baalbek": [34.0000, 36.2000, +3],
"Beirut": [33.8719, 35.5097, +3],
"Sidon": [33.5631, 35.3689, +3],
"Tyre": [33.2711, 35.1964, +3],
# Palestine
"Elat": [29.5611, 34.9517, +3],
"Gaza": [31.5000, 34.4667, +3],
"Haifa": [32.8156, 34.9892, +3],
"Tel Aviv": [32.0667, 34.7667, +3],
# Sudan
"Atbara": [17.7167, 34.0667, +3],
"Kassala": [16.0000, 36.0000, +3],
"Khartoum": [15.5881, 32.5342, +3],
"Kosti": [13.1667, 32.6667, +3],
"Port Sudan": [19.6158, 37.2164, +3],
# Libya
"Agedabia": [30.7592, 20.2231, +2],
"Benghazi": [32.1167, 20.0667, +2],
"Misurata": [32.3783, 15.0906, +2],
"Sebha": [27.0333, 14.4333, +2],
"Tripoli": [32.8925, 13.1800, +2],
"Tubruq": [32.0836, 23.9764, +2],
# Tunisia
"Ariana": [36.8625, 10.1956, +1],
"Djerba": [33.8747, 10.8592, +1],
"Gabes": [33.8833, 10.1167, +1],
"Kairouan": [35.6744, 10.1017, +1],
"Sfax": [34.7406, 10.7603, +1],
"Sousse": [35.8256, 10.6411, +1],
"Tunis": [36.8028, 10.1797, +1],
# Algeria
"Algiers": [36.7631, 3.0506, +1],
"Annaba": [36.9000, 7.7667, +1],
"Bejaia": [36.7500, 5.0833, +1],
"Blida": [36.4686, 2.8289, +1],
"Constantine": [36.3650, 6.6147, +1],
"Oran": [35.6911, -0.6417, +1],
"Setif": [36.1914, 5.4094, +1],
"Skikda": [36.8792, 6.9067, +1],
"Tlemcen": [34.8783, -1.3150, +1],
# Morocco
"Agadir": [29.0167, -10.2500, +0],
"Casablanca": [33.5931, -7.6164, +0],
"Fez": [34.0528, -4.9828, +0],
"Kenitra": [34.2608, -6.5794, +0],
"Marrakech": [31.6333, -8.0000, +0],
"Meknes": [33.9000, -5.5500, +0],
"Oujda": [34.6867, -1.9114, +0],
"Rabat": [34.0253, -6.8361, +0],
"Safi": [32.3000, -9.2386, +0],
"Tangier": [35.7847, -5.8128, +0],
}
self.main_win = builder.get_object("window1")
self.about_dialog = builder.get_object("aboutdia"
"log1")
self.set_win = builder.get_object("window2")
self.pray_win = builder.get_object("window3")
self.time_label = builder.get_object("label11")
self.error_label = builder.get_object("label24")
self.fajr_entry = builder.get_object("entry1")
self.duhur_entry = builder.get_object("entry2")
self.asr_entry = builder.get_object("entry3")
self.maghrib_entry = builder.get_object("entry4")
self.isha_entry = builder.get_object("entry5")
self.status_icon = builder.get_object("statusicon1")
self.adan_box = builder.get_object("combobox3")
self.calc_times = PrayTimesLib.PrayTimes()
self.salat_times_grid = builder.get_object("grid1")
self.fajr_label = builder.get_object("label2")
self.duhur_label = builder.get_object("label4")
self.asr_label = builder.get_object("label6")
self.maghrib_label = builder.get_object("label8")
self.isha_label = builder.get_object("label10")
self.next_salat_label = builder.get_object("label26")
self.country_box = builder.get_object("combobox2")
self.city_box = builder.get_object("combobox1")
self.data = datetime.date.today()
self.sunrise_entry = builder.get_object("entry6")
self.sunrise_label = builder.get_object("label28")
self.hj_date_label = builder.get_object("label30")
self.gr_date_label = builder.get_object("label31")
self.gr_date_check = builder.get_object("checkbutton6")
self.hj_date_check = builder.get_object("checkbutton5")
self.time_now_check = builder.get_object("checkbutton2")
self.salat_times_check = builder.get_object("checkbutton1")
self.status_icon_check = builder.get_object("checkbutton4")
self.adan_check = builder.get_object("checkbutton3")
self.adan_label = builder.get_object("label20")
# Set default parameters
self.times = self.calc_times.getTimes((self.data), (26.085478, 43.9768123), +3)
self.sunrise_time = self.times["sunrise"]
self.salat_time = {"Salat Al Fajr": self.times["fajr"], "Salat Al Duhur": self.times["dhuhr"],
"Salat Al Asr": self.times["asr"], "Salat Al Maghrib": self.times["maghrib"],
"Salat Al Isha": self.times["isha"]}
self.play_adan = False
self.adan_sound = "makkah.wav"
self.adan_set = True
self.adan()
self.salat_times()
self.next_salat()
self.read_config()
GObject.timeout_add_seconds(1, self.adan)
GObject.timeout_add_seconds(1, self.next_salat)
# Enable or disable adan function
def enable_adan(self, widget = None):
self.adan_check = widget
if widget.get_active():
self.adan_box.show()
self.adan_label.show()
self.adan_set = True
else:
self.adan_box.hide()
self.adan_label.hide()
self.adan_set = False
# Next prayer function
def next_salat(self):
d = time.strptime(self.salat_time["Salat Al Duhur"], "%H:%M")
a = time.strptime(self.salat_time["Salat Al Asr"], "%H:%M")
m = time.strptime(self.salat_time["Salat Al Maghrib"], "%H:%M")
i = time.strptime(self.salat_time["Salat Al Isha"], "%H:%M")
t = time.strptime(time.strftime("%H:%M:%S"), "%H:%M:%S")
h = time.strptime('23:59:00', "%H:%M:%S")
f1 = time.strptime(self.salat_time["Salat Al Fajr"], "%H:%M")
f = time.strptime("2 " + self.salat_time["Salat Al Fajr"], "%d %H:%M")
if (t < f) and (t > i):
self.eta = "Salat Al Fajr"
elif (t > f1) and (t < d):
self.eta = "Salat Al Duhur"
elif (t > d) and (t < a):
self.eta = "Salat Al Asr"
elif (t > a) and (t < m):
self.eta = "Salat Al Maghrib"
elif (t > m) and (t < i):
self.eta = "Salat Al Isha"
else:
self.eta = "Unknows"
self.next_salat_label.set_text(self.eta)
return True
# Change adan function
def change_adan_sound(self, widget):
adansound = widget.get_active_iter()
model = widget.get_model()
name = model[adansound][0]
if name == "Makkah":
self.adan_sound = "makkah.wav"
elif name == "Madinah":
self.adan_sound = "madinah.wav"
elif name == "Egypt":
self.adan_sound = "egypt.wav"
# Adan function
def adan(self):
if self.adan_set == True:
now = time.strftime("%H:%M")
if now in self.salat_time.values():
if self.play_adan == False:
Notify.init("Pray Times Program")
adan_notify = Notify.Notification.new("Adan Now", "Adan Started", "dialog-information")
adan_notify.show()
adan = pyglet.resource.media(self.adan_sound)
adan.play()
self.time_label.set_text("Adan Now")
self.play_adan = True
else:
if self.play_adan == True:
self.play_adan = False
self.time_label.set_text("\nTime Now: %s" % (time.strftime("%H:%M:%S")))
um = HijriDate(int(time.strftime("%Y")), int(time.strftime("%m")), int(time.strftime("%d")), gr=True)
self.hj_date_label.set_text("\nHijri Date: %s/%s/%s" % (int(um.year), int(um.month), int(um.day)))
self.gr_date_label.set_text("\nGregorian Date: %s" % (time.strftime(("%Y/%m/%d"))))
else:
self.time_label.set_text("\nTime Now: %s" % (time.strftime("%H:%M:%S")))
um = HijriDate(int(time.strftime("%Y")), int(time.strftime("%m")), int(time.strftime("%d")), gr=True)
self.hj_date_label.set_text("\nHijri Date: %s/%s/%s" % (int(um.year), int(um.month), int(um.day)))
self.gr_date_label.set_text("\nGregorian Date: %s" % (time.strftime(("%Y/%m/%d"))))
return True
# Show time now option
def show_time_now(self, widget):
self.time_now_check = widget
if widget.get_active():
self.time_label.show()
else:
self.time_label.hide()
# Show salat times option
def show_salat_times(self, widget):
self.salat_times_check = widget
if widget.get_active():
self.salat_times_grid.show()
else:
self.salat_times_grid.hide()
# Set cities
def country(self, widget):
country = widget.get_active_iter()
model = widget.get_model()
self.country_name = model[country][0]
self.city_box.set_model(self.countries[self.country_name][0])
self.calc_times.setMethod(self.countries[self.country_name][1])
# Set times to each city
def city(self, widget):
self.data = [int(time.strftime("%Y")), int(time.strftime("%m")), int(time.strftime("%d"))]
city = widget.get_active_iter()
model = widget.get_model()
self.city_name = model[city][0]
self.times = self.calc_times.getTimes((self.data), (self.cities[self.city_name][0], self.cities[self.city_name][1]),
self.cities[self.city_name][2])
if self.calc_times.getMethod() == "Makkah":
um = HijriDate(int(time.strftime("%Y")), int(time.strftime("%m")), int(time.strftime("%d")), gr=True)
if um.month == 9.0:
hours = 2
minutes = 0
else:
hours = 1
minutes = 30
m = datetime.datetime.strptime(self.times["maghrib"], "%H:%M")
add_min = m + datetime.timedelta(hours=hours, minutes=minutes)
salat_isha = str(add_min.strftime("%H:%M"))
self.sunrise_time = self.times["sunrise"]
self.salat_time = {"Salat Al Fajr": self.times["fajr"], "Salat Al Duhur": self.times["dhuhr"],
"Salat Al Asr": self.times["asr"], "Salat Al Maghrib": self.times["maghrib"],
"Salat Al Isha": salat_isha}
else:
self.sunrise_time = self.times["sunrise"]
self.salat_time = {"Salat Al Fajr": self.times["fajr"], "Salat Al Duhur": self.times["dhuhr"],
"Salat Al Asr": self.times["asr"], "Salat Al Maghrib": self.times["maghrib"],
"Salat Al Isha": self.times["isha"]}
# Show set salat times manual window
def show_set_time_manual(self, widget):
self.pray_win.show_all()
# Set salat manual function
def save_set_time_manual(self, widget, data=None):
e1d = self.fajr_entry.get_text()
e2d = self.duhur_entry.get_text()
e3d = self.asr_entry.get_text()
e4d = self.maghrib_entry.get_text()
e5d = self.isha_entry.get_text()
e6d = self.sunrise_entry.get_text()
if not e1d or not e2d or not e3d or not e4d or not e5d:
self.error_label.set_text("Please Enter All Data")
else:
self.sunrise_time = e6d
self.salat_time = {"Salat Al Fajr": e1d, "Salat Al Duhur": e2d, "Salat Al Asr": e3d,
"Salat Al Maghrib": e4d, "Salat Al Isha": e5d}
self.salat_times()
self.pray_win.hide_on_delete()
# Start about dialog
def run_about(self, widget):
self.about_dialog.run()
self.about_dialog.hide_on_delete()
# Show settings window
def run_settings(self, widget):
self.set_win.show_all()
# Show main window
def show_program(self, widget, data=None):
self.main_win.show_all()
    # Set right click menu on the status bar icon
def sbe(self, si, event_button, event_time, data=None):
self.menu2 = Gtk.Menu()
show_window = Gtk.MenuItem("Show Main Window")
show_settings_window = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_PREFERENCES)
show_settings_window.set_always_show_image(True)
about_program = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_ABOUT)
about_program.set_always_show_image(True)
quit_program = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_CLOSE)
quit_program.set_always_show_image(True)
show_window.connect_object("activate", self.show_program, "Show Main Window")
show_settings_window.connect_object('activate', self.run_settings, "Preferences")
about_program.connect_object('activate', self.run_about, "About")
quit_program.connect_object("activate", self.quit, "Quit")
self.menu2.append(show_window)
self.menu2.append(show_settings_window)
self.menu2.append(about_program)
self.menu2.append(quit_program)
self.menu2.show_all()
self.menu2.popup(None, None, None, si, event_button, event_time)
def right(self, data, event_button, event_time):
self.sbe(self.status_icon, event_button, event_time)
def view_statusicon(self, widget=None):
if widget.get_active():
self.status_icon.set_visible(True)
else:
self.status_icon.set_visible(False)
self.main_win.show_all()
# Set salat times on main window
def salat_times(self,widget=None):
self.fajr_label.set_text(self.salat_time["Salat Al Fajr"])
self.sunrise_label.set_text(self.sunrise_time)
self.duhur_label.set_text(self.salat_time["Salat Al Duhur"])
self.asr_label.set_text(self.salat_time["Salat Al Asr"])
self.maghrib_label.set_text(self.salat_time["Salat Al Maghrib"])
self.isha_label.set_text(self.salat_time["Salat Al Isha"])
# Show Hijri Date
def show_hijri_date(self,widget):
self.hj_date_check = widget
if widget.get_active():
self.hj_date_label.show()
else:
self.hj_date_label.hide()
# Show Gregorian Date
def show_greg_date(self, widget):
self.gr_date_check = widget
if widget.get_active():
self.gr_date_label.show()
else:
self.gr_date_label.hide()
# Read Config File
def read_config(self):
self.main_win.show_all()
config = configparser.RawConfigParser()
config.read("config-en.cfg")
config1 = config.read("config-en.cfg")
if config1 == []:
return None
else:
try:
self.data = [int(time.strftime("%Y")), int(time.strftime("%m")), int(time.strftime("%d"))]
self.city_name = config.get("Settings","City")
self.method = config.get("Settings", "Method")
self.calc_times.setMethod(self.method)
self.times = self.calc_times.getTimes((self.data), (self.cities[self.city_name][0],
self.cities[self.city_name][1]), self.cities[self.city_name][2])
if self.method == "Makkah":
um = HijriDate(int(time.strftime("%Y")), int(time.strftime("%m")), int(time.strftime("%d")), gr=True)
if um.month == 9.0:
hours = 2
minutes = 0
else:
hours = 1
minutes = 30
m = datetime.datetime.strptime(self.times["maghrib"], "%H:%M")
add_min = m + datetime.timedelta(hours=hours, minutes=minutes)
salat_isha = str(add_min.strftime("%H:%M"))
self.sunrise_time = self.times["sunrise"]
self.salat_time = {"Salat Al Fajr": self.times["fajr"], "Salat Al Duhur": self.times["dhuhr"],
"Salat Al Asr": self.times["asr"], "Salat Al Maghrib": self.times["maghrib"],
"Salat Al Isha": salat_isha}
else:
self.sunrise_time = self.times["sunrise"]
self.salat_time = {"Salat Al Fajr": self.times["fajr"], "Salat Al Duhur": self.times["dhuhr"],
"Salat Al Asr": self.times["asr"], "Salat Al Maghrib": self.times["maghrib"],
"Salat Al Isha": self.times["isha"]}
except: pass
self.adan_sound = config.get("Settings", "AdanSound")
if config.get("Settings", "ShowSalatTimes") == "True": self.salat_times_grid.show()
else:
self.salat_times_grid.hide()
self.salat_times_check.set_active(False)
if config.get("Settings", "ShowTimeNow") == "True": self.time_label.show()
else:
self.time_label.hide()
self.time_now_check.set_active(False)
if config.get("Settings", "ShowHijriDate") == "True": self.hj_date_label.show()
else:
self.hj_date_label.hide()
self.hj_date_check.set_active(False)
if config.get("Settings", "ShowGregorianDate") == "True": self.gr_date_label.show()
else:
self.gr_date_label.hide()
self.gr_date_check.set_active(False)
if config.get("Settings", "ShowStatusIcon") == "True": self.status_icon.set_visible(True)
else:
self.status_icon.set_visible(False)
self.status_icon_check.set_active(False)
if config.get("Settings", "Adan") == "True": self.adan_set = True
else:
self.adan_set = False
self.adan_check.set_active(False)
self.adan_box.hide()
self.adan_label.hide()
self.salat_times()
# Write Config File
def write_config(self):
config = configparser.RawConfigParser()
config.add_section('Settings')
try: config.set('Settings', 'City', self.city_name)
except: pass
config.set("Settings", "Adan", self.adan_set)
config.set("Settings", "AdanSound", self.adan_sound)
config.set("Settings", "ShowSalatTimes", self.salat_times_check.get_active())
config.set("Settings", "ShowTimeNow", self.time_now_check.get_active())
config.set("Settings", "ShowHijriDate", self.hj_date_check.get_active())
config.set("Settings", "ShowGregorianDate", self.gr_date_check.get_active())
config.set("Settings", "Method", self.calc_times.getMethod())
config.set("Settings", "ShowStatusIcon", self.status_icon.get_visible())
with open('config-en.cfg', 'wt') as configfile:
config.write(configfile)
def reset_config(self, widget = None):
config = configparser.RawConfigParser()
config.add_section('Settings')
config.set('Settings', 'City', "Onaizah")
config.set("Settings", "Adan", "True")
config.set("Settings", "AdanSound", "Makkah")
config.set("Settings", "ShowSalatTimes", "True")
config.set("Settings", "ShowTimeNow", "True")
config.set("Settings", "ShowHijriDate", "True")
config.set("Settings", "ShowGregorianDate", "True")
config.set("Settings", "Method", "Makkah")
config.set("Settings", "ShowStatusIcon", "True")
        with open('config-en.cfg', 'wt') as configfile:  # same file read_config()/write_config() use
config.write(configfile)
self.salat_times_grid.show()
self.salat_times_check.set_active(True)
self.time_label.show()
self.time_now_check.set_active(True)
self.hj_date_label.show()
self.hj_date_check.set_active(True)
self.gr_date_label.show()
self.gr_date_check.set_active(True)
self.status_icon.set_visible(True)
self.status_icon_check.set_active(True)
# Quit function
def quit(self, widget=None):
self.write_config()
Gtk.main_quit()
# Hide windows
def delete_event(self, widget, data=None):
if self.status_icon.get_visible() == True:
self.main_win.hide_on_delete()
else:
self.write_config()
Gtk.main_quit()
return True
def delete_event1(self, widget, data=None):
self.set_win.hide_on_delete()
return True
def delete_event2(self, widget, data=None):
self.pray_win.hide_on_delete()
return True
# Connect signals
builder.connect_signals(SalatTimes())
Gtk.main() | gpl-3.0 |
XENON1T/pax | pax/plugins/io/Queues.py | 1 | 10740 | import time
import heapq
from pax import plugin, utils, exceptions, datastructure
from pax.parallel import queue, RabbitQueue, NO_MORE_EVENTS, REGISTER_PUSHER, PUSHER_DONE, DEFAULT_RABBIT_URI
def get_queue_from_config(config):
"""Given a queueplugin config, get the queue from it
Yeah, should have maybe made base class with this as only method...
"""
if 'queue' in config:
return config['queue']
elif 'queue_name' in config:
return RabbitQueue(config['queue_name'],
config.get('queue_url', DEFAULT_RABBIT_URI))
class PullFromQueue(plugin.InputPlugin):
# We may get eventproxies rather than real events
do_output_check = False
no_more_events = False
def startup(self):
self.queue = get_queue_from_config(self.config)
# If we need to order events received from the queue before releasing them, we need a heap
# NB! If you enable this, you must GUARANTEE no other process will be consuming from this queue
# (otherwise there will be holes in the event block ids, triggering an infinite wait)
self.ordered_pull = self.config.get('ordered_pull', False)
self.time_slept_since_last_response = 0
self.block_heap = []
self.pushers = []
# If no message has been received for this amount of seconds, crash.
self.timeout_after_sec = self.config.get('timeout_after_sec', float('inf'))
self.max_blocks_on_heap = self.config.get('max_blocks_on_heap', 250)
def get_block(self):
"""Get a block of events from the queue, or raise queue.Empty if no events are available
"""
if self.no_more_events:
# There are no more events.
# There could be stuff left on the queue, but then it's a None = NoMoreEvents message for other consumers.
raise queue.Empty
head, body = self.queue.get(block=True, timeout=1)
if head == NO_MORE_EVENTS:
# The last event has been popped from the queue. Push None back on the queue for
# the benefit of other consumers.
self.no_more_events = True
self.log.info("Received no more events message, putting it back on queue for others")
self.queue.put((NO_MORE_EVENTS, None))
raise queue.Empty
elif head == REGISTER_PUSHER:
# We're in a many-push to one-pull situation.
# One of the pushers has just announced itself.
self.pushers.append(body)
self.log.debug("Registered new pusher: %s" % body)
return self.get_block()
elif head == PUSHER_DONE:
# A pusher just proclaimed it will no longer push events
self.pushers.remove(body)
self.log.debug("Removed pusher: %s. %d remaining pushers" % (body, len(self.pushers)))
if not len(self.pushers):
# No pushers left, stop processing once there are no more events.
# This assumes all pushers will register before the first one is done!
self.queue.put((NO_MORE_EVENTS, None))
return self.get_block()
else:
block_id, event_block = head, body
return block_id, event_block
def get_events(self):
block_heap = self.block_heap
block_id = -1
while True:
try:
if self.ordered_pull:
# We have to ensure the event blocks are pulled out in order.
# If we don't have the block we want yet, keep fetching event blocks from the queue
# and push them onto a heap.
                    # While the next event we want isn't on the block heap, pull blocks from the queue into the heap
while not (len(block_heap) and block_heap[0][0] == block_id + 1):
new_block = self.get_block()
heapq.heappush(block_heap, new_block)
if len(block_heap) > self.max_blocks_on_heap:
raise exceptions.EventBlockHeapSizeExceededException(
"We have received over %d blocks without receiving the next block id (%d) in order. "
"Likely one of the block producers has died without telling anyone." % (
self.max_blocks_on_heap, block_id + 1))
self.log.debug("Just got block %d, heap is now %d blocks long" % (
new_block[0], len(block_heap)))
self.log.debug("Earliest block: %d, looking for block %s" % (block_heap[0][0], block_id + 1))
# If we get here, we have the event block we need sitting at the top of the heap
block_id, event_block = heapq.heappop(block_heap)
assert block_id >= 0
else:
block_id, event_block = self.get_block()
self.time_slept_since_last_response = 0
except queue.Empty:
if self.no_more_events and not len(block_heap):
self.log.debug("All done!")
# We're done, no more events!
break
                # The queue is empty, so we must wait for the next event / the event we want hasn't arrived on the heap yet.
self.log.debug("Found empty queue, no more events is %s, len block heap is %s, sleeping for 1 sec" % (
self.no_more_events, len(block_heap)))
if len(block_heap) > 0.3 * self.max_blocks_on_heap:
self.log.warning("%d blocks on heap, will crash if more than %d" % (len(block_heap),
self.max_blocks_on_heap))
time.sleep(1)
self.processor.timer.last_t = time.time() # Time spent idling shouldn't count for the timing report
self.time_slept_since_last_response += 1
if self.time_slept_since_last_response > self.timeout_after_sec:
raise exceptions.QueueTimeoutException(
"Waited for more than %s seconds to receive events; "
"lost confidence they will ever come." % self.timeout_after_sec)
continue
self.log.debug("Now processing block %d, %d events" % (block_id, len(event_block)))
for i, event in enumerate(event_block):
self.log.debug("Yielding event number %d" % event.event_number)
yield event
self.log.debug("Exited get_events loop")
def shutdown(self):
if hasattr(self.queue, 'close'):
self.queue.close()
class PushToQueue(plugin.OutputPlugin):
# We must be allowed to route eventproxies as well as actual events
do_input_check = False
do_output_check = False
def startup(self):
self.queue = get_queue_from_config(self.config)
self.max_queue_blocks = self.config.get('max_queue_blocks', 100)
self.max_block_size = self.config.get('event_block_size', 10)
self.preserve_ids = self.config.get('preserve_ids', False)
self.many_to_one = self.config.get('many_to_one', False)
# If we can't push a message due to a full queue for more than this number of seconds, crash
# since probably the process responsible for pulling from the queue has died.
self.timeout_after_sec = self.config.get('timeout_after_sec', float('inf'))
if self.many_to_one:
# Generate random name and tell the puller we're in town
self.pusher_name = utils.randomstring(20)
self.queue.put((REGISTER_PUSHER, self.pusher_name))
self.current_block = []
self.current_block_id = 0
def write_event(self, event):
if self.preserve_ids:
# Someone else already set the block ids. Good for us.
assert event.block_id >= 0 # Datastructure default is -1, if we see that here we are in big doodoo
if event.block_id != self.current_block_id:
# A change in the block id must always be just after sending a block
# otherwise the pax chain is using inconsistent event block sizes
assert len(self.current_block) == 0
self.current_block_id = event.block_id
else:
# We are responsible for setting the block ids.
# This is a bit convoluted because events could be either Events or EventProxies, but the latter
# are immutable namedtuples.
if isinstance(event, datastructure.EventProxy):
# Namedtuples are immutable, so we need to create a new event proxy with the same raw data
event = datastructure.make_event_proxy(event, data=event.data, block_id=self.current_block_id)
else:
event.block_id = self.current_block_id
self.current_block.append(event)
# Send events once the max block size is reached. Do not wait until the event with the next id arrives:
# that can take forever if we're doing low-rate processing with far too many cores.
if len(self.current_block) == self.max_block_size:
self.send_block()
# If we're setting the ids ourselves, we have to continue with the next number from here on
if not self.preserve_ids:
self.current_block_id += 1
def send_block(self):
"""Sends the current block if it has any events in it, then resets the current block to []
Does NOT change self.current_block_id!
"""
seconds_slept_with_queue_full = 0
if len(self.current_block):
while self.queue.qsize() >= self.max_queue_blocks:
self.log.info("Max queue size %d reached, waiting to push block" % self.max_queue_blocks)
seconds_slept_with_queue_full += 1
time.sleep(1)
if seconds_slept_with_queue_full >= self.timeout_after_sec:
raise exceptions.QueueTimeoutException(
"Blocked from pushing to the queue for more than %s seconds; "
"lost confidence we will ever be able to." % self.timeout_after_sec)
self.queue.put((self.current_block_id, self.current_block))
self.current_block = []
def shutdown(self):
self.send_block()
if self.many_to_one:
self.queue.put((PUSHER_DONE, self.pusher_name))
else:
self.queue.put((NO_MORE_EVENTS, None))
if hasattr(self.queue, 'close'):
self.queue.close()
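# --- Hedged illustration (not part of the original module) ---
# A minimal, self-contained sketch of the heap-based reordering idea used in
# the get_events loop above: blocks can arrive out of order, and a min-heap
# keyed on block id lets the puller release them strictly in sequence. The
# block contents are placeholder strings, not real event objects.
if __name__ == '__main__':
    import heapq
    arrivals = [(2, ['e4', 'e5']), (0, ['e0', 'e1']), (1, ['e2', 'e3'])]
    heap = []
    next_id = 0
    for block in arrivals:
        heapq.heappush(heap, block)
        # Release every block that is now in order.
        while heap and heap[0][0] == next_id:
            block_id, events = heapq.heappop(heap)
            print("releasing block %d: %s" % (block_id, events))
            next_id += 1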
| bsd-3-clause |
Pelagicore/tracker-ivi | tests/estress/client.py | 5 | 2596 | #!/usr/bin/env python
#
# Copyright (C) 2009, Nokia <ivan.frade@nokia.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import gobject
import gtk
import time
import dbus
import getopt
import sys
TRACKER = 'org.freedesktop.Tracker1'
TRACKER_OBJ = '/org/freedesktop/Tracker1/Resources'
SPARQL_QUERY = """
SELECT ?entry ?title ?date ?isRead WHERE {
?entry a nmo:FeedMessage ;
nie:title ?title ;
nie:contentLastModified ?date .
OPTIONAL {
?entry nmo:isRead ?isRead.
}
} ORDER BY DESC(?date) LIMIT %s
"""
bus = dbus.SessionBus ()
obj = bus.get_object (TRACKER, TRACKER_OBJ)
iface = dbus.Interface (obj, "org.freedesktop.Tracker1.Resources")
def run_query ():
start = time.time ()
results = iface.SparqlQuery (SPARQL_QUERY % ("10"))
end = time.time ()
print int (time.time()), "%f" % (end - start)
return True
def exit_cb ():
sys.exit (0)
def usage ():
print "Usage:"
print " client.py [OPTION...] - Run periodically a query on tracker"
print ""
print "Help Options:"
print " -h, --help Show help options"
print ""
print "Application Options:"
print " -p, --period=NUM Time (in sec) between queries"
print " -t, --timeout=NUM Switch off the program after NUM seconds"
print ""
if __name__ == "__main__":
opts, args = getopt.getopt(sys.argv[1:],
"p:t:h",
["period", "timeout", "help"])
period = 1
timeout = 0
for o, a in opts:
if o in ["-p", "--period"]:
period = int (a)
if o in ["-t", "--timeout"]:
timeout = int (a)
if o in ["-h", "--help"]:
usage ()
sys.exit (0)
gobject.timeout_add (period * 1000, run_query)
if (timeout > 0):
gobject.timeout_add (timeout *1000, exit_cb)
gtk.main ()
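# --- Hedged usage note (not part of the original script) ---
# Typical invocations, using the option names documented in usage() above:
#   python client.py -p 5          # run the query every 5 seconds
#   python client.py -p 2 -t 60    # query every 2 seconds, exit after 60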
| gpl-2.0 |
stshine/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/terminal.py | 176 | 21784 | """ terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
import pytest
import py
import sys
import time
import platform
import _pytest._pluggy as pluggy
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption('-v', '--verbose', action="count",
dest="verbose", default=0, help="increase verbosity."),
group._addoption('-q', '--quiet', action="count",
dest="quiet", default=0, help="decrease verbosity."),
group._addoption('-r',
action="store", dest="reportchars", default=None, metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed (w)pytest-warnings "
"(p)passed, (P)passed with output, (a)all except pP.")
group._addoption('-l', '--showlocals',
action="store_true", dest="showlocals", default=False,
help="show locals in tracebacks (disabled by default).")
group._addoption('--report',
action="store", dest="report", default=None, metavar="opts",
help="(deprecated, use -r)")
group._addoption('--tb', metavar="style",
action="store", dest="tbstyle", default='auto',
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
help="traceback print mode (auto/long/short/line/native/no).")
group._addoption('--fulltrace', '--full-trace',
action="store_true", default=False,
help="don't cut any tracebacks (default is to cut).")
group._addoption('--color', metavar="color",
action="store", dest="color", default='auto',
choices=['yes', 'no', 'auto'],
help="color terminal output (yes/no/auto).")
def pytest_configure(config):
config.option.verbose -= config.option.quiet
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, 'terminalreporter')
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
reportopts = ""
optvalue = config.option.report
if optvalue:
py.builtin.print_("DEPRECATED: use -r instead of --report option.",
file=sys.stderr)
if optvalue:
for setting in optvalue.split(","):
setting = setting.strip()
if setting == "skipped":
reportopts += "s"
elif setting == "xfailed":
reportopts += "x"
reportchars = config.option.reportchars
if reportchars:
for char in reportchars:
if char not in reportopts and char != 'a':
reportopts += char
elif char == 'a':
reportopts = 'fEsxXw'
return reportopts
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
class WarningReport:
def __init__(self, code, message, nodeid=None, fslocation=None):
self.code = code
self.message = message
self.nodeid = nodeid
self.fslocation = fslocation
class TerminalReporter:
def __init__(self, config, file=None):
import _pytest.config
self.config = config
self.verbosity = self.config.option.verbose
self.showheader = self.verbosity >= 0
self.showfspath = self.verbosity >= 0
self.showlongtestinfo = self.verbosity > 0
self._numcollected = 0
self.stats = {}
self.startdir = py.path.local()
if file is None:
file = sys.stdout
self._tw = self.writer = _pytest.config.create_terminal_writer(config,
file)
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
def hasopt(self, char):
char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid, res):
fspath = self.config.rootdir.join(nodeid.split("::")[0])
if fspath != self.currentfspath:
self.currentfspath = fspath
fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
self._tw.write(fspath + " ")
self._tw.write(res)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not py.builtin._istext(line):
line = py.builtin.text(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
line = str(line)
self._tw.write("\r" + line, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in py.builtin.text(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
if isinstance(fslocation, tuple):
fslocation = "%s:%d" % fslocation
warning = WarningReport(code=code, fslocation=fslocation,
message=message, nodeid=nodeid)
warnings.append(warning)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault('deselected', []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
fsid = nodeid.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
cat, letter, word = res
self.stats.setdefault(cat, []).append(rep)
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
if self.verbosity <= 0:
if not hasattr(rep, 'node') and self.showfspath:
self.write_fspath_result(rep.nodeid, letter)
else:
self._tw.write(letter)
else:
if isinstance(word, tuple):
word, markup = word
else:
if rep.passed:
markup = {'green':True}
elif rep.failed:
markup = {'red':True}
elif rep.skipped:
markup = {'yellow':True}
line = self._locationline(rep.nodeid, *rep.location)
if not hasattr(rep, 'node'):
self.write_ensure_prefix(line, word, **markup)
#self._tw.write(word, **markup)
else:
self.ensure_newline()
if hasattr(rep, 'node'):
self._tw.write("[%s] " % rep.node.gateway.id)
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_collection(self):
if not self.isatty and self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
#self.write_fspath_result(report.nodeid, 'E')
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
errors = len(self.stats.get('error', []))
skipped = len(self.stats.get('skipped', []))
if final:
line = "collected "
else:
line = "collecting "
line += str(self._numcollected) + " items"
if errors:
line += " / %d errors" % errors
if skipped:
line += " / %d skipped" % skipped
if self.isatty:
if final:
line += " \n"
self.rewrite(line, bold=True)
else:
self.write_line(line)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session):
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, 'pypy_version_info'):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += ", pytest-%s, py-%s, pluggy-%s" % (
pytest.__version__, py.__version__, pluggy.__version__)
if self.verbosity > 0 or self.config.option.debug or \
getattr(self.config.option, 'pastebin', None):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir)
lines.reverse()
for line in flatten(lines):
self.write_line(line)
def pytest_report_header(self, config):
inifile = ""
if config.inifile:
inifile = config.rootdir.bestrelpath(config.inifile)
lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append(
"plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return lines
def pytest_collection_finish(self, session):
if self.config.option.collectonly:
self._printcollecteditems(session.items)
if self.stats.get('failed'):
self._tw.sep("!", "collection failures")
for rep in self.stats.get('failed'):
rep.toterminal(self._tw)
return 1
return 0
if not self.showheader:
return
#for i, testarg in enumerate(self.config.args):
# self.write_line("test path %d: %s" %(i+1, testarg))
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split('::', 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
nodeid = item.nodeid
nodeid = nodeid.replace("::()::", "::")
self._tw.line(nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[:len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack):]:
stack.append(col)
#if col.name == "()":
# continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED)
if exitstatus in summary_exit_codes:
self.config.hook.pytest_terminal_summary(terminalreporter=self)
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.summary_passes()
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_deselected()
self.summary_stats()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, '_keyboardinterrupt_memo'):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
excrepr.reprcrash.toterminal(self._tw)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[:-len(domain)]
l = domain.split("[")
l[0] = l[0].replace('.', '::') # don't replace '.' in params
line += "[".join(l)
return line
# collect_fspath comes from testid which has a "/"-normalized path
if fspath:
res = mkrel(nodeid).replace("::()", "") # parens-normalization
if nodeid.split("::")[0] != fspath.replace("\\", "/"):
res += " <- " + self.startdir.bestrelpath(fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
if hasattr(rep, 'location'):
fspath, lineno, domain = rep.location
return domain
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
l = []
for x in self.stats.get(name, []):
if not hasattr(x, '_pdbshown'):
l.append(x)
return l
def summary_warnings(self):
if self.hasopt("w"):
warnings = self.stats.get("warnings")
if not warnings:
return
self.write_sep("=", "pytest-warning summary")
for w in warnings:
self._tw.line("W%s %s %s" % (w.code,
w.fslocation, w.message))
def summary_passes(self):
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports = self.getreports('passed')
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('failed')
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
markup = {'red': True, 'bold': True}
self.write_sep("_", msg, **markup)
self._outrep_summary(rep)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('error')
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats['error']:
msg = self._getfailureheadline(rep)
if not hasattr(rep, 'when'):
# collect
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
for secname, content in rep.sections:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = time.time() - self._sessionstarttime
(line, color) = build_summary_stats_line(self.stats)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {color: True, 'bold': True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def summary_deselected(self):
if 'deselected' in self.stats:
l = []
k = self.config.option.keyword
if k:
l.append("-k%s" % k)
m = self.config.option.markexpr
if m:
l.append("-m %r" % m)
if l:
self.write_sep("=", "%d tests deselected by %r" % (
len(self.stats['deselected']), " ".join(l)), bold=True)
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
def flatten(l):
for x in l:
if isinstance(x, (list, tuple)):
for y in flatten(x):
yield y
else:
yield x
def build_summary_stats_line(stats):
keys = ("failed passed skipped deselected "
"xfailed xpassed warnings error").split()
key_translation = {'warnings': 'pytest-warnings'}
unknown_key_seen = False
for key in stats.keys():
if key not in keys:
if key: # setup/teardown reports have an empty key, ignore them
keys.append(key)
unknown_key_seen = True
parts = []
for key in keys:
val = stats.get(key, None)
if val:
key_name = key_translation.get(key, key)
parts.append("%d %s" % (len(val), key_name))
if parts:
line = ", ".join(parts)
else:
line = "no tests ran"
if 'failed' in stats or 'error' in stats:
color = 'red'
elif 'warnings' in stats or unknown_key_seen:
color = 'yellow'
elif 'passed' in stats:
color = 'green'
else:
color = 'yellow'
return (line, color)
def _plugin_nameversions(plugininfo):
l = []
for plugin, dist in plugininfo:
# gets us name and version!
name = '{dist.project_name}-{dist.version}'.format(dist=dist)
# questionable convenience, but it keeps things short
if name.startswith("pytest-"):
name = name[7:]
# we decided to print python package names
# they can have more than one plugin
if name not in l:
l.append(name)
return l
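# --- Hedged illustration (not part of the original module) ---
# build_summary_stats_line() only inspects the lengths of the per-outcome
# report lists, so plain placeholder objects are enough to exercise it.
if __name__ == '__main__':
    fake_stats = {'failed': [object()] * 2, 'passed': [object()] * 10}
    # Expected: ('2 failed, 10 passed', 'red')
    print(build_summary_stats_line(fake_stats))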
| mpl-2.0 |
cjparsons74/Kupfer-cjparsons74 | kupfer/plugin/kupfer_plugins.py | 2 | 2808 | __kupfer_name__ = _("Kupfer Plugins")
__kupfer_sources__ = ("KupferPlugins", )
__description__ = _("Access Kupfer's plugin list in Kupfer")
__version__ = ""
__author__ = "Ulrik Sverdrup <ulrik.sverdrup@gmail.com>"
import os
from kupfer.objects import Action, Source, Leaf, FileLeaf, TextLeaf
from kupfer import icons
from kupfer import kupferui
# Since this is a core plugin we break some rules
# This module is normally out of bounds for plugins
from kupfer.core import plugins, settings
class ShowInfo (Action):
def __init__(self):
Action.__init__(self, _("Show Information"))
def activate(self, leaf):
plugin_id = leaf.object["name"]
kupferui.show_plugin_info(plugin_id)
def get_description(self):
pass
def get_icon_name(self):
return "dialog-information"
class ShowSource (Action):
def __init__(self):
Action.__init__(self, _("Show Source Code"))
def has_result(self):
return True
def activate(self, leaf):
# Try to find the __file__ attribute for the plugin
# It will fail for files inside zip packages, but that is
# uncommon for now.
# Additionally, it will fail for fake plugins
plugin_id = leaf.object["name"]
filename = plugins.get_plugin_attribute(plugin_id, "__file__")
if not filename:
return leaf
root, ext = os.path.splitext(filename)
if ext.lower() == ".pyc" and os.path.exists(root + ".py"):
return FileLeaf(root + ".py")
if not os.path.exists(filename):
# handle modules in zip or eggs
import pkgutil
pfull = "kupfer.plugin." + plugin_id
loader = pkgutil.get_loader(pfull)
if loader:
return TextLeaf(loader.get_source(pfull))
return FileLeaf(filename)
def get_description(self):
pass
def get_icon_name(self):
return "dialog-information"
class Plugin (Leaf):
# NOTE: Just to be sure that a plugin ranks lower than a
# like-named other object by default.
rank_adjust = -1
def __init__(self, obj, name):
Leaf.__init__(self, obj, name)
def get_actions(self):
yield ShowInfo()
yield ShowSource()
def get_description(self):
setctl = settings.GetSettingsController()
enabled = setctl.get_plugin_enabled(self.object["name"])
return u"%s (%s)" % (self.object["description"],
_("enabled") if enabled else _("disabled"))
def get_icon_name(self):
return "package-x-generic"
class KupferPlugins (Source):
def __init__(self):
Source.__init__(self, _("Kupfer Plugins"))
def get_items(self):
setctl = settings.GetSettingsController()
for info in plugins.get_plugin_info():
plugin_id = info["name"]
if setctl.get_plugin_is_hidden(plugin_id):
continue
yield Plugin(info, info["localized_name"])
def should_sort_lexically(self):
return True
def provides(self):
yield Plugin
def get_gicon(self):
return icons.ComposedIcon("package-x-generic", "package-x-generic")
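# --- Hedged illustration (not part of the original plugin) ---
# ShowSource falls back to pkgutil when a plugin module lives inside a zip
# or egg. This standalone snippet shows that fallback on a stdlib module;
# the module name "json" is only an example.
if __name__ == '__main__':
    import pkgutil
    loader = pkgutil.get_loader("json")
    if loader is not None:
        source = loader.get_source("json")
        print(source.splitlines()[0] if source else "no source available")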
| gpl-3.0 |
huiyi1990/RosbagPandas | rosbag_pandas.py | 1 | 8453 | #!/usr/bin/env python
import warnings
import re
import subprocess
import types
import yaml
import pandas as pd
import numpy as np
import rosbag
import rospy
from roslib.message import get_message_class
def bag_to_dataframe(bag_name, include=None, exclude=None, parse_header=False, seconds=False):
'''
Read in a rosbag file and create a pandas data frame that
is indexed by the time the message was recorded in the bag.
:bag_name: String name for the bag file
:include: None, String, or List Topics to include in the dataframe
if None all topics added, if string it is used as regular
expression, if list that list is used.
:exclude: None, String, or List Topics to be removed from those added
using the include option using set difference. If None no topics
removed. If String it is treated as a regular expression. A list
removes those in the list.
:parse_header: if True, the message header fields are parsed into columns as well
:seconds: if True the time index is in seconds; otherwise it is a datetime index
:returns: a pandas dataframe object
'''
# get list of topics to parse
yaml_info = get_bag_info(bag_name)
bag_topics = get_topics(yaml_info)
bag_topics = prune_topics(bag_topics, include, exclude)
length = get_length(bag_topics, yaml_info)
msgs_to_read, msg_type = get_msg_info(yaml_info, bag_topics, parse_header)
bag = rosbag.Bag(bag_name)
dmap = create_data_map(msgs_to_read)
# create datastore
datastore = {}
for topic in dmap.keys():
for f, key in dmap[topic].items():
t = msg_type[topic][f]
if isinstance(t, int) or isinstance(t, float):
arr = np.empty(length)
arr.fill(np.NAN)
else:
arr = np.array([None] * length)
datastore[key] = arr
# create the index
index = np.empty(length)
index.fill(np.NAN)
# load all of the data
idx = 0
for topic, msg, mt in bag.read_messages(topics=bag_topics):
try:
if seconds:
index[idx] = msg.header.stamp.to_sec()
else:
index[idx] = msg.header.stamp.to_nsec()
except:
if seconds:
index[idx] = mt.to_sec()
else:
index[idx] = mt.to_nsec()
fields = dmap[topic]
for f, key in fields.items():
try:
d = get_message_data(msg, f)
datastore[key][idx] = d
except:
pass
idx = idx + 1
bag.close()
# convert the index
if not seconds:
index = pd.to_datetime(index, unit='ns')
# now we have read all of the messages, it's time to assemble the dataframe
return pd.DataFrame(data=datastore, index=index)
def get_length(topics, yaml_info):
'''
Find the length (# of rows) in the created dataframe
'''
total = 0
info = yaml_info['topics']
for topic in topics:
for t in info:
if t['topic'] == topic:
total = total + t['messages']
break
return total
def create_data_map(msgs_to_read):
'''
Create a data map for usage when parsing the bag
'''
dmap = {}
for topic in msgs_to_read.keys():
base_name = get_key_name(topic) + '__'
fields = {}
for f in msgs_to_read[topic]:
key = (base_name + f).replace('.', '_')
fields[f] = key
dmap[topic] = fields
return dmap
def prune_topics(bag_topics, include, exclude):
'''Prune the topics. If include is None, add all topics to the set of topics
to use; if include is a string, keep the topics matching it as a regular
expression; if it is a list, use that list.
If exclude is None, do nothing; if it is a string, remove the topics matching
it as a regular expression; if it is a list, remove those topics'''
topics_to_use = set()
# add all of the topics
if include is None:
for t in bag_topics:
topics_to_use.add(t)
elif isinstance(include, basestring):
check = re.compile(include)
for t in bag_topics:
if re.match(check, t) is not None:
topics_to_use.add(t)
else:
try:
# add all of the includes if it is in the topic
for topic in include:
if topic in bag_topics:
topics_to_use.add(topic)
except:
warnings.warn('Error in topic selection, using all topics!')
topics_to_use = set()
for t in bag_topics:
topics_to_use.add(t)
to_remove = set()
# now exclude the exclusions
if exclude is None:
pass
elif isinstance(exclude, basestring):
check = re.compile(exclude)
for t in list(topics_to_use):
if re.match(check, t) is not None:
to_remove.add(t)
else:
for remove in exclude:
if remove in topics_to_use:
to_remove.add(remove)
# final set stuff to get topics to use
topics_to_use = topics_to_use - to_remove
# return a list for the results
return list(topics_to_use)
def get_msg_info(yaml_info, topics, parse_header=True):
'''
Get info from all of the messages about what they contain
and will be added to the dataframe
'''
topic_info = yaml_info['topics']
msgs = {}
classes = {}
for topic in topics:
base_key = get_key_name(topic)
msg_paths = []
msg_types = {}
for info in topic_info:
if info['topic'] == topic:
msg_class = get_message_class(info['type'])
if msg_class is None:
warnings.warn(
'Could not find types for ' + topic + ', skipping')
else:
(msg_paths, msg_types) = get_base_fields(msg_class(), "",
parse_header)
msgs[topic] = msg_paths
classes[topic] = msg_types
return (msgs, classes)
def get_bag_info(bag_file):
'''Get yaml dict of the bag information
by calling the rosbag info subprocess -- used to create correctly sized
arrays'''
# Get the info on the bag
bag_info = yaml.load(subprocess.Popen(
['rosbag', 'info', '--yaml', bag_file],
stdout=subprocess.PIPE).communicate()[0])
return bag_info
def get_topics(yaml_info):
''' Returns the names of all of the topics in the bag
'''
# Pull out the topic info
names = []
# Store all of the topics in a dictionary
topics = yaml_info['topics']
for topic in topics:
names.append(topic['topic'])
return names
def get_base_fields(msg, prefix='', parse_header=True):
'''function to get the full names of every message field in the message'''
slots = msg.__slots__
ret_val = []
msg_types = dict()
for i in slots:
slot_msg = getattr(msg, i)
if not parse_header and i == 'header':
continue
if hasattr(slot_msg, '__slots__'):
(subs, type_map) = get_base_fields(
slot_msg, prefix=prefix + i + '.',
parse_header=parse_header,
)
for i in subs:
ret_val.append(i)
for k, v in type_map.items():
msg_types[k] = v
else:
ret_val.append(prefix + i)
msg_types[prefix + i] = slot_msg
return (ret_val, msg_types)
def get_message_data(msg, key):
'''get the datapoint from the dot delimited message field key
e.g. translation.x looks up translation than x and returns the value found
in x'''
data = msg
paths = key.split('.')
for i in paths:
data = getattr(data, i)
return data
def get_key_name(name):
'''fix up topic to key names to make them a little prettier'''
if name[0] == '/':
name = name[1:]
name = name.replace('/', '.')
return name
def clean_for_export(df):
new_df = pd.DataFrame()
for c, t in df.dtypes.iteritems():
if t.kind in 'OSUV':
s = df[c].dropna().apply(func=str)
s = s.str.replace('\n', '')
s = s.str.replace('\r', '')
s = s.str.replace(',','\t')
new_df[c] = s
else:
new_df[c] = df[c]
return new_df
if __name__ == '__main__':
print 'hello'
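# --- Hedged illustration (not part of the original module) ---
# prune_topics() treats a string include/exclude as a regular expression and
# a list as an explicit topic list. The topic names below are made up, and
# the snippet assumes Python 2 to match the rest of the module.
# Expected output: ['/imu/data']
if __name__ == '__main__':
    demo_topics = ['/imu/data', '/imu/mag', '/odom']
    print(prune_topics(demo_topics, include='/imu.*', exclude=['/imu/mag']))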
| apache-2.0 |
Shirling-VT/davitpy_sam | davitpy/pydarn/sdio/fitexfilter.py | 1 | 8808 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*************************************
**Module**: pydarn.sdio.fitexfilter
*************************************
Filter fitexfiles natively in python
.. warning::
This is very SLOW. We are currently working on this. We recommend using the C version, which is folded into :func:`pydarn.sdio.radDataRead.radDataOpen`
**Functions**:
* :func:`pydarn.sdio.fitexfilter.combBeams`
* :func:`pydarn.sdio.fitexfilter.fitFilter`
* :func:`pydarn.sdio.fitexfilter.doFilter`
**Classes**:
* :class:`pydarn.sdio.fitexfilter.Gate`
"""
import numpy as np
import datetime as dt
from davitpy import utils
class Gate(object):
"""A class to represent a single range gate
**Attrs**:
* **v** (float): velocity
* **w_l** (float): spectral width
* **p_l** (float): power
* **elv** (float): elevation angle
* **phi0** (float): phase difference between front and back array
written by AJ, 20130402
"""
def __init__(self,fit,i):
self.v = fit.v[i]
self.w_l = fit.w_l[i]
self.p_l = fit.p_l[i]
self.pwr0 = fit.pwr0[i]
if fit.elv != None: self.elv = fit.elv[i]
else: self.elv = None
if fit.phi0 != None: self.phi0 = fit.phi0[i]
else: self.phi0 = None
def combBeams(scan):
"""This function combines all repeated beams within a scan into an averaged beam
**Args**:
* **scan** (:class:`sdio.scanData`): the scan to be combined
**Returns**:
* **outScan** (:class:`sdio.scanData`): the combined scan
**Example**:
::
combBeams(myScan)
written by AJ, 20130402
"""
from davitpy import pydarn
outScan = []
#sort the scan by beam number
scan.sort(key=lambda beam: beam.bmnum)
#see if any beam number repeat
bcnt = np.zeros(50)
for b in scan:
bcnt[b.bmnum] += 1.
#save any single beams:
for b in scan:
if bcnt[b.bmnum] == 1:
outScan.append(b)
#average any repeat beams
for i in range(len(bcnt)):
beams = []
#check for more than one
if bcnt[i] > 1:
myBeam = pydarn.sdio.beamData()
for b in scan:
#append it to beams list
if b.bmnum == i: beams.append(b)
nrang = max(beams, key=lambda x: x.prm.nrang).prm.nrang
#initialize a new beam object
myBeam.copyData(beams[0])
for key,val in myBeam.fit.__dict__.iteritems():
setattr(myBeam.fit,key,[])
myBeam.prm.nrang = nrang
for j in range(nrang):
cnt,pos = 0.,float(bcnt[i])
for b in beams:
if j in b.fit.slist: cnt += 1.
if cnt/pos > .5:
myBeam.fit.slist.append(j)
myBeam.fit.qflg.append(1)
for key in myBeam.fit.__dict__.iterkeys():
if key == 'qflg' or key == 'gflg' or key == 'slist':
continue
arr = []
for b in beams:
if j in b.fit.slist:
ind = b.fit.slist.index(j)
arr.append(getattr(b.fit, key)[ind])
setattr(myBeam.fit,key,np.median(arr))
outScan.append(myBeam)
outScan.sort(key=lambda beam: beam.bmnum)
return outScan
def fitFilter(inFile,outFile,thresh=0.4,vb=False):
"""This function applies a boxcar filter to a fitacf file
.. warning::
This is **VERY** slow. We are currently working on improving this.
**Args**:
* **infile** (str): the name of the input fitacf-format file
* **outfile** (str): the name of the output file
* **[thresh]** (float): the filter threshold for turning on a R-B cell. default = 0.4
* **[vb]** (boolean): a flag indicating verbose output. default = False
**Returns**:
* Nothing.
**Example**:
::
pydarn.sdio.fitexfilter.fitFilter('input.fitacf','output.fitacf',thresh=0.5,vb=True)
written by AJ, 20130402
"""
from davitpy import pydarn
inp = pydarn.sdio.radDataOpen(dt.datetime(2010,5,1),'bks',fileName=inFile)
outp = open(outFile,'w')
scans = [None, None, None]
sc = pydarn.sdio.radDataReadScan(inp)
scans[1] = sc
sc = pydarn.sdio.radDataReadScan(inp)
scans[2] = sc
while sc != None:
tsc = doFilter(scans,thresh=thresh)
for b in tsc:
print b
pydarn.dmapio.writeFitRec(b,utils.datetimeToEpoch(b.time),outp)
sc = pydarn.sdio.radDataReadScan(inp)
scans[0] = scans[1]
scans[1] = scans[2]
scans[2] = sc
tsc = doFilter(scans,thresh=thresh)
print tsc.time
for b in tsc:
pydarn.dmapio.writeFitRec(b,utils.datetimeToEpoch(b.time),outp)
outp.close()
def doFilter(scans,thresh=.4):
"""This function applies a boxcar filter to consecutive scans
.. warning::
This is **VERY** slow. We are currently working on improving this.
**Args**:
* **scans** (list): a list of 3 consecutive scans in time.
* **[thresh]** (float): the filter threshold for turning on a R-B cell. default = 0.4
**Returns**:
* **outScan** (:class:`pydarn.sdio.radDataTypes.scanData`): the filtered scan
**Example**:
::
filtScan = pydarn.sdio.fitexfilter.doFilter(scanList,thresh=0.5)
written by AJ, 20130402
"""
from davitpy import pydarn
myScans = []
for s in scans:
if s == None:
myScans.append(None)
else:
myScans.append(combBeams(s))
outScan = pydarn.sdio.scanData()
# define the weights array: the centre cell gets the largest weight, tapering toward the box edges
w = [[[0. for i in range(3)] for j in range(3)] for k in range(3)]
for i in range(0,3):
for j in range(0,3):
for k in range(0,3):
if k == 0: tplus = 1
else: tplus = 0
if i == 0: rplus = 1
else: rplus = 0
if j == 0: bmplus = 1
else: bmplus = 0
if i == 0 and j == 0 and k == 0: centplus = 1
else: centplus = 0
w[(i+1)%3][(j+1)%3][(k+1)%3] = 1+tplus+rplus+bmplus+centplus
for b in myScans[1]:
bmnum = b.bmnum
#make a new beam
myBeam = pydarn.sdio.beamData()
myBeam.copyData(b)
for key,val in myBeam.fit.__dict__.iteritems():
setattr(myBeam.fit,key,[])
for r in range(0,b.prm.nrang):
#boxcar to hold the gates
box = [[[None for j in range(3)] for k in range(3)] for n in range(3)]
#iterate through time
for j in range(0,3):
#iterate through beam
for k in range(-1,2):
#iterate through gate
for n in range(-1,2):
#get the scan we are working on
s = myScans[j]
if s == None: continue
#get the beam we are working on
tbm = None
for bm in s:
if bm.bmnum == bmnum + k: tbm = bm
if tbm == None: continue
#check if target gate number is in the beam
if r+n in tbm.fit.slist:
ind = tbm.fit.slist.index(r+n)
box[j][k+1][n+1] = Gate(tbm.fit,ind)
else: box[j][k+1][n+1] = 0
pts,tot=0.,0.
v,w_l,p_l,elv,phi0,pwr0 = [],[],[],[],[],[]
#iterate through time
for j in range(0,3):
#iterate through beam
for k in range(0,3):
#iterate through gate
for n in range(0,3):
bx = box[j][k][n]
if bx == None: continue
wt = w[j][k][n]
tot += wt
if bx != 0:
pts += wt
for m in range(0,wt):
v.append(bx.v)
pwr0.append(bx.pwr0)
w_l.append(bx.w_l)
p_l.append(bx.p_l)
if bx.elv: elv.append(bx.elv)
if bx.phi0: phi0.append(bx.phi0)
#check if we meet the threshold
if pts/tot >= thresh:
myBeam.fit.slist.append(r)
myBeam.fit.qflg.append(1)
myBeam.fit.v.append(np.median(v))
myBeam.fit.w_l.append(np.median(w_l))
myBeam.fit.p_l.append(np.median(p_l))
myBeam.fit.pwr0.append(np.median(pwr0))
if elv != []: myBeam.fit.elv.append(np.median(elv))
if phi0 != []: myBeam.fit.phi0.append(np.median(phi0))
if np.median(w_l) > -3.*np.median(v)+90.:
myBeam.fit.gflg.append(0)
else: myBeam.fit.gflg.append(1)
outScan.append(myBeam)
return outScan
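# --- Hedged illustration (not part of the original module) ---
# The boxcar weights built in doFilter() give the centre cell of the 3x3x3
# box the largest weight, tapering toward the edges. This standalone loop
# reproduces the weight construction so the scheme can be inspected without
# a radar scan.
if __name__ == '__main__':
    w = [[[0. for i in range(3)] for j in range(3)] for k in range(3)]
    for i in range(0, 3):
        for j in range(0, 3):
            for k in range(0, 3):
                tplus = 1 if k == 0 else 0
                rplus = 1 if i == 0 else 0
                bmplus = 1 if j == 0 else 0
                centplus = 1 if (i == 0 and j == 0 and k == 0) else 0
                w[(i + 1) % 3][(j + 1) % 3][(k + 1) % 3] = (
                    1 + tplus + rplus + bmplus + centplus)
    for t, plane in enumerate(w):
        print("slice %d: %s" % (t, plane))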
| gpl-3.0 |
SqueezeStudioAnimation/ExocortexCrate | Maya/MEL/ExocortexAlembic/_attach.py | 2 | 6423 | import traceback
import maya.cmds as cmds
import _functions as fnt
""" Attach to existing """
def attachTimeAndFile(node, jobInfo, isConstant=False):
connAttr = cmds.connectionInfo(node+".inTime", sfd=True)
if connAttr == "" and not isConstant:
cmds.connectAttr(jobInfo.timeCtrl+".outTime", node+".inTime")
node = node + ".fileName" # compute once, used 2-3 times
connAttr = cmds.connectionInfo(node, sfd=True)
if connAttr != None and connAttr != "":
cmds.disconnectAttr(connAttr, node)
cmds.connectAttr(jobInfo.filenode+".outFileName", node)
pass
def attachXform(name, identifier, jobInfo, isConstant=False):
cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachXform")
try:
conX = cmds.listConnections(name+".translate")
if conX:
# already receiving transformation from another node!
conX = conX[0]
if cmds.objectType(conX) == "ExocortexAlembicXform":
attachTimeAndFile(conX, jobInfo, isConstant)
return [conX]
else:
return ["!", "Cannot attach Xform to " + name + ", it's attach to a node that is not an \"ExocortexAlembicXform\""]
newXform = cmds.createNode("ExocortexAlembicXform")
cmds.setAttr(newXform+".identifier", identifier, type="string")
cmds.connectAttr(newXform+".translate", name+".translate")
cmds.connectAttr(newXform+".rotate", name+".rotate")
cmds.connectAttr(newXform+".scale", name+".scale")
cmds.connectAttr(newXform+".outVisibility", name+".visibility")
attachTimeAndFile(newXform, jobInfo, isConstant)
except:
return ["!", traceback.format_exc()]
finally:
cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachXform")
return [newXform]
def attachPolyMesh(name, identifier, jobInfo, isConstant=False):
cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachPolyMesh")
try:
if cmds.objectType(name) != "mesh":
return ["!", "Only mesh can be attached too!"]
conX = cmds.listConnections(name, d=False, type="ExocortexAlembicPolyMeshDeform")
if conX: # it's already attached to a deform, simply change the file reference
polyObj = conX[0]
attachTimeAndFile(polyObj, jobInfo, isConstant)
return [polyObj]
# create deformer, and attach time and file
newDform = cmds.deformer(name, type="ExocortexAlembicPolyMeshDeform")[0]
cmds.setAttr(newDform+".identifier", identifier, type="string")
attachTimeAndFile(newDform, jobInfo, isConstant)
if jobInfo.useFaceSets:
cmds.ExocortexAlembic_createFaceSets(f=cmds.getAttr(jobInfo.filenode+".outFileName"), i=identifier, o=name)
except:
return ["!", traceback.format_exc()]
finally:
cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachPolyMesh")
return [newDform]
def attachCamera(name, identifier, jobInfo, isConstant=False):
cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachCamera")
try:
conX = cmds.listConnections(name, d=False, type="ExocortexAlembicCamera")
if conX:
camObj = conX[0]
attachTimeAndFile(camObj, jobInfo, isConstant)
return [camObj]
reader = cmds.createNode("ExocortexAlembicCamera")
cmds.connectAttr(reader+".focalLength", name+".focalLength")
cmds.connectAttr(reader+".focusDistance", name+".focusDistance")
cmds.connectAttr(reader+".lensSqueezeRatio", name+".lensSqueezeRatio")
cmds.connectAttr(reader+".horizontalFilmAperture", name+".horizontalFilmAperture")
cmds.connectAttr(reader+".verticalFilmAperture", name+".verticalFilmAperture")
cmds.connectAttr(reader+".horizontalFilmOffset", name+".horizontalFilmOffset")
cmds.connectAttr(reader+".verticalFilmOffset", name+".verticalFilmOffset")
cmds.connectAttr(reader+".fStop", name+".fStop")
cmds.connectAttr(reader+".shutterAngle", name+".shutterAngle")
attachTimeAndFile(reader, jobInfo, isConstant)
except:
return ["!", traceback.format_exc()]
finally:
cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachCamera")
return [reader]
def attachCurves(name, identifier, jobInfo, isConstant=False):
cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachCurves")
try:
conX = (cmds.listConnections(name+".create", d=False, type="ExocortexAlembicCurvesDeform") or
cmds.listConnections(name+".create", d=False, type="ExocortexAlembicCurves"))
if conX:
curObj = conX[0]
attachTimeAndFile(curObj, jobInfo, isConstant)
return [curObj]
# create deformer, and attach time and file
newDform = cmds.deformer(name, type="ExocortexAlembicCurvesDeform")[0]
cmds.setAttr(newDform+".identifier", identifier, type="string")
attachTimeAndFile(newDform, jobInfo, isConstant)
# get curObj new "output" attribute connection
conX = cmds.listConnections(name+".create", d=False, type="ExocortexAlembicCurvesDeform")
if conX:
curObj = conX[0]
originalCur = cmds.connectionInfo(curObj+".output", sfd=True).split('.')[0]
cmds.delete(curObj)
curObj = cmds.createNode("ExocortexAlembicCurves")
attachTimeAndFile(curObj, jobInfo, isConstant)
cmds.connectAttr(curObj+".outCurve", originalCur+".create")
cmds.connectAttr(jobInfo.filenode+".outFileName", curObj+".fileName")
cmds.setAttr(curObj+".identifier", identifier, type="string")
except:
return ["!", traceback.format_exc()]
finally:
cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachCurves")
return [curObj]
def attachPoints(name, identifier, jobInfo, isConstant=False):
cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._attach.attachPoints")
try:
conX = cmds.listConnections(name, d=False, type="ExocortexAlembicPoints")
if conX:
ptsObj = conX[0]
attachTimeAndFile(ptsObj, jobInfo, isConstant)
return [ptsObj]
reader = cmds.createNode("ExocortexAlembicPoints")
cmds.addAttr(name, ln="rgbPP", dt="vectorArray")
cmds.addAttr(name, ln="opacityPP", dt="doubleArray")
cmds.addAttr(name, ln="agePP", dt="doubleArray")
cmds.addAttr(name, ln="shapeInstanceIdPP", dt="doubleArray")
cmds.addAttr(name, ln="orientationPP", dt="vectorArray")
cmds.connectAttr(reader+".output[0]", name+".newParticles[0]")
cmds.connectAttr(jobInfo.timeCtrl+".outTime", name+".currentTime")
cmds.setAttr(name+".conserve", 0)
attachTimeAndFile(reader, jobInfo, isConstant)
except:
return ["!", traceback.format_exc()]
finally:
cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._attach.attachPoints")
return [reader]
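# --- Hedged illustration (not part of the original module) ---
# Every attach* helper expects a jobInfo object exposing .timeCtrl,
# .filenode and .useFaceSets. The stand-in below only sketches that shape;
# it must run inside a Maya session with the Exocortex plug-in loaded, and
# the node names are examples, not guaranteed scene objects.
if __name__ == '__main__':
    import collections
    JobInfo = collections.namedtuple(
        'JobInfo', ['timeCtrl', 'filenode', 'useFaceSets'])
    job = JobInfo(timeCtrl='ExoTimeCtrl1', filenode='ExoFileNode1',
                  useFaceSets=False)
    result = attachPolyMesh('pSphereShape1', '/root/sphere', job)
    if result and result[0] == '!':
        print(result[1])  # traceback text on failure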
| bsd-3-clause |
eayunstack/fuel-web | nailgun/nailgun/test/unit/test_restriction.py | 2 | 16624 | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import yaml
from nailgun.errors import errors
from nailgun import objects
from nailgun.settings import settings
from nailgun.test import base
from nailgun.utils.restrictions import AttributesRestriction
from nailgun.utils.restrictions import LimitsMixin
from nailgun.utils.restrictions import RestrictionBase
from nailgun.utils.restrictions import VmwareAttributesRestriction
DATA = """
attributes:
group:
attribute_1:
name: attribute_1
value: true
restrictions:
- condition: 'settings:group.attribute_2.value == true'
message: 'Only one of attributes 1 and 2 allowed'
- condition: 'settings:group.attribute_3.value == "spam"'
message: 'Only one of attributes 1 and 3 allowed'
attribute_2:
name: attribute_2
value: true
attribute_3:
name: attribute_3
value: spam
restrictions:
- condition: 'settings:group.attribute_3.value ==
settings:group.attribute_4.value'
message: 'Only one of attributes 3 and 4 allowed'
action: enable
attribute_4:
name: attribute_4
value: spam
attribute_5:
name: attribute_5
value: 4
roles_meta:
cinder:
limits:
min: 1
overrides:
- condition: 'settings:group.attribute_2.value == true'
message: 'At most one role_1 node can be added'
max: 1
controller:
limits:
recommended: 'settings:group.attribute_5.value'
mongo:
limits:
max: 12
message: 'At most 12 MongoDB nodes should be added'
overrides:
- condition: 'settings:group.attribute_3.value == "spam"'
min: 4
message: 'At least 4 MongoDB nodes can be added if spam'
- condition: 'settings:group.attribute_3.value == "egg"'
recommended: 3
message: "At least 3 MongoDB nodes are recommended"
"""
class TestRestriction(base.BaseTestCase):
def setUp(self):
super(TestRestriction, self).setUp()
self.data = yaml.load(DATA)
def test_check_restrictions(self):
attributes = self.data.get('attributes')
for gkey, gvalue in six.iteritems(attributes):
for key, value in six.iteritems(gvalue):
result = RestrictionBase.check_restrictions(
models={'settings': attributes},
restrictions=value.get('restrictions', []))
# check when couple restrictions true for some item
if key == 'attribute_1':
self.assertTrue(result.get('result'))
self.assertEqual(
result.get('message'),
'Only one of attributes 1 and 2 allowed. ' +
'Only one of attributes 1 and 3 allowed')
# check when different values uses in restriction
if key == 'attribute_3':
self.assertTrue(result.get('result'))
self.assertEqual(
result.get('message'),
'Only one of attributes 3 and 4 allowed')
def test_expand_restriction_format(self):
string_restriction = 'settings.some_attribute.value != true'
dict_restriction_long_format = {
'condition': 'settings.some_attribute.value != true',
'message': 'Another attribute required'
}
dict_restriction_short_format = {
'settings.some_attribute.value != true':
'Another attribute required'
}
result = {
'action': 'disable',
'condition': 'settings.some_attribute.value != true',
}
invalid_format = ['some_condition']
# check string format
self.assertDictEqual(
RestrictionBase._expand_restriction(
string_restriction), result)
result['message'] = 'Another attribute required'
# check long format
self.assertDictEqual(
RestrictionBase._expand_restriction(
dict_restriction_long_format), result)
# check short format
self.assertDictEqual(
RestrictionBase._expand_restriction(
dict_restriction_short_format), result)
# check invalid format
self.assertRaises(
errors.InvalidData,
RestrictionBase._expand_restriction,
invalid_format)
class TestLimits(base.BaseTestCase):
def setUp(self):
super(TestLimits, self).setUp()
self.data = yaml.load(DATA)
self.env.create(
nodes_kwargs=[
{"status": "ready", "roles": ["cinder"]},
{"status": "ready", "roles": ["controller"]},
{"status": "ready", "roles": ["mongo"]},
{"status": "ready", "roles": ["mongo"]},
]
)
def test_check_node_limits(self):
roles = self.data.get('roles_meta')
attributes = self.data.get('attributes')
for role, data in six.iteritems(roles):
result = LimitsMixin().check_node_limits(
models={'settings': attributes},
nodes=self.env.nodes,
role=role,
limits=data.get('limits'))
if role == 'cinder':
self.assertTrue(result.get('valid'))
if role == 'controller':
self.assertFalse(result.get('valid'))
self.assertEqual(
result.get('messages'),
'Default message')
if role == 'mongo':
self.assertFalse(result.get('valid'))
self.assertEqual(
result.get('messages'),
'At least 4 MongoDB nodes can be added if spam')
def test_check_override(self):
roles = self.data.get('roles_meta')
attributes = self.data.get('attributes')
limits = LimitsMixin()
# Set nodes count to 4
limits.count = 4
limits.limit_reached = True
limits.models = {'settings': attributes}
limits.nodes = self.env.nodes
# Set "cinder" role to working on
limits.role = 'cinder'
limits.limit_types = ['max']
limits.checked_limit_types = {}
limits.limit_values = {'max': None}
override_data = roles['cinder']['limits']['overrides'][0]
result = limits._check_override(override_data)
self.assertEqual(
result[0]['message'], 'At most one role_1 node can be added')
def test_get_proper_message(self):
limits = LimitsMixin()
limits.messages = [
{'type': 'min', 'value': '1', 'message': 'Message for min_1'},
{'type': 'min', 'value': '2', 'message': 'Message for min_2'},
{'type': 'max', 'value': '5', 'message': 'Message for max_5'},
{'type': 'max', 'value': '8', 'message': 'Message for max_8'}
]
self.assertEqual(
limits._get_message('min'), 'Message for min_2')
self.assertEqual(
limits._get_message('max'), 'Message for max_5')
class TestAttributesRestriction(base.BaseTestCase):
def setUp(self):
super(TestAttributesRestriction, self).setUp()
self.cluster = self.env.create(
cluster_kwargs={
'api': False
}
)
attributes_metadata = """
editable:
access:
user:
value: ""
type: "text"
regex:
source: '\S'
error: "Invalid username"
email:
value: "admin@localhost"
type: "text"
regex:
source: '\S'
error: "Invalid email"
tenant:
value: ""
type: "text"
regex:
source: '\S'
error: "Invalid tenant name"
password:
value: "secret"
type: "password"
regex:
source: '\S'
error: "Empty password"
"""
self.attributes_data = yaml.load(attributes_metadata)
def test_check_with_invalid_values(self):
objects.Cluster.update_attributes(
self.cluster, self.attributes_data)
attributes = objects.Cluster.get_editable_attributes(self.cluster)
models = {
'settings': attributes,
'default': attributes,
}
errs = AttributesRestriction.check_data(models, attributes)
self.assertItemsEqual(
errs, ['Invalid username', 'Invalid tenant name'])
def test_check_with_valid_values(self):
access = self.attributes_data['editable']['access']
access['user']['value'] = 'admin'
access['tenant']['value'] = 'test'
objects.Cluster.update_attributes(
self.cluster, self.attributes_data)
attributes = objects.Cluster.get_editable_attributes(self.cluster)
models = {
'settings': attributes,
'default': attributes,
}
errs = AttributesRestriction.check_data(models, attributes)
self.assertListEqual(errs, [])
class TestVmwareAttributesRestriction(base.BaseTestCase):
def setUp(self):
super(TestVmwareAttributesRestriction, self).setUp()
self.cluster = self.env.create(
cluster_kwargs={
'api': False
}
)
self.vm_data = self.env.read_fixtures(['vmware_attributes'])[0]
def test_check_data_with_empty_values_without_restrictions(self):
attributes = objects.Cluster.get_editable_attributes(self.cluster)
attributes['common']['use_vcenter']['value'] = True
attributes['storage']['images_vcenter']['value'] = True
vmware_attributes = self.vm_data.copy()
empty_values = {
"availability_zones": [
{
"az_name": "",
"vcenter_host": "",
"vcenter_username": "",
"vcenter_password": "",
"nova_computes": [
{
"vsphere_cluster": "",
"service_name": "",
"datastore_regex": ""
}
]
}
],
"network": {
"esxi_vlan_interface": ""
},
"glance": {
"vcenter_host": "",
"vcenter_username": "",
"vcenter_password": "",
"datacenter": "",
"datastore": ""
}
}
# Update value with empty value
vmware_attributes['editable']['value'] = empty_values
models = {
'settings': attributes,
'default': vmware_attributes['editable'],
'cluster': self.cluster,
'version': settings.VERSION,
'networking_parameters': self.cluster.network_config
}
errs = VmwareAttributesRestriction.check_data(
models=models,
metadata=vmware_attributes['editable']['metadata'],
data=vmware_attributes['editable']['value'])
self.assertItemsEqual(
errs,
['Empty cluster', 'Empty host', 'Empty username',
'Empty password', 'Empty datacenter', 'Empty datastore'])
def test_check_data_with_invalid_values_without_restrictions(self):
# Disable restrictions
attributes = objects.Cluster.get_editable_attributes(self.cluster)
attributes['common']['use_vcenter']['value'] = True
attributes['storage']['images_vcenter']['value'] = True
# value data taken from fixture one cluster of
# nova computes left empty
vmware_attributes = self.vm_data.copy()
models = {
'settings': attributes,
'default': vmware_attributes['editable'],
'cluster': self.cluster,
'version': settings.VERSION,
'networking_parameters': self.cluster.network_config
}
errs = VmwareAttributesRestriction.check_data(
models=models,
metadata=vmware_attributes['editable']['metadata'],
data=vmware_attributes['editable']['value'])
self.assertItemsEqual(errs, ['Empty cluster'])
def test_check_data_with_invalid_values_and_with_restrictions(self):
attributes = objects.Cluster.get_editable_attributes(self.cluster)
# fixture have restrictions enabled for glance that's why
# only 'Empty cluster' should returned
vmware_attributes = self.vm_data.copy()
models = {
'settings': attributes,
'default': vmware_attributes['editable'],
'cluster': self.cluster,
'version': settings.VERSION,
'networking_parameters': self.cluster.network_config
}
errs = VmwareAttributesRestriction.check_data(
models=models,
metadata=vmware_attributes['editable']['metadata'],
data=vmware_attributes['editable']['value'])
self.assertItemsEqual(errs, ['Empty cluster'])
def test_check_data_with_valid_values_and_with_restrictions(self):
attributes = objects.Cluster.get_editable_attributes(self.cluster)
vmware_attributes = self.vm_data.copy()
# Set valid data for clusters
for i, azone in enumerate(
vmware_attributes['editable']['value']['availability_zones']):
for j, ncompute in enumerate(azone['nova_computes']):
ncompute['vsphere_cluster'] = 'cluster-{0}-{1}'.format(i, j)
models = {
'settings': attributes,
'default': vmware_attributes['editable'],
'cluster': self.cluster,
'version': settings.VERSION,
'networking_parameters': self.cluster.network_config
}
errs = VmwareAttributesRestriction.check_data(
models=models,
metadata=vmware_attributes['editable']['metadata'],
data=vmware_attributes['editable']['value'])
self.assertItemsEqual(errs, [])
def test_check_data_with_valid_values_and_without_restrictions(self):
# Disable restrictions
attributes = objects.Cluster.get_editable_attributes(self.cluster)
attributes['common']['use_vcenter']['value'] = True
attributes['storage']['images_vcenter']['value'] = True
vmware_attributes = self.vm_data.copy()
# Set valid data for clusters
for i, azone in enumerate(
vmware_attributes['editable']['value']['availability_zones']):
for j, ncompute in enumerate(azone['nova_computes']):
ncompute['vsphere_cluster'] = 'cluster-{0}-{1}'.format(i, j)
# Set valid data for glance
glance = vmware_attributes['editable']['value']['glance']
glance['datacenter'] = 'test_datacenter'
glance['datastore'] = 'test_datastore'
models = {
'settings': attributes,
'default': vmware_attributes['editable'],
'cluster': self.cluster,
'version': settings.VERSION,
'networking_parameters': self.cluster.network_config
}
errs = VmwareAttributesRestriction.check_data(
models=models,
metadata=vmware_attributes['editable']['metadata'],
data=vmware_attributes['editable']['value'])
self.assertItemsEqual(errs, [])
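# --- Hedged illustration (not part of the original test module) ---
# A standalone demonstration of the short-to-long restriction expansion
# exercised in TestRestriction.test_expand_restriction_format above; the
# condition string is taken from the DATA fixture.
if __name__ == '__main__':
    short_form = {'settings:group.attribute_2.value == true':
                  'Only one of attributes 1 and 2 allowed'}
    # Expected: {'action': 'disable',
    #            'condition': 'settings:group.attribute_2.value == true',
    #            'message': 'Only one of attributes 1 and 2 allowed'}
    print(RestrictionBase._expand_restriction(short_form))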
| apache-2.0 |
theintencity/vvowproject | server/restserver.py | 1 | 9342 | # This is the Python restserver module used in the websocket server by
# restserver_wsh.py and AJAX server by ajaxserver.py.
import logging, sqlite3, json, time, random, re
logger = logging.getLogger('restserver')
class Database():
def __init__(self, filename="restserver.db"):
self.conn = sqlite3.connect(filename, check_same_thread=False)
self.cursor = self.conn.cursor()
self._create()
def _create(self):
try:
self.commit('''CREATE TABLE resource (
rid varchar(1024) PRIMARY KEY NOT NULL DEFAULT '',
prid varchar(1024) NOT NULL DEFAULT '',
type varchar(64) NOT NULL DEFAULT 'application/json',
entity blob,
cid varchar(25)
)''')
self.commit('''create table subscribe (
rid varchar(1024) NOT NULL DEFAULT '',
cid varchar(25) NOT NULL DEFAULT '',
PRIMARY KEY (rid, cid)
)''')
logger.debug('Database created')
except sqlite3.OperationalError:
logger.debug('Database already created')
def reset(self):
# clean up the subscribe table, since there are no subscriptions on startup
self.commit("DELETE FROM subscribe");
self.commit("DELETE FROM resource WHERE cid != ''");
def close(self):
if self.cursor:
self.cursor.close()
self.cursor = None
def commit(self, *args):
logger.debug('commit%r', args)
self.cursor.execute(*args)
self.conn.commit()
def iterate(self, *args):
logger.debug('iterate%r', args)
return self.cursor.execute(*args)
def fetchone(self, *args):
logger.debug('fetchone%r', args)
self.cursor.execute(*args)
result = self.cursor.fetchone()
logger.debug('fetchone%r=>\n %r', args, result)
return result
def fetchall(self, *args):
logger.debug('fetchall%r', args)
self.cursor.execute(*args)
result = self.cursor.fetchall()
logger.debug('fetchall%r=>\n %s', args, '\n '.join(['%r'%(x,) for x in result]))
return result
def uniqid():
return str(int(time.time()) * 1000 + random.randint(0, 999))
class Handler():
def __init__(self, db):
self.db = db
def POST(self, request):
parent, ctype, entity, persistent = request['resource'], request.get('type', 'application/json'), \
json.dumps(request.get('entity', {})), request.get('persistent', False)
rid = request['id'] if 'id' in request else uniqid()
resource = parent + '/' + rid
cid = '' if persistent else self.id
try:
self.db.commit('INSERT INTO resource (rid, prid, type, entity, cid) VALUES (?, ?, ?, ?, ?)',
(resource, parent, ctype, entity, cid))
except:
logger.exception('failed to insert resource')
return dict(code='failed', reason='failed to insert this resource')
self.NOTIFY(resource, 'POST')
return dict(code='success', id=rid)
def PUT(self, request):
resource, attr, ignore = self._parse(request['resource'])
ctype, entity, persistent = request.get('type', 'application/json'), \
json.dumps(request.get('entity', {})), request.get('persistent', False)
if attr:
result = None
try:
result = self.db.fetchone('SELECT type, entity FROM resource WHERE rid=?', (resource,))
except:
logger.exception('failed to get resource')
if not result:
return dict(code='failed', reason='failed to get the resource')
result = json.loads(result[1])
result[attr] = request.get('entity', None)
entity = json.dumps(result)
try:
self.db.commit('UPDATE resource SET entity=? WHERE rid=?', (entity, resource))
except:
logger.exception('failed to replace resource attribute')
return dict(code='failed', reason='failed to replace resource attribute')
else:
parent = self.get_parent(resource)
cid = '' if persistent else self.id
try:
self.db.commit('REPLACE INTO resource (rid, prid, type, entity, cid) VALUES (?, ?, ?, ?, ?)',
(resource, parent, ctype, entity, cid))
except:
logger.exception('failed to replace resource')
return dict(code='failed', reason='failed to replace this resource')
self.NOTIFY(resource, 'PUT')
return dict(code='success')
def GET(self, request):
resource, attr, params = self._parse(request['resource'])
if attr:
result = None
try:
result = self.db.fetchone('SELECT type, entity FROM resource WHERE rid=?', (resource,))
entity = json.loads(result[1])
if attr in entity:
return dict(code="success", resource=request['resource'], entity=json.dumps(entity[attr]))
else:
return dict(code="failed", reason="failed to get this resource attribute")
except:
logger.exception('failed to read resource')
return dict(code='failed', reason='failed to get this resource')
elif params:
try:
query, attrs = 'SELECT rid FROM resource WHERE prid=?', [resource]
if 'like' in params:
query += " AND rid LIKE ?"
attrs.append(params['like'])
if 'limit' in params:
query += " LIMIT " + params['limit']
if 'offset' in params:
query += " OFFSET " + params['offset']
if 'order' in params:
query += " " + params['order']
result = self.db.fetchall(query, attrs)
except:
logger.exception('failed to read parent resource')
return dict(code='failed', reason='failed to get child resources')
response = [(row[0][len(resource)+1:] if row[0].startswith(resource) else row[0]) for row in result]
else:
try:
result = self.db.fetchone('SELECT type, entity FROM resource WHERE rid=?', (resource,))
except:
logger.exception('failed to read resource')
return dict(code='failed', reason='failed to get this resource')
if result:
ctype, entity = result[0], json.loads(result[1])
entity = dict([(k, v) for k, v in entity.iteritems() if not k or k[0] != "_"])
return dict(code='success', resource=resource, type=ctype, entity=entity)
try:
result = self.db.fetchall('SELECT rid FROM resource WHERE prid=?', (resource,))
except:
logger.exception('failed to read parent resource')
return dict(code='failed', reason='failed to get child resources')
response = [(row[0][len(resource)+1:] if row[0].startswith(resource) else row[0]) for row in result]
if response:
return dict(code='success', resource=resource, type='application/json', entity=response)
return dict(code='failed', reason='no value found for this resource')
def DELETE(self, request):
resource = request['resource']
result = self.db.fetchone('SELECT count(rid) FROM resource WHERE prid=?', (resource,))
if result[0]:
return dict(code='failed', reason='this parent resource has children')
self.db.commit('DELETE FROM resource WHERE rid=?', (resource,))
self.NOTIFY(resource, 'DELETE')
return dict(code='success')
def SUBSCRIBE(self, request):
resource = request['resource']
try:
self.db.commit('REPLACE INTO subscribe (rid, cid) VALUES (?, ?)', (resource, self.id))
except:
logger.exception('failed to replace subscribe')
return dict(code='failed', reason='failed to subscribe the client to the resource')
return dict(code='success')
def UNSUBSCRIBE(self, request):
resource = request['resource']
try:
self.db.commit('DELETE FROM subscribe WHERE rid=? AND cid=?', (resource, self.id))
except:
logger.exception('failed to delete subscribe')
return dict(code='failed', reason='failed to unsubscribe the client from the resource')
return dict(code='success')
# to be overridden by the sub-class if it supports NOTIFY
def NOTIFY(self, request, method=None):
pass
def get_parent(self, resource):
index = resource.rfind('/')
return resource[:index] if index >= 0 else ''
def _parse(self, value):
match = re.match(r'([^\[\?]+)(\[([^\]\?]*)\])?(\?.*)?$', value)
if not match: return (value, None, None)
groups = match.groups()
return (groups[0], groups[2], dict([x.split('=', 1) for x in groups[3][1:].split('&')]) if groups[3] else None)
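# --- Usage sketch (not part of the original module; names below are
# illustrative). A real deployment subclasses Handler and supplies a client
# `id`; here we set it by hand to show the request/response dict protocol
# and what _parse() extracts from a resource string.
if __name__ == '__main__':
    db = Database(':memory:')      # sqlite accepts ':memory:' for a throwaway db
    handler = Handler(db)
    handler.id = 'client-1'        # normally assigned by the websocket/AJAX layer
    print handler._parse('/user/1[name]?limit=10')
    # -> ('/user/1', 'name', {'limit': '10'})
    print handler.POST({'resource': '/user', 'id': '1',
                        'entity': {'name': 'alice'}})
    # -> {'code': 'success', 'id': '1'}
    print handler.GET({'resource': '/user/1'})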
| lgpl-3.0 |
loop1024/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/dumbdbm.py | 251 | 8820 | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import os as _os
import __builtin__
import UserDict
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database(UserDict.DictMixin):
# The on-disk directory and data files can remain in mutually
# inconsistent states for an arbitrarily long time (see comments
# at the end of __setitem__). This is only repaired when _commit()
# gets called. One place _commit() gets called is from __del__(),
# and if that occurs at program shutdown time, module globals may
# already have gotten rebound to None. Since it's crucial that
# _commit() finish successfully, we can't ignore shutdown races
# here, and _commit() must not reference any globals.
_os = _os # for _commit()
_open = _open # for _commit()
def __init__(self, filebasename, mode):
self._mode = mode
# The directory file is a text file. Each line looks like
# "%r, (%d, %d)\n" % (key, pos, siz)
# where key is the string key, pos is the offset into the dat
# file of the associated value's first byte, and siz is the number
# of bytes in the associated value.
self._dirfile = filebasename + _os.extsep + 'dir'
# The data file is a binary file pointed into by the directory
# file, and holds the values associated with keys. Each value
# begins at a _BLOCKSIZE-aligned byte offset, and is a raw
# binary 8-bit string value.
self._datfile = filebasename + _os.extsep + 'dat'
self._bakfile = filebasename + _os.extsep + 'bak'
# The index is an in-memory dict, mirroring the directory file.
self._index = None # maps keys to (pos, siz) pairs
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
self._chmod(self._datfile)
f.close()
self._update()
# Read directory file into the in-memory index dict.
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
for line in f:
line = line.rstrip()
key, pos_and_siz_pair = eval(line)
self._index[key] = pos_and_siz_pair
f.close()
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
# file currently exists, it's deleted.
def _commit(self):
# CAUTION: It's vital that _commit() succeed, and _commit() can
# be called from __del__(). Therefore we must never reference a
# global in this routine.
if self._index is None:
return # nothing to do
try:
self._os.unlink(self._bakfile)
except self._os.error:
pass
try:
self._os.rename(self._dirfile, self._bakfile)
except self._os.error:
pass
f = self._open(self._dirfile, 'w')
self._chmod(self._dirfile)
for key, pos_and_siz_pair in self._index.iteritems():
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
sync = _commit
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
# Append val to the data file, starting at a _BLOCKSIZE-aligned
# offset. The data file is first padded with NUL bytes (if needed)
# to get to an aligned offset. Return pair
# (starting offset of val, len(val))
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
# Write val to the data file, starting at offset pos. The caller
# is responsible for ensuring that there's enough room starting at
# pos to hold val, without overwriting some other value. Return
# pair (pos, len(val)).
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
# key is a new key whose associated value starts in the data file
# at offset pos and with length siz. Add an index record to
# the in-memory index dict, and append one to the directory file.
def _addkey(self, key, pos_and_siz_pair):
self._index[key] = pos_and_siz_pair
f = _open(self._dirfile, 'a')
self._chmod(self._dirfile)
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if key not in self._index:
self._addkey(key, self._addval(val))
else:
# See whether the new value is small enough to fit in the
# (padded) space currently occupied by the old value.
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
if newblocks <= oldblocks:
self._index[key] = self._setval(pos, val)
else:
# The new value doesn't fit in the (padded) space used
# by the old value. The blocks used by the old value are
# forever lost.
self._index[key] = self._addval(val)
# Note that _index may be out of synch with the directory
# file now: _setval() and _addval() don't update the directory
# file. This also means that the on-disk directory and data
# files are in a mutually inconsistent state, and they'll
# remain that way until _commit() is called. Note that this
# is a disaster (for the database) if the program crashes
# (so that _commit() never gets called).
def __delitem__(self, key):
# The blocks used by the associated value are lost.
del self._index[key]
# XXX It's unclear why we do a _commit() here (the code always
# XXX has, so I'm not changing it). _setitem__ doesn't try to
# XXX keep the directory file in synch. Why should we? Or
# XXX why shouldn't __setitem__?
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return key in self._index
def __contains__(self, key):
return key in self._index
def iterkeys(self):
return self._index.iterkeys()
__iter__ = iterkeys
def __len__(self):
return len(self._index)
def close(self):
self._commit()
self._index = self._datfile = self._dirfile = self._bakfile = None
__del__ = close
def _chmod (self, file):
if hasattr(self._os, 'chmod'):
self._os.chmod(file, self._mode)
def open(file, flag=None, mode=0666):
"""Open the database file, filename, and return corresponding object.
The flag argument, used to control how the database is opened in the
other DBM implementations, is ignored in the dumbdbm module; the
database is always opened for update, and will be created if it does
not exist.
The optional mode argument is the UNIX mode of the file, used only when
the database has to be created. It defaults to octal code 0666 (and
will be modified by the prevailing umask).
"""
# flag argument is currently ignored
# Modify mode depending on the umask
try:
um = _os.umask(0)
_os.umask(um)
except AttributeError:
pass
else:
# Turn off any bits that are set in the umask
mode = mode & (~um)
return _Database(file, mode)
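# --- Usage sketch (not part of the stdlib module): exercising the
# spam.dir / spam.dat / spam.bak layout described in the module docstring.
if __name__ == '__main__':
    db = open('/tmp/spam')     # this module's open(), not the builtin
    db['key'] = 'value'        # appended to spam.dat at a _BLOCKSIZE boundary
    print db['key']            # -> 'value'
    print db.keys()            # the in-memory index, mirrored to spam.dir
    db.close()                 # _commit() rewrites the directory file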
| mit |
whygee/oppia | core/platform/email/gae_email_services.py | 15 | 3019 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides email services."""
__author__ = 'Sean Lip'
from core import counters
import feconf
from google.appengine.api import app_identity
from google.appengine.api import mail
def send_mail_to_admin(subject, body):
"""Enqueues a 'send email' request with the GAE mail service.
Args:
- subject: str. The subject line of the email.
- body: str. The plaintext body of the email.
"""
if feconf.CAN_SEND_EMAILS_TO_ADMIN:
if not mail.is_email_valid(feconf.ADMIN_EMAIL_ADDRESS):
raise Exception(
'Malformed email address: %s' %
feconf.ADMIN_EMAIL_ADDRESS)
app_id = app_identity.get_application_id()
body = '(Sent from %s)\n\n%s' % (app_id, body)
mail.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, subject,
body)
counters.EMAILS_SENT.inc()
def send_mail(
sender_email, recipient_email, subject, plaintext_body, html_body):
"""Sends an email. The client is responsible for recording any audit logs.
In general this function should only be called from
email_manager._send_email().
Args:
- sender_email: str. the email address of the sender. This should be in
the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>'.
- recipient_email: str. the email address of the recipient.
- subject: str. The subject line of the email.
- plaintext_body: str. The plaintext body of the email.
- html_body: str. The HTML body of the email. Must fit in a datastore
entity.
Raises:
Exception: if the configuration in feconf.py forbids emails from being
sent.
ValueError: if 'sender_email' or 'recipient_email' is invalid, according
to App Engine.
(and possibly other exceptions, due to mail.send_mail() failures)
"""
if not feconf.CAN_SEND_EMAILS_TO_USERS:
raise Exception('This app cannot send emails to users.')
if not mail.is_email_valid(sender_email):
raise ValueError(
'Malformed sender email address: %s' % sender_email)
if not mail.is_email_valid(recipient_email):
raise ValueError(
'Malformed recipient email address: %s' % recipient_email)
mail.send_mail(
sender_email, recipient_email, subject, plaintext_body, html=html_body)
counters.EMAILS_SENT.inc()
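# --- Usage sketch (addresses are hypothetical; requires the App Engine
# runtime and feconf.CAN_SEND_EMAILS_TO_USERS = True, otherwise send_mail()
# raises):
#
#   send_mail(
#       'Oppia <admin@example.com>', 'user@example.com',
#       'Welcome', 'Hello!', '<p>Hello!</p>')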
| apache-2.0 |
mvaled/gunicorn | gunicorn/app/djangoapp.py | 79 | 5026 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
from gunicorn.app.base import Application
from gunicorn import util
def is_setting_mod(path):
return (os.path.isfile(os.path.join(path, "settings.py")) or
os.path.isfile(os.path.join(path, "settings.pyc")))
def find_settings_module(path):
path = os.path.abspath(path)
project_path = None
settings_name = "settings"
if os.path.isdir(path):
project_path = None
if not is_setting_mod(path):
for d in os.listdir(path):
if d in ('..', '.'):
continue
root = os.path.join(path, d)
if is_setting_mod(root):
project_path = root
break
else:
project_path = path
elif os.path.isfile(path):
project_path = os.path.dirname(path)
settings_name, _ = os.path.splitext(os.path.basename(path))
return project_path, settings_name
def make_default_env(cfg):
if cfg.django_settings:
os.environ['DJANGO_SETTINGS_MODULE'] = cfg.django_settings
if cfg.pythonpath:
paths = cfg.pythonpath.split(",")
for path in paths:
# use each entry of the comma-separated list, not the whole string
pythonpath = os.path.abspath(path)
if pythonpath not in sys.path:
sys.path.insert(0, pythonpath)
try:
os.environ['DJANGO_SETTINGS_MODULE']
except KeyError:
# no settings env var set, try to build one.
cwd = util.getcwd()
project_path, settings_name = find_settings_module(cwd)
if not project_path:
raise RuntimeError("django project not found")
pythonpath, project_name = os.path.split(project_path)
os.environ['DJANGO_SETTINGS_MODULE'] = "%s.%s" % (project_name,
settings_name)
if pythonpath not in sys.path:
sys.path.insert(0, pythonpath)
if project_path not in sys.path:
sys.path.insert(0, project_path)
class DjangoApplication(Application):
def init(self, parser, opts, args):
if args:
if ("." in args[0] and not (os.path.isfile(args[0])
or os.path.isdir(args[0]))):
self.cfg.set("django_settings", args[0])
else:
# no settings module given, try to build one from the path.
project_path, settings_name = find_settings_module(
os.path.abspath(args[0]))
if not project_path:
raise RuntimeError("django project not found")
if project_path not in sys.path:
sys.path.insert(0, project_path)
pythonpath, project_name = os.path.split(project_path)
self.cfg.set("django_settings", "%s.%s" % (project_name,
settings_name))
self.cfg.set("pythonpath", pythonpath)
def load(self):
# chdir to the configured path before loading,
# default is the current dir
os.chdir(self.cfg.chdir)
# set settings
make_default_env(self.cfg)
# load wsgi application and return it.
mod = util.import_module("gunicorn.app.django_wsgi")
return mod.make_wsgi_application()
class DjangoApplicationCommand(Application):
def __init__(self, options, admin_media_path):
self.usage = None
self.prog = None
self.cfg = None
self.config_file = options.get("config") or ""
self.options = options
self.admin_media_path = admin_media_path
self.callable = None
self.project_path = None
self.do_load_config()
def init(self, *args):
if 'settings' in self.options:
self.options['django_settings'] = self.options.pop('settings')
cfg = {}
for k, v in self.options.items():
if k.lower() in self.cfg.settings and v is not None:
cfg[k.lower()] = v
return cfg
def load(self):
# chdir to the configured path before loading,
# default is the current dir
os.chdir(self.cfg.chdir)
# set settings
make_default_env(self.cfg)
# load wsgi application and return it.
mod = util.import_module("gunicorn.app.django_wsgi")
return mod.make_command_wsgi_application(self.admin_media_path)
def run():
"""\
The ``gunicorn_django`` command line runner for launching Django
applications.
"""
util.warn("""This command is deprecated.
You should now run your application with the WSGI interface
installed with your project. Ex.:
gunicorn myproject.wsgi:application
See https://docs.djangoproject.com/en/1.5/howto/deployment/wsgi/gunicorn/
for more info.""")
from gunicorn.app.djangoapp import DjangoApplication
DjangoApplication("%(prog)s [OPTIONS] [SETTINGS_PATH]").run()
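# --- Sketch of how find_settings_module() resolves paths (directory layout
# is hypothetical): given myproject/settings.py on disk,
#
#   find_settings_module('myproject')
#   # -> ('/abs/path/to/myproject', 'settings')
#   find_settings_module('myproject/settings.py')
#   # -> ('/abs/path/to/myproject', 'settings')
#
# make_default_env() then sets DJANGO_SETTINGS_MODULE to 'myproject.settings'.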
| mit |
Lh4cKg/sl4a | python/src/Lib/plat-mac/lib-scriptpackages/Finder/Finder_Basics.py | 73 | 6854 | """Suite Finder Basics: Commonly-used Finder commands and object classes
Level 1, version 1
Generated from /System/Library/CoreServices/Finder.app
AETE/AEUT resource version 0/144, language 0, script 0
"""
import aetools
import MacOS
_code = 'fndr'
class Finder_Basics_Events:
def copy(self, _no_object=None, _attributes={}, **_arguments):
"""copy: (NOT AVAILABLE YET) Copy the selected items to the clipboard (the Finder must be the front application)
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'misc'
_subcode = 'copy'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object is not None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_sort = {
'by' : 'by ',
}
def sort(self, _object, _attributes={}, **_arguments):
"""sort: (NOT AVAILABLE YET) Return the specified object(s) in a sorted list
Required argument: a list of finder objects to sort
Keyword argument by: the property to sort the items by (name, index, date, etc.)
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the sorted items in their new order
"""
_code = 'DATA'
_subcode = 'SORT'
aetools.keysubst(_arguments, self._argmap_sort)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
class application(aetools.ComponentItem):
"""application - The Finder """
want = 'capp'
class _Prop_Finder_preferences(aetools.NProperty):
"""Finder preferences - (NOT AVAILABLE YET) Various preferences that apply to the Finder as a whole """
which = 'pfrp'
want = 'cprf'
Finder_preferences = _Prop_Finder_preferences()
class _Prop_clipboard(aetools.NProperty):
"""clipboard - (NOT AVAILABLE YET) the Finder\xd5s clipboard window """
which = 'pcli'
want = 'obj '
clipboard = _Prop_clipboard()
class _Prop_desktop(aetools.NProperty):
"""desktop - the desktop """
which = 'desk'
want = 'cdsk'
desktop = _Prop_desktop()
class _Prop_frontmost(aetools.NProperty):
"""frontmost - Is the Finder the frontmost process? """
which = 'pisf'
want = 'bool'
frontmost = _Prop_frontmost()
class _Prop_home(aetools.NProperty):
"""home - the home directory """
which = 'home'
want = 'cfol'
home = _Prop_home()
class _Prop_insertion_location(aetools.NProperty):
"""insertion location - the container in which a new folder would appear if \xd2New Folder\xd3 was selected """
which = 'pins'
want = 'obj '
insertion_location = _Prop_insertion_location()
class _Prop_name(aetools.NProperty):
"""name - the Finder\xd5s name """
which = 'pnam'
want = 'itxt'
name = _Prop_name()
class _Prop_product_version(aetools.NProperty):
"""product version - the version of the System software running on this computer """
which = 'ver2'
want = 'utxt'
product_version = _Prop_product_version()
class _Prop_selection(aetools.NProperty):
"""selection - the selection in the frontmost Finder window """
which = 'sele'
want = 'obj '
selection = _Prop_selection()
class _Prop_startup_disk(aetools.NProperty):
"""startup disk - the startup disk """
which = 'sdsk'
want = 'cdis'
startup_disk = _Prop_startup_disk()
class _Prop_trash(aetools.NProperty):
"""trash - the trash """
which = 'trsh'
want = 'ctrs'
trash = _Prop_trash()
class _Prop_version(aetools.NProperty):
"""version - the version of the Finder """
which = 'vers'
want = 'utxt'
version = _Prop_version()
class _Prop_visible(aetools.NProperty):
"""visible - Is the Finder\xd5s layer visible? """
which = 'pvis'
want = 'bool'
visible = _Prop_visible()
# element 'alia' as ['indx', 'name']
# element 'appf' as ['indx', 'name', 'ID ']
# element 'brow' as ['indx', 'ID ']
# element 'cdis' as ['indx', 'name', 'ID ']
# element 'cfol' as ['indx', 'name', 'ID ']
# element 'clpf' as ['indx', 'name']
# element 'cobj' as ['indx', 'rele', 'name', 'rang', 'test']
# element 'ctnr' as ['indx', 'name']
# element 'cwin' as ['indx', 'name']
# element 'docf' as ['indx', 'name']
# element 'file' as ['indx', 'name']
# element 'inlf' as ['indx', 'name']
# element 'lwnd' as ['indx', 'name']
# element 'pack' as ['indx', 'name']
application._superclassnames = []
import Files
import Window_classes
import Containers_and_folders
import Finder_items
application._privpropdict = {
'Finder_preferences' : _Prop_Finder_preferences,
'clipboard' : _Prop_clipboard,
'desktop' : _Prop_desktop,
'frontmost' : _Prop_frontmost,
'home' : _Prop_home,
'insertion_location' : _Prop_insertion_location,
'name' : _Prop_name,
'product_version' : _Prop_product_version,
'selection' : _Prop_selection,
'startup_disk' : _Prop_startup_disk,
'trash' : _Prop_trash,
'version' : _Prop_version,
'visible' : _Prop_visible,
}
application._privelemdict = {
'Finder_window' : Window_classes.Finder_window,
'alias_file' : Files.alias_file,
'application_file' : Files.application_file,
'clipping' : Files.clipping,
'clipping_window' : Window_classes.clipping_window,
'container' : Containers_and_folders.container,
'disk' : Containers_and_folders.disk,
'document_file' : Files.document_file,
'file' : Files.file,
'folder' : Containers_and_folders.folder,
'internet_location_file' : Files.internet_location_file,
'item' : Finder_items.item,
'package' : Files.package,
'window' : Window_classes.window,
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'capp' : application,
}
_propdeclarations = {
'desk' : _Prop_desktop,
'home' : _Prop_home,
'pcli' : _Prop_clipboard,
'pfrp' : _Prop_Finder_preferences,
'pins' : _Prop_insertion_location,
'pisf' : _Prop_frontmost,
'pnam' : _Prop_name,
'pvis' : _Prop_visible,
'sdsk' : _Prop_startup_disk,
'sele' : _Prop_selection,
'trsh' : _Prop_trash,
'ver2' : _Prop_product_version,
'vers' : _Prop_version,
}
_compdeclarations = {
}
_enumdeclarations = {
}
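# --- Sketch (not part of the generated file; requires the Mac-only aetools
# package this module imports): the four-character Apple event codes above
# index back into the generated classes and property objects.
if __name__ == '__main__':
    assert _classdeclarations['capp'] is application
    assert _propdeclarations['pnam'] is _Prop_name
    print application.want, _Prop_name.which   # -> capp pnam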
| apache-2.0 |
Shoh/PokemonGo-Bot | pokemongo_bot/test/sleep_schedule_test.py | 23 | 4903 | import unittest
from datetime import timedelta, datetime
from mock import patch, MagicMock
from pokemongo_bot.cell_workers.sleep_schedule import SleepSchedule
from tests import FakeBot
class SleepScheduleTestCase(unittest.TestCase):
config = {'time': '12:20', 'duration': '01:05', 'time_random_offset': '00:05', 'duration_random_offset': '00:05'}
def setUp(self):
self.bot = FakeBot()
self.worker = SleepSchedule(self.bot, self.config)
def test_config(self):
self.assertEqual(self.worker.time.hour, 12)
self.assertEqual(self.worker.time.minute, 20)
self.assertEqual(self.worker.duration, timedelta(hours=1, minutes=5).total_seconds())
self.assertEqual(self.worker.time_random_offset, timedelta(minutes=5).total_seconds())
self.assertEqual(self.worker.duration_random_offset, timedelta(minutes=5).total_seconds())
@patch('pokemongo_bot.cell_workers.sleep_schedule.datetime')
def test_get_next_time(self, mock_datetime):
mock_datetime.now.return_value = datetime(year=2016, month=8, day=1, hour=8, minute=0)
next_time = self.worker._get_next_sleep_schedule()
from_date = datetime(year=2016, month=8, day=1, hour=12, minute=15)
to_date = datetime(year=2016, month=8, day=1, hour=12, minute=25)
self.assertGreaterEqual(next_time, from_date)
self.assertLessEqual(next_time, to_date)
@patch('pokemongo_bot.cell_workers.sleep_schedule.datetime')
def test_get_next_time_called_near_activation_time(self, mock_datetime):
mock_datetime.now.return_value = datetime(year=2016, month=8, day=1, hour=12, minute=25)
next_time = self.worker._get_next_sleep_schedule()
from_date = datetime(year=2016, month=8, day=2, hour=12, minute=15)
to_date = datetime(year=2016, month=8, day=2, hour=12, minute=25)
self.assertGreaterEqual(next_time, from_date)
self.assertLessEqual(next_time, to_date)
@patch('pokemongo_bot.cell_workers.sleep_schedule.datetime')
def test_get_next_time_called_when_this_days_time_passed(self, mock_datetime):
mock_datetime.now.return_value = datetime(year=2016, month=8, day=1, hour=14, minute=0)
next_time = self.worker._get_next_sleep_schedule()
from_date = datetime(year=2016, month=8, day=2, hour=12, minute=15)
to_date = datetime(year=2016, month=8, day=2, hour=12, minute=25)
self.assertGreaterEqual(next_time, from_date)
self.assertLessEqual(next_time, to_date)
def test_get_next_duration(self):
from_seconds = int(timedelta(hours=1).total_seconds())
to_seconds = int(timedelta(hours=1, minutes=10).total_seconds())
duration = self.worker._get_next_duration()
self.assertGreaterEqual(duration, from_seconds)
self.assertLessEqual(duration, to_seconds)
@patch('pokemongo_bot.cell_workers.sleep_schedule.sleep')
def test_sleep(self, mock_sleep):
self.worker._next_duration = SleepSchedule.LOG_INTERVAL_SECONDS * 10
self.worker._sleep()
#Sleep should be called 10 times with LOG_INTERVAL_SECONDS as argument
self.assertEqual(mock_sleep.call_count, 10)
calls = [x[0][0] for x in mock_sleep.call_args_list]
for arg in calls:
self.assertEqual(arg, SleepSchedule.LOG_INTERVAL_SECONDS)
@patch('pokemongo_bot.cell_workers.sleep_schedule.sleep')
def test_sleep_not_divisible_by_interval(self, mock_sleep):
self.worker._next_duration = SleepSchedule.LOG_INTERVAL_SECONDS * 10 + 5
self.worker._sleep()
self.assertEqual(mock_sleep.call_count, 11)
calls = [x[0][0] for x in mock_sleep.call_args_list]
for arg in calls[:-1]:
self.assertEqual(arg, SleepSchedule.LOG_INTERVAL_SECONDS)
#Last call must be 5
self.assertEqual(calls[-1], 5)
@patch('pokemongo_bot.cell_workers.sleep_schedule.sleep')
@patch('pokemongo_bot.cell_workers.sleep_schedule.datetime')
def test_call_work_before_schedule(self, mock_datetime, mock_sleep):
self.worker._next_sleep = datetime(year=2016, month=8, day=1, hour=12, minute=0)
mock_datetime.now.return_value = self.worker._next_sleep - timedelta(minutes=5)
self.worker.work()
self.assertEqual(mock_sleep.call_count, 0)
@patch('pokemongo_bot.cell_workers.sleep_schedule.sleep')
@patch('pokemongo_bot.cell_workers.sleep_schedule.datetime')
def test_call_work_after_schedule(self, mock_datetime, mock_sleep):
self.bot.login = MagicMock()
self.worker._next_sleep = datetime(year=2016, month=8, day=1, hour=12, minute=0)
# Change time to be after schedule
mock_datetime.now.return_value = self.worker._next_sleep + timedelta(minutes=5)
self.worker.work()
self.assertGreater(mock_sleep.call_count, 0)
self.assertGreater(self.bot.login.call_count, 0)
| mit |
Infern01/BRC1 | share/qt/extract_strings_qt.py | 23 | 1847 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/freicoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *freicoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("freicoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
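# --- Illustration of parse_po() on a two-line sample (the sample is
# hypothetical; note msgid/msgstr values keep their surrounding quotes,
# which is why EMPTY is defined as ['""'] above):
#
#   >>> parse_po('msgid "Send"\nmsgstr "Enviar"')
#   [(['"Send"'], ['"Enviar"'])]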
| mit |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/twisted/web/script.py | 18 | 4768 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""I contain PythonScript, which is a very simple python script resource.
"""
import server
import resource
import html
import error
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from twisted.web import http
from twisted import copyright
import traceback
import os
from twisted.web import resource
from twisted.web import static
rpyNoResource = """<p>You forgot to assign to the variable "resource" in your script. For example:</p>
<pre>
# MyCoolWebApp.rpy
import mygreatresource
resource = mygreatresource.MyGreatResource()
</pre>
"""
class AlreadyCached(Exception):
"""This exception is raised when a path has already been cached.
"""
class CacheScanner:
def __init__(self, path, registry):
self.path = path
self.registry = registry
self.doCache = 0
def cache(self):
c = self.registry.getCachedPath(self.path)
if c is not None:
raise AlreadyCached(c)
self.recache()
def recache(self):
self.doCache = 1
noRsrc = error.ErrorPage(500, "Whoops! Internal Error", rpyNoResource)
def ResourceScript(path, registry):
"""
I am a normal py file which must define a 'resource' global, which should
be an instance of (a subclass of) web.resource.Resource; it will be
rendered.
"""
cs = CacheScanner(path, registry)
glob = {'__file__': path,
'resource': noRsrc,
'registry': registry,
'cache': cs.cache,
'recache': cs.recache}
try:
execfile(path, glob, glob)
except AlreadyCached, ac:
return ac.args[0]
rsrc = glob['resource']
if cs.doCache and rsrc is not noRsrc:
registry.cachePath(path, rsrc)
return rsrc
def ResourceTemplate(path, registry):
from quixote import ptl_compile
glob = {'__file__': path,
'resource': error.ErrorPage(500, "Whoops! Internal Error",
rpyNoResource),
'registry': registry}
e = ptl_compile.compile_template(open(path), path)
exec e in glob
return glob['resource']
class ResourceScriptWrapper(resource.Resource):
def __init__(self, path, registry=None):
resource.Resource.__init__(self)
self.path = path
self.registry = registry or static.Registry()
def render(self, request):
res = ResourceScript(self.path, self.registry)
return res.render(request)
def getChildWithDefault(self, path, request):
res = ResourceScript(self.path, self.registry)
return res.getChildWithDefault(path, request)
class ResourceScriptDirectory(resource.Resource):
def __init__(self, pathname, registry=None):
resource.Resource.__init__(self)
self.path = pathname
self.registry = registry or static.Registry()
def getChild(self, path, request):
fn = os.path.join(self.path, path)
if os.path.isdir(fn):
return ResourceScriptDirectory(fn, self.registry)
if os.path.exists(fn):
return ResourceScript(fn, self.registry)
return error.NoResource()
def render(self, request):
return error.NoResource().render(request)
class PythonScript(resource.Resource):
"""I am an extremely simple dynamic resource; an embedded python script.
This will execute a file (usually of the extension '.epy') as Python code,
internal to the webserver.
"""
isLeaf = 1
def __init__(self, filename, registry):
"""Initialize me with a script name.
"""
self.filename = filename
self.registry = registry
def render(self, request):
"""Render me to a web client.
Load my file, execute it in a special namespace (with 'request' and
'__file__' global vars) and finish the request. Output to the web-page
will NOT be handled with print - standard output goes to the log - but
with request.write.
"""
request.setHeader("x-powered-by","Twisted/%s" % copyright.version)
namespace = {'request': request,
'__file__': self.filename,
'registry': self.registry}
try:
execfile(self.filename, namespace, namespace)
except IOError, e:
if e.errno == 2: #file not found
request.setResponseCode(http.NOT_FOUND)
request.write(error.NoResource("File not found.").render(request))
except:
io = StringIO.StringIO()
traceback.print_exc(file=io)
request.write(html.PRE(io.getvalue()))
request.finish()
return server.NOT_DONE_YET
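# --- Sketch of the cache()/recache() hooks injected into an .rpy script's
# namespace by ResourceScript above (file and module names are illustrative).
# cache() raises AlreadyCached on later requests, so the body below runs
# only once per registry:
#
#   # MyCachedApp.rpy
#   cache()
#   import mygreatresource
#   resource = mygreatresource.MyGreatResource()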
| apache-2.0 |
jeremyfix/pylearn2 | pylearn2/scripts/gpu_pkl_to_cpu_pkl.py | 44 | 6819 | #!/usr/bin/env python
"""
Converts a pickle file containing CudaNdarraySharedVariables into
a pickle file containing only TensorSharedVariables.
Usage:
gpu_pkl_to_cpu_pkl.py <gpu.pkl> <cpu.pkl>
Loads gpu.pkl, replaces cuda shared variables with numpy ones,
and saves to cpu.pkl.
If you create a model while using GPU and later want to unpickle it
on a machine without a GPU, you must convert it this way.
This is theano's fault, not pylearn2's. I would like to fix theano,
but don't understand the innards of theano well enough, and none of
the theano developers has been willing to help me at all with this
issue. If it annoys you that you have to do this, please help me
persuade the theano developers that this issue is worth more of their
attention.
Note: This script is also useful if you want to create a model on GPU,
save it, and then run other theano functionality on CPU later, even
if your machine has a GPU. It could be useful to modify this script
to do the reverse conversion, so you can create a model on CPU, save
it, and then run theano functions on GPU later.
Further note: this script is very hacky and imprecise. It is likely
to do things like blow away subclasses of list and dict and turn them
into plain lists and dicts. It is also liable to overlook all sorts of
theano shared variables if you have an exotic data structure stored in
the pickle. You probably want to test that the cpu pickle file can be
loaded on a machine without GPU to be sure that the script actually
found them all.
"""
from __future__ import print_function
__author__ = "Ian Goodfellow"
import sys
import types
if __name__ == '__main__':
_, in_path, out_path = sys.argv
from pylearn2.utils import serial
from theano import shared
model = serial.load(in_path)
# map ids of objects we've fixed before to the fixed version, so we don't clone objects when fixing
# can't use object itself as key because not all objects are hashable
already_fixed = {}
# ids of objects being fixed right now (we don't support cycles)
currently_fixing = []
blacklist = ["im_class", "func_closure", "co_argcount", "co_cellvars", "func_code",
"append", "capitalize", "im_self", "func_defaults", "func_name"]
blacklisted_keys = ["bytearray", "IndexError", "isinstance", "copyright", "main"]
postponed_fixes = []
class Placeholder(object):
def __init__(self, id_to_sub):
self.id_to_sub = id_to_sub
class FieldFixer(object):
def __init__(self, obj, field, fixed_field):
self.obj = obj
self.field = field
self.fixed_field = fixed_field
def apply(self):
obj = self.obj
field = self.field
fixed_field = already_fixed[self.fixed_field.id_to_sub]
setattr(obj, field, fixed_field)
def fix(obj, stacklevel=0):
prefix = ''.join(['.']*stacklevel)
oid = id(obj)
canary_oid = oid
print(prefix + 'fixing '+str(oid))
if oid in already_fixed:
return already_fixed[oid]
if oid in currently_fixing:
print('returning placeholder for '+str(oid))
return Placeholder(oid)
currently_fixing.append(oid)
if hasattr(obj, 'set_value'):
# Base case: we found a shared variable, must convert it
rval = shared(obj.get_value())
# Sabotage its getstate so if something tries to pickle it, we'll find out
obj.__getstate__ = None
elif obj is None:
rval = None
elif isinstance(obj, list):
print(prefix + 'fixing a list')
rval = []
for i, elem in enumerate(obj):
print(prefix + '.fixing elem %d' % i)
fixed_elem = fix(elem, stacklevel + 2)
if isinstance(fixed_elem, Placeholder):
raise NotImplementedError()
rval.append(fixed_elem)
elif isinstance(obj, dict):
print(prefix + 'fixing a dict')
rval = obj
"""
rval = {}
for key in obj:
if key in blacklisted_keys or (isinstance(key, str) and key.endswith('Error')):
print(prefix + '.%s is blacklisted' % str(key))
rval[key] = obj[key]
continue
print(prefix + '.fixing key ' + str(key) + ' of type '+str(type(key)))
fixed_key = fix(key, stacklevel + 2)
if isinstance(fixed_key, Placeholder):
raise NotImplementedError()
print(prefix + '.fixing value for key '+str(key))
fixed_value = fix(obj[key], stacklevel + 2)
if isinstance(fixed_value, Placeholder):
raise NotImplementedError()
rval[fixed_key] = fixed_value
"""
elif isinstance(obj, tuple):
print(prefix + 'fixing a tuple')
rval = []
for i, elem in enumerate(obj):
print(prefix + '.fixing elem %d' % i)
fixed_elem = fix(elem, stacklevel + 2)
if isinstance(fixed_elem, Placeholder):
raise NotImplementedError()
rval.append(fixed_elem)
rval = tuple(rval)
elif isinstance(obj, (int, float, str)):
rval = obj
else:
print(prefix + 'fixing a generic object')
field_names = dir(obj)
for field in field_names:
if isinstance(getattr(obj, field), types.MethodType):
print(prefix + '.%s is an instancemethod' % field)
continue
if field in blacklist or (field.startswith('__')):
print(prefix + '.%s is blacklisted' % field)
continue
print(prefix + '.fixing field %s' % field)
updated_field = fix(getattr(obj, field), stacklevel + 2)
print(prefix + '.applying fix to field %s' % field)
if isinstance(updated_field, Placeholder):
postponed_fixes.append(FieldFixer(obj, field, updated_field))
else:
try:
setattr(obj, field, updated_field)
except Exception as e:
print("Couldn't do that because of exception: "+str(e))
rval = obj
already_fixed[oid] = rval
print(prefix+'stored fix for '+str(oid))
assert canary_oid == oid
del currently_fixing[currently_fixing.index(oid)]
return rval
model = fix(model)
assert len(currently_fixing) == 0
for fixer in postponed_fixes:
fixer.apply()
serial.save(out_path, model)
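# --- The base case above in isolation (sketch, not part of the script):
# a CudaNdarray shared variable is rebuilt as a CPU TensorSharedVariable
# from its numpy value.
#
#   from theano import shared
#   cpu_var = shared(gpu_var.get_value())   # get_value() returns a numpy array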
| bsd-3-clause |
vhanla/CudaText | app/cudatext.app/Contents/Resources/py/sys/idna/codec.py | 426 | 3299 | from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re
_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
class Codec(codecs.Codec):
def encode(self, data, errors='strict'):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return "", 0
return encode(data), len(data)
def decode(self, data, errors='strict'):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return u"", 0
return decode(data), len(data)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
def _buffer_encode(self, data, errors, final):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return ("", 0)
labels = _unicode_dots_re.split(data)
trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(alabel(label))
if size:
size += 1
size += len(label)
# Join with U+002E
result = ".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, data, errors, final):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return (u"", 0)
# IDNA allows decoding to operate on Unicode strings, too.
if isinstance(data, unicode):
labels = _unicode_dots_re.split(data)
else:
# Must be ASCII string
data = str(data)
unicode(data, "ascii")
labels = data.split(".")
trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = u'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = u'.'
result = []
size = 0
for label in labels:
result.append(ulabel(label))
if size:
size += 1
size += len(label)
result = u".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(
name='idna',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
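# --- Usage sketch (not part of the module): wiring getregentry() into the
# codecs registry under the arbitrary name 'idna2008' so it does not shadow
# the built-in 'idna' codec.
def _search(name):
    # codecs.register() calls this with the requested codec name
    return getregentry() if name == 'idna2008' else None
# codecs.register(_search)
# print u'b\xfccher.example'.encode('idna2008')   # -> xn--bcher-kva.example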
| mpl-2.0 |
else/mosquitto | test/broker/09-plugin-auth-unpwd-success.py | 7 | 1040 | #!/usr/bin/env python
# Test whether a connection is successful with correct username and password
# when using a simple auth_plugin.
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 10
connect_packet = mosq_test.gen_connect("connect-uname-pwd-test", keepalive=keepalive, username="test-username", password="cnwTICONIURW")
connack_packet = mosq_test.gen_connack(rc=0)
broker = mosq_test.start_broker(filename=os.path.basename(__file__))
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20)
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
exit(rc)
| bsd-3-clause |
ksmit799/Toontown-Source | toontown/catalog/CatalogFurnitureItem.py | 1 | 30435 | import CatalogAtticItem
import CatalogItem
import random
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
FTModelName = 0
FTColor = 1
FTColorOptions = 2
FTBasePrice = 3
FTFlags = 4
FTScale = 5
FLBank = 1
FLCloset = 2
FLRug = 4
FLPainting = 8
FLOnTable = 16
FLIsTable = 32
FLPhone = 64
FLBillboard = 128
FLTrunk = 256
furnitureColors = [(0.792, 0.353, 0.29, 1.0),
(0.176, 0.592, 0.439, 1.0),
(0.439, 0.424, 0.682, 1.0),
(0.325, 0.58, 0.835, 1.0),
(0.753, 0.345, 0.557, 1.0),
(0.992, 0.843, 0.392, 1.0)]
woodColors = [(0.933, 0.773, 0.569, 1.0),
(0.9333, 0.6785, 0.055, 1.0),
(0.545, 0.451, 0.333, 1.0),
(0.541, 0.0, 0.0, 1.0),
(0.5451, 0.2706, 0.0745, 1.0),
(0.5451, 0.4118, 0.4118, 1.0)]
BankToMoney = {1300: 12000,
1310: 12000,
1320: 12000,
1330: 12000,
1340: 12000,
1350: 12000}
MoneyToBank = {}
for bankId, maxMoney in BankToMoney.items():
MoneyToBank[maxMoney] = bankId
MaxBankId = 1350
ClosetToClothes = {500: 10,
502: 15,
504: 20,
506: 25,
508: 50,
510: 10,
512: 15,
514: 20,
516: 25,
518: 50}
ClothesToCloset = {}
for closetId, maxClothes in ClosetToClothes.items():
if not ClothesToCloset.has_key(maxClothes):
ClothesToCloset[maxClothes] = (closetId,)
else:
ClothesToCloset[maxClothes] += (closetId,)
MaxClosetIds = (508, 518)
MaxTrunkIds = (4000, 4010)
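# Worked example of the reverse maps built above: every bank id maps to
# 12000 beans, so MoneyToBank collapses to the single key 12000, whose value
# is whichever bank id the dict iteration yields last. ClothesToCloset
# instead accumulates tuples per capacity, e.g. ClothesToCloset[50] holds
# both 508 and 518 (the boy and girl 50-item closets).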
FurnitureTypes = {100: ('phase_5.5/models/estate/chairA',
None,
None,
80),
105: ('phase_5.5/models/estate/chairAdesat',
None,
{0: (('**/cushion*', furnitureColors[0]), ('**/arm*', furnitureColors[0])),
1: (('**/cushion*', furnitureColors[1]), ('**/arm*', furnitureColors[1])),
2: (('**/cushion*', furnitureColors[2]), ('**/arm*', furnitureColors[2])),
3: (('**/cushion*', furnitureColors[3]), ('**/arm*', furnitureColors[3])),
4: (('**/cushion*', furnitureColors[4]), ('**/arm*', furnitureColors[4])),
5: (('**/cushion*', furnitureColors[5]), ('**/arm*', furnitureColors[5]))},
160),
110: ('phase_3.5/models/modules/chair',
None,
None,
40),
120: ('phase_5.5/models/estate/deskChair',
None,
None,
60),
130: ('phase_5.5/models/estate/BugRoomChair',
None,
None,
160),
140: ('phase_5.5/models/estate/UWlobsterChair',
None,
None,
200),
145: ('phase_5.5/models/estate/UWlifeSaverChair',
None,
None,
200),
150: ('phase_5.5/models/estate/West_saddleStool2',
None,
None,
160),
160: ('phase_5.5/models/estate/West_nativeChair',
None,
None,
160),
170: ('phase_5.5/models/estate/cupcakeChair',
None,
None,
240),
200: ('phase_5.5/models/estate/regular_bed',
None,
None,
400),
205: ('phase_5.5/models/estate/regular_bed_desat',
None,
{0: (('**/bar*', woodColors[0]),
('**/post*', woodColors[0]),
('**/*support', woodColors[0]),
('**/top', woodColors[0]),
('**/bottom', woodColors[0]),
('**/pPlane*', woodColors[0])),
1: (('**/bar*', woodColors[1]),
('**/post*', woodColors[1]),
('**/*support', woodColors[1]),
('**/top', woodColors[1]),
('**/bottom', woodColors[1]),
('**/pPlane*', woodColors[1])),
2: (('**/bar*', woodColors[2]),
('**/post*', woodColors[2]),
('**/*support', woodColors[2]),
('**/top', woodColors[2]),
('**/bottom', woodColors[2]),
('**/pPlane*', woodColors[2])),
3: (('**/bar*', woodColors[3]),
('**/post*', woodColors[3]),
('**/*support', woodColors[3]),
('**/top', woodColors[3]),
('**/bottom', woodColors[3]),
('**/pPlane*', woodColors[3])),
4: (('**/bar*', woodColors[4]),
('**/post*', woodColors[4]),
('**/*support', woodColors[4]),
('**/top', woodColors[4]),
('**/bottom', woodColors[4]),
('**/pPlane*', woodColors[4])),
5: (('**/bar*', woodColors[5]),
('**/post*', woodColors[5]),
('**/*support', woodColors[5]),
('**/top', woodColors[5]),
('**/bottom', woodColors[5]),
('**/pPlane*', woodColors[5]))},
800),
210: ('phase_5.5/models/estate/girly_bed',
None,
None,
450),
220: ('phase_5.5/models/estate/bathtub_bed',
None,
None,
550),
230: ('phase_5.5/models/estate/bugRoomBed',
None,
None,
600),
240: ('phase_5.5/models/estate/UWBoatBed',
None,
None,
600),
250: ('phase_5.5/models/estate/West_cactusHammoc',
None,
None,
550),
260: ('phase_5.5/models/estate/icecreamBed',
None,
None,
700),
270: ('phase_5.5/models/estate/trolley_bed',
None,
None,
1200,
None,
None,
0.25),
300: ('phase_5.5/models/estate/Piano',
None,
None,
1000,
FLIsTable),
310: ('phase_5.5/models/estate/Organ',
None,
None,
2500),
400: ('phase_5.5/models/estate/FireplaceSq',
None,
None,
800),
410: ('phase_5.5/models/estate/FireplaceGirlee',
None,
None,
800),
420: ('phase_5.5/models/estate/FireplaceRound',
None,
None,
800),
430: ('phase_5.5/models/estate/bugRoomFireplace',
None,
None,
800),
440: ('phase_5.5/models/estate/CarmelAppleFireplace',
None,
None,
800),
450: ('phase_5.5/models/estate/fireplace_coral',
None,
None,
950),
460: ('phase_5.5/models/estate/tt_m_prp_int_fireplace_coral',
None,
None,
1250,
None,
None,
0.5),
470: ('phase_5.5/models/estate/tt_m_prp_int_fireplace_square',
None,
None,
1100,
None,
None,
0.5),
480: ('phase_5.5/models/estate/tt_m_prp_int_fireplace_round',
None,
None,
1100,
None,
None,
0.5),
490: ('phase_5.5/models/estate/tt_m_prp_int_fireplace_girlee',
None,
None,
1100,
None,
None,
0.5),
491: ('phase_5.5/models/estate/tt_m_prp_int_fireplace_bugRoom',
None,
None,
1100,
None,
None,
0.5),
492: ('phase_5.5/models/estate/tt_m_prp_int_fireplace_caramelApple',
None,
None,
1100,
None,
None,
0.5),
500: ('phase_5.5/models/estate/closetBoy',
None,
None,
500,
FLCloset,
0.85),
502: ('phase_5.5/models/estate/closetBoy',
None,
None,
500,
FLCloset,
1.0),
504: ('phase_5.5/models/estate/closetBoy',
None,
None,
500,
FLCloset,
1.15),
506: ('phase_5.5/models/estate/closetBoy',
None,
None,
500,
FLCloset,
1.3),
508: ('phase_5.5/models/estate/closetBoy',
None,
None,
500,
FLCloset,
1.3),
510: ('phase_5.5/models/estate/closetGirl',
None,
None,
500,
FLCloset,
0.85),
512: ('phase_5.5/models/estate/closetGirl',
None,
None,
500,
FLCloset,
1.0),
514: ('phase_5.5/models/estate/closetGirl',
None,
None,
500,
FLCloset,
1.15),
516: ('phase_5.5/models/estate/closetGirl',
None,
None,
500,
FLCloset,
1.3),
518: ('phase_5.5/models/estate/closetGirl',
None,
None,
500,
FLCloset,
1.3),
600: ('phase_3.5/models/modules/lamp_short',
None,
None,
45,
FLOnTable),
610: ('phase_3.5/models/modules/lamp_tall',
None,
None,
45),
620: ('phase_5.5/models/estate/lampA',
None,
None,
35,
FLOnTable),
625: ('phase_5.5/models/estate/lampADesat',
None,
{0: (('**/top', furnitureColors[0]),),
1: (('**/top', furnitureColors[1]),),
2: (('**/top', furnitureColors[2]),),
3: (('**/top', furnitureColors[3]),),
4: (('**/top', furnitureColors[4]),),
5: (('**/top', furnitureColors[5]),)},
70,
FLOnTable),
630: ('phase_5.5/models/estate/bugRoomDaisyLamp1',
None,
None,
55),
640: ('phase_5.5/models/estate/bugRoomDaisyLamp2',
None,
None,
55),
650: ('phase_5.5/models/estate/UWlamp_jellyfish',
None,
None,
55,
FLOnTable),
660: ('phase_5.5/models/estate/UWlamps_jellyfishB',
None,
None,
55,
FLOnTable),
670: ('phase_5.5/models/estate/West_cowboyLamp',
None,
None,
55,
FLOnTable),
680: ('phase_5.5/models/estate/tt_m_ara_int_candlestick',
None,
{0: (('**/candlestick/candlestick', (1.0,
1.0,
1.0,
1.0)),),
1: (('**/candlestick/candlestick', furnitureColors[1]),),
2: (('**/candlestick/candlestick', furnitureColors[2]),),
3: (('**/candlestick/candlestick', furnitureColors[3]),),
4: (('**/candlestick/candlestick', furnitureColors[4]),),
5: (('**/candlestick/candlestick', furnitureColors[5]),),
6: (('**/candlestick/candlestick', furnitureColors[0]),)},
20,
FLOnTable),
681: ('phase_5.5/models/estate/tt_m_ara_int_candlestickLit',
None,
{0: (('**/candlestick/candlestick', (1.0,
1.0,
1.0,
1.0)),),
1: (('**/candlestickLit/candlestick', furnitureColors[1]),),
2: (('**/candlestickLit/candlestick', furnitureColors[2]),),
3: (('**/candlestickLit/candlestick', furnitureColors[3]),),
4: (('**/candlestickLit/candlestick', furnitureColors[4]),),
5: (('**/candlestickLit/candlestick', furnitureColors[5]),),
6: (('**/candlestickLit/candlestick', furnitureColors[0]),)},
25,
FLOnTable),
700: ('phase_3.5/models/modules/couch_1person',
None,
None,
230),
705: ('phase_5.5/models/estate/couch_1personDesat',
None,
{0: (('**/*couch', furnitureColors[0]),),
1: (('**/*couch', furnitureColors[1]),),
2: (('**/*couch', furnitureColors[2]),),
3: (('**/*couch', furnitureColors[3]),),
4: (('**/*couch', furnitureColors[4]),),
5: (('**/*couch', furnitureColors[5]),)},
460),
710: ('phase_3.5/models/modules/couch_2person',
None,
None,
230),
715: ('phase_5.5/models/estate/couch_2personDesat',
None,
{0: (('**/*couch', furnitureColors[0]),),
1: (('**/*couch', furnitureColors[1]),),
2: (('**/*couch', furnitureColors[2]),),
3: (('**/*couch', furnitureColors[3]),),
4: (('**/*couch', furnitureColors[4]),),
5: (('**/*couch', furnitureColors[5]),)},
460),
720: ('phase_5.5/models/estate/West_HayCouch',
None,
None,
420),
730: ('phase_5.5/models/estate/twinkieCouch',
None,
None,
480),
800: ('phase_3.5/models/modules/desk_only_wo_phone',
None,
None,
65,
FLIsTable),
810: ('phase_5.5/models/estate/BugRoomDesk',
None,
None,
125,
FLIsTable),
900: ('phase_3.5/models/modules/umbrella_stand',
None,
None,
30),
910: ('phase_3.5/models/modules/coatrack',
None,
None,
75),
920: ('phase_3.5/models/modules/paper_trashcan',
None,
None,
30),
930: ('phase_5.5/models/estate/BugRoomRedMushroomPot',
None,
None,
60),
940: ('phase_5.5/models/estate/BugRoomYellowMushroomPot',
None,
None,
60),
950: ('phase_5.5/models/estate/UWcoralClothRack',
None,
None,
75),
960: ('phase_5.5/models/estate/west_barrelStand',
None,
None,
75),
970: ('phase_5.5/models/estate/West_fatCactus',
None,
None,
75),
980: ('phase_5.5/models/estate/West_Tepee',
None,
None,
150),
990: ('phase_5.5/models/estate/gag_fan',
None,
None,
500,
None,
None,
0.5),
1000: ('phase_3.5/models/modules/rug',
None,
None,
75,
FLRug),
1010: ('phase_5.5/models/estate/rugA',
None,
None,
75,
FLRug),
1015: ('phase_5.5/models/estate/rugADesat',
None,
{0: (('**/pPlane*', furnitureColors[0]),),
1: (('**/pPlane*', furnitureColors[1]),),
2: (('**/pPlane*', furnitureColors[2]),),
3: (('**/pPlane*', furnitureColors[3]),),
4: (('**/pPlane*', furnitureColors[4]),),
5: (('**/pPlane*', furnitureColors[5]),)},
150,
FLRug),
1020: ('phase_5.5/models/estate/rugB',
None,
None,
75,
FLRug,
2.5),
1030: ('phase_5.5/models/estate/bugRoomLeafMat',
None,
None,
75,
FLRug),
1040: ('phase_5.5/models/estate/tt_m_ara_int_presents',
None,
None,
300),
1050: ('phase_5.5/models/estate/tt_m_ara_int_sled',
None,
None,
400),
1100: ('phase_5.5/models/estate/cabinetRwood',
None,
None,
825),
1110: ('phase_5.5/models/estate/cabinetYwood',
None,
None,
825),
1120: ('phase_3.5/models/modules/bookcase',
None,
None,
650,
FLIsTable),
1130: ('phase_3.5/models/modules/bookcase_low',
None,
None,
650,
FLIsTable),
1140: ('phase_5.5/models/estate/icecreamChest',
None,
None,
750),
1200: ('phase_3.5/models/modules/ending_table',
None,
None,
60,
FLIsTable),
1210: ('phase_5.5/models/estate/table_radio',
None,
None,
60,
FLIsTable,
50.0),
1215: ('phase_5.5/models/estate/table_radioDesat',
None,
{0: (('**/RADIOTABLE_*', woodColors[0]),),
1: (('**/RADIOTABLE_*', woodColors[1]),),
2: (('**/RADIOTABLE_*', woodColors[2]),),
3: (('**/RADIOTABLE_*', woodColors[3]),),
4: (('**/RADIOTABLE_*', woodColors[4]),),
5: (('**/RADIOTABLE_*', woodColors[5]),)},
120,
FLIsTable,
50.0),
1220: ('phase_5.5/models/estate/coffeetableSq',
None,
None,
180,
FLIsTable),
1230: ('phase_5.5/models/estate/coffeetableSq_BW',
None,
None,
180,
FLIsTable),
1240: ('phase_5.5/models/estate/UWtable',
None,
None,
180,
FLIsTable),
1250: ('phase_5.5/models/estate/cookieTableA',
None,
None,
220,
FLIsTable),
1260: ('phase_5.5/models/estate/TABLE_Bedroom_Desat',
None,
{0: (('**/Bedroom_Table', woodColors[0]),),
1: (('**/Bedroom_Table', woodColors[1]),),
2: (('**/Bedroom_Table', woodColors[2]),),
3: (('**/Bedroom_Table', woodColors[3]),),
4: (('**/Bedroom_Table', woodColors[4]),),
5: (('**/Bedroom_Table', woodColors[5]),)},
220,
FLIsTable),
1300: ('phase_5.5/models/estate/jellybeanBank',
None,
None,
0,
FLBank,
1.0),
1310: ('phase_5.5/models/estate/jellybeanBank',
None,
None,
0,
FLBank,
1.0),
1320: ('phase_5.5/models/estate/jellybeanBank',
None,
None,
0,
FLBank,
1.0),
1330: ('phase_5.5/models/estate/jellybeanBank',
None,
None,
0,
FLBank,
1.0),
1340: ('phase_5.5/models/estate/jellybeanBank',
None,
None,
0,
FLBank,
1.0),
1350: ('phase_5.5/models/estate/jellybeanBank',
None,
None,
0,
FLBank,
1.0),
1399: ('phase_5.5/models/estate/prop_phone-mod',
None,
None,
0,
FLPhone),
1400: ('phase_5.5/models/estate/cezanne_toon',
None,
None,
425,
FLPainting,
2.0),
1410: ('phase_5.5/models/estate/flowers',
None,
None,
425,
FLPainting,
2.0),
1420: ('phase_5.5/models/estate/modernistMickey',
None,
None,
425,
FLPainting,
2.0),
1430: ('phase_5.5/models/estate/rembrandt_toon',
None,
None,
425,
FLPainting,
2.0),
1440: ('phase_5.5/models/estate/landscape',
None,
None,
425,
FLPainting,
100.0),
1441: ('phase_5.5/models/estate/whistler-horse',
None,
None,
425,
FLPainting,
2.0),
1442: ('phase_5.5/models/estate/degasHorseStar',
None,
None,
425,
FLPainting,
2.5),
1443: ('phase_5.5/models/estate/MagPie',
None,
None,
425,
FLPainting,
2.0),
1450: ('phase_5.5/models/estate/tt_m_prp_int_painting_valentine',
None,
None,
425,
FLPainting),
1500: ('phase_5.5/models/estate/RADIO_A',
None,
None,
25,
FLOnTable,
15.0),
1510: ('phase_5.5/models/estate/RADIO_B',
None,
None,
25,
FLOnTable,
15.0),
1520: ('phase_5.5/models/estate/radio_c',
None,
None,
25,
FLOnTable,
15.0),
1530: ('phase_5.5/models/estate/bugRoomTV',
None,
None,
675),
1600: ('phase_5.5/models/estate/vaseA_short',
None,
None,
120,
FLOnTable),
1610: ('phase_5.5/models/estate/vaseA_tall',
None,
None,
120,
FLOnTable),
1620: ('phase_5.5/models/estate/vaseB_short',
None,
None,
120,
FLOnTable),
1630: ('phase_5.5/models/estate/vaseB_tall',
None,
None,
120,
FLOnTable),
1640: ('phase_5.5/models/estate/vaseC_short',
None,
None,
120,
FLOnTable),
1650: ('phase_5.5/models/estate/vaseD_short',
None,
None,
120,
FLOnTable),
1660: ('phase_5.5/models/estate/UWcoralVase',
None,
None,
120,
FLOnTable | FLBillboard),
1661: ('phase_5.5/models/estate/UWshellVase',
None,
None,
120,
FLOnTable | FLBillboard),
1670: ('phase_5.5/models/estate/tt_m_prp_int_roseVase_valentine',
None,
None,
200,
FLOnTable),
1680: ('phase_5.5/models/estate/tt_m_prp_int_roseWatercan_valentine',
None,
None,
200,
FLOnTable),
1700: ('phase_5.5/models/estate/popcornCart',
None,
None,
400),
1710: ('phase_5.5/models/estate/bugRoomLadyBug',
None,
None,
260),
1720: ('phase_5.5/models/estate/UWfountain',
None,
None,
450),
1725: ('phase_5.5/models/estate/UWOceanDryer',
None,
None,
400),
1800: ('phase_5.5/models/estate/UWskullBowl',
None,
None,
120,
FLOnTable),
1810: ('phase_5.5/models/estate/UWlizardBowl',
None,
None,
120,
FLOnTable),
1900: ('phase_5.5/models/estate/UWswordFish',
None,
None,
425,
FLPainting,
0.5),
1910: ('phase_5.5/models/estate/UWhammerhead',
None,
None,
425,
FLPainting),
1920: ('phase_5.5/models/estate/West_hangingHorns',
None,
None,
475,
FLPainting),
1930: ('phase_5.5/models/estate/West_Sombrero',
None,
None,
425,
FLPainting),
1940: ('phase_5.5/models/estate/West_fancySombrero',
None,
None,
450,
FLPainting),
1950: ('phase_5.5/models/estate/West_CoyotePawdecor',
None,
None,
475,
FLPainting),
1960: ('phase_5.5/models/estate/West_Horseshoe',
None,
None,
475,
FLPainting),
1970: ('phase_5.5/models/estate/West_bisonPortrait',
None,
None,
475,
FLPainting),
2000: ('phase_5.5/models/estate/candySwingSet',
None,
None,
300),
2010: ('phase_5.5/models/estate/cakeSlide',
None,
None,
200),
3000: ('phase_5.5/models/estate/BanannaSplitShower',
None,
None,
400),
4000: ('phase_5.5/models/estate/tt_m_ara_est_accessoryTrunkBoy',
None,
None,
5,
FLTrunk,
0.9),
4010: ('phase_5.5/models/estate/tt_m_ara_est_accessoryTrunkGirl',
None,
None,
5,
FLTrunk,
0.9),
10000: ('phase_4/models/estate/pumpkin_short',
None,
None,
200,
FLOnTable),
10010: ('phase_4/models/estate/pumpkin_tall',
None,
None,
250,
FLOnTable),
10020: ('phase_5.5/models/estate/tt_m_prp_int_winter_tree',
None,
None,
500,
None,
None,
0.1),
10030: ('phase_5.5/models/estate/tt_m_prp_int_winter_wreath',
None,
None,
200,
FLPainting)}
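# A minimal sketch (illustrative only; assumes the FTModelName, FTColorOptions,
# FTBasePrice, FTFlags and FTScale index constants defined earlier in this file)
# of how one FurnitureTypes entry above unpacks; flags and scale are optional
# trailing fields:
# entry = FurnitureTypes[1200]   # ('phase_3.5/models/modules/ending_table', None, None, 60, FLIsTable)
# entry[FTModelName]             # model path passed to loader.loadModel()
# entry[FTBasePrice]             # 60
# FTFlags < len(entry)           # True, so entry[FTFlags] is FLIsTable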
class CatalogFurnitureItem(CatalogAtticItem.CatalogAtticItem):
def makeNewItem(self, furnitureType, colorOption = None, posHpr = None):
self.furnitureType = furnitureType
self.colorOption = colorOption
self.posHpr = posHpr
CatalogAtticItem.CatalogAtticItem.makeNewItem(self)
def needsCustomize(self):
return self.colorOption is None and FurnitureTypes[self.furnitureType][FTColorOptions] is not None
def saveHistory(self):
return 1
def replacesExisting(self):
return self.getFlags() & (FLCloset | FLBank | FLTrunk) != 0
def hasExisting(self):
return 1
def getYourOldDesc(self):
if self.getFlags() & FLCloset:
return TTLocalizer.FurnitureYourOldCloset
elif self.getFlags() & FLBank:
return TTLocalizer.FurnitureYourOldBank
elif self.getFlags() & FLTrunk:
return TTLocalizer.FurnitureYourOldTrunk
else:
return None
def notOfferedTo(self, avatar):
if self.getFlags() & (FLCloset | FLTrunk):
decade = self.furnitureType - self.furnitureType % 10
forBoys = (decade == 500 or decade == 4000)
if avatar.getStyle().getGender() == 'm':
return not forBoys
else:
return forBoys
return 0
def isDeletable(self):
return self.getFlags() & (FLBank | FLCloset | FLPhone | FLTrunk) == 0
def getMaxAccessories(self):
return ToontownGlobals.MaxAccessories
def getMaxBankMoney(self):
return BankToMoney.get(self.furnitureType)
def getMaxClothes(self):
index = self.furnitureType % 10
if index == 0:
return 10
elif index == 2:
return 15
elif index == 4:
return 20
elif index == 6:
return 25
elif index == 8:
return 50
else:
return None
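# Illustrative only, using the capacity ladder above: closet capacity is keyed
# off the last digit of the furniture type, e.g. (hypothetical calls)
# CatalogFurnitureItem(500).getMaxClothes()   # -> 10
# CatalogFurnitureItem(508).getMaxClothes()   # -> 50, a 50-item closet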
def reachedPurchaseLimit(self, avatar):
if self.getFlags() & FLBank:
if self.getMaxBankMoney() <= avatar.getMaxBankMoney():
return 1
if self in avatar.onOrder or self in avatar.mailboxContents:
return 1
if self.getFlags() & FLCloset:
if self.getMaxClothes() <= avatar.getMaxClothes():
return 1
if self in avatar.onOrder or self in avatar.mailboxContents:
return 1
if self.getFlags() & FLTrunk:
if self.getMaxAccessories() <= avatar.getMaxAccessories():
return 1
if self in avatar.onOrder or self in avatar.mailboxContents:
return 1
return 0
def getTypeName(self):
flags = self.getFlags()
if flags & FLPainting:
return TTLocalizer.PaintingTypeName
else:
return TTLocalizer.FurnitureTypeName
def getName(self):
return TTLocalizer.FurnitureNames[self.furnitureType]
def getFlags(self):
defn = FurnitureTypes[self.furnitureType]
if FTFlags < len(defn):
flag = defn[FTFlags]
if flag is None:
return 0
else:
return flag
else:
return 0
def isGift(self):
if self.getEmblemPrices():
return 0
if self.getFlags() & (FLCloset | FLBank | FLTrunk):
return 0
else:
return 1
def recordPurchase(self, avatar, optional):
house, retcode = self.getHouseInfo(avatar)
self.giftTag = None
if retcode >= 0:
if self.getFlags() & FLCloset:
if avatar.getMaxClothes() > self.getMaxClothes():
return ToontownGlobals.P_AlreadyOwnBiggerCloset
avatar.b_setMaxClothes(self.getMaxClothes())
if self.getFlags() & FLTrunk:
avatar.b_setMaxAccessories(self.getMaxAccessories())
house.addAtticItem(self)
if self.getFlags() & FLBank:
avatar.b_setMaxBankMoney(self.getMaxBankMoney())
return retcode
def getDeliveryTime(self):
return 24 * 60
def getPicture(self, avatar):
model = self.loadModel()
spin = 1
flags = self.getFlags()
if flags & FLRug:
spin = 0
model.setP(90)
elif flags & FLPainting:
spin = 0
elif flags & FLBillboard:
spin = 0
model.setBin('unsorted', 0, 1)
self.hasPicture = True
return self.makeFrameModel(model, spin)
def output(self, store = -1):
return 'CatalogFurnitureItem(%s%s)' % (self.furnitureType, self.formatOptionalData(store))
def getFilename(self):
defn = FurnitureTypes[self.furnitureType]
return defn[FTModelName]
def compareTo(self, other):
return self.furnitureType - other.furnitureType
def getHashContents(self):
return self.furnitureType
def getSalePrice(self):
if self.furnitureType in [508, 518]:
return 50
else:
return CatalogItem.CatalogItem.getSalePrice(self)
def getBasePrice(self):
return FurnitureTypes[self.furnitureType][FTBasePrice]
def loadModel(self):
defn = FurnitureTypes[self.furnitureType]
model = loader.loadModel(defn[FTModelName])
self.applyColor(model, defn[FTColor])
if defn[FTColorOptions] is not None:
if self.colorOption is None:
option = random.choice(defn[FTColorOptions].values())
else:
option = defn[FTColorOptions].get(self.colorOption)
self.applyColor(model, option)
if FTScale < len(defn):
scale = defn[FTScale]
if scale is not None:
model.setScale(scale)
model.flattenLight()
return model
def decodeDatagram(self, di, versionNumber, store):
CatalogAtticItem.CatalogAtticItem.decodeDatagram(self, di, versionNumber, store)
self.furnitureType = di.getInt16()
self.colorOption = None
defn = FurnitureTypes[self.furnitureType]
if defn[FTColorOptions]:
if store & CatalogItem.Customization:
self.colorOption = di.getUint8()
# Validate the stored color option; raises KeyError if it is unknown.
option = defn[FTColorOptions][self.colorOption]
def encodeDatagram(self, dg, store):
CatalogAtticItem.CatalogAtticItem.encodeDatagram(self, dg, store)
dg.addInt16(self.furnitureType)
if FurnitureTypes[self.furnitureType][FTColorOptions]:
if store & CatalogItem.Customization:
dg.addUint8(self.colorOption)
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_AlreadyOwnBiggerCloset:
return TTLocalizer.CatalogAcceptClosetError
return CatalogAtticItem.CatalogAtticItem.getAcceptItemErrorText(self, retcode)
def nextAvailableCloset(avatar, duplicateItems):
if avatar.getStyle().getGender() == 'm':
index = 0
else:
index = 1
if not hasattr(avatar, 'maxClothes'):
return None
closetIds = ClothesToCloset.get(avatar.getMaxClothes())
closetIds = list(closetIds)
closetIds.sort()
closetId = closetIds[index]
if closetId is None or closetId == MaxClosetIds[index]:
return
closetId += 2
item = CatalogFurnitureItem(closetId)
while item in avatar.onOrder or item in avatar.mailboxContents:
closetId += 2
if closetId > MaxClosetIds[index]:
return
item = CatalogFurnitureItem(closetId)
return item
def get50ItemCloset(avatar, duplicateItems):
if avatar.getStyle().getGender() == 'm':
index = 0
else:
index = 1
closetId = MaxClosetIds[index]
item = CatalogFurnitureItem(closetId)
if item in avatar.onOrder or item in avatar.mailboxContents:
return None
return item
def getMaxClosets():
items = []
for closetId in MaxClosetIds:
items.append(CatalogFurnitureItem(closetId))
return items
def getAllClosets():
items = []
for closetId in ClosetToClothes.keys():
items.append(CatalogFurnitureItem(closetId))
return items
def get50ItemTrunk(avatar, duplicateItems):
if avatar.getStyle().getGender() == 'm':
index = 0
else:
index = 1
trunkId = MaxTrunkIds[index]
item = CatalogFurnitureItem(trunkId)
if item in avatar.onOrder or item in avatar.mailboxContents:
return None
return item
def getMaxTrunks():
items = []
for trunkId in MaxTrunkIds:
items.append(CatalogFurnitureItem(trunkId))
return items
def getAllFurnitures(index):
items = []
colors = FurnitureTypes[index][FTColorOptions]
for n in range(len(colors)):
items.append(CatalogFurnitureItem(index, n))
return items
| mit |
rickmendes/ansible-modules-extras | cloud/amazon/s3_logging.py | 65 | 5644 | #!/usr/bin/python
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: s3_logging
short_description: Manage logging facility of an s3 bucket in AWS
description:
- Manage logging facility of an s3 bucket in AWS
version_added: "2.0"
author: Rob White (@wimnat)
options:
name:
description:
- "Name of the s3 bucket."
required: true
state:
description:
- "Enable or disable logging."
required: false
default: present
choices: [ 'present', 'absent' ]
target_bucket:
description:
- "The bucket to log to. Required when state=present."
required: false
default: null
target_prefix:
description:
- "The prefix that should be prepended to the generated log files written to the target_bucket."
required: false
default: ""
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
s3_logging:
name: mywebsite.com
target_bucket: mylogs
target_prefix: logs/mywebsite.com
state: present
- name: Remove logging on an s3 bucket
s3_logging:
name: mywebsite.com
state: absent
'''
try:
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location
from boto.exception import BotoServerError, S3CreateError, S3ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def compare_bucket_logging(bucket, target_bucket, target_prefix):
bucket_log_obj = bucket.get_logging_status()
if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix:
return False
else:
return True
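# Illustrative only: if the bucket already logs to target 'mylogs' with prefix
# 'logs/', compare_bucket_logging(bucket, 'mylogs', 'logs/') returns True; any
# mismatch in target or prefix returns False, which the callers below treat as
# "a change is needed".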
def enable_bucket_logging(connection, module):
bucket_name = module.params.get("name")
target_bucket = module.params.get("target_bucket")
target_prefix = module.params.get("target_prefix")
changed = False
try:
bucket = connection.get_bucket(bucket_name)
except S3ResponseError as e:
module.fail_json(msg=e.message)
try:
if not compare_bucket_logging(bucket, target_bucket, target_prefix):
# Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
try:
target_bucket_obj = connection.get_bucket(target_bucket)
except S3ResponseError as e:
if e.status == 301:
module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
else:
module.fail_json(msg=e.message)
target_bucket_obj.set_as_logging_target()
bucket.enable_logging(target_bucket, target_prefix)
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def disable_bucket_logging(connection, module):
bucket_name = module.params.get("name")
changed = False
try:
bucket = connection.get_bucket(bucket_name)
if not compare_bucket_logging(bucket, None, None):
bucket.disable_logging()
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True),
target_bucket = dict(required=False, default=None),
target_prefix = dict(required=False, default=""),
state = dict(required=False, default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
try:
connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
state = module.params.get("state")
if state == 'present':
enable_bucket_logging(connection, module)
elif state == 'absent':
disable_bucket_logging(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
EKiefer/edge-starter | py34env/Lib/site-packages/django/views/generic/dates.py | 212 | 25790 | from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils import timezone
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.views.generic.base import View
from django.views.generic.detail import (
BaseDetailView, SingleObjectTemplateResponseMixin,
)
from django.views.generic.list import (
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
class YearMixin(object):
"""
Mixin for views manipulating year-based data.
"""
year_format = '%Y'
year = None
def get_year_format(self):
"""
Get a year format string in strptime syntax to be used to parse the
year from url variables.
"""
return self.year_format
def get_year(self):
"""
Return the year for which this view should display data.
"""
year = self.year
if year is None:
try:
year = self.kwargs['year']
except KeyError:
try:
year = self.request.GET['year']
except KeyError:
raise Http404(_("No year specified"))
return year
def get_next_year(self, date):
"""
Get the next valid year.
"""
return _get_next_prev(self, date, is_previous=False, period='year')
def get_previous_year(self, date):
"""
Get the previous valid year.
"""
return _get_next_prev(self, date, is_previous=True, period='year')
def _get_next_year(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date.replace(year=date.year + 1, month=1, day=1)
def _get_current_year(self, date):
"""
Return the start date of the current interval.
"""
return date.replace(month=1, day=1)
class MonthMixin(object):
"""
Mixin for views manipulating month-based data.
"""
month_format = '%b'
month = None
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_month(self):
"""
Return the month for which this view should display data.
"""
month = self.month
if month is None:
try:
month = self.kwargs['month']
except KeyError:
try:
month = self.request.GET['month']
except KeyError:
raise Http404(_("No month specified"))
return month
def get_next_month(self, date):
"""
Get the next valid month.
"""
return _get_next_prev(self, date, is_previous=False, period='month')
def get_previous_month(self, date):
"""
Get the previous valid month.
"""
return _get_next_prev(self, date, is_previous=True, period='month')
def _get_next_month(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
if date.month == 12:
return date.replace(year=date.year + 1, month=1, day=1)
else:
return date.replace(month=date.month + 1, day=1)
def _get_current_month(self, date):
"""
Return the start date of the previous interval.
"""
return date.replace(day=1)
class DayMixin(object):
"""
Mixin for views manipulating day-based data.
"""
day_format = '%d'
day = None
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the day
from url variables.
"""
return self.day_format
def get_day(self):
"""
Return the day for which this view should display data.
"""
day = self.day
if day is None:
try:
day = self.kwargs['day']
except KeyError:
try:
day = self.request.GET['day']
except KeyError:
raise Http404(_("No day specified"))
return day
def get_next_day(self, date):
"""
Get the next valid day.
"""
return _get_next_prev(self, date, is_previous=False, period='day')
def get_previous_day(self, date):
"""
Get the previous valid day.
"""
return _get_next_prev(self, date, is_previous=True, period='day')
def _get_next_day(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=1)
def _get_current_day(self, date):
"""
Return the start date of the current interval.
"""
return date
class WeekMixin(object):
"""
Mixin for views manipulating week-based data.
"""
week_format = '%U'
week = None
def get_week_format(self):
"""
Get a week format string in strptime syntax to be used to parse the
week from url variables.
"""
return self.week_format
def get_week(self):
"""
Return the week for which this view should display data
"""
week = self.week
if week is None:
try:
week = self.kwargs['week']
except KeyError:
try:
week = self.request.GET['week']
except KeyError:
raise Http404(_("No week specified"))
return week
def get_next_week(self, date):
"""
Get the next valid week.
"""
return _get_next_prev(self, date, is_previous=False, period='week')
def get_previous_week(self, date):
"""
Get the previous valid week.
"""
return _get_next_prev(self, date, is_previous=True, period='week')
def _get_next_week(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=7 - self._get_weekday(date))
def _get_current_week(self, date):
"""
Return the start date of the current interval.
"""
return date - datetime.timedelta(self._get_weekday(date))
def _get_weekday(self, date):
"""
Return the weekday for a given date.
The first day according to the week format is 0 and the last day is 6.
"""
week_format = self.get_week_format()
if week_format == '%W': # week starts on Monday
return date.weekday()
elif week_format == '%U': # week starts on Sunday
return (date.weekday() + 1) % 7
else:
raise ValueError("unknown week format: %s" % week_format)
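# Illustrative sketch (not part of Django): the two supported week formats
# number the same day differently.
# import datetime
# d = datetime.date(2015, 7, 1)    # a Wednesday
# d.weekday()                      # 2 -> offset under '%W' (Monday-first weeks)
# (d.weekday() + 1) % 7            # 3 -> offset under '%U' (Sunday-first weeks)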
class DateMixin(object):
"""
Mixin class for views manipulating date-based data.
"""
date_field = None
allow_future = False
def get_date_field(self):
"""
Get the name of the date field to be used to filter by.
"""
if self.date_field is None:
raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__)
return self.date_field
def get_allow_future(self):
"""
Returns `True` if the view should be allowed to display objects from
the future.
"""
return self.allow_future
# Note: the following three methods only work in subclasses that also
# inherit SingleObjectMixin or MultipleObjectMixin.
@cached_property
def uses_datetime_field(self):
"""
Return `True` if the date field is a `DateTimeField` and `False`
if it's a `DateField`.
"""
model = self.get_queryset().model if self.model is None else self.model
field = model._meta.get_field(self.get_date_field())
return isinstance(field, models.DateTimeField)
def _make_date_lookup_arg(self, value):
"""
Convert a date into a datetime when the date field is a DateTimeField.
When time zone support is enabled, `date` is assumed to be in the
current time zone, so that displayed items are consistent with the URL.
"""
if self.uses_datetime_field:
value = datetime.datetime.combine(value, datetime.time.min)
if settings.USE_TZ:
value = timezone.make_aware(value, timezone.get_current_timezone())
return value
def _make_single_date_lookup(self, date):
"""
Get the lookup kwargs for filtering on a single date.
If the date field is a DateTimeField, we can't just filter on
date_field=date because that doesn't take the time into account.
"""
date_field = self.get_date_field()
if self.uses_datetime_field:
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
return {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
else:
# Skip self._make_date_lookup_arg, it's a no-op in this branch.
return {date_field: date}
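# Illustrative only: for a DateTimeField called 'pub_date' (name assumed) and
# date = datetime.date(2015, 7, 1), the branch above builds the half-open lookup
# {'pub_date__gte': <2015-07-01 00:00>, 'pub_date__lt': <2015-07-02 00:00>},
# so every moment of that day matches.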
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
"""
Abstract base class for date-based views displaying a list of objects.
"""
allow_empty = False
date_list_period = 'year'
def get(self, request, *args, **kwargs):
self.date_list, self.object_list, extra_context = self.get_dated_items()
context = self.get_context_data(object_list=self.object_list,
date_list=self.date_list)
context.update(extra_context)
return self.render_to_response(context)
def get_dated_items(self):
"""
Obtain the list of dates and items.
"""
raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')
def get_ordering(self):
"""
Returns the field or fields to use for ordering the queryset; uses the
date field by default.
"""
return '-%s' % self.get_date_field() if self.ordering is None else self.ordering
def get_dated_queryset(self, **lookup):
"""
Get a queryset properly filtered according to `allow_future` and any
extra lookup kwargs.
"""
qs = self.get_queryset().filter(**lookup)
date_field = self.get_date_field()
allow_future = self.get_allow_future()
allow_empty = self.get_allow_empty()
paginate_by = self.get_paginate_by(qs)
if not allow_future:
now = timezone.now() if self.uses_datetime_field else timezone_today()
qs = qs.filter(**{'%s__lte' % date_field: now})
if not allow_empty:
# When pagination is enabled, it's better to do a cheap query
# than to load the unpaginated queryset in memory.
is_empty = len(qs) == 0 if paginate_by is None else not qs.exists()
if is_empty:
raise Http404(_("No %(verbose_name_plural)s available") % {
'verbose_name_plural': force_text(qs.model._meta.verbose_name_plural)
})
return qs
def get_date_list_period(self):
"""
Get the aggregation period for the list of dates: 'year', 'month', or 'day'.
"""
return self.date_list_period
def get_date_list(self, queryset, date_type=None, ordering='ASC'):
"""
Get a date list by calling `queryset.dates/datetimes()`, checking
along the way for empty lists that aren't allowed.
"""
date_field = self.get_date_field()
allow_empty = self.get_allow_empty()
if date_type is None:
date_type = self.get_date_list_period()
if self.uses_datetime_field:
date_list = queryset.datetimes(date_field, date_type, ordering)
else:
date_list = queryset.dates(date_field, date_type, ordering)
if date_list is not None and not date_list and not allow_empty:
name = force_text(queryset.model._meta.verbose_name_plural)
raise Http404(_("No %(verbose_name_plural)s available") %
{'verbose_name_plural': name})
return date_list
class BaseArchiveIndexView(BaseDateListView):
"""
Base class for archives of date-based items.
Requires a response mixin.
"""
context_object_name = 'latest'
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
qs = self.get_dated_queryset()
date_list = self.get_date_list(qs, ordering='DESC')
if not date_list:
qs = qs.none()
return (date_list, qs, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
"""
Top-level archive of date-based items.
"""
template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
"""
List of objects published in a given year.
"""
date_list_period = 'month'
make_object_list = False
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_year(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
if not self.get_make_object_list():
# We need this to be a queryset since parent classes introspect it
# to find information about the model.
qs = qs.none()
return (date_list, qs, {
'year': date,
'next_year': self.get_next_year(date),
'previous_year': self.get_previous_year(date),
})
def get_make_object_list(self):
"""
Return `True` if this view should contain the full list of objects in
the given year.
"""
return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
"""
List of objects published in a given year.
"""
template_name_suffix = '_archive_year'
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
"""
List of objects published in a given month.
"""
date_list_period = 'day'
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
month = self.get_month()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_month(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
return (date_list, qs, {
'month': date,
'next_month': self.get_next_month(date),
'previous_month': self.get_previous_month(date),
})
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
"""
List of objects published in a given month.
"""
template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
"""
List of objects published in a given week.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
week = self.get_week()
date_field = self.get_date_field()
week_format = self.get_week_format()
week_start = {
'%W': '1',
'%U': '0',
}[week_format]
date = _date_from_string(year, self.get_year_format(),
week_start, '%w',
week, week_format)
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_week(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'week': date,
'next_week': self.get_next_week(date),
'previous_week': self.get_previous_week(date),
})
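# Illustrative only: for year '2015', week '27' and week_format '%U', the
# datestr '2015__0__27' is parsed with '%Y__%w__%U', i.e. "the Sunday
# ('%w' == 0) of week 27 of 2015", anchoring `date` to the start of that week.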
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
"""
List of objects published in a given week.
"""
template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
"""
List of objects published on a given day.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
return self._get_dated_items(date)
def _get_dated_items(self, date):
"""
Do the actual heavy lifting of getting the dated items; this accepts a
date object so that TodayArchiveView can be trivial.
"""
lookup_kwargs = self._make_single_date_lookup(date)
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'day': date,
'previous_day': self.get_previous_day(date),
'next_day': self.get_next_day(date),
'previous_month': self.get_previous_month(date),
'next_month': self.get_next_month(date)
})
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
"""
List of objects published on a given day.
"""
template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
"""
List of objects published today.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
return self._get_dated_items(datetime.date.today())
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
"""
List of objects published today.
"""
template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
def get_object(self, queryset=None):
"""
Get the object this request displays.
"""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
# Use a custom queryset if provided
qs = self.get_queryset() if queryset is None else queryset
if not self.get_allow_future() and date > datetime.date.today():
raise Http404(_(
"Future %(verbose_name_plural)s not available because "
"%(class_name)s.allow_future is False.") % {
'verbose_name_plural': qs.model._meta.verbose_name_plural,
'class_name': self.__class__.__name__,
},
)
# Filter down a queryset from self.queryset using the date from the
# URL. This'll get passed as the queryset to DetailView.get_object,
# which'll handle the 404
lookup_kwargs = self._make_single_date_lookup(date)
qs = qs.filter(**lookup_kwargs)
return super(BaseDetailView, self).get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
template_name_suffix = '_detail'
def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'):
"""
Helper: get a datetime.date object given a format string and a year,
month, and day (only year is mandatory). Raise a 404 for an invalid date.
"""
format = delim.join((year_format, month_format, day_format))
datestr = delim.join((year, month, day))
try:
return datetime.datetime.strptime(force_str(datestr), format).date()
except ValueError:
raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % {
'datestr': datestr,
'format': format,
})
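# Illustrative only:
# _date_from_string('2015', '%Y', '07', '%m', '01', '%d')
# joins '2015__07__01' against '%Y__%m__%d' and returns datetime.date(2015, 7, 1);
# an unparseable string raises Http404 rather than ValueError.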
def _get_next_prev(generic_view, date, is_previous, period):
"""
Helper: Get the next or the previous valid date. The idea is to allow
links on month/day views to never be 404s by never providing a date
that'll be invalid for the given view.
This is a bit complicated since it handles different intervals of time,
hence the coupling to generic_view.
However in essence the logic comes down to:
* If allow_empty and allow_future are both true, this is easy: just
return the naive result (just the next/previous day/week/month,
regardless of object existence.)
* If allow_empty is true, allow_future is false, and the naive result
isn't in the future, then return it; otherwise return None.
* If allow_empty is false and allow_future is true, return the next
date *that contains a valid object*, even if it's in the future. If
there are no next objects, return None.
* If allow_empty is false and allow_future is false, return the next
date that contains a valid object. If that date is in the future, or
if there are no next objects, return None.
"""
date_field = generic_view.get_date_field()
allow_empty = generic_view.get_allow_empty()
allow_future = generic_view.get_allow_future()
get_current = getattr(generic_view, '_get_current_%s' % period)
get_next = getattr(generic_view, '_get_next_%s' % period)
# Bounds of the current interval
start, end = get_current(date), get_next(date)
# If allow_empty is True, the naive result will be valid
if allow_empty:
if is_previous:
result = get_current(start - datetime.timedelta(days=1))
else:
result = end
if allow_future or result <= timezone_today():
return result
else:
return None
# Otherwise, we'll need to go to the database to look for an object
# whose date_field is at least (greater than/less than) the given
# naive result
else:
# Construct a lookup and an ordering depending on whether we're doing
# a previous date or a next date lookup.
if is_previous:
lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)}
ordering = '-%s' % date_field
else:
lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)}
ordering = date_field
# Filter out objects in the future if appropriate.
if not allow_future:
# Fortunately, to match the implementation of allow_future,
# we need __lte, which doesn't conflict with __lt above.
if generic_view.uses_datetime_field:
now = timezone.now()
else:
now = timezone_today()
lookup['%s__lte' % date_field] = now
qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
# Snag the first object from the queryset; if it doesn't exist that
# means there's no next/previous link available.
try:
result = getattr(qs[0], date_field)
except IndexError:
return None
# Convert datetimes to dates in the current time zone.
if generic_view.uses_datetime_field:
if settings.USE_TZ:
result = timezone.localtime(result)
result = result.date()
# Return the first day of the period.
return get_current(result)
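# Illustrative only: with period='month', the getattr lookups above resolve to
# _get_current_month/_get_next_month, so for datetime.date(2015, 7, 15) the
# current interval is [2015-07-01, 2015-08-01) and the naive next result is
# 2015-08-01.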
def timezone_today():
"""
Return the current date in the current time zone.
"""
if settings.USE_TZ:
return timezone.localtime(timezone.now()).date()
else:
return datetime.date.today()
| mit |
microelly2/cadquery-freecad-module | CadQuery/Libs/pyflakes/messages.py | 42 | 3808 | """
Provide the class Message and its subclasses.
"""
class Message(object):
message = ''
message_args = ()
def __init__(self, filename, loc):
self.filename = filename
self.lineno = loc.lineno
self.col = getattr(loc, 'col_offset', 0)
def __str__(self):
return '%s:%s: %s' % (self.filename, self.lineno,
self.message % self.message_args)
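# Illustrative only: a subclass supplies `message` and `message_args`, so for a
# node `loc` with loc.lineno == 3,
# str(UnusedImport('foo.py', loc, 'os')) == "foo.py:3: 'os' imported but unused"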
class UnusedImport(Message):
message = '%r imported but unused'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class RedefinedWhileUnused(Message):
message = 'redefinition of unused %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class RedefinedInListComp(Message):
message = 'list comprehension redefines %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportShadowedByLoopVar(Message):
message = 'import %r from line %r shadowed by loop variable'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportStarUsed(Message):
message = "'from %s import *' used; unable to detect undefined names"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
class UndefinedName(Message):
message = 'undefined name %r'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class DoctestSyntaxError(Message):
message = 'syntax error in doctest'
def __init__(self, filename, loc, position=None):
Message.__init__(self, filename, loc)
if position:
(self.lineno, self.col) = position
self.message_args = ()
class UndefinedExport(Message):
message = 'undefined name %r in __all__'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class UndefinedLocal(Message):
message = ('local variable %r (defined in enclosing scope on line %r) '
'referenced before assignment')
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class DuplicateArgument(Message):
message = 'duplicate argument %r in function definition'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class Redefined(Message):
message = 'redefinition of %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class LateFutureImport(Message):
message = 'future import(s) %r after other statements'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class UnusedVariable(Message):
"""
Indicates that a variable has been explicitly assigned to but not actually
used.
"""
message = 'local variable %r is assigned to but never used'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class ReturnWithArgsInsideGenerator(Message):
"""
Indicates a return statement with arguments inside a generator.
"""
message = '\'return\' with argument inside generator'
| lgpl-3.0 |
MingdaZhou/gnuradio | gr-digital/python/digital/qa_lfsr.py | 57 | 1377 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr, gr_unittest, digital
class test_lfsr(gr_unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_lfsr_001(self):
reglen = 8
l = digital.lfsr(1, 1, reglen)
result_data = []
for i in xrange(4*(reglen+1)):
result_data.append(l.next_bit())
expected_result = 4*([1,] + reglen*[0,])
self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_lfsr, "test_lfsr.xml")
| gpl-3.0 |
imbstack/pushmanager | tests/test_core_util.py | 6 | 5332 | #!/usr/bin/python
import copy
import datetime
import testing as T
from core.util import EscapedDict
from core.util import get_servlet_urlspec
from core.util import pretty_date
from core.util import add_to_tags_str
from core.util import del_from_tags_str
from core.util import tags_contain
from core.util import tags_str_as_set
from core.util import dict_copy_keys
from servlets.pushes import PushesServlet
class CoreUtilFunctionsTest(T.TestCase):
def test_pretty_date(self):
now = datetime.datetime.now()
five_minutes_ago = now - datetime.timedelta(minutes=5)
seven_days_ago = now - datetime.timedelta(days=7)
one_week_ago = now - datetime.timedelta(weeks=1)
one_year_ago = now - datetime.timedelta(days=370)
one_year_four_months_ago = now - datetime.timedelta(days=500)
five_years_ago = now - datetime.timedelta(days=365 * 5)
T.assert_equal(pretty_date(five_minutes_ago), "5 minutes ago")
T.assert_equal(pretty_date(one_week_ago), "1 week ago")
T.assert_equal(pretty_date(seven_days_ago), pretty_date(one_week_ago))
T.assert_equal(pretty_date(one_year_ago), "1 year ago")
T.assert_equal(pretty_date(one_year_four_months_ago), "1 year, 4 months ago")
T.assert_equal(pretty_date(five_years_ago), "5 years ago")
def test_servlet_urlspec(self):
T.assert_equal(get_servlet_urlspec(PushesServlet), (r"/pushes", PushesServlet))
def test_tags_str_as_set(self):
T.assert_equal(set(), tags_str_as_set(""))
T.assert_equal(set(), tags_str_as_set(","))
T.assert_equal(set(["A"]), tags_str_as_set("A"))
T.assert_equal(set(["A", "B"]), tags_str_as_set("A,B"))
T.assert_equal(set(["A", "B", "C"]), tags_str_as_set("A,B,C"))
T.assert_equal(set(["A", "B", "C"]), tags_str_as_set(" A ,B , C "))
def test_add_to_tags_str(self):
T.assert_equal("A", add_to_tags_str("", "A"))
T.assert_equal("A", add_to_tags_str("A", "A"))
T.assert_equal("A,B", add_to_tags_str("A", "B"))
T.assert_equal("A,B,C", add_to_tags_str("A,B", "C"))
T.assert_equal("A,B,C", add_to_tags_str("A,B,C", "C"))
T.assert_equal("A,B,C", add_to_tags_str("A", "A,B,C"))
T.assert_equal("A,B", add_to_tags_str("A", "B"))
T.assert_equal("A,B,C", add_to_tags_str("B,A", "C"))
T.assert_equal("A,B,C", add_to_tags_str("A,C,B", "C"))
T.assert_equal("A,B,C", add_to_tags_str("A", "B,A,C"))
def test_del_from_tags_str(self):
T.assert_equal("", del_from_tags_str("A", "A"))
T.assert_equal("A", del_from_tags_str("A", "C"))
T.assert_equal("A", del_from_tags_str("A", "B,C"))
T.assert_equal("A,B", del_from_tags_str("A,B", "C"))
T.assert_equal("A,B", del_from_tags_str("A,B,C", "C"))
T.assert_equal("A,B", del_from_tags_str("A,C,B", "C"))
T.assert_equal("B", del_from_tags_str("A,C,B", "C,A"))
T.assert_equal("B,C", del_from_tags_str("A,C,B", "A,A,A"))
T.assert_equal("B", del_from_tags_str("A, C , B", " C ,A"))
def test_tags_contains(self):
T.assert_raises(TypeError, tags_contain, "A,B,C", None)
T.assert_raises(AttributeError, tags_contain, None, [])
T.assert_raises(AttributeError, tags_contain, None, None)
T.assert_equal(tags_contain("", []), False)
T.assert_equal(tags_contain("A,B,C", []), False)
T.assert_equal(tags_contain("A,B,C", ["D"]), False)
T.assert_equal(tags_contain("A", ["A"]), True)
T.assert_equal(tags_contain("A,B,C", ["A"]), True)
T.assert_equal(tags_contain("A,B,C", ["B"]), True)
T.assert_equal(tags_contain("A,B,C", ["C"]), True)
T.assert_equal(tags_contain("A,B,C", ["A", "C"]), True)
T.assert_equal(tags_contain("A,B,C", ["A", "B"]), True)
T.assert_equal(tags_contain("A,B,C", ["A", "B", "C"]), True)
def test_dict_copy_keys(self):
from_dict = {
'a': 'lala',
'b': 10,
'c': {
'x': 1,
'y': 2,
},
(1, 2): 11,
}
to_dict = {
(1, 2): None,
'a': None,
'c': {'x': None}
}
orig_to_dict = copy.deepcopy(to_dict)
dict_copy_keys(to_dict, from_dict)
T.assert_equal(sorted(to_dict.keys()), sorted(orig_to_dict))
T.assert_equal(to_dict['a'], from_dict['a'])
T.assert_equal(to_dict[(1, 2)], from_dict[(1, 2)])
T.assert_equal(to_dict['c']['x'], from_dict['c']['x'])
T.assert_equal(to_dict['c'].get('y', None), None)
class CoreUtilEscapedDictTest(T.TestCase):
@T.class_setup
def setup_dictionary(self):
self.d = {
"amp": "Music & Fun!",
"gt": "A is greater than B ( A > B )"
}
self.ed = EscapedDict(self.d)
self.escaped = {
"amp": "Music & Fun!",
"gt": "A is greater than B ( A > B )"
}
def test_escape(self):
T.assert_equal(
[k for k in self.d if self.ed[k] != self.escaped[k]],
[],
"EscapedDict values doesn't match with pre-computed valued"
)
T.assert_in("&", self.ed['amp'])
T.assert_not_in(">", self.ed['gt'])
| apache-2.0 |
alexanderturner/ansible | lib/ansible/module_utils/cloud.py | 119 | 3974 | #
# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic cloud modules
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.cloud import *
The 'cloud' module provides the following common classes:
* CloudRetry
- The base class to be used by other cloud providers, in order to
provide a backoff/retry decorator based on status codes.
- Example using the AWSRetry class which inherits from CloudRetry.
@AWSRetry.backoff(tries=20, delay=2, backoff=2)
get_ec2_security_group_ids_from_names()
"""
from functools import wraps
import syslog
import time
from ansible.module_utils.pycompat24 import get_exception
class CloudRetry(object):
""" CloudRetry can be used by any cloud provider, in order to implement a
backoff algorithm/retry effect based on Status Code from Exceptions.
"""
# This is the base class of the exception.
# AWS Example botocore.exceptions.ClientError
base_class = None
@staticmethod
def status_code_from_exception(error):
""" Return the status code from the exception object
Args:
error (object): The exception itself.
"""
pass
@staticmethod
def found(response_code):
""" Return True if the Response Code to retry on was found.
Args:
response_code (str): This is the Response Code that is being matched against.
"""
pass
@classmethod
def backoff(cls, tries=10, delay=3, backoff=1.1):
""" Retry calling the Cloud decorated function using an exponential backoff.
Kwargs:
tries (int): Number of times to try (not retry) before giving up
default=10
delay (int): Initial delay between retries in seconds
default=3
backoff (float): backoff multiplier e.g. value of 2 will double the delay each retry
default=1.1
"""
def deco(f):
@wraps(f)
def retry_func(*args, **kwargs):
max_tries, max_delay = tries, delay
while max_tries > 1:
try:
return f(*args, **kwargs)
except Exception:
e = get_exception()
if isinstance(e, cls.base_class):
response_code = cls.status_code_from_exception(e)
if cls.found(response_code):
msg = "{0}: Retrying in {1} seconds...".format(str(e), max_delay)
syslog.syslog(syslog.LOG_INFO, msg)
time.sleep(max_delay)
max_tries -= 1
max_delay *= backoff
else:
# Return original exception if exception is not a ClientError
raise e
else:
# Return original exception if exception is not a ClientError
raise e
return f(*args, **kwargs)
return retry_func # true decorator
return deco
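# A minimal sketch (not part of this module) of a provider-specific subclass,
# following the AWSRetry pattern named in the module docstring; the botocore
# import and the retryable error codes are assumptions:
# import botocore.exceptions
# class AWSRetry(CloudRetry):
#     base_class = botocore.exceptions.ClientError
#     @staticmethod
#     def status_code_from_exception(error):
#         return error.response['Error']['Code']
#     @staticmethod
#     def found(response_code):
#         return response_code in ('RequestLimitExceeded', 'Throttling')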
| gpl-3.0 |
messense/shadowsocks | utils/autoban.py | 1033 | 2156 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import argparse
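# Illustrative invocation (an assumption; the canonical usage is in the README
# referenced by the argparse description below): pipe the shadowsocks server
# log into this script so repeat offenders get dropped by iptables:
#   ssserver -c /etc/shadowsocks.json 2>&1 | sudo python autoban.py --count 3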
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='See README')
parser.add_argument('-c', '--count', default=3, type=int,
help='with how many failure times it should be '
'considered as an attack')
config = parser.parse_args()
ips = {}
banned = set()
for line in sys.stdin:
if 'can not parse header when' in line:
ip = line.split()[-1].split(':')[0]
if ip not in ips:
ips[ip] = 1
print(ip)
sys.stdout.flush()
else:
ips[ip] += 1
if ip not in banned and ips[ip] >= config.count:
banned.add(ip)
cmd = 'iptables -A INPUT -s %s -j DROP' % ip
print(cmd, file=sys.stderr)
sys.stderr.flush()
os.system(cmd)
| apache-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/IPython/testing/globalipapp.py | 9 | 5151 | """Global IPython app to support test running.
We must start our own ipython object and heavily muck with it so that all the
modifications IPython makes to system behavior don't send the doctest machinery
into a fit. This code should be considered a gross hack, but it gets the job
done.
"""
from __future__ import absolute_import
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import os
import sys
# our own
from . import tools
from IPython.core import page
from IPython.utils import io
from IPython.utils import py3compat
from IPython.utils.py3compat import builtin_mod
from IPython.terminal.interactiveshell import TerminalInteractiveShell
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
class StreamProxy(io.IOStream):
"""Proxy for sys.stdout/err. This will request the stream *at call time*
allowing for nose's Capture plugin's redirection of sys.stdout/err.
Parameters
----------
name : str
The name of the stream. This will be requested anew at every call
"""
def __init__(self, name):
self.name=name
@property
def stream(self):
return getattr(sys, self.name)
def flush(self):
self.stream.flush()
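# Illustrative only: because `stream` is resolved on every attribute access, a
# proxy keeps tracking later rebindings of the real stream:
# proxy = StreamProxy('stdout')
# sys.stdout = open(os.devnull, 'w')   # e.g. nose's Capture plugin swapping it
# proxy.stream is sys.stdout           # -> True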
def get_ipython():
# This will get replaced by the real thing once we start IPython below
return start_ipython()
# A couple of methods to override those in the running IPython to interact
# better with doctest (doctest captures on raw stdout, so we need to direct
# various types of output there otherwise it will miss them).
def xsys(self, cmd):
"""Replace the default system call with a capturing one for doctest.
"""
# We use getoutput, but we need to strip it because pexpect captures
# the trailing newline differently from commands.getoutput
print(self.getoutput(cmd, split=False, depth=1).rstrip(), end='', file=sys.stdout)
sys.stdout.flush()
def _showtraceback(self, etype, evalue, stb):
"""Print the traceback purely on stdout for doctest to capture it.
"""
print(self.InteractiveTB.stb2text(stb), file=sys.stdout)
def start_ipython():
"""Start a global IPython shell, which we need for IPython-specific syntax.
"""
global get_ipython
# This function should only ever run once!
if hasattr(start_ipython, 'already_called'):
return
start_ipython.already_called = True
# Store certain global objects that IPython modifies
_displayhook = sys.displayhook
_excepthook = sys.excepthook
_main = sys.modules.get('__main__')
# Create custom argv and namespaces for our IPython to be test-friendly
config = tools.default_config()
# Create and initialize our test-friendly IPython instance.
shell = TerminalInteractiveShell.instance(config=config)
# A few more tweaks needed for playing nicely with doctests...
# remove history file
shell.tempfiles.append(config.HistoryManager.hist_file)
# These traps are normally only active for interactive use, set them
# permanently since we'll be mocking interactive sessions.
shell.builtin_trap.activate()
# Modify the IPython system call with one that uses getoutput, so that we
# can capture subcommands and print them to Python's stdout, otherwise the
# doctest machinery would miss them.
shell.system = py3compat.MethodType(xsys, shell)
shell._showtraceback = py3compat.MethodType(_showtraceback, shell)
# IPython is ready, now clean up some global state...
# Deactivate the various python system hooks added by ipython for
# interactive convenience so we don't confuse the doctest system
sys.modules['__main__'] = _main
sys.displayhook = _displayhook
sys.excepthook = _excepthook
# So that ipython magics and aliases can be doctested (they work by making
# a call into a global _ip object). Also make the top-level get_ipython
# now return this without recursively calling here again.
_ip = shell
get_ipython = _ip.get_ipython
builtin_mod._ip = _ip
builtin_mod.get_ipython = get_ipython
# To avoid extra IPython messages during testing, suppress io.stdout/stderr
io.stdout = StreamProxy('stdout')
io.stderr = StreamProxy('stderr')
# Override paging, so we don't require user interaction during the tests.
def nopage(strng, start=0, screen_lines=0, pager_cmd=None):
print(strng)
page.orig_page = page.pager_page
page.pager_page = nopage
return _ip
| artistic-2.0 |
uelei/api_cluster | tests/test_utils.py | 1 | 4153 | from datetime import datetime
from back.utils import parse_float, parse_int, parse_number, parse_string, parse_date, parse_list, preprocess_date, \
replace_field_space, check_if_list, convert_list, get_key_value, process_data
def test_parse_float():
value = parse_float("10.2")
assert value == 10.2
value = parse_float("-810.2")
assert value == -810.2
value = parse_float("2")
assert value == 2
value = parse_float("ASD10.2")
assert value == 10.2
value = parse_float("-810.2DASDA")
assert value == -810.2
value = parse_float("dasas2das")
assert value == 2
value = parse_float("dasas11.2das1")
assert value == 11.2
def test_parse_int():
value = parse_int("10")
assert value == 10
value = parse_int("-810")
assert value == -810
value = parse_int("ASD102")
assert value == 102
value = parse_int("-8102DASDA")
assert value == -8102
value = parse_int("dasas2das")
assert value == 2
value = parse_int("dasas11das1")
assert value == 11
def test_parse_number():
value = parse_number("10.2")
assert value == '10.2'
value = parse_number("-810.2")
assert value == '-810.2'
value = parse_number("2")
assert value == '2'
value = parse_number("ASD10.2")
assert value == '10.2'
value = parse_number("-810.2DASDA")
assert value == '-810.2'
value = parse_number("dasas2das")
assert value == '2'
value = parse_number("dasas11.2das1")
assert value == '11.2'
def test_parse_string():
value = parse_string("10.2")
assert value == "10.2"
def test_parse_date():
value = parse_date("2017-12-11T12:11:22")
assert value == datetime(2017, 12, 11, 12, 11, 22)
def test_parse_list():
value = parse_list(["1", "2"])
assert value == '["1", "2"]'
def test_preprocess_date():
value = preprocess_date("UTC_Time: 2016-10-4 16:47:50;")
assert value == "UTC_Time: 2016-10-4_16__47__50;"
def test_replace_field_space():
value = replace_field_space("UTC Time FFT Re FFT Img WiFi Strength")
assert value == 'UTC_Time FFT_Re FFT_Img WiFi_Strength'
def test_check_if_list():
assert check_if_list("q;t;q")
assert not check_if_list("1;")
def test_convert_list():
value = convert_list("3;4;1;2;-43")
assert value == ["3", "4", "1", "2", "-43"]
def test_get_key_value():
value = get_key_value("ID=12;")
assert value == ("ID", "12")
value = get_key_value("123")
assert value == ("", "123")
def test_process_data():
text = """Device: ID=1; Fw=16071801; Evt=1; Alarms: CoilRevesed=OFF; Power: Active=1832W; Reactive=279var; Appearent=403VA; Line: Current=7.50400019; Voltage=230.08V; Phase=-43,841rad; Peaks: 14.3940001;14.420999499999999;14.46;14.505999599999999;14.1499996;13.925999599999999;13.397999800000003;13.0539999;13.020999900000001;13.074000400000001; FFT Re: 10263;13;145;-13;943;-19;798;0;237; FFT Img: 1465;6;-818;13;1115;6;706;19;699; UTC Time: 2016-10-4 16:47:50; hz: 49.87; WiFi Strength: -62; Dummy: 20"""
value = process_data(text)
print(value)
assert value == {
'WiFi_Strength': '-62',
'DeviceID': '1',
'hz': '49.87',
'DeviceFw': '16071801',
'UTC_Time': '2016-10-04T16:47:50',
'PowerActive': '1832W',
'DeviceEvt': '1',
'PowerAppearent': '403VA',
'PowerReactive': '279var',
'Dummy': '20',
'LineVoltage': '230.08V',
'AlarmsCoilRevesed': 'OFF',
'Peaks': ['14.3940001',
'14.420999499999999',
'14.46',
'14.505999599999999',
'14.1499996',
'13.925999599999999',
'13.397999800000003',
'13.0539999',
'13.020999900000001',
'13.074000400000001'],
'LineCurrent': '7.50400019',
'LinePhase': '-43,841rad',
'FFT_Img': [
'1465', '6', '-818', '13', '1115', '6', '706', '19', '699'],
'FFT_Re': ['10263', '13', '145', '-13', '943', '-19', '798', '0', '237']
}
| mit |
napalm-automation/napalm-nxos | test/unit_ssh/conftest.py | 1 | 1648 | """Test fixtures."""
from builtins import super
import pytest
from napalm_base.test import conftest as parent_conftest
from napalm_base.test.double import BaseTestDouble
from napalm_base.utils import py23_compat
from napalm_nxos_ssh import nxos_ssh
@pytest.fixture(scope='class')
def set_device_parameters(request):
"""Set up the class."""
def fin():
request.cls.device.close()
request.addfinalizer(fin)
request.cls.driver = nxos_ssh.NXOSSSHDriver
request.cls.patched_driver = PatchedNXOSSSHDriver
request.cls.vendor = 'nxos_ssh'
parent_conftest.set_device_parameters(request)
def pytest_generate_tests(metafunc):
"""Generate test cases dynamically."""
parent_conftest.pytest_generate_tests(metafunc, __file__)
class PatchedNXOSSSHDriver(nxos_ssh.NXOSSSHDriver):
"""Patched NXOS Driver."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
super().__init__(hostname, username, password, timeout, optional_args)
self.patched_attrs = ['device']
self.device = FakeNXOSSSHDevice()
def disconnect(self):
pass
def is_alive(self):
return {
'is_alive': True # In testing everything works..
}
def open(self):
pass
class FakeNXOSSSHDevice(BaseTestDouble):
"""NXOS device test double."""
def send_command(self, command, **kwargs):
filename = '{}.txt'.format(self.sanitize_text(command))
full_path = self.find_file(filename)
result = self.read_txt_file(full_path)
return py23_compat.text_type(result)
def disconnect(self):
pass
| apache-2.0 |
lduarte1991/edx-platform | openedx/core/djangoapps/dark_lang/migrations/0001_initial.py | 86 | 1203 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DarkLangConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('released_languages', models.TextField(help_text=b'A comma-separated list of language codes to release to the public.', blank=True)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
| agpl-3.0 |
OpenLD/enigma2-wetek | tools/genmetaindex.py | 155 | 1104 | # usage: genmetaindex.py <xml-files> > index.xml
import sys, os
from xml.etree.ElementTree import ElementTree, Element
root = Element("index")
for file in sys.argv[1:]:
p = ElementTree()
p.parse(file)
package = Element("package")
package.set("details", os.path.basename(file))
# we need all prerequisites
package.append(p.find("prerequisites"))
info = None
# we need some of the info, but not all
for i in p.findall("info"):
if not info:
info = i
assert info
for i in info[:]:
if i.tag not in ["name", "packagename", "packagetype", "shortdescription"]:
info.remove(i)
for i in info[:]:
package.set(i.tag, i.text)
root.append(package)
def indent(elem, level=0):
i = "\n" + level*"\t"
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + "\t"
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
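# Illustrative effect of indent(): it rewrites .text/.tail whitespace so the
# serialized tree is pretty-printed with one tab per nesting level, e.g.
#     <index>
#         <package details="..."/>
#     </index>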
indent(root)
ElementTree(root).write(sys.stdout)
| gpl-2.0 |
wimoverwater/Sick-Beard | sickbeard/encodingKludge.py | 1 | 1282 | import os
import os.path
from sickbeard import logger
# This module tries to deal with the apparently random behavior of python when dealing with unicode <-> utf-8
# encodings. It tries to just use unicode, but if that fails then it tries forcing it to utf-8. Any functions
# which return something should always return unicode.
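# A minimal sketch of the intended usage (illustrative only; the path is
# made up):
#
#     import os
#     names = ek(os.listdir, u'/some/path')   # result comes back as unicode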
def fixStupidEncodings(x):
if type(x) == str:
try:
return x.decode('utf-8')
except UnicodeDecodeError:
logger.log(u"Unable to decode value: "+str(repr(x)), logger.ERROR)
return None
elif type(x) == unicode:
return x
else:
logger.log(u"Unknown value passed in, ignoring it: "+str(type(x)), logger.ERROR)
return None
return None
def fixListEncodings(x):
if type(x) != list:
return x
else:
return filter(lambda x: x != None, map(fixStupidEncodings, x))
def ek(func, *args):
result = None
if os.name == 'nt':
result = func(*args)
else:
result = func(*[x.encode('UTF-8') for x in args])
if type(result) == list:
return fixListEncodings(result)
elif type(result) == str:
return fixStupidEncodings(result)
else:
return result
| gpl-3.0 |
rfguri/vimfiles | bundle/ycm/third_party/ycmd/third_party/JediHTTP/vendor/jedi/jedi/debug.py | 1 | 3648 | from jedi._compatibility import encoding, is_py3, u
import inspect
import os
import time
def _lazy_colorama_init():
"""
Lazily init colorama if necessary, not to screw up stdout if debug is not
enabled.
This version of the function does nothing.
"""
pass
_inited = False
try:
if os.name == 'nt':
# Does not work on Windows, as pyreadline and colorama interfere
raise ImportError
else:
# Use colorama for nicer console output.
from colorama import Fore, init
from colorama import initialise
def _lazy_colorama_init():
"""
Lazily init colorama if necessary, not to screw up stdout if
debug is not enabled.
This version of the function does init colorama.
"""
global _inited
if not _inited:
# pytest resets the stream at the end - causes troubles. Since
# after every output the stream is reset automatically we don't
# need this.
initialise.atexit_done = True
try:
init()
except Exception:
# Colorama fails with initializing under vim and is buggy in
# version 0.3.6.
pass
_inited = True
except ImportError:
class Fore(object):
RED = ''
GREEN = ''
YELLOW = ''
MAGENTA = ''
RESET = ''
NOTICE = object()
WARNING = object()
SPEED = object()
enable_speed = False
enable_warning = False
enable_notice = False
# callback, interface: level, str
debug_function = None
ignored_modules = ['jedi.parser']
_debug_indent = 0
_start_time = time.time()
def reset_time():
global _start_time, _debug_indent
_start_time = time.time()
_debug_indent = 0
def increase_indent(func):
"""Decorator for makin """
def wrapper(*args, **kwargs):
global _debug_indent
_debug_indent += 1
try:
return func(*args, **kwargs)
finally:
_debug_indent -= 1
return wrapper
def dbg(message, *args, **kwargs):
""" Looks at the stack, to see if a debug message should be printed. """
if kwargs:
# Python 2 compatibility, because it doesn't understand default args
# after *args.
color = kwargs.get('color')
if color is None:
raise TypeError("debug.dbg doesn't support more named arguments than color")
else:
color = 'GREEN'
if debug_function and enable_notice:
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
if not (mod.__name__ in ignored_modules):
i = ' ' * _debug_indent
_lazy_colorama_init()
debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args))
def warning(message, *args):
if debug_function and enable_warning:
i = ' ' * _debug_indent
debug_function('RED', i + 'warning: ' + message % tuple(u(repr(a)) for a in args))
def speed(name):
if debug_function and enable_speed:
now = time.time()
i = ' ' * _debug_indent
debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time))
def print_to_stdout(color, str_out):
"""
The default debug function that prints to standard out.
:param str color: A string that is an attribute of ``colorama.Fore``.
"""
col = getattr(Fore, color)
_lazy_colorama_init()
if not is_py3:
str_out = str_out.encode(encoding, 'replace')
print(col + str_out + Fore.RESET)
# debug_function = print_to_stdout
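# A minimal sketch of enabling output (illustrative; uses only names defined
# above):
#
#     import jedi.debug as debug
#     debug.debug_function = debug.print_to_stdout
#     debug.enable_notice = True
#     debug.dbg('value: %s', 42)   # prints something like "dbg: value: 42"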
| mit |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/djcelery/mon.py | 10 | 2272 | from __future__ import absolute_import, unicode_literals
import os
import sys
import types
from celery.app.defaults import strtobool
from celery.utils import import_from_cwd
DEFAULT_APPS = ('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.admindocs',
'djcelery')
DEFAULTS = {'ROOT_URLCONF': 'djcelery.monproj.urls',
'DATABASE_ENGINE': 'sqlite3',
'DATABASE_NAME': 'djcelerymon.db',
'DATABASES': {'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'djcelerymon.db'}},
'BROKER_URL': 'amqp://',
'SITE_ID': 1,
'INSTALLED_APPS': DEFAULT_APPS,
'DEBUG': strtobool(os.environ.get('DJCELERYMON_DEBUG', '0'))}
def default_settings(name='__default_settings__'):
c = type(name, (types.ModuleType, ), DEFAULTS)(name)
c.__dict__.update({'__file__': __file__})
sys.modules[name] = c
return name
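# A quick sketch of what default_settings() produces (illustrative):
#
#     name = default_settings()
#     settings_mod = sys.modules[name]   # synthetic module built from DEFAULTS
#     assert settings_mod.SITE_ID == 1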
def configure():
from celery import current_app
from celery.loaders.default import DEFAULT_CONFIG_MODULE
from django.conf import settings
app = current_app
conf = {}
if not settings.configured:
if 'loader' in app.__dict__ and app.loader.configured:
conf = current_app.loader.conf
else:
os.environ.pop('CELERY_LOADER', None)
settings_module = os.environ.get('CELERY_CONFIG_MODULE',
DEFAULT_CONFIG_MODULE)
try:
import_from_cwd(settings_module)
except ImportError:
settings_module = default_settings()
settings.configure(SETTINGS_MODULE=settings_module,
**dict(DEFAULTS, **conf))
def run_monitor(argv):
from .management.commands import djcelerymon
djcelerymon.Command().run_from_argv([argv[0], 'djcelerymon'] + argv[1:])
def main(argv=sys.argv):
from django.core import management
os.environ['CELERY_LOADER'] = 'default'
configure()
management.call_command('syncdb')
run_monitor(argv)
if __name__ == '__main__':
main()
| agpl-3.0 |
sunny94/temp | sympy/physics/vector/tests/test_point.py | 79 | 3549 | from sympy.physics.vector import dynamicsymbols, Point, ReferenceFrame
def test_point_v1pt_theorys():
q, q2 = dynamicsymbols('q q2')
qd, q2d = dynamicsymbols('q q2', 1)
qdd, q2dd = dynamicsymbols('q q2', 2)
N = ReferenceFrame('N')
B = ReferenceFrame('B')
B.set_ang_vel(N, qd * B.z)
O = Point('O')
P = O.locatenew('P', B.x)
P.set_vel(B, 0)
O.set_vel(N, 0)
assert P.v1pt_theory(O, N, B) == qd * B.y
O.set_vel(N, N.x)
assert P.v1pt_theory(O, N, B) == N.x + qd * B.y
P.set_vel(B, B.z)
assert P.v1pt_theory(O, N, B) == B.z + N.x + qd * B.y
def test_point_a1pt_theorys():
q, q2 = dynamicsymbols('q q2')
qd, q2d = dynamicsymbols('q q2', 1)
qdd, q2dd = dynamicsymbols('q q2', 2)
N = ReferenceFrame('N')
B = ReferenceFrame('B')
B.set_ang_vel(N, qd * B.z)
O = Point('O')
P = O.locatenew('P', B.x)
P.set_vel(B, 0)
O.set_vel(N, 0)
assert P.a1pt_theory(O, N, B) == -(qd**2) * B.x + qdd * B.y
P.set_vel(B, q2d * B.z)
assert P.a1pt_theory(O, N, B) == -(qd**2) * B.x + qdd * B.y + q2dd * B.z
O.set_vel(N, q2d * B.x)
assert P.a1pt_theory(O, N, B) == ((q2dd - qd**2) * B.x + (q2d * qd + qdd) * B.y +
q2dd * B.z)
def test_point_v2pt_theorys():
q = dynamicsymbols('q')
qd = dynamicsymbols('q', 1)
N = ReferenceFrame('N')
B = N.orientnew('B', 'Axis', [q, N.z])
O = Point('O')
P = O.locatenew('P', 0)
O.set_vel(N, 0)
assert P.v2pt_theory(O, N, B) == 0
P = O.locatenew('P', B.x)
assert P.v2pt_theory(O, N, B) == (qd * B.z ^ B.x)
O.set_vel(N, N.x)
assert P.v2pt_theory(O, N, B) == N.x + qd * B.y
def test_point_a2pt_theorys():
q = dynamicsymbols('q')
qd = dynamicsymbols('q', 1)
qdd = dynamicsymbols('q', 2)
N = ReferenceFrame('N')
B = N.orientnew('B', 'Axis', [q, N.z])
O = Point('O')
P = O.locatenew('P', 0)
O.set_vel(N, 0)
assert P.a2pt_theory(O, N, B) == 0
P.set_pos(O, B.x)
assert P.a2pt_theory(O, N, B) == (-qd**2) * B.x + (qdd) * B.y
def test_point_funcs():
q, q2 = dynamicsymbols('q q2')
qd, q2d = dynamicsymbols('q q2', 1)
qdd, q2dd = dynamicsymbols('q q2', 2)
N = ReferenceFrame('N')
B = ReferenceFrame('B')
B.set_ang_vel(N, 5 * B.y)
O = Point('O')
P = O.locatenew('P', q * B.x)
assert P.pos_from(O) == q * B.x
P.set_vel(B, qd * B.x + q2d * B.y)
assert P.vel(B) == qd * B.x + q2d * B.y
O.set_vel(N, 0)
assert O.vel(N) == 0
assert P.a1pt_theory(O, N, B) == ((-25 * q + qdd) * B.x + (q2dd) * B.y +
(-10 * qd) * B.z)
B = N.orientnew('B', 'Axis', [q, N.z])
O = Point('O')
P = O.locatenew('P', 10 * B.x)
O.set_vel(N, 5 * N.x)
assert O.vel(N) == 5 * N.x
assert P.a2pt_theory(O, N, B) == (-10 * qd**2) * B.x + (10 * qdd) * B.y
B.set_ang_vel(N, 5 * B.y)
O = Point('O')
P = O.locatenew('P', q * B.x)
P.set_vel(B, qd * B.x + q2d * B.y)
O.set_vel(N, 0)
assert P.v1pt_theory(O, N, B) == qd * B.x + q2d * B.y - 5 * q * B.z
def test_point_pos():
q = dynamicsymbols('q')
N = ReferenceFrame('N')
B = N.orientnew('B', 'Axis', [q, N.z])
O = Point('O')
P = O.locatenew('P', 10 * N.x + 5 * B.x)
assert P.pos_from(O) == 10 * N.x + 5 * B.x
Q = P.locatenew('Q', 10 * N.y + 5 * B.y)
assert Q.pos_from(P) == 10 * N.y + 5 * B.y
assert Q.pos_from(O) == 10 * N.x + 10 * N.y + 5 * B.x + 5 * B.y
assert O.pos_from(Q) == -10 * N.x - 10 * N.y - 5 * B.x - 5 * B.y
| bsd-3-clause |
PriceChild/ansible | lib/ansible/modules/network/nxos/nxos_vtp_domain.py | 19 | 5963 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vtp_domain
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP domain configuration.
description:
- Manages VTP domain configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP domain names.
- VTP domain names are case-sensitive.
- If it's never been configured before, VTP version is set to 1 by default.
Otherwise, it leaves the previous configured version untouched.
Use M(nxos_vtp_version) to change it.
- Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_version)
to fully manage VTP operations.
options:
domain:
description:
- VTP domain name.
required: true
'''
EXAMPLES = '''
# ENSURE VTP DOMAIN IS CONFIGURED
- nxos_vtp_domain:
domain: ntc
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"domain": "ntc"}
existing:
description:
- k/v pairs of existing vtp domain
type: dict
sample: {"domain": "testing", "version": "2", "vtp_password": "\"}
end_state:
description: k/v pairs of vtp domain after module execution
returned: always
type: dict
sample: {"domain": "ntc", "version": "2", "vtp_password": "\"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp domain ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'status' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_vtp_config(module):
command = 'show vtp status'
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
vtp_parsed = {}
if body:
version_regex = '.*VTP version running\s+:\s+(?P<version>\d).*'
domain_regex = '.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
try:
match_version = re.match(version_regex, body, re.DOTALL)
version = match_version.groupdict()['version']
except AttributeError:
version = ''
try:
match_domain = re.match(domain_regex, body, re.DOTALL)
domain = match_domain.groupdict()['domain']
except AttributeError:
domain = ''
if domain and version:
vtp_parsed['domain'] = domain
vtp_parsed['version'] = version
vtp_parsed['vtp_password'] = get_vtp_password(module)
return vtp_parsed
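# For reference, a minimal sketch of the 'show vtp status' text the regexes
# above expect (illustrative sample, not captured from a real device):
#
#     VTP version running             : 2
#     VTP Domain Name                 : ntc
#
# which get_vtp_config() would parse into something like
#     {'domain': 'ntc', 'version': '2', 'vtp_password': '...'}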
def get_vtp_password(module):
command = 'show vtp password'
body = execute_show_command(command, module)[0]
password = body['passwd']
if password:
return str(password)
else:
return ""
def main():
argument_spec = dict(
domain=dict(type='str', required=True),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
domain = module.params['domain']
existing = get_vtp_config(module)
end_state = existing
args = dict(domain=domain)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if delta:
commands.append(['vtp domain {0}'.format(domain)])
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_vtp_config(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
aapav01/android_kernel_samsung_j7elte | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
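# A worked sketch of add_stats() (illustrative). Note that the "avg" slot is
# a running average of the previous average and the new value, not an exact
# mean over all samples:
#
#     d = {}
#     add_stats(d, "latency", 10)   # d["latency"] == (10, 10, 10, 1)
#     add_stats(d, "latency", 30)   # d["latency"] == (10, 30, 20, 2)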
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
marty331/jakesclock | flask/lib/python2.7/site-packages/pytz/exceptions.py | 657 | 1333 | '''
Custom exceptions raised by pytz.
'''
__all__ = [
'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
'NonExistentTimeError',
]
class UnknownTimeZoneError(KeyError):
'''Exception raised when pytz is passed an unknown timezone.
>>> isinstance(UnknownTimeZoneError(), LookupError)
True
This class is actually a subclass of KeyError to provide backwards
compatibility with code relying on the undocumented behavior of earlier
pytz releases.
>>> isinstance(UnknownTimeZoneError(), KeyError)
True
'''
pass
class InvalidTimeError(Exception):
'''Base class for invalid time exceptions.'''
class AmbiguousTimeError(InvalidTimeError):
'''Exception raised when attempting to create an ambiguous wallclock time.
At the end of a DST transition period, a particular wallclock time will
occur twice (once before the clocks are set back, once after). Both
possibilities may be correct, unless further information is supplied.
See DstTzInfo.normalize() for more info
'''
class NonExistentTimeError(InvalidTimeError):
'''Exception raised when attempting to create a wallclock time that
cannot exist.
At the start of a DST transition period, the wallclock time jumps forward.
The instants jumped over never occur.
'''
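# A minimal sketch of how these surface in practice (illustrative; the zone
# name is deliberately bogus):
#
#     import pytz
#     try:
#         pytz.timezone('Mars/Olympus_Mons')
#     except pytz.exceptions.UnknownTimeZoneError as exc:
#         print(exc)   # reports the unknown zone name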
| gpl-2.0 |
Jgarcia-IAS/localizacion | openerp/addons-extra/odoo-pruebas/odoo-server/addons/purchase_requisition/__init__.py | 378 | 1072 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_requisition
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yfried/ansible | lib/ansible/modules/system/puppet.py | 22 | 9376 | #!/usr/bin/python
# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner.
version_added: "2.0"
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
modulepath:
description:
- Path to an alternate location for puppet modules.
version_added: "2.4"
manifest:
description:
- Path to the manifest file to run puppet apply on.
facts:
description:
- A dict of values to pass in as persistent external facter facts.
facter_basename:
description:
- Basename of the facter output file.
default: ansible
environment:
description:
- Puppet environment to be used.
logdest:
description: |
Where the puppet logs should go, if puppet apply is being used. C(all)
will go to both C(stdout) and C(syslog).
choices: [ stdout, syslog, all ]
default: stdout
version_added: "2.1"
certname:
description:
- The name to use when handling certificates.
version_added: "2.1"
tags:
description:
- A comma-separated list of puppet tags to be used.
version_added: "2.1"
execute:
description:
- Execute a specific piece of Puppet code.
- It has no effect with a puppetmaster.
version_added: "2.1"
summarize:
description:
- Whether to print a transaction summary
version_added: "2.7"
verbose:
description:
- Print extra information
version_added: "2.7"
debug:
description:
- Enable full debugging
version_added: "2.7"
requirements:
- puppet
author:
- Monty Taylor (@emonty)
'''
EXAMPLES = '''
- name: Run puppet agent and fail if anything goes wrong
puppet:
- name: Run puppet and timeout in 5 minutes
puppet:
timeout: 5m
- name: Run puppet using a different environment
puppet:
environment: testing
- name: Run puppet using a specific certname
puppet:
certname: agent01.example.com
- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster
puppet:
execute: include ::mymodule
- name: Run puppet using a specific tags
puppet:
tags: update,nginx
- name: Run a manifest with debug, log to both syslog and stdout, specify module path
puppet:
modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
logdest: all
manifest: /var/lib/example/puppet_step_config.pp
'''
import json
import os
import pipes
import stat
from ansible.module_utils.basic import AnsibleModule
def _get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
else:
return os.path.expanduser('~/.facter/facts.d')
def _write_structured_data(basedir, basename, data):
if not os.path.exists(basedir):
os.makedirs(basedir)
file_path = os.path.join(basedir, "{0}.json".format(basename))
# This is more complex than you might normally expect because we want to
# open the file with only u+rw set. Also, we use the stat constants
# because ansible still supports python 2.4 and the octal syntax changed
out_file = os.fdopen(
os.open(
file_path, os.O_CREAT | os.O_WRONLY,
stat.S_IRUSR | stat.S_IWUSR), 'w')
out_file.write(json.dumps(data).encode('utf8'))
out_file.close()
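# A minimal sketch of the resulting fact file (illustrative; the path and
# payload are made up):
#
#     _write_structured_data('/etc/facter/facts.d', 'ansible', {'role': 'web'})
#     # -> /etc/facter/facts.d/ansible.json (mode u+rw) containing:
#     #    {"role": "web"}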
def main():
module = AnsibleModule(
argument_spec=dict(
timeout=dict(type='str', default='30m'),
puppetmaster=dict(type='str'),
modulepath=dict(type='str'),
manifest=dict(type='str'),
logdest=dict(type='str', default='stdout', choices=['stdout',
'syslog',
'all']),
# internal code to work with --diff, do not use
show_diff=dict(type='bool', default=False, aliases=['show-diff']),
facts=dict(type='dict'),
facter_basename=dict(type='str', default='ansible'),
environment=dict(type='str'),
certname=dict(type='str'),
tags=dict(type='list'),
execute=dict(type='str'),
summarize=dict(type='bool', default=False),
debug=dict(type='bool', default=False),
verbose=dict(type='bool', default=False),
),
supports_check_mode=True,
mutually_exclusive=[
('puppetmaster', 'manifest'),
('puppetmaster', 'manifest', 'execute'),
('puppetmaster', 'modulepath')
],
)
p = module.params
global PUPPET_CMD
PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
if not PUPPET_CMD:
module.fail_json(
msg="Could not find puppet. Please ensure it is installed.")
global TIMEOUT_CMD
TIMEOUT_CMD = module.get_bin_path("timeout", False)
if p['manifest']:
if not os.path.exists(p['manifest']):
module.fail_json(
msg="Manifest file %(manifest)s not found." % dict(
manifest=p['manifest']))
# Check if puppet is disabled here
if not p['manifest']:
rc, stdout, stderr = module.run_command(
PUPPET_CMD + " config print agent_disabled_lockfile")
if os.path.exists(stdout.strip()):
module.fail_json(
msg="Puppet agent is administratively disabled.",
disabled=True)
elif rc != 0:
module.fail_json(
msg="Puppet agent state could not be determined.")
if module.params['facts'] and not module.check_mode:
_write_structured_data(
_get_facter_dir(),
module.params['facter_basename'],
module.params['facts'])
if TIMEOUT_CMD:
base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
timeout_cmd=TIMEOUT_CMD,
timeout=pipes.quote(p['timeout']),
puppet_cmd=PUPPET_CMD)
else:
base_cmd = PUPPET_CMD
if not p['manifest'] and not p['execute']:
cmd = ("%(base_cmd)s agent --onetime"
" --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
" --detailed-exitcodes --verbose --color 0") % dict(base_cmd=base_cmd)
if p['puppetmaster']:
cmd += " --server %s" % pipes.quote(p['puppetmaster'])
if p['show_diff']:
cmd += " --show_diff"
if p['environment']:
cmd += " --environment '%s'" % p['environment']
if p['tags']:
cmd += " --tags '%s'" % ','.join(p['tags'])
if p['certname']:
cmd += " --certname='%s'" % p['certname']
if module.check_mode:
cmd += " --noop"
else:
cmd += " --no-noop"
else:
cmd = "%s apply --detailed-exitcodes " % base_cmd
if p['logdest'] == 'syslog':
cmd += "--logdest syslog "
if p['logdest'] == 'all':
cmd += " --logdest syslog --logdest stdout"
if p['modulepath']:
cmd += "--modulepath='%s'" % p['modulepath']
if p['environment']:
cmd += "--environment '%s' " % p['environment']
if p['certname']:
cmd += " --certname='%s'" % p['certname']
if p['tags']:
cmd += " --tags '%s'" % ','.join(p['tags'])
if module.check_mode:
cmd += "--noop "
else:
cmd += "--no-noop "
if p['execute']:
cmd += " --execute '%s'" % p['execute']
else:
cmd += pipes.quote(p['manifest'])
if p['summarize']:
cmd += " --summarize"
if p['debug']:
cmd += " --debug"
if p['verbose']:
cmd += " --verbose"
rc, stdout, stderr = module.run_command(cmd)
if rc == 0:
# success
module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
elif rc == 1:
# rc==1 could be because it's disabled
# rc==1 could also mean there was a compilation failure
disabled = "administratively disabled" in stdout
if disabled:
msg = "puppet is disabled"
else:
msg = "puppet did not run"
module.exit_json(
rc=rc, disabled=disabled, msg=msg,
error=True, stdout=stdout, stderr=stderr)
elif rc == 2:
# success with changes
module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
elif rc == 124:
# timeout
module.exit_json(
rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
else:
# failure
module.fail_json(
rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
stdout=stdout, stderr=stderr)
if __name__ == '__main__':
main()
| gpl-3.0 |
pkulev/xoinvader | xoinvader/gui.py | 1 | 11966 | """Graphical user interface widgets."""
from typing import Callable, Optional, List, Tuple, Generator
from eaf import Timer
from xo1 import Surface, Renderable
from xoinvader.style import Style
from xoinvader.utils import (
InfiniteList,
Point,
)
class TextWidget(Renderable):
"""Simple text widget.
:param pos: widget's global position
:param text: contained text
:param style: curses style for text
"""
render_priority = 1
draw_on_border = True
def __init__(self, pos: Point, text: str, style: int = None):
super().__init__(pos)
self._text = text
self._style = style
self._image = self._make_image()
def _make_image(self) -> Surface:
"""Make Surface object from text and style.
:return: Surface instance
:rtype: `xoinvader.utils.Surface`
"""
_style = self._style or Style().gui["normal"]
return Surface(
[self._text], [[_style] * len(self._text)], ["B" * len(self._text)],
)
def update(
self, dt: int, text: Optional[str] = None, style: Optional[int] = None
):
"""Obtain (or not) new data and refresh image.
:param text: new text
:param style: new style
"""
if text:
self._text = text
if style:
self._style = style
if text or style:
self._image = self._make_image()
# TODO XXX FIXME: [proper-gui-hierarchy]
# This mess was born for one purpose: to make it easy to update the
# score string via a callback. Reimplement TextWidget to support
# callbacks too.
class TextCallbackWidget(TextWidget):
"""Simple text widget with callback.
:param pos: widget's global position
:param text: contained text
:param style: curses style for text
"""
def __init__(
self, pos: Point, callback: Callable, style: Optional[int] = None
):
self._callback = callback
super(TextCallbackWidget, self).__init__(pos, callback(), style)
def update(self, dt):
text = self._callback()
if self._text != text:
self._text = text
self._image = self._make_image()
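# A minimal usage sketch (illustrative; `player` is a made-up object):
#
#     score_widget = TextCallbackWidget(
#         Point(0, 0), lambda: "Score: %d" % player.score)
#     # every update() re-reads the callback and refreshes the image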
class MenuItemWidget(TextWidget):
"""Selectable menu item widget.
:param pos: widget's global position
:type pos: `xoinvader.utils.Point`
:param text: contained text
:type text: string
:param template: left and right markers
:type template: tuple of two strings
.. note:: add [ [style] ... ] support
:param style: curses style for text
:type style: integer(curses style)
"""
render_priority = 1
def __init__(
self,
pos: Point,
text: str,
action: Optional[Callable] = None,
template: Tuple[str, str] = ("* ", " *"),
style=None,
align_left=True,
):
self._action = action
self._left = template[0]
self._right = template[1]
self._selected = False
self._align_left = align_left
super(MenuItemWidget, self).__init__(pos, text, style)
def _make_image(self) -> Surface:
"""Make Surface object from text, markers and style."""
_style = self._style or Style().gui["yellow"]
if self._selected:
_full_text = "".join([self._left, self._text, self._right])
else:
if self._align_left:
_full_text = "".join([" " * len(self._left), self._text])
else:
_full_text = self._text
return Surface(
[[ch for ch in _full_text]],
[[_style for _ in range(len(_full_text))]],
["B" * len(_full_text)],
)
def toggle_select(self):
"""Draw or not selector characters."""
self._selected = not self._selected
self._image = self._make_image()
def select(self):
"""Select and refresh image."""
self._selected = True
self._image = self._make_image()
def deselect(self):
"""Deselect and refresh image."""
self._selected = False
self._image = self._make_image()
@property
def selected(self) -> bool:
"""Shows is item selected or not.
.. warning:: Complete menu workflow.
"""
return self._selected
def do_action(self):
"""Call action callback."""
if callable(self._action):
self._action()
class MenuItemContainer(Renderable): # (CompoundMixin)
"""Container for menu items, manages current selected, dispatches action."""
compound = True
def __init__(self, items: Optional[List[MenuItemWidget]] = None):
# This object is containter, it doesn't matter where it placed
# (while we have no local coordinates of childs implemented).
super().__init__(Point())
self._image = None
self._items = InfiniteList(items) if items else InfiniteList()
def add(self, item: MenuItemWidget):
self._items.append(item)
# TODO: add ability to update infinity list index after changing?
def remove(self, item: MenuItemWidget):
self._items.remove(item)
def select(self, index: int) -> MenuItemWidget:
"""Select desired element by index, returns this element."""
self._items.current().deselect()
selected = self._items.select(index)
selected.select()
return selected
def do_action(self):
self._items.current().do_action()
def prev(self):
self._items.current().deselect()
item = self._items.prev()
item.select()
return item
def next(self):
self._items.current().deselect()
item = self._items.next()
item.select()
return item
def current(self):
return self._items.current()
def update(self, dt):
pass
def get_renderable_objects(self):
return list(self._items)
# pylint: disable=too-many-arguments
class PopUpNotificationWidget(TextWidget):
"""Widget that allows to show short messages with timeout.
.. warning:: Experimental stuff. Fix constructor 6/5.
:param pos: global position
:type pos: :class:`xoinvader.utils.Point`
:param text: text for display
:type text: string
:param style: curses style
:type style: int | [int]
:param timeout: timer timeout
:type timeout: float
:param callback: callback for removal object
:type callback: function
"""
def __init__(self, pos, text, style=None, timeout=1.0, callback=None):
super(PopUpNotificationWidget, self).__init__(pos, text, style)
self._callback = callback
self._timer = Timer(timeout, self._finalize_cb)
self._update_text = super(PopUpNotificationWidget, self).update
self._timer.start()
def _finalize_cb(self):
"""Finalize callback, e.g. pass to it self for removal."""
if self._callback:
self._callback(self)
def update(
self, dt: int, text: Optional[str] = None, style: Optional[int] = None
):
self._update_text(text, style)
self._timer.update(dt)
class WeaponWidget(Renderable):
"""Widget for displaying weapon information.
.. warning:: !!! Duplicates TextWidget !!!
:param pos: global position
:type pos: :class:`xoinvader.utils.Point`
:param get_data: callback for getting data
:type get_data: function
"""
render_priority = 1
draw_on_border = True
def __init__(self, pos, get_data):
self._pos = pos
self._get_data = get_data
self._data = self._get_data()
self._image = self._make_image()
def _make_image(self):
"""Return Surface object."""
return Surface(
[self._data],
[[Style().gui["yellow"] for _ in range(len(self._data))]],
["B" * len(self._data)],
)
def update(self, dt):
"""Obtain new data and refresh image."""
self._data = self._get_data()
self._image = self._make_image()
# pylint: disable=too-many-instance-attributes
class Bar(Renderable):
"""Progress bar widget.
:param pos: Bar's global position
:param str prefix: text before the bar
:param str postfix: text after the bar
:param str left: left edge of the bar
:param str right: right edge of the bar
:param str marker: symbol that fills the bar
:param int marker_style: curses style for marker (passes to render)
:param str empty: symbols that fills empty bar space (without marker)
:param int empty_style: curses style for empty marker (passes to render)
:param int count: number of markers in the bar
:param int maxval: max value of displayed parameter (affects the accuracy)
:param int general_style: style of other characters(prefix, postfix, etc)
:param stylemap: mapping of compare functions and integers to curses style
:type stylemap: dict(function, integer(curses style)
:param function callback: calls if not None to get new percentage value
"""
render_priority = 1
draw_on_border = True
def __init__(
self,
pos: Point,
prefix: str = "",
postfix: str = "",
left: str = "[",
right: str = "]",
marker: str = "█",
marker_style: Optional[int] = None,
empty: str = "-",
empty_style: Optional[int] = None,
count: int = 10,
maxval: int = 100,
general_style: Optional[int] = None,
stylemap=None,
callback=None,
):
self._pos = pos
self._prefix = prefix
self._postfix = postfix
self._left = left
self._right = right
self._marker = marker
self._marker_style = marker_style
self._empty = empty
self._empty_style = empty_style
self._count = count
self._maxval = maxval
self._general_style = general_style
self._stylemap = stylemap
self._callback = callback
# fmt: off
self._template = "".join([
str(val) for val in [
self._prefix,
self._left, "{blocks}", self._right,
self._postfix
]
])
# fmt: on
self._current_count = self._count
self._image = None
self._update_image()
def _update_current_count(self, val):
"""Normalize current percentage and update count of marker blocks.
:param int val: value to normalize
"""
self._current_count = int(round(val * self._count / self._maxval))
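# Illustrative: with maxval=100 and count=10, val=42 yields
# int(round(42 * 10 / 100)) == 4 filled marker blocks.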
def _style(self, val):
"""Return style in depend on percentage."""
for cmp_func, bar_style in self._stylemap.items():
if cmp_func(val):
return bar_style
return None
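# Illustrative stylemap (the styles are made up): keys are predicates over
# the value, values are curses styles, e.g.
#     {lambda v: v > 70: style_ok, lambda v: v <= 70: style_warn}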
def _update_image(self):
"""Update image in depend on percentage."""
left = self._marker * self._current_count
right = self._empty * (self._count - self._current_count)
bar = self._template.format(blocks=left + right)
image = []
for char in bar:
if char == self._marker:
image.append((char, self._marker_style))
else:
image.append((char, self._general_style))
self._image = Surface(
[[ch[0] for ch in image]],
[[st[1] for st in image]],
["B" * len(image)],
)
def update(self, dt: int, val=None):
"""Update bar if there's need for it."""
if self._callback:
val = self._callback()
if val is None:
raise ValueError("val = None, what to do?")
self._marker_style = self._style(val)
self._update_current_count(val)
self._update_image()
| mit |
slank/ansible | test/units/modules/cloud/google/test_gce_tag.py | 8 | 1997 | #!/usr/bin/env python
import unittest
from ansible.modules.cloud.google.gce_tag import _get_changed_items, _intersect_items, _union_items
class TestGCETag(unittest.TestCase):
"""Unit tests for gce_tag module."""
def test_union_items(self):
"""
Combine items in both lists
removing duplicates.
"""
listA = [1, 2, 3, 4, 5, 8, 9]
listB = [1, 2, 3, 4, 5, 6, 7]
want = [1, 2, 3, 4, 5, 6, 7, 8, 9]
got = _union_items(listA, listB)
self.assertEqual(want, got)
def test_intersect_items(self):
"""
Items that appear in both lists.
"""
listA = [1, 2, 3, 4, 5, 8, 9]
listB = [1, 2, 3, 4, 5, 6, 7]
want = [1, 2, 3, 4, 5]
got = _intersect_items(listA, listB)
self.assertEqual(want, got)
# tags removed
new_tags = ['one', 'two']
existing_tags = ['two']
want = ['two'] # only remove the tag that was present
got = _intersect_items(existing_tags, new_tags)
self.assertEqual(want, got)
def test_get_changed_items(self):
"""
All the items from left list that don't match
any item from the right list.
"""
listA = [1, 2, 3, 4, 5, 8, 9]
listB = [1, 2, 3, 4, 5, 6, 7]
want = [8, 9]
got = _get_changed_items(listA, listB)
self.assertEqual(want, got)
# simulate new tags added
tags_to_add = ['one', 'two']
existing_tags = ['two']
want = ['one']
got = _get_changed_items(tags_to_add, existing_tags)
self.assertEqual(want, got)
# simulate removing tags
# specifying one tag on right that doesn't exist
tags_to_remove = ['one', 'two']
existing_tags = ['two', 'three']
want = ['three']
got = _get_changed_items(existing_tags, tags_to_remove)
self.assertEqual(want, got)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
jeremiahyan/lammps | tools/python/pizza/gnu.py | 64 | 12601 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# gnu tool
oneline = "Create plots via GnuPlot plotting program"
docstr = """
g = gnu() start up GnuPlot
g.stop() shut down GnuPlot process
g.plot(a) plot vector A against linear index
g.plot(a,b) plot B against A
g.plot(a,b,c,d,...) plot B against A, D against C, etc
g.mplot(M,N,S,"file",a,b,...) multiple plots saved to file0000.eps, etc
each plot argument can be a tuple, list, or Numeric/NumPy vector
mplot loops over range(M,N,S) and create one plot per iteration
last args are same as list of vectors for plot(), e.g. 1, 2, 4 vectors
each plot is made from a portion of the vectors, depending on loop index i
Ith plot is of b[0:i] vs a[0:i], etc
series of plots saved as file0000.eps, file0001.eps, etc
if use xrange(),yrange() then plot axes will be same for all plots
g("plot 'file.dat' using 2:3 with lines") execute string in GnuPlot
g.enter() enter GnuPlot shell
gnuplot> plot sin(x) with lines type commands directly to GnuPlot
gnuplot> exit, quit exit GnuPlot shell
g.export("data",range(100),a,...) create file with columns of numbers
all vectors must be of equal length
could plot from file with GnuPlot command: plot 'data' using 1:2 with lines
g.select(N) figure N becomes the current plot
subsequent commands apply to this plot
g.hide(N) delete window for figure N
g.save("file") save current plot as file.eps
Set attributes for current plot:
g.erase() reset all attributes to default values
g.aspect(1.3) aspect ratio
g.xtitle("Time") x axis text
g.ytitle("Energy") y axis text
g.title("My Plot") title text
g.title("title","x","y") title, x axis, y axis text
g.xrange(xmin,xmax) x axis range
g.xrange() default x axis range
g.yrange(ymin,ymax) y axis range
g.yrange() default y axis range
g.xlog() toggle x axis between linear and log
g.ylog() toggle y axis between linear and log
g.label(x,y,"text") place label at x,y coords
g.curve(N,'r') set color of curve N
colors: 'k' = black, 'r' = red, 'g' = green, 'b' = blue
'm' = magenta, 'c' = cyan, 'y' = yellow
"""
# History
# 8/05, Matt Jones (BYU): original version
# 9/05, Steve Plimpton: added mplot() method
# ToDo list
# allow choice of JPG or PNG or GIF when saving ?
# can this be done from GnuPlot or have to do via ImageMagick convert ?
# way to trim EPS plot that is created ?
# hide does not work on Mac aqua
# select does not pop window to front on Mac aqua
# Variables
# current = index of current figure (1-N)
# figures = list of figure objects with each plot's attributes
# so they aren't lost between replots
# Imports and external programs
import types, os
try: from DEFAULTS import PIZZA_GNUPLOT
except: PIZZA_GNUPLOT = "gnuplot"
try: from DEFAULTS import PIZZA_GNUTERM
except: PIZZA_GNUTERM = "x11"
# Class definition
class gnu:
# --------------------------------------------------------------------
def __init__(self):
self.GNUPLOT = os.popen(PIZZA_GNUPLOT,'w')
self.file = "tmp.gnu"
self.figures = []
self.select(1)
# --------------------------------------------------------------------
def stop(self):
self.__call__("quit")
del self.GNUPLOT
# --------------------------------------------------------------------
def __call__(self,command):
self.GNUPLOT.write(command + '\n')
self.GNUPLOT.flush()
# --------------------------------------------------------------------
def enter(self):
while 1:
command = raw_input("gnuplot> ")
if command == "quit" or command == "exit": return
self.__call__(command)
# --------------------------------------------------------------------
# write plot vectors to files and plot them
def plot(self,*vectors):
if len(vectors) == 1:
file = self.file + ".%d.1" % self.current
linear = range(len(vectors[0]))
self.export(file,linear,vectors[0])
self.figures[self.current-1].ncurves = 1
else:
if len(vectors) % 2: raise StandardError,"vectors must come in pairs"
for i in range(0,len(vectors),2):
file = self.file + ".%d.%d" % (self.current,i/2+1)
self.export(file,vectors[i],vectors[i+1])
self.figures[self.current-1].ncurves = len(vectors)/2
self.draw()
# --------------------------------------------------------------------
# create multiple plots from growing vectors, save to numbered files
# don't plot empty vector, create a [0] instead
def mplot(self,start,stop,skip,file,*vectors):
n = 0
for i in range(start,stop,skip):
partial_vecs = []
for vec in vectors:
if i: partial_vecs.append(vec[:i])
else: partial_vecs.append([0])
self.plot(*partial_vecs)
if n < 10: newfile = file + "000" + str(n)
elif n < 100: newfile = file + "00" + str(n)
elif n < 1000: newfile = file + "0" + str(n)
else: newfile = file + str(n)
self.save(newfile)
n += 1
# --------------------------------------------------------------------
# write list of equal-length vectors to filename
def export(self,filename,*vectors):
n = len(vectors[0])
for vector in vectors:
if len(vector) != n: raise StandardError,"vectors must be same length"
f = open(filename,'w')
nvec = len(vectors)
for i in xrange(n):
for j in xrange(nvec):
print >>f,vectors[j][i],
print >>f
f.close()
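# A minimal sketch of export() feeding a hand-written plot command
# (illustrative):
#
#     g = gnu()
#     g.export("data", range(5), [x * x for x in range(5)])
#     g("plot 'data' using 1:2 with lines")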
# --------------------------------------------------------------------
# select plot N as current plot
def select(self,n):
self.current = n
if len(self.figures) < n:
for i in range(n - len(self.figures)):
self.figures.append(figure())
cmd = "set term " + PIZZA_GNUTERM + ' ' + str(n)
self.__call__(cmd)
if self.figures[n-1].ncurves: self.draw()
# --------------------------------------------------------------------
# delete window for plot N
def hide(self,n):
cmd = "set term %s close %d" % (PIZZA_GNUTERM,n)
self.__call__(cmd)
# --------------------------------------------------------------------
# save plot to file.eps
# final re-select will reset terminal
# do not continue until plot file is written out
# else script could go forward and change data file
# use tmp.done as semaphore to indicate plot is finished
def save(self,file):
self.__call__("set terminal postscript enhanced solid lw 2 color portrait")
cmd = "set output '%s.eps'" % file
self.__call__(cmd)
if os.path.exists("tmp.done"): os.remove("tmp.done")
self.draw()
self.__call__("!touch tmp.done")
while not os.path.exists("tmp.done"): continue
self.__call__("set output")
self.select(self.current)
# --------------------------------------------------------------------
# restore default attributes by creating a new fig object
def erase(self):
fig = figure()
fig.ncurves = self.figures[self.current-1].ncurves
self.figures[self.current-1] = fig
self.draw()
# --------------------------------------------------------------------
def aspect(self,value):
self.figures[self.current-1].aspect = value
self.draw()
# --------------------------------------------------------------------
def xrange(self,*values):
if len(values) == 0:
self.figures[self.current-1].xlimit = 0
else:
self.figures[self.current-1].xlimit = (values[0],values[1])
self.draw()
# --------------------------------------------------------------------
def yrange(self,*values):
if len(values) == 0:
self.figures[self.current-1].ylimit = 0
else:
self.figures[self.current-1].ylimit = (values[0],values[1])
self.draw()
# --------------------------------------------------------------------
def label(self,x,y,text):
self.figures[self.current-1].labels.append((x,y,text))
self.figures[self.current-1].nlabels += 1
self.draw()
# --------------------------------------------------------------------
def nolabels(self):
self.figures[self.current-1].nlabel = 0
self.figures[self.current-1].labels = []
self.draw()
# --------------------------------------------------------------------
def title(self,*strings):
if len(strings) == 1:
self.figures[self.current-1].title = strings[0]
else:
self.figures[self.current-1].title = strings[0]
self.figures[self.current-1].xtitle = strings[1]
self.figures[self.current-1].ytitle = strings[2]
self.draw()
# --------------------------------------------------------------------
def xtitle(self,label):
self.figures[self.current-1].xtitle = label
self.draw()
# --------------------------------------------------------------------
def ytitle(self,label):
self.figures[self.current-1].ytitle = label
self.draw()
# --------------------------------------------------------------------
def xlog(self):
if self.figures[self.current-1].xlog:
self.figures[self.current-1].xlog = 0
else:
self.figures[self.current-1].xlog = 1
self.draw()
# --------------------------------------------------------------------
def ylog(self):
if self.figures[self.current-1].ylog:
self.figures[self.current-1].ylog = 0
else:
self.figures[self.current-1].ylog = 1
self.draw()
# --------------------------------------------------------------------
def curve(self,num,color):
fig = self.figures[self.current-1]
while len(fig.colors) < num: fig.colors.append(0)
fig.colors[num-1] = colormap[color]
self.draw()
# --------------------------------------------------------------------
# draw a plot with all its settings
# just return if no files of vectors defined yet
def draw(self):
fig = self.figures[self.current-1]
if not fig.ncurves: return
cmd = 'set size ratio ' + str(1.0/float(fig.aspect))
self.__call__(cmd)
cmd = 'set title ' + '"' + fig.title + '"'
self.__call__(cmd)
cmd = 'set xlabel ' + '"' + fig.xtitle + '"'
self.__call__(cmd)
cmd = 'set ylabel ' + '"' + fig.ytitle + '"'
self.__call__(cmd)
if fig.xlog: self.__call__("set logscale x")
else: self.__call__("unset logscale x")
if fig.ylog: self.__call__("set logscale y")
else: self.__call__("unset logscale y")
if fig.xlimit:
cmd = 'set xr [' + str(fig.xlimit[0]) + ':' + str(fig.xlimit[1]) + ']'
self.__call__(cmd)
else: self.__call__("set xr [*:*]")
if fig.ylimit:
cmd = 'set yr [' + str(fig.ylimit[0]) + ':' + str(fig.ylimit[1]) + ']'
self.__call__(cmd)
else: self.__call__("set yr [*:*]")
self.__call__("set nolabel")
for i in range(fig.nlabels):
x = fig.labels[i][0]
y = fig.labels[i][1]
text = fig.labels[i][2]
cmd = 'set label ' + '\"' + text + '\" at ' + str(x) + ',' + str(y)
self.__call__(cmd)
self.__call__("set key off")
cmd = 'plot '
for i in range(fig.ncurves):
file = self.file + ".%d.%d" % (self.current,i+1)
if len(fig.colors) > i and fig.colors[i]:
cmd += "'" + file + "' using 1:2 with line %d, " % fig.colors[i]
else:
cmd += "'" + file + "' using 1:2 with lines, "
self.__call__(cmd[:-2])
# --------------------------------------------------------------------
# class to store settings for a single plot
class figure:
def __init__(self):
self.ncurves = 0
self.colors = []
self.title = ""
self.xtitle = ""
self.ytitle = ""
self.aspect = 1.3
self.xlimit = 0
self.ylimit = 0
self.xlog = 0
self.ylog = 0
self.nlabels = 0
self.labels = []
# --------------------------------------------------------------------
# line color settings
colormap = {'k':-1, 'r':1, 'g':2, 'b':3, 'm':4, 'c':5, 'y':7}
| gpl-2.0 |
pabulumm/neighbors | lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
            # we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
        # It is not necessary to receive all data to draw a conclusion.
        # For charset detection, a certain amount of data is enough.
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
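# Illustrative note (not part of the upstream module): the arithmetic above
# maps each valid 2-byte EUC-KR character onto a dense index into the
# frequency table, row by row.  The first valid pair (0xB0, 0xA1) gets
# order 0, (0xB0, 0xA2) gets 1, and (0xB1, 0xA1) starts the next row at 94:
#   94 * (0xB0 - 0xB0) + 0xA2 - 0xA1 == 1
#   94 * (0xB1 - 0xB0) + 0xA1 - 0xA1 == 94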
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
| bsd-3-clause |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/cloudsearch2/test_layers.py | 118 | 2846 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests for Layer1 of Cloudsearch
"""
import time
from tests.unit import unittest
from boto.cloudsearch2.layer1 import CloudSearchConnection
from boto.cloudsearch2.layer2 import Layer2
from boto.regioninfo import RegionInfo
class CloudSearchLayer1Test(unittest.TestCase):
cloudsearch = True
def setUp(self):
super(CloudSearchLayer1Test, self).setUp()
self.layer1 = CloudSearchConnection()
self.domain_name = 'test-%d' % int(time.time())
def test_create_domain(self):
resp = self.layer1.create_domain(self.domain_name)
resp = (resp['CreateDomainResponse']
['CreateDomainResult']
['DomainStatus'])
self.addCleanup(self.layer1.delete_domain, self.domain_name)
self.assertTrue(resp.get('Created', False))
class CloudSearchLayer2Test(unittest.TestCase):
cloudsearch = True
def setUp(self):
super(CloudSearchLayer2Test, self).setUp()
self.layer2 = Layer2()
self.domain_name = 'test-%d' % int(time.time())
def test_create_domain(self):
domain = self.layer2.create_domain(self.domain_name)
self.addCleanup(domain.delete)
self.assertTrue(domain.created, False)
self.assertEqual(domain.domain_name, self.domain_name)
def test_initialization_regression(self):
us_west_2 = RegionInfo(
name='us-west-2',
endpoint='cloudsearch.us-west-2.amazonaws.com'
)
self.layer2 = Layer2(
region=us_west_2,
host='cloudsearch.us-west-2.amazonaws.com'
)
self.assertEqual(
self.layer2.layer1.host,
'cloudsearch.us-west-2.amazonaws.com'
)
| bsd-3-clause |
tectronics/wfrog | wfcommon/storage/base.py | 5 | 3750 | ## Copyright 2009 Laurent Bovet <laurent.bovet@windmaster.ch>
## Jordi Puigsegur <jordi.puigsegur@gmail.com>
##
## This file is part of wfrog
##
## wfrog is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from datetime import datetime
class DatabaseStorage(object):
'''
Base class for database storages.
'''
time_format = '%Y-%m-%d %H:%M:%S'
tablename = 'METEO'
mandatory_storage_fields = ['TEMP', 'HUM', 'DEW_POINT', 'WIND', 'WIND_DIR', 'WIND_GUST',
'WIND_GUST_DIR', 'RAIN', 'RAIN_RATE', 'PRESSURE']
optional_storage_fields = ['UV_INDEX', 'SOLAR_RAD', 'TEMPINT', 'HUMINT', 'TEMP2', 'HUM2',
'TEMP3', 'HUM3', 'TEMP4', 'HUM4', 'TEMP5', 'HUM5',
'TEMP6', 'HUM6', 'TEMP7', 'HUM7', 'TEMP8', 'HUM8',
'TEMP9', 'HUM9']
# Database storages should rewrite the storage_fields variable with the actual available fields
storage_fields = mandatory_storage_fields
def write_sample(self, sample, context={}):
timestamp = time.mktime(sample['localtime'].timetuple())
utc_time = datetime.utcfromtimestamp(timestamp)
sql = "INSERT INTO %s (TIMESTAMP_UTC, TIMESTAMP_LOCAL, %s) VALUES (%s, %s, %s)" % (
self.tablename,
', '.join(self.storage_fields),
"'%s'" % utc_time.strftime(self.time_format),
"'%s'" % sample['localtime'].strftime(self.time_format),
', '.join(map(lambda x: self.format(sample[x.lower()] if x.lower() in sample else None), self.storage_fields)))
try:
self.db.connect()
self.db.execute(sql)
self.logger.debug("SQL executed: %s", sql)
except:
            self.logger.exception("Error writing current data to database")
finally:
self.db.disconnect()
def keys(self, context={}):
        return ['utctime', 'localtime'] + map(str.lower, self.storage_fields)
def samples(self, from_time=datetime.fromtimestamp(0), to_time=datetime.now(), context={}):
self.logger.debug("Getting samples for range: %s to %s", from_time, to_time)
sql = ( "SELECT TIMESTAMP_UTC, TIMESTAMP_LOCAL, %s FROM %s " + \
" WHERE TIMESTAMP_LOCAL >= '%s' AND TIMESTAMP_LOCAL < '%s' "+ \
" ORDER BY TIMESTAMP_LOCAL ASC" ) % (
', '.join(self.storage_fields),
self.tablename,
from_time.strftime(self.time_format),
to_time.strftime(self.time_format))
try:
self.db.connect()
for row in self.db.select(sql):
if not isinstance(row[0], datetime):
row = list(row)
row[0] = datetime.strptime(row[0], self.time_format)
yield row
finally:
self.db.disconnect()
def format(self, value):
if value is None:
return 'NULL'
else:
            return str(value) # full precision; any rounding to 1 decimal is left to the database, which is OK.
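# Illustrative sketch (not part of the original file): for a sample taken at
# 2009-06-01 12:00:00 local time carrying only temp=21.5 and hum=60,
# write_sample() above builds an INSERT of the form (fields missing from the
# sample are stored as NULL):
#   INSERT INTO METEO (TIMESTAMP_UTC, TIMESTAMP_LOCAL, TEMP, HUM, DEW_POINT, ...)
#   VALUES ('<derived utc time>', '2009-06-01 12:00:00', 21.5, 60, NULL, ...)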
| gpl-3.0 |
osrg/ryu | ryu/exception.py | 4 | 2338 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RyuException(Exception):
message = 'An unknown exception'
def __init__(self, msg=None, **kwargs):
self.kwargs = kwargs
if msg is None:
msg = self.message
try:
msg = msg % kwargs
except Exception:
msg = self.message
super(RyuException, self).__init__(msg)
class OFPUnknownVersion(RyuException):
message = 'unknown version %(version)x'
class OFPMalformedMessage(RyuException):
message = 'malformed message'
class OFPTruncatedMessage(RyuException):
message = 'truncated message: %(orig_ex)s'
def __init__(self, ofpmsg, residue, original_exception,
msg=None, **kwargs):
self.ofpmsg = ofpmsg
self.residue = residue
self.original_exception = original_exception
kwargs['orig_ex'] = str(original_exception)
super(OFPTruncatedMessage, self).__init__(msg, **kwargs)
class OFPInvalidActionString(RyuException):
message = 'unable to parse: %(action_str)s'
class NetworkNotFound(RyuException):
message = 'no such network id %(network_id)s'
class NetworkAlreadyExist(RyuException):
message = 'network id %(network_id)s already exists'
class PortNotFound(RyuException):
message = 'no such port (%(dpid)s, %(port)s) in network %(network_id)s'
class PortAlreadyExist(RyuException):
message = 'port (%(dpid)s, %(port)s) in network %(network_id)s ' \
'already exists'
class PortUnknown(RyuException):
message = 'unknown network id for port (%(dpid)s %(port)s)'
class MacAddressDuplicated(RyuException):
message = 'MAC address %(mac)s is duplicated'
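# Illustrative usage sketch (not part of the original module): each subclass
# formats its class-level `message` template with the keyword arguments
# passed to the constructor, e.g.:
#   >>> str(OFPUnknownVersion(version=5))
#   'unknown version 5'
#   >>> str(MacAddressDuplicated(mac='aa:bb:cc:dd:ee:ff'))
#   'MAC address aa:bb:cc:dd:ee:ff is duplicated'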
| apache-2.0 |
Mega-DatA-Lab/mxnet | example/gluon/mnist.py | 37 | 4568 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
import numpy as np
import mxnet as mx
from mxnet import gluon, autograd
from mxnet.gluon import nn
# Parse CLI arguments
parser = argparse.ArgumentParser(description='MXNet Gluon MNIST Example')
parser.add_argument('--batch-size', type=int, default=100,
help='batch size for training and testing (default: 100)')
parser.add_argument('--epochs', type=int, default=10,
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.1,
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9,
help='SGD momentum (default: 0.9)')
parser.add_argument('--cuda', action='store_true', default=False,
help='Train on GPU with CUDA')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
opt = parser.parse_args()
# define network
net = nn.Sequential()
with net.name_scope():
net.add(nn.Dense(128, activation='relu'))
net.add(nn.Dense(64, activation='relu'))
net.add(nn.Dense(10))
# data
def transformer(data, label):
data = data.reshape((-1,)).astype(np.float32)/255
return data, label
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
# train
def test(ctx):
metric = mx.metric.Accuracy()
for data, label in val_data:
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
output = net(data)
metric.update([label], [output])
return metric.get()
def train(epochs, ctx):
# Collect all parameters from net and its children, then initialize them.
net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
# Trainer is for updating parameters with gradient.
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': opt.lr, 'momentum': opt.momentum})
metric = mx.metric.Accuracy()
loss = gluon.loss.SoftmaxCrossEntropyLoss()
for epoch in range(epochs):
        # reset data iterator and metric at beginning of epoch.
metric.reset()
for i, (data, label) in enumerate(train_data):
# Copy data to ctx if necessary
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
# Start recording computation graph with record() section.
# Recorded graphs can then be differentiated with backward.
with autograd.record():
output = net(data)
L = loss(output, label)
L.backward()
# take a gradient step with batch_size equal to data.shape[0]
trainer.step(data.shape[0])
# update metric at last.
metric.update([label], [output])
if i % opt.log_interval == 0 and i > 0:
name, acc = metric.get()
print('[Epoch %d Batch %d] Training: %s=%f'%(epoch, i, name, acc))
name, acc = metric.get()
print('[Epoch %d] Training: %s=%f'%(epoch, name, acc))
name, val_acc = test(ctx)
print('[Epoch %d] Validation: %s=%f'%(epoch, name, val_acc))
net.save_params('mnist.params')
if __name__ == '__main__':
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
train(opt.epochs, ctx)
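# Example invocations (not part of the original script), using the CLI flags
# defined by the argparse setup above:
#   python mnist.py --epochs 5 --lr 0.05
#   python mnist.py --cuda --batch-size 200   # train on GPU 0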
| apache-2.0 |
Instagram/django | django/contrib/messages/api.py | 307 | 3570 | from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
from django.utils.functional import lazy, memoize
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
)
class MessageFailure(Exception):
pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""
Attempts to add a message to the request using the 'messages' app, falling
back to the user's message_set if MessageMiddleware hasn't been enabled.
"""
if hasattr(request, '_messages'):
return request._messages.add(level, message, extra_tags)
if hasattr(request, 'user') and request.user.is_authenticated():
return request.user.message_set.create(message=message)
if not fail_silently:
raise MessageFailure('Without the django.contrib.messages '
'middleware, messages can only be added to '
'authenticated users.')
def get_messages(request):
"""
Returns the message storage on the request if it exists, otherwise returns
user.message_set.all() as the old auth context processor did.
"""
if hasattr(request, '_messages'):
return request._messages
def get_user():
if hasattr(request, 'user'):
return request.user
else:
from django.contrib.auth.models import AnonymousUser
return AnonymousUser()
return lazy(memoize(get_user().get_and_delete_messages, {}, 0), list)()
def get_level(request):
"""
Returns the minimum level of messages to be recorded.
The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
the ``INFO`` level is used.
"""
if hasattr(request, '_messages'):
storage = request._messages
else:
storage = default_storage(request)
return storage.level
def set_level(request, level):
"""
Sets the minimum level of messages to be recorded, returning ``True`` if
the level was recorded successfully.
If set to ``None``, the default level will be used (see the ``get_level``
method).
"""
if not hasattr(request, '_messages'):
return False
request._messages.level = level
return True
def debug(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``DEBUG`` level.
"""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``INFO`` level.
"""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``SUCCESS`` level.
"""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``WARNING`` level.
"""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``ERROR`` level.
"""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
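# Illustrative usage sketch (not part of the module): inside a view, with
# MessageMiddleware enabled, the level-specific helpers above are the usual
# entry points:
#   from django.contrib import messages
#
#   def update_profile(request):     # hypothetical view
#       ...
#       messages.success(request, 'Profile updated.', extra_tags='alert')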
| bsd-3-clause |
Aaron1992/shiguang | app/test_utils.py | 1 | 1147 | from utils import clean_html
xss_payload = '''<b>redy</b>
<img src="data:image/jpeg;base64,">
<<script></script>script> alert("Haha, I hacked your page."); <<script></script>script>
'''
html = '''
both <em id="foo" style="color: black">can</em> have
<img id="bar" src="foo"/>
<script type="text/javascript" src="evil-site"></script>
<link rel="alternate" type="text/rss" src="evil-rss">
<style>
body {background-image: url(javascript:do_evil)};
div {color: expression(evil)};
</style>
< onload="evil_function()">
<!-- I am interpreted for EVIL! -->
<img src="">
<a href="javascript:evil_function()">a link</a>
<a href="#" onclick="evil_function()">another link</a>
<p onclick="evil_function()">a paragraph</p>
<div style="display: none">secret EVIL!</div>
<object> of EVIL! </object>
<iframe src="evil-site"></iframe>
<form action="evil-site">
Password: <input type="password" name="password">
</form>
<blink>annoying EVIL!</blink>
<a href="evil-site">spam spam SPAM!</a>
<image src="evil!">
</html>''' + xss_payload
print(clean_html(html))
| apache-2.0 |
V3n0M93/fmi-projects | python/Problem 5/solution.py | 1 | 7286 | class FileSystemError(Exception):
pass
class NodeDoesNotExistError(FileSystemError):
pass
class SourceNodeDoesNotExistError(NodeDoesNotExistError, FileSystemError):
pass
class DestinationNodeDoesNotExistError(NodeDoesNotExistError, FileSystemError):
pass
class FileSystemMountError(FileSystemError):
pass
class MountPointDoesNotExistError(FileSystemMountError, FileSystemError):
pass
class MountPointNotADirectoryError(FileSystemMountError, FileSystemError):
pass
class MountPointNotEmptyError(FileSystemMountError, FileSystemError):
pass
class NotAMountpointError(FileSystemMountError, FileSystemError):
pass
class NotEnoughSpaceError(FileSystemError):
pass
class NonExplicitDirectoryDeletionError(FileSystemError):
pass
class NonEmptyDirectoryDeletionError(FileSystemError):
pass
class DestinationNotADirectoryError(FileSystemError):
pass
class DestinationNodeExistsError(FileSystemError):
pass
class File:
def __init__(self, name, content):
self.content = content
self.is_directory = False
self.name = name
self.size = len(content) + 1
def append(self, text):
self.content += text
self.size += len(text)
    def truncate(self, text):
        self.content = text
        # keep the one-byte node overhead counted at creation, so that a
        # later remove() credits back the full size
        self.size = len(text) + 1
def remove(self, filesystem):
filesystem.available_size += self.size
class Directory:
def __init__(self, name):
self.directories = []
self.files = []
self.nodes = []
self.name = name
self.is_directory = True
self.is_root = False
def add_file(self, file_to_add):
self.files.append(file_to_add)
self.nodes.append(file_to_add)
def add_directory(self, directory):
self.directories.append(directory)
self.nodes.append(directory)
def remove(self, filesystem):
filesystem.available_size += 1
for node in self.nodes:
node.remove(filesystem)
class FileSystem:
def __init__(self, size):
self.size = size
self.available_size = size - 1
self.root = Directory("")
self.root.is_root = True
def get_node(self, path):
if path == "":
return self.root
path = path.split('/')
if path[1] == "":
return self.root
current_dir = self.root
for directory in path[1:-1]:
is_directory_found = False
for dirr in current_dir.directories:
if directory == dirr.name:
is_directory_found = True
current_dir = dirr
if not is_directory_found:
raise NodeDoesNotExistError
for node in current_dir.nodes:
if node.name == path[-1]:
return node
raise NodeDoesNotExistError
def create(self, path, directory=False, content=''):
if self.available_size - len(content) - 1 < 0:
raise NotEnoughSpaceError
path, node_name = path.rsplit('/', 1)
if path == '':
father_node = self.root
else:
try:
father_node = self.get_node(path)
except (NodeDoesNotExistError):
raise DestinationNodeDoesNotExistError
if father_node.is_directory is False:
raise DestinationNodeDoesNotExistError
for node in father_node.nodes:
if node.name == node_name:
raise DestinationNodeExistsError
self.available_size = self.available_size - len(content) - 1
if directory:
new_directory = Directory(node_name)
father_node.add_directory(new_directory)
else:
new_file = File(node_name, content)
father_node.add_file(new_file)
def remove(self, path, directory=False, force=True):
path, node_name = path.rsplit('/', 1)
if path == '':
father_node = self.root
else:
try:
father_node = self.get_node(path)
except (NodeDoesNotExistError):
raise NodeDoesNotExistError
node_to_delete = None
for node in father_node.nodes:
if node.name == node_name:
node_to_delete = node
if node_to_delete is None:
raise NodeDoesNotExistError
if node_to_delete.is_directory and directory is False:
raise NonExplicitDirectoryDeletionError
if (node_to_delete.is_directory and len(node_to_delete.nodes) > 0
and force is False):
raise NonEmptyDirectoryDeletionError
if node_to_delete.is_directory:
node_to_delete.remove(self)
father_node.nodes.remove(node_to_delete)
father_node.directories.remove(node_to_delete)
else:
node_to_delete.remove(self)
father_node.nodes.remove(node_to_delete)
father_node.files.remove(node_to_delete)
def mount(self, file_system, path):
try:
mount_node = self.get_node(path)
except (NodeDoesNotExistError):
raise MountPointDoesNotExistError
        if mount_node.is_directory is False:
            raise MountPointNotADirectoryError
if len(mount_node.nodes) > 0:
raise MountPointNotEmptyError
father_node = self.get_node(path.rsplit('/', 1)[0])
father_node.nodes.remove(mount_node)
father_node.directories.remove(mount_node)
file_system.root.name = mount_node.name
father_node.nodes.append(file_system.root)
father_node.directories.append(file_system.root)
def unmount(self, path):
try:
unmount_node = self.get_node(path)
except (NodeDoesNotExistError):
raise NodeDoesNotExistError
if unmount_node.is_root is False:
raise NotAMountpointError
father_node = self.get_node(path.rsplit('/', 1)[0])
father_node.nodes.remove(unmount_node)
father_node.directories.remove(unmount_node)
regular_directory = Directory(unmount_node.name)
father_node.nodes.append(regular_directory)
father_node.directories.append(regular_directory)
def move(self, source, destination):
try:
source_node = self.get_node(source)
except (NodeDoesNotExistError):
raise SourceNodeDoesNotExistError
try:
destination_node = self.get_node(destination)
except (NodeDoesNotExistError):
raise DestinationNodeDoesNotExistError
if destination_node.is_directory is False:
raise DestinationNotADirectoryError
for node in destination_node.nodes:
if node.name == source_node.name:
raise DestinationNodeExistsError
father_node = self.get_node(source.rsplit('/', 1)[0])
father_node.nodes.remove(source_node)
destination_node.nodes.append(source_node)
if source_node.is_directory:
father_node.directories.remove(source_node)
destination_node.directories.append(source_node)
else:
father_node.files.remove(source_node)
destination_node.files.append(source_node)
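# Illustrative usage sketch (not part of the solution):
#   fs = FileSystem(32)                        # 31 bytes free after the root
#   fs.create('/home', directory=True)         # costs 1 byte
#   fs.create('/home/note', content='hi')      # costs len('hi') + 1 = 3 bytes
#   fs.move('/home/note', '/')                 # now available at /note
#   fs.remove('/home', directory=True)         # empty, so deletion succeeds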
| gpl-2.0 |
yqm/sl4a | python/src/Tools/scripts/pdeps.py | 96 | 3937 | #! /usr/bin/env python
# pdeps
#
# Find dependencies between a bunch of Python modules.
#
# Usage:
# pdeps file1.py file2.py ...
#
# Output:
# Four tables separated by lines like '--- Closure ---':
# 1) Direct dependencies, listing which module imports which other modules
# 2) The inverse of (1)
# 3) Indirect dependencies, or the closure of the above
# 4) The inverse of (3)
#
# To do:
# - command line options to select output type
# - option to automatically scan the Python library for referenced modules
# - option to limit output to particular modules
import sys
import re
import os
# Main program
#
def main():
args = sys.argv[1:]
if not args:
print 'usage: pdeps file.py file.py ...'
return 2
#
table = {}
for arg in args:
process(arg, table)
#
print '--- Uses ---'
printresults(table)
#
print '--- Used By ---'
inv = inverse(table)
printresults(inv)
#
print '--- Closure of Uses ---'
reach = closure(table)
printresults(reach)
#
print '--- Closure of Used By ---'
invreach = inverse(reach)
printresults(invreach)
#
return 0
# Compiled regular expressions to search for import statements
#
m_import = re.compile('^[ \t]*import[ \t]+([^#]+)')
m_from = re.compile('^[ \t]*from[ \t]+([^ \t]+)[ \t]+')
# Collect data from one file
#
def process(filename, table):
fp = open(filename, 'r')
mod = os.path.basename(filename)
if mod[-3:] == '.py':
mod = mod[:-3]
table[mod] = list = []
while 1:
line = fp.readline()
if not line: break
while line[-1:] == '\\':
nextline = fp.readline()
if not nextline: break
line = line[:-1] + nextline
        # compiled patterns have no .regs attribute; use the match object
        m = m_import.match(line) or m_from.match(line)
        if not m: continue
        words = m.group(1).split(',')
# print '#', line, words
for word in words:
word = word.strip()
if word not in list:
list.append(word)
# Compute closure (this is in fact totally general)
#
def closure(table):
modules = table.keys()
#
# Initialize reach with a copy of table
#
reach = {}
for mod in modules:
reach[mod] = table[mod][:]
#
# Iterate until no more change
#
change = 1
while change:
change = 0
for mod in modules:
for mo in reach[mod]:
if mo in modules:
for m in reach[mo]:
if m not in reach[mod]:
reach[mod].append(m)
change = 1
#
return reach
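# Example (not part of the original script): with
#   table = {'a': ['b'], 'b': ['c'], 'c': []}
# closure(table) returns {'a': ['b', 'c'], 'b': ['c'], 'c': []},
# since 'a' reaches 'c' indirectly through 'b'.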
# Invert a table (this is again totally general).
# All keys of the original table are made keys of the inverse,
# so there may be empty lists in the inverse.
#
def inverse(table):
inv = {}
for key in table.keys():
if not inv.has_key(key):
inv[key] = []
for item in table[key]:
store(inv, item, key)
return inv
# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
def store(dict, key, item):
if dict.has_key(key):
dict[key].append(item)
else:
dict[key] = [item]
# Tabulate results neatly
#
def printresults(table):
modules = table.keys()
maxlen = 0
for mod in modules: maxlen = max(maxlen, len(mod))
modules.sort()
for mod in modules:
list = table[mod]
list.sort()
print mod.ljust(maxlen), ':',
if mod in list:
print '(*)',
for ref in list:
print ref,
print
# Call main and honor exit status
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
| apache-2.0 |
patricklaw/pants | src/python/pants/util/socket.py | 4 | 2351 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import io
import selectors
import socket
def is_readable(fileobj, *, timeout=None):
"""Check that the file-like resource is readable within the given timeout via polling.
:param Union[int, SupportsFileNo] fileobj:
:param Optional[int] timeout: (in seconds)
:return bool
"""
with selectors.DefaultSelector() as selector:
selector.register(fileobj, selectors.EVENT_READ)
events = selector.select(timeout=timeout)
return bool(events)
class RecvBufferedSocket:
"""A socket wrapper that simplifies recv() buffering."""
def __init__(self, sock, chunk_size=io.DEFAULT_BUFFER_SIZE, select_timeout=None):
"""
:param socket sock: The socket.socket object to wrap.
:param int chunk_size: The smallest max read size for calls to recv() in bytes.
:param float select_timeout: The select timeout for a socket read in seconds. An integer value
effectively makes self.recv non-blocking (default: None, blocking).
"""
self._socket = sock
self._chunk_size = chunk_size
self._select_timeout = select_timeout
self._buffer = b""
self._maybe_tune_socket(sock)
def _maybe_tune_socket(self, sock):
try:
# Disable Nagle's algorithm to improve latency.
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (OSError, IOError):
# This can fail in tests where `socket.socketpair()` is used, or potentially
# in odd environments - but we shouldn't ever crash over it.
return
def recv(self, bufsize):
"""Buffers up to _chunk_size bytes when the internal buffer has less than `bufsize`
bytes."""
assert bufsize > 0, "a positive bufsize is required"
if len(self._buffer) < bufsize and is_readable(self._socket, timeout=self._select_timeout):
recvd = self._socket.recv(max(self._chunk_size, bufsize))
self._buffer = self._buffer + recvd
return_buf, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return return_buf
def __getattr__(self, attr):
return getattr(self._socket, attr)
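# Illustrative usage sketch (not part of the module):
#   import socket
#   a, b = socket.socketpair()
#   buffered = RecvBufferedSocket(a)
#   b.sendall(b'hello world')
#   buffered.recv(5)   # -> b'hello'; the rest stays in the internal buffer
#   buffered.recv(6)   # -> b' world', served straight from the buffer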
| apache-2.0 |
coolhandmook/trayjenkins | tests/trayjenkins/test_status.py | 1 | 9033 | import mox
from unittest import TestCase
from trayjenkins.event import Event, IEvent
from trayjenkins.jobs import IModel as JobsModel, IFilter, JobModel
from trayjenkins.status import IModel, IView, Presenter, IMessageComposer,\
IStatusReader, Model, StatusReader, DefaultMessageComposer
from pyjenkins.job import Job, JobStatus
class StatusPresenterTests(TestCase):
def test_Constructor_ModelFiresStatusChangedEvent_ViewSetStatusCalled(self):
mocks = mox.Mox()
model = mocks.CreateMock(IModel)
view = mocks.CreateMock(IView)
event = Event()
model.status_changed_event().AndReturn(event)
view.set_status('some status string', 'status message')
mocks.ReplayAll()
presenter = Presenter(model, view) # @UnusedVariable
event.fire('some status string', 'status message')
mox.Verify(view)
class StatusModelTests(TestCase):
def setUp(self):
self.mocks = mox.Mox()
self.jobs_filter = self.mocks.CreateMock(IFilter)
self.messageComposer = self.mocks.CreateMock(IMessageComposer)
self.statusReader = self.mocks.CreateMock(IStatusReader)
self.statusEvent = self.mocks.CreateMock(IEvent)
self.jobsModel = self.mocks.CreateMock(JobsModel)
self.jobsEvent = Event()
self.jobsModel.jobs_updated_event().AndReturn(self.jobsEvent)
self.jobs = [Job('who', 'cares?')]
self.job_models = [JobModel(self.jobs[0], False)]
def test_updateStatus_JobsModelFiresFirstUpdateEventStatusUnknownAndMessageNone_StatusChangedEventNotFired(self):
self.jobs_filter.filter_jobs(self.job_models).AndReturn(self.job_models)
self.messageComposer.message(self.jobs).AndReturn(None)
self.statusReader.status(self.jobs).AndReturn(JobStatus.UNKNOWN)
self.mocks.ReplayAll()
model = Model(self.jobsModel, # @UnusedVariable
self.jobs_filter,
self.messageComposer,
self.statusReader,
self.statusEvent)
self.jobsEvent.fire(self.job_models)
mox.Verify(self.statusEvent)
def test_updateStatus_JobsModelFiresFirstUpdateEvent_StatusChangedEventFired(self):
self.jobs_filter.filter_jobs(self.job_models).AndReturn(self.job_models)
self.messageComposer.message(self.jobs).AndReturn('message')
self.statusReader.status(self.jobs).AndReturn(JobStatus.FAILING)
self.statusEvent.fire(JobStatus.FAILING, 'message')
self.mocks.ReplayAll()
model = Model(self.jobsModel, # @UnusedVariable
self.jobs_filter,
self.messageComposer,
self.statusReader,
self.statusEvent)
self.jobsEvent.fire(self.job_models)
mox.Verify(self.statusEvent)
def test_updateStatus_TwoJobsModelUpdatesWithSameStatusAndMessage_StatusChangedEventFiredOnce(self):
self.jobs_filter.filter_jobs(self.job_models).AndReturn(self.job_models)
self.jobs_filter.filter_jobs(self.job_models).AndReturn(self.job_models)
self.messageComposer.message(self.jobs).AndReturn('message')
self.messageComposer.message(self.jobs).AndReturn('message')
self.statusReader.status(self.jobs).AndReturn(JobStatus.FAILING)
self.statusReader.status(self.jobs).AndReturn(JobStatus.FAILING)
self.statusEvent.fire(JobStatus.FAILING, 'message')
self.mocks.ReplayAll()
model = Model(self.jobsModel, # @UnusedVariable
self.jobs_filter,
self.messageComposer,
self.statusReader,
self.statusEvent)
self.jobsEvent.fire(self.job_models)
self.jobsEvent.fire(self.job_models)
mox.Verify(self.statusEvent)
def test_updateStatus_TwoJobsModelUpdatesWithDifferentStatus_StatusChangedEventFiredTwice(self):
self.jobs_filter.filter_jobs(self.job_models).AndReturn(self.job_models)
self.jobs_filter.filter_jobs(self.job_models).AndReturn(self.job_models)
self.messageComposer.message(self.jobs).AndReturn('message')
self.messageComposer.message(self.jobs).AndReturn('message')
self.statusReader.status(self.jobs).AndReturn(JobStatus.FAILING)
self.statusReader.status(self.jobs).AndReturn(JobStatus.OK)
self.statusEvent.fire(JobStatus.FAILING, 'message')
self.statusEvent.fire(JobStatus.OK, 'message')
self.mocks.ReplayAll()
model = Model(self.jobsModel, # @UnusedVariable
self.jobs_filter,
self.messageComposer,
self.statusReader,
self.statusEvent)
self.jobsEvent.fire(self.job_models)
self.jobsEvent.fire(self.job_models)
mox.Verify(self.statusEvent)
def test_updateStatus_TwoJobsModelUpdatesWithDifferentMessage_StatusChangedEventFiredTwice(self):
self.jobs_filter.filter_jobs(self.job_models).AndReturn(self.job_models)
self.jobs_filter.filter_jobs(self.job_models).AndReturn(self.job_models)
self.messageComposer.message(self.jobs).AndReturn('message one')
self.messageComposer.message(self.jobs).AndReturn('message two')
self.statusReader.status(self.jobs).AndReturn(JobStatus.FAILING)
self.statusReader.status(self.jobs).AndReturn(JobStatus.FAILING)
self.statusEvent.fire(JobStatus.FAILING, 'message one')
self.statusEvent.fire(JobStatus.FAILING, 'message two')
self.mocks.ReplayAll()
model = Model(self.jobsModel, # @UnusedVariable
self.jobs_filter,
self.messageComposer,
self.statusReader,
self.statusEvent)
self.jobsEvent.fire(self.job_models)
self.jobsEvent.fire(self.job_models)
mox.Verify(self.statusEvent)
def test_updateStatus_JobsFilterReturnsModifiedList_ModifiedListPassedTo(self):
filtered_jobs = [Job('completely', 'different')]
filtered_models = [JobModel(filtered_jobs[0], True)]
self.jobs_filter.filter_jobs(self.job_models).AndReturn(filtered_models)
self.messageComposer.message(filtered_jobs).AndReturn('message')
self.statusReader.status(filtered_jobs).AndReturn(JobStatus.OK)
self.statusEvent.fire(JobStatus.OK, 'message')
self.mocks.ReplayAll()
model = Model(self.jobsModel, # @UnusedVariable
self.jobs_filter,
self.messageComposer,
self.statusReader,
self.statusEvent)
self.jobsEvent.fire(self.job_models)
mox.Verify(self.statusEvent)
class StatusReaderTests(TestCase):
def test_status_OneFailingJob_ReturnFailing(self):
jobs = [Job('eric', JobStatus.UNKNOWN),
Job('john', JobStatus.FAILING),
Job('terry', JobStatus.OK),
Job('graham', JobStatus.DISABLED)]
reader = StatusReader()
result = reader.status(jobs)
self.assertEqual(JobStatus.FAILING, result)
def test_status_NoFailingJobs_ReturnOk(self):
jobs = [Job('eric', JobStatus.UNKNOWN),
Job('terry', JobStatus.OK),
Job('graham', JobStatus.DISABLED)]
reader = StatusReader()
result = reader.status(jobs)
self.assertEqual(JobStatus.OK, result)
def test_status_JobsListIsNone_ReturnUnknown(self):
reader = StatusReader()
result = reader.status(None)
self.assertEqual(JobStatus.UNKNOWN, result)
class DefaultMessageComposerTests(TestCase):
def test_message_EmptyJobs_ReturnCorrectMessage(self):
jobs = []
composer = DefaultMessageComposer()
result = composer.message(jobs)
self.assertEqual('No jobs', result)
def test_message_AllJobsOk_ReturnCorrectMessage(self):
jobs = [Job('eric', JobStatus.OK),
Job('terry', JobStatus.OK)]
composer = DefaultMessageComposer()
result = composer.message(jobs)
self.assertEqual('All active jobs pass', result)
def test_message_OneFailingJob_ReturnCorrectMessage(self):
jobs = [Job('eric', JobStatus.OK),
Job('terry', JobStatus.FAILING)]
composer = DefaultMessageComposer()
result = composer.message(jobs)
self.assertEqual('FAILING:\nterry', result)
def test_message_TwoFailingJobs_ReturnCorrectMessage(self):
jobs = [Job('eric', JobStatus.FAILING),
Job('terry', JobStatus.FAILING)]
composer = DefaultMessageComposer()
result = composer.message(jobs)
self.assertEqual('FAILING:\neric\nterry', result)
def test_message_JobsListIsNone_ReturnUnknown(self):
composer = DefaultMessageComposer()
result = composer.message(None)
self.assertEqual('', result)
| mit |
madscatt/zazmol | src/python/test_sasmol/test_system/test_intg_system_Atom_load.py | 2 | 1850 | '''
SASMOL: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from unittest import main
from mocker import Mocker, MockerTestCase, ANY, ARGS, KWARGS
import numpy,copy
from sasmol.test_sasmol.utilities import env
import sasmol.system as system
import os
DataPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','sasmol','system')+os.path.sep
floattype=os.environ['SASMOL_FLOATTYPE']
import warnings; warnings.filterwarnings('ignore')
class Test_intg_system_Atom_load(MockerTestCase):
def setUp(self):
        self.o=system.Atom(3,DataPath+'1CRN-3frames.pdb')
def assert_list_almost_equal(self,a,b,places=5):
if (len(a)!=len(b)):
raise TypeError
else:
for i in range(len(a)):
if isinstance(a[i],(int,float,numpy.generic)):
if (numpy.isnan(a[i]) and numpy.isnan(b[i])): continue
self.assertAlmostEqual(a[i],b[i],places)
else:
self.assert_list_almost_equal(a[i],b[i],places)
def test_1CRN_3frames(self):
'''
test a regular pdb file with 3 frame
'''
pass
def tearDown(self):
pass
if __name__ == '__main__':
main()
| gpl-3.0 |
liutairan/pyGCS | dev/PreDownloadMap.py | 1 | 1358 | #!/usr/bin/env python
'''
MIT License
Copyright (c) 2017 Tairan Liu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__author__ = "Tairan Liu"
__copyright__ = "Copyright 2017, Tairan Liu"
__credits__ = ["Tairan Liu", "Other Supporters"]
__license__ = "MIT"
__version__ = "0.4-dev"
__maintainer__ = "Tairan Liu"
__email__ = "liutairan2012@gmail.com"
__status__ = "Development"
| mit |
eugene7646/autopsy | thirdparty/gstreamer/1.0/x86_64/lib/gst-validate-launcher/python/launcher/httpserver.py | 10 | 3990 | #!/usr/bin/env python3
#
# Copyright (c) 2013,Thibault Saunier <thibault.saunier@collabora.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301, USA.
import os
import time
from . import loggable
import subprocess
import sys
import urllib.request, urllib.error, urllib.parse
logcat = "httpserver"
class HTTPServer(loggable.Loggable):
""" Class to run a SimpleHttpServer in a process."""
def __init__(self, options):
loggable.Loggable.__init__(self)
self.options = options
self._process = None
self._logsfile = None
def _check_is_up(self, timeout=60):
""" Check if the server is up, running a simple test based on wget. """
start = time.time()
while True:
try:
response = urllib.request.urlopen('http://127.0.0.1:%s' % (
self.options.http_server_port))
return True
except urllib.error.URLError as e:
pass
if time.time() - start > timeout:
return False
time.sleep(1)
def start(self):
""" Start the server in a subprocess """
self._logsfile = open(os.path.join(self.options.logsdir,
"httpserver.logs"), 'w+')
if self.options.http_server_dir is not None:
if self._check_is_up(timeout=2):
return True
print("Starting Server")
try:
self.debug("Launching http server")
cmd = "%s %s %d %s" % (sys.executable, os.path.join(os.path.dirname(__file__),
"RangeHTTPServer.py"),
self.options.http_server_port,
self.options.http_bandwith,
)
curdir = os.path.abspath(os.curdir)
os.chdir(self.options.http_server_dir)
# cmd = "twistd -no web --path=%s -p %d" % (
# self.options.http_server_dir, self.options.http_server_port)
self.debug(
"Launching server: %s (logs in %s)", cmd, self._logsfile)
self._process = subprocess.Popen(cmd.split(" "),
stderr=self._logsfile,
stdout=self._logsfile)
os.chdir(curdir)
self.debug("Launched http server")
            # Dirty way to avoid eating too much CPU...
# good enough for us anyway.
time.sleep(1)
if self._check_is_up():
print("Started")
return True
else:
print("Failed starting server")
self._process.terminate()
self._process = None
except OSError as ex:
print("Failed starting server")
self.warning(logcat, "Could not launch server %s" % ex)
return False
def stop(self):
""" Stop the server subprocess if running. """
if self._process:
self._process.terminate()
self._process = None
self.debug("Server stopped")
| apache-2.0 |
augustyip/puppy | puppy.py | 1 | 1406 | #!/usr/bin/env python3
"""
Copyright (c) August Yip (http://august.hk/)
"""
import time
import curses
from curses import wrapper
from datetime import datetime
import sys, traceback
from lib.core.config import config
from lib.source.yahoo import yahoo
from lib.source.tencent import tencent
def main() :
try :
# Initialize curses
stdscr = curses.initscr()
# Turn off echoing of keys, and enter cbreak mode,
# where no buffering is performed on keyboard input
curses.noecho()
curses.cbreak()
# In keypad mode, escape sequences for special keys
# (like the cursor keys) will be interpreted andh
# a special value like curses.KEY_LEFT will be returned
stdscr.keypad(1)
while True:
stdscr.addstr(0, 0, 'puppy - version: 0.9.3.14159265')
stdscr.addstr(1, 0, 'current data source: ' + config.config['Default']['source'] + ', last refresh: ' + time.strftime('%H:%M:%S'))
stdscr.addstr(23, 0, 'Press q Key to Exit...')
stdscr.refresh()
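            # Assumed quit handling (absent from the original): poll for a
            # key once per second and leave the refresh loop on 'q'.
            stdscr.timeout(1000)
            if stdscr.getch() == ord('q'):
                break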
# Set everything back to normal
stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
# Terminate curses
except :
# In event of error, restore terminal to sane state.
stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
traceback.print_exc()
finally :
curses.endwin()
if __name__ == '__main__':
main()
| gpl-2.0 |
martinbuc/missionplanner | Lib/sqlite3/__init__.py | 42 | 1199 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/__init__.py: the pysqlite2 package.
#
# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
def _():
import sys
if sys.platform == 'cli':
import clr
clr.AddReference('IronPython.SQLite')
_()
del _
from dbapi2 import *
| gpl-3.0 |
mscuthbert/abjad | abjad/tools/sequencetools/partition_sequence_by_weights.py | 2 | 11403 | # -*- encoding: utf-8 -*-
from abjad.tools import mathtools
def partition_sequence_by_weights(
sequence,
weights,
cyclic=False,
overhang=False,
allow_part_weights=Exact,
):
r'''Partitions `sequence` by `weights` exactly.
::
>>> sequence = [3, 3, 3, 3, 4, 4, 4, 4, 5]
.. container:: example
**Example 1.** Partition sequence once by weights exactly without
overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [3, 9],
... cyclic=False,
... overhang=False,
... )
[[3], [3, 3, 3]]
.. container:: example
**Example 2.** Partition sequence once by weights exactly with
overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [3, 9],
... cyclic=False,
... overhang=True,
... )
[[3], [3, 3, 3], [4, 4, 4, 4, 5]]
.. container:: example
**Example 3.** Partition sequence cyclically by weights exactly
without overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [12],
... cyclic=True,
... overhang=False,
... )
[[3, 3, 3, 3], [4, 4, 4]]
.. container:: example
**Example 4.** Partition sequence cyclically by weights exactly with
overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [12],
... cyclic=True,
... overhang=True,
... )
[[3, 3, 3, 3], [4, 4, 4], [4, 5]]
::
>>> sequence = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5]
.. container:: example
**Example 1.** Partition sequence once by weights at most without
overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [10, 4],
... cyclic=False,
... overhang=False,
... allow_part_weights=Less,
... )
[[3, 3, 3], [3]]
.. container:: example
**Example 2.** Partition sequence once by weights at most with
overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [10, 4],
... cyclic=False,
... overhang=True,
... allow_part_weights=Less,
... )
[[3, 3, 3], [3], [4, 4, 4, 4, 5, 5]]
.. container:: example
**Example 3.** Partition sequence cyclically by weights at most
without overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [10, 5],
... cyclic=True,
... overhang=False,
... allow_part_weights=Less,
... )
[[3, 3, 3], [3], [4, 4], [4], [4, 5], [5]]
.. container:: example
**Example 4.** Partition sequence cyclically by weights at most
with overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [10, 5],
... cyclic=True,
... overhang=True,
... allow_part_weights=Less,
... )
[[3, 3, 3], [3], [4, 4], [4], [4, 5], [5]]
::
>>> sequence = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5]
.. container:: example
**Example 1.** Partition sequence once by weights at least without
overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [10, 4],
... cyclic=False,
... overhang=False,
... allow_part_weights=More,
... )
[[3, 3, 3, 3], [4]]
.. container:: example
**Example 2.** Partition sequence once by weights at least with
overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [10, 4],
... cyclic=False,
... overhang=True,
... allow_part_weights=More,
... )
[[3, 3, 3, 3], [4], [4, 4, 4, 5, 5]]
.. container:: example
**Example 3.** Partition sequence cyclically by weights at least
without overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [10, 4],
... cyclic=True,
... overhang=False,
... allow_part_weights=More,
... )
[[3, 3, 3, 3], [4], [4, 4, 4], [5]]
.. container:: example
**Example 4.** Partition sequence cyclically by weights at least with
overhang:
::
>>> sequencetools.partition_sequence_by_weights(
... sequence,
... [10, 4],
... cyclic=True,
... overhang=True,
... allow_part_weights=More,
... )
[[3, 3, 3, 3], [4], [4, 4, 4], [5], [5]]
    Returns list of sequence objects.
'''
from abjad.tools import sequencetools
if allow_part_weights == Exact:
candidate = sequencetools.split_sequence(
sequence,
weights,
cyclic=cyclic,
overhang=overhang,
)
flattened_candidate = sequencetools.flatten_sequence(candidate)
if flattened_candidate == sequence[:len(flattened_candidate)]:
return candidate
else:
message = 'can not partition exactly.'
raise PartitionError(message)
elif allow_part_weights == More:
if not cyclic:
return _partition_sequence_once_by_weights_at_least(
sequence, weights, overhang=overhang)
else:
return _partition_sequence_cyclically_by_weights_at_least(
sequence, weights, overhang=overhang)
elif allow_part_weights == Less:
if not cyclic:
return _partition_sequence_once_by_weights_at_most(
sequence,
weights,
overhang=overhang,
)
else:
return _partition_sequence_cyclically_by_weights_at_most(
sequence,
weights,
overhang=overhang,
)
else:
message = 'not an ordinal value constant: {!r}.'
message = message.format(allow_part_weights)
raise ValueError(message)
def _partition_sequence_once_by_weights_at_least(
sequence,
weights,
overhang=False,
):
result = []
current_part = []
l_copy = sequence[:]
for num_weight, target_weight in enumerate(weights):
while True:
try:
x = l_copy.pop(0)
except IndexError:
if num_weight + 1 == len(weights):
if current_part:
result.append(current_part)
break
message = 'too few elements in sequence.'
raise PartitionError(message)
current_part.append(x)
if target_weight <= mathtools.weight(current_part):
result.append(current_part)
current_part = []
break
if l_copy:
if overhang:
result.append(l_copy)
return result
def _partition_sequence_cyclically_by_weights_at_least(
sequence,
weights,
overhang=False,
):
l_copy = sequence[:]
result = []
current_part = []
target_weight_index = 0
len_weights = len(weights)
while l_copy:
target_weight = weights[target_weight_index % len_weights]
x = l_copy.pop(0)
current_part.append(x)
if target_weight <= mathtools.weight(current_part):
result.append(current_part)
current_part = []
target_weight_index += 1
assert not l_copy
if current_part:
if overhang:
result.append(current_part)
return result
def _partition_sequence_once_by_weights_at_most(
sequence,
weights,
overhang=False,
):
l_copy = sequence[:]
result = []
current_part = []
for target_weight in weights:
while True:
try:
x = l_copy.pop(0)
except IndexError:
message = 'too few elements in sequence.'
raise PartitionError(message)
current_weight = mathtools.weight(current_part)
candidate_weight = current_weight + mathtools.weight([x])
if candidate_weight < target_weight:
current_part.append(x)
elif candidate_weight == target_weight:
current_part.append(x)
result.append(current_part)
current_part = []
break
elif target_weight < candidate_weight:
if current_part:
result.append(current_part)
current_part = []
l_copy.insert(0, x)
break
else:
message = 'elements in sequence too big.'
raise PartitionError(message)
else:
message = 'candidate and target weights must compare.'
raise ValueError(message)
if overhang:
left_over = current_part + l_copy
if left_over:
result.append(left_over)
return result
def _partition_sequence_cyclically_by_weights_at_most(
sequence,
weights,
overhang=False,
):
result = []
current_part = []
current_target_weight_index = 0
current_target_weight = weights[current_target_weight_index]
l_copy = sequence[:]
while l_copy:
current_target_weight = \
weights[current_target_weight_index % len(weights)]
x = l_copy.pop(0)
current_part_weight = mathtools.weight(current_part)
candidate_part_weight = current_part_weight + mathtools.weight([x])
if candidate_part_weight < current_target_weight:
current_part.append(x)
elif candidate_part_weight == current_target_weight:
current_part.append(x)
result.append(current_part)
current_part = []
current_target_weight_index += 1
elif current_target_weight < candidate_part_weight:
if current_part:
l_copy.insert(0, x)
result.append(current_part)
current_part = []
current_target_weight_index += 1
else:
message = 'elements in sequence too big.'
raise PartitionError(message)
else:
message = 'candidate and target weights must compare.'
raise ValueError(message)
if current_part:
if overhang:
result.append(current_part)
return result | gpl-3.0 |
MatthewWilkes/django | django/test/testcases.py | 49 | 58069 | from __future__ import unicode_literals
import difflib
import errno
import json
import os
import posixpath
import socket
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.core.urlresolvers import clear_url_caches, set_urlconf
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils import six
from django.utils.decorators import classproperty
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import (
unquote, urlparse, urlsplit, urlunsplit,
)
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
query['sql'] for query in self.captured_queries
)
)
)
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _CursorFailure(object):
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
# Tests shouldn't be allowed to query the database since
# this base class doesn't enforce any isolation.
allow_database_queries = False
@classmethod
def setUpClass(cls):
super(SimpleTestCase, cls).setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
@classmethod
def tearDownClass(cls):
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = connection.cursor.wrapped
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super(SimpleTestCase, cls).tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False))
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* If the class has a 'urls' attribute, replace ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
self._urlconf_setup()
mail.outbox = []
def _urlconf_setup(self):
if hasattr(self, 'urls'):
warnings.warn(
"SimpleTestCase.urls is deprecated and will be removed in "
"Django 1.10. Use @override_settings(ROOT_URLCONF=...) "
"in %s instead." % self.__class__.__name__,
RemovedInDjango110Warning, stacklevel=2)
set_urlconf(None)
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
"""
self._urlconf_teardown()
def _urlconf_teardown(self):
if hasattr(self, '_old_root_urlconf'):
set_urlconf(None)
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
A context manager that temporarily applies changes to a list setting and
reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
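# A minimal usage sketch (an illustrative assumption, not part of this
# module): inside a SimpleTestCase subclass, both helpers act as context
# managers, e.g.
#
#     class ExampleSettingsTests(SimpleTestCase):
#         def test_overrides(self):
#             with self.settings(USE_TZ=False):
#                 ...  # code here sees settings.USE_TZ == False
#             with self.modify_settings(
#                     INSTALLED_APPS={'append': 'myapp'}):
#                 ...  # 'myapp' is appended for the duration of the block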
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix='',
fetch_redirect_response=True):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request (use fetch_redirect_response=False to check
such links without fetching them).
"""
if host is not None:
warnings.warn(
"The host argument is deprecated and no longer used by assertRedirects",
RemovedInDjango20Warning, stacklevel=2
)
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
self.assertEqual(response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected:"
" Response code was %d (expected %d)" %
(response.redirect_chain[0][1], status_code))
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
if fetch_redirect_response:
redirect_response = response.client.get(path, QueryDict(query),
secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s':"
" response code was %d (expected %d)" %
(path, redirect_response.status_code, target_status_code))
if url != expected_url:
# For temporary backwards compatibility, try to compare with a relative url
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
relative_url = urlunsplit(('', '', e_path, e_query, e_fragment))
if url == relative_url:
warnings.warn(
"assertRedirects had to strip the scheme and domain from the "
"expected URL, as it was always added automatically to URLs "
"before Django 1.9. Please update your expected URLs by "
"removing the scheme and domain.",
RemovedInDjango20Warning, stacklevel=2)
expected_url = relative_url
self.assertEqual(url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" %
(url, expected_url))
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response.charset)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None,
"Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None,
"Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of %s in response"
" (expected %d)" % (real_count, text_repr, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0,
msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to "
"render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors)))
elif field in context[form].fields:
self.fail(msg_prefix + "The field '%s' on form '%s'"
" in context %d contains no errors" %
(field, form, i))
else:
self.fail(msg_prefix + "The form '%s' in context %d"
" does not contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the"
" response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Asserts that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err,
repr(field_errors)))
elif field in context[formset].forms[form_index].fields:
self.fail(msg_prefix + "The field '%s' "
"on formset '%s', form %d in "
"context %d contains no errors" %
(field, formset, form_index, i))
else:
self.fail(msg_prefix + "The formset '%s', form %d in "
"context %d does not contain the field '%s'" %
(formset, form_index, i, field))
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(len(non_field_errors) == 0,
msg_prefix + "The formset '%s', form %d in "
"context %d does not contain any non-field "
"errors." % (formset, form_index, i))
self.assertTrue(err in non_field_errors,
msg_prefix + "The formset '%s', form %d "
"in context %d does not contain the "
"non-field error '%s' "
"(actual errors: %s)" %
(formset, form_index, i, err,
repr(non_field_errors)))
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(len(non_form_errors) == 0,
msg_prefix + "The formset '%s' in "
"context %d does not contain any "
"non-form errors." % (formset, i))
self.assertTrue(err in non_form_errors,
msg_prefix + "The formset '%s' in context "
"%d does not contain the "
"non-form error '%s' (actual errors: %s)" %
(formset, i, err, repr(non_form_errors)))
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render "
"the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not
None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, ', '.join(template_names)))
if count is not None:
self.assertEqual(template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)." %
(template_name, count, template_names.count(template_name)))
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
@contextmanager
def _assert_raises_message_cm(self, expected_exception, expected_message):
with self.assertRaises(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(cm.exception))
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
Asserts that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
# callable_obj was a documented kwarg in Django 1.8 and older.
callable_obj = kwargs.pop('callable_obj', None)
if callable_obj:
warnings.warn(
'The callable_obj kwarg is deprecated. Pass the callable '
'as a positional argument instead.', RemovedInDjango20Warning
)
elif len(args):
callable_obj = args[0]
args = args[1:]
cm = self._assert_raises_message_cm(expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
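# A hedged usage sketch showing both calling conventions described above,
# using a built-in exception so the message text is known:
#
#     with self.assertRaisesMessage(ValueError, 'invalid literal'):
#         int('abc')
#     # callable form (callable passed as a positional argument):
#     self.assertRaisesMessage(ValueError, 'invalid literal', int, 'abc')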
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args,
**dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages,
error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs),
fieldclass)
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None,
'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None,
'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, needle, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are equal.
Usual JSON non-significant whitespace rules apply as the heavy
lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
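# A minimal usage sketch: expected_data may be a JSON string or an
# already-parsed object, since only string inputs are run through
# json.loads above, e.g.
#
#     self.assertJSONEqual('{"a": 1, "b": [1, 2]}', {'a': 1, 'b': [1, 2]})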
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are not equal.
Usual JSON non-significant whitespace rules apply as the heavy
lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(
six.text_type(xml1).splitlines(),
six.text_type(xml2).splitlines(),
)
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
# Since tests will be wrapped in a transaction, or serialized if they
# are not available, we allow queries to be run.
allow_database_queries = True
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
registry to these applications, then firing post_migrate -- it must
run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
super(TransactionTestCase, self)._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
raise
@classmethod
def _databases_names(cls, include_mirrors=True):
# If the test case has a multi_db=True flag, act on all databases,
# including mirrors or not. Otherwise, just on the default DB.
if getattr(cls, 'multi_db', False):
return [alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# If we need to provide replica initial data from migrated apps,
# then do so.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_migrate isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super(TransactionTestCase, self)._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None
or (
# Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
# For example qs.iterator() could be passed as qs, but it does not
# have 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
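# A hedged usage sketch (the Book model is an illustrative assumption):
# assertNumQueries works both as a context manager and with a callable,
# assuming each ORM call below issues exactly one query, e.g.
#
#     with self.assertNumQueries(2):
#         Book.objects.count()
#         list(Book.objects.all())
#     self.assertNumQueries(1, Book.objects.count)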
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Helper method to open atomic blocks for multiple databases"""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened through the previous method"""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
if not connections_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{
'verbosity': 0,
'commit': False,
'database': db_name,
})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
@classmethod
def tearDownClass(cls):
if connections_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super(TestCase, cls).tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase"""
pass
def _should_reload_connections(self):
if connections_support_transactions():
return False
return super(TestCase, self)._should_reload_connections()
def _fixture_setup(self):
if not connections_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
self._rollback_atomics(self.atomics)
class CheckCondition(object):
"""Descriptor class for deferred condition checking"""
def __init__(self, cond_func):
self.cond_func = cond_func
def __get__(self, obj, objtype):
return self.cond_func()
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
test_item.__unittest_skip__ = CheckCondition(condition)
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(*features):
"""
Skip a test if a database has at least one of the named features.
"""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features)
)
def skipUnlessDBFeature(*features):
"""
Skip a test unless a database has all the named features.
"""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features)
)
def skipUnlessAnyDBFeature(*features):
"""
Skip a test unless a database has any of the named features.
"""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features)
)
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
Just a regular WSGIRequestHandler except it doesn't log to the standard
output any of the requests received, so as to not clutter the output for
the tests' results.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(FSFilesHandler, self).__init__()
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super(FSFilesHandler, self).get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""
Thread for running a live http server while the tests are running.
"""
def __init__(self, host, possible_ports, static_handler, connections_override=None):
self.host = host
self.port = None
self.possible_ports = possible_ports
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
# Go through the list of possible ports, hoping that we can find
# one that is free to use for the WSGI server.
for index, port in enumerate(self.possible_ports):
try:
self.httpd = self._create_server(port)
except socket.error as e:
if (index + 1 < len(self.possible_ports) and
e.errno == errno.EADDRINUSE):
# This port is already in use, so we go on and try with
# the next one in the list.
continue
else:
# Either none of the given ports are free or the error
# is something else than "Address already in use". So
# we let that error bubble up to the main thread.
raise
else:
# A free port was found.
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
def _create_server(self, port):
return WSGIServer((self.host, port), QuietWSGIRequestHandler)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
class LiveServerTestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase but also launches a live
http server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
Note that it inherits from TransactionTestCase instead of TestCase because
the threads do not share the same transactions (unless using in-memory
sqlite) and each thread needs to commit all their transactions so that the
other thread can see the changes.
"""
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (
cls.server_thread.host, cls.server_thread.port)
@classmethod
def setUpClass(cls):
super(LiveServerTestCase, cls).setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
# Launch the live server's thread
specified_address = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081-8179')
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = list(map(int, port_range.split('-')))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
msg = 'Invalid address ("%s") for live server.' % specified_address
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
cls.server_thread = cls._create_server_thread(host, possible_ports, connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, host, possible_ports, connections_override):
return LiveServerThread(
host,
possible_ports,
cls.static_handler,
connections_override=connections_override,
)
@classmethod
def _tearDownClassInternal(cls):
# There may not be a 'server_thread' attribute if setUpClass() for some
# reasons has raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
cls.server_thread.join()
# Restore sqlite in-memory database connections' non-shareability
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
super(LiveServerTestCase, cls).tearDownClass()
class SerializeMixin(object):
"""
Mixin to enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass / tearDownClass.
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super(SerializeMixin, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(SerializeMixin, cls).tearDownClass()
cls._lockfile.close()
| bsd-3-clause |
zenodo/zenodo | zenodo/modules/exporter/tasks.py | 2 | 1328 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Celery tasks for export jobs."""
from __future__ import absolute_import, print_function
from celery import shared_task
from flask import current_app
from .api import Exporter
@shared_task
def export_job(job_id=None):
"""Export job."""
job_definition = current_app.extensions['invenio-exporter'].job(job_id)
Exporter(**job_definition).run()
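# A hedged usage sketch (the job id is an illustrative assumption): as a
# Celery shared task, export_job is normally queued asynchronously, e.g.
#
#     from zenodo.modules.exporter.tasks import export_job
#     export_job.delay(job_id='records')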
| gpl-2.0 |
jlandmann/oggm | oggm/workflow.py | 2 | 6116 | """Wrappers for the single tasks, multi processor handling."""
from __future__ import division
# Built ins
import logging
import os
from shutil import rmtree
import collections
# External libs
import pandas as pd
import multiprocessing as mp
# Locals
import oggm
from oggm import cfg, tasks, utils
# MPI
try:
import oggm.mpi as ogmpi
_have_ogmpi = True
except ImportError:
_have_ogmpi = False
# Module logger
log = logging.getLogger(__name__)
# Multiprocessing Pool
_mp_pool = None
def _init_pool_globals(_cfg_contents, global_lock):
cfg.unpack_config(_cfg_contents)
utils.lock = global_lock
def init_mp_pool(reset=False):
"""Necessary because at import time, cfg might be uninitialized"""
global _mp_pool
if _mp_pool and not reset:
return _mp_pool
cfg_contents = cfg.pack_config()
global_lock = mp.Manager().Lock()
mpp = cfg.PARAMS['mp_processes']
if mpp == -1:
try:
mpp = int(os.environ['SLURM_JOB_CPUS_PER_NODE'])
log.info('Multiprocessing: using slurm allocated '
'processors (N={})'.format(mpp))
except KeyError:
mpp = mp.cpu_count()
log.info('Multiprocessing: using all available '
'processors (N={})'.format(mpp))
else:
log.info('Multiprocessing: using the requested number of '
'processors (N={})'.format(mpp))
_mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
initargs=(cfg_contents, global_lock))
return _mp_pool
def _merge_dicts(*dicts):
r = {}
for d in dicts:
r.update(d)
return r
class _pickle_copier(object):
"""Pickleable alternative to functools.partial,
which is not pickleable in Python 2 and thus doesn't work
with multiprocessing."""
def __init__(self, func, kwargs):
self.call_func = func
self.out_kwargs = kwargs
def __call__(self, gdir):
try:
if isinstance(gdir, collections.Sequence):
gdir, gdir_kwargs = gdir
gdir_kwargs = _merge_dicts(self.out_kwargs, gdir_kwargs)
return self.call_func(gdir, **gdir_kwargs)
else:
return self.call_func(gdir, **self.out_kwargs)
except Exception as e:
try:
err_msg = '{0}: exception occurred while processing task ' \
'{1}'.format(gdir.rgi_id, self.call_func.__name__)
raise RuntimeError(err_msg) from e
except AttributeError:
pass
raise
def reset_multiprocessing():
"""Reset multiprocessing state
Call this if you changed configuration parameters mid-run and need them to
be re-propagated to child processes.
"""
global _mp_pool
if _mp_pool:
_mp_pool.terminate()
_mp_pool = None
def execute_entity_task(task, gdirs, **kwargs):
"""Execute a task on gdirs.
If you asked for multiprocessing, it will do it.
Parameters
----------
task : function
the entity task to apply
gdirs : list
the list of oggm.GlacierDirectory to process.
Optionally, each list element can be a tuple, with the first element
being the ``oggm.GlacierDirectory``, and the second element a dict that
will be passed to the task function as ``**kwargs``.
"""
if task.__dict__.get('global_task', False):
return task(gdirs, **kwargs)
pc = _pickle_copier(task, kwargs)
if _have_ogmpi:
if ogmpi.OGGM_MPI_COMM is not None:
ogmpi.mpi_master_spin_tasks(pc, gdirs)
return
if cfg.PARAMS['use_multiprocessing']:
mppool = init_mp_pool()
mppool.map(pc, gdirs, chunksize=1)
else:
for gdir in gdirs:
pc(gdir)
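# A hedged usage sketch (the entity pairing is an illustrative assumption):
# per-glacier kwargs can be supplied as (gdir, kwargs) tuples, as the
# docstring above describes, e.g.
#
#     execute_entity_task(tasks.glacier_masks, gdirs)
#     execute_entity_task(tasks.define_glacier_region,
#                         [(gdir, dict(entity=entity))
#                          for gdir, entity in zip(gdirs, entities)])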
def init_glacier_regions(rgidf, reset=False, force=False):
"""Very first task to do (always).
Set reset=True in order to delete the content of the directories.
"""
if reset and not force:
reset = utils.query_yes_no('Delete all glacier directories?')
# if reset delete also the log directory
if reset:
fpath = os.path.join(cfg.PATHS['working_dir'], 'log')
if os.path.exists(fpath):
rmtree(fpath)
gdirs = []
new_gdirs = []
for _, entity in rgidf.iterrows():
gdir = oggm.GlacierDirectory(entity, reset=reset)
if not os.path.exists(gdir.get_filepath('dem')):
new_gdirs.append((gdir, dict(entity=entity)))
gdirs.append(gdir)
execute_entity_task(tasks.define_glacier_region, new_gdirs)
return gdirs
def gis_prepro_tasks(gdirs):
"""Helper function: run all flowlines tasks."""
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.compute_downstream_lines,
tasks.initialize_flowlines,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction
]
for task in task_list:
execute_entity_task(task, gdirs)
def climate_tasks(gdirs):
"""Helper function: run all climate tasks."""
# I don't know where this logic is best placed...
if ('climate_file' in cfg.PATHS) and \
os.path.exists(cfg.PATHS['climate_file']):
_process_task = tasks.process_custom_climate_data
else:
# OK, so use the default CRU "high-resolution" method
_process_task = tasks.process_cru_data
execute_entity_task(_process_task, gdirs)
# Then, only global tasks
tasks.compute_ref_t_stars(gdirs)
tasks.distribute_t_stars(gdirs)
def inversion_tasks(gdirs):
"""Helper function: run all bed inversion tasks."""
# Init
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# Global task
tasks.optimize_inversion_params(gdirs)
# Inversion for all glaciers
execute_entity_task(tasks.volume_inversion, gdirs)
# Filter
execute_entity_task(tasks.filter_inversion_output, gdirs)
| gpl-3.0 |