repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
w1ll1am23/home-assistant | homeassistant/helpers/sun.py | 4 | 4076 | """Helpers for sun events."""
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
from homeassistant.core import HomeAssistant, callback
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
if TYPE_CHECKING:
import astral
# Key under hass.data where astral Location objects are cached (see get_astral_location).
DATA_LOCATION_CACHE = "astral_location_cache"
# Solar events whose time does not depend on the observer's elevation.
ELEVATION_AGNOSTIC_EVENTS = ("noon", "midnight")
@callback
@bind_hass
def get_astral_location(
    hass: HomeAssistant,
) -> tuple[astral.location.Location, astral.Elevation]:
    """Return a cached astral Location plus the configured elevation.

    Locations are cached in hass.data keyed on the configuration values
    they were built from, so repeated calls with an unchanged configuration
    reuse the same object.
    """
    from astral import LocationInfo  # pylint: disable=import-outside-toplevel
    from astral.location import Location  # pylint: disable=import-outside-toplevel

    # The cache key covers every config value the Location depends on.
    info = (
        "",
        "",
        str(hass.config.time_zone),
        hass.config.latitude,
        hass.config.longitude,
    )

    cache = hass.data.setdefault(DATA_LOCATION_CACHE, {})
    if info not in cache:
        cache[info] = Location(LocationInfo(*info))
    return cache[info], hass.config.elevation
@callback
@bind_hass
def get_astral_event_next(
    hass: HomeAssistant,
    event: str,
    utc_point_in_time: datetime.datetime | None = None,
    offset: datetime.timedelta | None = None,
) -> datetime.datetime:
    """Calculate the next specified solar event.

    Thin wrapper that resolves the configured location/elevation and
    delegates to get_location_astral_event_next.
    """
    loc, elev = get_astral_location(hass)
    return get_location_astral_event_next(loc, elev, event, utc_point_in_time, offset)
@callback
def get_location_astral_event_next(
    location: astral.location.Location,
    elevation: astral.Elevation,
    event: str,
    utc_point_in_time: datetime.datetime | None = None,
    offset: datetime.timedelta | None = None,
) -> datetime.datetime:
    """Calculate the next specified solar event.

    Walks forward one local calendar day at a time until the event time
    (plus ``offset``) lies strictly after ``utc_point_in_time``.
    """
    if offset is None:
        offset = datetime.timedelta()
    if utc_point_in_time is None:
        utc_point_in_time = dt_util.utcnow()
    # "noon"/"midnight" do not take an observer elevation argument.
    kwargs = {"local": False}
    if event not in ELEVATION_AGNOSTIC_EVENTS:
        kwargs["observer_elevation"] = elevation
    # Start at yesterday (-1): the local date of utc_point_in_time can be
    # ahead of UTC, so yesterday's event may still be in the future in UTC.
    mod = -1
    while True:
        try:
            next_dt: datetime.datetime = (
                getattr(location, event)(
                    dt_util.as_local(utc_point_in_time).date()
                    + datetime.timedelta(days=mod),
                    **kwargs,
                )
                + offset
            )
            if next_dt > utc_point_in_time:
                return next_dt
        except ValueError:
            # astral raises ValueError when the event does not occur on that
            # date (e.g. polar day/night); just try the next day.
            pass
        mod += 1
@callback
@bind_hass
def get_astral_event_date(
    hass: HomeAssistant,
    event: str,
    date: datetime.date | datetime.datetime | None = None,
) -> datetime.datetime | None:
    """Calculate the astral event time for the specified date.

    Returns None when the event never occurs on that date (e.g. polar
    day/night).
    """
    location, elevation = get_astral_location(hass)

    # Normalize the input to a plain local date.
    if date is None:
        date = dt_util.now().date()
    elif isinstance(date, datetime.datetime):
        date = dt_util.as_local(date).date()

    # "noon"/"midnight" do not take an observer elevation argument.
    kwargs = {"local": False}
    if event not in ELEVATION_AGNOSTIC_EVENTS:
        kwargs["observer_elevation"] = elevation

    try:
        return getattr(location, event)(date, **kwargs)  # type: ignore
    except ValueError:
        # Event never occurs for specified date.
        return None
@callback
@bind_hass
def is_up(
    hass: HomeAssistant, utc_point_in_time: datetime.datetime | None = None
) -> bool:
    """Calculate if the sun is currently up."""
    point = utc_point_in_time if utc_point_in_time is not None else dt_util.utcnow()

    # The sun is up exactly when the next sunset arrives before the next sunrise.
    next_sunrise = get_astral_event_next(hass, SUN_EVENT_SUNRISE, point)
    next_sunset = get_astral_event_next(hass, SUN_EVENT_SUNSET, point)
    return next_sunrise > next_sunset
| apache-2.0 |
datachand/h2o-3 | py2/testdir_single_jvm/test_summary_stepping.py | 21 | 6220 | import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_util, h2o_print as h2p, h2o_summ
from h2o_test import OutputObj
print "Same as test_summary2_uniform.py but with exponential distribution on the data"
# Only attempt the scipy cross-check for users known to have scipy installed.
DO_TRY_SCIPY = False
if getpass.getuser() == 'kevin' or getpass.getuser() == 'jenkins':
    DO_TRY_SCIPY = True
# Compare the median (0.5 quantile) rather than the 0.99 quantile.
DO_MEDIAN = True
# Max number of bins h2o uses for its summary/quantile approximation.
MAX_QBINS = 1000
def write_syn_dataset(csvPathname, rowCount, colCount, rangeMin, rangeMax, SEED=None):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
expectedMin = None
expectedMax = None
# step = (float(rangeMax) - float(rangeMin)) / (MAX_QBINS/2)
# cover the full range with the # of rows we're going to create
step = (float(rangeMax) - float(rangeMin)) / rowCount
for row in range(rowCount):
rowData = []
# Do a fixed distribution, with knowledge of the 1000-way binning in h2o
# Divide by 500, so it's every other bin
# WEIRD: didn't get any percentiles with 5 entries all the same
for col in range(colCount):
# add the col in there for a little variance
value = col + (rangeMin + (row * step))
print "value:", value
# None doesn't dominate for max, it doesn for min
if expectedMin is None:
expectedMin = value
else:
expectedMin = min(expectedMin, value)
if expectedMax is None:
expectedMax = value
else:
expectedMax = max(expectedMax, value)
rowData.append(value)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
return (expectedMin, expectedMax)
class Basic(unittest.TestCase):
    """Summary/quantile stepping test: h2o percentiles vs. scipy on synthetic data."""
    def tearDown(self):
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init()
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_summary_stepping(self):
        """Parse generated CSVs and compare h2o summary percentiles to expected min/max."""
        SYNDATASETS_DIR = h2o.make_syn_dir()
        # (rowCount, colCount, hex_key, rangeMin, rangeMax, expected) per trial.
        tryList = [
            # co.label, (min, 25th, 50th, 75th, max)
            # parse setup error
            # (1, 1, 'x.hex', 1, 20000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', 1, 20000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', 1, 20000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', 1, 20000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', -5000, 0, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', .4900, .5000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', -.5000, -.4900, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', 490, 500, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', -500, -490, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', 49000, 50000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', -50000, -49000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', 4900, 5000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', -5000, -4900, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', -100000, 100000, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', -1, 1, ['C1', None, None, None, None, None]),
            (5, 1, 'x.hex', 1, 100, ['C1', None, None, None, None, None]),
        ]
        timeoutSecs = 10
        trial = 1
        n = h2o.nodes[0]
        lenNodes = len(h2o.nodes)
        x = 0
        timeoutSecs = 60
        for (rowCount, colCount, hex_key, rangeMin, rangeMax, expected) in tryList:
            SEEDPERFILE = random.randint(0, sys.maxint)
            x += 1
            csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
            csvPathname = SYNDATASETS_DIR + '/' + csvFilename
            print "Creating random", csvPathname
            (expectedMin, expectedMax) = write_syn_dataset(csvPathname, rowCount, colCount, rangeMin, rangeMax, SEED=SEEDPERFILE)
            print "expectedMin:", expectedMin, "expectedMax:", expectedMax
            # add 5% for fp errors?
            maxErr = ((expectedMax - expectedMin)/1000) * 1.05
            # Slots 1 and 5 are the expected min/max; quartiles stay None (unchecked).
            expected[1] = expectedMin
            expected[5] = expectedMax
            csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
            parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30, doSummary=False)
            pA = h2o_cmd.ParseObj(parseResult, expectedNumRows=rowCount, expectedNumCols=colCount)
            print pA.numRows, pA.numCols, pA.parse_key
            iA = h2o_cmd.InspectObj(pA.parse_key,
                expectedNumRows=rowCount, expectedNumCols=colCount, expectedMissinglist=[])
            print iA.missingList, iA.labelList, iA.numRows, iA.numCols
            # column 0 not used here
            assert len(expected) == 6
            co = h2o_cmd.runSummary(key=hex_key, column=0, expected=expected[1:], maxDelta=maxErr)
            trial += 1
            h2o.nodes[0].remove_all_keys()
            scipyCol = 0
            print "maxErr", maxErr
            if co.label!='' and expected[scipyCol]:
                # don't do for enums
                # also get the median with a sort (h2o_summ.percentileOnSortedlist()
                h2o_summ.quantile_comparisons(
                    csvPathnameFull,
                    skipHeader=False,
                    col=scipyCol,
                    datatype='float',
                    quantile=0.5 if DO_MEDIAN else 0.99,
                    h2oSummary2=co.percentiles[5 if DO_MEDIAN else 9],
                    # h2oQuantilesApprox=qresult_single,
                    # h2oQuantilesExact=qresult,
                    h2oSummary2MaxErr=maxErr,
                )
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
minirop/wren | test/benchmark/binary_trees.py | 14 | 1316 | # The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
#
# contributed by Antoine Pitrou
# modified by Dominique Wahli
# modified by Heinrich Acker
from __future__ import print_function
import time
# Map "range" to an efficient range in both Python 2 and 3.
try:
    range = xrange  # Python 2: prefer the lazy xrange
except NameError:
    # Python 3: xrange is gone and the builtin range is already lazy.
    pass
def make_tree(item, depth):
    """Build a binary tree as nested (item, left, right) tuples.

    A leaf is (item, None, None); an inner node's children carry
    2*item - 1 and 2*item respectively.
    """
    if not depth:
        return item, None, None
    doubled = item + item
    left = make_tree(doubled - 1, depth - 1)
    right = make_tree(doubled, depth - 1)
    return item, left, right
def check_tree(node):
    """Return the tree checksum: item + check(left) - check(right); leaves return item."""
    item, left, right = node
    result = item
    if left:
        result += check_tree(left) - check_tree(right)
    return result
min_depth = 4
max_depth = 12
stretch_depth = max_depth + 1
# NOTE(review): time.clock() was removed in Python 3.8 -- fine for the
# Python 2 runner this benchmark targets, but would need time.perf_counter()
# on modern Python 3.
start = time.clock()
print("stretch tree of depth %d check:" % stretch_depth, check_tree(make_tree(0, stretch_depth)))
# Keep one tree alive for the whole run to exercise GC behaviour.
long_lived_tree = make_tree(0, max_depth)
iterations = 2 ** max_depth
for depth in range(min_depth, stretch_depth, 2):
    check = 0
    for i in range(1, iterations + 1):
        check += check_tree(make_tree(i, depth)) + check_tree(make_tree(-i, depth))
    print("%d trees of depth %d check:" % (iterations * 2, depth), check)
    # Deeper trees get proportionally fewer iterations.
    iterations //= 4
print("long lived tree of depth %d check:" % max_depth, check_tree(long_lived_tree))
print("elapsed: " + str(time.clock() - start)) | mit |
joebowen/LogMyRocket_API | LogMyRocket/libraries/sys_packages/botocore/botocore/vendored/requests/utils.py | 618 | 21334 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
# Silence pyflakes: RequestsCookieJar is imported only for re-export.
_hush_pyflakes = (RequestsCookieJar,)
# Candidate netrc file names probed in the user's home directory.
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Return ``d.items()`` for mapping-like objects, otherwise ``d`` unchanged."""
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort length of *o*.

    Tries, in order: ``len(o)``, an explicit ``o.len`` attribute, the size
    of the file behind ``o.fileno()``, and finally ``len(o.getvalue())``
    for in-memory streams. Returns None when no strategy applies.
    """
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            descriptor = o.fileno()
        except io.UnsupportedOperation:
            # Not backed by a real file descriptor (e.g. BytesIO); fall through.
            pass
        else:
            return os.fstat(descriptor).st_size

    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())
    return None
def get_netrc_auth(url):
    """Returns the Requests tuple auth for a given url from netrc.

    Returns (login, password) when the url's host has an entry in the
    user's ~/.netrc or ~/_netrc; returns None in every failure case.
    """
    try:
        from netrc import netrc, NetrcParseError
        netrc_path = None
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return
            if os.path.exists(loc):
                netrc_path = loc
                break
        # Abort early if there isn't one.
        if netrc_path is None:
            return
        ri = urlparse(url)
        # Strip port numbers from netloc
        host = ri.netloc.split(':')[0]
        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                # netrc gives (login, account, password); prefer account when
                # login is empty.
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth
            pass
    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object.

    Uses the object's ``name`` attribute when it is a real string and not a
    pseudo-file marker such as ``<stdin>``; returns None otherwise.
    """
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, basestring):
        return None
    if name[0] == '<' or name[-1] == '>':
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Return an OrderedDict built from *value*, or None for None input.

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    Raises ValueError for scalar types that cannot be treated as
    key/value pairs (str, bytes, bool, int).
    """
    if value is None:
        return None

    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]

    Raises ValueError for scalar types (str, bytes, bool, int); returns
    None for None input.
    """
    # Fix: collections.Mapping moved to collections.abc in Python 3.3 and
    # the old alias was removed in Python 3.10, which made this function
    # raise AttributeError on modern interpreters. Fall back for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping

    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse a comma-separated list header as described by RFC 2068 Section 2.

    Elements may be quoted-strings (which can themselves contain commas);
    surrounding quotes are removed after parsing. Items may appear multiple
    times and case is preserved.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    """
    # Strip surrounding quotes and unescape quoted-string items.
    return [
        unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"' else item
        for item in _parse_list_header(value)
    ]
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse a list of key=value pairs (RFC 2068 Section 2) into a dict.

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    Keys without a value map to None:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    result = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            result[item] = None
            continue
        name, _, val = item.partition('=')
        # Unquote quoted-string values.
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        result[name] = val
    return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).

    This mimics what browsers actually do rather than strict RFC unquoting
    (IE, for example, uploads filenames like "C:\foo\bar.txt").

    :param value: the header value to unquote.
    :param is_filename: when True, UNC paths (leading \\) keep their quotes'
                        content verbatim so the double backslash survives.
    """
    if not (value and value[0] == value[-1] == '"'):
        # Not a quoted-string; return untouched.
        return value
    inner = value[1:-1]
    # UNC filenames are returned as-is; running the replace sequence on them
    # would collapse the leading double slash. See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    Later cookies with the same name overwrite earlier ones.

    :param cj: CookieJar object to extract cookies from.
    """
    return {cookie.name: cookie.value for cookie in cj}
def add_dict_to_cookiejar(cj, cookie_dict):
    """Merge a key/value dictionary into a CookieJar and return the jar.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    Scans for HTML meta charset declarations, meta http-equiv pragmas and
    XML prologs, in that order.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    Falls back to ISO-8859-1 for text/* content with no explicit charset.

    :param headers: dictionary to extract encoding from.
    """
    content_type = headers.get('content-type')
    if not content_type:
        return None
    # NOTE(review): the cgi module is deprecated since Python 3.11 and removed
    # in 3.13 -- this call needs migrating (e.g. email.message) on modern Python.
    content_type, params = cgi.parse_header(content_type)
    if 'charset' in params:
        return params['charset'].strip("'\"")
    if 'text' in content_type:
        # RFC 2616 default for text/* when no charset parameter is present.
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator of byte chunks using ``r.encoding``.

    Chunks are passed through unchanged when no encoding is known.
    """
    if r.encoding is None:
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush any bytes the incremental decoder is still buffering.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over consecutive slices of *string* of at most *slice_length*."""
    pos = 0
    total = len(string)
    while pos < total:
        end = pos + slice_length
        yield string[pos:end]
        pos = end
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    Returns the raw bytes unchanged when no charset is available at all.
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)
    # Collected for debugging; nothing reads it after a failed decode.
    tried_encodings = []
    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)
    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)
    # Fall back:
    try:
        # When encoding is None, str() raises TypeError and we return bytes.
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape percent-escapes that encode unreserved characters.

    All reserved, illegal and non-ASCII escape sequences are left encoded.
    Raises InvalidURL on a malformed percent-escape.
    """
    parts = uri.split('%')
    decoded = [parts[0]]
    for part in parts[1:]:
        hex_pair = part[0:2]
        if len(hex_pair) == 2 and hex_pair.isalnum():
            try:
                char = chr(int(hex_pair, 16))
            except ValueError:
                raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
            if char in UNRESERVED_SET:
                decoded.append(char + part[2:])
                continue
        # Keep the escape (or stray '%') exactly as it appeared.
        decoded.append('%' + part)
    return ''.join(decoded)
def requote_uri(uri):
    """Re-quote the given URI.

    Passes the URI through an unquote/quote cycle so it ends up fully and
    consistently quoted.
    """
    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only unreserved characters, then re-quote just the illegal
        # ones (reserved, unreserved and '%' are kept as-is).
        return quote(unquote_unreserved(uri), safe=safe_with_percent)
    except InvalidURL:
        # A bad percent-escape made unquoting fail; quote the URI as given,
        # this time escaping any stray '%' characters as well.
        return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
    """
    Return True when IPv4 address *ip* falls inside CIDR network *net*.

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
    """
    netaddr, bits = net.split('/')
    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
    network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
    return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
    """
    Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0
    """
    host_bits = 32 - mask
    bits = 0xffffffff ^ ((1 << host_bits) - 1)
    return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
    """Return True when *string_ip* is accepted by ``socket.inet_aton``."""
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the cidr format in no_proxy variable.

    Accepts exactly one '/', a mask between 1 and 32, and an address that
    socket.inet_aton can parse.
    """
    if string_network.count('/') != 1:
        return False

    address, _, mask_str = string_network.partition('/')
    try:
        mask = int(mask_str)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.

    Consults the no_proxy/NO_PROXY environment variable (matching both
    CIDR entries and host-name suffixes) and the platform proxy settings.
    """
    # Environment variables may be set in either case.
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc
    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = no_proxy.replace(' ', '').split(',')
        # Host part only; the port (if any) is ignored for CIDR matching.
        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
        else:
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True
    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False
    if bypass:
        return True
    return False
def get_environ_proxies(url):
    """Return a dict of environment proxies, or {} when the URL bypasses them."""
    return {} if should_bypass_proxies(url) else getproxies()
def default_user_agent(name="python-requests"):
    """Return a string representing the default user agent.

    Format: "<name>/<version> <implementation>/<impl-version> <system>/<release>".
    """
    _implementation = platform.python_implementation()

    if _implementation == 'CPython':
        _implementation_version = platform.python_version()
    elif _implementation == 'PyPy':
        version_info = sys.pypy_version_info
        _implementation_version = '%s.%s.%s' % (version_info.major,
                                                version_info.minor,
                                                version_info.micro)
        if version_info.releaselevel != 'final':
            _implementation_version += version_info.releaselevel
    elif _implementation in ('Jython', 'IronPython'):
        _implementation_version = platform.python_version()  # Complete Guess
    else:
        _implementation_version = 'Unknown'

    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'

    return " ".join(['%s/%s' % (name, __version__),
                     '%s/%s' % (_implementation, _implementation_version),
                     '%s/%s' % (p_system, p_release)])
def default_headers():
    """Return the default CaseInsensitiveDict of request headers."""
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
    """
    links = []
    strip_chars = " '\""

    for segment in re.split(", *<", value):
        url, _, params = segment.partition(";")

        link = {"url": url.strip("<> '\"")}

        for param in params.split(";"):
            # Exactly one '=' is required; anything else ends this link's
            # parameter list (same behaviour as the unbounded split).
            try:
                key, param_value = param.split("=")
            except ValueError:
                break
            link[key.strip(strip_chars)] = param_value.strip(strip_chars)

        links.append(link)

    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the UTF codec of a JSON byte string.

    JSON always starts with two ASCII characters, so the encoding can be
    deduced from the count and position of NUL bytes in the first four
    bytes; an explicit BOM, when present, is honoured first.

    :param data: JSON document as bytes.
    :return: codec name, or None when the encoding could not be determined.
    """
    sample = data[:4]
    # Fix: the original compared against codecs.BOM32_BE, a deprecated alias
    # for the UTF-16 *big-endian* BOM, so a genuine UTF-32-BE BOM was never
    # recognised. Use the correct UTF-32 constants.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.

    Does not replace a present scheme with the one provided as an argument.
    """
    parsed = urlparse(url, new_scheme)
    netloc, path = parsed.netloc, parsed.path

    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    if not netloc:
        netloc, path = path, netloc

    return urlunparse((parsed.scheme, netloc, path,
                       parsed.params, parsed.query, parsed.fragment))
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password. Returns ('', '') when no auth is present."""
    parsed = urlparse(url)

    try:
        # unquote(None) raises TypeError when no credentials are embedded.
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """
    Given a string object, regardless of type, returns a representation of that
    string in the native string type, encoding and decoding where necessary.
    This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    # Native string is bytes on Python 2, text on Python 3.
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part
    """
    parsed = urlparse(url)
    netloc, path = parsed.netloc, parsed.path

    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc

    # Drop any user:password@ prefix, and rebuild with an empty fragment.
    netloc = netloc.rsplit('@', 1)[-1]
    return urlunparse((parsed.scheme, netloc, path,
                       parsed.params, parsed.query, ''))
| gpl-3.0 |
ahmadRagheb/goldenHR | erpnext/hr/doctype/leave_block_list/leave_block_list.py | 62 | 2084 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class LeaveBlockList(Document):
	def validate(self):
		"""Reject the document when any block date appears more than once."""
		seen = []
		for row in self.get("leave_block_list_dates"):
			# date is not repeated
			if row.block_date in seen:
				frappe.msgprint(_("Date is repeated") + ":" + row.block_date, raise_exception=1)
			seen.append(row.block_date)
@frappe.whitelist()
def get_applicable_block_dates(from_date, to_date, employee=None,
	company=None, all_lists=False):
	"""Collect blocked dates (with reasons) from every applicable block list."""
	block_dates = []
	for block_list in get_applicable_block_lists(employee, company, all_lists):
		rows = frappe.db.sql("""select block_date, reason
		from `tabLeave Block List Date` where parent=%s
		and block_date between %s and %s""", (block_list, from_date, to_date),
			as_dict=1)
		block_dates.extend(rows)
	return block_dates
def get_applicable_block_lists(employee=None, company=None, all_lists=False):
	"""Return names of Leave Block Lists applying to the employee/company.

	Defaults to the employee linked to the session user and that
	employee's company. Lists where the user is explicitly allowed are
	skipped unless all_lists is True.
	"""
	block_lists = []
	if not employee:
		employee = frappe.db.get_value("Employee", {"user_id":frappe.session.user})
		if not employee:
			return []
	if not company:
		company = frappe.db.get_value("Employee", employee, "company")
	def add_block_list(block_list):
		# Skip lists that explicitly allow this user (unless all_lists).
		if block_list:
			if all_lists or not is_user_in_allow_list(block_list):
				block_lists.append(block_list)
	# per department
	department = frappe.db.get_value("Employee",employee, "department")
	if department:
		block_list = frappe.db.get_value("Department", department, "leave_block_list")
		add_block_list(block_list)
	# global
	for block_list in frappe.db.sql_list("""select name from `tabLeave Block List`
		where applies_to_all_departments=1 and company=%s""", company):
		add_block_list(block_list)
	# De-duplicate (a list can come from both department and company scope).
	return list(set(block_lists))
def is_user_in_allow_list(block_list):
	"""Return True when the session user is on the block list's allow list."""
	allowed_users = frappe.db.sql_list("""select allow_user
		from `tabLeave Block List Allow` where parent=%s""", block_list)
	return frappe.session.user in allowed_users
| gpl-3.0 |
partofthething/home-assistant | homeassistant/components/lifx_legacy/light.py | 1 | 7711 | """
Support for the LIFX platform that implements lights.
This is a legacy platform, included because the current lifx platform does
not yet support Windows.
"""
import logging
import liffylights
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
LightEntity,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_time_change
from homeassistant.util.color import (
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
_LOGGER = logging.getLogger(__name__)
BYTE_MAX = 255
CONF_BROADCAST = "broadcast"
CONF_SERVER = "server"
SHORT_MAX = 65535
# Color temperature bounds: TEMP_MIN/TEMP_MAX in kelvin,
# TEMP_MIN_HASS/TEMP_MAX_HASS in mireds (Home Assistant units).
TEMP_MAX = 9000
TEMP_MAX_HASS = 500
TEMP_MIN = 2500
TEMP_MIN_HASS = 154
SUPPORT_LIFX = (
    SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR | SUPPORT_TRANSITION
)
# server/broadcast options are accepted but deprecated.
PLATFORM_SCHEMA = vol.All(
    cv.deprecated(CONF_SERVER),
    cv.deprecated(CONF_BROADCAST),
    PLATFORM_SCHEMA.extend(
        {vol.Optional(CONF_SERVER): cv.string, vol.Optional(CONF_BROADCAST): cv.string}
    ),
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the LIFX platform."""
    _LOGGER.warning(
        "The LIFX Legacy platform is deprecated and will be removed in "
        "Home Assistant Core 2021.6.0. Use the LIFX integration instead."
    )

    lifx_library = LIFX(
        add_entities, config.get(CONF_SERVER), config.get(CONF_BROADCAST)
    )

    # Register our poll service: refresh state twice a minute.
    track_time_change(hass, lifx_library.poll, second=[10, 40])

    lifx_library.probe()
class LIFX:
    """Bridge between the liffylights library callbacks and HA light entities."""
    def __init__(self, add_entities_callback, server_addr=None, broadcast_addr=None):
        """Initialize the liffylights listener and entity registry."""
        self._devices = []
        self._add_entities_callback = add_entities_callback
        self._liffylights = liffylights.LiffyLights(
            self.on_device, self.on_power, self.on_color, server_addr, broadcast_addr
        )
    def find_bulb(self, ipaddr):
        """Return the known bulb with the given IP address, or None."""
        bulb = None
        for device in self._devices:
            if device.ipaddr == ipaddr:
                bulb = device
                break
        return bulb
    def on_device(self, ipaddr, name, power, hue, sat, bri, kel):
        """Handle a device report: add a new bulb or update an existing one."""
        bulb = self.find_bulb(ipaddr)
        if bulb is None:
            _LOGGER.debug(
                "new bulb %s %s %d %d %d %d %d", ipaddr, name, power, hue, sat, bri, kel
            )
            bulb = LIFXLight(self._liffylights, ipaddr, name, power, hue, sat, bri, kel)
            self._devices.append(bulb)
            self._add_entities_callback([bulb])
        else:
            _LOGGER.debug(
                "update bulb %s %s %d %d %d %d %d",
                ipaddr,
                name,
                power,
                hue,
                sat,
                bri,
                kel,
            )
            bulb.set_power(power)
            bulb.set_color(hue, sat, bri, kel)
            bulb.schedule_update_ha_state()
    def on_color(self, ipaddr, hue, sat, bri, kel):
        """Handle a color change reported by a known bulb."""
        bulb = self.find_bulb(ipaddr)
        if bulb is not None:
            bulb.set_color(hue, sat, bri, kel)
            bulb.schedule_update_ha_state()
    def on_power(self, ipaddr, power):
        """Handle a power state change reported by a known bulb."""
        bulb = self.find_bulb(ipaddr)
        if bulb is not None:
            bulb.set_power(power)
            bulb.schedule_update_ha_state()
    def poll(self, now):
        """Time-change callback: re-probe all bulbs for fresh state."""
        self.probe()
    def probe(self, address=None):
        """Probe for bulbs (all of them, or only the given address)."""
        self._liffylights.probe(address)
class LIFXLight(LightEntity):
    """Representation of a LIFX light.

    State (power/color) is pushed in by the LIFX manager via set_power() /
    set_color() and propagated with schedule_update_ha_state(), so the
    entity itself never polls.
    """
    def __init__(self, liffy, ipaddr, name, power, hue, saturation, brightness, kelvin):
        """Initialize the light."""
        _LOGGER.debug("LIFXLight: %s %s", ipaddr, name)
        self._liffylights = liffy
        self._ip = ipaddr
        self.set_name(name)
        self.set_power(power)
        self.set_color(hue, saturation, brightness, kelvin)
    @property
    def should_poll(self):
        """No polling needed for LIFX light."""
        return False
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def ipaddr(self):
        """Return the IP address of the device."""
        return self._ip
    @property
    def hs_color(self):
        """Return the hs value."""
        # LIFX stores 16-bit hue/saturation; scale to HA's 0..360 / 0..100.
        return (self._hue / 65535 * 360, self._sat / 65535 * 100)
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        # Map the 16-bit LIFX brightness (0..65535) down to HA's 0..255.
        brightness = int(self._bri / (BYTE_MAX + 1))
        _LOGGER.debug("brightness: %d", brightness)
        return brightness
    @property
    def color_temp(self):
        """Return the color temperature."""
        temperature = color_temperature_kelvin_to_mired(self._kel)
        _LOGGER.debug("color_temp: %d", temperature)
        return temperature
    @property
    def is_on(self):
        """Return true if device is on."""
        _LOGGER.debug("is_on: %d", self._power)
        return self._power != 0
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_LIFX
    def turn_on(self, **kwargs):
        """Turn the device on.

        Optional kwargs: transition (seconds), hs_color, brightness (0..255)
        and color_temp (mired); anything not supplied keeps its last value.
        """
        if ATTR_TRANSITION in kwargs:
            # liffylights expects the fade time in milliseconds.
            fade = int(kwargs[ATTR_TRANSITION] * 1000)
        else:
            fade = 0
        if ATTR_HS_COLOR in kwargs:
            hue, saturation = kwargs[ATTR_HS_COLOR]
            # Scale HA's 0..360 / 0..100 values back up to 16-bit LIFX units.
            hue = hue / 360 * 65535
            saturation = saturation / 100 * 65535
        else:
            hue = self._hue
            saturation = self._sat
        if ATTR_BRIGHTNESS in kwargs:
            # HA brightness 0..255 -> 16-bit LIFX brightness.
            brightness = kwargs[ATTR_BRIGHTNESS] * (BYTE_MAX + 1)
        else:
            brightness = self._bri
        if ATTR_COLOR_TEMP in kwargs:
            kelvin = int(color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP]))
        else:
            kelvin = self._kel
        _LOGGER.debug(
            "turn_on: %s (%d) %d %d %d %d %d",
            self._ip,
            self._power,
            hue,
            saturation,
            brightness,
            kelvin,
            fade,
        )
        if self._power == 0:
            # Light is off: apply the color instantly, then fade the power up,
            # so the previous color is never shown during the transition.
            self._liffylights.set_color(
                self._ip, hue, saturation, brightness, kelvin, 0
            )
            self._liffylights.set_power(self._ip, 65535, fade)
        else:
            self._liffylights.set_color(
                self._ip, hue, saturation, brightness, kelvin, fade
            )
    def turn_off(self, **kwargs):
        """Turn the device off."""
        if ATTR_TRANSITION in kwargs:
            fade = int(kwargs[ATTR_TRANSITION] * 1000)
        else:
            fade = 0
        _LOGGER.debug("turn_off: %s %d", self._ip, fade)
        self._liffylights.set_power(self._ip, 0, fade)
    def set_name(self, name):
        """Set name of the light."""
        self._name = name
    def set_power(self, power):
        """Set power state value."""
        _LOGGER.debug("set_power: %d", power)
        # Stored as a bool; is_on compares against 0, which still works.
        self._power = power != 0
    def set_color(self, hue, sat, bri, kel):
        """Set color state values (raw 16-bit LIFX units / Kelvin)."""
        self._hue = hue
        self._sat = sat
        self._bri = bri
        self._kel = kel
| mit |
lgnq/RPI8836 | main.py | 1 | 14217 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import define
import tw8836
import spi
import fontosd
import bmposd
import sx1505
import string
import time
import sys
img = [
#0x000000,
0x0015C0,
0x0044A0,
0x007390,
0x00A2A0,
0x00D1B0,
0x0100B0,
0x013020,
0x015F60,
0x018EE0,
0x01BEA0,
0x01EED0,
0x021EC0,
0x024F00,
0x027F60,
0x02AFB0,
0x02E080,
0x031160,
0x034250,
0x037350,
0x03A410,
0x03D4C0,
0x0405B0,
0x043680,
0x046710,
0x0497D0,
0x04C8C0,
0x04F970,
0x0529F0,
0x055A80,
0x058B60,
0x05BBE0,
0x05ECA0,
0x061D00,
0x064D70,
0x067DA0,
0x06AE10,
0x06DE50,
0x070EA0,
0x073F40,
0x076FC0,
0x079FD0,
0x07D020,
0x080020,
0x083020,
0x085FE0,
0x088FD0,
0x08BF90,
0x08EF40,
0x091E80,
0x094DE0,
0x097D10,
0x09ABD0,
0x09DAD0,
0x0A0970,
0x0A37F0,
0x0A7050,
0x0AA840,
0x0AE030,
0x0B1820,
0x0B5000,
0x0B87E0,
0x0BBEE0,
0x0BF5C0,
0x0C2CD0,
0x0C6360,
0x0C9AC0,
0x0CD1E0,
0x0D0910,
0x0D3FF0,
0x0D76B0,
0x0DAD70,
0x0DE490,
0x0E1B90,
0x0E5240,
0x0E88F0,
0x0EBFD0,
0x0EF700,
0x0F2E60,
0x0F6550,
0x0F9BD0,
0x0FD3C0,
0x100B60,
0x104370,
0x107B50,
0x10B300,
0x10EAC0,
0x111930,
0x1147A0,
0x117630,
0x11A4B0,
0x11D380,
0x1202E0,
0x123230,
0x126150,
0x1290B0,
0x12C070,
0x12F000,
0x131FD0,
0x134FB0,
0x137FD0,
0x13B000,
0x13DFD0,
0x141010,
0x144050,
0x1470A0,
0x14A0D0,
0x14D150,
0x1501D0,
0x153250,
0x1562D0,
0x159380,
0x15C410,
0x15F480,
0x162500,
0x1655A0,
0x168650,
0x16B6D0,
0x16E6F0,
0x171700,
0x174730,
0x177780,
0x17A770,
0x17D7C0,
0x1807A0,
0x183780,
0x186780,
0x189700,
0x18C680,
0x18F5E0,
0x192570,
0x1954A0,
0x1983C0,
0x19B2F0,
0x19E1D0,
0x1A10E0,
0x1A3FD0,
0x1A6EF0,
0x1A9DC0,
0x1ACCA0,
0x1AFB50,
]
img_norle = [
0x000000,
0x03F890,
0x07F120,
0x0BE9B0,
0x0FE240,
0x13DAD0,
0x17D360,
0x1BCBF0,
0x1FC480,
0x23BD10,
0x27B5A0,
0x2BAE30,
0x2FA6C0,
0x339F50,
0x3797E0,
0x3B9070,
0x3F8900,
0x438190,
0x477A20,
0x4B72B0,
0x4F6B40,
0x5363D0,
0x575C60,
0x5B54F0,
0x5F4D80,
0x634610,
0x673EA0,
0x6B3730,
0x6F2FC0,
0x732850,
]
if __name__ == '__main__':
    length = len(sys.argv)
    # --- 'help' (or no arguments): print usage and I2C speed hints ----------
    if (length == 1 or sys.argv[1] == 'help' or sys.argv[1] == 'h' or sys.argv[1] == '?'):
        print 'this is tw8836 demo using raspberrypi 2'
        print '================= help ================='
        print 'help, h, ? - print this help message'
        print 'init, i - init TW8836'
        print 'show, s - show image at address 0xXXXXXX'
        print ' - s winno address sx sy alpha level offset'
        print 'detect, d - input source detect'
        print '================= note ================='
        print 'please set the I2C speed > 400Kbps'
        print 'sudo modprobe -r i2c_bcm2708'
        print 'sudo modprobe i2c-bcm2708 baudrate=1000000'
        print '========================================'
        print 'sudo cat /sys/module/i2c_bcm2708/parameters/baudrate'
        print 'lsmod'
        # NOTE(review): bare `exit` is a no-op; `sys.exit()` was probably meant,
        # so execution falls through to the interactive prompt below.
        exit
    # --- 'init': detect the chip, bring up SPI, PLLs and the panel ----------
    elif (sys.argv[1] == 'init' or sys.argv[1] == 'i'):
        tw8836.detect()
        # NOTE(review): this branch calls spi_read_mode_check()/spi_read_mode()
        # while the interactive 'init' below calls spi_read_mode_get()/_set();
        # confirm which pair the tw8836 module actually provides.
        while tw8836.SPI_READ_SLOW != tw8836.spi_read_mode_check():
            tw8836.spi_read_mode(tw8836.SPI_READ_SLOW)
        spi.init()
        while tw8836.SPI_READ_QUAD_IO != tw8836.spi_read_mode_check():
            tw8836.spi_read_mode(tw8836.SPI_READ_QUAD_IO)
        try:
            print 'Enable LVDS RX'
            sx1505.lvds_rx_onoff(define.ON)
            print 'FPPWC ON'
            sx1505.fppwc_onoff(define.ON)
            #print 'FPBIAS ON'
            #sx1505.fpbias_onoff(define.ON)
        except IOError:
            print '\033[1;40;31mNot\033[0m find SX1505 at address 0x20'
        tw8836.init()
        tw8836.sspll1_set_freq(72000000)
        tw8836.sspll2_set_freq(108000000)
        print 'SSPLL1 frequency is:', tw8836.sspll1_get_freq()
        print 'SSPLL2 frequency is:', tw8836.sspll2_get_freq()
        print 'SPI CLK is:', tw8836.spi_clk_get()
        tw8836.rb_swap(define.ON)
        bmposd.onoff_control(define.ON)
    # --- 'animation': play the first 50 frames from the `img` address table -
    elif sys.argv[1] == 'animation' or sys.argv[1] == 'a':
        img_spi_addr = 0x600000
        img_list = img
        bmposd.lut_load(bmposd.WINNO1, img_spi_addr + img_list[3], 0)
        for i in range(0, 50):
            tw8836.wait_vblank(1)
            #bmposd.all_win_off()
            bmposd.win_onoff(1, define.OFF)
            bmposd.image_display(bmposd.WINNO1, img_spi_addr + img_list[i], 0, 0, bmposd.PIXEL_ALPHA_MODE, 0x08, 0)
            tw8836.wait_vblank(2)
    # --- 'show': display one image; optional argv parameters (all hex) ------
    elif sys.argv[1] == 'show' or sys.argv[1] == 's':
        winno = bmposd.WINNO1
        img_spi_addr = 0x100000
        sx = 0
        sy = 0
        alpha = bmposd.NO_ALPHA_MODE
        level = 0x000000
        offset = 0
        spi.spi_flash_detect()
        if length > 2:
            winno = int(sys.argv[2], 16)
        if length > 3:
            img_spi_addr = int(sys.argv[3], 16)
        print 'show a picture at address', hex(img_spi_addr)
        bmposd.win_onoff(winno, define.OFF)
        bmposd.lut_load(winno, img_spi_addr, offset)
        if length > 4:
            sx = int(sys.argv[4], 16)
        if length > 5:
            sy = int(sys.argv[5], 16)
        if length > 6:
            # NOTE(review): 'golable' looks like a typo for 'global', kept as-is
            # because it is part of the accepted command-line vocabulary.
            if sys.argv[6] == 'pixel' or sys.argv[6] == 'p':
                print 'set pixel alpha blending'
                alpha = bmposd.PIXEL_ALPHA_MODE
            elif sys.argv[6] == 'golable' or sys.argv[6] == 'g':
                print 'set golable alpha blending'
                alpha = bmposd.GLOBAL_ALPHA_MODE
        if length > 7:
            level = int(sys.argv[7], 16)
        if length > 8:
            offset = int(sys.argv[8], 16)
        tw8836.wait_vblank(1)
        bmposd.image_display(winno, img_spi_addr, sx, sy, alpha, level, offset)
    # --- 'detect': report the input source status ---------------------------
    elif sys.argv[1] == 'detect' or sys.argv[1] == 'd':
        print 'detect the input status'
        print '======================='
        tw8836.detect_inputs()
    # --- default: endless ping-pong animation demo (never returns) ----------
    else:
        #bmposd.devalue_set()
        #bmposd.lut_load(bmposd.WINNO8, 0x100000, 0)
        #bmposd.image_display(bmposd.WINNO8, 0x100000, 0, 0, bmposd.GLOBAL_ALPHA_MODE, 0x61, 0)
        img_spi_addr = 0x600000
        img_list = img
        bmposd.lut_load(bmposd.WINNO1, img_spi_addr+img_list[0], 0)
        # Graded per-index alpha for the first few LUT entries.
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 0, 0x30)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 1, 0x40)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 2, 0x50)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 3, 0x60)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 4, 0x7F)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 5, 0x7F)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 6, 0x7F)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 7, 0x7F)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 8, 0x7F)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 9, 0x7F)
        bmposd.pixel_alpha_set(bmposd.WINNO1, 0, 10, 0x7F)
        bmposd.image_display(bmposd.WINNO1, img_spi_addr+img_list[0], 200, 0, bmposd.PIXEL_ALPHA_MODE, 0x8, 0)
        #bmposd.win_start_addr_set(bmposd.WINNO1, img_spi_addr+0x0015C0+16+256*4)
        #bmposd.rlc_set(bmposd.WINNO1, 8, 8)
        #bmposd.win_start_addr_set(bmposd.WINNO1, img_spi_addr+16+256*4)
        bmposd.color_fill_onoff(2, define.ON)
        bmposd.win_onoff(2, define.ON)
        # Forward then backward through the frame table, forever.
        while (1):
            for i in range(0, len(img_list)):
                tw8836.wait_vblank(1)
                bmposd.rlc_set(bmposd.WINNO1, 8, 8)
                # +16 header, +256*4 palette: skip to the pixel data.
                bmposd.win_start_addr_set(bmposd.WINNO1, img_spi_addr+img_list[i]+16+256*4)
                bmposd.color_fill_set(bmposd.WINNO2, 200, 200, i, 20, 0)
                #bmposd.image_display(bmposd.WINNO1, img_spi_addr+d, 200, 0, bmposd.PIXEL_ALPHA_MODE, 0x8, 0)
                #time.sleep(0.01)
            for i in range(0, len(img_list)):
                tw8836.wait_vblank(1)
                n = len(img_list)-1-i
                bmposd.win_start_addr_set(bmposd.WINNO1, img_spi_addr+img_list[n]+16+256*4)
                bmposd.color_fill_set(bmposd.WINNO2, 200, 200, n, 20, 0)
    # --- interactive command prompt (reached by every branch except the demo)
    while 1:
        cmd = raw_input("\033[1;40;32mcmd>>\033[0m")
        cmd = cmd.lower()
        cmd_list = cmd.split(' ')
        if cmd == 'exit' or cmd == 'quit' or cmd == 'q':
            sys.exit()
        elif cmd == 'help' or cmd == 'h' or cmd == '?':
            print 'this is tw8836 demo using raspberrypi 2'
            print '================= help ================='
            print 'help, h, ? - print this help message'
            print 'init, i - init TW8836'
            print 'show, s - show image at address 0xXXXXXX'
            print ' - s winno address sx sy alpha level offset'
            print 'detect, d - input source detect'
            print 'exit, quit, q - exit the program'
            print '================= note ================='
            print 'please set the I2C speed > 400Kbps'
            print 'sudo modprobe -r i2c_bcm2708'
            print 'sudo modprobe i2c-bcm2708 baudrate=1000000'
            print '========================================'
            print 'sudo cat /sys/module/i2c_bcm2708/parameters/baudrate'
            print 'lsmod'
        elif cmd == 'init' or cmd == 'i':
            tw8836.detect()
            while tw8836.SPI_READ_SLOW != tw8836.spi_read_mode_get():
                tw8836.spi_read_mode_set(tw8836.SPI_READ_SLOW)
            spi.init()
            while tw8836.SPI_READ_QUAD_IO != tw8836.spi_read_mode_get():
                tw8836.spi_read_mode_set(tw8836.SPI_READ_QUAD_IO)
            try:
                print 'Enable LVDS RX'
                sx1505.lvds_rx_onoff(define.ON)
                print 'FPPWC ON'
                sx1505.fppwc_onoff(define.ON)
                #print 'FPBIAS ON'
                #sx1505.fpbias_onoff(define.ON)
            except IOError:
                print '\033[1;40;31mNot\033[0m find SX1505 at address 0x20'
            tw8836.init()
            tw8836.sspll1_set_freq(72000000)
            tw8836.sspll2_set_freq(108000000)
            print 'SSPLL1 frequency is:', tw8836.sspll1_get_freq()
            print 'SSPLL2 frequency is:', tw8836.sspll2_get_freq()
            clk = tw8836.spi_clk_get()
            print 'SPI CLK =', clk, 'Hz'
            tw8836.rb_swap(define.ON)
            bmposd.onoff_control(define.ON)
        elif cmd == 'detect' or cmd == 'd':
            print 'detect the input status'
            print '======================='
            tw8836.detect_inputs()
        elif cmd_list[0] == 'show' or cmd_list[0] == 's':
            winno = bmposd.WINNO1
            img_spi_addr = 0x100000
            sx = 0
            sy = 0
            alpha = bmposd.NO_ALPHA_MODE
            level = 0x000000
            offset = 0
            # Missing arguments are prompted for interactively.
            if len(cmd_list) == 1:
                winno = input("winno = ")
                img_spi_addr = input("address = ")
            elif len(cmd_list) == 2:
                winno = string.atoi(cmd_list[1], 16)
                print 'winno = ' + cmd_list[1]
                img_spi_addr = input("address = ")
            elif len(cmd_list) == 3:
                winno = string.atoi(cmd_list[1], 16)
                img_spi_addr = string.atoi(cmd_list[2], 16)
            print hex(img_spi_addr)
            bmposd.win_onoff(winno, define.OFF)
            bmposd.lut_load(winno, img_spi_addr, offset)
            tw8836.wait_vblank(1)
            bmposd.image_display(winno, img_spi_addr, sx, sy, alpha, level, offset)
        elif cmd_list[0] == 'osd':
            if len(cmd_list) != 3:
                print 'osd n on/off'
            else:
                winno = string.atoi(cmd_list[1], 16)
                if cmd_list[2] == 'on':
                    bmposd.win_onoff(winno, 1)
                elif cmd_list[2] == 'off':
                    bmposd.win_onoff(winno, 0)
                else:
                    print 'osd n on/off'
        elif cmd_list[0] == 'spi':
            if len(cmd_list) == 1:
                print 'spi operation'
            elif len(cmd_list) == 2:
                if cmd_list[1] == 'd':
                    spi.spi_flash_detect()
                elif cmd_list[1] == 'clk':
                    clk = tw8836.spi_clk_get()
                    print 'spi clk =', clk, 'Hz'
                elif cmd_list[1] == 'mode':
                    mode = tw8836.spi_read_mode_get()
                    print 'spi read mode is', mode
                elif cmd_list[1] == 'test':
                    spi.program_test()
                else:
                    # NOTE(review): 'spi tset' below looks like a typo for
                    # 'spi test'; kept because it is user-visible output.
                    print 'wrong spi command'
                    print 'spi d'
                    print 'spi clk'
                    print 'spi tset'
            else:
                print 'wrong spi command'
        elif cmd_list[0] == "codefont":
            fontosd.code_font_test()
        elif cmd_list[0] == "spifont":
            fontosd.spi_font_test()
        elif cmd_list[0] == 'w':
            # Write register: w <idx> <val> (both hex).
            idx = string.atoi(cmd_list[1], 16)
            val = string.atoi(cmd_list[2], 16)
            tw8836.write(idx, val)
            print 'w', hex(idx), hex(val)
        elif cmd_list[0] == 'r':
            # Read register: r <idx> (hex); idx 0xff is the page register.
            idx = string.atoi(cmd_list[1], 16)
            page = tw8836.read(0xff)
            if idx == 0xff:
                print 'PAGE', hex(page)
            else:
                print 'PAGE', hex(page), 'IDX', hex(idx), 'is', hex(tw8836.read(idx))
        elif cmd_list[0] == ',':
            # Decrement last written value and rewrite it.
            # NOTE(review): relies on idx/val from a previous 'w' command;
            # raises NameError if used first.
            val = val - 1
            tw8836.write(idx, val)
            print 'w', hex(idx), hex(val)
        elif cmd_list[0] == '.':
            # Increment last written value and rewrite it (same caveat as ',').
            val = val + 1
            tw8836.write(idx, val)
            print 'w', hex(idx), hex(val)
        else:
            print 'wrong command'
| lgpl-3.0 |
mattupstate/overholt | overholt/helpers.py | 10 | 2395 | # -*- coding: utf-8 -*-
"""
overholt.helpers
~~~~~~~~~~~~~~~~
overholt helpers module
"""
import pkgutil
import importlib
from flask import Blueprint
from flask.json import JSONEncoder as BaseJSONEncoder
def register_blueprints(app, package_name, package_path):
    """Register all Blueprint instances on the specified Flask application found
    in all modules for the specified package.

    :param app: the Flask application
    :param package_name: the package name
    :param package_path: the package path
    :returns: list of the Blueprint instances that were registered
    """
    registered = []
    for _, module_name, _ in pkgutil.iter_modules(package_path):
        module = importlib.import_module('%s.%s' % (package_name, module_name))
        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            if isinstance(attr, Blueprint):
                app.register_blueprint(attr)
                registered.append(attr)
    return registered
class JSONEncoder(BaseJSONEncoder):
    """JSON encoder that understands objects mixing in :class:`JsonSerializer`."""

    def default(self, obj):
        """Serialize JsonSerializer objects via to_json(); defer otherwise."""
        if not isinstance(obj, JsonSerializer):
            return super(JSONEncoder, self).default(obj)
        return obj.to_json()
class JsonSerializer(object):
    """Mixin that marks a SQLAlchemy model as JSON-serializable.

    The :func:`to_json` method is used together with the custom
    :class:`JSONEncoder`. By default every mapped property is serialized;
    subclasses may override ``__json_public__`` (whitelist),
    ``__json_hidden__`` (keys removed from the output) and
    ``__json_modifiers__`` (callables applied as ``modifier(value, instance)``
    before serialization).
    """

    __json_public__ = None
    __json_hidden__ = None
    __json_modifiers__ = None

    def get_field_names(self):
        """Yield the key of every mapped SQLAlchemy property."""
        for prop in self.__mapper__.iterate_properties:
            yield prop.key

    def to_json(self):
        """Return a dict representation honoring the ``__json_*__`` settings."""
        field_names = self.get_field_names()
        visible_keys = self.__json_public__ or field_names
        hidden_keys = self.__json_hidden__ or []
        modifiers = self.__json_modifiers__ or {}
        data = {}
        for key in visible_keys:
            data[key] = getattr(self, key)
        for key, modifier in modifiers.items():
            data[key] = modifier(getattr(self, key), self)
        for key in hidden_keys:
            data.pop(key, None)
        return data
| mit |
disorientedperson/textbasedgame | utils.py | 2 | 12962 | import inspect
import os
import shelve
import random
import time
import readline
import obj
import locations
import entities
def confirm(prompt='', default=True):
    """Ask a yes/no question and return the answer as a bool.

    With default=True an empty answer counts as "yes"; any unrecognized
    answer returns False (matching the original behavior in both modes).
    """
    if default:
        reply = input(prompt + ' (Y/n) ').lower()
        return reply in ('y', 'yes', '')
    reply = input(prompt + ' (y/N)').lower()
    return reply in ('y', 'yes')
def choose(prompt='', choices=[], prefix='', default=True):
    """Present a numbered menu and return the user's selection.

    With default=True, *choices* is a list of strings: returns the chosen
    string, or the raw input when its first word matches a choice. With
    default=False, *choices* is a list of (description, value) pairs:
    returns the chosen value (or the raw input) upper-cased.

    Note: the mutable default [] is never mutated here.
    """
    i = 1
    print(prompt)
    if default:
        for choice in choices:
            print(str(i) + '. ' + choice)
            i += 1
        # Re-prompt until the answer is a valid number or matches a choice.
        while True:
            if prefix:
                descision = input(prefix + ' : ')
            else:
                descision = input(': ')
            try:
                # NOTE(review): numeric input <= 0 is not rejected here, so 0
                # and negative numbers index from the end of the list.
                if int(descision) <= len(choices):
                    return choices[int(descision)-1]
                else:
                    print('Invalid choice.')
            except ValueError:
                # Non-numeric input: accept it verbatim when its first word
                # names a choice (callers split the result on spaces).
                if descision.split(' ')[0] in choices:
                    return descision
                else:
                    print('Invalid Choice.')
    else:
        for choice in choices:
            # Each choice is a (description, shortcut/value) pair.
            # NOTE(review): because i starts at 1, the ' or' suffix lands on
            # the second-to-last entry — confirm that is intended.
            if i == len(choices) - 1:
                print(str(i) + '. ' + choice[0] + ' (' + choice[1] + ') or')
            else:
                print(str(i) + '. ' + choice[0] + ' (' + choice[1] + ')')
            i += 1
        if prefix:
            descision = input(prefix + ' : ')
        else:
            descision = input(': ')
        try:
            return choices[int(descision)-1][1].upper()
        except ValueError:
            return descision.upper()
def listItems(prompt='', listedItems=[], objType=None):
    """Print the given items, one per line.

    obj.Weapon and obj.Food instances get a descriptive line; anything else
    is printed with a running index. When *objType* is given, only instances
    of that type are shown.

    Note: the mutable default [] is kept for interface compatibility; the
    list is never mutated here.
    """
    # BUG FIX: the original ignored *prompt* and never incremented the index
    # counter, so every indexed line was printed as "0.".
    if prompt:
        print(prompt)
    i = 0
    if objType is not None:
        for listedItem in listedItems:
            if isinstance(listedItem, objType):
                if isinstance(listedItem, obj.Weapon):
                    print(listedItem.name + ': Has ' + str(listedItem.power) + ' power')
                elif isinstance(listedItem, obj.Food):
                    print(listedItem.name + ': Restores ' + str(listedItem.hp) + ' health')
                else:
                    # BUG FIX: originally printed objType.name (a class
                    # attribute) instead of the matching item's own name.
                    print(str(i) + '. ' + str(listedItem.name))
            i += 1
    else:
        for listedItem in listedItems:
            if isinstance(listedItem, obj.Weapon):
                print(listedItem.name + ': Has ' + str(listedItem.power) + ' power')
            elif isinstance(listedItem, obj.Food):
                print(listedItem.name + ': Restores ' + str(listedItem.hp) + ' health')
            else:
                print(str(i) + '. ' + listedItem)
            i += 1
def getBestInventoryWeapon():
    """Return (power, weapon) for the strongest obj.Weapon in the player's
    inventory; (0, None) when the inventory holds no weapons. Ties keep the
    first weapon found."""
    best_power = 0
    best_weapon = None
    for candidate in entities.player.inventory:
        if isinstance(candidate, obj.Weapon) and candidate.power > best_power:
            best_power = candidate.power
            best_weapon = candidate
    return best_power, best_weapon
def fight(person, weapon):
    """Run a combat encounter between the player and *person*.

    person -- the opposing entity; it becomes the current location's entity
              for the duration of the fight
    weapon -- the opponent's weapon; its power adds to the opponent's damage

    The player first gets an interactive menu (auto/act/item/retreat); if the
    fight proceeds, rounds of simultaneous damage are exchanged until one
    side (or both) drops below 1 health.
    """
    entities.player.location.entity = person
    time.sleep(0.5)
    print('The ' + str(entities.player.location.entity.name) + ' pulls out a(n) ' + str(weapon.name) + ' threateningly.')
    time.sleep(1)
    if isinstance(weapon, obj.Food): # Code no longer relevant
        print("...So you took the " + str(weapon.name) + " and ate it")
        entities.player.health += weapon.hp
        if entities.player.location.entity == entities.you:
            entities.player.location.entity.health += weapon.hp
        print("The " + str(entities.player.location.entity.name) + " ran away")
        # NOTE(review): commandLine() is not defined in this module — confirm
        # it exists in the runtime namespace before relying on this path.
        commandLine()
    # Interactive pre-battle menu; leaving it via `break` starts auto-battle.
    while entities.player.health > 1 and entities.player.location.entity.health > 1:
        # Draw a simple '#' health bar for the player.
        print('\nYour Health [ ', end='')
        i = 0
        while i != entities.player.health:
            print('#', end='')
            i += 1
        print(' ]\n\n', end='')
        command = choose('Interact Commands:', ['auto', 'act', 'item', 'retreat'], 'Interact').split(' ')
        if command[0].upper() == 'AUTO':
            break
        elif command[0].upper() == 'ACT':
            print("You " + str(entities.player.location.entity.acts) + " the " + str(entities.player.location.entity.name) + ".")
            if entities.player.location.entity.acts == "pet":
                print("The " + str(entities.player.location.entity.name) + " runs away")
                return
            else:
                print("...But it didn't work")
                break
        elif command[0].upper() == 'ITEM':
            if len(command) == 3:
                listItems('Weapons:', entities.player.inventory, obj.Weapon)
                if command[1].upper() == 'EAT':
                    for item in entities.player.inventory:
                        if item.name == command[2]:
                            if isinstance(item, obj.Food):
                                entities.player.inventory.remove(item)
                                entities.player.health += item.hp
                                if entities.player.location.entity == entities.you:
                                    entities.player.location.entity.health += item.hp
                                print('%s points added to health!' % item.hp)
                                break
                            else:
                                print("You cannot eat that")
                                break
                elif command[1].upper() == 'USE':
                    for item in entities.player.inventory:
                        if item.name == command[2]:
                            if item.itemtype == 'bomb':
                                print("The " + item.name + " exploded")
                                print("The %s took %s damage!" % (entities.player.location.entity.name, item.power))
                                entities.player.location.entity.health -= item.power
                                entities.player.inventory.remove(item)
                                break
                            else:
                                print("The %s took %s damage!" % (entities.player.location.entity.name, item.power))
                                entities.player.location.entity.health -= item.power
                                # hero.inventory.remove(item)
                                break
                elif command[1] == 'throw':
                    for item in entities.player.inventory:
                        if item.name == command[2]:
                            entities.player.inventory.remove(item)
                            print("You threw away the %s" % item.name)
                            break
                    # NOTE(review): this break leaves the menu and starts
                    # auto-battle after a throw — confirm intended.
                    break
                else:
                    print("Item command not found.")
            else:
                print('"item" requires 3 arguments. Maximum 4.')
        elif command[0].upper() == 'RETREAT':
            print("You ran away.")
            entities.player.location.entity = None
            return
    # Auto-battle: exchange simultaneous damage each round until someone dies.
    while True:
        entities.player.hit(weapon.power + entities.player.location.entity.power) # Remove health from player
        entities.player.location.entity.health -= getBestInventoryWeapon()[0] + entities.player.power # Remove health of opponent
        # NOTE(review): damage was already applied above; this condition
        # subtracts it again as a one-round lookahead to detect a draw —
        # confirm that double subtraction is intended.
        if entities.player.health - (weapon.power + entities.player.location.entity.power) < 1 and entities.player.location.entity.health - (getBestInventoryWeapon()[0] + entities.player.power) < 1:
            # In case of draw
            time.sleep(0.2)
            print('You somehow managed to escape with %s health remaining.' % entities.player.health)
            entities.worldEntities.append(entities.player.location.entity)
            entities.player.location.entity = None
            break
        elif entities.player.health < 1:
            # In case of loss
            time.sleep(0.2)
            print('You\'re dead!')
            # BUG FIX: iterate over a copy — the original removed items from
            # the list it was iterating, which skips elements.
            # NOTE(review): `stick` is not defined in this module; presumably
            # a global starter weapon the player always keeps — confirm.
            for item in list(entities.player.inventory):
                if random.randint(1, 2) == 1 and item != stick:
                    entities.player.inventory.remove(item)
                    # player.location.entity.inventory.append(removedItems)
                    print(str(item) + ' dropped from inventory.')
            droppedCoins = random.randint(0, int(entities.player.money / 2))
            entities.player.spend(droppedCoins)
            time.sleep(0.2)
            print('You dropped %s coins on your death.' % droppedCoins)
            entities.player.location.entity.money += droppedCoins
            # BUG FIX: was a bare `worldEntities` (NameError at runtime).
            entities.worldEntities.append(entities.player.location.entity)
            entities.player.location.entity = None
            break
        elif entities.player.location.entity.health < 1:
            # In case of win
            print('The ' + str(entities.player.location.entity.name) + ' has been defeated!')
            powerToAdd = entities.player.location.entity.power / 4
            entities.player.gain(powerToAdd)
            time.sleep(0.2)
            print('Your power level is now ' + str(entities.player.power))
            if random.randint(1, 2) == 1:
                # BUG FIX: iterate over a copy while transferring loot (the
                # original mutated the list during iteration and printed
                # `weapon.name` instead of the looted item's name).
                for item in list(entities.player.location.entity.inventory):
                    entities.player.inventory.append(item)
                    entities.player.location.entity.inventory.remove(item)
                    time.sleep(0.2)
                    print('%s added to inventory.' % item.name)
            coinsToAdd = entities.player.location.entity.power * 5 + random.randint(-4, 4) # Dropped coins is opponent pwr * 5 + randint
            # BUG FIX: was `entitis.player.receive(...)` — a NameError.
            entities.player.receive(coinsToAdd)
            time.sleep(0.2)
            print('Opponent dropped %s coins' % coinsToAdd)
            entities.player.location.entity = None
            break
def saveInfo(username, name, info):
    """Persist *info* under key *name* in the user's shelve save file."""
    with shelve.open(fileDir + '/saves/%s.save' % username) as saveFile:
        saveFile[name] = info
def loadInfo(username, wantedInfo):
    """Return the value saved under *wantedInfo* in the user's save file.

    BUG FIX: the original left the shelve file open forever (resource leak);
    the context manager closes it once the value is read.
    """
    with shelve.open(fileDir + '/saves/%s.save' % username) as saveFile:
        return saveFile[wantedInfo]
def goToVendor(vendor):
    """Run the shop interaction loop for *vendor*.

    Moves the player to the Market location, lists the vendor's goods and
    reads commands (buy by item name, info, exit, help, money) until the
    player leaves. Also records *vendor* in the module-global previousVendor.
    """
    global previousVendor
    previousVendor = vendor
    entities.player.location = entities.getLocation('Market')
    entities.player.location.entity = vendor
    print('%s\nItems for sale:' % vendor.message)
    vendor.say(vendor.goods)
    while True:
        command = input('Market > %s : ' % vendor.name).split(' ')
        thingToBuy = None
        buying = False
        # If the first word names one of the vendor's goods, buy it.
        for good in vendor.goods:
            if good.name == command[0]:
                thingToBuy = good
                buying = True
                break
        if buying:
            entities.player.inventory.append(thingToBuy)
            entities.player.spend(thingToBuy.cost)
            print('%s purchased for %s money.' % (thingToBuy.name, thingToBuy.cost))
        elif command[0].upper() == 'INFO':
            thingToGetInfoOn = command[1]
            itemInShop = False
            for item in vendor.goods:
                if item.name == thingToGetInfoOn:
                    itemInShop = True
                    break
            if not itemInShop:
                print('Item not found.')
            else:
                if isinstance(item, obj.Weapon):
                    print('Power: %s' % item.power)
                elif isinstance(item, obj.Food):
                    print('Healing power: %s' % item.hp)
                print('Description: ' + item.description)
        elif command[0].upper() == 'EXIT':
            print('You left the store.')
            # NOTE(review): this stores a Location object in `.entity`; by
            # analogy with the assignment at the top it probably should
            # restore `entities.player.location` instead — confirm.
            entities.player.location.entity = entities.getLocation('Main')
            return
        elif command[0].upper() == 'HELP':
            entities.getHelpMsg('Market').printMsg()
        elif command[0].upper() == 'MONEY':
            # BUG FIX: the original concatenated an int with a str (TypeError).
            print('%s coins' % entities.player.money)
        else:
            print('Command not found.')
def execute(command):
    """Dispatch one tokenized top-level command (a list of words)."""
    verb = command[0].upper()
    if command[0] == '?' or verb == 'HELP':
        print('Possible commands:')
        entities.getHelpMsg('Main').printMsg()
    elif verb == 'GOTO':
        destination = command[1].upper()
        if destination == 'INTERACT':
            locations.personInteraction()
        elif destination == 'MARKET':
            print('Going to market...')
            locations.market()
        elif destination == 'INVENTORY':
            print('Entering Inventory...')
            locations.inventory()
        elif destination == 'MEMORY':
            locations.memory()
        else:
            print('Location not found.')
    else:
        print('Command not found. Type "help" or "?" for help.')
# Get current file path
# Absolute directory of this module; saveInfo/loadInfo resolve ./saves under it.
fileDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
| gpl-3.0 |
wangyang2013/NodeManager | net.py | 2 | 5916 | """network configuration"""
# system provided modules
import os, string, time, socket
# PlanetLab system modules
import sioc, plnet
# local modules
import plnode.bwlimit as bwlimit
import logger, iptables, tools
# we can't do anything without a network
# Low value runs early in NodeManager's plugin ordering (see comment above).
priority=1
# Name of the node's default network interface (e.g. "eth0"); used as the
# tc target for bandwidth-limit operations below.
dev_default = tools.get_default_if()
def start():
    """NodeManager plugin entry point: called once when the plugin starts."""
    logger.log("net: plugin starting up...")
def GetSlivers(data, config, plc):
    """Plugin callback: apply the PLC-provided interface configuration.

    Initializes the interfaces via plnet, then (unless disabled through the
    net_max_rate=-1 override) the node bandwidth limits and Internet2 rules,
    and finally the NAT/port-forwarding rules.
    """
    # added by caglar
    # band-aid for short period as old API returns networks instead of interfaces
    global KEY_NAME
    KEY_NAME = "interfaces"
    #################
    logger.verbose("net: GetSlivers called.")
    if not 'interfaces' in data:
        # added by caglar
        # band-aid for short period as old API returns networks instead of interfaces
        # logger.log_missing_data('net.GetSlivers','interfaces')
        # return
        if not 'networks' in data:
            logger.log_missing_data('net.GetSlivers','interfaces')
            return
        else:
            # Old API: fall back to the legacy "networks" key.
            KEY_NAME = "networks"
    ##################
    plnet.InitInterfaces(logger, plc, data)
    if 'OVERRIDES' in dir(config):
        if config.OVERRIDES.get('net_max_rate') == '-1':
            logger.log("net: Slice and node BW Limits disabled.")
            # Tear down existing tc classes only when some are installed.
            if len(bwlimit.tc("class show dev %s" % dev_default)):
                logger.verbose("net: *** DISABLING NODE BW LIMITS ***")
                bwlimit.stop()
        else:
            InitNodeLimit(data)
            InitI2(plc, data)
    else:
        InitNodeLimit(data)
        InitI2(plc, data)
    InitNAT(plc, data)
def InitNodeLimit(data):
    """(Re)apply the per-node bandwidth cap on each configured interface."""
    # query running network interfaces
    devs = sioc.gifconf()
    ips = dict(zip(devs.values(), devs.keys()))
    macs = {}
    for dev in devs:
        macs[sioc.gifhwaddr(dev).lower()] = dev
    for interface in data[KEY_NAME]:
        # Get interface name preferably from MAC address, falling
        # back on IP address.
        hwaddr=interface['mac']
        if hwaddr <> None: hwaddr=hwaddr.lower()
        if hwaddr in macs:
            dev = macs[interface['mac']]
        elif interface['ip'] in ips:
            dev = ips[interface['ip']]
        else:
            logger.log('net: %s: no such interface with address %s/%s' % (interface['hostname'], interface['ip'], interface['mac']))
            continue
        # Get current node cap
        # NOTE(review): the bare except hides any bwlimit failure, not just a
        # missing cap — confirm that is intentional.
        try:
            old_bwlimit = bwlimit.get_bwcap(dev)
        except:
            old_bwlimit = None
        # Get desired node cap
        if interface['bwlimit'] is None or interface['bwlimit'] < 0:
            new_bwlimit = bwlimit.bwmax
        else:
            new_bwlimit = interface['bwlimit']
        if old_bwlimit != new_bwlimit:
            # Reinitialize bandwidth limits
            bwlimit.init(dev, new_bwlimit)
            # XXX This should trigger an rspec refresh in case
            # some previously invalid sliver bwlimit is now valid
            # again, or vice-versa.
def InitI2(plc, data):
    """On Internet2 nodes, exempt I2 peer IPs and install the tc classify rule."""
    if not 'groups' in data: return
    if "Internet2" in data['groups']:
        logger.log("net: This is an Internet2 node. Setting rules.")
        i2nodes = []
        i2nodeids = plc.GetNodeGroups(["Internet2"])[0]['node_ids']
        for node in plc.GetInterfaces({"node_id": i2nodeids}, ["ip"]):
            # Get the IPs
            i2nodes.append(node['ip'])
        # this will create the set if it doesn't already exist
        # and add IPs that don't exist in the set rather than
        # just recreateing the set.
        bwlimit.exempt_init('Internet2', i2nodes)
        # set the iptables classification rule if it doesnt exist.
        cmd = '-A POSTROUTING -m set --set Internet2 dst -j CLASSIFY --set-class 0001:2000 --add-mark'
        rules = []
        ipt = os.popen("/sbin/iptables-save")
        for line in ipt.readlines(): rules.append(line.strip(" \n"))
        ipt.close()
        if cmd not in rules:
            logger.verbose("net: Adding iptables rule for Internet2")
            # NOTE(review): the popen handle/exit status is discarded here, so
            # a failed iptables call goes unnoticed — confirm acceptable.
            os.popen("/sbin/iptables -t mangle " + cmd)
def InitNAT(plc, data):
    """Configure NAT and port-forwarding from each interface's FIREWALL tags."""
    # query running network interfaces
    devs = sioc.gifconf()
    ips = dict(zip(devs.values(), devs.keys()))
    macs = {}
    for dev in devs:
        macs[sioc.gifhwaddr(dev).lower()] = dev
    ipt = iptables.IPTables()
    for interface in data[KEY_NAME]:
        # Get interface name preferably from MAC address, falling
        # back on IP address.
        hwaddr=interface['mac']
        if hwaddr <> None: hwaddr=hwaddr.lower()
        if hwaddr in macs:
            dev = macs[interface['mac']]
        elif interface['ip'] in ips:
            dev = ips[interface['ip']]
        else:
            logger.log('net: %s: no such interface with address %s/%s' % (interface['hostname'], interface['ip'], interface['mac']))
            continue
        # NOTE(review): the bare except skips the interface on any PLC error,
        # not only "no tags" — confirm that is intentional.
        try:
            settings = plc.GetInterfaceTags({'interface_tag_id': interface['interface_tag_ids']})
        except:
            continue
        for setting in settings:
            if setting['category'].upper() != 'FIREWALL':
                continue
            if setting['name'].upper() == 'EXTERNAL':
                # Enable NAT for this interface
                ipt.add_ext(dev)
            elif setting['name'].upper() == 'INTERNAL':
                ipt.add_int(dev)
            elif setting['name'].upper() == 'PF': # XXX Uglier code is hard to find...
                # One port-forward spec per line: comma-separated key=value
                # fields; dport/source get defaults when omitted.
                for pf in setting['value'].split("\n"):
                    fields = {}
                    for field in pf.split(","):
                        (key, val) = field.split("=", 2)
                        fields[key] = val
                    if 'new_dport' not in fields:
                        fields['new_dport'] = fields['dport']
                    if 'source' not in fields:
                        fields['source'] = "0.0.0.0/0"
                    ipt.add_pf(fields)
    # Flush all accumulated rules to iptables in one shot.
    ipt.commit()
| apache-2.0 |
PeriniMatteo/Box-Maker-py | BoxMaker.py | 1 | 22825 | #------------------------------------------------------box-maker.py----#
#
# BOX MAKER
#
#
# Copyright (c) 2014 Matteo Perini
# Copyright (c) 2014 Alessandro Navarini
#
# The original code is written in java
# Copyright (c) 2002 Rahul Bhargava
# https://github.com/rahulbot/boxmaker
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
'''
Programma per la prgettazione di Box su file svg con interfaccia
grafica per l'inserimento dati da parte dell'utente
'''
#Importo le librerie per l'interfaccia grafica
from Tkinter import *
from tkMessageBox import *
#Costante per convertire i millimetri in pixel
from_mm_to_pixel = 3.5433071
#Funzione che crea il file svg
def BoxMaker(width, height, depth, thickness, notchLength, cutwidth, autom, coper):
#Verifico se le dimensioni delle tacche sono su automatiche e, se si, calcolo la lunghezza
if autom == "True":
notchLength = thickness*2.5
#Funzione per approssimare il numero delle tacche
def closestOddTo(numd):
num= int(numd+0.5)
if(num % 2 == 0):
return int(num-1)
return int(num)
#Funzione per disegnare le linee orizzontali nel file svg
def drawHorizontalLine(x0,y0,notchWidth,notchCount,notchHieght,cutwidth,flip,smallside,fil):
x=x0
y=y0
for step in range(notchCount):
if(((step%2)==0) != flip):
y=y0
else:
y=y0+notchHieght
if(step==0):
if(smallside):
#fil.write(' <line x1="'+str(x+notchHieght)+'" y1="'+str(y)+'" x2="'+str(x+notchWidth+cutwidth)+'" y2="'+str(y)+'"/>\n')
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x+notchHieght,y,x+notchWidth+cutwidth,y))
else:
#fil.write(' <line x1="'+str(x)+'" y1="'+str(y)+'" x2="'+str(x+notchWidth)+'" y2="'+str(y)+'"/>\n')
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x,y,x+notchWidth+cutwidth,y))
elif (step==(notchCount-1)):
#fil.write(' <line x1="'+str(x-cutwidth)+'" y1="'+str(y)+'" x2="'+str(x+notchWidth-notchHieght)+'" y2="'+str(y)+'"/>\n')
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x-cutwidth,y,x+notchWidth-notchHieght,y))
elif (step%2==0):
#fil.write(' <line x1="'+str(x-cutwidth)+'" y1="'+str(y)+'" x2="'+str(x+notchWidth+cutwidth)+'" y2="'+str(y)+'"/>\n')
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x-cutwidth,y,x+notchWidth+cutwidth,y))
else:
#fil.write(' <line x1="'+str(x+cutwidth)+'" y1="'+str(y)+'" x2="'+str(x+notchWidth-cutwidth)+'" y2="'+str(y)+'"/>\n')
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x+cutwidth,y,x+notchWidth-cutwidth,y))
if (step<(notchCount-1)):
if (step%2==0):
#fil.write(' <line x1="'+str(x+notchWidth+cutwidth)+'" y1="'+str(y0+notchHieght)+'" x2="'+str(x+notchWidth+cutwidth)+'" y2="'+str(y0)+'"/>\n')
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x+notchWidth+cutwidth,y0+notchHieght,x+notchWidth+cutwidth,y0))
else:
#fil.write(' <line x1="'+str(x+notchWidth-cutwidth)+'" y1="'+str(y0+notchHieght)+'" x2="'+str(x+notchWidth-cutwidth)+'" y2="'+str(y0)+'"/>\n')
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x+notchWidth-cutwidth,y0+notchHieght,x+notchWidth-cutwidth,y0))
x=x+notchWidth
#Funzione per disegnare e linee verticali nel file svg
def drawVerticalLine(x0,y0,stepLength,numSteps,mlength,cutwidth,flip,smallside,fil):
x=x0
y=y0
for step in range(numSteps):
if(((step%2)==0) != flip):
x=x0
else:
x=x0+mlength
if (step==0):
if(smallside):
#drawLineByMm(x,y+mlength,x,y+stepLength+cutwidth)
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x,y+mlength,x,y+stepLength+cutwidth))
else:
#drawLineByMm(x,y,x,y+stepLength+cutwidth)
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x,y,x,y+stepLength+cutwidth))
elif(step==(numSteps-1)):
#g.moveTo(x,y+cutwidth); g.lineTo(x,y+stepLength); g.stroke()
if(smallside):
#drawLineByMm(x,y-cutwidth,x,y+stepLength-mlength)
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x,y-cutwidth,x,y+stepLength-mlength))
else:
#drawLineByMm(x,y-cutwidth,x,y+stepLength)
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x,y-cutwidth,x,y+stepLength))
elif (step%2==0):
#drawLineByMm(x,y-cutwidth,x,y+stepLength+cutwidth)
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x,y-cutwidth,x,y+stepLength+cutwidth))
else:
#drawLineByMm(x,y+cutwidth,x,y+stepLength-cutwidth)
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x,y+cutwidth,x,y+stepLength-cutwidth))
if (step<(numSteps-1)):
if (step%2==0):
#drawLineByMm(x0+mlength,y+stepLength+cutwidth,x0,y+stepLength+cutwidth)
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x0+mlength,y+stepLength+cutwidth,x0,y+stepLength+cutwidth))
else:
#drawLineByMm(x0+mlength,y+stepLength-cutwidth,x0,y+stepLength-cutwidth)
fil.write(' <line x1="%.3f" y1="%.3f" x2="%.3f" y2="%.3f"/>\n'%(x0+mlength,y+stepLength-cutwidth,x0,y+stepLength-cutwidth))
y=y+stepLength
#Aumento le dimensioni per compensare quelle perse a causa dello spessore del taglio
width+=cutwidth
height+=cutwidth
depth+=cutwidth
#Calcolo del numero di tacche per ogni dimensione (Altezza, lunghezza e profondita')
numNotchesW = closestOddTo(width / notchLength)
numNotchesH = closestOddTo(height / notchLength)
numNotchesD = closestOddTo(depth / notchLength)
#Calcolo dell'esatta lunghezza delle tacche
notchLengthW = width / float(numNotchesW)
notchLengthH = height / float(numNotchesH)
notchLengthD = depth / float(numNotchesD)
#and compute the new width based on that (should be a NO-OP)
margin=10.0+cutwidth
width = numNotchesW*notchLengthW
height = numNotchesH*notchLengthH
depth = numNotchesD*notchLengthD
boxPiecesWidth = (depth*2+width)
boxPiecesHeight = (height*2+depth*2)
#Apertura del file
out_file=open('Box.svg','w')
out_file.write('<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n')
out_file.write('<svg\n')
out_file.write(' xmlns:dc="http://purl.org/dc/elements/1.1/"\n')
out_file.write(' xmlns:cc="http://creativecommons.org/ns#"\n')
out_file.write(' xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"\n')
out_file.write(' xmlns:svg="http://www.w3.org/2000/svg"\n')
out_file.write(' xmlns="http://www.w3.org/2000/svg"\n')
out_file.write(' xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"\n')
out_file.write(' xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"\n')
out_file.write(' width="'+str(boxPiecesWidth+margin*4)+'"\n')
out_file.write(' height="'+str(boxPiecesHeight+margin*5)+'"\n')
out_file.write(' id="svg2"\n')
out_file.write(' version="1.1"\n')
out_file.write(' inkscape:version="0.48.4 r9939">\n')
out_file.write(' <sodipodi:namedview\n')
out_file.write(' id="base"\n')
out_file.write(' pagecolor="#ffffff"\n')
out_file.write(' bordercolor="#666666"\n')
out_file.write(' borderopacity="1.0"\n')
out_file.write(' inkscape:pageopacity="0.0"\n')
out_file.write(' inkscape:pageshadow="2"\n')
out_file.write(' inkscape:zoom="0.35"\n')
out_file.write(' inkscape:cx="500"\n')
out_file.write(' inkscape:cy="500"\n')
out_file.write(' inkscape:document-units="px"\n')
out_file.write(' inkscape:current-layer="layer1"\n')
out_file.write(' />\n')
#out_file.write(' <g\n')
#out_file.write(' inkscape:label="Livello 1"\n')
#out_file.write(' inkscape:groupmode="layer"\n')
#out_file.write(' id="layer1"\n')
#out_file.write(' transform="translate(500,500)"\n')
#out_file.write(' >\n')
# Embed gear in group to make animation easier:
# Translate group, Rotate path.
#Variabili per le coordinate di x e di y
xOrig = 0.0
yOrig = 0.0
# compensate for the cut width (in part) by increasing mwidth (eolson)
# no, don't do that, because the cut widths cancel out. (eolson)
# mwidth+=cutwidth/2;
xOrig = depth + margin*2
yOrig = margin
out_file.write(' <g fill="none" stroke="black" stroke-width="0.07086614" >\n')
#1. a W x H side (il retro)
if coper=="True":
#Se bisogna fare il coperchio faccio le tacche sul lato alto
drawHorizontalLine(xOrig,yOrig,notchLengthW,numNotchesW,thickness,cutwidth/2.0,False,False,out_file) #top
else:
#Altrimenti tiro una riga dritta
drawHorizontalLine(xOrig,yOrig,width-thickness-cutwidth/2,1,thickness,cutwidth/2.0,False,False,out_file)
drawHorizontalLine(xOrig,yOrig+height-thickness,notchLengthW,numNotchesW,thickness,cutwidth/2,True,False,out_file) #bottom
drawVerticalLine(xOrig,yOrig,notchLengthH,numNotchesH,thickness,cutwidth/2.0,False,False,out_file) #left
drawVerticalLine(xOrig+width-thickness,yOrig,notchLengthH,numNotchesH,thickness,-cutwidth/2,False,False,out_file) #right
out_file.write(' </g>\n')
out_file.write(' <g fill="none" stroke="black" stroke-width="0.07086614" >\n')
#2. a D x H side (Il lato sinistro)
xOrig = margin;
yOrig = height + margin*2;
if coper=="True":
drawHorizontalLine(xOrig,yOrig,notchLengthD,numNotchesD,thickness,cutwidth/2,False,False,out_file) #top
else:
drawHorizontalLine(xOrig,yOrig,depth-thickness-cutwidth/2,1,thickness,cutwidth/2,False,False,out_file)
drawHorizontalLine(xOrig,yOrig+height-thickness,notchLengthD,numNotchesD,thickness,cutwidth/2,True,False,out_file) #bottom
drawVerticalLine(xOrig,yOrig,notchLengthH,numNotchesH,thickness,cutwidth/2,False,False,out_file) #left
drawVerticalLine(xOrig+depth-thickness,yOrig,notchLengthH,numNotchesH,thickness,-cutwidth/2,False,False,out_file) #right
out_file.write(' </g>\n')
out_file.write(' <g fill="none" stroke="black" stroke-width="0.07086614" >\n')
#3. a W x D side (il fondo)
xOrig = depth + margin*2
yOrig = height + margin*2
drawHorizontalLine(xOrig,yOrig,notchLengthW,numNotchesW,thickness,-cutwidth/2,True,True,out_file) #top
drawHorizontalLine(xOrig,yOrig+depth-thickness,notchLengthW,numNotchesW,thickness,-cutwidth/2,False,True,out_file) #bottom
drawVerticalLine(xOrig,yOrig,notchLengthD,numNotchesD,thickness,-cutwidth/2,True,True,out_file) #left
drawVerticalLine(xOrig+width-thickness,yOrig,notchLengthD,numNotchesD,thickness,-cutwidth/2,False,True,out_file) #right
out_file.write(' </g>\n')
out_file.write(' <g fill="none" stroke="black" stroke-width="0.07086614" >\n')
#4. a D x H side (il lato destro)
xOrig = depth + width + margin*3
yOrig = height + margin*2
if coper=="True":
drawHorizontalLine(xOrig,yOrig,notchLengthD,numNotchesD,thickness,cutwidth/2,False,False,out_file) #top
else:
drawHorizontalLine(xOrig,yOrig,depth-thickness-cutwidth/2,1,thickness,cutwidth/2,False,False,out_file)
drawHorizontalLine(xOrig,yOrig+height-thickness,notchLengthD,numNotchesD,thickness,cutwidth/2,True,False,out_file) #bottom
drawVerticalLine(xOrig,yOrig,notchLengthH,numNotchesH,thickness,cutwidth/2,False,False,out_file) #left
drawVerticalLine(xOrig+depth-thickness,yOrig,notchLengthH,numNotchesH,thickness,-cutwidth/2,False,False,out_file) #right
out_file.write(' </g>\n')
out_file.write(' <g fill="none" stroke="black" stroke-width="0.07086614" >\n')
#5. a W x H side (il davanti)
xOrig = depth + margin*2
yOrig = height + depth+ margin*3
if coper=="True":
drawHorizontalLine(xOrig,yOrig,notchLengthW,numNotchesW,thickness,cutwidth/2,False,False,out_file) #top
else:
drawHorizontalLine(xOrig,yOrig,width-thickness-cutwidth/2,1,thickness,cutwidth/2,False,False,out_file)
drawHorizontalLine(xOrig,yOrig+height-thickness,notchLengthW,numNotchesW,thickness,cutwidth/2,True,False,out_file) #bottom
drawVerticalLine(xOrig,yOrig,notchLengthH,numNotchesH,thickness,cutwidth/2,False,False,out_file) #left
drawVerticalLine(xOrig+width-thickness,yOrig,notchLengthH,numNotchesH,thickness,-cutwidth/2,False,False,out_file) #right
out_file.write(' </g>\n')
out_file.write(' <g fill="none" stroke="black" stroke-width="0.07086614" >\n')
#6. a W x D side (il coperchio)
#Disegna il coperchio solamente se la checkbox nella finestra e' selezionata
if coper=="True":
xOrig = depth + margin*2
yOrig = height*2 + depth + margin*4
drawHorizontalLine(xOrig,yOrig,notchLengthW,numNotchesW,thickness,-cutwidth/2,True,True,out_file) #top
drawHorizontalLine(xOrig,yOrig+depth-thickness,notchLengthW,numNotchesW,thickness,-cutwidth/2,False,True,out_file) #bottom
drawVerticalLine(xOrig,yOrig,notchLengthD,numNotchesD,thickness,-cutwidth/2,True,True,out_file) #left
drawVerticalLine(xOrig+width-thickness,yOrig,notchLengthD,numNotchesD,thickness,-cutwidth/2,False,True,out_file) #right
#chiusura del file svg
out_file.write(' </g>\n')
##t = 'translate(' + str( self.view_center[0] ) + ',' + str( self.view_center[1] ) + ')'
##g_attribs = {inkex.addNS('label','inkscape'):'Gear' + str( teeth ),
## 'transform':t }
##g = inkex.etree.SubElement(self.current_layer, 'g', g_attribs)
# Create SVG Path for gear
##style = { 'stroke': '#000000', 'fill': 'none' }
##gear_attribs = {'style':simplestyle.formatStyle(style), 'd':path}
##gear = inkex.etree.SubElement(g, inkex.addNS('path','svg'), gear_attribs )
#out_file.write('<path\n')
#out_file.write('d="'+path+'"\n')
#out_file.write('style="fill:none;stroke:#000000" />\n')
#out_file.write(' </g>')
out_file.write('</svg>')
out_file.close()
#Avviso l'utente che il file e' pronto
showinfo("Avviso", "Il file svg e' stato creato correttamente")
#Funzione che cotrolla se il contenuto delle caselle di testo e' numerico
def controllo(Input):
try:
variabile = float(Input)
return float(Input)
except:
showerror("Errore", "Non hai inserito un valore numerico, riprova.")
return None
#Funzione collegata al pulsante "Disegna"
def pulsante():
continua = True #Si recuperano i valori dalle caselle di testo e dalle checkbox
udmm = udm.get() #Unita' di misura selezionata
width_ = controllo(larghezza.get().replace(",", ".")) #Larghezza
height_ = controllo(altezza.get().replace(",", ".")) #Altezza
depth_ = controllo(profondita.get().replace(",", ".")) #Profondita'
thickness_ = controllo(spessore_materiale.get().replace(",", ".")) #Spessore del materiale
#Controllo se la lunghezza delle tacche e' su automatica
if str(bool(autom.get()))=="True":
notchLength_ = 0 #Se e' automatica metto 0 nella variabile per non ricevere errori
else:
notchLength_ = controllo(tacche.get().replace(",", ".")) #Altrimenti eseguo i controlli
cutwidth_ = controllo(spessore_taglio.get().replace(",", ".")) #Spessore del taglio
autom_ = str(bool(autom.get())) #Lunghezza delle tacche automatica
coper_ = str(bool(coper.get())) #Coperchio
if (width_<>None and height_<>None and depth_<>None and thickness_<>None and notchLength_<>None and cutwidth_<>None): #Controlla se il valore delle variabili e' valido
#Conversione inch --> millimetri
if udmm=="inch":
width_*=25.4
height_*=25.4
depth_*=25.4
thickness_*=25.4
notchLength_*=25.4
cutwidth_*=25.4
#Conversione centimetri --> millimetri
elif udmm=="centimetri":
width_*=10
height_*=10
depth_*=10
thickness_*=10
notchLength_*=10
cutwidth_*=10
#Inizio dei controlli sui valori inseriti dall'utente
if thickness_>=width_ or thickness_>=height_ or thickness_>=depth_:
showwarning("Attenzione", "Lo spessore del materiale non puo' essere maggiore delle dimensioni della scatola, riprova")
continua = False
if height_>1000 or width_>1000 or depth_>1000:
showwarning("Attenzione", "Le dimensioni della scatola non possono essere maggiori di 1 metro, riprova")
continua = False
if thickness_<1:
showwarning("Attenzione", "Lo spessore del materiale non puo' essere inferiore a 1 millimetro, riprova")
continua = False
if thickness_>=30:
showwarning("Attenzione", "Lo spessore del materiale non puo' essere maggiore a 30 millimetri, riprova")
continua = False
if autom_ == "False":
if notchLength_<=thickness_:
showwarning("Attenzione", "La lunghezza delle tacche non puo' essere inferiore allo spessore del materiale, riprova")
continua = False
if autom_ == "False":
if notchLength_>(height_/2) or notchLength_>(width_/2) or notchLength_>(depth_/2):
showwarning("Attenzione", "La lunghezza delle tacche deve essere inferiore alla meta' delle dimensioni della scatola, riprova")
continua = False
if cutwidth_>1:
showwarning("Attenzione", "Lo spessore del taglio non puo' essere superiore ad 1 millimetro, riprova")
continua = False
#Se i controlli non hanno rilevato problemi si procede con la creazione del file tramite la funzione BoxMaker
if continua==True:
#Conversione millimetri --> pixel
width_*=from_mm_to_pixel
height_*=from_mm_to_pixel
depth_*=from_mm_to_pixel
thickness_*=from_mm_to_pixel
notchLength_*=from_mm_to_pixel
cutwidth_*=from_mm_to_pixel
BoxMaker(width_, height_, depth_, thickness_, notchLength_, cutwidth_, autom_, coper_)
'''
print "Larghezza:", width_, 'pixel'
print "Altezza:", height_, 'pixel'
print "Profondita':", depth_, 'pixel'
print "Spessore del materiale:", thickness_, 'pixel'
print "Lunghezza delle tacche:", notchLength_, 'pixel'
print "Automatica:", autom_
print "Coperchio:", coper_
print "Spessore del taglio:", cutwidth_, 'pixel'
'''
#Interfaccia grafica con Tkinter
#Le label vuote sono state posizionate per riempire gli spazi e mantenere allineate le righe
#Gli elementi della finestra sono disposti in tre frame verticali: sinistra, centro e destra
#Creo la finestra
main = Tk()
main.title("BoxMaker")
#Suddivido la finestra in tre frame
sinistra = Frame(main)
centro = Frame(main)
destra = Frame(main)
basso = Frame(main)
#Frame sinistra: descrizioni
l1=Label(sinistra, text="Unita' di misura").pack(pady=1)
l2=Label(sinistra, text="Larghezza").pack(pady=1)
l3=Label(sinistra, text="Altezza").pack(pady=1)
l4=Label(sinistra, text="Profondita'").pack(pady=1)
l5=Label(sinistra, text="Spessore materiale").pack(pady=1)
l6=Label(sinistra, text="Lunghezza tacche").pack(pady=1)
l7=Label(sinistra, text="").pack(pady=1)
l8=Label(sinistra, text="Spessore del taglio").pack(pady=1)
l9=Label(sinistra, text="").pack(pady=1)
l10=Label(sinistra, text="").pack(pady=1)
sinistra.pack(side=LEFT, fill=BOTH)
#Frame centro: raccolta input e pulsante
udm=Spinbox(centro, values=('millimetri', 'centimetri', 'inch'))
udm.pack()
larghezza = Entry(centro)
larghezza.pack()
altezza = Entry(centro)
altezza.pack()
profondita = Entry(centro)
profondita.pack()
spessore_materiale = Entry(centro)
spessore_materiale.pack()
tacche = Entry(centro, text="0")
#tacche.insert(0, "5")
tacche.pack()
l12=Label(centro, text="").pack(pady=1)
spessore_taglio = Entry(centro)
spessore_taglio.insert(0, "0")
spessore_taglio.pack()
l13=Label(centro, text="").pack(pady=1)
p = Button (centro, text="Disegna", command=pulsante)
p.pack()
centro.pack(side=LEFT, fill=BOTH)
#Frame destra: checkbox e suggerimenti
l20=Label(destra, text="").pack(pady=1)
l21=Label(destra, text="Dimensioni esterne").pack(pady=1)
l22=Label(destra, text="Dimensioni esterne").pack(pady=1)
l23=Label(destra, text="Dimensioni esterne").pack(pady=1)
l24=Label(destra, text="").pack(pady=1)
autom=BooleanVar()
w = Checkbutton (destra, text="Automatica", variable = autom, onvalue = True, offvalue = False)
w.select()
w.pack()
coper=BooleanVar()
x = Checkbutton (destra, text="Coperchio", variable = coper, onvalue = True, offvalue = False)
x.pack()
l26=Label(destra, text="").pack(pady=1)
l27=Label(destra, text="").pack(pady=1)
l28=Label(destra, text="").pack(pady=1)
destra.pack(side=LEFT, fill=BOTH)
#Frame basso: logo MUSE FabLab
logo = PhotoImage(file="FabLab.png")
immagine = Label()
immagine.configure(image=logo)
immagine.pack()
basso.pack()
#Apro la finestra
main.mainloop()
| gpl-3.0 |
MentorEmbedded/qmtest | qm/label.py | 2 | 6102 | ########################################################################
#
# File: label.py
# Author: Alex Samuel
# Date: 2001-03-17
#
# Contents:
# Label
#
# Copyright (c) 2001, 2002 by CodeSourcery, LLC. All rights reserved.
#
# For license terms see the file COPYING.
#
########################################################################
########################################################################
# Imports
########################################################################
from __future__ import nested_scopes
import os
import re
import string
import types
########################################################################
# Classes
########################################################################
class Label:
"""A 'Label' identifies an entity.
A 'Label' is a generalization of a filename. Like filenames, labels
consist of one or more directories followed by a basename. However,
the format used for a label need not be the same as that used by
filenames.
Each label class defines a separator character to take the place of
the '/' character used by many file systems.
All labels are relative labels; no label may begin with a separator
character."""
def __init__(self, label):
"""Construct a new 'Label'.
'label' -- A string giving the value of the label."""
assert type(label) in (types.StringType, types.UnicodeType)
self._label = label
def Join(self, *labels):
"""Combine this label and the 'labels' into a single label.
'labels' -- A sequence of strings giving the components of the
new label. All but the last are taken as directory names; the
last is treated as a basename."""
result = self._label
for l in labels:
if not result:
# If the label is empty so far, l is the first component.
result = l
elif result and result[-1] == self._sep:
# If the label thus far ends with a separator, we do not
# want to add another one.
result += l
else:
result = result + self._sep + l
return self.__class__(result)
def Split(self):
"""Split the label into a pair '(directory, basename)'.
returns -- A pair '(directory, basename)', each of which is
a label.
It is always true that 'directory.join(basename)' will return a
label equivalent to the original label."""
last_sep = self._label.rfind(self._sep)
if last_sep != -1:
return (self.__class__(self._label[:last_sep]),
self.__class__(self._label[last_sep + 1:]))
else:
return (self.__class__(""),
self.__class__(self._label))
def SplitLeft(self):
"""Split the label into a pair '(parent, subpath)'. This is
the same operation as Split, except the split occurs at the
leftmost separator, not the rightmost.
returns -- A pair '(directory, basename)', each of which is
a label.
It is always true that 'directory.join(basename)' will return a
label equivalent to the original label."""
first_sep = self._label.find(self._sep)
if first_sep != -1:
return (self.__class__(self._label[:first_sep]),
self.__class__(self._label[first_sep + 1:]))
else:
return (self.__class__(self._label),
self.__class__(""))
def Basename(self):
"""Return the basename for the label.
returns -- A string giving the basename for the label. The
value returned for 'l.basename()' is always the same as
'l.split()[1]'."""
return self.Split()[1]
def Dirname(self):
"""Return the directory name for the 'label'.
returns -- A string giving the directory name for the 'label'.
The value returned for 'l.dirname()' is always the same as
'l.split()[0]'."""
return self.Split()[0]
def IsValid(self, label, is_component):
"""Returns true if 'label' is valid.
'label' -- The string being tested for validity.
'is_component' -- True if the string being tested is just a
single component of a label path.
returns -- True if 'label' is not valid."""
if label and label[0] == self._sep:
# All labels are relative; a valid label cannot begin with a
# separator.
return 0
elif is_component and self._sep in label:
# A component label cannot contain a separator.
return 0
elif label.find(self._sep + self._sep) != -1:
# It is invalid to have two separators in a row.
return 0
return 1
def __str__(self):
"""Return the string form of this label."""
return self._label
########################################################################
# Functions
########################################################################
__thunk_regex = re.compile("[^a-z0-9_]")
def thunk(label):
"""Sanitize and convert 'label' to a valid label.
Makes a best-effort attempt to keep 'label' recognizable during
the conversion.
returns -- A valid label."""
# Strip leading and trailing whitespace.
label = string.strip(label)
# Lower capital letters.
label = string.lower(label)
# Replace all invalid characters with underscores.
label = string.replace(label, "+", "x")
label = __thunk_regex.sub("_", label)
# Trim leading underscores.
while len(label) > 0 and label[0] == "_":
label = label[1:]
# Make sure the label isn't empty.
if label == "":
raise ValueError, "Empty label"
return label
########################################################################
# Local Variables:
# mode: python
# indent-tabs-mode: nil
# fill-column: 72
# End:
| gpl-2.0 |
moazzemi/HAMEX | cpu/gem5/tests/configs/realview64-o3.py | 28 | 2445 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
from O3_ARM_v7a import O3_ARM_v7a_3
root = LinuxArmFSSystemUniprocessor(machine_type='VExpress_EMM64',
mem_mode='timing',
mem_class=DDR3_1600_x64,
cpu_class=O3_ARM_v7a_3).create_root()
| mit |
Integral-Technology-Solutions/ConfigNOW | Lib/xml/dom/html/HTMLBaseElement.py | 3 | 1574 | ########################################################################
#
# File Name: HTMLBaseElement
#
# Documentation: http://docs.4suite.com/4DOM/HTMLBaseElement.html
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLBaseElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="BASE"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_href(self):
return self.getAttribute("HREF")
def _set_href(self, value):
self.setAttribute("HREF", value)
def _get_target(self):
return self.getAttribute("TARGET")
def _set_target(self, value):
self.setAttribute("TARGET", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"href" : _get_href,
"target" : _get_target
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"href" : _set_href,
"target" : _set_target
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| mit |
kun--hust/libcloud_with_cn | libcloud/test/compute/test_bluebox.py | 46 | 4672 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.bluebox import BlueboxNodeDriver as Bluebox
from libcloud.compute.base import Node, NodeAuthPassword
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import BLUEBOX_PARAMS
class BlueboxTest(unittest.TestCase):
def setUp(self):
Bluebox.connectionCls.conn_classes = (None, BlueboxMockHttp)
self.driver = Bluebox(*BLUEBOX_PARAMS)
def test_create_node(self):
node = self.driver.create_node(
name='foo',
size=self.driver.list_sizes()[0],
image=self.driver.list_images()[0],
auth=NodeAuthPassword("test123")
)
self.assertTrue(isinstance(node, Node))
self.assertEqual(node.state, NodeState.PENDING)
self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com')
def test_list_nodes(self):
node = self.driver.list_nodes()[0]
self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com')
self.assertEqual(node.state, NodeState.RUNNING)
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 4)
ids = [s.id for s in sizes]
for size in sizes:
self.assertTrue(size.price > 0)
self.assertTrue('94fd37a7-2606-47f7-84d5-9000deda52ae' in ids)
self.assertTrue('b412f354-5056-4bf0-a42f-6ddd998aa092' in ids)
self.assertTrue('0cd183d3-0287-4b1a-8288-b3ea8302ed58' in ids)
self.assertTrue('b9b87a5b-2885-4a2e-b434-44a163ca6251' in ids)
def test_list_images(self):
images = self.driver.list_images()
image = images[0]
self.assertEqual(len(images), 10)
self.assertEqual(image.name, 'CentOS 5 (Latest Release)')
self.assertEqual(image.id, 'c66b8145-f768-45ef-9878-395bf8b1b7ff')
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
class BlueboxMockHttp(MockHttp):
    """Mock HTTP connection serving canned Bluebox API fixture files."""

    fixtures = ComputeFileFixtures('bluebox')

    def _ok(self, body, headers):
        # Every mocked endpoint answers 200 OK; only the body differs.
        return (httplib.OK, body, headers, httplib.responses[httplib.OK])

    def _api_blocks_json(self, method, url, body, headers):
        # POST (create block) and GET (list blocks) use different fixtures.
        if method == "POST":
            name = 'api_blocks_json_post.json'
        else:
            name = 'api_blocks_json.json'
        return self._ok(self.fixtures.load(name), headers)

    def _api_block_products_json(self, method, url, body, headers):
        return self._ok(
            self.fixtures.load('api_block_products_json.json'), headers)

    def _api_block_templates_json(self, method, url, body, headers):
        return self._ok(
            self.fixtures.load('api_block_templates_json.json'), headers)

    def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json(self, method, url, body, headers):
        # DELETE (destroy block) returns its own fixture.
        if method == 'DELETE':
            name = 'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json'
        else:
            name = 'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json'
        return self._ok(self.fixtures.load(name), headers)

    def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json(self, method, url, body, headers):
        return self._ok(self.fixtures.load(
            'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json'),
            headers)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    sys.exit(unittest.main())
| apache-2.0 |
TechBK/horizon-dev | openstack_dashboard/dashboards/project/data_processing/data_image_registry/views.py | 23 | 4499 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import sahara as saharaclient
from openstack_dashboard.dashboards.project. \
data_processing.data_image_registry.forms import EditTagsForm
from openstack_dashboard.dashboards.project. \
data_processing.data_image_registry.forms import RegisterImageForm
from openstack_dashboard.dashboards.project. \
data_processing.data_image_registry.tables import ImageRegistryTable
LOG = logging.getLogger(__name__)
class ImageRegistryView(tables.DataTableView):
    """Panel index view: lists all images registered with Sahara."""

    table_class = ImageRegistryTable
    template_name = (
        'project/data_processing.data_image_registry/image_registry.html')
    page_title = _("Image Registry")

    def get_data(self):
        # Fetch the registered images; on API failure degrade to an
        # empty table and surface the error via Horizon's handler.
        try:
            images = saharaclient.image_list(self.request)
        except Exception:
            images = []
            msg = _('Unable to retrieve image list')
            exceptions.handle(self.request, msg)
        return images
def update_context_with_plugin_tags(request, context):
    """Add a ``plugins`` mapping of {plugin: {version: required image tags}}
    to the template *context*.

    Any Sahara API failure is reported through Horizon's exception
    handler and the affected plugin/version is simply left out.
    """
    try:
        plugins = saharaclient.plugin_list(request)
    except Exception:
        plugins = []
        msg = _("Unable to process plugin tags")
        exceptions.handle(request, msg)

    plugins_object = {}
    for plugin in plugins:
        versions = {}
        plugins_object[plugin.name] = versions
        for version in plugin.versions:
            try:
                details = saharaclient.plugin_get_version_details(
                    request, plugin.name, version)
                versions[version] = details.required_image_tags
            except Exception:
                msg = _("Unable to process plugin tags")
                exceptions.handle(request, msg)
    context["plugins"] = plugins_object
class EditTagsView(forms.ModalFormView):
    """Modal form for editing the Sahara tags of a registered image."""

    form_class = EditTagsForm
    template_name = (
        'project/data_processing.data_image_registry/edit_tags.html')
    success_url = reverse_lazy(
        'horizon:project:data_processing.data_image_registry:index')
    page_title = _("Edit Image Tags")

    def get_context_data(self, **kwargs):
        context = super(EditTagsView, self).get_context_data(**kwargs)
        context['image'] = self.get_object()
        update_context_with_plugin_tags(self.request, context)
        return context

    @memoized.memoized_method
    def get_object(self):
        # Memoized so the image is fetched only once per request even
        # though both get_context_data() and get_initial() need it.
        try:
            image = saharaclient.image_get(self.request,
                                           self.kwargs["image_id"])
        except Exception:
            image = None
            msg = _("Unable to fetch the image details")
            exceptions.handle(self.request, msg)
        return image

    def get_initial(self):
        # NOTE(review): if get_object() failed above, image is None and
        # these attribute accesses raise — presumably exceptions.handle
        # redirects first; confirm against the Horizon flow.
        image = self.get_object()
        return {"image_id": image.id,
                "tags_list": json.dumps(image.tags),
                "user_name": image.username,
                "description": image.description}
class RegisterImageView(forms.ModalFormView):
    """Modal form for registering a new image with Sahara."""

    form_class = RegisterImageForm
    template_name = (
        'project/data_processing.data_image_registry/register_image.html')
    success_url = reverse_lazy(
        'horizon:project:data_processing.data_image_registry:index')
    page_title = _("Register Image")

    def get_context_data(self, **kwargs):
        context = super(RegisterImageView, self).get_context_data(**kwargs)
        update_context_with_plugin_tags(self.request, context)
        return context

    def get_initial(self):
        # need this initialization to allow registration
        # of images without tags
        return {"tags_list": json.dumps([])}
| apache-2.0 |
qpython-android/QPython3-core | pybuild/packages/cffi2.py | 1 | 1373 | from ..source import GitSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
import os
class Cffi2(Package):
    """Build recipe for the QPYPI fork of cffi, driven by the host python2."""

    source = GitSource('https://github.com/QPYPI/cffi.git', alias='cffi2')
    patches = [
        #LocalPatch('0001-cross-compile'),
    ]

    def prepare(self):
        # No pre-build preparation is needed for this package.
        pass

    def build(self):
        # PY_BRANCH / PY_M_BRANCH select the target CPython major/minor
        # version (e.g. "3" and "7") — presumably exported by the build
        # driver; TODO confirm they are always set (os.getenv returns
        # None otherwise, which would corrupt the paths below).
        PY_BRANCH = os.getenv('PY_BRANCH')
        PY_M_BRANCH = os.getenv('PY_M_BRANCH')
        BLD = os.path.join(os.getcwd(),'build/target')
        ANDROID_NDK = os.getenv("ANDROID_NDK")
        # Compile the C extension against the cross-compiled python,
        # openblas and libffi headers/libs staged under build/target.
        self.run([
            'python2',
            'setup.py',
            'build_ext',
            f'-I{BLD}/python{PY_BRANCH}/usr/include/python{PY_BRANCH}.{PY_M_BRANCH}'\
            f':{BLD}/openblas/usr/include'\
            f':{BLD}/libffi/usr/lib/libffi-3.2.1/include',
            f'-L{BLD}/python{PY_BRANCH}/usr/lib'\
            f':{BLD}/openblas/usr/lib'\
            f':{BLD}/libffi/usr/lib'\
            f':{ANDROID_NDK}/toolchains/renderscript/prebuilt/linux-x86_64/platform/arm',
            f'-lpython{PY_BRANCH}.{PY_M_BRANCH},m',
        ])
        self.run([
            'python2',
            'setup.py',
            'build_py',
        ])
        # Install into the staged target-python prefix.
        self.run([
            'python2',
            'setup.py',
            'install',
            '--root',
            f'{BLD}/python{PY_BRANCH}',
        ])
| apache-2.0 |
cylc/cylc | tests/unit/test_rundb.py | 1 | 8120 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import os
import sqlite3
import unittest
from tempfile import mktemp
from unittest import mock
from cylc.flow.rundb import CylcSuiteDAO
GLOBAL_CONFIG = """
[platforms]
[[desktop[0-9]{2}|laptop[0-9]{2}]]
# hosts = platform name (default)
# Note: "desktop01" and "desktop02" are both valid and distinct
# platforms
[[sugar]]
hosts = localhost
batch system = slurm
[[hpc]]
hosts = hpcl1, hpcl2
retrieve job logs = True
batch system = pbs
[[hpcl1-bg]]
hosts = hpcl1
retrieve job logs = True
batch system = background
[[hpcl2-bg]]
hosts = hpcl2
retrieve job logs = True
batch system = background
"""
class TestRunDb(unittest.TestCase):
    """Unit tests for CylcSuiteDAO against a mocked DB connection."""

    def setUp(self):
        self.dao = CylcSuiteDAO(':memory:')
        self.mocked_connection = mock.Mock()
        # Replace connect() so no real SQLite connection is ever opened.
        self.dao.connect = mock.MagicMock(return_value=self.mocked_connection)

    # (cycle, name, submit_num) parameter sets for test_select_task_job.
    get_select_task_job = [
        ["cycle", "name", "NN"],
        ["cycle", "name", None],
        ["cycle", "name", "02"],
    ]

    def test_select_task_job(self):
        """Test the rundb CylcSuiteDAO select_task_job method"""
        columns = self.dao.tables[CylcSuiteDAO.TABLE_TASK_JOBS].columns[3:]
        # One fake row whose every column value is 2.
        expected_values = [[2 for _ in columns]]
        self.mocked_connection.execute.return_value = expected_values
        # parameterized test
        for cycle, name, submit_num in self.get_select_task_job:
            returned_values = self.dao.select_task_job(cycle, name, submit_num)
            for column in columns:
                self.assertEqual(2, returned_values[column.name])

    def test_select_task_job_sqlite_error(self):
        """Test that when the rundb CylcSuiteDAO select_task_job method raises
        a SQLite exception, the method returns None"""
        self.mocked_connection.execute.side_effect = sqlite3.DatabaseError
        r = self.dao.select_task_job("it'll", "raise", "an error!")
        self.assertIsNone(r)
@contextlib.contextmanager
def create_temp_db():
    """Yield ``(path, connection)`` for a throw-away SQLite database.

    Cleanup now runs in a ``finally`` block so the temporary file is
    removed even when the test body raises.  The connection is closed
    *before* the file is unlinked (the original did the reverse), which
    is required on platforms such as Windows that refuse to delete an
    open file; ``Connection.close()`` is idempotent, so callers that
    already closed it are unaffected.
    """
    temp_db = mktemp()
    conn = sqlite3.connect(temp_db)
    try:
        yield (temp_db, conn)
    finally:
        conn.close()  # safe even if the caller already closed it
        if os.path.exists(temp_db):
            os.remove(temp_db)
def test_remove_columns():
    """Test workaround for dropping columns in sqlite3."""
    with create_temp_db() as (temp_db, conn):
        # Build a three-column table holding a single row.
        conn.execute(
            rf'''
                CREATE TABLE foo (
                    bar,
                    baz,
                    pub
                )
            '''
        )
        conn.execute(
            rf'''
                INSERT INTO foo
                VALUES (?,?,?)
            ''',
            ['BAR', 'BAZ', 'PUB']
        )
        conn.commit()
        conn.close()

        # Drop two of the three columns via the DAO's rebuild workaround
        # (sqlite3 has no native DROP COLUMN for older versions).
        dao = CylcSuiteDAO(temp_db)
        dao.remove_columns('foo', ['bar', 'baz'])

        # Only the surviving column's data should remain.
        conn = dao.connect()
        data = [row for row in conn.execute(rf'SELECT * from foo')]
        assert data == [('PUB',)]
def test_upgrade_hold_swap():
    """Pre Cylc8 DB upgrade compatibility test.

    Seeds both task-pool tables with pre-upgrade (status, hold_swap)
    rows, runs ``upgrade_is_held`` and checks every table was converted
    to the (status, is_held) form.
    """
    # test data
    initial_data = [
        # (name, cycle, status, hold_swap)
        ('foo', '1', 'waiting', ''),
        ('bar', '1', 'held', 'waiting'),
        ('baz', '1', 'held', 'running'),
        ('pub', '1', 'waiting', 'held')
    ]
    expected_data = [
        # (name, cycle, status, is_held)
        ('foo', '1', 'waiting', 0),
        ('bar', '1', 'waiting', 1),
        ('baz', '1', 'running', 1),
        ('pub', '1', 'waiting', 1)
    ]
    tables = [
        CylcSuiteDAO.TABLE_TASK_POOL,
        CylcSuiteDAO.TABLE_TASK_POOL_CHECKPOINTS
    ]
    with create_temp_db() as (temp_db, conn):
        # initialise tables
        for table in tables:
            conn.execute(
                rf'''
                    CREATE TABLE {table} (
                        name varchar(255),
                        cycle varchar(255),
                        status varchar(255),
                        hold_swap varchar(255)
                    )
                '''
            )
            conn.executemany(
                rf'''
                    INSERT INTO {table}
                    VALUES (?,?,?,?)
                ''',
                initial_data
            )
        # close database
        conn.commit()
        conn.close()
        # open database as cylc dao
        dao = CylcSuiteDAO(temp_db)
        conn = dao.connect()
        # check the initial data was correctly inserted
        for table in tables:
            dump = [x for x in conn.execute(rf'SELECT * FROM {table}')]
            assert dump == initial_data
        # upgrade
        assert dao.upgrade_is_held()
        # check the data was correctly upgraded
        for table in tables:
            # BUG FIX: this query previously hard-coded 'task_pool', so
            # the checkpoints table's upgrade result was never checked.
            dump = [x for x in conn.execute(rf'SELECT * FROM {table}')]
            assert dump == expected_data
        # make sure the upgrade is skipped on future runs
        assert not dao.upgrade_is_held()
def test_upgrade_to_platforms(mock_glbl_cfg):
    """Test upgrader logic for platforms in the database.

    ``mock_glbl_cfg`` is a fixture that patches the global config with
    GLOBAL_CONFIG so (host, batch system) pairs resolve to known
    platforms.
    """
    # Set up the global config
    mock_glbl_cfg('cylc.flow.rundb.glbl_cfg', GLOBAL_CONFIG)
    # task name, cycle, user_at_host, batch_system
    initial_data = [
        ('hpc_with_pbs', '1', 'hpcl1', 'pbs'),
        ('desktop_with_bg', '1', 'desktop01', 'background'),
        ('slurm_no_host', '1', '', 'slurm'),
        ('hpc_bg', '1', 'hpcl1', 'background'),
        ('username_given', '1', 'slartibartfast@hpcl1', 'pbs')
    ]
    # task name, cycle, user, platform
    expected_data = [
        ('hpc_with_pbs', '1', '', 'hpc'),
        ('desktop_with_bg', '1', '', 'desktop01'),
        ('slurm_no_host', '1', '', 'sugar'),
        ('hpc_bg', '1', '', 'hpcl1-bg'),
        ('username_given', '1', 'slartibartfast', 'hpc'),
    ]
    with create_temp_db() as (temp_db, conn):
        # Seed the task_jobs table in its pre-platforms schema.
        conn.execute(
            rf'''
                CREATE TABLE {CylcSuiteDAO.TABLE_TASK_JOBS} (
                    name varchar(255),
                    cycle varchar(255),
                    user_at_host varchar(255),
                    batch_system varchar(255)
                )
            '''
        )
        conn.executemany(
            rf'''
                INSERT INTO {CylcSuiteDAO.TABLE_TASK_JOBS}
                VALUES (?,?,?,?)
            ''',
            initial_data
        )
        # close database
        conn.commit()
        conn.close()
        # open database as cylc dao
        dao = CylcSuiteDAO(temp_db)
        conn = dao.connect()
        # check the initial data was correctly inserted
        dump = [
            x for x in conn.execute(
                rf'SELECT * FROM {CylcSuiteDAO.TABLE_TASK_JOBS}'
            )
        ]
        assert dump == initial_data
        # Upgrade function returns True?
        assert dao.upgrade_to_platforms()
        # check the data was correctly upgraded
        dump = [
            x for x in conn.execute(
                rf'SELECT name, cycle, user, platform_name FROM task_jobs'
            )
        ]
        assert dump == expected_data
        # make sure the upgrade is skipped on future runs
        assert not dao.upgrade_to_platforms()
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
dancingdan/tensorflow | tensorflow/python/autograph/operators/control_flow_test.py | 5 | 3212 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import control_flow
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ForLoopTest(test.TestCase):
    """Tests for the AutoGraph staged for-loop operator (for_stmt)."""

    def test_tensor(self):
        # Summing a constant tensor's elements through the staged loop.
        s = control_flow.for_stmt(
            constant_op.constant([1, 2, 3, 4]),
            extra_test=lambda s: True,
            body=lambda i, s: (s + i,),
            init_state=(0,))
        with self.cached_session() as sess:
            self.assertEqual((10,), sess.run(s))

    def test_python(self):
        # A plain Python iterable is executed as an ordinary loop and
        # returns a plain value, not a tensor.
        s = control_flow.for_stmt(
            range(5),
            extra_test=lambda s: True,
            body=lambda i, s: (s + i,),
            init_state=(0,))
        self.assertEqual(10, s)

    def test_dataset(self):
        # Iterating a tf.data.Dataset; cast to int32 so the element type
        # matches the integer loop state.
        to_int32 = lambda i: math_ops.cast(i, dtypes.int32)
        s = control_flow.for_stmt(
            dataset_ops.Dataset.range(5).map(to_int32),
            extra_test=lambda s: True,
            body=lambda i, s: (s + i,),
            init_state=(0,))
        with self.cached_session() as sess:
            self.assertEqual((10,), sess.run(s))
class WhileLoopTest(test.TestCase):
    """Tests for the AutoGraph staged while-loop operator (while_stmt)."""

    def test_tensor(self):
        # Tensor-dependent condition: loop is staged as tf.while_loop.
        n = constant_op.constant(5)
        results = control_flow.while_stmt(
            test=lambda i, s: i < n,
            body=lambda i, s: (i + 1, s + i,),
            init_state=(0, 0),
            extra_deps=(n,))
        with self.cached_session() as sess:
            self.assertEqual((5, 10), sess.run(results))

    def test_python(self):
        # Plain Python bound: executed eagerly as a regular while loop.
        n = 5
        results = control_flow.while_stmt(
            test=lambda i, s: i < n,
            body=lambda i, s: (i + 1, s + i),
            init_state=(0, 0),
            extra_deps=(n,))
        self.assertEqual((5, 10), results)
class IfStmtTest(test.TestCase):
    """Tests for the AutoGraph staged conditional operator (if_stmt)."""

    def test_tensor(self):
        # Tensor predicate: the conditional is staged as tf.cond.
        def build_stmt(cond):
            return control_flow.if_stmt(
                cond=cond,
                body=lambda: 1,
                orelse=lambda: -1)

        with self.cached_session() as sess:
            true_result = sess.run(build_stmt(constant_op.constant(True)))
            false_result = sess.run(build_stmt(constant_op.constant(False)))
            self.assertEqual(1, true_result)
            self.assertEqual(-1, false_result)

    def test_python(self):
        # Python boolean predicate: evaluated eagerly as plain if/else.
        self.assertEqual(1, control_flow.if_stmt(True, lambda: 1, lambda: -1))
        self.assertEqual(-1, control_flow.if_stmt(False, lambda: 1, lambda: -1))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test.main()
| apache-2.0 |
brototyp/CouchPotato | library/hachoir_parser/program/java.py | 84 | 57979 | """
Compiled Java classes parser.
Author: Thomas de Grenier de Latour (TGL) <degrenier@easyconnect.fr>
Creation: 2006/11/01
Last-update: 2006/11/06
Introduction:
* This parser is for compiled Java classes, aka .class files. What is nice
with this format is that it is well documented in the official Java VM specs.
* Some fields, and most field sets, have dynamic sizes, and there is no offset
to directly jump from an header to a given section, or anything like that.
It means that accessing a field at the end of the file requires that you've
already parsed almost the whole file. That's not very efficient, but it's
okay given the usual size of .class files (usually a few KB).
* Most fields are just indexes of some "constant pool" entries, which holds
most constant data of the class. And constant pool entries reference other
constant pool entries, etc. Hence, a raw display of this fields only shows
integers and is not really understandable. Because of that, this parser
comes with two important custom field classes:
- CPInfo are constant pool entries. They have a type ("Utf8", "Methodref",
etc.), and some contents fields depending on this type. They also have a
    "__str__()" method, which returns a synthetic view of its contents.
- CPIndex are constant pool indexes (UInt16). It is possible to specify
what type of CPInfo they are allowed to points to. They also have a
custom display method, usually printing something like "-> foo", where
foo is the str() of their target CPInfo.
References:
* The Java Virtual Machine Specification, 2nd edition, chapter 4, in HTML:
http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html
=> That's the spec i've been implementing so far. I think it is format
version 46.0 (JDK 1.2).
* The Java Virtual Machine Specification, 2nd edition, chapter 4, in PDF:
http://java.sun.com/docs/books/vmspec/2nd-edition/ClassFileFormat.pdf
=> don't trust the URL, this PDF version is more recent than the HTML one.
    It highlights some recent additions to the format (I don't know the
exact version though), which are not yet implemented in this parser.
* The Java Virtual Machine Specification, chapter 4:
http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html
=> describes an older format, probably version 45.3 (JDK 1.1).
TODO/FIXME:
* Google for some existing free .class files parsers, to get more infos on
the various formats differences, etc.
* Write/compile some good tests cases.
* Rework pretty-printing of CPIndex fields. This str() thing sinks.
* Add support of formats other than 46.0 (45.3 seems to already be ok, but
there are things to add for later formats).
* Make parsing robust: currently, the parser will die on asserts as soon as
something seems wrong. It should rather be tolerant, print errors/warnings,
and try its best to continue. Check how error-handling is done in other
parsers.
* Gettextize the whole thing.
* Check whether Float32/64 are really the same as Java floats/double. PEP-0754
says that handling of +/-infinity and NaN is very implementation-dependent.
Also check how this values are displayed.
* Make the parser edition-proof. For instance, editing a constant-pool string
should update the length field of it's entry, etc. Sounds like a huge work.
"""
from hachoir_parser import Parser
from hachoir_core.field import (
ParserError, FieldSet, StaticFieldSet,
Enum, RawBytes, PascalString16, Float32, Float64,
Int8, UInt8, Int16, UInt16, Int32, UInt32, Int64,
Bit, NullBits )
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.tools import paddingSize
###############################################################################
def parse_flags(flags, flags_dict, show_unknown_flags=True, separator=" "):
    """
    Parse an integer representing a set of flags. The known flags are
    stored with their bit-mask in a dictionary. Returns a string joining
    the names of the set flags (unknown set bits become "???" unless
    show_unknown_flags is False).
    """
    names = []
    # Walk each bit position up to the highest set bit.
    for bit in range(flags.bit_length()):
        mask = 1 << bit
        if not flags & mask:
            continue
        if mask in flags_dict:
            names.append(flags_dict[mask])
        elif show_unknown_flags:
            names.append("???")
    return separator.join(names)
###############################################################################
# Mapping of JVM descriptor one-letter primitive type codes to Java
# type names (JVM spec, "field descriptors" table).
code_to_type_name = {
    'B': "byte",
    'C': "char",
    'D': "double",
    'F': "float",
    'I': "int",
    'J': "long",
    'S': "short",
    'Z': "boolean",
    'V': "void",
}
def eat_descriptor(descr):
    """
    Read head of a field/method descriptor. Returns a pair of strings, where
    the first one is a human-readable string representation of the first found
    type, and the second one is the tail of the parameter.

    Raises ParserError if the descriptor is malformed.
    """
    # Leading '[' characters encode array dimensions.
    array_dim = 0
    while descr[0] == '[':
        array_dim += 1
        descr = descr[1:]
    if descr[0] == 'L':
        # Object type: "Lpkg/Class;" up to the terminating ';'.
        end = descr.find(';')
        if end == -1:
            # str.find() returns -1 instead of raising, so this must be
            # an explicit check (the original bare try/except could
            # never fire, silently producing a truncated type).
            raise ParserError("Not a valid descriptor string: " + descr)
        type = descr[1:end]
        descr = descr[end:]
    else:
        # Primitive type: single-letter code.
        try:
            type = code_to_type_name[descr[0]]
        except KeyError:
            raise ParserError("Not a valid descriptor string: %s" % descr)
    return (type.replace("/", ".") + array_dim * "[]", descr[1:])
def parse_field_descriptor(descr, name=None):
    """
    Parse a field descriptor (single type), and returns it as human-readable
    string representation (optionally prefixing the given field name).
    """
    assert descr
    field_type, tail = eat_descriptor(descr)
    # A field descriptor holds exactly one type; nothing may follow.
    assert not tail
    return "%s %s" % (field_type, name) if name else field_type
def parse_method_descriptor(descr, name=None):
    """
    Parse a method descriptor (params type and return type), and returns it
    as human-readable string representation.
    """
    assert descr and (descr[0] == '(')
    # Consume parameter types until the closing parenthesis.
    descr = descr[1:]
    param_types = []
    while descr[0] != ')':
        param, descr = eat_descriptor(descr)
        param_types.append(param)
    # What follows ')' is the return type, and nothing else.
    ret_type, tail = eat_descriptor(descr[1:])
    assert not tail
    params = ", ".join(param_types)
    if name:
        return "%s %s(%s)" % (ret_type, name, params)
    return "%s (%s)" % (ret_type, params)
def parse_any_descriptor(descr, name=None):
    """
    Parse either a field or method descriptor, and returns it as human-
    readable string representation.
    """
    assert descr
    # Method descriptors are the only ones starting with '('.
    parser = parse_method_descriptor if descr[0] == '(' else parse_field_descriptor
    return parser(descr, name)
###############################################################################
class FieldArray(FieldSet):
    """
    Holds a fixed length array of fields which all have the same type. This
    type may be variable-length. Each field will be named "foo[x]" (with x
    starting at 0).
    """

    def __init__(self, parent, name, elements_class, length,
                 **elements_extra_args):
        """Create a FieldArray of <length> fields of class <elements_class>,
        named "<name>[x]". The **elements_extra_args will be passed to the
        constructor of each field when yielded."""
        FieldSet.__init__(self, parent, name)
        self.array_elements_class = elements_class
        self.array_length = length
        self.array_elements_extra_args = elements_extra_args

    def createFields(self):
        make_element = self.array_elements_class
        extra_args = self.array_elements_extra_args
        for index in range(self.array_length):
            yield make_element(self, "%s[%d]" % (self.name, index),
                               **extra_args)
class ConstantPool(FieldSet):
    """
    ConstantPool is similar to a FieldArray of CPInfo fields, but:
    - numbering starts at 1 instead of zero
    - some indexes are skipped (after Long or Double entries)
    """
    def __init__(self, parent, name, length):
        FieldSet.__init__(self, parent, name)
        self.constant_pool_length = length
    def createFields(self):
        i = 1
        while i < self.constant_pool_length:
            name = "%s[%d]" % (self.name, i)
            yield CPInfo(self, name)
            i += 1
            # Per the JVM spec, 8-byte constants (Long/Double) take up
            # two constant-pool slots; the slot after them is unusable.
            if self[name].constant_type in ("Long", "Double"):
                i += 1
###############################################################################
class CPIndex(UInt16):
    """
    Holds index of a constant pool entry.
    """

    def __init__(self, parent, name, description=None, target_types=None,
                 target_text_handler=(lambda x: x), allow_zero=False):
        """
        Initialize a CPIndex.
        - target_type is the tuple of expected type for the target CPInfo
          (if None, then there will be no type check)
        - target_text_handler is a string transformation function used for
          pretty printing the target str() result
        - allow_zero states whether null index is allowed (sometimes, constant
          pool index is optionnal)
        """
        UInt16.__init__(self, parent, name, description)
        # Normalize a single type name into a one-element tuple.
        if isinstance(target_types, str):
            self.target_types = (target_types,)
        else:
            self.target_types = target_types
        self.allow_zero = allow_zero
        self.target_text_handler = target_text_handler
        # Preserve the raw numeric value for hachoir's "original display".
        self.getOriginalDisplay = lambda: self.value

    def createDisplay(self):
        # Render as "-> <target>" (or "ZERO" for an allowed null index).
        cp_entry = self.get_cp_entry()
        if self.allow_zero and not cp_entry:
            return "ZERO"
        assert cp_entry
        return "-> " + self.target_text_handler(str(cp_entry))

    def get_cp_entry(self):
        """
        Returns the target CPInfo field (or None for an allowed zero index).
        """
        assert self.value < self["/constant_pool_count"].value
        if self.allow_zero and not self.value: return None
        cp_entry = self["/constant_pool/constant_pool[%d]" % self.value]
        assert isinstance(cp_entry, CPInfo)
        # Optional type check against the declared target types.
        if self.target_types:
            assert cp_entry.constant_type in self.target_types
        return cp_entry
###############################################################################
class JavaOpcode(FieldSet):
    """Base class for JVM bytecode instructions.

    OPSIZE is the fixed instruction size in bytes; 0 means variable
    length (no preset _size).
    """
    OPSIZE = 0

    def __init__(self, parent, name, op, desc):
        FieldSet.__init__(self, parent, name)
        if self.OPSIZE != 0: self._size = self.OPSIZE*8
        self.op = op      # mnemonic, e.g. "iconst_0"
        self.desc = desc  # human-readable description from OPCODE_TABLE

    def createDisplay(self):
        return self.op

    def createDescription(self):
        return self.desc

    def createValue(self):
        return self.createDisplay()
class OpcodeNoArgs(JavaOpcode):
    # Single-byte instruction: just the opcode, no operands.
    OPSIZE = 1
    def createFields(self):
        yield UInt8(self, "opcode")
class OpcodeCPIndex(JavaOpcode):
    # Opcode followed by a 16-bit constant-pool index.
    OPSIZE = 3
    def createFields(self):
        yield UInt8(self, "opcode")
        yield CPIndex(self, "index")
    def createDisplay(self):
        return "%s(%i)"%(self.op, self["index"].value)
class OpcodeCPIndexShort(JavaOpcode):
    # Opcode followed by an 8-bit constant-pool index (e.g. "ldc").
    OPSIZE = 2
    def createFields(self):
        yield UInt8(self, "opcode")
        yield UInt8(self, "index")
    def createDisplay(self):
        return "%s(%i)"%(self.op, self["index"].value)
class OpcodeIndex(JavaOpcode):
    # Opcode followed by an 8-bit index (local variable slot).
    OPSIZE = 2
    def createFields(self):
        yield UInt8(self, "opcode")
        yield UInt8(self, "index")
    def createDisplay(self):
        return "%s(%i)"%(self.op, self["index"].value)
class OpcodeShortJump(JavaOpcode):
    # Branch instruction with a signed 16-bit relative offset.
    OPSIZE = 3
    def createFields(self):
        yield UInt8(self, "opcode")
        yield Int16(self, "offset")
    def createDisplay(self):
        return "%s(%s)"%(self.op, self["offset"].value)
class OpcodeLongJump(JavaOpcode):
    # Wide branch instruction with a signed 32-bit relative offset.
    OPSIZE = 5
    def createFields(self):
        yield UInt8(self, "opcode")
        yield Int32(self, "offset")
    def createDisplay(self):
        return "%s(%s)"%(self.op, self["offset"].value)
class OpcodeSpecial_bipush(JavaOpcode):
    # "bipush": push a signed 8-bit immediate onto the operand stack.
    OPSIZE = 2
    def createFields(self):
        yield UInt8(self, "opcode")
        yield Int8(self, "value")
    def createDisplay(self):
        return "%s(%s)"%(self.op, self["value"].value)
class OpcodeSpecial_sipush(JavaOpcode):
    # "sipush": push a signed 16-bit immediate onto the operand stack.
    OPSIZE = 3
    def createFields(self):
        yield UInt8(self, "opcode")
        yield Int16(self, "value")
    def createDisplay(self):
        return "%s(%s)"%(self.op, self["value"].value)
class OpcodeSpecial_iinc(JavaOpcode):
    # "iinc": increment local variable <index> by signed 8-bit <value>.
    OPSIZE = 3
    def createFields(self):
        yield UInt8(self, "opcode")
        yield UInt8(self, "index")
        yield Int8(self, "value")
    def createDisplay(self):
        return "%s(%i,%i)"%(self.op, self["index"].value, self["value"].value)
class OpcodeSpecial_wide(JavaOpcode):
    # "wide": prefix widening the following instruction's operands to
    # 16 bits; "wide iinc" additionally carries a 16-bit increment.
    def createFields(self):
        yield UInt8(self, "opcode")
        new_op = UInt8(self, "new_opcode")
        yield new_op
        op = new_op._description = JavaBytecode.OPCODE_TABLE.get(
            new_op.value, ["reserved", None, "Reserved"])[0]
        yield UInt16(self, "index")
        # BUG FIX: the display override is stored as an *instance*
        # attribute, which is looked up without descriptor binding, so
        # the original "lambda self: ..." received no argument and
        # raised TypeError when self.createDisplay() was called.  Use
        # zero-argument closures over self instead.
        if op == "iinc":
            yield Int16(self, "value")
            self.createDisplay = lambda: "%s(%i,%i)" % (
                self.op, self["index"].value, self["value"].value)
        else:
            self.createDisplay = lambda: "%s(%i)" % (
                self.op, self["index"].value)
class OpcodeSpecial_invokeinterface(JavaOpcode):
    # "invokeinterface": constant-pool method index, arg-slot count, and
    # a historical padding byte that must be zero.
    OPSIZE = 5
    def createFields(self):
        yield UInt8(self, "opcode")
        yield CPIndex(self, "index")
        yield UInt8(self, "count")
        yield UInt8(self, "zero", "Must be zero.")
    def createDisplay(self):
        return "%s(%i,%i,%i)"%(self.op, self["index"].value, self["count"].value, self["zero"].value)
class OpcodeSpecial_newarray(JavaOpcode):
    # "newarray": create a primitive array; the operand selects the
    # element type (codes per the JVM spec newarray table).
    OPSIZE = 2
    def createFields(self):
        yield UInt8(self, "opcode")
        yield Enum(UInt8(self, "atype"), {4: "boolean",
                                          5: "char",
                                          6: "float",
                                          7: "double",
                                          8: "byte",
                                          9: "short",
                                          10:"int",
                                          11:"long"})
    def createDisplay(self):
        return "%s(%s)"%(self.op, self["atype"].createDisplay())
class OpcodeSpecial_multianewarray(JavaOpcode):
    # "multianewarray": constant-pool class index plus the number of
    # dimensions to allocate.
    OPSIZE = 4
    def createFields(self):
        yield UInt8(self, "opcode")
        yield CPIndex(self, "index")
        yield UInt8(self, "dimensions")
    def createDisplay(self):
        return "%s(%i,%i)"%(self.op, self["index"].value, self["dimensions"].value)
class OpcodeSpecial_tableswitch(JavaOpcode):
    # "tableswitch": variable-length jump table, padded so the operands
    # start on a 4-byte boundary relative to the method start.
    def createFields(self):
        yield UInt8(self, "opcode")
        pad = paddingSize(self.address+8, 32)
        if pad:
            yield NullBits(self, "padding", pad)
        yield Int32(self, "default")
        low = Int32(self, "low")
        yield low
        high = Int32(self, "high")
        yield high
        # One 32-bit branch offset per value in [low, high].
        for i in range(high.value-low.value+1):
            yield Int32(self, "offset[]")
    def createDisplay(self):
        return "%s(%i,%i,%i,...)"%(self.op, self["default"].value, self["low"].value, self["high"].value)
class OpcodeSpecial_lookupswitch(JavaOpcode):
    # "lookupswitch": variable-length sparse switch of npairs
    # (match, offset) entries, padded to a 4-byte boundary.
    def createFields(self):
        yield UInt8(self, "opcode")
        pad = paddingSize(self.address+8, 32)
        if pad:
            yield NullBits(self, "padding", pad)
        yield Int32(self, "default")
        n = Int32(self, "npairs")
        yield n
        for i in range(n.value):
            yield Int32(self, "match[]")
            yield Int32(self, "offset[]")
    def createDisplay(self):
        return "%s(%i,%i,...)"%(self.op, self["default"].value, self["npairs"].value)
class JavaBytecode(FieldSet):
    """
    Parses a run of Java bytecode instructions, as found in the 'code' part
    of a Code attribute. Each opcode byte is looked up in OPCODE_TABLE, which
    maps it to a (mnemonic, instruction parser class, description) triple.

    BUG FIX: opcode 0x9b ('iflt') was missing from the table; it would be
    mis-parsed as a 1-byte reserved opcode instead of a 3-byte branch
    instruction, desynchronizing every following instruction.
    """
    OPCODE_TABLE = {
        0x00: ("nop", OpcodeNoArgs, "performs no operation. Stack: [No change]"),
        0x01: ("aconst_null", OpcodeNoArgs, "pushes a 'null' reference onto the stack. Stack: -> null"),
        0x02: ("iconst_m1", OpcodeNoArgs, "loads the int value -1 onto the stack. Stack: -> -1"),
        0x03: ("iconst_0", OpcodeNoArgs, "loads the int value 0 onto the stack. Stack: -> 0"),
        0x04: ("iconst_1", OpcodeNoArgs, "loads the int value 1 onto the stack. Stack: -> 1"),
        0x05: ("iconst_2", OpcodeNoArgs, "loads the int value 2 onto the stack. Stack: -> 2"),
        0x06: ("iconst_3", OpcodeNoArgs, "loads the int value 3 onto the stack. Stack: -> 3"),
        0x07: ("iconst_4", OpcodeNoArgs, "loads the int value 4 onto the stack. Stack: -> 4"),
        0x08: ("iconst_5", OpcodeNoArgs, "loads the int value 5 onto the stack. Stack: -> 5"),
        0x09: ("lconst_0", OpcodeNoArgs, "pushes the long 0 onto the stack. Stack: -> 0L"),
        0x0a: ("lconst_1", OpcodeNoArgs, "pushes the long 1 onto the stack. Stack: -> 1L"),
        0x0b: ("fconst_0", OpcodeNoArgs, "pushes '0.0f' onto the stack. Stack: -> 0.0f"),
        0x0c: ("fconst_1", OpcodeNoArgs, "pushes '1.0f' onto the stack. Stack: -> 1.0f"),
        0x0d: ("fconst_2", OpcodeNoArgs, "pushes '2.0f' onto the stack. Stack: -> 2.0f"),
        0x0e: ("dconst_0", OpcodeNoArgs, "pushes the constant '0.0' onto the stack. Stack: -> 0.0"),
        0x0f: ("dconst_1", OpcodeNoArgs, "pushes the constant '1.0' onto the stack. Stack: -> 1.0"),
        0x10: ("bipush", OpcodeSpecial_bipush, "pushes the signed 8-bit integer argument onto the stack. Stack: -> value"),
        0x11: ("sipush", OpcodeSpecial_sipush, "pushes the signed 16-bit integer argument onto the stack. Stack: -> value"),
        0x12: ("ldc", OpcodeCPIndexShort, "pushes a constant from a constant pool (String, int, float or class type) onto the stack. Stack: -> value"),
        0x13: ("ldc_w", OpcodeCPIndex, "pushes a constant from a constant pool (String, int, float or class type) onto the stack. Stack: -> value"),
        0x14: ("ldc2_w", OpcodeCPIndex, "pushes a constant from a constant pool (double or long) onto the stack. Stack: -> value"),
        0x15: ("iload", OpcodeIndex, "loads an int 'value' from a local variable '#index'. Stack: -> value"),
        0x16: ("lload", OpcodeIndex, "loads a long value from a local variable '#index'. Stack: -> value"),
        0x17: ("fload", OpcodeIndex, "loads a float 'value' from a local variable '#index'. Stack: -> value"),
        0x18: ("dload", OpcodeIndex, "loads a double 'value' from a local variable '#index'. Stack: -> value"),
        0x19: ("aload", OpcodeIndex, "loads a reference onto the stack from a local variable '#index'. Stack: -> objectref"),
        0x1a: ("iload_0", OpcodeNoArgs, "loads an int 'value' from variable 0. Stack: -> value"),
        0x1b: ("iload_1", OpcodeNoArgs, "loads an int 'value' from variable 1. Stack: -> value"),
        0x1c: ("iload_2", OpcodeNoArgs, "loads an int 'value' from variable 2. Stack: -> value"),
        0x1d: ("iload_3", OpcodeNoArgs, "loads an int 'value' from variable 3. Stack: -> value"),
        0x1e: ("lload_0", OpcodeNoArgs, "load a long value from a local variable 0. Stack: -> value"),
        0x1f: ("lload_1", OpcodeNoArgs, "load a long value from a local variable 1. Stack: -> value"),
        0x20: ("lload_2", OpcodeNoArgs, "load a long value from a local variable 2. Stack: -> value"),
        0x21: ("lload_3", OpcodeNoArgs, "load a long value from a local variable 3. Stack: -> value"),
        0x22: ("fload_0", OpcodeNoArgs, "loads a float 'value' from local variable 0. Stack: -> value"),
        0x23: ("fload_1", OpcodeNoArgs, "loads a float 'value' from local variable 1. Stack: -> value"),
        0x24: ("fload_2", OpcodeNoArgs, "loads a float 'value' from local variable 2. Stack: -> value"),
        0x25: ("fload_3", OpcodeNoArgs, "loads a float 'value' from local variable 3. Stack: -> value"),
        0x26: ("dload_0", OpcodeNoArgs, "loads a double from local variable 0. Stack: -> value"),
        0x27: ("dload_1", OpcodeNoArgs, "loads a double from local variable 1. Stack: -> value"),
        0x28: ("dload_2", OpcodeNoArgs, "loads a double from local variable 2. Stack: -> value"),
        0x29: ("dload_3", OpcodeNoArgs, "loads a double from local variable 3. Stack: -> value"),
        0x2a: ("aload_0", OpcodeNoArgs, "loads a reference onto the stack from local variable 0. Stack: -> objectref"),
        0x2b: ("aload_1", OpcodeNoArgs, "loads a reference onto the stack from local variable 1. Stack: -> objectref"),
        0x2c: ("aload_2", OpcodeNoArgs, "loads a reference onto the stack from local variable 2. Stack: -> objectref"),
        0x2d: ("aload_3", OpcodeNoArgs, "loads a reference onto the stack from local variable 3. Stack: -> objectref"),
        0x2e: ("iaload", OpcodeNoArgs, "loads an int from an array. Stack: arrayref, index -> value"),
        0x2f: ("laload", OpcodeNoArgs, "load a long from an array. Stack: arrayref, index -> value"),
        0x30: ("faload", OpcodeNoArgs, "loads a float from an array. Stack: arrayref, index -> value"),
        0x31: ("daload", OpcodeNoArgs, "loads a double from an array. Stack: arrayref, index -> value"),
        0x32: ("aaload", OpcodeNoArgs, "loads onto the stack a reference from an array. Stack: arrayref, index -> value"),
        0x33: ("baload", OpcodeNoArgs, "loads a byte or Boolean value from an array. Stack: arrayref, index -> value"),
        0x34: ("caload", OpcodeNoArgs, "loads a char from an array. Stack: arrayref, index -> value"),
        0x35: ("saload", OpcodeNoArgs, "load short from array. Stack: arrayref, index -> value"),
        0x36: ("istore", OpcodeIndex, "store int 'value' into variable '#index'. Stack: value ->"),
        0x37: ("lstore", OpcodeIndex, "store a long 'value' in a local variable '#index'. Stack: value ->"),
        0x38: ("fstore", OpcodeIndex, "stores a float 'value' into a local variable '#index'. Stack: value ->"),
        0x39: ("dstore", OpcodeIndex, "stores a double 'value' into a local variable '#index'. Stack: value ->"),
        0x3a: ("astore", OpcodeIndex, "stores a reference into a local variable '#index'. Stack: objectref ->"),
        0x3b: ("istore_0", OpcodeNoArgs, "store int 'value' into variable 0. Stack: value ->"),
        0x3c: ("istore_1", OpcodeNoArgs, "store int 'value' into variable 1. Stack: value ->"),
        0x3d: ("istore_2", OpcodeNoArgs, "store int 'value' into variable 2. Stack: value ->"),
        0x3e: ("istore_3", OpcodeNoArgs, "store int 'value' into variable 3. Stack: value ->"),
        0x3f: ("lstore_0", OpcodeNoArgs, "store a long 'value' in a local variable 0. Stack: value ->"),
        0x40: ("lstore_1", OpcodeNoArgs, "store a long 'value' in a local variable 1. Stack: value ->"),
        0x41: ("lstore_2", OpcodeNoArgs, "store a long 'value' in a local variable 2. Stack: value ->"),
        0x42: ("lstore_3", OpcodeNoArgs, "store a long 'value' in a local variable 3. Stack: value ->"),
        0x43: ("fstore_0", OpcodeNoArgs, "stores a float 'value' into local variable 0. Stack: value ->"),
        0x44: ("fstore_1", OpcodeNoArgs, "stores a float 'value' into local variable 1. Stack: value ->"),
        0x45: ("fstore_2", OpcodeNoArgs, "stores a float 'value' into local variable 2. Stack: value ->"),
        0x46: ("fstore_3", OpcodeNoArgs, "stores a float 'value' into local variable 3. Stack: value ->"),
        0x47: ("dstore_0", OpcodeNoArgs, "stores a double into local variable 0. Stack: value ->"),
        0x48: ("dstore_1", OpcodeNoArgs, "stores a double into local variable 1. Stack: value ->"),
        0x49: ("dstore_2", OpcodeNoArgs, "stores a double into local variable 2. Stack: value ->"),
        0x4a: ("dstore_3", OpcodeNoArgs, "stores a double into local variable 3. Stack: value ->"),
        0x4b: ("astore_0", OpcodeNoArgs, "stores a reference into local variable 0. Stack: objectref ->"),
        0x4c: ("astore_1", OpcodeNoArgs, "stores a reference into local variable 1. Stack: objectref ->"),
        0x4d: ("astore_2", OpcodeNoArgs, "stores a reference into local variable 2. Stack: objectref ->"),
        0x4e: ("astore_3", OpcodeNoArgs, "stores a reference into local variable 3. Stack: objectref ->"),
        0x4f: ("iastore", OpcodeNoArgs, "stores an int into an array. Stack: arrayref, index, value ->"),
        0x50: ("lastore", OpcodeNoArgs, "store a long to an array. Stack: arrayref, index, value ->"),
        0x51: ("fastore", OpcodeNoArgs, "stores a float in an array. Stack: arrayref, index, value ->"),
        0x52: ("dastore", OpcodeNoArgs, "stores a double into an array. Stack: arrayref, index, value ->"),
        0x53: ("aastore", OpcodeNoArgs, "stores into a reference to an array. Stack: arrayref, index, value ->"),
        0x54: ("bastore", OpcodeNoArgs, "stores a byte or Boolean value into an array. Stack: arrayref, index, value ->"),
        0x55: ("castore", OpcodeNoArgs, "stores a char into an array. Stack: arrayref, index, value ->"),
        0x56: ("sastore", OpcodeNoArgs, "store short to array. Stack: arrayref, index, value ->"),
        0x57: ("pop", OpcodeNoArgs, "discards the top value on the stack. Stack: value ->"),
        0x58: ("pop2", OpcodeNoArgs, "discards the top two values on the stack (or one value, if it is a double or long). Stack: {value2, value1} ->"),
        0x59: ("dup", OpcodeNoArgs, "duplicates the value on top of the stack. Stack: value -> value, value"),
        0x5a: ("dup_x1", OpcodeNoArgs, "inserts a copy of the top value into the stack two values from the top. Stack: value2, value1 -> value1, value2, value1"),
        0x5b: ("dup_x2", OpcodeNoArgs, "inserts a copy of the top value into the stack two (if value2 is double or long it takes up the entry of value3, too) or three values (if value2 is neither double nor long) from the top. Stack: value3, value2, value1 -> value1, value3, value2, value1"),
        0x5c: ("dup2", OpcodeNoArgs, "duplicate top two stack words (two values, if value1 is not double nor long; a single value, if value1 is double or long). Stack: {value2, value1} -> {value2, value1}, {value2, value1}"),
        0x5d: ("dup2_x1", OpcodeNoArgs, "duplicate two words and insert beneath third word. Stack: value3, {value2, value1} -> {value2, value1}, value3, {value2, value1}"),
        0x5e: ("dup2_x2", OpcodeNoArgs, "duplicate two words and insert beneath fourth word. Stack: {value4, value3}, {value2, value1} -> {value2, value1}, {value4, value3}, {value2, value1}"),
        0x5f: ("swap", OpcodeNoArgs, "swaps two top words on the stack (note that value1 and value2 must not be double or long). Stack: value2, value1 -> value1, value2"),
        0x60: ("iadd", OpcodeNoArgs, "adds two ints together. Stack: value1, value2 -> result"),
        0x61: ("ladd", OpcodeNoArgs, "add two longs. Stack: value1, value2 -> result"),
        0x62: ("fadd", OpcodeNoArgs, "adds two floats. Stack: value1, value2 -> result"),
        0x63: ("dadd", OpcodeNoArgs, "adds two doubles. Stack: value1, value2 -> result"),
        0x64: ("isub", OpcodeNoArgs, "int subtract. Stack: value1, value2 -> result"),
        0x65: ("lsub", OpcodeNoArgs, "subtract two longs. Stack: value1, value2 -> result"),
        0x66: ("fsub", OpcodeNoArgs, "subtracts two floats. Stack: value1, value2 -> result"),
        0x67: ("dsub", OpcodeNoArgs, "subtracts a double from another. Stack: value1, value2 -> result"),
        0x68: ("imul", OpcodeNoArgs, "multiply two integers. Stack: value1, value2 -> result"),
        0x69: ("lmul", OpcodeNoArgs, "multiplies two longs. Stack: value1, value2 -> result"),
        0x6a: ("fmul", OpcodeNoArgs, "multiplies two floats. Stack: value1, value2 -> result"),
        0x6b: ("dmul", OpcodeNoArgs, "multiplies two doubles. Stack: value1, value2 -> result"),
        0x6c: ("idiv", OpcodeNoArgs, "divides two integers. Stack: value1, value2 -> result"),
        0x6d: ("ldiv", OpcodeNoArgs, "divide two longs. Stack: value1, value2 -> result"),
        0x6e: ("fdiv", OpcodeNoArgs, "divides two floats. Stack: value1, value2 -> result"),
        0x6f: ("ddiv", OpcodeNoArgs, "divides two doubles. Stack: value1, value2 -> result"),
        0x70: ("irem", OpcodeNoArgs, "logical int remainder. Stack: value1, value2 -> result"),
        0x71: ("lrem", OpcodeNoArgs, "remainder of division of two longs. Stack: value1, value2 -> result"),
        0x72: ("frem", OpcodeNoArgs, "gets the remainder from a division between two floats. Stack: value1, value2 -> result"),
        0x73: ("drem", OpcodeNoArgs, "gets the remainder from a division between two doubles. Stack: value1, value2 -> result"),
        0x74: ("ineg", OpcodeNoArgs, "negate int. Stack: value -> result"),
        0x75: ("lneg", OpcodeNoArgs, "negates a long. Stack: value -> result"),
        0x76: ("fneg", OpcodeNoArgs, "negates a float. Stack: value -> result"),
        0x77: ("dneg", OpcodeNoArgs, "negates a double. Stack: value -> result"),
        0x78: ("ishl", OpcodeNoArgs, "int shift left. Stack: value1, value2 -> result"),
        0x79: ("lshl", OpcodeNoArgs, "bitwise shift left of a long 'value1' by 'value2' positions. Stack: value1, value2 -> result"),
        0x7a: ("ishr", OpcodeNoArgs, "int shift right. Stack: value1, value2 -> result"),
        0x7b: ("lshr", OpcodeNoArgs, "bitwise shift right of a long 'value1' by 'value2' positions. Stack: value1, value2 -> result"),
        0x7c: ("iushr", OpcodeNoArgs, "int shift right. Stack: value1, value2 -> result"),
        0x7d: ("lushr", OpcodeNoArgs, "bitwise shift right of a long 'value1' by 'value2' positions, unsigned. Stack: value1, value2 -> result"),
        0x7e: ("iand", OpcodeNoArgs, "performs a logical and on two integers. Stack: value1, value2 -> result"),
        0x7f: ("land", OpcodeNoArgs, "bitwise and of two longs. Stack: value1, value2 -> result"),
        0x80: ("ior", OpcodeNoArgs, "logical int or. Stack: value1, value2 -> result"),
        0x81: ("lor", OpcodeNoArgs, "bitwise or of two longs. Stack: value1, value2 -> result"),
        0x82: ("ixor", OpcodeNoArgs, "int xor. Stack: value1, value2 -> result"),
        0x83: ("lxor", OpcodeNoArgs, "bitwise exclusive or of two longs. Stack: value1, value2 -> result"),
        0x84: ("iinc", OpcodeSpecial_iinc, "increment local variable '#index' by signed byte 'const'. Stack: [No change]"),
        0x85: ("i2l", OpcodeNoArgs, "converts an int into a long. Stack: value -> result"),
        0x86: ("i2f", OpcodeNoArgs, "converts an int into a float. Stack: value -> result"),
        0x87: ("i2d", OpcodeNoArgs, "converts an int into a double. Stack: value -> result"),
        0x88: ("l2i", OpcodeNoArgs, "converts a long to an int. Stack: value -> result"),
        0x89: ("l2f", OpcodeNoArgs, "converts a long to a float. Stack: value -> result"),
        0x8a: ("l2d", OpcodeNoArgs, "converts a long to a double. Stack: value -> result"),
        0x8b: ("f2i", OpcodeNoArgs, "converts a float to an int. Stack: value -> result"),
        0x8c: ("f2l", OpcodeNoArgs, "converts a float to a long. Stack: value -> result"),
        0x8d: ("f2d", OpcodeNoArgs, "converts a float to a double. Stack: value -> result"),
        0x8e: ("d2i", OpcodeNoArgs, "converts a double to an int. Stack: value -> result"),
        0x8f: ("d2l", OpcodeNoArgs, "converts a double to a long. Stack: value -> result"),
        0x90: ("d2f", OpcodeNoArgs, "converts a double to a float. Stack: value -> result"),
        0x91: ("i2b", OpcodeNoArgs, "converts an int into a byte. Stack: value -> result"),
        0x92: ("i2c", OpcodeNoArgs, "converts an int into a character. Stack: value -> result"),
        0x93: ("i2s", OpcodeNoArgs, "converts an int into a short. Stack: value -> result"),
        0x94: ("lcmp", OpcodeNoArgs, "compares two longs values. Stack: value1, value2 -> result"),
        0x95: ("fcmpl", OpcodeNoArgs, "compares two floats. Stack: value1, value2 -> result"),
        0x96: ("fcmpg", OpcodeNoArgs, "compares two floats. Stack: value1, value2 -> result"),
        0x97: ("dcmpl", OpcodeNoArgs, "compares two doubles. Stack: value1, value2 -> result"),
        0x98: ("dcmpg", OpcodeNoArgs, "compares two doubles. Stack: value1, value2 -> result"),
        0x99: ("ifeq", OpcodeShortJump, "if 'value' is 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
        0x9a: ("ifne", OpcodeShortJump, "if 'value' is not 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
        # BUG FIX: 0x9b (iflt) was missing, breaking parsing of any method using it.
        0x9b: ("iflt", OpcodeShortJump, "if 'value' is less than 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
        0x9c: ("ifge", OpcodeShortJump, "if 'value' is greater than or equal to 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
        0x9d: ("ifgt", OpcodeShortJump, "if 'value' is greater than 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
        0x9e: ("ifle", OpcodeShortJump, "if 'value' is less than or equal to 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
        0x9f: ("if_icmpeq", OpcodeShortJump, "if ints are equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
        0xa0: ("if_icmpne", OpcodeShortJump, "if ints are not equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
        0xa1: ("if_icmplt", OpcodeShortJump, "if 'value1' is less than 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
        0xa2: ("if_icmpge", OpcodeShortJump, "if 'value1' is greater than or equal to 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
        0xa3: ("if_icmpgt", OpcodeShortJump, "if 'value1' is greater than 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
        0xa4: ("if_icmple", OpcodeShortJump, "if 'value1' is less than or equal to 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
        0xa5: ("if_acmpeq", OpcodeShortJump, "if references are equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
        0xa6: ("if_acmpne", OpcodeShortJump, "if references are not equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
        0xa7: ("goto", OpcodeShortJump, "goes to the 16-bit instruction offset argument. Stack: [no change]"),
        0xa8: ("jsr", OpcodeShortJump, "jump to subroutine at the 16-bit instruction offset argument and place the return address on the stack. Stack: -> address"),
        0xa9: ("ret", OpcodeIndex, "continue execution from address taken from a local variable '#index'. Stack: [No change]"),
        0xaa: ("tableswitch", OpcodeSpecial_tableswitch, "continue execution from an address in the table at offset 'index'. Stack: index ->"),
        0xab: ("lookupswitch", OpcodeSpecial_lookupswitch, "a target address is looked up from a table using a key and execution continues from the instruction at that address. Stack: key ->"),
        0xac: ("ireturn", OpcodeNoArgs, "returns an integer from a method. Stack: value -> [empty]"),
        0xad: ("lreturn", OpcodeNoArgs, "returns a long value. Stack: value -> [empty]"),
        0xae: ("freturn", OpcodeNoArgs, "returns a float. Stack: value -> [empty]"),
        0xaf: ("dreturn", OpcodeNoArgs, "returns a double from a method. Stack: value -> [empty]"),
        0xb0: ("areturn", OpcodeNoArgs, "returns a reference from a method. Stack: objectref -> [empty]"),
        0xb1: ("return", OpcodeNoArgs, "return void from method. Stack: -> [empty]"),
        0xb2: ("getstatic", OpcodeCPIndex, "gets a static field 'value' of a class, where the field is identified by field reference in the constant pool. Stack: -> value"),
        0xb3: ("putstatic", OpcodeCPIndex, "set static field to 'value' in a class, where the field is identified by a field reference in constant pool. Stack: value ->"),
        0xb4: ("getfield", OpcodeCPIndex, "gets a field 'value' of an object 'objectref', where the field is identified by field reference <argument> in the constant pool. Stack: objectref -> value"),
        0xb5: ("putfield", OpcodeCPIndex, "set field to 'value' in an object 'objectref', where the field is identified by a field reference <argument> in constant pool. Stack: objectref, value ->"),
        0xb6: ("invokevirtual", OpcodeCPIndex, "invoke virtual method on object 'objectref', where the method is identified by method reference <argument> in constant pool. Stack: objectref, [arg1, arg2, ...] ->"),
        0xb7: ("invokespecial", OpcodeCPIndex, "invoke instance method on object 'objectref', where the method is identified by method reference <argument> in constant pool. Stack: objectref, [arg1, arg2, ...] ->"),
        0xb8: ("invokestatic", OpcodeCPIndex, "invoke a static method, where the method is identified by method reference <argument> in the constant pool. Stack: [arg1, arg2, ...] ->"),
        0xb9: ("invokeinterface", OpcodeSpecial_invokeinterface, "invokes an interface method on object 'objectref', where the interface method is identified by method reference <argument> in constant pool. Stack: objectref, [arg1, arg2, ...] ->"),
        0xba: ("xxxunusedxxx", OpcodeNoArgs, "this opcode is reserved for historical reasons. Stack: "),
        0xbb: ("new", OpcodeCPIndex, "creates new object of type identified by class reference <argument> in constant pool. Stack: -> objectref"),
        0xbc: ("newarray", OpcodeSpecial_newarray, "creates new array with 'count' elements of primitive type given in the argument. Stack: count -> arrayref"),
        0xbd: ("anewarray", OpcodeCPIndex, "creates a new array of references of length 'count' and component type identified by the class reference <argument> in the constant pool. Stack: count -> arrayref"),
        0xbe: ("arraylength", OpcodeNoArgs, "gets the length of an array. Stack: arrayref -> length"),
        0xbf: ("athrow", OpcodeNoArgs, "throws an error or exception (notice that the rest of the stack is cleared, leaving only a reference to the Throwable). Stack: objectref -> [empty], objectref"),
        0xc0: ("checkcast", OpcodeCPIndex, "checks whether an 'objectref' is of a certain type, the class reference of which is in the constant pool. Stack: objectref -> objectref"),
        0xc1: ("instanceof", OpcodeCPIndex, "determines if an object 'objectref' is of a given type, identified by class reference <argument> in constant pool. Stack: objectref -> result"),
        0xc2: ("monitorenter", OpcodeNoArgs, "enter monitor for object (\"grab the lock\" - start of synchronized() section). Stack: objectref -> "),
        0xc3: ("monitorexit", OpcodeNoArgs, "exit monitor for object (\"release the lock\" - end of synchronized() section). Stack: objectref -> "),
        0xc4: ("wide", OpcodeSpecial_wide, "execute 'opcode', where 'opcode' is either iload, fload, aload, lload, dload, istore, fstore, astore, lstore, dstore, or ret, but assume the 'index' is 16 bit; or execute iinc, where the 'index' is 16 bits and the constant to increment by is a signed 16 bit short. Stack: [same as for corresponding instructions]"),
        0xc5: ("multianewarray", OpcodeSpecial_multianewarray, "create a new array of 'dimensions' dimensions with elements of type identified by class reference in constant pool; the sizes of each dimension is identified by 'count1', ['count2', etc]. Stack: count1, [count2,...] -> arrayref"),
        0xc6: ("ifnull", OpcodeShortJump, "if 'value' is null, branch to the 16-bit instruction offset argument. Stack: value ->"),
        0xc7: ("ifnonnull", OpcodeShortJump, "if 'value' is not null, branch to the 16-bit instruction offset argument. Stack: value ->"),
        0xc8: ("goto_w", OpcodeLongJump, "goes to another instruction at the 32-bit branch offset argument. Stack: [no change]"),
        0xc9: ("jsr_w", OpcodeLongJump, "jump to subroutine at the 32-bit branch offset argument and place the return address on the stack. Stack: -> address"),
        0xca: ("breakpoint", OpcodeNoArgs, "reserved for breakpoints in Java debuggers; should not appear in any class file."),
        0xfe: ("impdep1", OpcodeNoArgs, "reserved for implementation-dependent operations within debuggers; should not appear in any class file."),
        0xff: ("impdep2", OpcodeNoArgs, "reserved for implementation-dependent operations within debuggers; should not appear in any class file.")}
    def __init__(self, parent, name, length):
        """length is the code size in bytes (the Code attribute's code_length)."""
        FieldSet.__init__(self, parent, name)
        self._size = length*8
    def createFields(self):
        while self.current_size < self.size:
            # Peek at the next opcode byte to select the instruction parser;
            # the parser class re-reads it as its own "opcode" field.
            bytecode = ord(self.parent.stream.readBytes(self.absolute_address+self.current_size, 1))
            op, cls, desc = self.OPCODE_TABLE.get(bytecode, ("<reserved_opcode>", OpcodeNoArgs, "Reserved opcode."))
            yield cls(self, "bytecode[]", op, desc)
###############################################################################
class CPInfo(FieldSet):
    """
    Holds a constant pool entry. Entries all have a type, and various contents
    fields depending on their type.
    """
    def createFields(self):
        yield Enum(UInt8(self, "tag"), self.root.CONSTANT_TYPES)
        if self["tag"].value not in self.root.CONSTANT_TYPES:
            raise ParserError("Java: unknown constant type (%s)" % self["tag"].value)
        # Remember the decoded type name; __str__() relies on it.
        self.constant_type = self.root.CONSTANT_TYPES[self["tag"].value]
        if self.constant_type == "Utf8":
            yield PascalString16(self, "bytes", charset="UTF-8")
        elif self.constant_type == "Integer":
            yield Int32(self, "bytes")
        elif self.constant_type == "Float":
            yield Float32(self, "bytes")
        elif self.constant_type == "Long":
            yield Int64(self, "bytes")
        elif self.constant_type == "Double":
            yield Float64(self, "bytes")
        elif self.constant_type == "Class":
            yield CPIndex(self, "name_index", "Class or interface name", target_types="Utf8")
        elif self.constant_type == "String":
            yield CPIndex(self, "string_index", target_types="Utf8")
        elif self.constant_type == "Fieldref":
            yield CPIndex(self, "class_index", "Field class or interface name", target_types="Class")
            yield CPIndex(self, "name_and_type_index", target_types="NameAndType")
        elif self.constant_type == "Methodref":
            yield CPIndex(self, "class_index", "Method class name", target_types="Class")
            yield CPIndex(self, "name_and_type_index", target_types="NameAndType")
        elif self.constant_type == "InterfaceMethodref":
            yield CPIndex(self, "class_index", "Method interface name", target_types="Class")
            yield CPIndex(self, "name_and_type_index", target_types="NameAndType")
        elif self.constant_type == "NameAndType":
            yield CPIndex(self, "name_index", target_types="Utf8")
            yield CPIndex(self, "descriptor_index", target_types="Utf8")
        else:
            # BUG FIX: the tag value is an int; concatenating it to a str
            # raised TypeError instead of the intended ParserError.
            raise ParserError("Not a valid constant pool element type: %s"
                              % self["tag"].value)
    def __str__(self):
        """
        Returns a human-readable string representation of the constant pool
        entry. It is used for pretty-printing of the CPIndex fields pointing
        to it.
        """
        if self.constant_type == "Utf8":
            return self["bytes"].value
        elif self.constant_type in ("Integer", "Float", "Long", "Double"):
            return self["bytes"].display
        elif self.constant_type == "Class":
            # Internal class names use '/' as the package separator;
            # display them with the usual '.' instead.
            class_name = str(self["name_index"].get_cp_entry())
            return class_name.replace("/",".")
        elif self.constant_type == "String":
            return str(self["string_index"].get_cp_entry())
        elif self.constant_type in ("Fieldref", "Methodref", "InterfaceMethodref"):
            # All three member-reference kinds render identically.
            return "%s (from %s)" % (self["name_and_type_index"], self["class_index"])
        elif self.constant_type == "NameAndType":
            return parse_any_descriptor(
                str(self["descriptor_index"].get_cp_entry()),
                name=str(self["name_index"].get_cp_entry()))
        else:
            # BUG FIX: same str+int TypeError as in createFields().
            raise ParserError("Not a valid constant pool element type: %s"
                              % self["tag"].value)
###############################################################################
# field_info {
# u2 access_flags;
# u2 name_index;
# u2 descriptor_index;
# u2 attributes_count;
# attribute_info attributes[attributes_count];
# }
class FieldInfo(FieldSet):
    # Parses one field_info structure (JVM spec 4.5).
    def createFields(self):
        # Field access flags: a 16-bit big-endian mask, decoded bit by bit.
        yield NullBits(self, "reserved[]", 8)
        for flag in ("transient", "volatile"):
            yield Bit(self, flag)
        yield NullBits(self, "reserved[]", 1)
        for flag in ("final", "static", "protected", "private", "public"):
            yield Bit(self, flag)
        yield CPIndex(self, "name_index", "Field name", target_types="Utf8")
        yield CPIndex(self, "descriptor_index", "Field descriptor",
                      target_types="Utf8",
                      target_text_handler=parse_field_descriptor)
        yield UInt16(self, "attributes_count", "Number of field attributes")
        count = self["attributes_count"].value
        if count > 0:
            yield FieldArray(self, "attributes", AttributeInfo, count)
###############################################################################
# method_info {
# u2 access_flags;
# u2 name_index;
# u2 descriptor_index;
# u2 attributes_count;
# attribute_info attributes[attributes_count];
# }
class MethodInfo(FieldSet):
    # Parses one method_info structure (JVM spec 4.6).
    def createFields(self):
        # Method access flags: a 16-bit big-endian mask, decoded bit by bit.
        yield NullBits(self, "reserved[]", 4)
        for flag in ("strict", "abstract"):
            yield Bit(self, flag)
        yield NullBits(self, "reserved[]", 1)
        yield Bit(self, "native")
        yield NullBits(self, "reserved[]", 2)
        for flag in ("synchronized", "final", "static",
                     "protected", "private", "public"):
            yield Bit(self, flag)
        yield CPIndex(self, "name_index", "Method name", target_types="Utf8")
        yield CPIndex(self, "descriptor_index", "Method descriptor",
                      target_types="Utf8",
                      target_text_handler=parse_method_descriptor)
        yield UInt16(self, "attributes_count", "Number of method attributes")
        count = self["attributes_count"].value
        if count > 0:
            yield FieldArray(self, "attributes", AttributeInfo, count)
###############################################################################
# attribute_info {
# u2 attribute_name_index;
# u4 attribute_length;
# u1 info[attribute_length];
# }
# [...]
class AttributeInfo(FieldSet):
    """
    Parses one attribute_info structure: a constant-pool name index, a
    32-bit payload length, then a payload whose layout depends on the
    attribute name (ConstantValue, Code, Exceptions, ...). Unknown
    attributes are kept as raw bytes, as the JVM spec allows.
    """
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Total size: 2-byte name index + 4-byte length + payload bytes.
        self._size = (self["attribute_length"].value + 6) * 8
    def _checkLength(self, expected):
        """Raise ParserError if attribute_length is not *expected* bytes.

        BUG FIX: these checks used bare 'assert', which is stripped under
        python -O and raised AssertionError instead of the ParserError the
        rest of the parser uses.
        """
        if self["attribute_length"].value != expected:
            raise ParserError("Java: Invalid attribute %s length (%s)" \
                    % (self.path, self["attribute_length"].value))
    def createFields(self):
        yield CPIndex(self, "attribute_name_index", "Attribute name", target_types="Utf8")
        yield UInt32(self, "attribute_length", "Length of the attribute")
        attr_name = str(self["attribute_name_index"].get_cp_entry())
        # ConstantValue_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        #   u2 constantvalue_index;
        # }
        if attr_name == "ConstantValue":
            self._checkLength(2)
            yield CPIndex(self, "constantvalue_index",
                    target_types=("Long","Float","Double","Integer","String"))
        # Code_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        #   u2 max_stack;
        #   u2 max_locals;
        #   u4 code_length;
        #   u1 code[code_length];
        #   u2 exception_table_length;
        #   {   u2 start_pc;
        #       u2 end_pc;
        #       u2 handler_pc;
        #       u2 catch_type;
        #   } exception_table[exception_table_length];
        #   u2 attributes_count;
        #   attribute_info attributes[attributes_count];
        # }
        elif attr_name == "Code":
            yield UInt16(self, "max_stack")
            yield UInt16(self, "max_locals")
            yield UInt32(self, "code_length")
            if self["code_length"].value > 0:
                yield JavaBytecode(self, "code", self["code_length"].value)
            yield UInt16(self, "exception_table_length")
            if self["exception_table_length"].value > 0:
                yield FieldArray(self, "exception_table", ExceptionTableEntry,
                        self["exception_table_length"].value)
            yield UInt16(self, "attributes_count")
            if self["attributes_count"].value > 0:
                yield FieldArray(self, "attributes", AttributeInfo,
                        self["attributes_count"].value)
        # Exceptions_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        #   u2 number_of_exceptions;
        #   u2 exception_index_table[number_of_exceptions];
        # }
        elif attr_name == "Exceptions":
            yield UInt16(self, "number_of_exceptions")
            yield FieldArray(self, "exception_index_table", CPIndex,
                    self["number_of_exceptions"].value, target_types="Class")
            self._checkLength(2 + self["number_of_exceptions"].value * 2)
        # InnerClasses_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        #   u2 number_of_classes;
        #   {   u2 inner_class_info_index;
        #       u2 outer_class_info_index;
        #       u2 inner_name_index;
        #       u2 inner_class_access_flags;
        #   } classes[number_of_classes];
        # }
        elif attr_name == "InnerClasses":
            yield UInt16(self, "number_of_classes")
            if self["number_of_classes"].value > 0:
                yield FieldArray(self, "classes", InnerClassesEntry,
                        self["number_of_classes"].value)
            self._checkLength(2 + self["number_of_classes"].value * 8)
        # Synthetic_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        # }
        elif attr_name == "Synthetic":
            self._checkLength(0)
        # SourceFile_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        #   u2 sourcefile_index;
        # }
        elif attr_name == "SourceFile":
            self._checkLength(2)
            yield CPIndex(self, "sourcefile_index", target_types="Utf8")
        # LineNumberTable_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        #   u2 line_number_table_length;
        #   {   u2 start_pc;
        #       u2 line_number;
        #   } line_number_table[line_number_table_length];
        # }
        elif attr_name == "LineNumberTable":
            yield UInt16(self, "line_number_table_length")
            if self["line_number_table_length"].value > 0:
                yield FieldArray(self, "line_number_table",
                        LineNumberTableEntry,
                        self["line_number_table_length"].value)
            self._checkLength(2 + self["line_number_table_length"].value * 4)
        # LocalVariableTable_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        #   u2 local_variable_table_length;
        #   {   u2 start_pc;
        #       u2 length;
        #       u2 name_index;
        #       u2 descriptor_index;
        #       u2 index;
        #   } local_variable_table[local_variable_table_length];
        # }
        elif attr_name == "LocalVariableTable":
            yield UInt16(self, "local_variable_table_length")
            if self["local_variable_table_length"].value > 0:
                yield FieldArray(self, "local_variable_table",
                        LocalVariableTableEntry,
                        self["local_variable_table_length"].value)
            self._checkLength(2 + self["local_variable_table_length"].value * 10)
        # Deprecated_attribute {
        #   u2 attribute_name_index;
        #   u4 attribute_length;
        # }
        elif attr_name == "Deprecated":
            self._checkLength(0)
        # Unknown attribute type. They are allowed by the JVM specs, but we
        # can't say much about them...
        elif self["attribute_length"].value > 0:
            yield RawBytes(self, "info", self["attribute_length"].value)
class ExceptionTableEntry(FieldSet):
    """One entry of a Code attribute's exception_table: the protected
    bytecode range, the handler address, and the caught class type."""
    # Three fixed u2 fields (3 * 16 bits) plus one constant-pool index.
    static_size = 48 + CPIndex.static_size

    def createFields(self):
        yield textHandler(UInt16(self, "start_pc"), hexadecimal)
        yield textHandler(UInt16(self, "end_pc"), hexadecimal)
        yield textHandler(UInt16(self, "handler_pc"), hexadecimal)
        yield CPIndex(self, "catch_type", target_types="Class")
class InnerClassesEntry(StaticFieldSet):
    """One entry of an InnerClasses attribute: three constant-pool indexes
    (each allowed to be zero) followed by 16 bits of access flags."""
    format = (
        (CPIndex, "inner_class_info_index",
            {"target_types": "Class", "allow_zero": True}),
        (CPIndex, "outer_class_info_index",
            {"target_types": "Class", "allow_zero": True}),
        (CPIndex, "inner_name_index",
            {"target_types": "Utf8", "allow_zero": True}),

        # Inner class access flags (16 bits)
        (NullBits, "reserved[]", 5),
        (Bit, "abstract"),
        (Bit, "interface"),
        (NullBits, "reserved[]", 3),
        (Bit, "super"),
        (Bit, "final"),
        (Bit, "static"),
        (Bit, "protected"),
        (Bit, "private"),
        (Bit, "public"),
    )
class LineNumberTableEntry(StaticFieldSet):
    """One (start_pc, line_number) pair of a LineNumberTable attribute."""
    format = (
        (UInt16, "start_pc"),
        (UInt16, "line_number")
    )
class LocalVariableTableEntry(StaticFieldSet):
    """One entry of a LocalVariableTable attribute: the bytecode range
    (start_pc, length) where the variable is live, its name and type
    descriptor (constant-pool Utf8 indexes), and its "index" slot field."""
    format = (
        (UInt16, "start_pc"),
        (UInt16, "length"),
        (CPIndex, "name_index", {"target_types": "Utf8"}),
        # The descriptor is rendered through parse_field_descriptor so the
        # human-readable type shows up in the UI.
        (CPIndex, "descriptor_index", {"target_types": "Utf8",
            "target_text_handler": parse_field_descriptor}),
        (UInt16, "index")
    )
###############################################################################
# ClassFile {
# u4 magic;
# u2 minor_version;
# u2 major_version;
# u2 constant_pool_count;
# cp_info constant_pool[constant_pool_count-1];
# u2 access_flags;
# u2 this_class;
# u2 super_class;
# u2 interfaces_count;
# u2 interfaces[interfaces_count];
# u2 fields_count;
# field_info fields[fields_count];
# u2 methods_count;
# method_info methods[methods_count];
# u2 attributes_count;
# attribute_info attributes[attributes_count];
# }
class JavaCompiledClassFile(Parser):
    """
    Root of the .class parser.

    Parses the top-level ClassFile structure: magic, version, constant
    pool, access flags, class references, interfaces, fields, methods
    and attributes.
    """
    endian = BIG_ENDIAN

    PARSER_TAGS = {
        "id": "java_class",
        "category": "program",
        "file_ext": ("class",),
        "mime": (u"application/java-vm",),
        "min_size": (32 + 3*16),
        "description": "Compiled Java class"
    }

    MAGIC = 0xCAFEBABE
    KNOWN_VERSIONS = {
        "45.3": "JDK 1.1",
        "46.0": "JDK 1.2",
        "47.0": "JDK 1.3",
        "48.0": "JDK 1.4",
        "49.0": "JDK 1.5",
        "50.0": "JDK 1.6"
    }

    # Constants go here since they will probably depend on the detected format
    # version at some point. Though, if they happen to be really backward
    # compatible, they may become module globals.
    CONSTANT_TYPES = {
        1: "Utf8",
        3: "Integer",
        4: "Float",
        5: "Long",
        6: "Double",
        7: "Class",
        8: "String",
        9: "Fieldref",
        10: "Methodref",
        11: "InterfaceMethodref",
        12: "NameAndType"
    }

    def _versionString(self):
        """Return the class file version as "major.minor".

        The format matches the KNOWN_VERSIONS keys (e.g. "49.0" -> JDK 1.5).
        Shared by validate() and createDescription(), which previously each
        rebuilt the string themselves.
        """
        return "%d.%d" % (self["major_version"].value, self["minor_version"].value)

    def validate(self):
        """Cheap sanity check: correct magic number and a known version."""
        if self["magic"].value != self.MAGIC:
            return "Wrong magic signature!"
        version = self._versionString()
        if version not in self.KNOWN_VERSIONS:
            return "Unknown version (%s)" % version
        return True

    def createDescription(self):
        """Human-readable description, naming the JDK when recognized."""
        version = self._versionString()
        if version in self.KNOWN_VERSIONS:
            return "Compiled Java class, %s" % self.KNOWN_VERSIONS[version]
        else:
            return "Compiled Java class, version %s" % version

    def createFields(self):
        yield textHandler(UInt32(self, "magic", "Java compiled class signature"),
                          hexadecimal)
        yield UInt16(self, "minor_version", "Class format minor version")
        yield UInt16(self, "major_version", "Class format major version")
        yield UInt16(self, "constant_pool_count", "Size of the constant pool")
        if self["constant_pool_count"].value > 1:
            # Can't use FieldArray here: the constant pool needs specific
            # hacks (skipping one index after each Long and Double entry),
            # which ConstantPool implements.
            yield ConstantPool(self, "constant_pool",
                               (self["constant_pool_count"].value))

        # Class access flags (16 bits)
        yield NullBits(self, "reserved[]", 5)
        yield Bit(self, "abstract")
        yield Bit(self, "interface")
        yield NullBits(self, "reserved[]", 3)
        yield Bit(self, "super")
        yield Bit(self, "final")
        yield Bit(self, "static")
        yield Bit(self, "protected")
        yield Bit(self, "private")
        yield Bit(self, "public")

        yield CPIndex(self, "this_class", "Class name", target_types="Class")
        yield CPIndex(self, "super_class", "Super class name", target_types="Class")
        yield UInt16(self, "interfaces_count", "Number of implemented interfaces")
        if self["interfaces_count"].value > 0:
            yield FieldArray(self, "interfaces", CPIndex,
                             self["interfaces_count"].value, target_types="Class")
        yield UInt16(self, "fields_count", "Number of fields")
        if self["fields_count"].value > 0:
            yield FieldArray(self, "fields", FieldInfo,
                             self["fields_count"].value)
        yield UInt16(self, "methods_count", "Number of methods")
        if self["methods_count"].value > 0:
            yield FieldArray(self, "methods", MethodInfo,
                             self["methods_count"].value)
        yield UInt16(self, "attributes_count", "Number of attributes")
        if self["attributes_count"].value > 0:
            yield FieldArray(self, "attributes", AttributeInfo,
                             self["attributes_count"].value)
# vim: set expandtab tabstop=4 shiftwidth=4 autoindent smartindent:
| gpl-3.0 |
zero-rp/miniblink49 | third_party/skia/tools/sanitize_source_files.py | 176 | 4852 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that sanitizes source files with specified modifiers."""
import commands
import os
import sys
_FILE_EXTENSIONS_TO_SANITIZE = ['cpp', 'h', 'c', 'gyp', 'gypi']

_SUBDIRS_TO_IGNORE = ['.git', '.svn', 'third_party']


def SanitizeFilesWithModifiers(directory, file_modifiers, line_modifiers):
    """Sanitizes source files with the specified file and line modifiers.

    Args:
      directory: string - The directory which will be recursively traversed to
          find source files to apply modifiers to.
      file_modifiers: list - file-modification methods which should be applied
          to the complete file content (Eg: EOFOneAndOnlyOneNewlineAdder).
      line_modifiers: list - line-modification methods which should be applied
          to lines in a file (Eg: TabReplacer).
    """
    for item in os.listdir(directory):
        full_item_path = os.path.join(directory, item)

        if os.path.isfile(full_item_path):  # Item is a file.

            # Only sanitize files with extensions we care about.
            if (len(full_item_path.split('.')) > 1 and
                    full_item_path.split('.')[-1] in _FILE_EXTENSIONS_TO_SANITIZE):
                # open() instead of the deprecated file() builtin (removed in
                # Python 3); context managers guarantee the handle is closed
                # even if reading/writing raises.
                with open(full_item_path) as f:
                    lines = f.readlines()

                new_lines = []  # Collect changed lines here.
                write_to_file = False  # File is written to only if this flag is set.

                # Run the line modifiers for each line in this file.
                # Line numbers reported to modifiers are 1-based.
                for line_number, line in enumerate(lines, 1):
                    original_line = line
                    for modifier in line_modifiers:
                        line = modifier(line, full_item_path, line_number)
                        if original_line != line:
                            write_to_file = True
                    new_lines.append(line)

                # Run the file modifiers.
                old_content = ''.join(lines)
                new_content = ''.join(new_lines)
                for modifier in file_modifiers:
                    new_content = modifier(new_content, full_item_path)
                if new_content != old_content:
                    write_to_file = True

                # Write modifications to the file.
                if write_to_file:
                    with open(full_item_path, 'w') as f:
                        f.write(new_content)
                    print('Made changes to %s' % full_item_path)

        elif item not in _SUBDIRS_TO_IGNORE:
            # Item is a directory: recursively call the method.
            SanitizeFilesWithModifiers(full_item_path, file_modifiers, line_modifiers)
############## Line Modification methods ##############
def TrailingWhitespaceRemover(line, file_path, line_number):
    """Strips out trailing whitespaces from the specified line.

    Always returns the line terminated by exactly one '\\n'.
    """
    stripped_line = line.rstrip() + '\n'
    if line != stripped_line:
        # print() call form works on both Python 2 and 3 (the original used
        # a Python-2-only print statement).
        print('Removing trailing whitespace in %s:%s' % (file_path, line_number))
    return stripped_line
def CrlfReplacer(line, file_path, line_number):
    """Replaces CRLF with LF."""
    if '\r\n' in line:
        # print() call form works on both Python 2 and 3.
        print('Replacing CRLF with LF in %s:%s' % (file_path, line_number))
    return line.replace('\r\n', '\n')
def TabReplacer(line, file_path, line_number):
    """Replaces Tabs with 4 whitespaces."""
    if '\t' in line:
        # print() call form works on both Python 2 and 3.
        print('Replacing Tab with whitespace in %s:%s' % (file_path, line_number))
    # Four spaces, matching the behavior stated in the docstring.
    return line.replace('\t', '    ')
############## File Modification methods ##############
def CopywriteChecker(file_content, unused_file_path):
    """Ensures that the copywrite information is correct.

    Currently a no-op placeholder: the content is returned unmodified.
    ("Copywrite" is a typo for "Copyright", but the name is part of the
    public interface, so it is kept.)
    """
    # TODO(rmistry): Figure out the legal implications of changing old copyright
    # headers.
    return file_content
def EOFOneAndOnlyOneNewlineAdder(file_content, file_path):
    """Adds one and only one LF at the end of the file.

    Empty content is returned unchanged; content with no trailing newline,
    or with more than one, is normalized to exactly one.
    """
    if file_content and (file_content[-1] != '\n' or file_content[-2:-1] == '\n'):
        file_content = file_content.rstrip()
        file_content += '\n'
        # print() call form works on both Python 2 and 3.
        print('Added exactly one newline to %s' % file_path)
    return file_content
def SvnEOLChecker(file_content, file_path):
    """Sets svn:eol-style property to LF.

    The file content itself is returned unmodified; only the svn property
    is changed as a side effect.
    """
    # subprocess with argument lists replaces the Python-2-only `commands`
    # module and the shell-interpolated os.system() string, so paths with
    # shell metacharacters are no longer mis-parsed.
    import subprocess
    proc = subprocess.Popen(
        ['svn', 'propget', 'svn:eol-style', file_path],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Like commands.getoutput, capture stderr too and trim the trailing newline.
    output = proc.communicate()[0].decode().strip()
    if output != 'LF':
        print('Setting svn:eol-style property to LF in %s' % file_path)
        subprocess.call(['svn', 'ps', 'svn:eol-style', 'LF', file_path])
    return file_content
#######################################################
if '__main__' == __name__:
    # Sanitize the current working directory tree. SanitizeFilesWithModifiers
    # returns None, so this exits with status 0 on success.
    # Modifier order matters: line modifiers run per line in list order,
    # then file modifiers run on the joined content.
    sys.exit(SanitizeFilesWithModifiers(
        os.getcwd(),
        file_modifiers=[
            CopywriteChecker,
            EOFOneAndOnlyOneNewlineAdder,
            SvnEOLChecker,
        ],
        line_modifiers=[
            CrlfReplacer,
            TabReplacer,
            TrailingWhitespaceRemover,
        ],
    ))
| apache-2.0 |
cristianionita/GeoRsGPU | lib/GDAL/gcps2vec.py | 1 | 4945 | #!/usr/bin/env python
#******************************************************************************
# $Id: gcps2vec.py 33790 2016-03-26 12:42:12Z goatbar $
#
# Project: GDAL
# Purpose: Convert GCPs to a point layer.
# Author: Frank Warmerdam, warmerdam@pobox.com
#
#******************************************************************************
# Copyright (c) 2005, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
import sys
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
def Usage():
    """Print the command-line synopsis and terminate with exit status 1."""
    usage_text = ('Usage: gcps2vec.py [-of <ogr_drivername>] [-p] '
                  '<raster_file> <vector_file>')
    print(usage_text)
    sys.exit(1)
# =============================================================================
# Mainline
# =============================================================================

# Defaults, overridden by the command-line options parsed below.
out_format = 'GML'
in_file = None
out_file = None
pixel_out = 0

gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
    sys.exit( 0 )

# Parse command line arguments.
# First bare argument is the input raster, second is the output vector file.
i = 1
while i < len(argv):
    arg = argv[i]

    if arg == '-of':
        i = i + 1
        out_format = argv[i]

    elif arg == '-p':
        pixel_out = 1

    elif in_file is None:
        in_file = argv[i]

    elif out_file is None:
        out_file = argv[i]

    else:
        Usage()

    i = i + 1

if out_file is None:
    Usage()

# ----------------------------------------------------------------------------
# Open input file, and fetch GCPs.
# ----------------------------------------------------------------------------
ds = gdal.Open( in_file )
if ds is None:
    print('Unable to open %s' % in_file)
    sys.exit(1)

gcp_srs = ds.GetGCPProjection()
gcps = ds.GetGCPs()

# Release the input dataset; GCPs have been copied out.
ds = None

if gcps is None or len(gcps) == 0:
    print('No GCPs on file %s!' % in_file)
    sys.exit(1)

# ----------------------------------------------------------------------------
# Create output file, and layer.
# ----------------------------------------------------------------------------
drv = ogr.GetDriverByName( out_format )
if drv is None:
    print('No driver named %s available.' % out_format)
    sys.exit(1)

ds = drv.CreateDataSource( out_file )

# In georeferenced mode, the GCP projection (when present) becomes the
# layer SRS; in pixel mode no SRS applies.
if pixel_out == 0 and gcp_srs != "":
    srs = osr.SpatialReference()
    srs.ImportFromWkt(gcp_srs)
else:
    srs = None

# Georeferenced points carry Z (25D); pixel/line points are 2D.
if pixel_out == 0:
    geom_type = ogr.wkbPoint25D
else:
    geom_type = ogr.wkbPoint

layer = ds.CreateLayer( 'gcps', srs, geom_type = geom_type )

# Whichever half of the GCP is not stored in the geometry is stored in
# attribute fields instead.
if pixel_out == 0:
    fd = ogr.FieldDefn( 'Pixel', ogr.OFTReal )
    layer.CreateField( fd )
    fd = ogr.FieldDefn( 'Line', ogr.OFTReal )
    layer.CreateField( fd )
else:
    fd = ogr.FieldDefn( 'X', ogr.OFTReal )
    layer.CreateField( fd )
    fd = ogr.FieldDefn( 'Y', ogr.OFTReal )
    layer.CreateField( fd )
    fd = ogr.FieldDefn( 'Z', ogr.OFTReal )
    layer.CreateField( fd )

fd = ogr.FieldDefn( 'Id', ogr.OFTString )
layer.CreateField( fd )
fd = ogr.FieldDefn( 'Info', ogr.OFTString )
layer.CreateField( fd )

# ----------------------------------------------------------------------------
# Write GCPs.
# ----------------------------------------------------------------------------

for gcp in gcps:
    feat = ogr.Feature( layer.GetLayerDefn() )

    if pixel_out == 0:
        # Geometry holds the georeferenced location; Pixel/Line are fields.
        geom = ogr.Geometry( geom_type )
        feat.SetField( 'Pixel', gcp.GCPPixel )
        feat.SetField( 'Line', gcp.GCPLine )
        geom.SetPoint( 0, gcp.GCPX, gcp.GCPY, gcp.GCPZ )
    else:
        # Geometry holds the pixel/line location; X/Y/Z are fields.
        geom = ogr.Geometry( geom_type )
        feat.SetField( 'X', gcp.GCPX )
        feat.SetField( 'Y', gcp.GCPY )
        feat.SetField( 'Z', gcp.GCPZ )
        geom.SetPoint( 0, gcp.GCPPixel, gcp.GCPLine )

    feat.SetField( 'Id', gcp.Id )
    feat.SetField( 'Info', gcp.Info )
    feat.SetGeometryDirectly( geom )
    layer.CreateFeature( feat )

    feat = None

ds.Destroy()
| mit |
neonknight/convert2gnokii | bin/gcontacts2gnokii.py | 1 | 4022 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
This file is part of data2gnokii.
data2gnokii is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
data2gnokii is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with data2gnokii. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import os
from ConfigParser import ConfigParser
import sys
from operator import itemgetter
import gdata.contacts.client
import getpass
class GContacts2Gnokii(object):
    """Fetches Google contacts through the GData API and renders them as a
    gnokii phonebook (one formatted line per contact)."""

    def __init__(self):
        section = 'gcontacts2gnokii'
        config = self._read_config()
        self.googleuser = config.get(section, 'googleuser')
        # Regex deciding which of a contact's numbers is preferred.
        self.phoneregex = config.get(section, 'prefer')
        # Third argument True is ConfigParser's `raw` flag: the gnokii
        # format string keeps its %-placeholders uninterpolated.
        self.gnokiiformat = config.get(section, 'gnokiiformat', True)
        self.gd_client = None
        self.contacts = []

    def authenticate(self):
        """Prompt for any missing credentials and log in to Google Contacts."""
        if not self.googleuser:
            while not self.googleuser:
                self.googleuser = raw_input('Please enter your Gmail Address: ')
        password = None
        while not password:
            password = getpass.getpass()
            if not password:
                print 'Password cannot be blank.'
        self.gd_client = gdata.contacts.client.ContactsClient('gcontacts2gnokii')
        # ClientLogin stores the auth token on gd_client as a side effect;
        # the returned token object is not used afterwards.
        token = self.gd_client.ClientLogin(self.googleuser, password, self.gd_client.source)

    def fetch_contacts(self):
        """Download contacts and collect (name, number) pairs that have both
        a name and at least one phone number."""
        query = gdata.contacts.client.ContactsQuery(max_results=3000)
        feed = self.gd_client.GetContacts(query=query)
        for entry in feed.entry:
            if not entry:
                continue
            if not entry.name:
                continue
            name = entry.name.full_name.text
            if not entry.phone_number:
                continue
            # The LAST number matching the preference regex wins (the loop
            # keeps overwriting); otherwise fall back to the first number.
            phone = None
            for number in entry.phone_number:
                if re.match(self.phoneregex, number.text):
                    phone = number.text
            if not phone:
                phone = entry.phone_number[0].text
            self.contacts.append((name, phone))

    def get_data(self):
        """Return the formatted gnokii phonebook as a single string."""
        return self._gen_gnokii_file(self.contacts, self.gnokiiformat)

    def _read_config(self):
        """Locate and parse config.ini; exits the process if none exists."""
        homedir = os.path.expanduser('~')
        # Candidates, in order; the LAST existing path wins.
        filepaths = [
            'config.ini',
            '%s/.config/convert2gnokii/config.ini' % homedir,
        ]
        filename = ''
        for filepath in filepaths:
            if os.path.exists(filepath):
                filename = filepath
        if not filename:
            # NOTE(review): only the last candidate path is reported here,
            # not the full search list.
            sys.stderr.write('could not find config file: %s\n' % filepath)
            sys.exit(1)
        config=ConfigParser()
        config.read(filename)
        return config

    def _gen_gnokii_file(self, phonebook, gnokiiformat):
        """Render (name, number) pairs through *gnokiiformat*, sorted by
        name, with sequential 1-based indexes substituted as %(index)s."""
        entries = []
        for entry in phonebook:
            data = dict(name=entry[0], number=entry[1])
            entries.append(data)
        entries = sorted(entries, key=itemgetter('name'))
        data = ''
        index = 1
        for entry in entries:
            entry['index'] = index
            data += gnokiiformat % entry + '\n'
            index += 1
        # Drop all non-ASCII characters from the output.
        data = ''.join(i for i in data if ord(i)<128)
        return data
if __name__ == '__main__':
    # Log in, fetch contacts, and write the gnokii phonebook to stdout
    # (Python 2 print statement).
    tool = GContacts2Gnokii()
    tool.authenticate()
    tool.fetch_contacts()
    print tool.get_data()
| gpl-3.0 |
eliasdorneles/scrapy | tests/test_utils_url.py | 89 | 11178 | # -*- coding: utf-8 -*-
import unittest
import six
from scrapy.spiders import Spider
from scrapy.utils.url import (url_is_from_any_domain, url_is_from_spider,
canonicalize_url)
__doctests__ = ['scrapy.utils.url']
class UrlUtilsTest(unittest.TestCase):
    """Tests for url_is_from_any_domain() and url_is_from_spider()."""

    def test_url_is_from_any_domain(self):
        url = 'http://www.wheele-bin-art.co.uk/get/product/123'
        self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.co.uk']))
        # A mere suffix of the registered domain must not match.
        self.assertFalse(url_is_from_any_domain(url, ['art.co.uk']))

        url = 'http://wheele-bin-art.co.uk/get/product/123'
        self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.co.uk']))
        self.assertFalse(url_is_from_any_domain(url, ['art.co.uk']))

        # Domain comparison is case-insensitive in both directions.
        url = 'http://www.Wheele-Bin-Art.co.uk/get/product/123'
        self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.CO.UK']))
        self.assertTrue(url_is_from_any_domain(url, ['WHEELE-BIN-ART.CO.UK']))

        # An explicit port is part of the domain being matched.
        url = 'http://192.169.0.15:8080/mypage.html'
        self.assertTrue(url_is_from_any_domain(url, ['192.169.0.15:8080']))
        self.assertFalse(url_is_from_any_domain(url, ['192.169.0.15']))

        # javascript: pseudo-URLs never match a domain.
        url = 'javascript:%20document.orderform_2581_1190810811.mode.value=%27add%27;%20javascript:%20document.orderform_2581_1190810811.submit%28%29'
        self.assertFalse(url_is_from_any_domain(url, ['testdomain.com']))
        self.assertFalse(url_is_from_any_domain(url+'.testdomain.com', ['testdomain.com']))

    def test_url_is_from_spider(self):
        # Without allowed_domains, the spider name acts as the domain.
        spider = Spider(name='example.com')
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', spider))
        self.assertFalse(url_is_from_spider('http://www.example.org/some/page.html', spider))
        self.assertFalse(url_is_from_spider('http://www.example.net/some/page.html', spider))

    def test_url_is_from_spider_class_attributes(self):
        # Same as above, but passing the spider class instead of an instance.
        class MySpider(Spider):
            name = 'example.com'
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', MySpider))
        self.assertFalse(url_is_from_spider('http://www.example.org/some/page.html', MySpider))
        self.assertFalse(url_is_from_spider('http://www.example.net/some/page.html', MySpider))

    def test_url_is_from_spider_with_allowed_domains(self):
        # allowed_domains extend (not replace) the name-derived domain,
        # and may be given as list, set or tuple.
        spider = Spider(name='example.com', allowed_domains=['example.org', 'example.net'])
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://example.com/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://www.example.org/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://www.example.net/some/page.html', spider))
        self.assertFalse(url_is_from_spider('http://www.example.us/some/page.html', spider))

        spider = Spider(name='example.com', allowed_domains=set(('example.com', 'example.net')))
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))

        spider = Spider(name='example.com', allowed_domains=('example.com', 'example.net'))
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))

    def test_url_is_from_spider_with_allowed_domains_class_attributes(self):
        class MySpider(Spider):
            name = 'example.com'
            allowed_domains = ('example.org', 'example.net')
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://example.com/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://www.example.org/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://www.example.net/some/page.html', MySpider))
        self.assertFalse(url_is_from_spider('http://www.example.us/some/page.html', MySpider))
class CanonicalizeUrlTest(unittest.TestCase):
    """Tests for canonicalize_url(): query sorting, percent-encoding
    normalization, fragment removal and related behavior."""

    def test_canonicalize_url(self):
        # simplest case
        self.assertEqual(canonicalize_url("http://www.example.com/"),
                                          "http://www.example.com/")

    def test_return_str(self):
        # Native str is returned for both text and bytes input.
        assert isinstance(canonicalize_url(u"http://www.example.com"), str)
        assert isinstance(canonicalize_url(b"http://www.example.com"), str)

    def test_append_missing_path(self):
        self.assertEqual(canonicalize_url("http://www.example.com"),
                                          "http://www.example.com/")

    def test_typical_usage(self):
        self.assertEqual(canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
                                          "http://www.example.com/do?a=1&b=2&c=3")
        self.assertEqual(canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
                                          "http://www.example.com/do?a=3&b=2&c=1")
        self.assertEqual(canonicalize_url("http://www.example.com/do?&a=1"),
                                          "http://www.example.com/do?a=1")

    def test_sorting(self):
        # Query arguments are sorted by key, then by value for repeated keys.
        self.assertEqual(canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
                                          "http://www.example.com/do?a=50&b=2&b=5&c=3")

    def test_keep_blank_values(self):
        # Blank values are kept by default and dropped with
        # keep_blank_values=False.
        self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2", keep_blank_values=False),
                                          "http://www.example.com/do?a=2")
        self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2"),
                                          "http://www.example.com/do?a=2&b=")
        self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2", keep_blank_values=False),
                                          "http://www.example.com/do?a=2")
        self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2"),
                                          "http://www.example.com/do?a=2&b=&c=")

        self.assertEqual(canonicalize_url(u'http://www.example.com/do?1750,4'),
                                          'http://www.example.com/do?1750%2C4=')

    def test_spaces(self):
        # ' ', '+' and '%20' in query values all normalize to '+'.
        self.assertEqual(canonicalize_url("http://www.example.com/do?q=a space&a=1"),
                                          "http://www.example.com/do?a=1&q=a+space")
        self.assertEqual(canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
                                          "http://www.example.com/do?a=1&q=a+space")
        self.assertEqual(canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
                                          "http://www.example.com/do?a=1&q=a+space")

    @unittest.skipUnless(six.PY2, "TODO")
    def test_normalize_percent_encoding_in_paths(self):
        # NOTE: the trailing comma below makes this statement a 1-tuple;
        # harmless, left as-is.
        self.assertEqual(canonicalize_url("http://www.example.com/a%a3do"),
                                          "http://www.example.com/a%A3do"),

    @unittest.skipUnless(six.PY2, "TODO")
    def test_normalize_percent_encoding_in_query_arguments(self):
        self.assertEqual(canonicalize_url("http://www.example.com/do?k=b%a3"),
                                          "http://www.example.com/do?k=b%A3")

    def test_non_ascii_percent_encoding_in_paths(self):
        # NOTE: two statements below end with a stray trailing comma
        # (1-tuples); harmless, left as-is.
        self.assertEqual(canonicalize_url("http://www.example.com/a do?a=1"),
                                          "http://www.example.com/a%20do?a=1"),
        self.assertEqual(canonicalize_url("http://www.example.com/a %20do?a=1"),
                                          "http://www.example.com/a%20%20do?a=1"),
        self.assertEqual(canonicalize_url(u"http://www.example.com/a do£.html?a=1"),
                                          "http://www.example.com/a%20do%C2%A3.html?a=1")
        self.assertEqual(canonicalize_url(b"http://www.example.com/a do\xc2\xa3.html?a=1"),
                                          "http://www.example.com/a%20do%C2%A3.html?a=1")

    def test_non_ascii_percent_encoding_in_query_arguments(self):
        self.assertEqual(canonicalize_url(u"http://www.example.com/do?price=\xa3500&a=5&z=3"),
                                          u"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
        self.assertEqual(canonicalize_url(b"http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
                                          "http://www.example.com/do?a=5&price=%C2%A3500&z=3")
        self.assertEqual(canonicalize_url(b"http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
                                          "http://www.example.com/do?a=1&price%28%C2%A3%29=500")

    def test_urls_with_auth_and_ports(self):
        self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com:81/do?now=1"),
                                          u"http://user:pass@www.example.com:81/do?now=1")

    def test_remove_fragments(self):
        self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag"),
                                          u"http://user:pass@www.example.com/do?a=1")
        self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag", keep_fragments=True),
                                          u"http://user:pass@www.example.com/do?a=1#frag")

    def test_dont_convert_safe_characters(self):
        # dont convert safe characters to percent encoding representation
        self.assertEqual(canonicalize_url(
            "http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"),
            "http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html")

    @unittest.skipUnless(six.PY2, "TODO")
    def test_safe_characters_unicode(self):
        # urllib.quote uses a mapping cache of encoded characters. when parsing
        # an already percent-encoded url, it will fail if that url was not
        # percent-encoded as utf-8, that's why canonicalize_url must always
        # convert the urls to string. the following test asserts that
        # functionality.
        self.assertEqual(canonicalize_url(u'http://www.example.com/caf%E9-con-leche.htm'),
                                          'http://www.example.com/caf%E9-con-leche.htm')

    def test_domains_are_case_insensitive(self):
        self.assertEqual(canonicalize_url("http://www.EXAMPLE.com/"),
                                          "http://www.example.com/")

    def test_quoted_slash_and_question_sign(self):
        # %2F and %3F stay encoded (decoding them would change the path/query
        # structure); only the hex digits are uppercased.
        self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
                                          "http://foo.com/AC%2FDC+rocks%3F/?yeah=1")
        self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC/"),
                                          "http://foo.com/AC%2FDC/")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
smarr/Truffle | truffle/mx.truffle/tck.py | 1 | 18467 | #
# Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import print_function
import argparse
import os
import os.path
import re
import subprocess
import sys
import zipfile
class Abort(RuntimeError):
    """Fatal TCK error carrying a message and the process exit code to use."""

    def __init__(self, message, retCode=-1):
        super(Abort, self).__init__()
        self.message = message
        self.retCode = retCode
# Registry mapping mode names to the functions that build them.
_mode_factories = {}


def _mode_factory(name):
    """Decorator registering a Mode-producing staticmethod under *name*."""
    def register(static_method):
        # Keep the plain underlying function in the registry; the
        # staticmethod wrapper itself is returned unchanged so it still
        # works inside the decorated class body.
        _mode_factories[name] = static_method.__func__
        return static_method
    return register
class Mode:
    """TCK execution mode (name plus the extra VM arguments it implies).

    The two known modes are lazily-created singletons, registered in
    _mode_factories by the _mode_factory decorator at class-creation time.
    """
    # Singleton instances, created on first access.
    _default = None
    _compile = None

    def __init__(self, name, vm_args=None):
        self.name = name
        self.vm_args = vm_args if vm_args else []

    def __str__(self):
        return self.name

    @_mode_factory('default')
    @staticmethod
    def default():
        """Return the singleton 'default' mode (no extra VM arguments)."""
        if not Mode._default:
            Mode._default = Mode('default')
        return Mode._default

    @_mode_factory('compile')
    @staticmethod
    def compile():
        """Return the singleton 'compile' mode: forces immediate, foreground
        compilation so compiler issues surface during the TCK run."""
        if not Mode._compile:
            Mode._compile = Mode('compile', [
                '-Dpolyglot.engine.AllowExperimentalOptions=true',
                '-Dpolyglot.engine.CompileImmediately=true',
                '-Dpolyglot.engine.BackgroundCompilation=false',
                '-Dtck.inlineVerifierInstrument=false'])
        return Mode._compile

    @staticmethod
    def for_name(name):
        """Look up a mode by name; raises Abort for unknown names."""
        factory = _mode_factories.get(name)
        if factory:
            return factory()
        else:
            raise Abort('Mode must be default or compile')
class LogLevel:
    """
    Log level constants to enable verbose output.
    """
    # OFF is larger than any real level, so it suppresses all output.
    OFF = 1<<31
    INFO = 800
    FINE = 500


# Module-wide threshold: _log() only prints messages at or above this level.
_log_level = LogLevel.INFO
class _ClassPathEntry:
    """Classpath element backed by a fixed filesystem path.

    Subclasses may leave `path` unset until install() has resolved it.
    """

    def __init__(self, path):
        self.path = path

    def install(self, folder):
        """No-op: a plain entry needs no installation step."""
        pass

    def __str__(self):
        return self.path
class _MvnClassPathEntry(_ClassPathEntry):
    """Classpath element resolved through Maven.

    install() downloads the artifact into the local repository and copies
    its jar(s) into a per-artifact subfolder; `path` is then the os.pathsep
    join of those jars (or '' when an optional artifact is unavailable).
    """

    def __init__(self, groupId, artifactId, version, required=True, repository=None):
        self.repository = repository
        self.groupId = groupId
        self.artifactId = artifactId
        self.version = version
        # When required is False, a failed download yields an empty path
        # instead of aborting.
        self.required = required
        _ClassPathEntry.__init__(self, None)

    def install(self, folder):
        """Resolve the artifact and populate self.path (see class docstring)."""
        _log(LogLevel.INFO, 'Installing {0}'.format(self))
        if self.pull():
            install_folder = os.path.join(folder, self.artifactId)
            os.mkdir(install_folder)
            self.copy(install_folder)
            self.path = os.pathsep.join([os.path.join(install_folder, f) for f in os.listdir(install_folder) if f.endswith('.jar')])
        else:
            self.path = ''

    def pull(self):
        """Download the artifact via maven-dependency-plugin's dependency:get.

        Returns True on success, False when a non-required artifact cannot
        be fetched; raises Abort for a required one.
        """
        process = _MvnClassPathEntry._run_maven(['dependency:get', '-DgroupId=' + self.groupId, '-DartifactId=' + self.artifactId, '-Dversion=' + self.version], self.repository)
        ret_code = process.wait()
        if ret_code != 0:
            if self.required:
                raise Abort('Cannot download artifact {0} '.format(self))
            else:
                return False
        else:
            return True

    def copy(self, folder):
        """Copy the downloaded jar into *folder* via dependency:copy."""
        process = _MvnClassPathEntry._run_maven(['dependency:copy', '-Dartifact=' + self.groupId + ':' + self.artifactId + ':' + self.version, '-DoutputDirectory=' + folder], self.repository)
        ret_code = process.wait()
        if ret_code != 0:
            raise Abort('Cannot copy artifact {0}'.format(self))

    def __str__(self):
        # Standard Maven coordinates: groupId:artifactId:version
        return '{0}:{1}:{2}'.format(self.groupId, self.artifactId, self.version)

    @staticmethod
    def _run_maven(args, repository=None):
        """Spawn `mvn -q` with proxy settings taken from the environment.

        Returns the Popen handle; callers wait() and check the exit code.
        """
        extra_args = ['-Dmaven.repo.local=' + repository] if repository else []
        extra_args.append('-q')
        host, port = _MvnClassPathEntry._parse_http_proxy(['HTTP_PROXY', 'http_proxy'])
        if host:
            extra_args.append('-DproxyHost=' + host)
        if port:
            extra_args.append('-DproxyPort=' + port)
        host, port = _MvnClassPathEntry._parse_http_proxy(['HTTPS_PROXY', 'https_proxy'])
        if host:
            extra_args.append('-Dhttps.proxyHost=' + host)
        if port:
            extra_args.append('-Dhttps.proxyPort=' + port)
        mvn_cmd = 'mvn'
        mvn_home = os.environ.get('MAVEN_HOME')
        if mvn_home:
            mvn_cmd = os.path.join(mvn_home, 'bin', mvn_cmd)
        if _is_windows():
            # On Windows, Maven is a batch script and needs --batch-mode.
            mvn_cmd += '.cmd'
            extra_args += ['--batch-mode']
        return _run([mvn_cmd] + extra_args + args)

    @staticmethod
    def _parse_http_proxy(envVarNames):
        """Return (host, port) from the first set variable in *envVarNames*.

        Accepts values like 'http://host:1234/', 'host:1234' or just 'host';
        port may be None. Returns (None, None) when no variable is set;
        raises Abort on a malformed value.
        """
        p = re.compile(r'(?:https?://)?([^:]+):?(\d+)?/?$')
        for name in envVarNames:
            value = os.environ.get(name)
            if value:
                m = p.match(value)
                if m:
                    return m.group(1), m.group(2)
                else:
                    raise Abort('Value of ' + name + ' is not valid: ' + value)
        return (None, None)
def _log(level, message, args=None):
    """Print *message* (a ``str.format`` template) when *level* passes the threshold.

    NOTE(review): ``_log_level`` and ``LogLevel`` are module globals defined
    elsewhere in this file; ``level >= _log_level`` assumes LogLevel values
    are ordered with higher meaning more severe -- confirm.
    """
    if level != LogLevel.OFF and level >= _log_level:
        # A single positional argument fills "{0}"; an empty list is the
        # placeholder when the caller pre-formatted the message.
        print(message.format(args if args else []))
def _is_windows():
    """Return True when running on a native Windows Python (``sys.platform`` 'win32')."""
    platform_name = sys.platform
    return platform_name.startswith('win32')
def _rmdir_recursive(to_delete):
    """Recursively delete the file or directory tree rooted at ``to_delete``.

    Symbolic links are unlinked rather than followed, so a link pointing
    outside the tree can never cause its target's contents to be deleted.
    """
    # os.path.isdir() follows symlinks; without the islink() guard a symlink
    # to a directory would be descended into and the *target* emptied.
    if os.path.isdir(to_delete) and not os.path.islink(to_delete):
        for child in os.listdir(to_delete):
            _rmdir_recursive(os.path.join(to_delete, child))
        os.rmdir(to_delete)
    else:
        os.unlink(to_delete)
def _run(args, log_level=False):
    """Spawn *args* as a subprocess and return the ``subprocess.Popen`` handle.

    The caller is responsible for waiting on the returned process.
    NOTE(review): the ``log_level`` parameter is accepted but never used --
    kept for call-site compatibility; confirm before removing.
    """
    _log(LogLevel.FINE, "exec({0})", ', '.join(['"' + a + '"' for a in args]))
    return subprocess.Popen(args)
def _run_java(javaHome, mainClass, cp=None, truffleCp=None, bootCp=None, vmArgs=None, args=None, dbgPort=None):
    """Launch ``java`` from *javaHome* running *mainClass* and return the process handle.

    :param javaHome: JDK installation root; ``bin/java`` is resolved beneath it
    :param mainClass: fully qualified name of the class to execute
    :param cp: iterable of class-path entries (objects with a ``path`` attribute)
    :param truffleCp: entries appended to ``-Dtruffle.class.path.append``
    :param bootCp: entries appended to the boot class path (``-Xbootclasspath/a``)
    :param vmArgs: additional VM arguments; the caller's list is left untouched
    :param args: program arguments passed after the main class
    :param dbgPort: when set, the VM waits for a JDWP debugger on this port
    """
    # Work on copies: the previous implementation appended to the caller's
    # lists, so repeated calls kept accumulating -cp/-D/JDWP arguments.
    vm_args = list(vmArgs) if vmArgs else []
    program_args = list(args) if args else []
    if cp:
        vm_args.append('-cp')
        vm_args.append(os.pathsep.join([e.path for e in cp]))
    if truffleCp:
        vm_args.append('-Dtruffle.class.path.append=' + os.pathsep.join([e.path for e in truffleCp]))
    if bootCp:
        vm_args.append('-Xbootclasspath/a:' + os.pathsep.join([e.path for e in bootCp]))
    java_cmd = os.path.join(javaHome, 'bin', 'java')
    if _is_windows():
        java_cmd += '.exe'
    if dbgPort:
        vm_args.append('-Xdebug')
        vm_args.append('-Xrunjdwp:transport=dt_socket,server=y,address={0},suspend=y'.format(dbgPort))
    return _run([java_cmd] + vm_args + [mainClass] + program_args)
def _split_VM_args_and_filters(args):
    """Split a mixed argument list into (VM options, positional filters).

    The split point is the first token that does not look like an option and
    is not the value of a preceding space-separated JVM flag (e.g. ``-cp``).
    Returns ``(args, [])`` when no such token exists.
    """
    space_separated_flags = frozenset([
        '-cp', '-classpath', '-mp', '-modulepath', '-limitmods', '-addmods',
        '-upgrademodulepath', '-m', '--module-path', '--limit-modules',
        '--add-modules', '--upgrade-module-path', '--module',
        '--module-source-path', '--add-exports', '--add-reads',
        '--patch-module', '--boot-class-path', '--source-path'])
    for index, token in enumerate(args):
        if token.startswith('-'):
            continue
        if index > 0 and args[index - 1] in space_separated_flags:
            # This token is the argument of the preceding flag, not a filter.
            continue
        return args[:index], args[index:]
    return args, []
def _find_unit_tests(cp, pkgs=None):
    """Collect the ``*Test`` class names found in the jars on the class path.

    :param cp: iterable of class-path entries (objects with a ``path`` attribute)
    :param pkgs: optional list of package names; when given, only tests
        declared directly in one of these packages are kept
    :return: matching fully qualified class names, sorted in reverse order
    """
    def _in_requested_package(class_name):
        if not pkgs:
            return True
        package, _, _ = class_name.rpartition('.')
        owner = package if package else class_name
        return owner in pkgs

    found = []
    for entry in cp:
        archive = entry.path
        if not zipfile.is_zipfile(archive):
            continue
        with zipfile.ZipFile(archive) as zf:
            for member in zf.namelist():
                if not member.endswith('Test.class'):
                    continue
                # Strip the '.class' suffix and turn the path into a FQN.
                class_name = member[:-len('.class')].replace('/', '.')
                if _in_requested_package(class_name):
                    found.append(class_name)
    found.sort(reverse=True)
    return found
def _execute_tck_impl(graalvm_home, mode, language_filter, values_filter, tests_filter, cp, truffle_cp, boot_cp, vm_args, debug_port):
    """Discover the TCK JUnit tests, launch them on *graalvm_home* and return the exit code."""
    tests = _find_unit_tests(cp, pkgs=['com.oracle.truffle.tck.tests'])
    vm_args.extend(mode.vm_args)
    if language_filter:
        vm_args.append('-Dtck.language={0}'.format(language_filter))
    if values_filter:
        vm_args.append('-Dtck.values={0}'.format(','.join(values_filter)))
    if tests_filter:
        # Keep only tests whose name contains at least one requested substring.
        tests = [t for t in tests if any(s in t for s in tests_filter)]
    runner = _run_java(graalvm_home, 'org.junit.runner.JUnitCore', cp=cp, truffleCp=truffle_cp, bootCp=boot_cp, vmArgs=vm_args, args=tests, dbgPort=debug_port)
    return runner.wait()
def execute_tck(graalvm_home, mode=Mode.default(), language_filter=None, values_filter=None, tests_filter=None, cp=None, truffle_cp=None, boot_cp=None, vm_args=None, debug_port=None):
    """Run the Truffle TCK on the GraalVM installed at *graalvm_home*.

    :param graalvm_home: path to the GraalVM installation to test
    :param mode: the TCK mode (defaults to ``Mode.default()``)
    :param language_filter: restrict the TCK tests to a single language id
    :param values_filter: iterable of language ids whose value constructors are used
    :param tests_filter: a substring of a TCK test name, or an iterable of such substrings
    :param cp: iterable of class-path entries containing the TCK providers and dependencies
    :param truffle_cp: iterable of paths appended to ``truffle.class.path``
        (additional languages and instruments)
    :param boot_cp: iterable of paths appended to the Java boot class path
    :param vm_args: iterable of extra Java VM arguments
    :param debug_port: port on which the Java VM listens for a debugger
    :return: the JUnit process exit code
    """
    if not cp:
        cp = []
    if not truffle_cp:
        truffle_cp = []
    if not boot_cp:
        boot_cp = []
    if not vm_args:
        vm_args = []
    # Accept a single substring as well as an iterable of substrings.
    if tests_filter and isinstance(tests_filter, str):
        tests_filter = [tests_filter]
    abs_cp = [_ClassPathEntry(os.path.abspath(entry)) for entry in cp]
    abs_truffle_cp = [_ClassPathEntry(os.path.abspath(entry)) for entry in truffle_cp]
    abs_boot_cp = [_ClassPathEntry(os.path.abspath(entry)) for entry in boot_cp]
    effective_vm_args = vm_args if isinstance(vm_args, list) else list(vm_args)
    return _execute_tck_impl(graalvm_home, mode, language_filter, values_filter, tests_filter,
                             abs_cp, abs_truffle_cp, abs_boot_cp,
                             effective_vm_args, debug_port)
def set_log_level(log_level):
    """Set the module-wide logging threshold consulted by ``_log``.

    :param LogLevel log_level: messages below this level are suppressed
    """
    global _log_level
    _log_level = log_level
# Maven coordinates of the artifacts needed to run the TCK. Entries without
# an explicit 'version' fall back to the version given on the command line
# (--tck-version, default LATEST).
_MVN_DEPENDENCIES = {
    'TESTS' : [
        {'groupId':'junit', 'artifactId':'junit', 'version':'4.12', 'required':True},
        # Fixed: the groupId was 'org/hamcrest'; Maven coordinates use dots,
        # so dependency:get could never resolve the hamcrest artifact.
        {'groupId':'org.hamcrest', 'artifactId':'hamcrest-all', 'version':'1.3', 'required':True},
        {'groupId':'org.graalvm.truffle', 'artifactId':'truffle-tck-tests', 'required':False},
    ],
    'TCK' : [
        {'groupId':'org.graalvm.sdk', 'artifactId':'polyglot-tck', 'required':True}
    ],
    'COMMON' : [
        {'groupId':'org.graalvm.truffle', 'artifactId':'truffle-tck-common', 'required':True}
    ],
    'INSTRUMENTS' : [
        {'groupId':'org.graalvm.truffle', 'artifactId':'truffle-tck-instrumentation', 'required':True},
    ]
}
def _is_modular_jvm(java_home):
    """Guess from the ``release`` file whether *java_home* is a modular (JDK 9+) JVM.

    Returns True (assume modular) when the release file is missing,
    unreadable, or carries no ``JAVA_VERSION`` entry.
    """
    version_re = re.compile(r'JAVA_VERSION="(.*)"')
    release_file = os.path.join(java_home, 'release')
    if not (os.path.isfile(release_file) and os.access(release_file, os.R_OK)):
        return True
    with open(release_file) as f:
        for line in f:
            match = version_re.match(line)
            if match:
                java_version = match.group(1)
                is_pre9 = java_version.startswith('1.8.') or java_version.startswith('8.')
                return not is_pre9
    return True
def _main(argv):
    """Command-line entry point.

    Parses the arguments, downloads the TCK Maven dependencies into a
    temporary ``cache`` folder, runs the TCK on the given GraalVM and
    terminates the process with the JUnit exit code.

    NOTE(review): the internal indentation of the help epilog below was lost
    in extraction and has been reconstructed -- confirm against upstream.
    """
    unittestHelpSuffix = """
Supported modes are:
default executes the test with default GraalVM configuration
compile compiles the tests before execution
If test filters are supplied, only tests whose fully qualified name
includes a filter as a substring are run.
For example:
python tck.py js default ExpressionTest
will run TCK ExpressionTest for JavaScript language in a default mode.
"""
    parser = argparse.ArgumentParser(description='Truffle TCK Runner',
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     epilog=unittestHelpSuffix)
    parser.set_defaults(log_level=LogLevel.INFO)
    parser.add_argument('-v', action='store_const', dest='log_level', const=LogLevel.FINE, help='enable log_level output')
    parser.add_argument('-q', action='store_const', dest='log_level', const=LogLevel.OFF, help='quiet output')
    parser.add_argument('-g', '--graalvm-home', type=str, dest='graalvm_home', help='GraalVM to execute TCK on', required=True, metavar='<graalvm home>')
    parser.add_argument('--dbg', type=int, dest='dbg_port', help='make TCK tests wait on <port> for a debugger', metavar='<port>')
    parser.add_argument('-d', action='store_const', const=8000, dest='dbg_port', help='alias for "-dbg 8000"')
    parser.add_argument('--tck-version', type=str, dest='tck_version', help='maven TCK version, default is LATEST', default='LATEST', metavar='<version>')
    parser.add_argument('--tck-values', type=str, dest='tck_values', help="language ids of value providers to use, separated by ','", metavar='<value providers>')
    parser.add_argument('-cp', '--class-path', type=str, dest='class_path', help='classpath containing additional TCK provider(s)', metavar='<classpath>')
    parser.add_argument('-lp', '--language-path', type=str, dest='truffle_path', help='classpath containing additinal language jar(s)', metavar='<classpath>')
    # Rewrite the auto-generated usage line to document the trailing
    # positional [VM options...] [language [mode [test filters...]]] part.
    usage = parser.format_usage().strip()
    if usage.startswith('usage: '):
        usage = usage[len('usage: '):]
    parser.usage = usage + ' [VM options...] [language [mode [test filters...]]]'
    parsed_args, args = parser.parse_known_args()
    global _log_level
    _log_level = parsed_args.log_level
    cache_folder = 'cache'
    try:
        # Everything before the first non-option token is a VM option.
        vm_args, other_args = _split_VM_args_and_filters(args)
        for pattern in other_args:
            if pattern.startswith('-'):
                raise Abort('VM option {0} must precede {1}'.format(pattern, other_args[0]))
        language = None
        values = None
        mode = Mode.default()
        tests_filter = []
        if len(other_args) > 0:
            language = other_args[0]
        if len(other_args) > 1:
            mode = Mode.for_name(other_args[1])
        if len(other_args) > 2:
            tests_filter = other_args[2:]
        if parsed_args.tck_values:
            values = parsed_args.tck_values.split(',')
        # Resolve the Maven artifacts; entries without an explicit version
        # use the --tck-version argument.
        os.mkdir(cache_folder)
        boot = [_MvnClassPathEntry(e['groupId'], e['artifactId'], e['version'] if 'version' in e else parsed_args.tck_version, e['required']) for e in _MVN_DEPENDENCIES['COMMON']]
        cp = [_MvnClassPathEntry(e['groupId'], e['artifactId'], e['version'] if 'version' in e else parsed_args.tck_version, e['required']) for e in _MVN_DEPENDENCIES['TESTS']]
        truffle_cp = [_MvnClassPathEntry(e['groupId'], e['artifactId'], e['version'] if 'version' in e else parsed_args.tck_version, e['required']) for e in _MVN_DEPENDENCIES['INSTRUMENTS']]
        tck = [_MvnClassPathEntry(e['groupId'], e['artifactId'], e['version'] if 'version' in e else parsed_args.tck_version, e['required']) for e in _MVN_DEPENDENCIES['TCK']]
        # On a modular (JDK 9+) JVM the TCK API goes on the class path; on
        # JDK 8 it has to be appended to the boot class path instead.
        if _is_modular_jvm(parsed_args.graalvm_home):
            cp.extend(tck)
        else:
            boot.extend(tck)
        if parsed_args.class_path:
            for e in parsed_args.class_path.split(os.pathsep):
                cp.append(_ClassPathEntry(os.path.abspath(e)))
        if parsed_args.truffle_path:
            for e in parsed_args.truffle_path.split(os.pathsep):
                truffle_cp.append(_ClassPathEntry(os.path.abspath(e)))
        for entry in boot + cp + truffle_cp:
            entry.install(cache_folder)
        ret_code = _execute_tck_impl(parsed_args.graalvm_home, mode, language, values, tests_filter, cp, truffle_cp, boot, vm_args, parsed_args.dbg_port)
        sys.exit(ret_code)
    except Abort as abort:
        sys.stderr.write(abort.message)
        sys.stderr.write('\n')
        sys.exit(abort.retCode)
    finally:
        # Best-effort cleanup of the downloaded artifacts.
        if os.path.isdir(cache_folder):
            _rmdir_recursive(cache_folder)
if __name__ == '__main__':
    # Allow running this module directly as a script.
    _main(sys.argv)
| gpl-2.0 |
kuwa32/chainer | examples/mnist/train_mnist_model_parallel.py | 3 | 4385 | #!/usr/bin/env python
"""Chainer example: train a multi-layer perceptron on MNIST using two GPUs.
This is a toy example to write a model-parallel computation in Chainer.
Note that this is just an example; the network definition is not optimal
and performs poorly on MNIST dataset.
"""
import math
import numpy as np
import six
import chainer
from chainer import cuda
from chainer import functions as F
from chainer import optimizers
import data
# Hyper-parameters for the toy model-parallel MLP.
batchsize = 100
n_epoch = 50
n_units = 2000

# Prepare dataset
print('load MNIST dataset')
mnist = data.load_mnist_data()
mnist['data'] = mnist['data'].astype(np.float32)
mnist['data'] /= 255  # scale pixel values into [0, 1]
mnist['target'] = mnist['target'].astype(np.int32)

N = 60000  # training split size; the remainder is held out for testing
x_train, x_test = np.split(mnist['data'], [N])
y_train, y_test = np.split(mnist['target'], [N])
N_test = y_test.size

# Prepare the multi-layer perceptron model
# Note that the model splits into two GPUs at the first layer,
# and share their activations only at third and sixth layers.
cuda.init()
wscale = math.sqrt(2)
model = chainer.FunctionSet(
    gpu0=chainer.FunctionSet(
        l1=F.Linear(784, n_units // 2, wscale=wscale),
        l2=F.Linear(n_units // 2, n_units // 2, wscale=wscale),
        l3=F.Linear(n_units // 2, n_units, wscale=wscale),
        l4=F.Linear(n_units, n_units // 2, wscale=wscale),
        l5=F.Linear(n_units // 2, n_units // 2, wscale=wscale),
        l6=F.Linear(n_units // 2, 10, wscale=wscale)
    ).to_gpu(0),
    # The second half mirrors the first, hosted on GPU 1.
    gpu1=chainer.FunctionSet(
        l1=F.Linear(784, n_units // 2, wscale=wscale),
        l2=F.Linear(n_units // 2, n_units // 2, wscale=wscale),
        l3=F.Linear(n_units // 2, n_units, wscale=wscale),
        l4=F.Linear(n_units, n_units // 2, wscale=wscale),
        l5=F.Linear(n_units // 2, n_units // 2, wscale=wscale),
        l6=F.Linear(n_units // 2, 10, wscale=wscale)
    ).to_gpu(1)
)
optimizer = optimizers.SGD(lr=0.1)
optimizer.setup(model)
# Neural net architecture
def forward(x_data, y_data, train=True):
    """Compute (softmax cross entropy loss, accuracy) for one minibatch.

    The same input is fed to both GPU halves; activations are exchanged
    across the devices after layer 3 and after layer 6.
    """
    x_0 = chainer.Variable(cuda.to_gpu(x_data, 0), volatile=not train)
    x_1 = chainer.Variable(cuda.to_gpu(x_data, 1), volatile=not train)
    t = chainer.Variable(cuda.to_gpu(y_data, 0), volatile=not train)
    h1_0 = F.dropout(F.relu(model.gpu0.l1(x_0)), train=train)
    h1_1 = F.dropout(F.relu(model.gpu1.l1(x_1)), train=train)
    h2_0 = F.dropout(F.relu(model.gpu0.l2(h1_0)), train=train)
    h2_1 = F.dropout(F.relu(model.gpu1.l2(h1_1)), train=train)
    h3_0 = F.dropout(F.relu(model.gpu0.l3(h2_0)), train=train)
    h3_1 = F.dropout(F.relu(model.gpu1.l3(h2_1)), train=train)
    # Synchronize: sum both halves on GPU 0, then mirror the sum to GPU 1.
    h3_0 += F.copy(h3_1, 0)
    h3_1 = F.copy(h3_0, 1)
    h4_0 = F.dropout(F.relu(model.gpu0.l4(h3_0)), train=train)
    h4_1 = F.dropout(F.relu(model.gpu1.l4(h3_1)), train=train)
    h5_0 = F.dropout(F.relu(model.gpu0.l5(h4_0)), train=train)
    h5_1 = F.dropout(F.relu(model.gpu1.l5(h4_1)), train=train)
    h6_0 = F.relu(model.gpu0.l6(h5_0))
    h6_1 = F.relu(model.gpu1.l6(h5_1))
    # Synchronize: the final logits are the sum of both halves on GPU 0.
    y = h6_0 + F.copy(h6_1, 0)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
# Learning loop
# Reusable pinned minibatch buffers (copied into in place each step).
x_batch = np.ndarray((batchsize, 784), dtype=np.float32)
y_batch = np.ndarray((batchsize,), dtype=np.int32)
for epoch in six.moves.range(1, n_epoch + 1):
    print('epoch', epoch)

    # training: iterate the data in a fresh random order each epoch
    perm = np.random.permutation(N)
    sum_accuracy = 0
    sum_loss = 0
    for i in six.moves.range(0, N, batchsize):
        x_batch[:] = x_train[perm[i:i + batchsize]]
        y_batch[:] = y_train[perm[i:i + batchsize]]
        optimizer.zero_grads()
        loss, acc = forward(x_batch, y_batch)
        loss.backward()
        optimizer.update()
        sum_loss += float(cuda.to_cpu(loss.data)) * batchsize
        sum_accuracy += float(cuda.to_cpu(acc.data)) * batchsize
    print('train mean loss={}, accuracy={}'.format(
        sum_loss / N, sum_accuracy / N))

    # evaluation on the held-out split (dropout disabled via train=False)
    sum_accuracy = 0
    sum_loss = 0
    for i in six.moves.range(0, N_test, batchsize):
        loss, acc = forward(x_test[i:i + batchsize], y_test[i:i + batchsize],
                            train=False)
        sum_loss += float(cuda.to_cpu(loss.data)) * batchsize
        sum_accuracy += float(cuda.to_cpu(acc.data)) * batchsize
    print('test mean loss={}, accuracy={}'.format(
        sum_loss / N_test, sum_accuracy / N_test))
atsuyim/MITMf | plugins/spoof.py | 18 | 4851 | # Copyright (c) 2014-2016 Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
from plugins.plugin import Plugin
class Spoof(Plugin):
    """MITMf plugin that redirects or modifies victim traffic using ARP,
    ICMP or DHCP poisoning, optionally combined with DNS spoofing.
    """
    name = "Spoof"
    optname = "spoof"
    desc = "Redirect/Modify traffic using ICMP, ARP, DHCP or DNS"
    version = "0.6"

    def initialize(self, options):
        '''Called if plugin is enabled, passed the options namespace'''
        self.options = options
        self.protocol_instances = []

        # Imported lazily so the module can be loaded without these deps.
        from core.utils import iptables, shutdown, set_ip_forwarding

        #Makes scapy more verbose
        debug = False

        # Exactly one poisoning transport (ARP/DHCP/ICMP) is started; DNS
        # spoofing can be combined with any of them (or used alone).
        if options.arp:
            if not options.gateway:
                shutdown("[Spoof] --arp argument requires --gateway")

            from core.poisoners.ARP import ARPpoisoner
            arp = ARPpoisoner(options)
            arp.debug = debug
            self.tree_info.append('ARP spoofing enabled')
            self.protocol_instances.append(arp)

        elif options.dhcp:
            from core.poisoners.DHCP import DHCPpoisoner
            if options.targets:
                shutdown("[Spoof] --targets argument invalid when DCHP spoofing")
            dhcp = DHCPpoisoner(options)
            dhcp.debug = debug
            self.tree_info.append('DHCP spoofing enabled')
            self.protocol_instances.append(dhcp)

        elif options.icmp:
            from core.poisoners.ICMP import ICMPpoisoner
            if not options.gateway:
                shutdown("[Spoof] --icmp argument requires --gateway")
            if not options.targets:
                shutdown("[Spoof] --icmp argument requires --targets")
            icmp = ICMPpoisoner(options)
            icmp.debug = debug
            self.tree_info.append('ICMP spoofing enabled')
            self.protocol_instances.append(icmp)

        if options.dns:
            self.tree_info.append('DNS spoofing enabled')
            # Install the DNS redirect rule unless a custom filter is active.
            if iptables().dns is False and options.filter is None:
                iptables().DNS(self.config['MITMf']['DNS']['port'])

        if not options.arp and not options.icmp and not options.dhcp and not options.dns:
            shutdown("[Spoof] Spoof plugin requires --arp, --icmp, --dhcp or --dns")

        # Turn the host into a router and redirect HTTP to the proxy port.
        set_ip_forwarding(1)
        if iptables().http is False and options.filter is None:
            iptables().HTTP(options.listen_port)

        for protocol in self.protocol_instances:
            protocol.start()

    def options(self, options):
        '''Registers this plugin's command-line arguments on the parser.'''
        group = options.add_mutually_exclusive_group(required=False)
        group.add_argument('--arp', dest='arp', action='store_true', help='Redirect traffic using ARP spoofing')
        group.add_argument('--icmp', dest='icmp', action='store_true', help='Redirect traffic using ICMP redirects')
        group.add_argument('--dhcp', dest='dhcp', action='store_true', help='Redirect traffic using DHCP offers')
        options.add_argument('--dns', dest='dns', action='store_true', help='Proxy/Modify DNS queries')
        options.add_argument('--netmask', dest='netmask', type=str, default='255.255.255.0', help='The netmask of the network')
        options.add_argument('--shellshock', type=str, metavar='PAYLOAD', dest='shellshock', help='Trigger the Shellshock vuln when spoofing DHCP, and execute specified command')
        options.add_argument('--gateway', dest='gateway', help='Specify the gateway IP')
        options.add_argument('--gatewaymac', dest='gatewaymac', help='Specify the gateway MAC [will auto resolve if ommited]')
        options.add_argument('--targets', dest='targets', help='Specify host/s to poison [if ommited will default to subnet]')
        options.add_argument('--ignore', dest='ignore', help='Specify host/s not to poison')
        options.add_argument('--arpmode',type=str, dest='arpmode', default='rep', choices=["rep", "req"], help=' ARP Spoofing mode: replies (rep) or requests (req) [default: rep]')

    def on_shutdown(self):
        '''Stops the poisoners and restores iptables and IP forwarding.'''
        from core.utils import iptables, set_ip_forwarding
        for protocol in self.protocol_instances:
            if hasattr(protocol, 'stop'):
                protocol.stop()
        iptables().flush()
        set_ip_forwarding(0)
| gpl-3.0 |
daxxi13/CouchPotatoServer | libs/cache/__init__.py | 99 | 8343 | """
copied from
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from cache.posixemulation import rename
from itertools import izip
from time import time
import os
import re
import tempfile
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
try:
import cPickle as pickle
except ImportError:
import pickle
def _items(mappingorseq):
    """Return an efficient (key, value) iterator over a mapping or a pair
    sequence::

        >>> for k, v in _items((i, i*i) for i in xrange(5)):
        ...     assert k*k == v

        >>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
        ...     assert k*k == v
    """
    if hasattr(mappingorseq, 'iteritems'):
        return mappingorseq.iteritems()
    return mappingorseq
class BaseCache(object):
    """Baseclass for the cache systems.  All the cache systems implement this
    API or a superset of it.

    NOTE(review): ``self.get`` is used by :meth:`get_many`, :meth:`inc` and
    :meth:`dec` but is not defined here -- subclasses must provide it.

    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`set`.
    """

    def __init__(self, default_timeout = 300):
        self.default_timeout = default_timeout

    def delete(self, key):
        """Deletes `key` from the cache.  If it does not exist in the cache
        nothing happens.

        :param key: the key to delete.
        """
        pass

    def get_many(self, *keys):
        """Returns a list of values for the given keys.
        For each key a item in the list is created.  Example::

            foo, bar = cache.get_many("foo", "bar")

        If a key can't be looked up `None` is returned for that key
        instead.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return map(self.get, keys)

    def get_dict(self, *keys):
        """Works like :meth:`get_many` but returns a dict::

            d = cache.get_dict("foo", "bar")
            foo = d["foo"]
            bar = d["bar"]

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return dict(izip(keys, self.get_many(*keys)))

    def set(self, key, value, timeout = None):
        """Adds a new key/value to the cache (overwrites value, if key already
        exists in the cache).

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout).
        """
        pass

    def add(self, key, value, timeout = None):
        """Works like :meth:`set` but does not overwrite the values of already
        existing keys.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key or the default
                        timeout if not specified.
        """
        pass

    def set_many(self, mapping, timeout = None):
        """Sets multiple keys and values from a mapping.

        :param mapping: a mapping with the keys/values to set.
        :param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout).
        """
        for key, value in _items(mapping):
            self.set(key, value, timeout)

    def delete_many(self, *keys):
        """Deletes multiple keys at once.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        for key in keys:
            self.delete(key)

    def clear(self):
        """Clears the cache.  Keep in mind that not all caches support
        completely clearing the cache.
        """
        pass

    def inc(self, key, delta = 1):
        """Increments the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to add.
        """
        # NOTE(review): read-modify-write here, so this base implementation
        # is not actually atomic -- backends should override it.
        self.set(key, (self.get(key) or 0) + delta)

    def dec(self, key, delta = 1):
        """Decrements the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `-delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to subtract.
        """
        self.set(key, (self.get(key) or 0) - delta)
class FileSystemCache(BaseCache):
    """A cache that stores the items on the file system.  This cache depends
    on being the only user of the `cache_dir`.  Make absolutely sure that
    nobody but this cache stores files there or otherwise the cache will
    randomly delete files therein.

    NOTE(review): this module is Python 2 source (``0600`` octal literal;
    ``iteritems``/``izip`` elsewhere in the file); under Python 3 the mode
    literal would have to be ``0o600``.

    :param cache_dir: the directory where cache files are stored.
    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`.
    :param mode: the file mode wanted for the cache files, default 0600
    """

    #: used for temporary files by the FileSystemCache
    _fs_transaction_suffix = '.__wz_cache'

    def __init__(self, cache_dir, threshold = 500, default_timeout = 300, mode = 0600):
        BaseCache.__init__(self, default_timeout)
        self._path = cache_dir
        self._threshold = threshold
        self._mode = mode
        if not os.path.exists(self._path):
            os.makedirs(self._path)

    def _list_dir(self):
        """return a list of (fully qualified) cache filenames
        """
        return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
                if not fn.endswith(self._fs_transaction_suffix)]

    def _prune(self):
        # When over the threshold: delete every entry that has expired and,
        # as a crude eviction heuristic, every third entry regardless of age.
        entries = self._list_dir()
        if len(entries) > self._threshold:
            now = time()
            for idx, fname in enumerate(entries):
                remove = False
                f = None
                try:
                    try:
                        # The first pickle in each file is the expiry time.
                        f = open(fname, 'rb')
                        expires = pickle.load(f)
                        remove = expires <= now or idx % 3 == 0
                    finally:
                        if f is not None:
                            f.close()
                except Exception:
                    pass
                if remove:
                    try:
                        os.remove(fname)
                    except (IOError, OSError):
                        pass

    def clear(self):
        for fname in self._list_dir():
            try:
                os.remove(fname)
            except (IOError, OSError):
                pass

    def _get_filename(self, key):
        # Keys are hashed so arbitrary strings map to safe file names.
        hash = md5(key).hexdigest()
        return os.path.join(self._path, hash)

    def get(self, key):
        filename = self._get_filename(key)
        try:
            f = open(filename, 'rb')
            try:
                # Each file stores (expiry timestamp, value) as two pickles.
                if pickle.load(f) >= time():
                    return pickle.load(f)
            finally:
                f.close()
            # Reached only when the entry has expired: drop it from disk.
            os.remove(filename)
        except Exception:
            # Missing or corrupt entries are treated as cache misses.
            return None

    def add(self, key, value, timeout = None):
        filename = self._get_filename(key)
        if not os.path.exists(filename):
            self.set(key, value, timeout)

    def set(self, key, value, timeout = None):
        if timeout is None:
            timeout = self.default_timeout
        filename = self._get_filename(key)
        self._prune()
        try:
            # Write to a temp file first, then rename over the target for an
            # atomic replacement (see posixemulation.rename).
            fd, tmp = tempfile.mkstemp(suffix = self._fs_transaction_suffix,
                                       dir = self._path)
            f = os.fdopen(fd, 'wb')
            try:
                pickle.dump(int(time() + timeout), f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            finally:
                f.close()
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            # Cache writes are best-effort: failures are silently ignored.
            pass

    def delete(self, key):
        try:
            os.remove(self._get_filename(key))
        except (IOError, OSError):
            pass
Saluev/cocos2d-gui | cocosgui/css/style.py | 1 | 5289 | from collections import defaultdict
from utility import to_words, from_words
from utility import expand_sided_value, collapse_sided_value
class StylesContainer(dict):
    """Base class for nested CSS-like style storage.

    A StylesContainer is a dict whose keys are restricted to ``subnames``;
    compound property names (e.g. ``'border-color'``) are resolved by
    splitting on the ``prefix`` and walking down nested containers, so the
    containers form a property tree.
    """
    # e. g. 'border', 'padding', etc.
    prefix = None
    # names that are available within prefix,
    # e. g. 'color' (full name 'border-color').
    subnames = []
    # default values for properties
    defaults = {}

    def __init__(self, something = None, **kwargs):
        # ``something`` may be a mapping (copied into the dict) or a CSS
        # shorthand value that is handed to set_to_value().
        self.node = None
        if isinstance(something, dict):
            super(StylesContainer, self).__init__(something, **kwargs)
        else:
            super(StylesContainer, self).__init__(**kwargs)
            if something is not None:
                self.set_to_value(something)

    def apply_to(self, node):
        """Attach *node* to this container and all nested containers."""
        self.node = node
        for subitem in self.values():
            if isinstance(subitem, StylesContainer):
                subitem.apply_to(node)

    def __str__(self):
        # Prefer the compact CSS value; fall back to the dict repr when the
        # container cannot be collapsed to a single value.
        try:
            as_value = self.get_as_value()
            if as_value == self:
                return repr(self)
            return str(as_value)
        except (NotImplementedError, ValueError):
            return repr(self)

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, dict.__repr__(self))

    def get_as_value(self):
        """Returns CSS representation of the whole style object.

        >>> Border().get_as_value()
        (0, 'none', 'transparent')
        """
        raise NotImplementedError # abstract

    def set_to_value(self, value):
        """Gets style object from representation.

        >>> b = Border()
        >>> b['border'] = 1, 'solid', 'black'
        >>> b['border-style']
        'solid'
        """
        raise NotImplementedError # abstract

    def on_change(self):
        # Hook invoked after every __setitem__; subclasses may override.
        pass

    def create_default(self, subname):
        # Instantiate and store a default only when the registered default
        # is callable (a factory); non-callable defaults yield None here.
        default = self.defaults.get(subname)
        if hasattr(default, '__call__'):
            result = default()
            self.set_by_subname(subname, result)
            return result
        else:
            return None

    def get_by_subname(self, subname):
        """Return the stored value for *subname*, creating its default lazily."""
        if subname not in self.subnames:
            raise KeyError(
                '%r not in %s' % (subname, self.prefix or 'style'))
        subobject = super(StylesContainer, self).get(subname)
        if subobject is None:
            subobject = self.create_default(subname)
        return subobject

    def set_by_subname(self, subname, value):
        """Store *value* under *subname*, validating it against ``subnames``."""
        if subname not in self.subnames:
            raise KeyError(
                '%r not in %s' % (subname, self.prefix or 'style'))
        super(StylesContainer, self).__setitem__(subname, value)

    def __getitem__(self, which):
        # Split e.g. 'border-color' into words and walk down the tree.
        words = to_words(which)
        if self.prefix is None:
            words = [None] + words
        assert(words[0] == self.prefix)
        if len(words) == 1:
            # The bare prefix addresses this container's own value.
            return self.get_as_value()
        subname = words[1]
        subobject = self.get_by_subname(subname)
        if isinstance(subobject, StylesContainer):
            return subobject[words[1:]]
        elif len(words) == 2:
            return subobject
        else:
            raise KeyError(which)

    def __setitem__(self, which, value):
        words = to_words(which)
        if self.prefix is None:
            words = [None] + words
        assert(words[0] == self.prefix)
        if len(words) == 1:
            self.set_to_value(value)
            return
        subname = words[1]
        currobject = self.get_by_subname(subname)
        if isinstance(currobject, StylesContainer):
            # Delegate the remaining path to the nested container.
            currobject[words[1:]] = value
        else:
            self.set_by_subname(subname, value)
        self.on_change()

    def __getattr__(self, which):
        # NOTE(review): dict defines no __getattr__, so the super() call
        # below always raises AttributeError (the duplicated ``self``
        # argument is never actually exercised) and the lookup falls
        # through to get_by_subname -- confirm before "fixing".
        try:
            return super(StylesContainer, self).__getattr__(self, which)
        except AttributeError:
            return self.get_by_subname(which)

    def update(self, other={}, **kwargs):
        """Deep-merge *other* and keyword arguments into this container."""
        # NOTE(review): mutable default argument is shared across calls but
        # is only read here, so it stays harmless as long as it is never
        # mutated in place.
        other = dict(other)
        other.update(kwargs)
        for key, value in other.items():
            if isinstance(value, StylesContainer):
                own = self.get_by_subname(key)
                if own is None:
                    self.set_by_subname(key, value)
                else:
                    own.update(value)
            elif key in self.subnames:
                own = self.get_by_subname(key)
                if isinstance(own, StylesContainer):
                    own.set_to_value(value)
                else:
                    self.set_by_subname(key, value)
            else:
                # Compound names ('border-color') go through __setitem__.
                self.__setitem__(key, value)
class SidedStylesContainer(StylesContainer):
    """Styles container whose sub-properties are the four box sides."""
    subnames = ['top', 'right', 'bottom', 'left']
    # WARNING: order is important here. this order
    # corresponds to CSS specifications.
class _AbstractIndent(SidedStylesContainer):
    """Shared implementation for the four-sided indent properties (margin, padding)."""

    def get_as_value(self):
        """Collapse the four side values into a CSS shorthand.

        Raises ValueError when any side is unset.
        """
        # Build a real list rather than a lazy ``map`` object: under
        # Python 3 the ``None in values`` membership test would consume the
        # iterator before collapse_sided_value() ever saw the values.
        values = [self.get_by_subname(subname) for subname in self.subnames]
        if None in values:
            raise ValueError
        return collapse_sided_value(values)

    def set_to_value(self, value):
        """Expand a CSS shorthand value and assign it to the four sides."""
        value = expand_sided_value(value)
        for i, subname in enumerate(self.subnames):
            self.set_by_subname(subname, value[i])
class Margin(_AbstractIndent):
    """CSS 'margin' shorthand: outer spacing on all four sides."""
    prefix = 'margin'
class Padding(_AbstractIndent):
    """CSS 'padding' shorthand: inner spacing on all four sides."""
    prefix = 'padding'
class Style(StylesContainer):
    """Top-level style object: the prefix-less root of the property tree."""
    defaults = {
        'margin' : Margin,
        'padding': Padding,
    }
    subnames = [
        'display', 'position',
        'left', 'top', 'width', 'height',
        'margin', 'padding',
    ]

    def get_as_value(self):
        # The root style has no single CSS value; it represents itself.
        return self

    def set_to_value(self, value):
        # Assigning a mapping to the whole style deep-merges it in.
        self.update(value)
# Global registry mapping CSS selector -> Style; unknown selectors lazily
# receive a fresh empty Style via the defaultdict factory.
styles = defaultdict(Style)
# Defaults for the universal selector, mirroring CSS initial values.
styles['*'] = Style({
    'width': 'auto',
    'height': 'auto',
    'margin': Margin(0),
    'padding': Padding(0),
    'display': 'block',
    'position': 'static',
    'left': 'auto',
    'top': 'auto',
})
| mit |
tereka114/chainer | tests/functions_tests/test_sigmoid_cross_entropy.py | 1 | 2567 | import math
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer.testing import attr
from chainer.testing import condition
if cuda.available:
    # Initialize the CUDA runtime once, up front, for all GPU tests below.
    cuda.init()
class TestSigmoidCrossEntropy(unittest.TestCase):
    """Checks forward/backward of F.sigmoid_cross_entropy on CPU and GPU
    (with and without cuDNN) against a NumPy/math reference implementation.
    """

    def setUp(self):
        # Random logits in [-1, 1) and binary targets for a 4x3 batch.
        self.x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 2, (4, 3)).astype(numpy.int32)

    def check_forward(self, x_data, t_data, use_cudnn=True):
        x_val = chainer.Variable(x_data)
        t_val = chainer.Variable(t_data)
        loss = functions.sigmoid_cross_entropy(x_val, t_val, use_cudnn)
        # The loss must be a float32 scalar.
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value
        # Numerically stable form of -(t*log(sig(x)) + (1-t)*log(1-sig(x))),
        # summed element-wise and averaged over the batch dimension.
        loss_expect = 0
        for i in six.moves.range(self.x.shape[0]):
            for j in six.moves.range(self.x.shape[1]):
                xd, td = self.x[i, j], self.t[i, j]
                loss_expect -= xd * (td - (xd >= 0)) \
                    - math.log(1 + math.exp(-numpy.abs(xd)))
        loss_expect /= self.t.shape[0]

        self.assertAlmostEqual(loss_expect, loss_value, places=5)

    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.x, self.t)

    @attr.cudnn
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu_no_cudnn(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)

    def check_backward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.sigmoid_cross_entropy(x, t, use_cudnn)
        loss.backward()
        # Integer targets must not receive a gradient.
        self.assertEqual(None, t.grad)

        # Compare the analytic gradient against a numerical one.
        func = loss.creator
        f = lambda: func.forward((x.data, t.data))
        gx, = gradient_check.numerical_grad(f, (x.data,), (1,), eps=0.01)

        gradient_check.assert_allclose(gx, x.grad)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.t)

    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_no_cudnn(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)
| mit |
gedare/GEMS | gems-2.1.1/jgraph/examples.py | 6 | 1650 | #!/s/std/bin/python
# This is an example of how to use the functions from mfgraph to
# generate jgraph images. Run this script and look in examples.pdf
# for two graphs. If you want to generate an eps, you can only
# generate one graph at a time. To do this comment out the line below
# and un-comment the line below it.
graphs = ["graph1", "graph2"]
# NOTE(review): this immediately overrides the line above, so only "graph1"
# is generated -- deliberate, per the instructions in the comment block.
graphs = ["graph1"]
import sys, string, os, glob, re, mfgraph
def generate_line_example(jgraphs, title):
    """Append a two-series demo line graph, titled *title*, to *jgraphs*."""
    demo_lines = [
        ["line1", [10, 10], [20, 20], [30, 30]],
        ["line2", [10, 5], [20, 25], [30, 80]],
    ]
    jgraphs.append(mfgraph.line_graph(demo_lines,
                                      title=title,
                                      xlabel="x label",
                                      ylabel="y label",
                                      ))
def generate_bar_example(jgraphs):
    """Append a demo stacked-bar graph (one bar has error bars) to *jgraphs*."""
    group_one = ["group1",
                 ["bar1", 20, 10, 5],
                 ["bar2", 10, 5, 2.5],
                 ]
    group_two = ["group2",
                 ["bar1", 80, 40, 10],
                 # The [100, 90, 110] triple draws an error bar on this bar.
                 ["bar2", [100, 90, 110], 50, 10],
                 ["bar3", 30, 25, 5],
                 ]
    jgraphs.append(mfgraph.stacked_bar_graph(
        [group_one, group_two],
        bar_segment_labels=["segment1", "segment2", "segment3"],
        xsize=5.0,
    ))
# Build every requested graph, then hand the combined source to jgraph.
jgraph_input = []
for graph in graphs:
    generate_line_example(jgraph_input, graph)
generate_bar_example(jgraph_input)
# "newpage" separates multiple graphs within one jgraph input stream.
mfgraph.run_jgraph("newpage\n".join(jgraph_input), "examples")
| gpl-2.0 |
wujuguang/motor | test/asyncio_tests/test_asyncio_cursor.py | 1 | 22402 | # Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test AsyncIOMotorCursor."""
import asyncio
import sys
import traceback
import unittest
import warnings
from functools import partial
from unittest import SkipTest
import bson
from pymongo import CursorType
from pymongo.errors import InvalidOperation, ExecutionTimeout
from pymongo.errors import OperationFailure
from motor import motor_asyncio
from test.utils import one, safe_get, get_primary_pool, TestListener
from test.asyncio_tests import (asyncio_test,
AsyncIOTestCase,
AsyncIOMockServerTestCase,
server_is_mongos,
get_command_line)
class TestAsyncIOCursor(AsyncIOMockServerTestCase):
    def test_cursor(self):
        # find() must return a lazy AsyncIOMotorCursor: no server round
        # trip happens until the cursor is first iterated.
        cursor = self.collection.find()
        self.assertTrue(isinstance(cursor, motor_asyncio.AsyncIOMotorCursor))
        self.assertFalse(cursor.started, "Cursor shouldn't start immediately")
@asyncio_test
def test_count(self):
yield from self.make_test_data()
coll = self.collection
self.assertEqual(
100,
(yield from coll.count_documents({'_id': {'$gt': 99}})))
@asyncio_test
def test_fetch_next(self):
yield from self.make_test_data()
coll = self.collection
# 200 results, only including _id field, sorted by _id.
cursor = coll.find({}, {'_id': 1}).sort('_id').batch_size(75)
self.assertEqual(None, cursor.cursor_id)
self.assertEqual(None, cursor.next_object()) # Haven't fetched yet.
i = 0
while (yield from cursor.fetch_next):
self.assertEqual({'_id': i}, cursor.next_object())
i += 1
# With batch_size 75 and 200 results, cursor should be exhausted on
# the server by third fetch.
if i <= 150:
self.assertNotEqual(0, cursor.cursor_id)
else:
self.assertEqual(0, cursor.cursor_id)
self.assertEqual(False, (yield from cursor.fetch_next))
self.assertEqual(None, cursor.next_object())
self.assertEqual(0, cursor.cursor_id)
self.assertEqual(200, i)
@unittest.skipUnless(sys.version_info >= (3, 4), "Python 3.4 required")
@unittest.skipIf('PyPy' in sys.version, "PyPy")
@asyncio_test
def test_fetch_next_delete(self):
client, server = self.client_server(auto_ismaster=True)
cursor = client.test.coll.find()
self.fetch_next(cursor)
request = yield from self.run_thread(server.receives, "find", "coll")
request.replies({"cursor": {
"id": 123,
"ns": "db.coll",
"firstBatch": [{"_id": 1}]}})
# Decref the cursor and clear from the event loop.
del cursor
yield
request = yield from self.run_thread(
server.receives, "killCursors", "coll")
request.ok()
@asyncio_test
def test_fetch_next_without_results(self):
coll = self.collection
# Nothing matches this query.
cursor = coll.find({'foo': 'bar'})
self.assertEqual(None, cursor.next_object())
self.assertEqual(False, (yield from cursor.fetch_next))
self.assertEqual(None, cursor.next_object())
# Now cursor knows it's exhausted.
self.assertEqual(0, cursor.cursor_id)
@asyncio_test
def test_fetch_next_is_idempotent(self):
# Subsequent calls to fetch_next don't do anything
yield from self.make_test_data()
coll = self.collection
cursor = coll.find()
self.assertEqual(None, cursor.cursor_id)
yield from cursor.fetch_next
self.assertTrue(cursor.cursor_id)
self.assertEqual(101, cursor._buffer_size())
yield from cursor.fetch_next # Does nothing
self.assertEqual(101, cursor._buffer_size())
yield from cursor.close()
@asyncio_test
def test_fetch_next_exception(self):
coll = self.collection
cursor = coll.find()
cursor.delegate._Cursor__id = 1234 # Not valid on server.
with self.assertRaises(OperationFailure):
yield from cursor.fetch_next
# Avoid the cursor trying to close itself when it goes out of scope.
cursor.delegate._Cursor__id = None
@asyncio_test(timeout=30)
def test_each(self):
yield from self.make_test_data()
cursor = self.collection.find({}, {'_id': 1}).sort('_id')
future = asyncio.Future(loop=self.loop)
results = []
def callback(result, error):
if error:
raise error
if result is not None:
results.append(result)
else:
# Done iterating.
future.set_result(True)
cursor.each(callback)
yield from future
expected = [{'_id': i} for i in range(200)]
self.assertEqual(expected, results)
@asyncio_test
def test_to_list_argument_checking(self):
# We need more than 10 documents so the cursor stays alive.
yield from self.make_test_data()
coll = self.collection
cursor = coll.find()
with self.assertRaises(ValueError):
yield from cursor.to_list(-1)
with self.assertRaises(TypeError):
yield from cursor.to_list('foo')
@asyncio_test
def test_to_list_with_length(self):
yield from self.make_test_data()
coll = self.collection
cursor = coll.find().sort('_id')
def expected(start, stop):
return [{'_id': i} for i in range(start, stop)]
self.assertEqual(expected(0, 10), (yield from cursor.to_list(10)))
self.assertEqual(expected(10, 100), (yield from cursor.to_list(90)))
# Test particularly rigorously around the 101-doc mark, since this is
# where the first batch ends
self.assertEqual(expected(100, 101), (yield from cursor.to_list(1)))
self.assertEqual(expected(101, 102), (yield from cursor.to_list(1)))
self.assertEqual(expected(102, 103), (yield from cursor.to_list(1)))
self.assertEqual([], (yield from cursor.to_list(0)))
self.assertEqual(expected(103, 105), (yield from cursor.to_list(2)))
# Only 95 docs left, make sure length=100 doesn't error or hang
self.assertEqual(expected(105, 200), (yield from cursor.to_list(100)))
self.assertEqual(0, cursor.cursor_id)
# Nothing left.
self.assertEqual([], (yield from cursor.to_list(100)))
yield from cursor.close()
@asyncio_test
def test_to_list_exc_info(self):
yield from self.make_test_data()
coll = self.collection
cursor = coll.find()
yield from cursor.to_list(length=10)
yield from self.collection.drop()
try:
yield from cursor.to_list(length=None)
except OperationFailure:
_, _, tb = sys.exc_info()
# The call tree should include PyMongo code we ran on a thread.
formatted = '\n'.join(traceback.format_tb(tb))
self.assertTrue('_unpack_response' in formatted
or '_check_command_response' in formatted)
@asyncio_test
def test_to_list_with_length_of_none(self):
yield from self.make_test_data()
collection = self.collection
cursor = collection.find()
docs = yield from cursor.to_list(None) # Unlimited.
count = yield from collection.count_documents({})
self.assertEqual(count, len(docs))
@asyncio_test
def test_to_list_tailable(self):
coll = self.collection
cursor = coll.find(cursor_type=CursorType.TAILABLE)
# Can't call to_list on tailable cursor.
with self.assertRaises(InvalidOperation):
yield from cursor.to_list(10)
@asyncio_test
def test_cursor_explicit_close(self):
client, server = self.client_server(auto_ismaster=True)
collection = client.test.coll
cursor = collection.find()
future = self.fetch_next(cursor)
self.assertTrue(cursor.alive)
request = yield from self.run_thread(server.receives, "find", "coll")
request.replies({"cursor": {
"id": 123,
"ns": "db.coll",
"firstBatch": [{"_id": 1}]}})
self.assertTrue((yield from future))
self.assertEqual(123, cursor.cursor_id)
future = self.ensure_future(cursor.close())
# No reply to OP_KILLCURSORS.
request = yield from self.run_thread(
server.receives, "killCursors", "coll")
request.ok()
yield from future
# Cursor reports it's alive because it has buffered data, even though
# it's killed on the server.
self.assertTrue(cursor.alive)
self.assertEqual({'_id': 1}, cursor.next_object())
self.assertFalse((yield from cursor.fetch_next))
self.assertFalse(cursor.alive)
@asyncio_test
def test_each_cancel(self):
yield from self.make_test_data()
loop = self.loop
collection = self.collection
results = []
future = asyncio.Future(loop=self.loop)
def cancel(result, error):
if error:
future.set_exception(error)
else:
results.append(result)
loop.call_soon(canceled)
return False # Cancel iteration.
def canceled():
try:
self.assertFalse(cursor.delegate._Cursor__killed)
self.assertTrue(cursor.alive)
# Resume iteration
cursor.each(each)
except Exception as e:
future.set_exception(e)
def each(result, error):
if error:
future.set_exception(error)
elif result:
pass
results.append(result)
else:
# Complete
future.set_result(None)
cursor = collection.find()
cursor.each(cancel)
yield from future
self.assertEqual((yield from collection.count_documents({})),
len(results))
@asyncio_test
def test_rewind(self):
yield from self.collection.insert_many([{}, {}, {}])
cursor = self.collection.find().limit(2)
count = 0
while (yield from cursor.fetch_next):
cursor.next_object()
count += 1
self.assertEqual(2, count)
cursor.rewind()
count = 0
while (yield from cursor.fetch_next):
cursor.next_object()
count += 1
self.assertEqual(2, count)
cursor.rewind()
count = 0
while (yield from cursor.fetch_next):
cursor.next_object()
break
cursor.rewind()
while (yield from cursor.fetch_next):
cursor.next_object()
count += 1
self.assertEqual(2, count)
self.assertEqual(cursor, cursor.rewind())
@unittest.skipUnless(sys.version_info >= (3, 4), "Python 3.4 required")
@unittest.skipIf("PyPy" in sys.version, "PyPy")
@asyncio_test
def test_cursor_del(self):
client, server = self.client_server(auto_ismaster=True)
cursor = client.test.coll.find()
future = self.fetch_next(cursor)
request = yield from self.run_thread(server.receives, "find", "coll")
request.replies({"cursor": {
"id": 123,
"ns": "db.coll",
"firstBatch": [{"_id": 1}]}})
yield from future # Complete the first fetch.
# Dereference the cursor.
del cursor
# Let the event loop iterate once more to clear its references to
# callbacks, allowing the cursor to be freed.
yield from asyncio.sleep(0, loop=self.loop)
request = yield from self.run_thread(
server.receives, "killCursors", "coll")
request.ok()
@unittest.skipUnless(sys.version_info >= (3, 4), "Python 3.4 required")
@asyncio_test
def test_exhaust(self):
if (yield from server_is_mongos(self.cx)):
self.assertRaises(InvalidOperation,
self.db.test.find, cursor_type=CursorType.EXHAUST)
return
self.assertRaises(ValueError, self.db.test.find, cursor_type=5)
cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
self.assertRaises(InvalidOperation, cur.limit, 5)
cur = self.db.test.find(limit=5)
self.assertRaises(InvalidOperation, cur.add_option, 64)
cur = self.db.test.find()
cur.add_option(64)
self.assertRaises(InvalidOperation, cur.limit, 5)
yield from self.db.drop_collection("test")
# Insert enough documents to require more than one batch.
yield from self.db.test.insert_many([{} for _ in range(150)])
client = self.asyncio_client(maxPoolSize=1)
# Ensure a pool.
yield from client.db.collection.find_one()
socks = get_primary_pool(client).sockets
# Make sure the socket is returned after exhaustion.
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
has_next = yield from cur.fetch_next
self.assertTrue(has_next)
self.assertEqual(0, len(socks))
while (yield from cur.fetch_next):
cur.next_object()
self.assertEqual(1, len(socks))
# Same as previous but with to_list instead of next_object.
docs = yield from client[self.db.name].test.find(
cursor_type=CursorType.EXHAUST).to_list(
None)
self.assertEqual(1, len(socks))
self.assertEqual(
(yield from self.db.test.count_documents({})),
len(docs))
# If the Cursor instance is discarded before being
# completely iterated we have to close and
# discard the socket.
sock = one(socks)
cur = client[self.db.name].test.find(
cursor_type=CursorType.EXHAUST).batch_size(1)
has_next = yield from cur.fetch_next
self.assertTrue(has_next)
self.assertEqual(0, len(socks))
if 'PyPy' in sys.version:
# Don't wait for GC or use gc.collect(), it's unreliable.
yield from cur.close()
del cur
yield from asyncio.sleep(0.1, loop=self.loop)
# The exhaust cursor's socket was discarded, although another may
# already have been opened to send OP_KILLCURSORS.
self.assertNotIn(sock, socks)
self.assertTrue(sock.closed)
@asyncio_test
def test_close_with_docs_in_batch(self):
# MOTOR-67 Killed cursor with docs batched is "alive", don't kill again.
yield from self.make_test_data() # Ensure multiple batches.
cursor = self.collection.find()
yield from cursor.fetch_next
yield from cursor.close() # Killed but still "alive": has a batch.
self.cx.close()
with warnings.catch_warnings(record=True) as w:
del cursor # No-op, no error.
self.assertEqual(0, len(w))
@asyncio_test
def test_aggregate_batch_size(self):
listener = TestListener()
cx = self.asyncio_client(event_listeners=[listener])
c = cx.motor_test.collection
yield from c.delete_many({})
yield from c.insert_many({'_id': i} for i in range(3))
# Two ways of setting batchSize.
cursor0 = c.aggregate([{'$sort': {'_id': 1}}]).batch_size(2)
cursor1 = c.aggregate([{'$sort': {'_id': 1}}], batchSize=2)
for cursor in cursor0, cursor1:
lst = []
while (yield from cursor.fetch_next):
lst.append(cursor.next_object())
self.assertEqual(lst, [{'_id': 0}, {'_id': 1}, {'_id': 2}])
aggregate = listener.first_command_started('aggregate')
self.assertEqual(aggregate.command['cursor']['batchSize'], 2)
getMore = listener.first_command_started('getMore')
self.assertEqual(getMore.command['batchSize'], 2)
@asyncio_test
def test_raw_batches(self):
c = self.collection
yield from c.delete_many({})
yield from c.insert_many({'_id': i} for i in range(4))
find = partial(c.find_raw_batches, {})
agg = partial(c.aggregate_raw_batches, [{'$sort': {'_id': 1}}])
for method in find, agg:
cursor = method().batch_size(2)
yield from cursor.fetch_next
batch = cursor.next_object()
self.assertEqual([{'_id': 0}, {'_id': 1}], bson.decode_all(batch))
lst = yield from method().batch_size(2).to_list(length=1)
self.assertEqual([{'_id': 0}, {'_id': 1}], bson.decode_all(lst[0]))
class TestAsyncIOCursorMaxTimeMS(AsyncIOTestCase):
def setUp(self):
super(TestAsyncIOCursorMaxTimeMS, self).setUp()
self.loop.run_until_complete(self.maybe_skip())
def tearDown(self):
self.loop.run_until_complete(self.disable_timeout())
super(TestAsyncIOCursorMaxTimeMS, self).tearDown()
@asyncio.coroutine
def maybe_skip(self):
if (yield from server_is_mongos(self.cx)):
raise SkipTest("mongos has no maxTimeAlwaysTimeOut fail point")
cmdline = yield from get_command_line(self.cx)
if '1' != safe_get(cmdline, 'parsed.setParameter.enableTestCommands'):
if 'enableTestCommands=1' not in cmdline['argv']:
raise SkipTest("testing maxTimeMS requires failpoints")
@asyncio.coroutine
def enable_timeout(self):
yield from self.cx.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
@asyncio.coroutine
def disable_timeout(self):
yield from self.cx.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
@asyncio_test
def test_max_time_ms_query(self):
# Cursor parses server timeout error in response to initial query.
yield from self.enable_timeout()
cursor = self.collection.find().max_time_ms(100000)
with self.assertRaises(ExecutionTimeout):
yield from cursor.fetch_next
cursor = self.collection.find().max_time_ms(100000)
with self.assertRaises(ExecutionTimeout):
yield from cursor.to_list(10)
with self.assertRaises(ExecutionTimeout):
yield from self.collection.find_one(max_time_ms=100000)
@asyncio_test(timeout=60)
def test_max_time_ms_getmore(self):
# Cursor handles server timeout during getmore, also.
yield from self.collection.insert_many({} for _ in range(200))
try:
# Send initial query.
cursor = self.collection.find().max_time_ms(100000)
yield from cursor.fetch_next
cursor.next_object()
# Test getmore timeout.
yield from self.enable_timeout()
with self.assertRaises(ExecutionTimeout):
while (yield from cursor.fetch_next):
cursor.next_object()
yield from cursor.close()
# Send another initial query.
yield from self.disable_timeout()
cursor = self.collection.find().max_time_ms(100000)
yield from cursor.fetch_next
cursor.next_object()
# Test getmore timeout.
yield from self.enable_timeout()
with self.assertRaises(ExecutionTimeout):
yield from cursor.to_list(None)
# Avoid 'IOLoop is closing' warning.
yield from cursor.close()
finally:
# Cleanup.
yield from self.disable_timeout()
yield from self.collection.delete_many({})
@asyncio_test
def test_max_time_ms_each_query(self):
# Cursor.each() handles server timeout during initial query.
yield from self.enable_timeout()
cursor = self.collection.find().max_time_ms(100000)
future = asyncio.Future(loop=self.loop)
def callback(result, error):
if error:
future.set_exception(error)
elif not result:
# Done.
future.set_result(None)
with self.assertRaises(ExecutionTimeout):
cursor.each(callback)
yield from future
@asyncio_test(timeout=30)
def test_max_time_ms_each_getmore(self):
# Cursor.each() handles server timeout during getmore.
yield from self.collection.insert_many({} for _ in range(200))
try:
# Send initial query.
cursor = self.collection.find().max_time_ms(100000)
yield from cursor.fetch_next
cursor.next_object()
future = asyncio.Future(loop=self.loop)
def callback(result, error):
if error:
future.set_exception(error)
elif not result:
# Done.
future.set_result(None)
yield from self.enable_timeout()
with self.assertRaises(ExecutionTimeout):
cursor.each(callback)
yield from future
yield from cursor.close()
finally:
# Cleanup.
yield from self.disable_timeout()
yield from self.collection.delete_many({})
    def test_iter(self):
        # Motor cursors are async-only: synchronous iteration must raise
        # TypeError rather than silently blocking the event loop.
        with self.assertRaises(TypeError):
            for _ in self.db.test.find():
                pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ssarangi/numba | numba/cuda/tests/cudapy/test_forall.py | 3 | 1068 | from __future__ import print_function, absolute_import
from numba import cuda
import numba.unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
import numpy
@skip_on_cudasim('forall API unsupported in the simulator')
class TestForAll(unittest.TestCase):
    """Tests for the kernel.forall(n) automatic launch-configuration API."""

    def test_forall_1(self):
        # forall(arr.size) must pick a launch configuration that covers
        # every element; the bounds check guards any extra threads.
        @cuda.jit
        def foo(x):
            i = cuda.grid(1)
            if i < x.size:
                x[i] += 1

        arr = numpy.arange(11)
        orig = arr.copy()
        foo.forall(arr.size)(arr)
        self.assertTrue(numpy.all(arr == orig + 1))

    def test_forall_2(self):
        # Same, with an explicitly typed signature (saxpy: y = a*x + y).
        @cuda.jit("void(float32, float32[:], float32[:])")
        def bar(a, x, y):
            i = cuda.grid(1)
            if i < x.size:
                y[i] = a * x[i] + y[i]

        x = numpy.arange(13, dtype=numpy.float32)
        y = numpy.arange(13, dtype=numpy.float32)
        oldy = y.copy()
        a = 1.234
        bar.forall(y.size)(a, x, y)
        self.assertTrue(numpy.all(y == (a * x + oldy)))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
AnimeshSinha1309/WebsiteEdunet | WebsiteEdunet/env/Lib/site-packages/django/contrib/admindocs/middleware.py | 477 | 1198 | from django import http
from django.conf import settings
class XViewMiddleware(object):
    """
    Adds an X-View header to internal HEAD requests -- used by the
    documentation system.
    """

    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        If the request method is HEAD and either the IP is internal or the
        user is a logged-in staff member, quickly return with an x-header
        indicating the view function. This is used by the documentation
        module to lookup the view function for an arbitrary page.
        """
        assert hasattr(request, 'user'), (
            "The XView middleware requires authentication middleware to be "
            "installed. Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.auth.middleware.AuthenticationMiddleware'.")
        # Guard clauses replace the original compound condition; the user
        # attributes are only touched when the IP check fails, preserving
        # the original short-circuit behaviour.
        if request.method != 'HEAD':
            return None
        allowed = request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
        if not allowed:
            allowed = request.user.is_active and request.user.is_staff
        if not allowed:
            return None
        response = http.HttpResponse()
        response['X-View'] = "%s.%s" % (view_func.__module__,
                                        view_func.__name__)
        return response
| mit |
sandeepgupta2k4/tensorflow | tensorflow/contrib/keras/python/keras/layers/lstm_test.py | 28 | 6709 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.keras.python.keras import testing_utils
from tensorflow.python.platform import test
class LSTMLayerTest(test.TestCase):
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_implementation_mode_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
for mode in [0, 1, 2]:
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_statefulness_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.LSTM
with self.test_session():
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_regularization_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
with self.test_session():
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((2, 3, 2))))
self.assertEqual(len(layer.losses), 4)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=keras.constraints.max_norm(0.01),
recurrent_constraint=keras.constraints.max_norm(0.01),
bias_constraint='max_norm')
layer.build((None, None, embedding_dim))
self.assertEqual(len(layer.constraints), 3)
def test_with_masking_layer_LSTM(self):
layer_class = keras.layers.LSTM
with self.test_session():
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = keras.layers.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
if __name__ == '__main__':
test.main()
| apache-2.0 |
# Client-side retry/timeout configuration for the ImageAnnotator GAPIC
# client.  Consumed by the generated transport layer to build per-method
# retry and timeout policies.
config = {
    "interfaces": {
        "google.cloud.vision.v1p3beta1.ImageAnnotator": {
            # Which gRPC status codes may be retried, per method class.
            "retry_codes": {
                "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
                "non_idempotent": [],
            },
            # Exponential-backoff parameters; all durations in milliseconds.
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 60000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 60000,
                    "total_timeout_millis": 600000,
                }
            },
            # Per-RPC wiring of timeout + retry class + backoff params.
            "methods": {
                "BatchAnnotateImages": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "idempotent",
                    "retry_params_name": "default",
                },
                "AsyncBatchAnnotateFiles": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default",
                },
            },
        }
    }
}
| apache-2.0 |
datapythonista/pandas | pandas/tests/series/methods/test_unique.py | 4 | 1432 | import numpy as np
from pandas import (
Categorical,
Series,
)
import pandas._testing as tm
class TestUnique:
    """Tests for Series.unique across dtypes and edge cases."""

    def test_unique_data_ownership(self):
        # GH#1807: sorting the unique values of another Series must not
        # blow up on shared data -- it should simply work.
        Series(Series(["a", "c", "b"]).unique()).sort_values()

    def test_unique(self):
        # GH#714: repeated NaNs collapse to a single entry, dtype=float.
        ser = Series([1.2345] * 100)
        ser[::2] = np.nan
        assert len(ser.unique()) == 2

        # Same check with an explicit float32 dtype.
        ser = Series([1.2345] * 100, dtype="f4")
        ser[::2] = np.nan
        assert len(ser.unique()) == 2

    def test_unique_nan_object_dtype(self):
        # GH#714: NaNs in an object-dtype Series also deduplicate.
        ser = Series(["foo"] * 100, dtype="O")
        ser[::2] = np.nan
        assert len(ser.unique()) == 2

    def test_unique_none(self):
        # None is preserved as its own distinct value in object dtype.
        ser = Series([1, 2, 3, None, None, None], dtype=object)
        expected = np.array([1, 2, 3, None], dtype=object)
        tm.assert_numpy_array_equal(ser.unique(), expected)

    def test_unique_categorical(self):
        # GH#18051: unique() on a categorical Series returns a Categorical,
        # both for an empty categorical and for one holding only NaN.
        for values in ([], [np.nan]):
            cat = Categorical(values)
            result = Series(cat).unique()
            tm.assert_categorical_equal(result, cat)
| bsd-3-clause |
knappador/billing-example | app/billing/androidbilling.py | 1 | 13627 | from kivy.app import App
from kivy.event import EventDispatcher
from kivy.clock import Clock
from kivy.properties import DictProperty
from kivy.logger import Logger
import netcheck
import toast
from jnius import autoclass, PythonJavaClass, java_method, cast
from android import activity
from functools import partial
context = autoclass('org.renpy.android.PythonActivity').mActivity
IabHelper = autoclass('org.kivy.billing.IabHelper')
IabResults = autoclass('org.kivy.billing.IabResult')
Inventory = autoclass('org.kivy.billing.Inventory')
Purchase = autoclass('org.kivy.billing.Purchase')
''' There is a big difference between the twitter module. All twitter
callbacks return through one listener that implements an interface. There
are many fewer places where an object can call back from Java to an object that
was already CG'd in Python. Either sync the Python GC and Java GC or be sure
to follow the Twitter4J style all-in-one listener architecture when implementing
Java objects. This is my advice for writing PyJNIus integrations for now.
Since every callback is it's own object here and there is no error callback,
every callback has to be stored in _refs to keep it alive when it goes out of
the scope where it was created '''
# constants
TIMEOUT = 120.0 # seconds to either succeed or fail
#implement save if you purchase (and consume) things without
#using them. Alternatively implement the inventory without
#consuming items until the user uses them.
#SAVE_PATH = './billing.json'
DEBUG=True
# since our callbacks from Java don't keep their Python
# from getting GC'd, we have to keep refs
_refs = []
# we remove refs when they are called to allow gc
def _allow_gc(fn):
    """Decorator for one-shot Java callback methods.

    Invokes the wrapped callback, then removes the callback object from
    the module-level ``_refs`` keep-alive list so Python may garbage
    collect it.  The removal happens in a ``finally`` block so that a
    callback that raises cannot leak its keep-alive reference (the
    original version skipped the removal on error).  The wrapped
    function's return value is propagated.
    """
    import functools

    @functools.wraps(fn)
    def checked(self, *args, **kwargs):
        try:
            return fn(self, *args, **kwargs)
        finally:
            _refs.remove(self)
    return checked
def _protect_callback(new_callback):
    '''Pin *new_callback* in the module-level _refs list to keep it alive.

    PyJNIus proxies hold no strong Python reference, so without this the
    callback object could be garbage-collected before Java calls back
    into it; _allow_gc drops the reference once the callback has fired.
    '''
    _refs.append(new_callback)
# Java callbacks that call back into the provided Python callbacks
class _OnIabSetupFinishedListener(PythonJavaClass):
    # Bridges IabHelper's Java setup-finished event to a Python callable.
    __javainterfaces__ = ['org.kivy.billing.IabHelper$OnIabSetupFinishedListener']
    __javacontext__ = 'app'

    def __init__(self, callback):
        # callback(result) fires once with the Java IabResult.
        self.callback = callback
        super(_OnIabSetupFinishedListener, self).__init__()

    @java_method('(Lorg/kivy/billing/IabResult;)V')
    @_allow_gc
    def onIabSetupFinished(self, result):
        self.callback(result)
class _QueryInventoryFinishedListener(PythonJavaClass):
    # Bridges IabHelper's Java inventory-query event to a Python callable.
    __javainterfaces__ = ['org.kivy.billing.IabHelper$QueryInventoryFinishedListener']
    __javacontext__ = 'app'

    def __init__(self, callback):
        # callback(result, inventory) fires once with the Java objects.
        self.callback = callback
        super(_QueryInventoryFinishedListener, self).__init__()

    @java_method('(Lorg/kivy/billing/IabResult;Lorg/kivy/billing/Inventory;)V')
    @_allow_gc
    def onQueryInventoryFinished(self, result, inventory):
        self.callback(result, inventory)
class _OnPurchaseFinishedListener(PythonJavaClass):
    ''' Bridges IabHelper's Java purchase-finished event to Python.

    NOTE(original author): this one seems to blow up inside the IabHelper
    OnActivityResult.
    '''
    __javainterfaces__ = ['org.kivy.billing.IabHelper$OnIabPurchaseFinishedListener']
    __javacontext__ = 'app'

    def __init__(self, callback):
        # callback(result, purchase) fires once with the Java objects.
        self.callback = callback
        super(_OnPurchaseFinishedListener, self).__init__()

    @java_method('(Lorg/kivy/billing/IabResult;Lorg/kivy/billing/Purchase;)V')
    @_allow_gc
    def onIabPurchaseFinished(self, result, purchase):
        self.callback(result, purchase)
class _OnConsumeFinishedListener(PythonJavaClass):
    # Bridges IabHelper's Java consume-finished event to a Python callable.
    __javainterfaces__ = ['org.kivy.billing.IabHelper$OnConsumeFinishedListener']
    __javacontext__ = 'app'

    def __init__(self, callback):
        # callback(purchase, result) fires once; note the argument order
        # matches the Java interface (Purchase first).
        self.callback = callback
        super(_OnConsumeFinishedListener, self).__init__()

    @java_method('(Lorg/kivy/billing/Purchase;Lorg/kivy/billing/IabResult;)V')
    @_allow_gc
    def onConsumeFinished(self, purchase, result):
        self.callback(purchase, result)
class AndroidBilling(EventDispatcher):
    """Google Play in-app billing workflow built on org.kivy.billing.IabHelper.

    All items are treated as consumables: every owned item found in the
    inventory (or bought through :meth:`purchase`) is consumed straight
    away, and the observable ``consumed`` property counts successful
    consumptions per sku.
    """
    # Observable map of sku -> number of successful consumptions.
    consumed = DictProperty()
    def __init__(self,
                 app_public_key,
                 skus,
                 auto_check_inventory=10,
                 toasty=True,
                 **kwargs):
        """
        :param app_public_key: base64 app public key from the Play console.
        :param skus: list of sku strings this app can sell.
        :param auto_check_inventory: seconds until the first automatic
            setup/inventory check; negative disables automatic setup.
        :param toasty: show Android toast feedback to the user.
        """
        # NOTE(review): the original never called EventDispatcher.__init__;
        # forward **kwargs so Kivy property keyword arguments work.
        super(AndroidBilling, self).__init__(**kwargs)
        self.app_public_key = app_public_key
        self.skus = skus
        self.toasty = toasty
        # This shouldn't collide, but I will pay you $2 if it does
        # for the first occurrence ever. After that, you should fix
        # the code to something more unique =)
        self.r_code = abs(hash('org.kivy.billing'))
        # internal state initialize
        self.purchase_requested = None
        self.syncing = False
        self.setup_complete = False
        self.error_msg = 'there was an error'
        # Inventory leftovers still to be consumed.  BUG FIX: the original
        # left this unset until the first inventory query, so a direct
        # purchase before that raised AttributeError in _consume_finished.
        self.purchases = []
        if auto_check_inventory >= 0:
            Clock.schedule_once(self._setup, auto_check_inventory)
    def purchase(self, sku):
        """Start (or queue) a purchase of *sku*.

        Returns False if a purchase is already in flight, True otherwise.
        In DEBUG mode the static Android test sku is bought instead and
        nothing is actually charged.
        """
        if DEBUG:
            self.debug_sku = sku
            sku = 'android.test.purchased'
            if sku not in self.skus:
                self.skus.append(sku)
            Logger.warning('IAB is running in DEBUG mode and won\'t buy anything!')
        if self.purchase_requested is not None:
            self._toast('purchase already in progress')
            return False
        elif self.syncing:
            # A setup/inventory sync is running; the purchase is picked up
            # automatically when the sync finishes.
            self.purchase_requested = sku
            Clock.schedule_once(self._fail, TIMEOUT)
            self._toast('will start purchase shortly')
            return True
        else:
            Logger.info('Purchasing ' + sku)
            if not self.setup_complete:
                self._toast('will start purchase shortly')
            else:
                self._toast('purchase started')
            self.purchase_requested = sku
            Clock.schedule_once(self._fail, TIMEOUT)
            self._process_purchase()
            return True
    def retry_prompt(self, callback):
        ''' Monkey patch here to implement a real prompt'''
        callback(False)
    def set_retry_prompt(self, fn):
        ''' Or use this handy public setter if you really like Java.'''
        self.retry_prompt = fn
    #################
    # Private Methods
    #################
    # Bound in _setup_callback to activity.on_activity_result
    def _on_activity_result(self, requestCode, responseCode, Intent):
        """Forward Play store activity results for our request code to
        the IAB helper; re-check inventory once it completes."""
        if DEBUG:
            Logger.info('Request Code: ' + str(requestCode))
            Logger.info('Expected Code: ' + str(self.r_code))
        if requestCode == self.r_code:
            Logger.info('Passing result to IAB helper')
            if self.helper.handleActivityResult(requestCode, responseCode, Intent):
                Logger.info('Helper completed the request.')
                self._get_inventory()
            return True
    def _setup(self, *args):
        """Create the IabHelper and start the async billing setup
        (skipped when already syncing, already set up, or offline)."""
        Clock.unschedule(self._setup)
        if not self.syncing and not \
                (hasattr(self, 'helper') and self.helper.mSetupDone) and \
                netcheck.connection_available():
            self.syncing = True
            Logger.info('Attempting startup')
            k = self.app_public_key
            c = cast('android.app.Activity', context)
            self.helper = helper = IabHelper(c, k)
            # prints a lot of useful messages that might
            # not make it back to python space
            helper.enableDebugLogging(DEBUG)
            s = _OnIabSetupFinishedListener(self._setup_callback)
            _protect_callback(s)
            self.helper.startSetup(s)
    def _setup_callback(self, result):
        """Hook app/activity events and query inventory on success."""
        if result.isSuccess() and self.helper.mSetupDone:
            Logger.info('Setup complete. Scheduling inventory check')
            self.setup_complete = True
            a = App.get_running_app()
            a.bind(on_stop=self._dispose)
            activity.bind(on_activity_result=self._on_activity_result)
            self._get_inventory()
        else:
            Logger.info('There was a problem with setup')
            self.error_msg = 'could not connect to play store'
            self._fail()
    def _get_inventory(self, *args):
        """Query owned items asynchronously."""
        Logger.info('Getting Inventory')
        q = _QueryInventoryFinishedListener(self._got_inventory_callback)
        _protect_callback(q)
        self.helper.queryInventoryAsync(q)
    def _got_inventory_callback(self, result, inventory):
        """Collect owned purchases for our skus and start consuming them."""
        if result.isSuccess():
            Logger.info('Got Inventory')
            self.inventory = inventory
            # Inventory has some map methods that might be slightly more
            # straightforward but this is fast already
            purchases = list()
            for s in self.skus:
                Logger.info('Checking for ' + s + ' in the inventory')
                if inventory.hasPurchase(s):
                    purchases.append(inventory.getPurchase(s))
                    Logger.info(s + ' is ready for consumption')
            self.purchases = purchases
            # Stay "syncing" while leftovers remain to be consumed.
            self.syncing = bool(self.purchases)
            self.inventory_checked = True
            self._process_inventory()
        else:
            self.error_msg = 'Could not check inventory'
            self._fail()
    def _process_purchase(self):
        """Drive the purchase state machine for self.purchase_requested:
        ensure connectivity, ensure setup, then launch the flow."""
        Logger.info('in purchase')
        if not netcheck.connection_available():
            Logger.info('no net avaiable')
            netcheck.ask_connect(self._connection_callback)
        elif not self.setup_complete:
            Logger.info('setup not complete')
            self._setup()
        else:
            Logger.info('doing the purchase')
            Logger.info(str(self.purchase_requested))
            if self.purchase_requested is not None:
                sku = self.purchasing = self.purchase_requested
            else:
                self.purchasing = self.purchase_requested = None
                Logger.info('returning for no good reason')
                return
            if sku not in self.skus:
                raise AttributeError('The sku is not in the skus you initialized with')
            Logger.info('Starting purchase workflow for ' + sku)
            c = cast('android.app.Activity', context)
            r = self.r_code
            p = _OnPurchaseFinishedListener(self._purchase_finished)
            _protect_callback(p)
            self.helper.launchPurchaseFlow(c, sku, r, p)
    def _purchase_finished(self, result, purchase):
        """Consume a successful purchase straight away."""
        if result.isSuccess():
            Logger.info('Result was ' + str(result.isSuccess()) + ' for ' +
                        purchase.getSku())
            self._consume(purchase)
        else:
            # BUG FIX: the original logged purchase.getSku() even on
            # failure, when purchase may be None.
            Logger.info('Result was ' + str(result.isSuccess()))
    def _process_inventory(self):
        """Consume leftover purchases one at a time, then resume any
        pending purchase request."""
        if len(self.purchases):
            self._consume(self.purchases[0])
        else:
            # if we're done with inventory, we go back to purchasing
            self._process_purchase()
    def _consume(self, purchase):
        """Consume *purchase* asynchronously."""
        Logger.info('Consuming ' + purchase.getSku())
        c = _OnConsumeFinishedListener(self._consume_finished)
        _protect_callback(c)
        self.helper.consumeAsync(purchase, c)
    def _consume_finished(self, purchase, result):
        """Record a successful consumption and keep the workflow moving."""
        try:
            s = str(purchase.getSku())
        except Exception:
            # BUG FIX: was a bare except (also caught KeyboardInterrupt).
            s = 'unknown sku'
        if result.isSuccess():
            if DEBUG:
                s = self.debug_sku
                # Since we are faking the sku passed in for debug mode,
                # there's no way to know if the consumption happened -really-
                # for purchase.getSku() or for debug_sku. The information
                # is gone. It's in the air. You can never capture it again.
                self.purchase_requested = None
                Clock.unschedule(self._fail)
            self.consumed[s] = self.consumed.get(s, 0) + 1
            Logger.info(s + ' was successfully purchased. Time to get rich!')
            # BUG FIX: direct purchases are not in self.purchases, so an
            # unconditional remove() raised ValueError.
            if purchase in self.purchases:
                self.purchases.remove(purchase)
            if s == self.purchase_requested:
                self.purchase_requested = None
                Clock.unschedule(self._fail)
            # BUG FIX: the original said `self._process_inventory` without
            # parentheses, so remaining inventory was never processed.
            self._process_inventory()
        else:
            Logger.info('There was a problem consuming ' + s)
            self._fail()
    ######################################
    # Managing timeouts and retry workflow
    ######################################
    def _fail(self, *args):
        """Timeout/error handler: toast and offer a retry, but only when
        a user-visible purchase is actually pending."""
        Clock.unschedule(self._fail)
        # since the setup and everything in between can fail,
        # we don't want to prompt the user for background stuff
        if self.purchase_requested is not None:
            self._toast(self.error_msg)
            self._ask_retry()
    def _retry_callback(self, retry):
        if retry:
            self._process_purchase()
        else:
            # BUG FIX: the original assigned to self._purchase_requested
            # (leading underscore), so the real flag stayed set and every
            # later purchase() returned False.  The never-read _processing
            # and _tries attributes were dropped.
            self.purchase_requested = None
            Clock.unschedule(self._fail)
    def _ask_retry(self):
        self.retry_prompt(self._retry_callback)
    def _connection_callback(self, connected):
        """Resume the purchase once connectivity is confirmed."""
        Logger.info('in billing connection callback: ' + str(connected))
        if connected:
            self._process_purchase()
        else:
            self._fail()
    def _dispose(self, *args):
        ''' Let all callbacks be GC'd and destroy helper'''
        self.helper.dispose()
        global _refs
        _refs = []
    def _toast(self, text, length_long=False):
        """Show an Android toast when toast feedback is enabled."""
        if self.toasty:
            toast.toast(text, length_long)
| mit |
huongttlan/statsmodels | statsmodels/examples/ex_nearest_corr.py | 34 | 3388 | # -*- coding: utf-8 -*-
"""Find near positive definite correlation and covariance matrices
Created on Sun Aug 19 15:25:07 2012
Author: Josef Perktold
TODO:
add examples for cov_nearest from script log
Notes
-----
We are looking at eigenvalues before and after the conversion to psd matrix.
As distance measure for how close the change in the matrix is, we consider
the sum of squared differences (Frobenious norm without taking the square root)
"""
from __future__ import print_function
import numpy as np
from statsmodels.stats.correlation_tools import (
corr_nearest, corr_clipped, cov_nearest)
examples = ['all']
if 'all' in examples:
    # x0 is positive definite
    x0 = np.array([[1, -0.2, -0.9], [-0.2, 1, -0.2], [-0.9, -0.2, 1]])
    # x has negative eigenvalues, not definite
    x = np.array([[1, -0.9, -0.9], [-0.9, 1, -0.9], [-0.9, -0.9, 1]])
    #x = np.array([[1, 0.2, 0.2], [0.2, 1, 0.2], [0.2, 0.2, 1]])
    # NOTE(review): x0 and n_fact are defined but never used below.
    n_fact = 2
    print('evals original', np.linalg.eigvalsh(x))
    y = corr_nearest(x, n_fact=100)
    print('evals nearest', np.linalg.eigvalsh(y))
    print(y)
    y = corr_nearest(x, n_fact=100, threshold=1e-16)
    print('evals nearest', np.linalg.eigvalsh(y))
    print(y)
    y = corr_clipped(x, threshold=1e-16)
    print('evals clipped', np.linalg.eigvalsh(y))
    print(y)
    np.set_printoptions(precision=4)
    print('\nMini Monte Carlo')
    # we are simulating a uniformly distributed symmetric matrix
    # and find close positive definite matrix
    # original can be far away from positive definite,
    # then original and converted matrices can be far apart in norm
    # results are printed for visual inspection of different cases
    k_vars = 5
    diag_idx = np.arange(k_vars)
    for ii in range(10):
        print()
        # symmetrize a uniform random matrix and force a unit diagonal
        x = np.random.uniform(-1, 1, size=(k_vars, k_vars))
        x = (x + x.T) * 0.5
        x[diag_idx, diag_idx] = 1
        #x_std = np.sqrt(np.diag(x))
        #x = x / x_std / x_std[:,None]
        print()
        print(np.sort(np.linalg.eigvals(x)), 'original')
        yn = corr_nearest(x, threshold=1e-12, n_fact=200)
        print(np.sort(np.linalg.eigvals(yn)), ((yn - x)**2).sum(), 'nearest')
        yc = corr_clipped(x, threshold=1e-12)
        print(np.sort(np.linalg.eigvals(yc)), ((yc - x)**2).sum(), 'clipped')
    import time
    # crude timing comparison on the last Monte Carlo matrix
    t0 = time.time()
    for _ in range(100):
        corr_nearest(x, threshold=1e-15, n_fact=100)
    t1 = time.time()
    for _ in range(1000):
        corr_clipped(x, threshold=1e-15)
    t2 = time.time()
    print('\ntime (nearest, clipped):', t1 - t0, t2 - t1)
if 'all' in examples:
    # example for test case against R
    x2 = np.array([ 1, 0.477, 0.644, 0.478, 0.651, 0.826,
                    0.477, 1, 0.516, 0.233, 0.682, 0.75,
                    0.644, 0.516, 1, 0.599, 0.581, 0.742,
                    0.478, 0.233, 0.599, 1, 0.741, 0.8,
                    0.651, 0.682, 0.581, 0.741, 1, 0.798,
                    0.826, 0.75, 0.742, 0.8, 0.798, 1]).reshape(6,6)
    y1 = corr_nearest(x2, threshold=1e-15, n_fact=200)
    y2 = corr_clipped(x2, threshold=1e-15)
    print('\nmatrix 2')
    print(np.sort(np.linalg.eigvals(x2)), 'original')
    print(np.sort(np.linalg.eigvals(y1)), ((y1 - x2)**2).sum(), 'nearest')
    # BUG FIX: the original printed eigvals(y1) on the "clipped" line;
    # it must report eigvals(y2) to match the clipped distance shown.
    print(np.sort(np.linalg.eigvals(y2)), ((y2 - x2)**2).sum(), 'clipped')
| bsd-3-clause |
ptsefton/worddown | tools/commandline/WordDownOO.py | 1 | 9980 | #!/usr/bin/env python
import getopt, sys
import uno
import mimetypes
import subprocess
import re
from unohelper import Base, systemPathToFileUrl, absolutize
from os import getcwd
import os.path
import base64
import urllib
import tempfile
tempfile.tempdir = "/tmp"
import shutil
import os
import stat
from bs4 import BeautifulSoup
from com.sun.star.beans import PropertyValue
from com.sun.star.uno import Exception as UnoException
from com.sun.star.io import IOException, XOutputStream
import zipfile
from wordDownOpenOfficeUtils import Bookmarker
from wordDownOpenOfficeUtils import Styles
from wordDownOpenOfficeUtils import Namespaces
from lxml import etree
def convert(path, dest, wordDown, dataURIs, epub):
    """Convert the document at *path* to HTML written to *dest*.

    Uses a running OpenOffice instance over UNO: the document is
    re-saved as ODT in a temp dir, pre-processed (bookmarks added),
    exported with the "HTML (StarWriter)" filter, then optionally
    cleaned with WordDown (phantomjs), images inlined as data URIs,
    and/or converted to EPUB with Calibre.  Requires soffice listening
    on localhost:2002 (see usage()).
    """
    url = "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext"
    (destDir,outFilename) = os.path.split(dest)
    (filestem, ext) = os.path.splitext(outFilename)
    if not os.path.exists(destDir):
        os.makedirs(destDir)
    tempDir = tempfile.mkdtemp()
    os.chmod(tempDir, 0o2770) #Sets group permissions and "sticky bit"
    # Resolve the remote UNO component context and obtain a Desktop.
    ctxLocal = uno.getComponentContext()
    smgrLocal = ctxLocal.ServiceManager
    resolver = smgrLocal.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", ctxLocal)
    ctx = resolver.resolve(url)
    smgr = ctx.ServiceManager
    desktop = smgr.createInstanceWithContext("com.sun.star.frame.Desktop", ctx )
    cwd = systemPathToFileUrl( getcwd() )
    inProps = PropertyValue( "Hidden" , 0 , True, 0 ),
    #try:
    #Open initial document
    # NOTE(review): despite the name, *path* appears to be used directly
    # as the load URL -- confirm callers pass a file:// URL or local path
    # acceptable to loadComponentFromURL.
    fileUrl = path #"file://" + os.path.abspath(path)
    print "XXXXXXXXXX", fileUrl
    doc = desktop.loadComponentFromURL(fileUrl , "_blank", 0, inProps )
    if not doc:
        raise UnoException( "Couldn't open stream for unknown reason", None )
    #Write an ODT copy to temp and copy out later
    tempOdtDest = os.path.join(tempDir, filestem + "_new.odt")
    destUrl = tempOdtDest #systemPathToFileUrl(tempOdtDest)
    #Save as ODT
    filterName = "writer8"
    extension = "odt"
    outProps = (
        PropertyValue( "FilterName" , 0, filterName , 0 ),
        PropertyValue( "Overwrite" , 0, True , 0 ),
        PropertyValue( "OutputStream", 0, OutputStream(), 0)
    )
    doc.storeToURL(destUrl, outProps)
    doc.close(True)
    #Pre-process the ODT file
    odt = zipfile.ZipFile(tempOdtDest, "a")
    RemoveExtraImages(odt)  # currently a no-op (body commented out)
    bookmarker = Bookmarker(odt)
    fileUrl = systemPathToFileUrl(tempOdtDest)
    doc = desktop.loadComponentFromURL( fileUrl , "_blank", 0, inProps )
    if not doc:
        raise UnoException( "Couldn't open stream for unknown reason", None )
    #Save as HTML
    tempDest = os.path.join(tempDir, outFilename)
    #else:
    #    tempDest = os.path.join(tempDir, filestem + ".html")
    destUrl = tempDest #systemPathToFileUrl(tempDest)
    filterName = "HTML (StarWriter)"
    #filtername = "writer_web_HTML_help"
    extension = "html"
    outProps = (
        PropertyValue( "FilterName" , 0, filterName , 0 ),
        PropertyValue( "Overwrite" , 0, True , 0 ),
        PropertyValue( "OutputStream", 0, OutputStream(), 0)
    )
    doc.storeToURL(destUrl, outProps)
    # Copy exported HTML and companion files (images etc.) to destDir,
    # skipping the intermediate ODT and "~" lockfiles.  (`<>` is the
    # Python 2 inequality operator.)
    src_files = os.listdir(tempDir)
    for file_name in src_files:
        full_file_name = os.path.join(tempDir, file_name)
        if (os.path.isfile(full_file_name) and full_file_name <> tempOdtDest) and not file_name.startswith("~"):
            shutil.copy(full_file_name, destDir)
    if wordDown:
        # Clean the exported HTML in place with the WordDown JS renderer.
        myPath, myFile = os.path.split(os.path.abspath(__file__))
        command = ["phantomjs",os.path.join(myPath, "render.js"), dest, dest]
        print command
        subprocess.call(command, shell=False)
    if epub:
        # Build an EPUB from the HTML using Calibre's ebook-convert.
        epubDest = os.path.join(destDir, filestem + ".epub")
        command = ["ebook-convert", dest, epubDest]
        subprocess.call(command)
    def getData(match):
        # re.sub callback: inline an <IMG SRC="..."> as a base64 data:
        # URI and delete the now-redundant copied image file.
        imgName = urllib.unquote(match.group(2))
        imgPath = os.path.join(destDir,imgName)
        imgData = base64.b64encode(open(imgPath).read())
        os.remove(imgPath)
        #TODO - proper mime type
        mime, encoding = mimetypes.guess_type(imgPath)
        return "%sdata:%s;base64,%s%s" % (match.group(1), mime, imgData, match.group(3))
    if dataURIs:
        try:
            html = open(dest, "r").read()
            html = re.sub('(<IMG.*?SRC=")(.*?)(".*?>)',getData, html,flags=re.IGNORECASE)
            open(dest, "w").write(html)#.close()
        except:
            print "Could not create Data URIS ", sys.exc_info()[0]
    print "Saved: " + dest
##    except IOException, e:
##        sys.stderr.write( "Error during conversion: " + e.Message + "\n" )
##        sys.stderr.write( "Error during conversion: " + str(IOException) + "\n" )
##        retVal = 1
##    except UnoException, e:
##        print sys.exc_traceback.tb_lineno
##        sys.stderr.write( "Error ("+repr(e.__class__)+") during conversion:" + e.Message + "\n" )
##        retVal = 1
    if doc:
        doc.dispose()
class OutputStream( Base, XOutputStream ):
    """Minimal UNO XOutputStream implementation that forwards written
    bytes to this process's stdout."""
    def __init__( self ):
        # 0/1 flag flipped by closeOutput(); UNO calls it when done.
        self.closed = 0
    def closeOutput(self):
        self.closed = 1
    def writeBytes( self, seq ):
        # seq is a UNO byte sequence; .value is the raw byte string.
        sys.stdout.write( seq.value )
    def flush( self ):
        pass
def removeFrames(root):
    """ Remove all frames containing drawings before first p

    Walks the elements three levels below *root* and deletes every
    draw:frame element encountered until the first text:p is reached;
    everything from that paragraph on is left untouched.
    """
    ns = Namespaces()
    pTag = "{%s}p" % ns.get("text")
    drawTag = "{%s}frame" % ns.get("draw")
    bodyEls = "*/*/*" # % ns.get("office")
    for subEl in root.xpath(bodyEls):
        if subEl.tag == drawTag:
            subEl.getparent().remove(subEl)
        elif subEl.tag == pTag:
            # Stop at the first text paragraph.
            return
def RemoveExtraImages(odfZip):
    """Nasty hack to clean documents opened as docx and saved as odt
    openoffice adds extra images.
    TODO shift this to the javasscript part of the tool

    NOTE(review): the entire body is commented out, so this is
    currently a deliberate no-op; *odfZip* is left untouched.
    """
    #contentXml = etree.parse(odfZip.open("content.xml"))
    #contentRoot = contentXml.getroot()
    #removeFrames(contentRoot)
    #odfZip.writestr("content.xml",etree.tostring(contentRoot))
    #odfZip.close()
def usage():
    """Write the command line usage/help text to stderr."""
    sys.stderr.write( "usage: WordDownOO.py --help | "+
             " [-c <connection-string> | --connection-string=<connection-string>\n"+
             " [--pdf]\n"+
             " [--noWordDown]\n"+
             " [--dataURIs]\n" +
             " [--force]\n" +
             " inputFile [outputDir]\n"+
             "\n" +
             "Exports documents as HTML, and runs them through WordDown to clean them up\n" +
             "Requires an OpenOffice.org instance to be running. The script and the\n"+
             "running OpenOffice.org instance must be able to access the file with\n"+
             "by the same system path. [ To have a listening OpenOffice.org instance, just run:\n"+
             "openoffice \"-accept=socket,host=localhost,port=2002;urp;\" \n"
             "\n"+
             "-c <connection-string> | --connection-string=<connection-string>\n" +
             " The connection-string part of a uno url to where the\n" +
             " the script should connect to in order to do the conversion.\n" +
             " The strings defaults to socket,host=localhost,port=2002\n" +
             "--noWordDown \n" +
             " Do not run WordDown javascript code\n" +
             "--pdf \n" +
             " Export PDF as well as HTML (TODO)\n" +
             " --dataURIs \n "+
             " Convert images to Data URIs embedded in the HTML" +
             " --epub\n" +
             " Make an EPUB ebook (using Calibre ebook-convert)"
             )
def main():
retVal = 0
doc = None
stdout = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hc:",
["help", "connection-string=" , "pdf", "noWordDown", "epub", "dataURIs"])
wordDown = True #default to nice clean HTML
dataURIs = False
deleteOutputDir = False
epub = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
# if o == "--pdf":
# exportPDF = True #TODO
if o == "--noWordDown":
wordDown = False
if o == "--dataURIs":
dataURIs = True
if o == "--deleteOutputDir":
deleteOutputDir = True
if o == "--epub":
epub = True
if not len(args) or len(args) > 2:
usage()
sys.exit()
path = args[0]
path = os.path.abspath(path)
dir, outFilename = os.path.split(path)
filestem, ext = os.path.splitext(outFilename)
if len(args) == 2:
destDir = args[1]
dest = os.path.join(destDir, filestem + ".html")
else:
destDir = os.path.join(dir,"_html",outFilename)
dest = os.path.join(destDir,"index.html")
#Todo deal with destdir
convert(path, dest, wordDown, dataURIs, epub)
except UnoException, e:
sys.stderr.write( "Error ("+repr(e.__class__)+") :" + e.Message + "\n" )
retVal = 1
except getopt.GetoptError,e:
sys.stderr.write( str(e) + "\n" )
usage()
retVal = 1
sys.exit(retVal)
def makeReadme(originalPath,title):
    """Return a pretty-printed HTML stub page linking to index.html.

    *originalPath* is currently unused but kept for interface
    compatibility.  BUG FIX: the original never applied the ``%(title)s``
    substitution (the literal placeholder ended up in the output) and put
    bare text plus a stray semicolon in <head> instead of a <title> tag.
    """
    readmeString = """
    <html><head><title>%(title)s</title></head><body><a href="index.html">%(title)s</a></body></html>
    """ % {"title": title}
    readme = BeautifulSoup(readmeString)
    return readme.prettify()
if __name__ == "__main__":
main()
| gpl-3.0 |
seann1/portfolio5 | .meteor/dev_bundle/python/Lib/idlelib/Bindings.py | 25 | 2978 | """Define the menu contents, hotkeys, and event bindings.
There is additional configuration information in the EditorWindow class (and
subclasses): the menus are created there based on the menu_specs (class)
variable, and menus not created are silently skipped in the code here. This
makes it possible, for example, to define a Debug menu which is only present in
the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
from idlelib.configHandler import idleConf
# Warning: menudefs is altered in macosxSupport.overrideRootMenu()
# after it is determined that an OS X Aqua Tk is in use,
# which cannot be done until after Tk() is first called.
# Do not alter the 'file', 'options', or 'help' cascades here
# without altering overrideRootMenu() as well.
# TODO: Make this more robust
# Each top-level entry is (menu-name, item-list); each item is either
# None (a separator) or (label, virtual-event).  In a label, '_' marks
# the shortcut character and a leading '!' marks a checkbutton item.
menudefs = [
 # underscore prefixes character to underscore
 ('file', [
   ('_New File', '<<open-new-window>>'),
   ('_Open...', '<<open-window-from-file>>'),
   ('Open _Module...', '<<open-module>>'),
   ('Class _Browser', '<<open-class-browser>>'),
   ('_Path Browser', '<<open-path-browser>>'),
   None,
   ('_Save', '<<save-window>>'),
   ('Save _As...', '<<save-window-as-file>>'),
   ('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
   None,
   ('Prin_t Window', '<<print-window>>'),
   None,
   ('_Close', '<<close-window>>'),
   ('E_xit', '<<close-all-windows>>'),
  ]),
 ('edit', [
   ('_Undo', '<<undo>>'),
   ('_Redo', '<<redo>>'),
   None,
   ('Cu_t', '<<cut>>'),
   ('_Copy', '<<copy>>'),
   ('_Paste', '<<paste>>'),
   ('Select _All', '<<select-all>>'),
   None,
   ('_Find...', '<<find>>'),
   ('Find A_gain', '<<find-again>>'),
   ('Find _Selection', '<<find-selection>>'),
   ('Find in Files...', '<<find-in-files>>'),
   ('R_eplace...', '<<replace>>'),
   ('Go to _Line', '<<goto-line>>'),
  ]),
 ('format', [
   ('_Indent Region', '<<indent-region>>'),
   ('_Dedent Region', '<<dedent-region>>'),
   ('Comment _Out Region', '<<comment-region>>'),
   ('U_ncomment Region', '<<uncomment-region>>'),
   ('Tabify Region', '<<tabify-region>>'),
   ('Untabify Region', '<<untabify-region>>'),
   ('Toggle Tabs', '<<toggle-tabs>>'),
   ('New Indent Width', '<<change-indentwidth>>'),
  ]),
 ('run', [
   ('Python Shell', '<<open-python-shell>>'),
  ]),
 ('shell', [
   ('_View Last Restart', '<<view-restart>>'),
   ('_Restart Shell', '<<restart-shell>>'),
  ]),
 ('debug', [
   ('_Go to File/Line', '<<goto-file-line>>'),
   ('!_Debugger', '<<toggle-debugger>>'),
   ('_Stack Viewer', '<<open-stack-viewer>>'),
   ('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'),
  ]),
 ('options', [
   ('Configure _IDLE', '<<open-config-dialog>>'),
   ('Configure _Extensions', '<<open-config-extensions-dialog>>'),
   None,
  ]),
 ('help', [
   ('_About IDLE', '<<about-idle>>'),
   None,
   ('_IDLE Help', '<<help>>'),
   ('Python _Docs', '<<python-docs>>'),
  ]),
]
# Key bindings for the virtual events above, from the active IDLE config.
default_keydefs = idleConf.GetCurrentKeySet()
| gpl-2.0 |
kayhayen/Nuitka | nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Platform/hpux.py | 2 | 1767 | """engine.SCons.Platform.hpux
Platform-specific initialization for HP-UX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/hpux.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
from . import posix
def generate(env):
    """Add HP-UX-specific construction variables to *env*, on top of the
    generic POSIX platform settings."""
    posix.generate(env)
    #Based on HP-UX11i: ARG_MAX=2048000 - 3000 for environment expansion
    env['MAXLINELENGTH'] = 2045000
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/compiler/pyassem.py | 2 | 26971 | """A flow graph representation for Python bytecode"""
import dis
import types
import sys
from compiler import misc
from compiler.consts \
import CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS
class FlowGraph:
    """Directed graph of basic blocks with designated entry and exit.

    Instructions are emitted into the "current" block; edges record
    control transfers so the blocks can later be linearized into a
    valid emission order (see getBlocksInOrder).
    """
    def __init__(self):
        # entry doubles as the initial current block; exit collects
        # all RETURN_VALUE/YIELD_VALUE transfers.
        self.current = self.entry = Block()
        self.exit = Block("exit")
        self.blocks = misc.Set()
        self.blocks.add(self.entry)
        self.blocks.add(self.exit)
    def startBlock(self, block):
        """Make *block* the target of subsequent emit() calls."""
        if self._debug:
            if self.current:
                print "end", repr(self.current)
                print " next", self.current.next
                print " ", self.current.get_children()
            print repr(block)
        self.current = block
    def nextBlock(self, block=None):
        """Start *block* (or a fresh one) as the implicit successor of
        the current block."""
        # XXX think we need to specify when there is implicit transfer
        # from one block to the next. might be better to represent this
        # with explicit JUMP_ABSOLUTE instructions that are optimized
        # out when they are unnecessary.
        #
        # I think this strategy works: each block has a child
        # designated as "next" which is returned as the last of the
        # children. because the nodes in a graph are emitted in
        # reverse post order, the "next" block will always be emitted
        # immediately after its parent.
        # Worry: maintaining this invariant could be tricky
        if block is None:
            block = self.newBlock()
        # Note: If the current block ends with an unconditional
        # control transfer, then it is incorrect to add an implicit
        # transfer to the block graph. The current code requires
        # these edges to get the blocks emitted in the right order,
        # however. :-( If a client needs to remove these edges, call
        # pruneEdges().
        self.current.addNext(block)
        self.startBlock(block)
    def newBlock(self):
        """Create and register a new empty block (does not start it)."""
        b = Block()
        self.blocks.add(b)
        return b
    def startExitBlock(self):
        """Switch emission to the shared exit block."""
        self.startBlock(self.exit)
    # When true, block starts and emitted instructions are traced.
    _debug = 0
    def _enable_debug(self):
        self._debug = 1
    def _disable_debug(self):
        self._debug = 0
    def emit(self, *inst):
        """Append instruction tuple *inst* to the current block,
        recording out-edges for returns/yields and Block operands."""
        if self._debug:
            print "\t", inst
        if inst[0] in ['RETURN_VALUE', 'YIELD_VALUE']:
            self.current.addOutEdge(self.exit)
        if len(inst) == 2 and isinstance(inst[1], Block):
            self.current.addOutEdge(inst[1])
        self.current.emit(inst)
    def getBlocksInOrder(self):
        """Return the blocks in reverse postorder
        i.e. each node appears before all of its successors
        """
        # XXX make sure every node that doesn't have an explicit next
        # is set so that next points to exit
        for b in self.blocks.elements():
            if b is self.exit:
                continue
            if not b.next:
                b.addNext(self.exit)
        order = dfs_postorder(self.entry, {})
        order.reverse()
        self.fixupOrder(order, self.exit)
        # hack alert
        if not self.exit in order:
            order.append(self.exit)
        return order
    def fixupOrder(self, blocks, default_next):
        """Fixup bad order introduced by DFS."""
        # XXX This is a total mess. There must be a better way to get
        # the code blocks in the right order.
        self.fixupOrderHonorNext(blocks, default_next)
        self.fixupOrderForward(blocks, default_next)
    def fixupOrderHonorNext(self, blocks, default_next):
        """Fix one problem with DFS.
        The DFS uses child block, but doesn't know about the special
        "next" block. As a result, the DFS can order blocks so that a
        block isn't next to the right block for implicit control
        transfers.
        """
        index = {}
        for i in range(len(blocks)):
            index[blocks[i]] = i
        for i in range(0, len(blocks) - 1):
            b = blocks[i]
            n = blocks[i + 1]
            if not b.next or b.next[0] == default_next or b.next[0] == n:
                continue
            # The blocks are in the wrong order. Find the chain of
            # blocks to insert where they belong.
            cur = b
            chain = []
            elt = cur
            while elt.next and elt.next[0] != default_next:
                chain.append(elt.next[0])
                elt = elt.next[0]
            # Now remove the blocks in the chain from the current
            # block list, so that they can be re-inserted.
            l = []
            for b in chain:
                assert index[b] > i
                l.append((index[b], b))
            l.sort()
            l.reverse()
            for j, b in l:
                del blocks[index[b]]
            # Insert the chain in the proper location
            blocks[i:i + 1] = [cur] + chain
            # Finally, re-compute the block indexes
            for i in range(len(blocks)):
                index[blocks[i]] = i
    def fixupOrderForward(self, blocks, default_next):
        """Make sure all JUMP_FORWARDs jump forward"""
        index = {}
        chains = []
        cur = []
        # Partition blocks into chains ending at an implicit transfer
        # to default_next; jumps must not target an earlier chain.
        for b in blocks:
            index[b] = len(chains)
            cur.append(b)
            if b.next and b.next[0] == default_next:
                chains.append(cur)
                cur = []
        chains.append(cur)
        while 1:
            # Collect (target-chain, source-chain) pairs where a
            # JUMP_FORWARD currently points backwards.
            constraints = []
            for i in range(len(chains)):
                l = chains[i]
                for b in l:
                    for c in b.get_children():
                        if index[c] < i:
                            forward_p = 0
                            for inst in b.insts:
                                if inst[0] == 'JUMP_FORWARD':
                                    if inst[1] == c:
                                        forward_p = 1
                            if not forward_p:
                                continue
                            constraints.append((index[c], i))
            if not constraints:
                break
            # XXX just do one for now
            # do swaps to get things in the right order
            goes_before, a_chain = constraints[0]
            assert a_chain > goes_before
            c = chains[a_chain]
            chains.remove(c)
            chains.insert(goes_before, c)
        # Rebuild the flat block list from the reordered chains.
        del blocks[:]
        for c in chains:
            for b in c:
                blocks.append(b)
    def getBlocks(self):
        """Return all blocks in the graph (unordered)."""
        return self.blocks.elements()
    def getRoot(self):
        """Return nodes appropriate for use with dominator"""
        return self.entry
    def getContainedGraphs(self):
        """Return the graphs referenced by instructions in any block."""
        l = []
        for b in self.getBlocks():
            l.extend(b.getContainedGraphs())
        return l
def dfs_postorder(b, seen):
    """Depth-first search of tree rooted at b, return in postorder"""
    seen[b] = b
    order = []
    for child in b.get_children():
        if child in seen:
            continue
        order.extend(dfs_postorder(child, seen))
    order.append(b)
    return order
class Block:
    """A basic block: a straight-line run of instructions plus the
    edges connecting it to other blocks in the flow graph."""
    # Class-wide counter used to hand out unique block ids.
    _count = 0
    def __init__(self, label=''):
        self.insts = []
        self.inEdges = misc.Set()
        self.outEdges = misc.Set()
        self.label = label
        # bid is only used for repr/debug output.
        self.bid = Block._count
        self.next = []
        Block._count = Block._count + 1
    def __repr__(self):
        if self.label:
            return "<block %s id=%d>" % (self.label, self.bid)
        else:
            return "<block id=%d>" % (self.bid)
    def __str__(self):
        insts = map(str, self.insts)
        return "<block %s %d:\n%s>" % (self.label, self.bid,
                                       '\n'.join(insts))
    def emit(self, inst):
        """Append *inst*, recording jump targets as out-edges."""
        op = inst[0]
        if op[:4] == 'JUMP':
            self.outEdges.add(inst[1])
        self.insts.append(inst)
    def getInstructions(self):
        return self.insts
    def addInEdge(self, block):
        self.inEdges.add(block)
    def addOutEdge(self, block):
        self.outEdges.add(block)
    def addNext(self, block):
        # A block may have at most one implicit "next" successor.
        self.next.append(block)
        assert len(self.next) == 1, map(str, self.next)
    # Opcodes after which control never falls through to "next".
    _uncond_transfer = ('RETURN_VALUE', 'RAISE_VARARGS', 'YIELD_VALUE',
                        'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'CONTINUE_LOOP')
    def pruneNext(self):
        """Remove bogus edge for unconditional transfers
        Each block has a next edge that accounts for implicit control
        transfers, e.g. from a JUMP_IF_FALSE to the block that will be
        executed if the test is true.
        These edges must remain for the current assembler code to
        work. If they are removed, the dfs_postorder gets things in
        weird orders. However, they shouldn't be there for other
        purposes, e.g. conversion to SSA form. This method will
        remove the next edge when it follows an unconditional control
        transfer.
        """
        try:
            op, arg = self.insts[-1]
        except (IndexError, ValueError):
            # Empty block, or last instruction has no single argument.
            return
        if op in self._uncond_transfer:
            self.next = []
    def get_children(self):
        # Return successors with the "next" block forced to the end
        # (and de-duplicated out of the explicit out-edges).
        if self.next and self.next[0] in self.outEdges:
            self.outEdges.remove(self.next[0])
        return self.outEdges.elements() + self.next
    def getContainedGraphs(self):
        """Return all graphs contained within this block.
        For example, a MAKE_FUNCTION block will contain a reference to
        the graph for the function body.
        """
        contained = []
        for inst in self.insts:
            if len(inst) == 1:
                continue
            op = inst[1]
            if hasattr(op, 'graph'):
                contained.append(op.graph)
        return contained
# flags for code objects
# the FlowGraph is transformed in place; it exists in one of these states
RAW = "RAW"    # initial state, as built by the code generator
FLAT = "FLAT"  # after flattenGraph(): blocks ordered, jumps resolved
CONV = "CONV"  # after convertArgs(): symbolic args made concrete
DONE = "DONE"  # after makeByteCode(): bytecode emitted
class PyFlowGraph(FlowGraph):
    """FlowGraph specialized to produce a real Python code object.
    getCode() drives the pipeline: computeStackDepth(), flattenGraph()
    (RAW -> FLAT), convertArgs() (FLAT -> CONV), makeByteCode()
    (CONV -> DONE), then newCodeObject().
    NOTE: this is Python 2 code (print statements, py2 dict.keys()).
    """
    super_init = FlowGraph.__init__
    def __init__(self, name, filename, args=(), optimized=0, klass=None):
        self.super_init()
        self.name = name
        self.filename = filename
        self.docstring = None
        self.args = args # XXX
        self.argcount = getArgCount(args)
        self.klass = klass
        if optimized:
            self.flags = CO_OPTIMIZED | CO_NEWLOCALS
        else:
            self.flags = 0
        self.consts = []
        self.names = []
        # Free variables found by the symbol table scan, including
        # variables used only in nested scopes, are included here.
        self.freevars = []
        self.cellvars = []
        # The closure list is used to track the order of cell
        # variables and free variables in the resulting code object.
        # The offsets used by LOAD_CLOSURE/LOAD_DEREF refer to both
        # kinds of variables.
        self.closure = []
        self.varnames = list(args) or []
        # Nested-tuple arguments occupy one anonymous slot (".<n>").
        for i in range(len(self.varnames)):
            var = self.varnames[i]
            if isinstance(var, TupleArg):
                self.varnames[i] = var.getName()
        self.stage = RAW
    def setDocstring(self, doc):
        self.docstring = doc
    def setFlag(self, flag):
        self.flags = self.flags | flag
        # The *args slot is not counted in co_argcount.
        if flag == CO_VARARGS:
            self.argcount = self.argcount - 1
    def checkFlag(self, flag):
        # Returns 1 when set, implicitly None otherwise.
        if self.flags & flag:
            return 1
    def setFreeVars(self, names):
        self.freevars = list(names)
    def setCellVars(self, names):
        self.cellvars = names
    def getCode(self):
        """Get a Python code object"""
        assert self.stage == RAW
        self.computeStackDepth()
        self.flattenGraph()
        assert self.stage == FLAT
        self.convertArgs()
        assert self.stage == CONV
        self.makeByteCode()
        assert self.stage == DONE
        return self.newCodeObject()
    def dump(self, io=None):
        # Debug listing of the flattened instruction stream; optionally
        # redirects sys.stdout to `io`.  Python 2 print statements.
        if io:
            save = sys.stdout
            sys.stdout = io
        pc = 0
        for t in self.insts:
            opname = t[0]
            if opname == "SET_LINENO":
                print
            if len(t) == 1:
                print "\t", "%3d" % pc, opname
                pc = pc + 1
            else:
                print "\t", "%3d" % pc, opname, t[1]
                pc = pc + 3
        if io:
            sys.stdout = save
    def computeStackDepth(self):
        """Compute the max stack depth.
        Approach is to compute the stack effect of each basic block.
        Then find the path through the code with the largest total
        effect.
        """
        depth = {}
        # NOTE(review): this local is never read; the recursion below uses
        # self.exit instead.
        exit = None
        for b in self.getBlocks():
            depth[b] = findDepth(b.getInstructions())
        seen = {}
        def max_depth(b, d):
            if b in seen:
                return d
            seen[b] = 1
            d = d + depth[b]
            children = b.get_children()
            if children:
                return max([max_depth(c, d) for c in children])
            else:
                # A block with no successors falls through to the exit
                # block unless it *is* the exit block.
                if not b.label == "exit":
                    return max_depth(self.exit, d)
                else:
                    return d
        self.stacksize = max_depth(self.entry, 0)
    def flattenGraph(self):
        """Arrange the blocks in order and resolve jumps"""
        assert self.stage == RAW
        self.insts = insts = []
        pc = 0
        # First pass: record the bytecode offset where each block begins.
        begin = {}
        end = {}
        for b in self.getBlocksInOrder():
            begin[b] = pc
            for inst in b.getInstructions():
                insts.append(inst)
                if len(inst) == 1:
                    pc = pc + 1
                elif inst[0] != "SET_LINENO":
                    # arg takes 2 bytes
                    pc = pc + 3
            end[b] = pc
        # Second pass: replace block-object jump targets with offsets
        # (relative for hasjrel opcodes, absolute for hasjabs).
        pc = 0
        for i in range(len(insts)):
            inst = insts[i]
            if len(inst) == 1:
                pc = pc + 1
            elif inst[0] != "SET_LINENO":
                pc = pc + 3
            opname = inst[0]
            if self.hasjrel.has_elt(opname):
                oparg = inst[1]
                offset = begin[oparg] - pc
                insts[i] = opname, offset
            elif self.hasjabs.has_elt(opname):
                insts[i] = opname, begin[inst[1]]
        self.stage = FLAT
    # Class-level opcode-name sets built once from the dis tables.
    # (The loop variable i is left behind as a class attribute.)
    hasjrel = misc.Set()
    for i in dis.hasjrel:
        hasjrel.add(dis.opname[i])
    hasjabs = misc.Set()
    for i in dis.hasjabs:
        hasjabs.add(dis.opname[i])
    def convertArgs(self):
        """Convert arguments from symbolic to concrete form"""
        assert self.stage == FLAT
        # The docstring occupies co_consts[0] by convention.
        self.consts.insert(0, self.docstring)
        self.sort_cellvars()
        for i in range(len(self.insts)):
            t = self.insts[i]
            if len(t) == 2:
                opname, oparg = t
                conv = self._converters.get(opname, None)
                if conv:
                    self.insts[i] = opname, conv(self, oparg)
        self.stage = CONV
    def sort_cellvars(self):
        """Sort cellvars in the order of varnames and prune from freevars.
        """
        cells = {}
        for name in self.cellvars:
            cells[name] = 1
        self.cellvars = [name for name in self.varnames
                         if name in cells]
        for name in self.cellvars:
            del cells[name]
        # Python 2: dict.keys() returns a list, so + concatenates.
        self.cellvars = self.cellvars + cells.keys()
        self.closure = self.cellvars + self.freevars
    def _lookupName(self, name, list):
        """Return index of name in list, appending if necessary
        This routine uses a list instead of a dictionary, because a
        dictionary can't store two different keys if the keys have the
        same value but different types, e.g. 2 and 2L. The compiler
        must treat these two separately, so it does an explicit type
        comparison before comparing the values.
        """
        t = type(name)
        for i in range(len(list)):
            if t == type(list[i]) and list[i] == name:
                return i
        end = len(list)
        list.append(name)
        return end
    # Maps opcode name -> bound converter; populated by the class-body
    # loop below from every _convert_* method.
    _converters = {}
    def _convert_LOAD_CONST(self, arg):
        if hasattr(arg, 'getCode'):
            arg = arg.getCode()
        return self._lookupName(arg, self.consts)
    def _convert_LOAD_FAST(self, arg):
        self._lookupName(arg, self.names)
        return self._lookupName(arg, self.varnames)
    _convert_STORE_FAST = _convert_LOAD_FAST
    _convert_DELETE_FAST = _convert_LOAD_FAST
    def _convert_LOAD_NAME(self, arg):
        if self.klass is None:
            self._lookupName(arg, self.varnames)
        return self._lookupName(arg, self.names)
    def _convert_NAME(self, arg):
        # Same body as _convert_LOAD_NAME; kept separate so LOAD_NAME can
        # diverge without touching the aliases below.
        if self.klass is None:
            self._lookupName(arg, self.varnames)
        return self._lookupName(arg, self.names)
    _convert_STORE_NAME = _convert_NAME
    _convert_DELETE_NAME = _convert_NAME
    _convert_IMPORT_NAME = _convert_NAME
    _convert_IMPORT_FROM = _convert_NAME
    _convert_STORE_ATTR = _convert_NAME
    _convert_LOAD_ATTR = _convert_NAME
    _convert_DELETE_ATTR = _convert_NAME
    _convert_LOAD_GLOBAL = _convert_NAME
    _convert_STORE_GLOBAL = _convert_NAME
    _convert_DELETE_GLOBAL = _convert_NAME
    def _convert_DEREF(self, arg):
        self._lookupName(arg, self.names)
        self._lookupName(arg, self.varnames)
        return self._lookupName(arg, self.closure)
    _convert_LOAD_DEREF = _convert_DEREF
    _convert_STORE_DEREF = _convert_DEREF
    def _convert_LOAD_CLOSURE(self, arg):
        self._lookupName(arg, self.varnames)
        return self._lookupName(arg, self.closure)
    _cmp = list(dis.cmp_op)
    def _convert_COMPARE_OP(self, arg):
        return self._cmp.index(arg)
    # similarly for other opcodes...
    # Register every _convert_* method in _converters, keyed by opcode.
    for name, obj in locals().items():
        if name[:9] == "_convert_":
            opname = name[9:]
            _converters[opname] = obj
    del name, obj, opname
    def makeByteCode(self):
        assert self.stage == CONV
        self.lnotab = lnotab = LineAddrTable()
        for t in self.insts:
            opname = t[0]
            if len(t) == 1:
                lnotab.addCode(self.opnum[opname])
            else:
                oparg = t[1]
                if opname == "SET_LINENO":
                    lnotab.nextLine(oparg)
                    continue
                hi, lo = twobyte(oparg)
                try:
                    lnotab.addCode(self.opnum[opname], lo, hi)
                except ValueError:
                    # Python 2 print statements: dump the offending
                    # opcode before re-raising.
                    print opname, oparg
                    print self.opnum[opname], lo, hi
                    raise
        self.stage = DONE
    # Reverse map: opcode name -> numeric opcode.
    opnum = {}
    for num in range(len(dis.opname)):
        opnum[dis.opname[num]] = num
    del num
    def newCodeObject(self):
        assert self.stage == DONE
        if (self.flags & CO_NEWLOCALS) == 0:
            nlocals = 0
        else:
            nlocals = len(self.varnames)
        argcount = self.argcount
        # The **kwargs slot is not counted in co_argcount either.
        if self.flags & CO_VARKEYWORDS:
            argcount = argcount - 1
        # Python 2 CodeType signature (no kwonlyargcount).
        return types.CodeType(argcount, nlocals, self.stacksize, self.flags,
                              self.lnotab.getCode(), self.getConsts(),
                              tuple(self.names), tuple(self.varnames),
                              self.filename, self.name, self.lnotab.firstline,
                              self.lnotab.getTable(), tuple(self.freevars),
                              tuple(self.cellvars))
    def getConsts(self):
        """Return a tuple for the const slot of the code object
        Must convert references to code (MAKE_FUNCTION) to code
        objects recursively.
        """
        l = []
        for elt in self.consts:
            if isinstance(elt, PyFlowGraph):
                elt = elt.getCode()
            l.append(elt)
        return tuple(l)
def isJump(opname):
    """Return 1 when `opname` names a jump opcode, else (implicitly) None."""
    if opname.startswith('JUMP'):
        return 1
class TupleArg:
    """Helper for marking func defs with nested tuples in arglist"""

    def __init__(self, count, names):
        # `count` is the positional slot index; `names` the nested tuple.
        self.count = count
        self.names = names

    def __repr__(self):
        return "TupleArg({0}, {1})".format(self.count, self.names)

    def getName(self):
        # Anonymous varname slot used for the unpacked tuple argument.
        return ".{0}".format(self.count)
def getArgCount(args):
    """Count argument slots, discounting the names hidden inside TupleArgs.
    Each TupleArg collapses its flattened nested names into the single
    anonymous slot it occupies.
    """
    argcount = len(args)
    for arg in args:
        if isinstance(arg, TupleArg):
            argcount = argcount - len(misc.flatten(arg.names))
    return argcount
def twobyte(val):
    """Split an int opcode argument into its (high, low) bytes."""
    assert isinstance(val, int)
    return val // 256, val % 256
class LineAddrTable:
    """Incremental builder for co_lnotab (format documented in compile.c).

    For every SET_LINENO after the first, pairs of bytes are appended:
    the bytecode-address delta since the previous SET_LINENO, then the
    line-number delta.  Deltas over 255 are split across multiple
    two-byte entries.
    """

    def __init__(self):
        self.code = []
        self.codeOffset = 0
        self.firstline = 0
        self.lastline = 0
        self.lastoff = 0
        self.lnotab = []

    def addCode(self, *args):
        # Append raw opcode/argument bytes as one-character strings.
        for byte in args:
            self.code.append(chr(byte))
        self.codeOffset = self.codeOffset + len(args)

    def nextLine(self, lineno):
        if self.firstline == 0:
            # First SET_LINENO only establishes co_firstlineno.
            self.firstline = lineno
            self.lastline = lineno
            return
        addr = self.codeOffset - self.lastoff
        line = lineno - self.lastline
        # lnotab stores unsigned chars and CPython assumes line numbers
        # never decrease with increasing bytecode address; negative line
        # deltas are therefore dropped (see the "a = (1, b)" example in
        # the original compiler sources).
        if line >= 0:
            pairs = self.lnotab
            while addr > 255:
                pairs.append(255)
                pairs.append(0)
                addr = addr - 255
            while line > 255:
                pairs.append(addr)
                pairs.append(255)
                line = line - 255
                addr = 0
            if addr > 0 or line > 0:
                pairs.append(addr)
                pairs.append(line)
        self.lastline = lineno
        self.lastoff = self.codeOffset

    def getCode(self):
        return ''.join(self.code)

    def getTable(self):
        return ''.join(map(chr, self.lnotab))
class StackDepthTracker:
    # XXX 1. need to keep track of stack depth on jumps
    # XXX 2. at least partly as a result, this code is broken
    def findDepth(self, insts, debug=0):
        """Best-effort upper bound on the stack depth needed by `insts`.
        Per-opcode deltas come from the `effect` table first, then the
        prefix `patterns`, then a method named after the opcode (for
        opcodes whose effect depends on their argument).
        NOTE: uses Python 2 print statements when `debug` is true.
        """
        depth = 0
        maxDepth = 0
        for i in insts:
            opname = i[0]
            if debug:
                print i,
            delta = self.effect.get(opname, None)
            if delta is not None:
                depth = depth + delta
            else:
                # now check patterns
                for pat, pat_delta in self.patterns:
                    if opname[:len(pat)] == pat:
                        delta = pat_delta
                        depth = depth + delta
                        break
                # if we still haven't found a match
                if delta is None:
                    meth = getattr(self, opname, None)
                    if meth is not None:
                        depth = depth + meth(i[1])
            if depth > maxDepth:
                maxDepth = depth
            if debug:
                print depth, maxDepth
        return maxDepth
    # Fixed stack effect per opcode (opcodes not listed fall through to
    # `patterns` or an argument-dependent method below).
    effect = {
        'POP_TOP': -1,
        'DUP_TOP': 1,
        'LIST_APPEND': -2,
        'SLICE+1': -1,
        'SLICE+2': -1,
        'SLICE+3': -2,
        'STORE_SLICE+0': -1,
        'STORE_SLICE+1': -2,
        'STORE_SLICE+2': -2,
        'STORE_SLICE+3': -3,
        'DELETE_SLICE+0': -1,
        'DELETE_SLICE+1': -2,
        'DELETE_SLICE+2': -2,
        'DELETE_SLICE+3': -3,
        'STORE_SUBSCR': -3,
        'DELETE_SUBSCR': -2,
        # PRINT_EXPR?
        'PRINT_ITEM': -1,
        'RETURN_VALUE': -1,
        'YIELD_VALUE': -1,
        'EXEC_STMT': -3,
        'BUILD_CLASS': -2,
        'STORE_NAME': -1,
        'STORE_ATTR': -2,
        'DELETE_ATTR': -1,
        'STORE_GLOBAL': -1,
        'BUILD_MAP': 1,
        'COMPARE_OP': -1,
        'STORE_FAST': -1,
        'IMPORT_STAR': -1,
        'IMPORT_NAME': -1,
        'IMPORT_FROM': 1,
        'LOAD_ATTR': 0, # unlike other loads
        # close enough...
        'SETUP_EXCEPT': 3,
        'SETUP_FINALLY': 3,
        'FOR_ITER': 1,
        'WITH_CLEANUP': -1,
        }
    # use pattern match
    patterns = [
        ('BINARY_', -1),
        ('LOAD_', 1),
        ]
    # Argument-dependent effects, resolved via getattr(self, opname).
    def UNPACK_SEQUENCE(self, count):
        return count-1
    def BUILD_TUPLE(self, count):
        return -count+1
    def BUILD_LIST(self, count):
        return -count+1
    def CALL_FUNCTION(self, argc):
        # argc packs positional count in the low byte, keyword count in
        # the high byte; each keyword argument occupies two stack slots.
        hi, lo = divmod(argc, 256)
        return -(lo + hi * 2)
    def CALL_FUNCTION_VAR(self, argc):
        return self.CALL_FUNCTION(argc)-1
    def CALL_FUNCTION_KW(self, argc):
        return self.CALL_FUNCTION(argc)-1
    def CALL_FUNCTION_VAR_KW(self, argc):
        return self.CALL_FUNCTION(argc)-2
    def MAKE_FUNCTION(self, argc):
        return -argc
    def MAKE_CLOSURE(self, argc):
        # XXX need to account for free variables too!
        return -argc
    def BUILD_SLICE(self, argc):
        # Implicitly returns None for any argc other than 2 or 3.
        if argc == 2:
            return -1
        elif argc == 3:
            return -2
    def DUP_TOPX(self, argc):
        return argc
findDepth = StackDepthTracker().findDepth  # module-level helper used by PyFlowGraph.computeStackDepth
| mit |
ROGUE-JCTD/geonode | geonode/services/tests.py | 19 | 4233 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
import sys
import traceback
from django.test import TestCase
from django.core.urlresolvers import reverse
from .models import Service
class ServicesTests(TestCase):
    """Tests geonode.services app/module
    Registers remote WMS / ArcGIS REST services through the
    ``register_service`` view and checks the resulting Service rows.
    """
    fixtures = ['initial_data.json', 'bobby.json', 'people_data.json']
    def setUp(self):
        # Credentials used by every test below.
        self.user = 'admin'
        self.passwd = 'admin'
    def test_register_indexed_wms(self):
        """Test registering an indexed WMS
        """
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.post(
            reverse('register_service'),
            {
                'type': 'WMS',
                'url': 'http://metaspatial.net/cgi-bin/ogc-wms.xml',
            })
        self.assertEqual(response.status_code, 200)
        service_dict = json.loads(response.content)[0]
        try:
            service = Service.objects.get(id=service_dict['service_id'])
            # Harvested some layers
            self.assertTrue(service.layer_set.count() > 0)
            self.assertEqual(service.method, "I")
            self.assertEqual(service.type, "WMS")
            self.assertEqual(service.ptype, 'gxp_wmscsource')
        # 'except X as e' replaces the Python-2-only 'except X, e' form,
        # which is a syntax error on Python 3 (PEP 3110).
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            self.fail("Service not created: %s" % str(e))
    def test_register_arcrest(self):
        """Test registering an arcrest service
        """
        self.client.login(username=self.user, password=self.passwd)
        response = self.client.post(
            reverse('register_service'),
            {
                'type': 'REST',
                'url': 'http://maps1.arcgisonline.com/ArcGIS/rest/services/EPA_Facilities/MapServer',
            })
        self.assertEqual(response.status_code, 200)
        service_dict = json.loads(response.content)[0]
        try:
            service = Service.objects.get(id=service_dict['service_id'])
            # Harvested some layers
            self.assertTrue(service.layer_set.count() > 0)
            self.assertEqual(service.method, "I")
            self.assertEqual(service.type, "REST")
            self.assertEqual(service.ptype, 'gxp_arcrestsource')
        except Exception as e:
            self.fail("Service not created: %s" % str(e))
    # Disabled the test below because it uses an external service and fails randomly.
    # def test_register_csw(self):
    #     self.client.login(username='admin', password='admin')
    #     response = self.client.post(reverse('register_service'),
    #                                 {
    #                                     'type':'CSW',
    #                                     'url':'http://demo.pycsw.org/cite/csw',
    #
    #                                 })
    #     self.assertEqual(response.status_code, 200)
    #     service_dict = json.loads(response.content)[0]
    #     try:
    #         service = Service.objects.get(id=service_dict['service_id'])
    #     except Exception, e:
    #         self.fail("Service not created: %s" % str(e))
    #     self.assertEqual(service.method, "H")
    #     self.assertEqual(service.type, "CSW")
    #     self.assertEqual(service.base_url, 'http://demo.pycsw.org/cite/csw')
    # TODO: Use CSW or make mock CSW containing just a few small WMS & ESRI service records
    # self.assertEquals(service.service_set.all().count(), 0) #No WMS/REST services
    # self.assertEquals(service.layers.count(),0) # No Layers for this one
| gpl-3.0 |
bhamza/ntu-dsi-dcn | src/applications/bindings/callbacks_list.py | 331 | 1249 | callback_classes = [
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| gpl-2.0 |
egabancho/invenio-oauth2server | invenio_oauth2server/errors.py | 16 | 1200 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""OAuth2Server errors."""
class OAuth2ServerError(Exception):
    """Base class for all errors raised by the oauth2server module."""
class ScopeDoesNotExists(OAuth2ServerError):
    """Raised when a scope is not registered in the scopes registry."""

    def __init__(self, scope, *args, **kwargs):
        """Remember the offending *scope*, then defer to the base class."""
        self.scope = scope
        super(ScopeDoesNotExists, self).__init__(*args, **kwargs)
| gpl-2.0 |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/numpy/lib/arraypad.py | 48 | 52230 | """
The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['pad']
###############################################################################
# Private utility functions.
def _arange_ndarray(arr, shape, axis, reverse=False):
    """
    Create an ndarray of `shape` whose values count 1..shape[axis]
    (inclusive, deliberately 1-indexed) along `axis` and are constant
    along every other axis — effectively `np.arange` broadcast to one
    axis of an arbitrarily shaped array.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape (only its shape is consulted).
    shape : tuple of ints
        Desired output shape; equals `arr.shape` except along `axis`.
    axis : int
        Axis along which values increment.
    reverse : bool
        If True, count down from shape[axis] to 1 instead.

    Returns
    -------
    padarr : ndarray
        Array of `shape` with the linear range along `axis`.
    """
    if reverse:
        seq = np.arange(shape[axis], 0, -1)
    else:
        seq = np.arange(1, shape[axis] + 1)
    # Lay the sequence along `axis`, singleton everywhere else...
    seqshape = tuple(shape[axis] if i == axis else 1
                     for (i, x) in enumerate(arr.shape))
    padarr = seq.reshape(seqshape)
    # ...then tile the remaining dimensions out to the requested shape.
    for i, dim in enumerate(shape):
        if padarr.shape[i] != dim:
            padarr = padarr.repeat(dim, axis=i)
    return padarr
def _round_ifneeded(arr, dtype):
    """
    Round `arr` in place when the destination `dtype` is an integer type.

    Parameters
    ----------
    arr : ndarray
        Array rounded in place (no-op for non-integer `dtype`).
    dtype : dtype
        The dtype of the destination array.
    """
    if np.issubdtype(dtype, np.integer):
        np.around(arr, out=arr)
def _prepend_const(arr, pad_amt, val, axis=-1):
    """
    Prepend `pad_amt` entries equal to the constant `val` along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to prepend; 0 returns `arr` unchanged.
    val : scalar
        Constant value, cast to `arr.dtype`.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with `pad_amt` copies of `val` prepended along `axis`.
    """
    if pad_amt == 0:
        return arr
    padshape = tuple(pad_amt if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    if val == 0:
        pad_block = np.zeros(padshape, dtype=arr.dtype)
    else:
        pad_block = (np.zeros(padshape) + val).astype(arr.dtype)
    return np.concatenate((pad_block, arr), axis=axis)
def _append_const(arr, pad_amt, val, axis=-1):
    """
    Append `pad_amt` entries equal to the constant `val` along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to append; 0 returns `arr` unchanged.
    val : scalar
        Constant value, cast to `arr.dtype`.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with `pad_amt` copies of `val` appended along `axis`.
    """
    if pad_amt == 0:
        return arr
    padshape = tuple(pad_amt if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    if val == 0:
        pad_block = np.zeros(padshape, dtype=arr.dtype)
    else:
        pad_block = (np.zeros(padshape) + val).astype(arr.dtype)
    return np.concatenate((arr, pad_block), axis=axis)
def _prepend_edge(arr, pad_amt, axis=-1):
    """
    Prepend `pad_amt` copies of the leading edge values along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to prepend; 0 returns `arr` unchanged.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` extended by `pad_amt` repeated edge values along `axis`.
    """
    if pad_amt == 0:
        return arr
    # Take the first hyperplane along `axis`, restore its singleton
    # dimension, and repeat it `pad_amt` times.
    edge_slice = tuple(0 if i == axis else slice(None)
                       for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    edge = arr[edge_slice].reshape(keepdims).repeat(pad_amt, axis=axis)
    return np.concatenate((edge, arr), axis=axis)
def _append_edge(arr, pad_amt, axis=-1):
    """
    Append `pad_amt` copies of the trailing edge values along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to append; 0 returns `arr` unchanged.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` extended by `pad_amt` repeated edge values along `axis`.
    """
    if pad_amt == 0:
        return arr
    # Take the last hyperplane along `axis`, restore its singleton
    # dimension, and repeat it `pad_amt` times.
    last = arr.shape[axis] - 1
    edge_slice = tuple(last if i == axis else slice(None)
                       for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    edge = arr[edge_slice].reshape(keepdims).repeat(pad_amt, axis=axis)
    return np.concatenate((arr, edge), axis=axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
    """
    Prepend a linear ramp along `axis`, running from `end` at the outer
    boundary up to the array's edge value.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to prepend; 0 returns `arr` unchanged.
    end : scalar
        Value the ramp starts from at the far end of the padding.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with the ramp region prepended along `axis`.
    """
    if pad_amt == 0:
        return arr
    padshape = tuple(pad_amt if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    # Ramp coordinates count pad_amt..1 toward the array edge.
    ramp = _arange_ndarray(arr, padshape, axis,
                           reverse=True).astype(np.float64)
    # Edge hyperplane, broadcast across the whole pad region.
    edge_slice = tuple(0 if i == axis else slice(None)
                       for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    edge = arr[edge_slice].reshape(keepdims).repeat(pad_amt, axis)
    # Scale coordinates into a line from `end` to the edge value.
    ramp *= (end - edge) / float(pad_amt)
    ramp += edge
    _round_ifneeded(ramp, arr.dtype)
    return np.concatenate((ramp.astype(arr.dtype), arr), axis=axis)
def _append_ramp(arr, pad_amt, end, axis=-1):
    """
    Append a linear ramp along `axis`, running from the array's edge
    value down to `end` at the outer boundary.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to append; 0 returns `arr` unchanged.
    end : scalar
        Value the ramp reaches at the far end of the padding.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with the ramp region appended along `axis`.
    """
    if pad_amt == 0:
        return arr
    padshape = tuple(pad_amt if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    # Ramp coordinates count 1..pad_amt away from the array edge.
    ramp = _arange_ndarray(arr, padshape, axis,
                           reverse=False).astype(np.float64)
    # Edge hyperplane, broadcast across the whole pad region.
    edge_slice = tuple(-1 if i == axis else slice(None)
                       for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    edge = arr[edge_slice].reshape(keepdims).repeat(pad_amt, axis)
    # Scale coordinates into a line from the edge value to `end`.
    ramp *= (end - edge) / float(pad_amt)
    ramp += edge
    _round_ifneeded(ramp, arr.dtype)
    return np.concatenate((arr, ramp.astype(arr.dtype)), axis=axis)
def _prepend_max(arr, pad_amt, num, axis=-1):
    """
    Prepend `pad_amt` copies of the maximum of the first `num` values
    along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to prepend; 0 returns `arr` unchanged.
    num : int or None
        Window depth for the maximum; values of [1, arr.shape[axis]],
        or None for the entire axis.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with the max-filled region prepended along `axis`.
    """
    if pad_amt == 0:
        return arr
    if num == 1:
        # A one-value window is just edge padding.
        return _prepend_edge(arr, pad_amt, axis)
    if num is not None and num >= arr.shape[axis]:
        num = None  # window covers the whole axis
    # slice(None) when num is None, i.e. the full axis.
    stat_slice = tuple(slice(num) if i == axis else slice(None)
                       for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    stat = arr[stat_slice].max(axis=axis).reshape(keepdims)
    return np.concatenate((stat.repeat(pad_amt, axis=axis), arr),
                          axis=axis)
def _append_max(arr, pad_amt, num, axis=-1):
    """
    Append `pad_amt` copies of the maximum of the last `num` values
    along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to append; 0 returns `arr` unchanged.
    num : int or None
        Window depth for the maximum; values of [1, arr.shape[axis]],
        or None for the entire axis.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with the max-filled region appended along `axis`.
    """
    if pad_amt == 0:
        return arr
    if num == 1:
        # A one-value window is just edge padding.
        return _append_edge(arr, pad_amt, axis)
    if num is not None and num >= arr.shape[axis]:
        num = None  # window covers the whole axis
    last = arr.shape[axis] - 1
    if num is None:
        stat_slice = tuple(slice(None) for x in arr.shape)
    else:
        # Last `num` entries, taken in reverse order (max is unaffected).
        stat_slice = tuple(slice(last, last - num, -1) if i == axis
                           else slice(None)
                           for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    stat = arr[stat_slice].max(axis=axis).reshape(keepdims)
    return np.concatenate((arr, stat.repeat(pad_amt, axis=axis)),
                          axis=axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
    """
    Prepend `pad_amt` copies of the mean of the first `num` values
    along `axis` (rounded when `arr` has an integer dtype).

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to prepend; 0 returns `arr` unchanged.
    num : int or None
        Window depth for the mean; values of [1, arr.shape[axis]],
        or None for the entire axis.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with the mean-filled region prepended along `axis`.
    """
    if pad_amt == 0:
        return arr
    if num == 1:
        # A one-value window is just edge padding.
        return _prepend_edge(arr, pad_amt, axis)
    if num is not None and num >= arr.shape[axis]:
        num = None  # window covers the whole axis
    # slice(None) when num is None, i.e. the full axis.
    stat_slice = tuple(slice(num) if i == axis else slice(None)
                       for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    stat = arr[stat_slice].mean(axis).reshape(keepdims)
    _round_ifneeded(stat, arr.dtype)
    pad_block = stat.repeat(pad_amt, axis).astype(arr.dtype)
    return np.concatenate((pad_block, arr), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
    """
    Append `pad_amt` copies of the mean of the last `num` values along
    `axis` (rounded when `arr` has an integer dtype).

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to append; 0 returns `arr` unchanged.
    num : int or None
        Window depth for the mean; values of [1, arr.shape[axis]],
        or None for the entire axis.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with the mean-filled region appended along `axis`.
    """
    if pad_amt == 0:
        return arr
    if num == 1:
        # A one-value window is just edge padding.
        return _append_edge(arr, pad_amt, axis)
    if num is not None and num >= arr.shape[axis]:
        num = None  # window covers the whole axis
    last = arr.shape[axis] - 1
    if num is None:
        stat_slice = tuple(slice(None) for x in arr.shape)
    else:
        # Last `num` entries, taken in reverse order (mean is unaffected).
        stat_slice = tuple(slice(last, last - num, -1) if i == axis
                           else slice(None)
                           for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    stat = arr[stat_slice].mean(axis=axis).reshape(keepdims)
    _round_ifneeded(stat, arr.dtype)
    pad_block = stat.repeat(pad_amt, axis).astype(arr.dtype)
    return np.concatenate((arr, pad_block), axis=axis)
def _prepend_med(arr, pad_amt, num, axis=-1):
    """
    Prepend `pad_amt` copies of the median of the first `num` values
    along `axis` (rounded when `arr` has an integer dtype).

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to prepend; 0 returns `arr` unchanged.
    num : int or None
        Window depth for the median; values of [1, arr.shape[axis]],
        or None for the entire axis.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        `arr` with the median-filled region prepended along `axis`.
    """
    if pad_amt == 0:
        return arr
    if num == 1:
        # A one-value window is just edge padding.
        return _prepend_edge(arr, pad_amt, axis)
    if num is not None and num >= arr.shape[axis]:
        num = None  # window covers the whole axis
    # slice(None) when num is None, i.e. the full axis.
    stat_slice = tuple(slice(num) if i == axis else slice(None)
                       for (i, x) in enumerate(arr.shape))
    keepdims = tuple(1 if i == axis else x
                     for (i, x) in enumerate(arr.shape))
    stat = np.median(arr[stat_slice], axis=axis).reshape(keepdims)
    _round_ifneeded(stat, arr.dtype)
    pad_block = stat.repeat(pad_amt, axis).astype(arr.dtype)
    return np.concatenate((pad_block, arr), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
    """
    Append `pad_amt` median values along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to append.
    num : int
        Depth into `arr` along `axis` to calculate median.
        Range: [1, `arr.shape[axis]`] or None (entire axis)
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, with `pad_amt` values appended along `axis`. The
        appended region is the median of the final `num` values along
        `axis`.
    """
    if pad_amt == 0:
        return arr

    # The median of a single value is the value itself: reuse edge padding.
    if num == 1:
        return _append_edge(arr, pad_amt, axis)

    # A depth covering the whole axis means "use the entire axis".
    if num is not None and num >= arr.shape[axis]:
        num = None

    # Select the trailing `num` entries along `axis` via reverse indexing
    # (order is irrelevant for the median); everything when num is None.
    selector = [slice(None)] * arr.ndim
    if num is not None:
        last = arr.shape[axis] - 1
        selector[axis] = slice(last, last - num, -1)

    # Restore the collapsed axis as a singleton so repeat() can tile it.
    keepdims_shape = list(arr.shape)
    keepdims_shape[axis] = 1
    med_chunk = np.median(arr[tuple(selector)], axis=axis).reshape(keepdims_shape)
    _round_ifneeded(med_chunk, arr.dtype)

    # Tile the median `pad_amt` times and glue it after `arr`.
    pad_block = med_chunk.repeat(pad_amt, axis).astype(arr.dtype)
    return np.concatenate((arr, pad_block), axis=axis)
def _prepend_min(arr, pad_amt, num, axis=-1):
    """
    Prepend `pad_amt` minimum values along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to prepend.
    num : int
        Depth into `arr` along `axis` to calculate minimum.
        Range: [1, `arr.shape[axis]`] or None (entire axis)
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, with `pad_amt` values prepended along `axis`. The
        prepended region is the minimum of the first `num` values along
        `axis`.
    """
    if pad_amt == 0:
        return arr

    # The minimum of a single value is the value itself: reuse edge padding.
    if num == 1:
        return _prepend_edge(arr, pad_amt, axis)

    # A depth covering the whole axis means "use the entire axis".
    if num is not None and num >= arr.shape[axis]:
        num = None

    # Select the leading `num` entries along `axis` (everything when None).
    selector = [slice(None)] * arr.ndim
    selector[axis] = slice(num)

    # min() collapses `axis`; restore it as a singleton so repeat() works.
    keepdims_shape = list(arr.shape)
    keepdims_shape[axis] = 1
    min_chunk = arr[tuple(selector)].min(axis=axis).reshape(keepdims_shape)

    # Tile the minimum `pad_amt` times and glue it in front of `arr`.
    return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
                          axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
"""
Append `pad_amt` median values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate minimum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the minimum of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
min_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
min_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate min, reshape to add singleton dimension back
min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
axis=axis)
def _pad_ref(arr, pad_amt, method, axis=-1):
    """
    Pad `axis` of `arr` by reflection.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : tuple of ints, length 2
        Padding to (prepend, append) along `axis`.
    method : str
        Controls method of reflection; options are 'even' or 'odd'.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
        values appended along `axis`. Both regions are padded with reflected
        values from the original array.

    Notes
    -----
    This algorithm does not pad with repetition, i.e. the edges are not
    repeated in the reflection. For that behavior, use `mode='symmetric'`.

    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
    single function, lest the indexing tricks in non-integer multiples of the
    original shape would violate repetition in the final iteration.
    """
    # Implicit booleanness to test for zero (or None) in any scalar type
    if pad_amt[0] == 0 and pad_amt[1] == 0:
        return arr

    ##########################################################################
    # Prepended region

    # Slice off a reverse indexed chunk from near edge to pad `arr` before.
    # slice(pad_amt[0], 0, -1) picks indices pad_amt[0] .. 1 — the edge
    # element itself (index 0) is deliberately excluded from the reflection.
    ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
                      for (i, x) in enumerate(arr.shape))

    ref_chunk1 = arr[ref_slice]

    # Shape to restore singleton dimension after slicing
    pad_singleton = tuple(x if i != axis else 1
                          for (i, x) in enumerate(arr.shape))
    if pad_amt[0] == 1:
        ref_chunk1 = ref_chunk1.reshape(pad_singleton)

    # Memory/computationally more expensive, only do this if `method='odd'`.
    # 'odd' reflection mirrors values through the edge point:
    # new = 2 * edge - reflected.
    if 'odd' in method and pad_amt[0] > 0:
        edge_slice1 = tuple(slice(None) if i != axis else 0
                            for (i, x) in enumerate(arr.shape))
        edge_chunk = arr[edge_slice1].reshape(pad_singleton)
        ref_chunk1 = 2 * edge_chunk - ref_chunk1
        del edge_chunk

    ##########################################################################
    # Appended region

    # Slice off a reverse indexed chunk from far edge to pad `arr` after.
    # slice(start, end) stops one short of the last index, again keeping the
    # edge value itself out of the reflection; rev_idx then reverses the
    # chunk along `axis`.
    start = arr.shape[axis] - pad_amt[1] - 1
    end = arr.shape[axis] - 1
    ref_slice = tuple(slice(None) if i != axis else slice(start, end)
                      for (i, x) in enumerate(arr.shape))
    rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
                    for (i, x) in enumerate(arr.shape))
    ref_chunk2 = arr[ref_slice][rev_idx]

    if pad_amt[1] == 1:
        ref_chunk2 = ref_chunk2.reshape(pad_singleton)

    # NOTE(review): unlike the prepend branch, this 'odd' step has no
    # `pad_amt[1] > 0` guard; harmless (the chunk is empty in that case),
    # but inconsistent with the prepend side.
    if 'odd' in method:
        edge_slice2 = tuple(slice(None) if i != axis else -1
                            for (i, x) in enumerate(arr.shape))
        edge_chunk = arr[edge_slice2].reshape(pad_singleton)
        ref_chunk2 = 2 * edge_chunk - ref_chunk2
        del edge_chunk

    # Concatenate `arr` with both chunks, extending along `axis`
    return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)
def _pad_sym(arr, pad_amt, method, axis=-1):
    """
    Pad `axis` of `arr` by symmetry.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : tuple of ints, length 2
        Padding to (prepend, append) along `axis`.
    method : str
        Controls method of symmetry; options are 'even' or 'odd'.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
        values appended along `axis`. Both regions are padded with symmetric
        values from the original array.

    Notes
    -----
    This algorithm DOES pad with repetition, i.e. the edges are repeated.
    For padding without repeated edges, use `mode='reflect'`.

    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
    single function, lest the indexing tricks in non-integer multiples of the
    original shape would violate repetition in the final iteration.
    """
    # Implicit booleanness to test for zero (or None) in any scalar type
    if pad_amt[0] == 0 and pad_amt[1] == 0:
        return arr

    ##########################################################################
    # Prepended region

    # Slice off a reverse indexed chunk from near edge to pad `arr` before.
    # Unlike 'reflect', the edge element (index 0) IS included here.
    sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
                      for (i, x) in enumerate(arr.shape))
    rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
                    for (i, x) in enumerate(arr.shape))
    sym_chunk1 = arr[sym_slice][rev_idx]

    # Shape to restore singleton dimension after slicing
    pad_singleton = tuple(x if i != axis else 1
                          for (i, x) in enumerate(arr.shape))
    if pad_amt[0] == 1:
        sym_chunk1 = sym_chunk1.reshape(pad_singleton)

    # Memory/computationally more expensive, only do this if `method='odd'`.
    # 'odd' symmetry mirrors values through the edge point:
    # new = 2 * edge - mirrored.
    if 'odd' in method and pad_amt[0] > 0:
        edge_slice1 = tuple(slice(None) if i != axis else 0
                            for (i, x) in enumerate(arr.shape))
        edge_chunk = arr[edge_slice1].reshape(pad_singleton)
        sym_chunk1 = 2 * edge_chunk - sym_chunk1
        del edge_chunk

    ##########################################################################
    # Appended region

    # Slice off a reverse indexed chunk from far edge to pad `arr` after.
    # rev_idx is reused from the prepended region (it only reverses `axis`).
    start = arr.shape[axis] - pad_amt[1]
    end = arr.shape[axis]
    sym_slice = tuple(slice(None) if i != axis else slice(start, end)
                      for (i, x) in enumerate(arr.shape))
    sym_chunk2 = arr[sym_slice][rev_idx]

    if pad_amt[1] == 1:
        sym_chunk2 = sym_chunk2.reshape(pad_singleton)

    # NOTE(review): no `pad_amt[1] > 0` guard here, unlike the prepend side;
    # harmless (empty chunk), but inconsistent.
    if 'odd' in method:
        edge_slice2 = tuple(slice(None) if i != axis else -1
                            for (i, x) in enumerate(arr.shape))
        edge_chunk = arr[edge_slice2].reshape(pad_singleton)
        sym_chunk2 = 2 * edge_chunk - sym_chunk2
        del edge_chunk

    # Concatenate `arr` with both chunks, extending along `axis`
    return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)
def _pad_wrap(arr, pad_amt, axis=-1):
    """
    Pad `axis` of `arr` via wrapping.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : tuple of ints, length 2
        Padding to (prepend, append) along `axis`.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
        values appended along `axis`. Both regions are padded wrapped values
        from the opposite end of `axis`.

    Notes
    -----
    This method of padding is also known as 'tile' or 'tiling'.

    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
    single function, lest the indexing tricks in non-integer multiples of the
    original shape would violate repetition in the final iteration.
    """
    # Nothing to do when neither side is padded.
    if pad_amt[0] == 0 and pad_amt[1] == 0:
        return arr

    # Target shape with `axis` collapsed to a singleton, used to normalize
    # a width-1 chunk's shape before concatenation.
    keepdims_shape = list(arr.shape)
    keepdims_shape[axis] = 1

    # The front padding wraps around from the tail of `arr` ...
    head_sel = [slice(None)] * arr.ndim
    head_sel[axis] = slice(arr.shape[axis] - pad_amt[0], arr.shape[axis])
    head_chunk = arr[tuple(head_sel)]
    if pad_amt[0] == 1:
        head_chunk = head_chunk.reshape(keepdims_shape)

    # ... and the back padding wraps around from the head of `arr`.
    tail_sel = [slice(None)] * arr.ndim
    tail_sel[axis] = slice(0, pad_amt[1])
    tail_chunk = arr[tuple(tail_sel)]
    if pad_amt[1] == 1:
        tail_chunk = tail_chunk.reshape(keepdims_shape)

    # Stitch the wrapped chunks onto both ends of `arr`.
    return np.concatenate((head_chunk, arr, tail_chunk), axis=axis)
def _normalize_shape(ndarray, shape, cast_to_int=True):
    """
    Private function which does some checks and normalizes the possibly
    much simpler representations of 'pad_width', 'stat_length',
    'constant_values', 'end_values'.

    Parameters
    ----------
    narray : ndarray
        Input ndarray
    shape : {sequence, array_like, float, int}, optional
        The width of padding (pad_width), the number of elements on the
        edge of the narray used for statistics (stat_length), the constant
        value(s) to use when filling padded regions (constant_values), or the
        endpoint target(s) for linear ramps (end_values).
        ((before_1, after_1), ... (before_N, after_N)) unique number of
        elements for each axis where `N` is rank of `narray`.
        ((before, after),) yields same before and after constants for each
        axis.
        (constant,) or val is a shortcut for before = after = constant for
        all axes.
    cast_to_int : bool, optional
        Controls if values in ``shape`` will be rounded and cast to int
        before being returned.

    Returns
    -------
    normalized_shape : tuple of tuples
        val => ((val, val), (val, val), ...)
        [[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...)
        ((val1, val2), (val3, val4), ...) => no change
        [[val1, val2], ] => ((val1, val2), (val1, val2), ...)
        ((val1, val2), ) => ((val1, val2), (val1, val2), ...)
        [[val , ], ] => ((val, val), (val, val), ...)
        ((val , ), ) => ((val, val), (val, val), ...)

    Raises
    ------
    ValueError
        If `shape` cannot be normalized to a (rank, 2) layout.
    """
    ndims = ndarray.ndim

    # Shortcut shape=None
    if shape is None:
        return ((None, None), ) * ndims

    # Convert any input `shape` to a NumPy array
    arr = np.asarray(shape)

    # Switch based on what input looks like
    if arr.ndim <= 1:
        if arr.shape == () or arr.shape == (1,):
            # Single scalar input
            # Create new array of ones, multiply by the scalar
            arr = np.ones((ndims, 2), dtype=ndarray.dtype) * arr
        elif arr.shape == (2,):
            # Apply padding (before, after) each axis
            # Create new axis 0, repeat along it for every axis
            arr = arr[np.newaxis, :].repeat(ndims, axis=0)
        else:
            fmt = "Unable to create correctly shaped tuple from %s"
            raise ValueError(fmt % (shape,))

    elif arr.ndim == 2:
        if arr.shape[1] == 1 and arr.shape[0] == ndims:
            # Padded before and after by the same amount
            arr = arr.repeat(2, axis=1)
        elif arr.shape[0] == ndims:
            # Input correctly formatted; keep the asarray()-converted copy.
            # BUG FIX: the original rebound `arr = shape` here, which made
            # the `arr.tolist()` call below raise AttributeError for plain
            # tuple/list input whenever `cast_to_int` is False (as pad()
            # does for 'constant_values' and 'end_values').
            pass
        else:
            fmt = "Unable to create correctly shaped tuple from %s"
            raise ValueError(fmt % (shape,))

    else:
        fmt = "Unable to create correctly shaped tuple from %s"
        raise ValueError(fmt % (shape,))

    # Cast if necessary
    if cast_to_int is True:
        arr = np.round(arr).astype(int)

    # Convert list of lists to tuple of tuples
    return tuple(tuple(axis) for axis in arr.tolist())
def _validate_lengths(narray, number_elements):
    """
    Private function which does some checks and reformats pad_width and
    stat_length using _normalize_shape.

    Parameters
    ----------
    narray : ndarray
        Input ndarray
    number_elements : {sequence, int}, optional
        The width of padding (pad_width) or the number of elements on the
        edge of the narray used for statistics (stat_length).
        ((before_1, after_1), ... (before_N, after_N)) unique number of
        elements for each axis.
        ((before, after),) yields same before and after constants for each
        axis.
        (constant,) or int is a shortcut for before = after = constant for
        all axes.

    Returns
    -------
    _validate_lengths : tuple of tuples
        int => ((int, int), (int, int), ...)
        [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)
        ((int1, int2), (int3, int4), ...) => no change
        [[int1, int2], ] => ((int1, int2), (int1, int2), ...)
        ((int1, int2), ) => ((int1, int2), (int1, int2), ...)
        [[int , ], ] => ((int, int), (int, int), ...)
        ((int , ), ) => ((int, int), (int, int), ...)

    Raises
    ------
    ValueError
        If any specified length is negative.
    """
    normshp = _normalize_shape(narray, number_elements)
    # None means "use the whole axis" and is acceptable; any explicit
    # negative value is not.
    for before, after in normshp:
        if any(val is not None and val < 0 for val in (before, after)):
            fmt = "%s cannot contain negative values."
            raise ValueError(fmt % (number_elements,))
    return normshp
###############################################################################
# Public functions
def pad(array, pad_width, mode=None, **kwargs):
    """
    Pads an array.

    Parameters
    ----------
    array : array_like of rank N
        Input array
    pad_width : {sequence, array_like, int}
        Number of values padded to the edges of each axis.
        ((before_1, after_1), ... (before_N, after_N)) unique pad widths
        for each axis.
        ((before, after),) yields same before and after pad for each axis.
        (pad,) or int is a shortcut for before = after = pad width for all
        axes.
    mode : str or function
        One of the following string values or a user supplied function.

        'constant'
            Pads with a constant value.
        'edge'
            Pads with the edge values of array.
        'linear_ramp'
            Pads with the linear ramp between end_value and the
            array edge value.
        'maximum'
            Pads with the maximum value of all or part of the
            vector along each axis.
        'mean'
            Pads with the mean value of all or part of the
            vector along each axis.
        'median'
            Pads with the median value of all or part of the
            vector along each axis.
        'minimum'
            Pads with the minimum value of all or part of the
            vector along each axis.
        'reflect'
            Pads with the reflection of the vector mirrored on
            the first and last values of the vector along each
            axis.
        'symmetric'
            Pads with the reflection of the vector mirrored
            along the edge of the array.
        'wrap'
            Pads with the wrap of the vector along the axis.
            The first values are used to pad the end and the
            end values are used to pad the beginning.
        <function>
            Padding function, see Notes.
    stat_length : sequence or int, optional
        Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
        values at edge of each axis used to calculate the statistic value.

        ((before_1, after_1), ... (before_N, after_N)) unique statistic
        lengths for each axis.

        ((before, after),) yields same before and after statistic lengths
        for each axis.

        (stat_length,) or int is a shortcut for before = after = statistic
        length for all axes.

        Default is ``None``, to use the entire axis.
    constant_values : sequence or int, optional
        Used in 'constant'. The values to set the padded values for each
        axis.

        ((before_1, after_1), ... (before_N, after_N)) unique pad constants
        for each axis.

        ((before, after),) yields same before and after constants for each
        axis.

        (constant,) or int is a shortcut for before = after = constant for
        all axes.

        Default is 0.
    end_values : sequence or int, optional
        Used in 'linear_ramp'. The values used for the ending value of the
        linear_ramp and that will form the edge of the padded array.

        ((before_1, after_1), ... (before_N, after_N)) unique end values
        for each axis.

        ((before, after),) yields same before and after end values for each
        axis.

        (constant,) or int is a shortcut for before = after = end value for
        all axes.

        Default is 0.
    reflect_type : {'even', 'odd'}, optional
        Used in 'reflect', and 'symmetric'. The 'even' style is the
        default with an unaltered reflection around the edge value. For
        the 'odd' style, the extented part of the array is created by
        subtracting the reflected values from two times the edge value.

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to `array` with shape increased
        according to `pad_width`.

    Notes
    -----
    .. versionadded:: 1.7.0

    For an array with rank greater than 1, some of the padding of later
    axes is calculated from padding of previous axes. This is easiest to
    think about with a rank 2 array where the corners of the padded array
    are calculated by using padded values from the first axis.

    The padding function, if used, should return a rank 1 array equal in
    length to the vector argument with padded values replaced. It has the
    following signature::

        padding_func(vector, iaxis_pad_width, iaxis, **kwargs)

    where

        vector : ndarray
            A rank 1 array already padded with zeros. Padded values are
            vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
        iaxis_pad_width : tuple
            A 2-tuple of ints, iaxis_pad_width[0] represents the number of
            values padded at the beginning of vector where
            iaxis_pad_width[1] represents the number of values padded at
            the end of vector.
        iaxis : int
            The axis currently being calculated.
        kwargs : misc
            Any keyword arguments the function requires.

    Examples
    --------
    >>> a = [1, 2, 3, 4, 5]
    >>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6))
    array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])

    >>> np.lib.pad(a, (2, 3), 'edge')
    array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])

    >>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
    array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])

    >>> np.lib.pad(a, (2,), 'maximum')
    array([5, 5, 1, 2, 3, 4, 5, 5, 5])

    >>> np.lib.pad(a, (2,), 'mean')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> np.lib.pad(a, (2,), 'median')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> a = [[1, 2], [3, 4]]
    >>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum')
    array([[1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [3, 3, 3, 4, 3, 3, 3],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1]])

    >>> a = [1, 2, 3, 4, 5]
    >>> np.lib.pad(a, (2, 3), 'reflect')
    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])

    >>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd')
    array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])

    >>> np.lib.pad(a, (2, 3), 'symmetric')
    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])

    >>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd')
    array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])

    >>> np.lib.pad(a, (2, 3), 'wrap')
    array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])

    >>> def padwithtens(vector, pad_width, iaxis, kwargs):
    ...     vector[:pad_width[0]] = 10
    ...     vector[-pad_width[1]:] = 10
    ...     return vector

    >>> a = np.arange(6)
    >>> a = a.reshape((2, 3))

    >>> np.lib.pad(a, 2, padwithtens)
    array([[10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10,  0,  1,  2, 10, 10],
           [10, 10,  3,  4,  5, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10]])
    """
    # Reject non-integral pad widths up front (floats, strings, ...).
    if not np.asarray(pad_width).dtype.kind == 'i':
        raise TypeError('`pad_width` must be of integral type.')

    # np.array (not asarray) so the caller's `array` is never mutated.
    narray = np.array(array)
    # Normalize pad_width to ((before_1, after_1), ..., (before_N, after_N)).
    pad_width = _validate_lengths(narray, pad_width)

    # Keyword arguments accepted by each string mode.
    allowedkwargs = {
        'constant': ['constant_values'],
        'edge': [],
        'linear_ramp': ['end_values'],
        'maximum': ['stat_length'],
        'mean': ['stat_length'],
        'median': ['stat_length'],
        'minimum': ['stat_length'],
        'reflect': ['reflect_type'],
        'symmetric': ['reflect_type'],
        'wrap': [],
    }

    kwdefaults = {
        'stat_length': None,
        'constant_values': 0,
        'end_values': 0,
        'reflect_type': 'even',
    }

    if isinstance(mode, str):
        # Make sure have allowed kwargs appropriate for mode
        for key in kwargs:
            if key not in allowedkwargs[mode]:
                raise ValueError('%s keyword not in allowed keywords %s' %
                                 (key, allowedkwargs[mode]))

        # Set kwarg defaults
        for kw in allowedkwargs[mode]:
            kwargs.setdefault(kw, kwdefaults[kw])

        # Need to only normalize particular keywords.
        for i in kwargs:
            if i == 'stat_length':
                kwargs[i] = _validate_lengths(narray, kwargs[i])
            if i in ['end_values', 'constant_values']:
                kwargs[i] = _normalize_shape(narray, kwargs[i],
                                             cast_to_int=False)
    elif mode is None:
        raise ValueError('Keyword "mode" must be a function or one of %s.' %
                         (list(allowedkwargs.keys()),))
    else:
        # Drop back to old, slower np.apply_along_axis mode for user-supplied
        # vector function
        function = mode

        # Create a new padded array
        rank = list(range(len(narray.shape)))
        total_dim_increase = [np.sum(pad_width[i]) for i in rank]
        offset_slices = [slice(pad_width[i][0],
                               pad_width[i][0] + narray.shape[i])
                         for i in rank]
        new_shape = np.array(narray.shape) + total_dim_increase
        newmat = np.zeros(new_shape, narray.dtype)

        # Insert the original array into the padded array
        # NOTE(review): indexing with a *list* of slices is deprecated in
        # modern NumPy (should be tuple(offset_slices)) — confirm against
        # the NumPy version this file targets.
        newmat[offset_slices] = narray

        # This is the core of pad ...
        for iaxis in rank:
            np.apply_along_axis(function,
                                iaxis,
                                newmat,
                                pad_width[iaxis],
                                iaxis,
                                kwargs)
        return newmat

    # If we get here, use new padding method
    newmat = narray.copy()

    # API preserved, but completely new algorithm which pads by building the
    # entire block to pad before/after `arr` with in one step, for each axis.
    if mode == 'constant':
        for axis, ((pad_before, pad_after), (before_val, after_val)) \
                in enumerate(zip(pad_width, kwargs['constant_values'])):
            newmat = _prepend_const(newmat, pad_before, before_val, axis)
            newmat = _append_const(newmat, pad_after, after_val, axis)

    elif mode == 'edge':
        for axis, (pad_before, pad_after) in enumerate(pad_width):
            newmat = _prepend_edge(newmat, pad_before, axis)
            newmat = _append_edge(newmat, pad_after, axis)

    elif mode == 'linear_ramp':
        for axis, ((pad_before, pad_after), (before_val, after_val)) \
                in enumerate(zip(pad_width, kwargs['end_values'])):
            newmat = _prepend_ramp(newmat, pad_before, before_val, axis)
            newmat = _append_ramp(newmat, pad_after, after_val, axis)

    elif mode == 'maximum':
        for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
                in enumerate(zip(pad_width, kwargs['stat_length'])):
            newmat = _prepend_max(newmat, pad_before, chunk_before, axis)
            newmat = _append_max(newmat, pad_after, chunk_after, axis)

    elif mode == 'mean':
        for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
                in enumerate(zip(pad_width, kwargs['stat_length'])):
            newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)
            newmat = _append_mean(newmat, pad_after, chunk_after, axis)

    elif mode == 'median':
        for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
                in enumerate(zip(pad_width, kwargs['stat_length'])):
            newmat = _prepend_med(newmat, pad_before, chunk_before, axis)
            newmat = _append_med(newmat, pad_after, chunk_after, axis)

    elif mode == 'minimum':
        for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
                in enumerate(zip(pad_width, kwargs['stat_length'])):
            newmat = _prepend_min(newmat, pad_before, chunk_before, axis)
            newmat = _append_min(newmat, pad_after, chunk_after, axis)

    elif mode == 'reflect':
        for axis, (pad_before, pad_after) in enumerate(pad_width):
            # Recursive padding along any axis where `pad_amt` is too large
            # for indexing tricks. We can only safely pad the original axis
            # length, to keep the period of the reflections consistent.
            if ((pad_before > 0) or
                    (pad_after > 0)) and newmat.shape[axis] == 1:
                # Extending singleton dimension for 'reflect' is legacy
                # behavior; it really should raise an error.
                newmat = _prepend_edge(newmat, pad_before, axis)
                newmat = _append_edge(newmat, pad_after, axis)
                continue

            method = kwargs['reflect_type']
            # 'reflect' excludes the edge, so only shape-1 values can be
            # mirrored per iteration.
            safe_pad = newmat.shape[axis] - 1
            while ((pad_before > safe_pad) or (pad_after > safe_pad)):
                pad_iter_b = min(safe_pad,
                                 safe_pad * (pad_before // safe_pad))
                pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
                newmat = _pad_ref(newmat, (pad_iter_b,
                                           pad_iter_a), method, axis)
                pad_before -= pad_iter_b
                pad_after -= pad_iter_a
                # Each iteration grows the axis, so more can be padded next.
                safe_pad += pad_iter_b + pad_iter_a
            newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)

    elif mode == 'symmetric':
        for axis, (pad_before, pad_after) in enumerate(pad_width):
            # Recursive padding along any axis where `pad_amt` is too large
            # for indexing tricks. We can only safely pad the original axis
            # length, to keep the period of the reflections consistent.
            method = kwargs['reflect_type']
            # 'symmetric' includes the edge, so the full axis length is safe.
            safe_pad = newmat.shape[axis]
            while ((pad_before > safe_pad) or
                   (pad_after > safe_pad)):
                pad_iter_b = min(safe_pad,
                                 safe_pad * (pad_before // safe_pad))
                pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
                newmat = _pad_sym(newmat, (pad_iter_b,
                                           pad_iter_a), method, axis)
                pad_before -= pad_iter_b
                pad_after -= pad_iter_a
                safe_pad += pad_iter_b + pad_iter_a
            newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)

    elif mode == 'wrap':
        for axis, (pad_before, pad_after) in enumerate(pad_width):
            # Recursive padding along any axis where `pad_amt` is too large
            # for indexing tricks. We can only safely pad the original axis
            # length, to keep the period of the reflections consistent.
            safe_pad = newmat.shape[axis]
            while ((pad_before > safe_pad) or
                   (pad_after > safe_pad)):
                pad_iter_b = min(safe_pad,
                                 safe_pad * (pad_before // safe_pad))
                pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
                newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)

                pad_before -= pad_iter_b
                pad_after -= pad_iter_a
                safe_pad += pad_iter_b + pad_iter_a
            newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)

    return newmat
| bsd-3-clause |
jaruba/chromium.src | tools/checkperms/checkperms.py | 53 | 13637 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add it to EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add it to
NOT_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base name with ambiguous state and that should not be checked for
shebang, add it to IGNORED_FILENAMES.
Any file not matching the above will be opened and looked if it has a shebang
or an ELF header. If this does not match the executable bit on the file, the
file will be flagged.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
import json
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####

# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
    'bat',
    'dll',
    'dylib',
    'exe',
)

# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
    'chrome/test/data/app_shim/app_shim_32_bit.app/contents/'
        'macos/app_mode_loader',
    'chrome/test/data/extensions/uitest/plugins/plugin.plugin/contents/'
        'macos/testnetscapeplugin',
    'chrome/test/data/extensions/uitest/plugins_private/plugin.plugin/contents/'
        'macos/testnetscapeplugin',
)

# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
    '1',
    '3ds',
    'S',
    'am',
    'applescript',
    'asm',
    'c',
    'cc',
    'cfg',
    'chromium',
    'cpp',
    'crx',
    'cs',
    'css',
    'cur',
    'def',
    'der',
    'expected',
    'gif',
    'grd',
    'gyp',
    'gypi',
    'h',
    'hh',
    'htm',
    'html',
    'hyph',
    'ico',
    'idl',
    'java',
    'jpg',
    'js',
    'json',
    'm',
    'm4',
    'mm',
    'mms',
    'mock-http-headers',
    'nexe',
    'nmf',
    'onc',
    'pat',
    'patch',
    'pdf',
    'pem',
    'plist',
    'png',
    'proto',
    'rc',
    'rfx',
    'rgs',
    'rules',
    'spec',
    'sql',
    'srpc',
    'svg',
    'tcl',
    'test',
    'tga',
    'txt',
    'vcproj',
    'vsprops',
    'webm',
    'word',
    'xib',
    'xml',
    'xtb',
    'zip',
)

# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
    'build/android/tests/symbolize/liba.so',
    'build/android/tests/symbolize/libb.so',
    'chrome/installer/mac/sign_app.sh.in',
    'chrome/installer/mac/sign_versioned_dir.sh.in',
    'chrome/test/data/extensions/uitest/plugins/plugin32.so',
    'chrome/test/data/extensions/uitest/plugins/plugin64.so',
    'chrome/test/data/extensions/uitest/plugins_private/plugin32.so',
    'chrome/test/data/extensions/uitest/plugins_private/plugin64.so',
    'components/test/data/component_updater/ihfokbkgjpifnbbojhneepfflplebdkc/'
        'ihfokbkgjpifnbbojhneepfflplebdkc_1/a_changing_binary_file',
    'components/test/data/component_updater/ihfokbkgjpifnbbojhneepfflplebdkc/'
        'ihfokbkgjpifnbbojhneepfflplebdkc_2/a_changing_binary_file',
    'courgette/testdata/elf-32-1',
    'courgette/testdata/elf-32-2',
    'courgette/testdata/elf-64',
)

# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
    'config.guess',
    'config.sub',
    'configure',
    'depcomp',
    'install-sh',
    'missing',
    'mkinstalldirs',
    'naclsdk',
    'scons',
)

# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
    'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
        '__init__.py',
    'out/',
    # TODO(maruel): Fix these.
    'third_party/android_testrunner/',
    'third_party/bintrees/',
    'third_party/closure_linter/',
    'third_party/devscripts/licensecheck.pl.vanilla',
    'third_party/hyphen/',
    'third_party/jemalloc/',
    'third_party/lcov-1.9/contrib/galaxy/conglomerate_functions.pl',
    'third_party/lcov-1.9/contrib/galaxy/gen_makefile.sh',
    'third_party/lcov/contrib/galaxy/conglomerate_functions.pl',
    'third_party/lcov/contrib/galaxy/gen_makefile.sh',
    'third_party/libevent/autogen.sh',
    'third_party/libevent/test/test.sh',
    'third_party/libxml/linux/xml2-config',
    'third_party/libxml/src/ltmain.sh',
    'third_party/mesa/',
    'third_party/protobuf/',
    'third_party/python_gflags/gflags.py',
    'third_party/sqlite/',
    'third_party/talloc/script/mksyms.sh',
    'third_party/tcmalloc/',
    'third_party/tlslite/setup.py',
)

#### USER EDITABLE SECTION ENDS HERE ####

# Import-time sanity checks: the "must" lists must not contradict each other.
assert set(EXECUTABLE_EXTENSIONS) & set(NON_EXECUTABLE_EXTENSIONS) == set()
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()

# The path lists are documented as lowercase with Unix separators; enforce it
# so lookups against lowered relative paths cannot silently miss.
VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
    assert all([set(path).issubset(VALID_CHARS) for path in paths])
def capture(cmd, cwd):
  """Run *cmd* inside *cwd* and return its stdout.

  The exit code and anything printed to stderr are deliberately discarded.
  LANGUAGE is forced to en_US.UTF-8 so tool output is parseable.
  """
  logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
  child_env = dict(os.environ, LANGUAGE='en_US.UTF-8')
  child = subprocess.Popen(cmd,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           cwd=cwd,
                           env=child_env)
  stdout, _ = child.communicate()
  return stdout
def get_git_root(dir_path):
  """Return the top of the git checkout containing *dir_path*.

  Returns None when *dir_path* is not inside a git checkout (git prints
  nothing in that case, so the stripped output is empty).
  """
  top = capture(['git', 'rev-parse', '--show-toplevel'], dir_path).strip()
  return top if top else None
def is_ignored(rel_path):
  """Returns True if rel_path is in our whitelist of files to ignore.

  Matching is case-insensitive: the path is lower-cased once up front and
  then compared against IGNORED_FILENAMES (by basename) and against the
  IGNORED_PATHS prefixes (which are all lower-case by convention).
  """
  rel_path = rel_path.lower()
  # rel_path is already lower-case here; the previous code lower-cased it a
  # second time before the prefix test, which was redundant.
  return (
      os.path.basename(rel_path) in IGNORED_FILENAMES or
      rel_path.startswith(IGNORED_PATHS))
def must_be_executable(rel_path):
  """True when the file's name marks a type that requires the executable bit."""
  extension = os.path.splitext(rel_path)[1][1:]
  return (extension in EXECUTABLE_EXTENSIONS or
          rel_path.lower() in EXECUTABLE_PATHS)
def must_not_be_executable(rel_path):
  """True when the file's name marks a type that forbids the executable bit."""
  extension = os.path.splitext(rel_path)[1][1:]
  return (extension in NON_EXECUTABLE_EXTENSIONS or
          rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
  """Return True when any of the user/group/other executable bits is set."""
  mode = os.stat(full_path).st_mode
  exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
  return bool(mode & exec_mask)
def has_shebang_or_is_elf(full_path):
  """Returns a (shebang, elf) pair of booleans for the file.

  full_path is the absolute path to the file.  Only the first 4 bytes are
  read: a '#!/' or '#! /' prefix marks a shebang script, and the
  0x7f 'ELF' magic marks an ELF binary.
  """
  with open(full_path, 'rb') as f:
    data = f.read(4)
    # Compare against byte strings: the file is opened in binary mode, and a
    # bytes-vs-str comparison is always False on Python 3, which made this
    # function report (False, False) for everything there.  b'' literals
    # behave identically on Python 2, so this stays backward-compatible.
    return (data[:3] == b'#!/' or data == b'#! /', data == b'\x7fELF')
def check_file(root_path, rel_path):
  """Checks the permissions of the file whose path is root_path + rel_path and
  returns an error if it is inconsistent. Returns None on success.
  It is assumed that the file is not ignored by is_ignored().
  If the file name is matched with must_be_executable() or
  must_not_be_executable(), only its executable bit is checked.
  Otherwise, the first few bytes of the file are read to verify if it has a
  shebang or ELF header and compares this with the executable bit on the file.
  """
  full_path = os.path.join(root_path, rel_path)
  # All error records share the same shape; build them in one place.
  def result_dict(error):
    return {
      'error': error,
      'full_path': full_path,
      'rel_path': rel_path,
    }
  try:
    bit = has_executable_bit(full_path)
  except OSError:
    # It's faster to catch exception than call os.path.islink(). Chromium
    # tree happens to have invalid symlinks under
    # third_party/openssl/openssl/test/.
    return None
  # Name-based rules win over content sniffing and short-circuit the check.
  if must_be_executable(rel_path):
    if not bit:
      return result_dict('Must have executable bit set')
    return
  if must_not_be_executable(rel_path):
    if bit:
      return result_dict('Must not have executable bit set')
    return
  # For the others, it depends on the file header.
  (shebang, elf) = has_shebang_or_is_elf(full_path)
  # The executable bit must be set exactly when the content looks runnable;
  # any mismatch yields a specific error message (implicit None otherwise).
  if bit != (shebang or elf):
    if bit:
      return result_dict('Has executable bit but not shebang or ELF header')
    if shebang:
      return result_dict('Has shebang but not executable bit')
    return result_dict('Has ELF header but not executable bit')
def check_files(root, files):
  """Check every non-ignored path in *files* (relative to *root*).

  Returns the non-None error dicts produced by check_file().
  """
  candidates = (f for f in files if not is_ignored(f))
  return filter(None, (check_file(root, f) for f in candidates))
class ApiBase(object):
  """Recursive permission checker rooted at root_dir.

  Keeps running totals: 'count' is the number of files inspected and
  'count_read_header' the number whose first bytes had to be read.
  """

  def __init__(self, root_dir, bare_output):
    self.root_dir = root_dir
    self.bare_output = bare_output
    self.count = 0
    self.count_read_header = 0

  def check_file(self, rel_path):
    logging.debug('check_file(%s)' % rel_path)
    self.count += 1
    # Files matched purely by name never need their header read.
    matched_by_name = (must_be_executable(rel_path) or
                       must_not_be_executable(rel_path))
    if not matched_by_name:
      self.count_read_header += 1
    return check_file(self.root_dir, rel_path)

  def check_dir(self, rel_path):
    return self.check(rel_path)

  def check(self, start_dir):
    """Check the files in start_dir and, depth first, its subdirectories."""
    found_errors = []
    entries = self.list_dir(start_dir)
    logging.info('check(%s) -> %d' % (start_dir, len(entries)))
    for entry in entries:
      full_path = os.path.join(self.root_dir, start_dir, entry)
      rel_path = full_path[len(self.root_dir) + 1:]
      if is_ignored(rel_path):
        continue
      if os.path.isdir(full_path):
        # Depth first.
        found_errors.extend(self.check_dir(rel_path))
        continue
      failure = self.check_file(rel_path)
      if failure:
        found_errors.append(failure)
    return found_errors

  def list_dir(self, start_dir):
    """Return the sorted, non-hidden entries directly under start_dir."""
    target = os.path.join(self.root_dir, start_dir)
    return sorted(name for name in os.listdir(target)
                  if not name.startswith('.'))
class ApiAllFilesAtOnceBase(ApiBase):
  # Lazily-filled, sorted cache of every file below root_dir, shared by all
  # list_dir() calls; populated by the subclass hook _get_all_files().
  _files = None
  def list_dir(self, start_dir):
    """Lists all the files and directory inside start_dir."""
    if self._files is None:
      self._files = sorted(self._get_all_files())
      if not self.bare_output:
        print 'Found %s files' % len(self._files)
    # Turn the absolute start_dir into a prefix relative to root_dir.
    # NOTE(review): this assumes start_dir is an absolute path under
    # root_dir (as main() passes via api.root_dir) -- confirm for callers
    # that pass a relative path.
    start_dir = start_dir[len(self.root_dir) + 1:]
    return [
      x[len(start_dir):] for x in self._files if x.startswith(start_dir)
    ]
  def _get_all_files(self):
    """Lists all the files and directory inside self._root_dir."""
    raise NotImplementedError()
class ApiGit(ApiAllFilesAtOnceBase):
  """SCM-aware checker that asks git for the tracked file list."""

  def _get_all_files(self):
    # 'git ls-files' prints one tracked path per line, relative to root_dir.
    listing = capture(['git', 'ls-files'], cwd=self.root_dir)
    return listing.splitlines()
def get_scm(dir_path, bare):
  """Build the checker appropriate for dir_path.

  Prefers a git-aware checker when dir_path lives inside a git checkout,
  and falls back to a plain directory walker otherwise.
  """
  fallback_dir = os.getcwd()
  git_root = get_git_root(dir_path or fallback_dir)
  if not git_root:
    # Returns a non-scm aware checker.
    if not bare:
      print('Failed to determine the SCM for %s' % dir_path)
    return ApiBase(dir_path or fallback_dir, bare)
  if not bare:
    print('Found git repository at %s' % git_root)
  return ApiGit(dir_path or git_root, bare)
def main():
  """Command-line entry point: parse options, run the checks, report."""
  usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python %prog
python %prog --root /path/to/source chrome"""
  parser = optparse.OptionParser(usage=usage)
  parser.add_option(
      '--root',
      help='Specifies the repository root. This defaults '
      'to the checkout repository root')
  parser.add_option(
      '-v', '--verbose', action='count', default=0, help='Print debug logging')
  parser.add_option(
      '--bare',
      action='store_true',
      default=False,
      help='Prints the bare filename triggering the checks')
  parser.add_option(
      '--file', action='append', dest='files',
      help='Specifics a list of files to check the permissions of. Only these '
      'files will be checked')
  parser.add_option('--json', help='Path to JSON output file')
  options, args = parser.parse_args()
  # -v raises verbosity one level per occurrence, capped at DEBUG.
  levels = [logging.ERROR, logging.INFO, logging.DEBUG]
  logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])
  if len(args) > 1:
    parser.error('Too many arguments used')
  if options.root:
    options.root = os.path.abspath(options.root)
  if options.files:
    # --file implies --bare (for PRESUBMIT.py).
    options.bare = True
    errors = check_files(options.root, options.files)
  else:
    api = get_scm(options.root, options.bare)
    start_dir = args[0] if args else api.root_dir
    errors = api.check(start_dir)
    if not options.bare:
      # NOTE(review): "where tested" is a typo for "were tested" in this
      # user-visible message; left untouched here.
      print('Processed %s files, %d files where tested for shebang/ELF '
            'header' % (api.count, api.count_read_header))
  if options.json:
    with open(options.json, 'w') as f:
      json.dump(errors, f)
  if errors:
    if options.bare:
      print '\n'.join(e['full_path'] for e in errors)
    else:
      print '\nFAILED\n'
      print '\n'.join('%s: %s' % (e['full_path'], e['error']) for e in errors)
    return 1
  if not options.bare:
    print '\nSUCCESS\n'
  return 0
if '__main__' == __name__:
  sys.exit(main())
| bsd-3-clause |
sertac/django | tests/template_tests/syntax_tests/test_if_equal.py | 368 | 9892 | from django.test import SimpleTestCase
from ..utils import setup
class IfEqualTagTests(SimpleTestCase):
    """Rendering tests for the {% ifequal %} template tag.

    Covers plain variable/literal comparisons, smart-split quoting of
    literals, numeric literal resolution, and filter expressions used as
    arguments.
    """

    @setup({'ifequal01': '{% ifequal a b %}yes{% endifequal %}'})
    def test_ifequal01(self):
        rendered = self.engine.render_to_string('ifequal01', {'a': 1, 'b': 2})
        self.assertEqual(rendered, '')

    @setup({'ifequal02': '{% ifequal a b %}yes{% endifequal %}'})
    def test_ifequal02(self):
        rendered = self.engine.render_to_string('ifequal02', {'a': 1, 'b': 1})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal03': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
    def test_ifequal03(self):
        rendered = self.engine.render_to_string('ifequal03', {'a': 1, 'b': 2})
        self.assertEqual(rendered, 'no')

    @setup({'ifequal04': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
    def test_ifequal04(self):
        rendered = self.engine.render_to_string('ifequal04', {'a': 1, 'b': 1})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal05': '{% ifequal a \'test\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal05(self):
        rendered = self.engine.render_to_string('ifequal05', {'a': 'test'})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal06': '{% ifequal a \'test\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal06(self):
        rendered = self.engine.render_to_string('ifequal06', {'a': 'no'})
        self.assertEqual(rendered, 'no')

    @setup({'ifequal07': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal07(self):
        rendered = self.engine.render_to_string('ifequal07', {'a': 'test'})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal08': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal08(self):
        rendered = self.engine.render_to_string('ifequal08', {'a': 'no'})
        self.assertEqual(rendered, 'no')

    @setup({'ifequal09': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal09(self):
        rendered = self.engine.render_to_string('ifequal09')
        self.assertEqual(rendered, 'no')

    @setup({'ifequal10': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
    def test_ifequal10(self):
        rendered = self.engine.render_to_string('ifequal10')
        self.assertEqual(rendered, 'yes')

    # SMART SPLITTING
    @setup({'ifequal-split01': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split01(self):
        rendered = self.engine.render_to_string('ifequal-split01')
        self.assertEqual(rendered, 'no')

    @setup({'ifequal-split02': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split02(self):
        rendered = self.engine.render_to_string('ifequal-split02', {'a': 'foo'})
        self.assertEqual(rendered, 'no')

    @setup({'ifequal-split03': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split03(self):
        rendered = self.engine.render_to_string('ifequal-split03', {'a': 'test man'})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-split04': '{% ifequal a \'test man\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split04(self):
        rendered = self.engine.render_to_string('ifequal-split04', {'a': 'test man'})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-split05': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split05(self):
        rendered = self.engine.render_to_string('ifequal-split05', {'a': ''})
        self.assertEqual(rendered, 'no')

    @setup({'ifequal-split06': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split06(self):
        rendered = self.engine.render_to_string('ifequal-split06', {'a': 'i "love" you'})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-split07': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split07(self):
        rendered = self.engine.render_to_string('ifequal-split07', {'a': 'i love you'})
        self.assertEqual(rendered, 'no')

    @setup({'ifequal-split08': r"{% ifequal a 'I\'m happy' %}yes{% else %}no{% endifequal %}"})
    def test_ifequal_split08(self):
        rendered = self.engine.render_to_string('ifequal-split08', {'a': "I'm happy"})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-split09': r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}"})
    def test_ifequal_split09(self):
        rendered = self.engine.render_to_string('ifequal-split09', {'a': 'slash\man'})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-split10': r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}"})
    def test_ifequal_split10(self):
        rendered = self.engine.render_to_string('ifequal-split10', {'a': 'slashman'})
        self.assertEqual(rendered, 'no')

    # NUMERIC RESOLUTION
    @setup({'ifequal-numeric01': '{% ifequal x 5 %}yes{% endifequal %}'})
    def test_ifequal_numeric01(self):
        rendered = self.engine.render_to_string('ifequal-numeric01', {'x': '5'})
        self.assertEqual(rendered, '')

    @setup({'ifequal-numeric02': '{% ifequal x 5 %}yes{% endifequal %}'})
    def test_ifequal_numeric02(self):
        rendered = self.engine.render_to_string('ifequal-numeric02', {'x': 5})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-numeric03': '{% ifequal x 5.2 %}yes{% endifequal %}'})
    def test_ifequal_numeric03(self):
        rendered = self.engine.render_to_string('ifequal-numeric03', {'x': 5})
        self.assertEqual(rendered, '')

    @setup({'ifequal-numeric04': '{% ifequal x 5.2 %}yes{% endifequal %}'})
    def test_ifequal_numeric04(self):
        rendered = self.engine.render_to_string('ifequal-numeric04', {'x': 5.2})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-numeric05': '{% ifequal x 0.2 %}yes{% endifequal %}'})
    def test_ifequal_numeric05(self):
        rendered = self.engine.render_to_string('ifequal-numeric05', {'x': 0.2})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-numeric06': '{% ifequal x .2 %}yes{% endifequal %}'})
    def test_ifequal_numeric06(self):
        rendered = self.engine.render_to_string('ifequal-numeric06', {'x': 0.2})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-numeric07': '{% ifequal x 2. %}yes{% endifequal %}'})
    def test_ifequal_numeric07(self):
        rendered = self.engine.render_to_string('ifequal-numeric07', {'x': 2})
        self.assertEqual(rendered, '')

    @setup({'ifequal-numeric08': '{% ifequal x "5" %}yes{% endifequal %}'})
    def test_ifequal_numeric08(self):
        rendered = self.engine.render_to_string('ifequal-numeric08', {'x': 5})
        self.assertEqual(rendered, '')

    @setup({'ifequal-numeric09': '{% ifequal x "5" %}yes{% endifequal %}'})
    def test_ifequal_numeric09(self):
        rendered = self.engine.render_to_string('ifequal-numeric09', {'x': '5'})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-numeric10': '{% ifequal x -5 %}yes{% endifequal %}'})
    def test_ifequal_numeric10(self):
        rendered = self.engine.render_to_string('ifequal-numeric10', {'x': -5})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-numeric11': '{% ifequal x -5.2 %}yes{% endifequal %}'})
    def test_ifequal_numeric11(self):
        rendered = self.engine.render_to_string('ifequal-numeric11', {'x': -5.2})
        self.assertEqual(rendered, 'yes')

    @setup({'ifequal-numeric12': '{% ifequal x +5 %}yes{% endifequal %}'})
    def test_ifequal_numeric12(self):
        rendered = self.engine.render_to_string('ifequal-numeric12', {'x': 5})
        self.assertEqual(rendered, 'yes')

    # FILTER EXPRESSIONS AS ARGUMENTS
    @setup({'ifequal-filter01': '{% ifequal a|upper "A" %}x{% endifequal %}'})
    def test_ifequal_filter01(self):
        rendered = self.engine.render_to_string('ifequal-filter01', {'a': 'a'})
        self.assertEqual(rendered, 'x')

    @setup({'ifequal-filter02': '{% ifequal "A" a|upper %}x{% endifequal %}'})
    def test_ifequal_filter02(self):
        rendered = self.engine.render_to_string('ifequal-filter02', {'a': 'a'})
        self.assertEqual(rendered, 'x')

    @setup({'ifequal-filter03': '{% ifequal a|upper b|upper %}x{% endifequal %}'})
    def test_ifequal_filter03(self):
        rendered = self.engine.render_to_string('ifequal-filter03', {'a': 'x', 'b': 'X'})
        self.assertEqual(rendered, 'x')

    @setup({'ifequal-filter04': '{% ifequal x|slice:"1" "a" %}x{% endifequal %}'})
    def test_ifequal_filter04(self):
        rendered = self.engine.render_to_string('ifequal-filter04', {'x': 'aaa'})
        self.assertEqual(rendered, 'x')

    @setup({'ifequal-filter05': '{% ifequal x|slice:"1"|upper "A" %}x{% endifequal %}'})
    def test_ifequal_filter05(self):
        rendered = self.engine.render_to_string('ifequal-filter05', {'x': 'aaa'})
        self.assertEqual(rendered, 'x')
class IfNotEqualTagTests(SimpleTestCase):
    """Rendering tests for the {% ifnotequal %} template tag."""

    @setup({'ifnotequal01': '{% ifnotequal a b %}yes{% endifnotequal %}'})
    def test_ifnotequal01(self):
        rendered = self.engine.render_to_string('ifnotequal01', {'a': 1, 'b': 2})
        self.assertEqual(rendered, 'yes')

    @setup({'ifnotequal02': '{% ifnotequal a b %}yes{% endifnotequal %}'})
    def test_ifnotequal02(self):
        rendered = self.engine.render_to_string('ifnotequal02', {'a': 1, 'b': 1})
        self.assertEqual(rendered, '')

    @setup({'ifnotequal03': '{% ifnotequal a b %}yes{% else %}no{% endifnotequal %}'})
    def test_ifnotequal03(self):
        rendered = self.engine.render_to_string('ifnotequal03', {'a': 1, 'b': 2})
        self.assertEqual(rendered, 'yes')

    @setup({'ifnotequal04': '{% ifnotequal a b %}yes{% else %}no{% endifnotequal %}'})
    def test_ifnotequal04(self):
        rendered = self.engine.render_to_string('ifnotequal04', {'a': 1, 'b': 1})
        self.assertEqual(rendered, 'no')
| bsd-3-clause |
tombstone/models | research/object_detection/legacy/eval.py | 2 | 5462 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Evaluation executable for detection models.
This executable is used to evaluate DetectionModels. There are two ways of
configuring the eval job.
1) A single pipeline_pb2.TrainEvalPipelineConfig file may be specified.
In this mode, the --eval_training_data flag may be given to force the pipeline
to evaluate on training data instead.
Example usage:
./eval \
--logtostderr \
--checkpoint_dir=path/to/checkpoint_dir \
--eval_dir=path/to/eval_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files may be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being evaluated, an
input_reader_pb2.InputReader file to specify what data the model is evaluating
and an eval_pb2.EvalConfig file to configure evaluation parameters.
Example usage:
./eval \
--logtostderr \
--checkpoint_dir=path/to/checkpoint_dir \
--eval_dir=path/to/eval_dir \
--eval_config_path=eval_config.pbtxt \
--model_config_path=model_config.pbtxt \
--input_config_path=eval_input_config.pbtxt
"""
import functools
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.util.deprecation import deprecated
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.legacy import evaluator
from object_detection.utils import config_util
from object_detection.utils import label_map_util
tf.logging.set_verbosity(tf.logging.INFO)
# Command-line flags; FLAGS below is the parsed singleton read by main().
flags = tf.app.flags
flags.DEFINE_boolean('eval_training_data', False,
                     'If training data should be evaluated for this job.')
flags.DEFINE_string(
    'checkpoint_dir', '',
    'Directory containing checkpoints to evaluate, typically '
    'set to `train_dir` used in the training job.')
flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.')
flags.DEFINE_string(
    'pipeline_config_path', '',
    'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
    'file. If provided, other configs are ignored')
flags.DEFINE_string('eval_config_path', '',
                    'Path to an eval_pb2.EvalConfig config file.')
flags.DEFINE_string('input_config_path', '',
                    'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
                    'Path to a model_pb2.DetectionModel config file.')
flags.DEFINE_boolean(
    'run_once', False, 'Option to only run a single pass of '
    'evaluation. Overrides the `max_evals` parameter in the '
    'provided config.')
FLAGS = flags.FLAGS
@deprecated(None, 'Use object_detection/model_main.py.')
def main(unused_argv):
  """Entry point: load configs, build the input pipeline and model, evaluate."""
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    # Single-file configuration; a copy is kept next to the eval results.
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(
        FLAGS.pipeline_config_path,
        os.path.join(FLAGS.eval_dir, 'pipeline.config'),
        overwrite=True)
  else:
    # Split configuration: model/eval/input come from three separate files,
    # each copied into eval_dir for reproducibility.
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True)
  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    # Evaluate on the training split instead of the eval split.
    input_config = configs['train_input_config']
  model_fn = functools.partial(
      model_builder.build, model_config=model_config, is_training=False)
  def get_next(config):
    # Builds the dataset iterator lazily so graph construction happens in
    # the evaluator's graph.
    return dataset_builder.make_initializable_iterator(
        dataset_builder.build(config)).get_next()
  create_input_dict_fn = functools.partial(get_next, input_config)
  categories = label_map_util.create_categories_from_labelmap(
      input_config.label_map_path)
  if FLAGS.run_once:
    eval_config.max_evals = 1
  graph_rewriter_fn = None
  if 'graph_rewriter_config' in configs:
    graph_rewriter_fn = graph_rewriter_builder.build(
        configs['graph_rewriter_config'], is_training=False)
  evaluator.evaluate(
      create_input_dict_fn,
      model_fn,
      eval_config,
      categories,
      FLAGS.checkpoint_dir,
      FLAGS.eval_dir,
      graph_hook_fn=graph_rewriter_fn)
if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
Jarn/jarn.mkrelease | jarn/mkrelease/configparser.py | 2 | 6696 | import sys
import re
if sys.version_info[:2] >= (3, 2):
from configparser import Error
from configparser import MissingSectionHeaderError
from configparser import ConfigParser as _BaseParser
elif sys.version_info[0] >= 3:
from configparser import Error
from configparser import MissingSectionHeaderError
from configparser import SafeConfigParser as _BaseParser
else:
from ConfigParser import Error
from ConfigParser import MissingSectionHeaderError
from ConfigParser import SafeConfigParser as _BaseParser
class MultipleValueError(Error):
    """Raised when a single-valued option unexpectedly holds several values."""
class errors2warnings(object):
    """Context manager converting ConfigParser.Error into parser warnings.

    Any Error raised inside the block is swallowed and forwarded to
    parser.warn(); every other exception propagates untouched.
    """

    def __init__(self, parser):
        self.parser = parser

    def __enter__(self):
        return None

    def __exit__(self, type, value, tb):
        if isinstance(value, MissingSectionHeaderError):
            # Rewrite the message into a friendlier one-liner first.
            self._reformat_exception(value)
        if not isinstance(value, Error):
            return None  # not a parser error: let it propagate
        self.parser.warn(str(value))
        return True  # swallow the exception

    def _reformat_exception(self, value):
        # configparser renamed the 'filename' attribute to 'source' in 3.2.
        origin = value.source if sys.version_info >= (3, 2) else value.filename
        value.message = 'File contains no section headers: %r\n\t[line %2d]: %r' % (
            origin,
            value.lineno,
            value.line)
class ConfigParser(object):
    """Forgiving wrapper around the stdlib parser.

    Parse errors are collected as warnings instead of raised, typed
    accessors (getboolean, getint, ...) warn and fall back to a default on
    bad values, and option names are normalized to lower-case with '-'
    replaced by '_'.  Accessors only yield data when the last read()
    produced no warnings.
    """
    def __init__(self, warn_func=None, raw=True):
        self.warnings = []
        self.warn_func = warn_func
        self.raw = raw
        self._valid = False
        self._base = _BaseParser()
        # Normalize option names: case-insensitive, '-' and '_' equivalent.
        self._base.optionxform = lambda x: x.lower().replace('-', '_')
        # Python < 3.2
        if hasattr(self._base, '_boolean_states'):
            self._base.BOOLEAN_STATES = self._base._boolean_states
    def warn(self, msg):
        """Record msg and forward it to warn_func when one was given."""
        self.warnings.append(msg)
        if self.warn_func is not None:
            self.warn_func(msg)
    def read(self, filenames):
        """Read and parse filenames; True when no warnings were produced."""
        self.warnings = []
        with errors2warnings(self):
            self._base.read(filenames)
        self._valid = not self.warnings
        return self._valid
    def has_section(self, section):
        return self._base.has_section(section) and self._valid
    def has_option(self, section, option):
        return self._base.has_option(section, option) and self._valid
    def sections(self, default=None):
        return self._base.sections() if self._valid else default
    def options(self, section, default=None):
        return self._base.options(section) if self._valid else default
    def items(self, section, default=None):
        """Return the section's (option, value) pairs, or default."""
        if self.has_section(section):
            with errors2warnings(self):
                value = self._base.items(section, raw=self.raw)
            return value
        return default
    def get(self, section, option, default=None):
        """Return the raw string value of option, or default."""
        if self.has_option(section, option):
            with errors2warnings(self):
                value = self._base.get(section, option, raw=self.raw)
            return value
        return default
    def getlist(self, section, option, default=None):
        """Return the value split on commas/whitespace, or default."""
        if self.has_option(section, option):
            with errors2warnings(self):
                value = self._base.get(section, option, raw=self.raw)
            return self.to_list(value)
        return default
    def getstring(self, section, option, default=None):
        """Return the value as a single word; warns on multiple values."""
        if self.has_option(section, option):
            with errors2warnings(self):
                value = self._base.get(section, option, raw=self.raw)
            try:
                return self.to_string(value)
            except MultipleValueError as e:
                self.warn("Multiple values not allowed: %s = %r" % (option, self._value_from_exc(e)))
        return default
    def getboolean(self, section, option, default=None):
        """Return the value as a bool; warns and falls back on bad input."""
        if self.has_option(section, option):
            with errors2warnings(self):
                value = self._base.get(section, option, raw=self.raw)
            try:
                return self.to_boolean(value)
            except MultipleValueError as e:
                self.warn("Multiple values not allowed: %s = %r" % (option, self._value_from_exc(e)))
            except ValueError as e:
                self.warn('Not a boolean: %s = %r' % (option, self._value_from_exc(e)))
        return default
    def getint(self, section, option, default=None):
        """Return the value as an int; warns and falls back on bad input."""
        if self.has_option(section, option):
            with errors2warnings(self):
                value = self._base.get(section, option, raw=self.raw)
            try:
                return self.to_int(value)
            except MultipleValueError as e:
                self.warn('Multiple values not allowed: %s = %r' % (option, self._value_from_exc(e)))
            except ValueError as e:
                self.warn('Not an integer: %s = %r' % (option, self._value_from_exc(e)))
        return default
    def getfloat(self, section, option, default=None):
        """Return the value as a float; warns and falls back on bad input."""
        if self.has_option(section, option):
            with errors2warnings(self):
                value = self._base.get(section, option, raw=self.raw)
            try:
                return self.to_float(value)
            except MultipleValueError as e:
                self.warn('Multiple values not allowed: %s = %r' % (option, self._value_from_exc(e)))
            except ValueError as e:
                self.warn('Not a float: %s = %r' % (option, self._value_from_exc(e)))
        return default
    def to_list(self, value):
        # Split on commas (with optional trailing space) or runs of spaces.
        v = re.split(r',\s*|\s+', value)
        return [x for x in v if x]
    def to_string(self, value):
        v = self._single_value(value)
        return v
    def to_boolean(self, value):
        v = self._single_value(value).lower()
        if v not in self._base.BOOLEAN_STATES:
            raise ValueError('Not a boolean: %s' % v)
        return self._base.BOOLEAN_STATES[v]
    def to_int(self, value):
        v = self._single_value(value)
        return int(v)
    def to_float(self, value):
        v = self._single_value(value)
        return float(v)
    def _single_value(self, value):
        """Strip value; raise MultipleValueError if it contains whitespace."""
        v = value.strip()
        if len(v.split()) > 1:
            raise MultipleValueError('Multiple values not allowed: %s' % v)
        return v
    def _value_from_exc(self, exc):
        """Extract the offending value from an exception message."""
        # e.g.: invalid literal for int() with base 10: 'a'
        msg = str(exc)
        colon = msg.find(':')
        if colon >= 0:
            value = msg[colon+1:].lstrip()
            if (value.startswith("'") and value.endswith("'")) or \
               (value.startswith('"') and value.endswith('"')):
                value = value[1:-1]
            return value
        return ''
| bsd-2-clause |
YixiaoLi/binutils-srec64 | gdb/contrib/excheck.py | 46 | 11670 | # Copyright 2011-2015 Free Software Foundation, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
# This is a GCC plugin that computes some exception-handling data for
# gdb. This data can then be summarized and checked by the
# exsummary.py script.
# To use:
# * First, install the GCC Python plugin. See
# https://fedorahosted.org/gcc-python-plugin/
# * export PYTHON_PLUGIN=/full/path/to/plugin/directory
# This should be the directory holding "python.so".
# * cd build/gdb; make mostlyclean
# * make CC=.../gcc-with-excheck
# This will write a number of .py files in the build directory.
# * python .../exsummary.py
# This will show the violations.
import gcc
import gccutils
import sys
# Where our output goes.
# NOTE(review): presumably opened by plugin setup code outside this chunk
# and closed by close_output() -- confirm against the full file.
output_file = None
# The dicts below are used as sets: only key membership matters.
# Cleanup functions require special treatment, because they take a
# function argument, but in theory the function must be nothrow.
cleanup_functions = {
    'make_cleanup': 1,
    'make_cleanup_dtor': 1,
    'make_final_cleanup': 1,
    'make_my_cleanup2': 1,
    'make_my_cleanup': 1
}
# Functions which may throw but which we want to ignore.
ignore_functions = {
    # This one is super special.
    'exceptions_state_mc': 1,
    # gdb generally pretends that internal_error cannot throw, even
    # though it can.
    'internal_error': 1,
    # do_cleanups and friends are supposedly nothrow but we don't want
    # to run afoul of the indirect function call logic.
    'do_cleanups': 1,
    'do_final_cleanups': 1
}
# Functions which take a function argument, but which are not
# interesting, usually because the argument is not called in the
# current context.
non_passthrough_functions = {
    'signal': 1,
    'add_internal_function': 1
}
def type_is_pythonic(t):
    """Return True when the gcc type T (or its element type, for arrays)
    is a record type coming from the Python headers."""
    # Arrays of Python structs count too; inspect the element type.
    if isinstance(t, gcc.ArrayType):
        t = t.type
    if isinstance(t, gcc.RecordType):
        # Hack: CPython structs are all spelled 'struct Py...'.
        return str(t).startswith('struct Py')
    return False
def examine_struct_fields(initializer):
    """Emit declare_nothrow() for every function stored in the fields of a
    struct initializer.  No recursion is needed for the structs we scan."""
    global output_file
    for field, value in initializer.elements:
        if not isinstance(field, gcc.Declaration):
            continue
        # Unwrap '&function' down to the function declaration itself.
        if isinstance(value, gcc.AddrExpr):
            value = value.operand
        if isinstance(value, gcc.FunctionDecl):
            output_file.write("declare_nothrow(%s)\n"
                              % repr(str(value.name)))
def examine_globals():
    """Scan all global variables for structs of Python-defined types and
    record the function pointers stored in their initializers."""
    global output_file
    for variable in gcc.get_variables():
        decl = variable.decl
        if not isinstance(decl, gcc.VarDecl):
            continue
        output_file.write("################\n")
        output_file.write("# Analysis for %s\n" % decl.name)
        if not decl.initial:
            continue
        if not type_is_pythonic(decl.type):
            continue
        if isinstance(decl.type, gcc.ArrayType):
            # An array of structs: scan each element's initializer.
            for idx, value in decl.initial.elements:
                examine_struct_fields(value)
        else:
            gccutils.check_isinstance(decl.type, gcc.RecordType)
            examine_struct_fields(decl.initial)
def close_output(*args):
    """End-of-compilation callback: emit the globals-derived data, then
    close the output.  *args* absorbs whatever the plugin event passes;
    the arguments are unused."""
    global output_file
    # The globals analysis must be written before the file is closed.
    examine_globals()
    output_file.close()
# The pass which derives some exception-checking information. We take
# a two-step approach: first we get a call graph from the compiler.
# This is emitted by the plugin as Python code. Then, we run a second
# program that reads all the generated Python and uses it to get a
# global view of exception routes in gdb.
class GdbExceptionChecker(gcc.GimplePass):
    """Gimple pass that records call-graph facts for the exception checker.

    For each compiled function it writes Python-syntax records
    (function_call, has_indirect_call, declare_cleanup, define_function)
    to OUTPUT_FILE; a separate program later evaluates all the generated
    records to compute global exception routes.
    """
    def __init__(self, output_file):
        gcc.GimplePass.__init__(self, 'gdb_exception_checker')
        self.output_file = output_file
    def log(self, obj):
        # Emitted as a Python comment so the record reader ignores it.
        self.output_file.write("# %s\n" % str(obj))
    # Return true if FN is a call to a method on a Python object.
    # We know these cannot throw in the gdb sense.
    def fn_is_python_ignorable(self, fn):
        # Walk back from the SSA name to the assignment that produced it,
        # and check it loads the tp_dealloc/tp_free slot of a type object.
        if not isinstance(fn, gcc.SsaName):
            return False
        stmt = fn.def_stmt
        if not isinstance(stmt, gcc.GimpleAssign):
            return False
        if stmt.exprcode is not gcc.ComponentRef:
            return False
        rhs = stmt.rhs[0]
        if not isinstance(rhs, gcc.ComponentRef):
            return False
        if not isinstance(rhs.field, gcc.FieldDecl):
            return False
        return rhs.field.name == 'tp_dealloc' or rhs.field.name == 'tp_free'
    # Decode a function call and write something to the output.
    # THIS_FUN is the enclosing function that we are processing.
    # FNDECL is the call to process; it might not actually be a DECL
    # node.
    # LOC is the location of the call.
    def handle_one_fndecl(self, this_fun, fndecl, loc):
        """Emit a record for one callee; return its name ('' if unnamed)."""
        callee_name = ''
        if isinstance(fndecl, gcc.AddrExpr):
            # Strip the address-of wrapper to reach the underlying decl.
            fndecl = fndecl.operand
        if isinstance(fndecl, gcc.FunctionDecl):
            # Ordinary call to a named function.
            callee_name = str(fndecl.name)
            self.output_file.write("function_call(%s, %s, %s)\n"
                                   % (repr(callee_name),
                                      repr(this_fun.decl.name),
                                      repr(str(loc))))
        elif self.fn_is_python_ignorable(fndecl):
            # Call to tp_dealloc.
            pass
        elif (isinstance(fndecl, gcc.SsaName)
              and isinstance(fndecl.var, gcc.ParmDecl)):
            # We can ignore an indirect call via a parameter to the
            # current function, because this is handled via the rule
            # for passthrough functions.
            pass
        else:
            # Any other indirect call.
            self.output_file.write("has_indirect_call(%s, %s)\n"
                                   % (repr(this_fun.decl.name),
                                      repr(str(loc))))
        return callee_name
    # This does most of the work for examine_one_bb.
    # THIS_FUN is the enclosing function.
    # BB is the basic block to process.
    # Returns True if this block is the header of a TRY_CATCH, False
    # otherwise.
    def examine_one_bb_inner(self, this_fun, bb):
        if not bb.gimple:
            return False
        try_catch = False
        for stmt in bb.gimple:
            loc = stmt.loc
            if not loc:
                # Fall back to the function's own location for synthetic
                # statements that carry no source position.
                loc = this_fun.decl.location
            if not isinstance(stmt, gcc.GimpleCall):
                continue
            callee_name = self.handle_one_fndecl(this_fun, stmt.fn, loc)
            if callee_name == 'exceptions_state_mc_action_iter':
                try_catch = True
            global non_passthrough_functions
            if callee_name in non_passthrough_functions:
                continue
            # We have to specially handle calls where an argument to
            # the call is itself a function, e.g., qsort. In general
            # we model these as "passthrough" -- we assume that in
            # addition to the call the qsort there is also a call to
            # the argument function.
            for arg in stmt.args:
                # We are only interested in arguments which are functions.
                t = arg.type
                if isinstance(t, gcc.PointerType):
                    t = t.dereference
                if not isinstance(t, gcc.FunctionType):
                    continue
                if isinstance(arg, gcc.AddrExpr):
                    arg = arg.operand
                global cleanup_functions
                if callee_name in cleanup_functions:
                    if not isinstance(arg, gcc.FunctionDecl):
                        gcc.inform(loc, 'cleanup argument not a DECL: %s' % repr(arg))
                    else:
                        # Cleanups must be nothrow.
                        self.output_file.write("declare_cleanup(%s)\n"
                                               % repr(str(arg.name)))
                else:
                    # Assume we have a passthrough function, like
                    # qsort or an iterator. We model this by
                    # pretending there is an ordinary call at this
                    # point.
                    self.handle_one_fndecl(this_fun, arg, loc)
        return try_catch
    # Examine all the calls in a basic block and generate output for
    # them.
    # THIS_FUN is the enclosing function.
    # BB is the basic block to examine.
    # BB_WORKLIST is a list of basic blocks to work on; we add the
    # appropriate successor blocks to this.
    # SEEN_BBS is a map whose keys are basic blocks we have already
    # processed. We use this to ensure that we only visit a given
    # block once.
    def examine_one_bb(self, this_fun, bb, bb_worklist, seen_bbs):
        try_catch = self.examine_one_bb_inner(this_fun, bb)
        for edge in bb.succs:
            if edge.dest in seen_bbs:
                continue
            seen_bbs[edge.dest] = 1
            if try_catch:
                # This is bogus, but we magically know the right
                # answer.
                # NOTE(review): only the false edge out of a TRY_CATCH
                # header is followed; the other successor (the protected
                # region) is deliberately dropped from the worklist.
                if edge.false_value:
                    bb_worklist.append(edge.dest)
            else:
                bb_worklist.append(edge.dest)
    # Iterate over all basic blocks in THIS_FUN.
    def iterate_bbs(self, this_fun):
        # Iteration must be in control-flow order, because if we see a
        # TRY_CATCH construct we need to drop all the contained blocks.
        bb_worklist = [this_fun.cfg.entry]
        seen_bbs = {}
        seen_bbs[this_fun.cfg.entry] = 1
        for bb in bb_worklist:
            # bb_worklist grows while we iterate; list iteration picks up
            # the successors appended by examine_one_bb.
            self.examine_one_bb(this_fun, bb, bb_worklist, seen_bbs)
    def execute(self, fun):
        """Pass entry point, invoked by gcc once per compiled function."""
        if fun and fun.cfg and fun.decl:
            self.output_file.write("################\n")
            self.output_file.write("# Analysis for %s\n" % fun.decl.name)
            self.output_file.write("define_function(%s, %s)\n"
                                   % (repr(fun.decl.name),
                                      repr(str(fun.decl.location))))
            global ignore_functions
            if fun.decl.name not in ignore_functions:
                self.iterate_bbs(fun)
def main(**kwargs):
    """Plugin entry point: open the output file, pre-declare the known
    throwing functions, and install the exception-checking pass."""
    global output_file
    output_file = open(gcc.get_dump_base_name() + '.gdb_exc.py', 'w')
    # We used to use attributes here, but there didn't seem to be a
    # big benefit over hard-coding.
    for thrower in ('throw_exception', 'throw_verror',
                    'throw_vfatal', 'throw_error'):
        output_file.write('declare_throw("%s")\n' % thrower)
    gcc.register_callback(gcc.PLUGIN_FINISH_UNIT, close_output)
    checker = GdbExceptionChecker(output_file)
    checker.register_after('ssa')
main()
| gpl-2.0 |
jolyonb/edx-platform | cms/djangoapps/contentstore/management/commands/prompt.py | 1 | 1123 | import sys
def query_yes_no(question, default="yes"):
    """Ask a yes/no question on the console and return the answer as a bool.

    Args:
        question: string presented to the user.
        default: presumed answer if the user just hits <Enter>. Must be
            "yes" (the default), "no" or None (meaning an answer is
            required of the user).

    Returns:
        True for a "yes" answer, False for "no".

    Raises:
        ValueError: if ``default`` is not "yes", "no" or None.
    """
    valid = {
        "yes": True,
        "y": True,
        "ye": True,
        "no": False,
        "n": False,
    }
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError(u"invalid default answer: '%s'" % default)
    # BUGFIX: raw_input() only exists on Python 2 and raises NameError on
    # Python 3; fall back to input() so the command works on both.
    try:
        read_input = raw_input  # noqa: F821 -- Python 2 builtin
    except NameError:
        read_input = input
    while True:
        sys.stdout.write(question + prompt)
        choice = read_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
| agpl-3.0 |
ArcherSys/ArcherSys | Lib/test/test_strftime.py | 1 | 22946 | <<<<<<< HEAD
<<<<<<< HEAD
"""
Unittest for time.strftime
"""
import calendar
import sys
import re
from test import support
import time
import unittest
# helper functions
def fixasctime(s):
if s[8] == ' ':
s = s[:8] + '0' + s[9:]
return s
def escapestr(text, ampm):
"""
Escape text to deal with possible locale values that have regex
syntax while allowing regex syntax used for comparison.
"""
new_text = re.escape(text)
new_text = new_text.replace(re.escape(ampm), ampm)
new_text = new_text.replace('\%', '%')
new_text = new_text.replace('\:', ':')
new_text = new_text.replace('\?', '?')
return new_text
class StrftimeTest(unittest.TestCase):
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except AttributeError:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
import locale
locale.setlocale(locale.LC_TIME, 'C')
def test_strftime(self):
now = time.time()
self._update_variables(now)
self.strftest1(now)
self.strftest2(now)
if support.verbose:
print("Strftime test, platform: %s, Python version: %s" % \
(sys.platform, sys.version.split()[0]))
for j in range(-5, 5):
for i in range(25):
arg = now + (i+j*100)*23*3603
self._update_variables(arg)
self.strftest1(arg)
self.strftest2(arg)
def strftest1(self, now):
if support.verbose:
print("strftime test for", time.ctime(now))
now = self.now
# Make sure any characters that could be taken as regex syntax is
# escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
# musn't raise a value error
try:
result = time.strftime(e[0], now)
except ValueError as error:
self.fail("strftime '%s' format gave error: %s" % (e[0], error))
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
self.fail("strftime does not support standard '%s' format (%s)"
% (e[0], e[2]))
else:
self.fail("Conflict for %s (%s): expected %s, but got %s"
% (e[0], e[2], e[1], result))
def strftest2(self, now):
nowsecs = str(int(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError as result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if support.verbose:
print(msg)
continue
if re.match(escapestr(e[1], self.ampm), result):
if support.verbose:
print("Supports nonstandard '%s' format (%s)" % (e[0], e[2]))
elif not result or result[0] == '%':
if support.verbose:
print("Does not appear to support '%s' format (%s)" % \
(e[0], e[2]))
else:
if support.verbose:
print("Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2]))
print(" Expected %s, but got %s" % (e[1], result))
class Y1900Tests(unittest.TestCase):
"""A limitation of the MS C runtime library is that it crashes if
a date before 1900 is passed with a format string containing "%y"
"""
def test_y_before_1900(self):
# Issue #13674, #19634
t = (1899, 1, 1, 0, 0, 0, 0, 0, 0)
if (sys.platform == "win32"
or sys.platform.startswith(("aix", "sunos", "solaris"))):
with self.assertRaises(ValueError):
time.strftime("%y", t)
else:
self.assertEqual(time.strftime("%y", t), "99")
def test_y_1900(self):
self.assertEqual(
time.strftime("%y", (1900, 1, 1, 0, 0, 0, 0, 0, 0)), "00")
def test_y_after_1900(self):
self.assertEqual(
time.strftime("%y", (2013, 1, 1, 0, 0, 0, 0, 0, 0)), "13")
if __name__ == '__main__':
unittest.main()
=======
"""
Unittest for time.strftime
"""
import calendar
import sys
import re
from test import support
import time
import unittest
# helper functions
def fixasctime(s):
if s[8] == ' ':
s = s[:8] + '0' + s[9:]
return s
def escapestr(text, ampm):
"""
Escape text to deal with possible locale values that have regex
syntax while allowing regex syntax used for comparison.
"""
new_text = re.escape(text)
new_text = new_text.replace(re.escape(ampm), ampm)
new_text = new_text.replace('\%', '%')
new_text = new_text.replace('\:', ':')
new_text = new_text.replace('\?', '?')
return new_text
class StrftimeTest(unittest.TestCase):
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except AttributeError:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
import locale
locale.setlocale(locale.LC_TIME, 'C')
def test_strftime(self):
now = time.time()
self._update_variables(now)
self.strftest1(now)
self.strftest2(now)
if support.verbose:
print("Strftime test, platform: %s, Python version: %s" % \
(sys.platform, sys.version.split()[0]))
for j in range(-5, 5):
for i in range(25):
arg = now + (i+j*100)*23*3603
self._update_variables(arg)
self.strftest1(arg)
self.strftest2(arg)
def strftest1(self, now):
if support.verbose:
print("strftime test for", time.ctime(now))
now = self.now
# Make sure any characters that could be taken as regex syntax is
# escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
# musn't raise a value error
try:
result = time.strftime(e[0], now)
except ValueError as error:
self.fail("strftime '%s' format gave error: %s" % (e[0], error))
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
self.fail("strftime does not support standard '%s' format (%s)"
% (e[0], e[2]))
else:
self.fail("Conflict for %s (%s): expected %s, but got %s"
% (e[0], e[2], e[1], result))
def strftest2(self, now):
nowsecs = str(int(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError as result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if support.verbose:
print(msg)
continue
if re.match(escapestr(e[1], self.ampm), result):
if support.verbose:
print("Supports nonstandard '%s' format (%s)" % (e[0], e[2]))
elif not result or result[0] == '%':
if support.verbose:
print("Does not appear to support '%s' format (%s)" % \
(e[0], e[2]))
else:
if support.verbose:
print("Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2]))
print(" Expected %s, but got %s" % (e[1], result))
class Y1900Tests(unittest.TestCase):
"""A limitation of the MS C runtime library is that it crashes if
a date before 1900 is passed with a format string containing "%y"
"""
def test_y_before_1900(self):
# Issue #13674, #19634
t = (1899, 1, 1, 0, 0, 0, 0, 0, 0)
if (sys.platform == "win32"
or sys.platform.startswith(("aix", "sunos", "solaris"))):
with self.assertRaises(ValueError):
time.strftime("%y", t)
else:
self.assertEqual(time.strftime("%y", t), "99")
def test_y_1900(self):
self.assertEqual(
time.strftime("%y", (1900, 1, 1, 0, 0, 0, 0, 0, 0)), "00")
def test_y_after_1900(self):
self.assertEqual(
time.strftime("%y", (2013, 1, 1, 0, 0, 0, 0, 0, 0)), "13")
if __name__ == '__main__':
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""
Unittest for time.strftime
"""
import calendar
import sys
import re
from test import support
import time
import unittest
# helper functions
def fixasctime(s):
if s[8] == ' ':
s = s[:8] + '0' + s[9:]
return s
def escapestr(text, ampm):
"""
Escape text to deal with possible locale values that have regex
syntax while allowing regex syntax used for comparison.
"""
new_text = re.escape(text)
new_text = new_text.replace(re.escape(ampm), ampm)
new_text = new_text.replace('\%', '%')
new_text = new_text.replace('\:', ':')
new_text = new_text.replace('\?', '?')
return new_text
class StrftimeTest(unittest.TestCase):
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except AttributeError:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
import locale
locale.setlocale(locale.LC_TIME, 'C')
def test_strftime(self):
now = time.time()
self._update_variables(now)
self.strftest1(now)
self.strftest2(now)
if support.verbose:
print("Strftime test, platform: %s, Python version: %s" % \
(sys.platform, sys.version.split()[0]))
for j in range(-5, 5):
for i in range(25):
arg = now + (i+j*100)*23*3603
self._update_variables(arg)
self.strftest1(arg)
self.strftest2(arg)
def strftest1(self, now):
if support.verbose:
print("strftime test for", time.ctime(now))
now = self.now
# Make sure any characters that could be taken as regex syntax is
# escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
# musn't raise a value error
try:
result = time.strftime(e[0], now)
except ValueError as error:
self.fail("strftime '%s' format gave error: %s" % (e[0], error))
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
self.fail("strftime does not support standard '%s' format (%s)"
% (e[0], e[2]))
else:
self.fail("Conflict for %s (%s): expected %s, but got %s"
% (e[0], e[2], e[1], result))
def strftest2(self, now):
nowsecs = str(int(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError as result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if support.verbose:
print(msg)
continue
if re.match(escapestr(e[1], self.ampm), result):
if support.verbose:
print("Supports nonstandard '%s' format (%s)" % (e[0], e[2]))
elif not result or result[0] == '%':
if support.verbose:
print("Does not appear to support '%s' format (%s)" % \
(e[0], e[2]))
else:
if support.verbose:
print("Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2]))
print(" Expected %s, but got %s" % (e[1], result))
class Y1900Tests(unittest.TestCase):
"""A limitation of the MS C runtime library is that it crashes if
a date before 1900 is passed with a format string containing "%y"
"""
def test_y_before_1900(self):
# Issue #13674, #19634
t = (1899, 1, 1, 0, 0, 0, 0, 0, 0)
if (sys.platform == "win32"
or sys.platform.startswith(("aix", "sunos", "solaris"))):
with self.assertRaises(ValueError):
time.strftime("%y", t)
else:
self.assertEqual(time.strftime("%y", t), "99")
def test_y_1900(self):
self.assertEqual(
time.strftime("%y", (1900, 1, 1, 0, 0, 0, 0, 0, 0)), "00")
def test_y_after_1900(self):
self.assertEqual(
time.strftime("%y", (2013, 1, 1, 0, 0, 0, 0, 0, 0)), "13")
if __name__ == '__main__':
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| mit |
hoafaloaf/seqparse | seqparse/files.py | 1 | 3961 | """Singleton file-related data structures utilized by the Seqparse module."""
# Standard Libraries
import os
from functools import total_ordering
__all__ = ("File", )
###############################################################################
# Class: File


@total_ordering
class File(object):
    """
    Simple representation of files on disk.

    Instances compare equal (and hash) on their full path name, and order
    lexicographically by full path name.

    Args:
        file_name (str): Full path to the input file.
        stat (stat_result, optional): Disk stats you'd like to cache for the
            specified file.
    """

    def __init__(self, file_name, stat=None):
        """Initialise the instance."""
        self._info = dict(full=None, name=None, path=None)
        self._stat = None

        self._cache_stat(stat)
        self._set_name(file_name)

    def __eq__(self, other):
        """Define equality between instances."""
        if type(other) is type(self):
            return self.full_name == other.full_name
        return False

    def __hash__(self):
        """Hash on the full file name, mirroring __eq__.

        BUGFIX: defining __eq__ without __hash__ made instances unhashable
        on Python 3, so they could not be stored in sets or used as dict
        keys.
        """
        return hash(self.full_name)

    def __lt__(self, other):
        """Define ordering between instances."""
        if type(other) is type(self):
            return self.full_name < other.full_name
        # NOTE(review): a File deliberately sorts before any non-File value;
        # preserved as-is -- confirm against callers before changing.
        return True

    def __repr__(self):  # pragma: no cover
        """Pretty representation of the instance."""
        blurb = "{cls}({full!r})"
        return blurb.format(cls=type(self).__name__, **self._info)

    def __str__(self):
        """String representation of a File instance."""
        return str(self.full_name)

    @property
    def full_name(self):
        """str: Full name of the file, including containing directory."""
        return self._info["full"]

    @property
    def mtime(self):
        """
        int: Modification time of the file.

        Returns None if the file has not been stat'd on disk.
        """
        if not self._stat:
            return None
        return self._stat.st_mtime

    @property
    def name(self):
        """str: Base name of the file (no containing directory)."""
        return self._info["name"]

    @property
    def path(self):
        """str: Directory in which the file is located."""
        return self._info["path"]

    @property
    def size(self):
        """
        int: Size of the file in bytes.

        Returns None if the file has not been stat'd on disk.
        """
        if not self._stat:
            return None
        return self._stat.st_size

    def _cache_stat(self, input_stat):
        """
        Cache file system stat data.

        Args:
            input_stat (stat_result): Value that you'd like to cache.

        Returns:
            stat_result that was successfully cached (None when input_stat
            is falsy).
        """
        # Imported lazily to avoid a circular import with the package root.
        from . import get_stat_result
        self._stat = None
        if input_stat:
            self._stat = get_stat_result(input_stat)
        return self._stat

    def _set_name(self, full_name):
        """
        Set all name-related fields on the instance.

        Args:
            full_name (str): Full path to the contained file.

        Returns:
            dict of path-related strings (full name, base name, path).
        """
        path_name, file_name = os.path.split(full_name)
        self._info.update(full=full_name, name=file_name, path=path_name)
        return self._info

    def stat(self, force=False, lazy=False):
        """
        File system status.

        Args:
            force (bool, optional): Whether to force a disk stat query,
                regardless of caching status.
            lazy (bool, optional): Whether to query disk stats should no
                cached value exist.

        Returns:
            None if disk stats have not been cached (and neither force nor
            lazy triggered a query).
            stat_result otherwise.
        """
        if force or (lazy and self._stat is None):
            self._cache_stat(os.stat(self.full_name))
        return self._stat
| mit |
AnishShah/tensorflow | tensorflow/python/framework/meta_graph_test.py | 10 | 40641 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.framework.meta_graph.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import random
import shutil
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner_impl
# pylint: disable=invalid-name
def _TestDir(test_name):
  """Return a fresh, empty scratch directory named *test_name*.

  Any directory left over from a previous run is removed first, so each
  test starts from a clean slate.
  """
  dir_path = os.path.join(test.get_temp_dir(), test_name)
  if os.path.exists(dir_path):
    shutil.rmtree(dir_path)
  gfile.MakeDirs(dir_path)
  return dir_path
# pylint: enable=invalid-name
class SimpleMetaGraphTest(test.TestCase):
  def testNoVariables(self):
    """Round-trips a variable-free graph through export/import.

    Exports a MetaGraphDef for a trivial add-constant graph, imports it
    into a fresh graph, and checks that the re-exported proto and the
    computed output both match the originals.
    """
    test_dir = _TestDir("no_variables")
    filename = os.path.join(test_dir, "metafile")
    input_feed_value = -10  # Arbitrary input value for feed_dict.
    orig_graph = ops.Graph()
    with self.session(graph=orig_graph) as sess:
      # Create a minimal graph with zero variables.
      input_tensor = array_ops.placeholder(
          dtypes.float32, shape=[], name="input")
      offset = constant_op.constant(42, dtype=dtypes.float32, name="offset")
      output_tensor = math_ops.add(input_tensor, offset, name="add_offset")
      # Add input and output tensors to graph collections.
      ops.add_to_collection("input_tensor", input_tensor)
      ops.add_to_collection("output_tensor", output_tensor)
      output_value = sess.run(output_tensor, {input_tensor: input_feed_value})
      self.assertEqual(output_value, 32)
      # Generates MetaGraphDef.
      meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
          filename=filename,
          graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
          collection_list=["input_tensor", "output_tensor"],
          saver_def=None)
      self.assertTrue(meta_graph_def.HasField("meta_info_def"))
      self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
      self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
                          "")
      # No variables were created, so the returned var map must be empty.
      self.assertEqual({}, var_list)
    # Create a clean graph and import the MetaGraphDef nodes.
    new_graph = ops.Graph()
    with self.session(graph=new_graph) as sess:
      # Import the previously export meta graph.
      meta_graph.import_scoped_meta_graph(filename)
      # Re-exports the current graph state for comparison to the original.
      new_meta_graph_def, _ = meta_graph.export_scoped_meta_graph(filename +
                                                                  "_new")
      test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
                                               new_meta_graph_def)
      # Ensures that we can still get a reference to our graph collections.
      new_input_tensor = ops.get_collection("input_tensor")[0]
      new_output_tensor = ops.get_collection("output_tensor")[0]
      # Verifies that the new graph computes the same result as the original.
      new_output_value = sess.run(new_output_tensor,
                                  {new_input_tensor: input_feed_value})
      self.assertEqual(new_output_value, output_value)
  def testStrippedOpListNestedFunctions(self):
    """stripped_op_list_for_graph reports only ops actually reachable:
    defining (but not calling) nested Defuns contributes nothing; calling
    the outer one on a constant yields exactly Const and Square."""
    with self.cached_session():
      # Square two levels deep
      @function.Defun(dtypes.int32)
      def f0(x):
        return math_ops.square(x)
      @function.Defun(dtypes.int32)
      def f1(x):
        return f0(x)
      # At this point we've defined two functions but haven't called them, so
      # there should be no used ops.
      op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
                                                      .as_graph_def())
      self.assertEqual(len(op_list.op), 0)
      # If we call the function on a constant, there should be two ops
      _ = f1(constant_op.constant(7))
      op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
                                                      .as_graph_def())
      self.assertEqual(["Const", "Square"], [op.name for op in op_list.op])
  def testStrippedOpListRecursiveFunctions(self):
    """Mutually recursive function-library entries must not hang the walk."""
    # The function module doesn't support recursive functions, so we build a
    # recursive function situation by ourselves: A calls B calls A and Const.
    graph = graph_pb2.GraphDef()
    a = graph.library.function.add()
    b = graph.library.function.add()
    a.signature.name = "A"
    b.signature.name = "B"
    a.node_def.add().op = "B"
    b.node_def.add().op = "Const"
    b.node_def.add().op = "A"
    # Use A in the graph
    graph.node.add().op = "A"
    # The stripped op list should contain just Const.
    op_list = meta_graph.stripped_op_list_for_graph(graph)
    self.assertEqual(["Const"], [op.name for op in op_list.op])
  def testDefaultAttrStripping(self):
    """Verifies that default attributes are stripped from a graph def."""
    # Complex Op has 2 attributes with defaults:
    #   o "T"    : float32.
    #   o "Tout" : complex64.
    # When inputs to the Complex Op are float32 instances, "T" maps to float32
    # and "Tout" maps to complex64. Since these attr values map to their
    # defaults, they must be stripped unless stripping of default attrs is
    # disabled.
    with self.cached_session():
      real_num = constant_op.constant(1.0, dtype=dtypes.float32, name="real")
      imag_num = constant_op.constant(2.0, dtype=dtypes.float32, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")
      # strip_default_attrs is enabled: the default-valued attrs must be gone
      # and the meta_info_def must record that stripping happened.
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          graph_def=ops.get_default_graph().as_graph_def(),
          strip_default_attrs=True)
      node_def = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
      self.assertNotIn("T", node_def.attr)
      self.assertNotIn("Tout", node_def.attr)
      self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
      # strip_default_attrs is disabled: attrs are preserved verbatim.
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          graph_def=ops.get_default_graph().as_graph_def(),
          strip_default_attrs=False)
      node_def = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
      self.assertIn("T", node_def.attr)
      self.assertIn("Tout", node_def.attr)
      self.assertFalse(meta_graph_def.meta_info_def.stripped_default_attrs)
    # When inputs to the Complex Op are float64 instances, "T" maps to float64
    # and "Tout" maps to complex128. Since these attr values don't map to their
    # defaults, they must not be stripped.
    with self.session(graph=ops.Graph()):
      real_num = constant_op.constant(1.0, dtype=dtypes.float64, name="real")
      imag_num = constant_op.constant(2.0, dtype=dtypes.float64, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          graph_def=ops.get_default_graph().as_graph_def(),
          strip_default_attrs=True)
      node_def = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
      self.assertEqual(node_def.attr["T"].type, dtypes.float64)
      self.assertEqual(node_def.attr["Tout"].type, dtypes.complex128)
      # Stripping was still *requested*, so the flag is recorded even though
      # these particular attrs were non-default and therefore kept.
      self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
  def testDefaultAttrStrippingNestedFunctions(self):
    """Verifies that default attributes are stripped from function node defs."""
    with self.cached_session():
      # Build a Complex op two Defun levels deep so that stripping has to
      # recurse into the function library, not just the main graph.
      @function.Defun(dtypes.float32, dtypes.float32)
      def f0(i, j):
        return math_ops.complex(i, j, name="double_nested_complex")
      @function.Defun(dtypes.float32, dtypes.float32)
      def f1(i, j):
        return f0(i, j)
      _ = f1(constant_op.constant(1.0), constant_op.constant(2.0))
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          graph_def=ops.get_default_graph().as_graph_def(),
          strip_default_attrs=True)
      # Locate the Complex node inside the exported function library. The
      # name is matched with startswith() because export may append a suffix.
      double_nested_complex_node_def = None
      for function_def in meta_graph_def.graph_def.library.function:
        for node_def in function_def.node_def:
          if node_def.name.startswith("double_nested_complex"):
            double_nested_complex_node_def = node_def
            break
        if double_nested_complex_node_def:
          break
      self.assertIsNotNone(double_nested_complex_node_def)
      # Both attrs held their default values (float32/complex64), so they
      # must have been stripped from the function's node def as well.
      self.assertNotIn("T", double_nested_complex_node_def.attr)
      self.assertNotIn("Tout", double_nested_complex_node_def.attr)
      self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
  def testDefaultAttrStrippingUnregisteredOps(self):
    """Verifies that nodes with un-registered ops are not stripped."""
    # Hand-build a graph containing an op with no registered OpDef; without a
    # registered definition there is no notion of "default" attr values, so
    # stripping must leave the node's attrs untouched.
    graph_def = graph_pb2.GraphDef()
    node = graph_def.node.add()
    node.name = "node_with_unreg_op"
    node.op = "unreg_op"
    node.attr["attr_1"].i = 1
    meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
    meta_info_def.stripped_op_list.op.add()
    with self.cached_session():
      meta_graph_def = meta_graph.create_meta_graph_def(
          meta_info_def=meta_info_def, graph_def=graph_def,
          strip_default_attrs=True)
      node_def = test_util.get_node_def_from_graph("node_with_unreg_op",
                                                   meta_graph_def.graph_def)
      # The attr survives, but the strip request is still recorded.
      self.assertEqual(node_def.attr["attr_1"].i, 1)
      self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
  def testVariableObjectsAreSharedAmongCollections(self):
    """Import rebuilds one shared Variable object across collections."""
    with ops.Graph().as_default() as graph1:
      v = variables.Variable(3.0)
      # A single instance of Variable is shared among the collections:
      global_vars = graph1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      trainable_vars = graph1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual(len(global_vars), 1)
      self.assertEqual(len(trainable_vars), 1)
      self.assertIs(global_vars[0], trainable_vars[0])
      self.assertIs(v, global_vars[0])
      orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(graph=graph1)
    del graph1  # To avoid accidental references in code involving graph2.
    with ops.Graph().as_default() as graph2:
      meta_graph.import_scoped_meta_graph(orig_meta_graph)
      global_vars = graph2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      trainable_vars = graph2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual(len(global_vars), 1)
      self.assertEqual(len(trainable_vars), 1)
      # A single instance of Variable is shared among the collections, i.e.
      # import must not create a distinct object per collection entry:
      self.assertIs(global_vars[0], trainable_vars[0])
class ScopedMetaGraphTest(test.TestCase):
  """Tests export/import of MetaGraphDefs restricted to a name scope.

  The tests build multi-layer graphs, export each layer with
  `export_scope`, re-import them under new scopes with `import_scope`,
  and assert that variable names, collections, colocation constraints,
  devices and control-flow contexts round-trip correctly.
  """

  def _testScopedExport(self, test_dir, exported_filenames):
    """Builds a 3-layer graph and exports each layer to its own file."""
    graph = ops.Graph()
    with graph.as_default():
      # Creates an inference graph.
      # Hidden 1
      colocate_constraint = constant_op.constant(1.2, name="constraint")
      images = constant_op.constant(
          1.2, dtypes.float32, shape=[100, 28], name="images")
      with ops.name_scope("hidden1"):
        with graph.colocate_with(colocate_constraint.op):
          weights1 = variables.Variable(
              random_ops.truncated_normal(
                  [28, 128], stddev=1.0 / math.sqrt(float(28))),
              name="weights")
        # The use of control_flow_ops.cond here is purely for adding test
        # coverage of the save and restore of control flow context (which
        # doesn't make any sense here from a machine learning perspective).
        # The typical biases is a simple Variable without the conditions.
        biases1 = variables.Variable(
            control_flow_ops.cond(
                math_ops.less(random.random(), 0.5),
                lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
            name="biases")
        hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
      # Hidden 2
      with ops.name_scope("hidden2"):
        weights2 = variables.Variable(
            random_ops.truncated_normal(
                [128, 32], stddev=1.0 / math.sqrt(float(128))),
            name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding
        # test coverage of the save and restore of control flow context
        # (which doesn't make any sense here from a machine learning
        # perspective). The typical biases is a simple Variable without the
        # conditions.
        def loop_cond(it, _):
          return it < 2
        def loop_body(it, biases2):
          biases2 += constant_op.constant(0.1, shape=[32])
          return it + 1, biases2
        _, biases2 = control_flow_ops.while_loop(
            loop_cond,
            loop_body, [
                constant_op.constant(0), variables.Variable(
                    array_ops.zeros([32]), name="biases")
            ])
        hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
      # Linear
      with ops.name_scope("softmax_linear"):
        weights3 = variables.Variable(
            random_ops.truncated_normal(
                [32, 10], stddev=1.0 / math.sqrt(float(32))),
            name="weights")
        biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
        logits = math_ops.matmul(hidden2, weights3) + biases3
        ops.add_to_collection("logits", logits)
      # Exports each sub-graph.
      # Exports the first one with unbound_inputs_col_name set to default.
      orig_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
          filename=os.path.join(test_dir, exported_filenames[0]),
          graph=ops.get_default_graph(),
          export_scope="hidden1")
      # var_list keys are scope-relative; the Variable objects keep full names.
      self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
      var_names = [v.name for _, v in var_list.items()]
      self.assertEqual(["hidden1/biases:0", "hidden1/weights:0"],
                       sorted(var_names))
      # Exports the rest with no unbound_inputs_col_name.
      orig_meta_graph2, _ = meta_graph.export_scoped_meta_graph(
          filename=os.path.join(test_dir, exported_filenames[1]),
          graph=ops.get_default_graph(),
          export_scope="hidden2",
          unbound_inputs_col_name=None)
      orig_meta_graph3, _ = meta_graph.export_scoped_meta_graph(
          filename=os.path.join(test_dir, exported_filenames[2]),
          graph=ops.get_default_graph(),
          export_scope="softmax_linear",
          unbound_inputs_col_name=None)
    return [orig_meta_graph1, orig_meta_graph2, orig_meta_graph3]

  def _testScopedImport(self, test_dir, exported_filenames):
    """Imports the exported layers under new scopes and re-exports them."""
    graph = ops.Graph()
    # Create all the missing inputs.
    with graph.as_default():
      new_image = constant_op.constant(
          1.2, dtypes.float32, shape=[100, 28], name="images")
    # Importing without resolving the unbound inputs must fail.
    with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
      meta_graph.import_scoped_meta_graph(
          os.path.join(test_dir, exported_filenames[0]),
          graph=graph,
          import_scope="new_hidden1")
    # An input_map keyed by the wrong name ("image:0") leaves the real
    # unbound input unresolved, so this must fail too.
    with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
      meta_graph.import_scoped_meta_graph(
          os.path.join(test_dir, exported_filenames[0]),
          graph=graph,
          input_map={"image:0": new_image},
          import_scope="new_hidden1")
    # Verifies we can import the original "hidden1" into "new_hidden1".
    var_list = meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filenames[0]),
        graph=graph,
        input_map={"$unbound_inputs_images": new_image},
        import_scope="new_hidden1")
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(["new_hidden1/biases:0", "new_hidden1/weights:0"],
                     sorted(new_var_names))
    # Verifies we can import the original "hidden2" into "new_hidden2".
    hidden1 = array_ops.identity(
        graph.as_graph_element("new_hidden1/Relu:0"), name="hidden1/Relu")
    var_list = meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filenames[1]),
        graph=graph,
        input_map={"$unbound_inputs_hidden1/Relu": hidden1},
        import_scope="new_hidden2",
        unbound_inputs_col_name=None)
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(["new_hidden2/biases:0", "new_hidden2/weights:0"],
                     sorted(new_var_names))
    # Verifies we can import the original "softmax_linear" into
    # "new_softmax_linear".
    hidden2 = array_ops.identity(
        graph.as_graph_element("new_hidden2/Relu:0"), name="hidden2/Relu")
    var_list = meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filenames[2]),
        graph=graph,
        input_map={"$unbound_inputs_hidden2/Relu": hidden2},
        import_scope="new_softmax_linear",
        unbound_inputs_col_name=None)
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(
        ["new_softmax_linear/biases:0", "new_softmax_linear/weights:0"],
        sorted(new_var_names))
    # Exports the scoped meta graphs again so the caller can compare them
    # with the original exports.
    new_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
        graph=graph, export_scope="new_hidden1")
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_meta_graph2, var_list = meta_graph.export_scoped_meta_graph(
        graph=graph, export_scope="new_hidden2", unbound_inputs_col_name=None)
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_meta_graph3, var_list = meta_graph.export_scoped_meta_graph(
        graph=graph,
        export_scope="new_softmax_linear",
        unbound_inputs_col_name=None)
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    return [new_meta_graph1, new_meta_graph2, new_meta_graph3]

  # Verifies that we can export the subgraph under each layer and import
  # them into new layers in a new graph.
  def testScopedExportAndImport(self):
    test_dir = _TestDir("scoped_export_import")
    filenames = [
        "exported_hidden1.pbtxt", "exported_hidden2.pbtxt",
        "exported_softmax_linear.pbtxt"
    ]
    orig_meta_graphs = self._testScopedExport(test_dir, filenames)
    new_meta_graphs = self._testScopedImport(test_dir, filenames)
    for a, b in zip(orig_meta_graphs, new_meta_graphs):
      # The unbound input strings are slightly different with the C API enabled
      # ("images" vs "images:0") due to the original import_graph_def code
      # vs. ImportGraphDef in C++.
      # TODO(skyewm): update the pbtxts once _USE_C_API is removed.
      del a.collection_def["unbound_inputs"]
      del b.collection_def["unbound_inputs"]
      test_util.assert_meta_graph_protos_equal(self, a, b)

  def testWhileLoopGradients(self):
    """While-loop gradients survive a scoped export/import round trip."""
    # Create a simple while loop.
    with ops.Graph().as_default():
      with ops.name_scope("export"):
        var = variables.Variable(0.)
        var_name = var.name
        _, output = control_flow_ops.while_loop(
            lambda i, x: i < 5,
            lambda i, x: (i + 1, x + math_ops.cast(i, dtypes.float32)),
            [0, var])
        output_name = output.name
      # Generate a MetaGraphDef containing the while loop with an export scope.
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          export_scope="export")
      # Build and run the gradients of the while loop. We use this below to
      # verify that the gradients are correct with the imported MetaGraphDef.
      init_op = variables.global_variables_initializer()
      grad = gradients_impl.gradients([output], [var])
      with session.Session() as sess:
        sess.run(init_op)
        expected_grad_value = sess.run(grad)
    # Restore the MetaGraphDef into a new Graph with an import scope.
    with ops.Graph().as_default():
      meta_graph.import_scoped_meta_graph(meta_graph_def, import_scope="import")
      # Re-export and make sure we get the same MetaGraphDef.
      new_meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          export_scope="import")
      test_util.assert_meta_graph_protos_equal(
          self, meta_graph_def, new_meta_graph_def)
      # Make sure we can still build gradients and get the same result.
      def new_name(tensor_name):
        # Map "export/foo" in the original graph to "import/foo" here.
        base_tensor_name = tensor_name.replace("export/", "")
        return "import/" + base_tensor_name
      var = ops.get_default_graph().get_tensor_by_name(new_name(var_name))
      output = ops.get_default_graph().get_tensor_by_name(new_name(output_name))
      grad = gradients_impl.gradients([output], [var])
      init_op = variables.global_variables_initializer()
      with session.Session() as sess:
        sess.run(init_op)
        actual_grad_value = sess.run(grad)
        self.assertEqual(expected_grad_value, actual_grad_value)

  def testImportWhileLoopInWhileLoop(self):
    """An exported while loop can be imported inside another while loop."""
    # Create a simple while loop.
    with ops.Graph().as_default():
      var = variables.Variable(0.0)
      _, output = control_flow_ops.while_loop(lambda i, x: i < 5,
                                              lambda i, x: (i + 1, x * 2.0),
                                              [0, var])
      output_name = output.name
      # Generate a MetaGraphDef containing the while loop with an export scope.
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph()
    # Restore the MetaGraphDef in a while loop in a new graph.
    with ops.Graph().as_default():
      def body(i, _):
        # Each iteration of the outer loop re-imports the inner loop graph.
        meta_graph.import_scoped_meta_graph(meta_graph_def)
        return i + 1, ops.get_default_graph().get_tensor_by_name(output_name)
      _, x = control_flow_ops.while_loop(lambda i, x: i < 2, body, [0, 0.0],
                                         name="")
      with session.Session() as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(x)

  def testScopedImportUnderNameScope(self):
    """import_scope composes with an enclosing name_scope ("foo/bar/...")."""
    graph = ops.Graph()
    with graph.as_default():
      variables.Variable(initial_value=1.0, trainable=True, name="myvar")
    meta_graph_def, _ = meta_graph.export_scoped_meta_graph(graph=graph)
    graph = ops.Graph()
    with graph.as_default():
      with ops.name_scope("foo"):
        imported_variables = meta_graph.import_scoped_meta_graph(
            meta_graph_def, import_scope="bar")
        self.assertEqual(len(imported_variables), 1)
        self.assertEqual(list(imported_variables.values())[0].name,
                         "foo/bar/myvar:0")

  def testScopedImportUnderNameScopeNoVarScope(self):
    """Without import_scope, only the surrounding name_scope prefixes names."""
    graph = ops.Graph()
    with graph.as_default():
      variables.Variable(initial_value=1.0, trainable=True, name="myvar")
    meta_graph_def, _ = meta_graph.export_scoped_meta_graph(graph=graph)
    graph = ops.Graph()
    with graph.as_default():
      with ops.name_scope("foo"):
        imported_variables = meta_graph.import_scoped_meta_graph(
            meta_graph_def)
        self.assertEqual(len(imported_variables), 1)
        self.assertEqual(list(imported_variables.values())[0].name,
                         "foo/myvar:0")

  def testImportsUsingSameScopeName(self):
    """Repeated imports under the same scope get uniquified ("s", "s_1")."""
    with ops.Graph().as_default():
      variables.Variable(0, name="v")
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph()
    with ops.Graph().as_default():
      for suffix in ["", "_1"]:
        imported_variables = meta_graph.import_scoped_meta_graph(
            meta_graph_def, import_scope="s")
        self.assertEqual(len(imported_variables), 1)
        self.assertEqual(list(imported_variables.keys())[0], "v:0")
        self.assertEqual(list(imported_variables.values())[0].name,
                         "s" + suffix + "/v:0")

  def testScopedImportWithSelectedCollections(self):
    """restore_collections_predicate filters which collections are imported."""
    meta_graph_filename = os.path.join(
        _TestDir("selected_collections_import"), "meta_graph.pb")
    graph = ops.Graph()
    # Add a variable to populate two collections. The functionality tested is
    # not specific to variables, but using variables in the test is convenient.
    with graph.as_default():
      variables.Variable(initial_value=1.0, trainable=True)
    self.assertTrue(
        all([
            graph.get_collection(key)
            for key in
            [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES]
        ]))
    meta_graph.export_scoped_meta_graph(
        filename=meta_graph_filename, graph=graph)

    def _test_import(include_collection_keys, omit_collection_keys):
      # Imports the saved meta graph, restoring only the included collections,
      # and asserts the omitted ones come back empty.
      assert set(include_collection_keys).isdisjoint(omit_collection_keys)
      newgraph = ops.Graph()
      import_scope = "some_scope_name"

      def _restore_collections_predicate(collection_key):
        return (collection_key in include_collection_keys and
                collection_key not in omit_collection_keys)

      meta_graph.import_scoped_meta_graph(
          meta_graph_filename,
          graph=newgraph,
          import_scope=import_scope,
          restore_collections_predicate=_restore_collections_predicate)
      collection_values = [
          newgraph.get_collection(name=key, scope=import_scope)
          for key in include_collection_keys
      ]
      self.assertTrue(all(collection_values))
      collection_values = [
          newgraph.get_collection(name=key, scope=import_scope)
          for key in omit_collection_keys
      ]
      self.assertFalse(any(collection_values))

    _test_import(
        include_collection_keys=[
            ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
        ],
        omit_collection_keys=[])
    _test_import(
        include_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES],
        omit_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES])
    _test_import(
        include_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES],
        omit_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES])
    _test_import(
        include_collection_keys=[],
        omit_collection_keys=[
            ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
        ])

  def _testScopedExportWithQueue(self, test_dir, exported_filename):
    """Exports a FIFOQueue (plus its QueueRunner) under scope "queue1"."""
    graph = ops.Graph()
    with graph.as_default():
      with ops.name_scope("queue1"):
        input_queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
        enqueue = input_queue.enqueue((9876), name="enqueue")
        close = input_queue.close(name="close")
        qr = queue_runner_impl.QueueRunner(input_queue, [enqueue], close)
        queue_runner_impl.add_queue_runner(qr)
        input_queue.dequeue(name="dequeue")
      orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
          filename=os.path.join(test_dir, exported_filename),
          graph=ops.get_default_graph(),
          export_scope="queue1")
    return orig_meta_graph

  def _testScopedImportWithQueue(self, test_dir, exported_filename,
                                 new_exported_filename):
    """Imports the queue under "new_queue1" and re-exports it for comparison."""
    graph = ops.Graph()
    meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filename),
        graph=graph,
        import_scope="new_queue1")
    graph.as_graph_element("new_queue1/dequeue:0")
    graph.as_graph_element("new_queue1/close")
    with graph.as_default():
      new_meta_graph, _ = meta_graph.export_scoped_meta_graph(
          filename=os.path.join(test_dir, new_exported_filename),
          graph=graph,
          export_scope="new_queue1")
    return new_meta_graph

  # Verifies that we can export the subgraph containing a FIFOQueue under
  # "queue1" and import it into "new_queue1" in a new graph.
  def testScopedWithQueue(self):
    test_dir = _TestDir("scoped_with_queue")
    orig_meta_graph = self._testScopedExportWithQueue(test_dir,
                                                      "exported_queue1.pbtxt")
    new_meta_graph = self._testScopedImportWithQueue(
        test_dir, "exported_queue1.pbtxt", "exported_new_queue1.pbtxt")
    test_util.assert_meta_graph_protos_equal(self, orig_meta_graph,
                                             new_meta_graph)

  # Verifies that we can export a subgraph in a nested name scope containing a
  # "hidden1/hidden2" and import it into "new_hidden1/new_hidden2" in a new
  # graph.
  def doTestExportNestedNames(self, use_resource=False):
    graph1 = ops.Graph()
    with graph1.as_default():
      with ops.name_scope("hidden1/hidden2/hidden3"):
        images = constant_op.constant(
            1.0, dtypes.float32, shape=[3, 2], name="images")
        if use_resource:
          weights1 = variables.Variable(
              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
          biases1 = resource_variable_ops.ResourceVariable(
              [0.1] * 3, name="biases")
        else:
          biases1 = variables.Variable([0.1] * 3, name="biases")
          weights1 = variables.Variable(
              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
        nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
    orig_meta_graph, var_list = meta_graph.export_scoped_meta_graph(
        export_scope="hidden1/hidden2", graph=graph1)
    var_names = [v.name for _, v in var_list.items()]
    # Keys are relative to the export scope; variable names are absolute.
    self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
                     sorted(var_list.keys()))
    self.assertEqual([
        "hidden1/hidden2/hidden3/biases:0", "hidden1/hidden2/hidden3/weights:0"
    ], sorted(var_names))
    for node in orig_meta_graph.graph_def.node:
      self.assertTrue(node.name.startswith("hidden3"))
    graph2 = ops.Graph()
    new_var_list = meta_graph.import_scoped_meta_graph(
        orig_meta_graph, import_scope="new_hidden1/new_hidden2", graph=graph2)
    self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
                     sorted(new_var_list.keys()))
    new_var_names = [v.name for _, v in new_var_list.items()]
    self.assertEqual([
        "new_hidden1/new_hidden2/hidden3/biases:0",
        "new_hidden1/new_hidden2/hidden3/weights:0"
    ], sorted(new_var_names))
    # Colocation (_class) constraints must be rewritten to the new scope too.
    nodes = [
        "new_hidden1/new_hidden2/hidden3/biases/Assign",
        "new_hidden1/new_hidden2/hidden3/weights/Assign"
    ]
    expected = [
        b"loc:@new_hidden1/new_hidden2/hidden3/biases",
        b"loc:@new_hidden1/new_hidden2/hidden3/weights"
    ]
    for n, e in zip(nodes, expected):
      self.assertEqual([e], graph2.get_operation_by_name(n).get_attr("_class"))

  def testExportNestedNames(self):
    self.doTestExportNestedNames(use_resource=False)

  def testExportNestedNamesResource(self):
    self.doTestExportNestedNames(use_resource=True)

  def testPotentialCycle(self):
    """Unbound inputs that would form a cycle are rejected unless mapped."""
    graph1 = ops.Graph()
    with graph1.as_default():
      a = constant_op.constant(1.0, shape=[2, 2])
      b = constant_op.constant(2.0, shape=[2, 2])
      matmul = math_ops.matmul(a, b)
      with ops.name_scope("hidden1"):
        c = nn_ops.relu(matmul)
        d = constant_op.constant(3.0, shape=[2, 2])
        matmul = math_ops.matmul(c, d)
    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        export_scope="hidden1", graph=graph1)
    graph2 = ops.Graph()
    with graph2.as_default():
      with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
        meta_graph.import_scoped_meta_graph(
            orig_meta_graph, import_scope="new_hidden1")
      # Supplying the unbound MatMul input via input_map makes import succeed.
      meta_graph.import_scoped_meta_graph(
          orig_meta_graph,
          import_scope="new_hidden1",
          input_map={
              "$unbound_inputs_MatMul": constant_op.constant(
                  4.0, shape=[2, 2])
          })

  def testClearDevices(self):
    """clear_devices works at export time, with graph_def, and at import."""
    graph1 = ops.Graph()
    with graph1.as_default():
      with ops.device("/device:CPU:0"):
        a = variables.Variable(
            constant_op.constant(
                1.0, shape=[2, 2]), name="a")
      with ops.device("/job:ps/replica:0/task:0/device:GPU:0"):
        b = variables.Variable(
            constant_op.constant(
                2.0, shape=[2, 2]), name="b")
      with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
        math_ops.matmul(a, b, name="matmul")
    self.assertEqual("/device:CPU:0", str(graph1.as_graph_element("a").device))
    self.assertEqual("/job:ps/replica:0/task:0/device:GPU:0",
                     str(graph1.as_graph_element("b").device))
    self.assertEqual("/job:localhost/replica:0/task:0/device:CPU:0",
                     str(graph1.as_graph_element("matmul").device))
    # Verifies that devices are cleared on export.
    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        graph=graph1, clear_devices=True)
    graph2 = ops.Graph()
    with graph2.as_default():
      meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
    self.assertEqual("", str(graph2.as_graph_element("a").device))
    self.assertEqual("", str(graph2.as_graph_element("b").device))
    self.assertEqual("", str(graph2.as_graph_element("matmul").device))
    # Verifies that devices are cleared on export when passing in graph_def.
    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        graph_def=graph1.as_graph_def(), clear_devices=True)
    graph2 = ops.Graph()
    with graph2.as_default():
      meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
    self.assertEqual("", str(graph2.as_graph_element("a").device))
    self.assertEqual("", str(graph2.as_graph_element("b").device))
    self.assertEqual("", str(graph2.as_graph_element("matmul").device))
    # Verifies that devices are cleared on import.
    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        graph=graph1, clear_devices=False)
    graph2 = ops.Graph()
    with graph2.as_default():
      meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=True)
    self.assertEqual("", str(graph2.as_graph_element("a").device))
    self.assertEqual("", str(graph2.as_graph_element("b").device))
    self.assertEqual("", str(graph2.as_graph_element("matmul").device))
class MetaGraphWithVariableScopeTest(test.TestCase):
  """Tests meta graph round trips involving LOCAL_VARIABLES collections."""

  def testMetricsCollection(self):
    """Exported metrics (local variables) can be re-imported and re-initialized."""

    def _enqueue_vector(sess, queue, values, shape=None):
      # Enqueue `values` as a single row vector unless a shape is given.
      if not shape:
        shape = (1, len(values))
      dtype = queue.dtypes[0]
      sess.run(
          queue.enqueue(constant_op.constant(
              values, dtype=dtype, shape=shape)))

    meta_graph_filename = os.path.join(
        _TestDir("metrics_export"), "meta_graph.pb")
    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # metrics.mean creates local (not global) variables under the hood.
      _, update_op = metrics.mean(values)
      initializer = variables.local_variables_initializer()
      sess.run(initializer)
      sess.run(update_op)
    meta_graph.export_scoped_meta_graph(
        filename=meta_graph_filename, graph=graph)
    # Verifies that importing a meta_graph with LOCAL_VARIABLES collection
    # works correctly.
    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      meta_graph.import_scoped_meta_graph(meta_graph_filename)
      initializer = variables.local_variables_initializer()
      sess.run(initializer)
    # Verifies that importing an old meta_graph where "local_variables"
    # collection is of node_list type works, but cannot build initializer
    # with the collection.
    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      meta_graph.import_scoped_meta_graph(
          test.test_src_dir_path(
              "python/framework/testdata/metrics_export_meta_graph.pb"))
      self.assertEqual(len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)),
                       2)
      # node_list collections restore Tensors, not Variables, so the
      # initializer builder fails with an AttributeError.
      with self.assertRaisesRegexp(
          AttributeError, "'Tensor' object has no attribute 'initializer'"):
        initializer = variables.local_variables_initializer()
class ExportImportAcrossScopesTest(test.TestCase):
  """Tests export from one variable scope and import into another."""

  # NOTE(review): method name has a typo ("Partioned" vs "Partitioned") but
  # renaming a test method changes the public test id, so it is kept as-is.
  def testPartionedVariables(self):

    def make_graph_with_partitioned_variables(use_resource):
      # Partitioned variable: split into 3 shards along axis 0.
      variable_scope.get_variable(
          name="weights",
          partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0),
          initializer=random_ops.truncated_normal([100, 10]),
          use_resource=use_resource)
      # The next variable illustrates the necessity of restoring collections
      # in a deterministic fashion when using ResourceVariables.
      variable_scope.get_variable(
          name="another",
          shape=[],
          collections=["a", "b", "z", "f", "e", "d", "g"],
          use_resource=use_resource)

    self._testExportImportAcrossScopes(
        make_graph_with_partitioned_variables, use_resource=False)
    self._testExportImportAcrossScopes(
        make_graph_with_partitioned_variables, use_resource=True)

  def _testExportImportAcrossScopes(self, graph_fn, use_resource):
    """Tests export and importing a graph across scopes.

    Builds `graph_fn` under "dropA/dropB/keepA", exports with
    export_scope="dropA/dropB", imports under "importA", and compares the
    re-export against a graph built directly under "importA/keepA".

    Args:
      graph_fn: A closure that creates a graph on the current scope.
      use_resource: A bool indicating whether or not to use ResourceVariables.
    """
    with ops.Graph().as_default() as original_graph:
      with variable_scope.variable_scope("dropA/dropB/keepA"):
        graph_fn(use_resource=use_resource)
    exported_meta_graph_def = meta_graph.export_scoped_meta_graph(
        graph=original_graph,
        export_scope="dropA/dropB")[0]
    with ops.Graph().as_default() as imported_graph:
      meta_graph.import_scoped_meta_graph(
          exported_meta_graph_def,
          import_scope="importA")
    with ops.Graph().as_default() as expected_graph:
      with variable_scope.variable_scope("importA/keepA"):
        graph_fn(use_resource=use_resource)
    result = meta_graph.export_scoped_meta_graph(graph=imported_graph)[0]
    expected = meta_graph.export_scoped_meta_graph(graph=expected_graph)[0]
    if use_resource:
      # Clear all shared_name attributes before comparing, since they are
      # orthogonal to scopes and are not updated on export/import.
      for meta_graph_def in [result, expected]:
        for node in meta_graph_def.graph_def.node:
          shared_name_attr = "shared_name"
          shared_name_value = node.attr.get(shared_name_attr, None)
          if shared_name_value and shared_name_value.HasField("s"):
            if shared_name_value.s:
              node.attr[shared_name_attr].s = b""
    test_util.assert_meta_graph_protos_equal(self, expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kosgroup/odoo | addons/purchase/tests/test_create_picking.py | 4 | 4036 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
from odoo.addons.product.tests import common
class TestCreatePicking(common.TestProductCommon):
def setUp(self):
super(TestCreatePicking, self).setUp()
self.partner_id = self.env.ref('base.res_partner_1')
self.product_id_1 = self.env.ref('product.product_product_8')
self.product_id_2 = self.env.ref('product.product_product_11')
res_users_purchase_user = self.env.ref('purchase.group_purchase_user')
Users = self.env['res.users'].with_context({'no_reset_password': True, 'mail_create_nosubscribe': True})
self.user_purchase_user = Users.create({
'name': 'Pauline Poivraisselle',
'login': 'pauline',
'email': 'pur@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [res_users_purchase_user.id])]})
self.po_vals = {
'partner_id': self.partner_id.id,
'order_line': [
(0, 0, {
'name': self.product_id_1.name,
'product_id': self.product_id_1.id,
'product_qty': 5.0,
'product_uom': self.product_id_1.uom_po_id.id,
'price_unit': 500.0,
'date_planned': datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
})],
}
def test_00_create_picking(self):
# Draft purchase order created
self.po = self.env['purchase.order'].create(self.po_vals)
self.assertTrue(self.po, 'Purchase: no purchase order created')
# Purchase order confirm
self.po.button_confirm()
self.assertEqual(self.po.state, 'purchase', 'Purchase: PO state should be "Purchase')
self.assertEqual(self.po.picking_count, 1, 'Purchase: one picking should be created')
self.assertEqual(len(self.po.order_line.move_ids), 1, 'One move should be created')
# Change purchase order line product quantity
self.po.order_line.write({'product_qty': 7.0})
self.assertEqual(len(self.po.order_line.move_ids), 2, 'Two move should be created')
# Validate first shipment
self.picking = self.po.picking_ids[0]
self.picking.force_assign()
self.picking.pack_operation_product_ids.write({'qty_done': 7.0})
self.picking.do_new_transfer()
self.assertEqual(self.po.order_line.mapped('qty_received'), [7.0], 'Purchase: all products should be received')
# create new order line
self.po.write({'order_line': [
(0, 0, {
'name': self.product_id_2.name,
'product_id': self.product_id_2.id,
'product_qty': 5.0,
'product_uom': self.product_id_2.uom_po_id.id,
'price_unit': 250.0,
'date_planned': datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
})]})
self.assertEqual(self.po.picking_count, 2, 'New picking should be created')
moves = self.po.order_line.mapped('move_ids').filtered(lambda x: x.state not in ('done', 'cancel'))
self.assertEqual(len(moves), 1, 'One move should be created')
    def test_01_check_double_validation(self):
        """Check the two-step PO validation flow: a normal purchase user's
        confirmation leaves the PO in 'to approve' until a manager approves."""
        # make double validation two step
        self.env.user.company_id.write({'po_double_validation': 'two_step'})
        # Draft purchase order created
        # (created as the restricted purchase user set up in setUp)
        self.po = self.env['purchase.order'].sudo(self.user_purchase_user).create(self.po_vals)
        self.assertTrue(self.po, 'Purchase: no purchase order created')
        # Purchase order confirm
        self.po.button_confirm()
        self.assertEqual(self.po.state, 'to approve', 'Purchase: PO state should be "to approve".')
        # PO approved by manager
        self.po.button_approve()
        self.assertEqual(self.po.state, 'purchase', 'PO state should be "Purchase".')
| gpl-3.0 |
GarySparrow/mFlaskWeb | venv/Lib/site-packages/flask/signals.py | 783 | 2140 | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False
# Prefer the real blinker implementation; if it is not installed, fall back
# to no-op stubs so Flask keeps working (minus actual signal delivery).
try:
    from blinker import Namespace
    signals_available = True
except ImportError:
    class Namespace(object):
        """Fallback namespace that hands out _FakeSignal objects instead of
        real blinker signals."""
        def signal(self, name, doc=None):
            return _FakeSignal(name, doc)
    class _FakeSignal(object):
        """If blinker is unavailable, create a fake class with the same
        interface that allows sending of signals but will fail with an
        error on anything else.  Instead of doing anything on send, it
        will just ignore the arguments and do nothing instead.
        """
        def __init__(self, name, doc=None):
            self.name = name
            self.__doc__ = doc
        def _fail(self, *args, **kwargs):
            raise RuntimeError('signalling support is unavailable '
                               'because the blinker library is '
                               'not installed.')
        # send silently does nothing; every other signal operation raises
        # so that accidental reliance on signals is detected early.
        send = lambda *a, **kw: None
        connect = disconnect = has_receivers_for = receivers_for = \
            temporarily_connected_to = connected_to = _fail
        del _fail
# the namespace for code signals.  If you are not flask code, do
# not put signals in here.  Create your own namespace instead.
_signals = Namespace()
# core signals.  For usage examples grep the sourcecode or consult
# the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
| mit |
stylianos-kampakis/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)

import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt

###############################################################################
# Generate sample data: 40 sorted random points in [0, 5)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

###############################################################################
# Add noise to targets (every 5th sample gets uniform noise in [-1.5, 1.5))
y[::5] += 3 * (0.5 - np.random.rand(8))

###############################################################################
# Fit regression model with three different kernels
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)

###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
# NOTE: the original called plt.hold('on') here.  pyplot.hold was deprecated
# in matplotlib 2.0 and removed in 3.0 (calling it raises AttributeError);
# "hold on" is the default behaviour, so repeated plot calls already overlay.
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/Python-2.5.2/Lib/lib-tk/tkMessageBox.py | 6 | 3238 | # tk common message boxes
#
# this module provides an interface to the native message boxes
# available in Tk 4.2 and newer.
#
# written by Fredrik Lundh, May 1997
#
#
# options (all have default values):
#
# - default: which button to make default (one of the reply codes)
#
# - icon: which icon to display (see below)
#
# - message: the message to display
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
# - type: dialog type; that is, which buttons to display (see below)
#
from tkCommonDialog import Dialog
#
# constants

# icons
ERROR = "error"
INFO = "info"
QUESTION = "question"
WARNING = "warning"

# types (which buttons the dialog shows)
ABORTRETRYIGNORE = "abortretryignore"
OK = "ok"
OKCANCEL = "okcancel"
RETRYCANCEL = "retrycancel"
YESNO = "yesno"
YESNOCANCEL = "yesnocancel"

# replies (the string a dialog returns; note "ok" is both a type and a reply)
ABORT = "abort"
RETRY = "retry"
IGNORE = "ignore"
OK = "ok"
CANCEL = "cancel"
YES = "yes"
NO = "no"

#
# message dialog class

class Message(Dialog):
    "A message box"
    # Tcl command invoked by the underlying tkCommonDialog.Dialog
    command = "tk_messageBox"
#
# convenience stuff
# Rename _icon and _type options to allow overriding them in options
def _show(title=None, message=None, _icon=None, _type=None, **options):
    """Build and display a message box, normalizing the option dict.

    _icon/_type only fill in "icon"/"type" when the caller did not supply
    them explicitly; title/message always win when given.
    """
    if _icon:
        options.setdefault("icon", _icon)
    if _type:
        options.setdefault("type", _type)
    if title:
        options["title"] = title
    if message:
        options["message"] = message
    reply = Message(**options).show()
    # In some Tcl installations, Tcl converts yes/no into a boolean
    if isinstance(reply, bool):
        return YES if reply else NO
    return reply
def showinfo(title=None, message=None, **options):
    "Show an info message"
    return _show(title, message, INFO, OK, **options)

def showwarning(title=None, message=None, **options):
    "Show a warning message"
    return _show(title, message, WARNING, OK, **options)

def showerror(title=None, message=None, **options):
    "Show an error message"
    return _show(title, message, ERROR, OK, **options)

def askquestion(title=None, message=None, **options):
    "Ask a question"
    # Returns the reply string (YES/NO), unlike the ask* helpers below
    # which fold the reply into a boolean.
    return _show(title, message, QUESTION, YESNO, **options)

def askokcancel(title=None, message=None, **options):
    "Ask if operation should proceed; return true if the answer is ok"
    s = _show(title, message, QUESTION, OKCANCEL, **options)
    return s == OK

def askyesno(title=None, message=None, **options):
    "Ask a question; return true if the answer is yes"
    s = _show(title, message, QUESTION, YESNO, **options)
    return s == YES

def askretrycancel(title=None, message=None, **options):
    "Ask if operation should be retried; return true if the answer is yes"
    s = _show(title, message, WARNING, RETRYCANCEL, **options)
    return s == RETRY
# --------------------------------------------------------------------
# test stuff
# Interactive smoke test: pops up one dialog of each flavour and prints
# the reply.  (Python 2 print statements; requires a display.)
if __name__ == "__main__":
    print "info", showinfo("Spam", "Egg Information")
    print "warning", showwarning("Spam", "Egg Warning")
    print "error", showerror("Spam", "Egg Alert")
    print "question", askquestion("Spam", "Question?")
    print "proceed", askokcancel("Spam", "Proceed?")
    print "yes/no", askyesno("Spam", "Got it?")
    print "try again", askretrycancel("Spam", "Try again?")
| mit |
ChangsoonKim/STM32F7DiscTutor | toolchain/osx/gcc-arm-none-eabi-6-2017-q1-update/share/gcc-arm-none-eabi/libstdcxx/v6/xmethods.py | 5 | 21799 | # Xmethods for libstdc++.
# Copyright (C) 2014-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import gdb.xmethod
import re
# Prefix used for all matcher names registered by this module.
matcher_name_prefix = 'libstdc++::'

def get_bool_type():
    """Return the gdb type object for C++ 'bool'."""
    return gdb.lookup_type('bool')

def get_std_size_type():
    """Return the gdb type object for 'std::size_t'."""
    return gdb.lookup_type('std::size_t')

class LibStdCxxXMethod(gdb.xmethod.XMethod):
    """An xmethod together with the worker class implementing it."""
    def __init__(self, name, worker_class):
        gdb.xmethod.XMethod.__init__(self, name)
        self.worker_class = worker_class
# Xmethods for std::array

class ArrayWorkerBase(gdb.xmethod.XMethodWorker):
    """Shared state and helpers for std::array<T, N> workers."""
    def __init__(self, val_type, size):
        self._val_type = val_type
        self._size = size
    def null_value(self):
        # Dereference of a null T*; used as the "no element" result for
        # front()/back()/operator[] on a zero-sized array.
        nullptr = gdb.parse_and_eval('(void *) 0')
        return nullptr.cast(self._val_type.pointer()).dereference()

class ArraySizeWorker(ArrayWorkerBase):
    """Implements std::array::size() -- the compile-time N."""
    def __init__(self, val_type, size):
        ArrayWorkerBase.__init__(self, val_type, size)
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return get_std_size_type()
    def __call__(self, obj):
        return self._size

class ArrayEmptyWorker(ArrayWorkerBase):
    """Implements std::array::empty() (true iff N == 0)."""
    def __init__(self, val_type, size):
        ArrayWorkerBase.__init__(self, val_type, size)
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return get_bool_type()
    def __call__(self, obj):
        return (int(self._size) == 0)

class ArrayFrontWorker(ArrayWorkerBase):
    """Implements std::array::front()."""
    def __init__(self, val_type, size):
        ArrayWorkerBase.__init__(self, val_type, size)
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        if int(self._size) > 0:
            return obj['_M_elems'][0]
        else:
            return self.null_value()

class ArrayBackWorker(ArrayWorkerBase):
    """Implements std::array::back()."""
    def __init__(self, val_type, size):
        ArrayWorkerBase.__init__(self, val_type, size)
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        if int(self._size) > 0:
            return obj['_M_elems'][self._size - 1]
        else:
            return self.null_value()

class ArrayAtWorker(ArrayWorkerBase):
    """Implements std::array::at() with bounds checking."""
    def __init__(self, val_type, size):
        ArrayWorkerBase.__init__(self, val_type, size)
    def get_arg_types(self):
        return get_std_size_type()
    def get_result_type(self, obj, index):
        return self._val_type
    def __call__(self, obj, index):
        if int(index) >= int(self._size):
            raise IndexError('Array index "%d" should not be >= %d.' %
                             ((int(index), self._size)))
        return obj['_M_elems'][index]

class ArraySubscriptWorker(ArrayWorkerBase):
    """Implements std::array::operator[] (unchecked access)."""
    def __init__(self, val_type, size):
        ArrayWorkerBase.__init__(self, val_type, size)
    def get_arg_types(self):
        return get_std_size_type()
    def get_result_type(self, obj, index):
        return self._val_type
    def __call__(self, obj, index):
        if int(self._size) > 0:
            return obj['_M_elems'][index]
        else:
            return self.null_value()

class ArrayMethodsMatcher(gdb.xmethod.XMethodMatcher):
    """Matches method calls on std::array<...> and returns the worker."""
    def __init__(self):
        gdb.xmethod.XMethodMatcher.__init__(self,
                                            matcher_name_prefix + 'array')
        self._method_dict = {
            'size': LibStdCxxXMethod('size', ArraySizeWorker),
            'empty': LibStdCxxXMethod('empty', ArrayEmptyWorker),
            'front': LibStdCxxXMethod('front', ArrayFrontWorker),
            'back': LibStdCxxXMethod('back', ArrayBackWorker),
            'at': LibStdCxxXMethod('at', ArrayAtWorker),
            'operator[]': LibStdCxxXMethod('operator[]', ArraySubscriptWorker),
        }
        self.methods = [self._method_dict[m] for m in self._method_dict]
    def match(self, class_type, method_name):
        if not re.match('^std::array<.*>$', class_type.tag):
            return None
        method = self._method_dict.get(method_name)
        if method is None or not method.enabled:
            return None
        try:
            value_type = class_type.template_argument(0)
            size = class_type.template_argument(1)
        except:
            # template_argument can fail on incomplete types; decline to match
            return None
        return method.worker_class(value_type, size)
# Xmethods for std::deque
class DequeWorkerBase(gdb.xmethod.XMethodWorker):
    """Shared helpers for std::deque<T> xmethod workers.

    A deque keeps its elements in fixed-size buffers ("nodes") addressed
    through a map of node pointers; _M_start/_M_finish are iterators with
    _M_cur/_M_first/_M_last/_M_node members.
    """

    def __init__(self, val_type):
        self._val_type = val_type
        # Elements per node: 512 bytes' worth, minimum 1 (mirrors
        # __deque_buf_size in the libstdc++ sources).
        self._bufsize = 512 // val_type.sizeof or 1

    def size(self, obj):
        """Return the number of elements in the deque.

        Bug fix: the previous implementation measured from the *first slot*
        of the first node, silently assuming start._M_cur == start._M_first.
        That over-counts once pop_front() has advanced the start iterator.
        Count the partially-filled first and last nodes explicitly instead
        (matches the upstream fix for GCC PR libstdc++/112491).
        """
        start = obj['_M_impl']['_M_start']
        finish = obj['_M_impl']['_M_finish']
        if not start['_M_node']:
            return 0
        return (self._bufsize * (finish['_M_node'] - start['_M_node'] - 1)
                + (start['_M_last'] - start['_M_cur'])
                + (finish['_M_cur'] - finish['_M_first']))

    def index(self, obj, idx):
        """Return element *idx*, honoring the offset of the first element
        within its node (the old code ignored start._M_cur)."""
        start = obj['_M_impl']['_M_start']
        idx = int(idx)
        first_node_size = int(start['_M_last'] - start['_M_cur'])
        if idx < first_node_size:
            return start['_M_cur'][idx]
        idx -= first_node_size
        index_node = start['_M_node'] + 1 + idx // self._bufsize
        return index_node[0][idx % self._bufsize]
class DequeEmptyWorker(DequeWorkerBase):
    """Implements std::deque::empty(): start and finish coincide."""
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return get_bool_type()
    def __call__(self, obj):
        return (obj['_M_impl']['_M_start']['_M_cur'] ==
                obj['_M_impl']['_M_finish']['_M_cur'])

class DequeSizeWorker(DequeWorkerBase):
    """Implements std::deque::size()."""
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return get_std_size_type()
    def __call__(self, obj):
        return self.size(obj)

class DequeFrontWorker(DequeWorkerBase):
    """Implements std::deque::front()."""
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        return obj['_M_impl']['_M_start']['_M_cur'][0]

class DequeBackWorker(DequeWorkerBase):
    """Implements std::deque::back()."""
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        # If finish._M_cur sits at the start of its node, the last element
        # is the final slot of the previous node.
        if (obj['_M_impl']['_M_finish']['_M_cur'] ==
            obj['_M_impl']['_M_finish']['_M_first']):
            prev_node = obj['_M_impl']['_M_finish']['_M_node'] - 1
            return prev_node[0][self._bufsize - 1]
        else:
            return obj['_M_impl']['_M_finish']['_M_cur'][-1]

class DequeSubscriptWorker(DequeWorkerBase):
    """Implements std::deque::operator[] (unchecked)."""
    def get_arg_types(self):
        return get_std_size_type()
    def get_result_type(self, obj, subscript):
        return self._val_type
    def __call__(self, obj, subscript):
        return self.index(obj, subscript)

class DequeAtWorker(DequeWorkerBase):
    """Implements std::deque::at() with bounds checking."""
    def get_arg_types(self):
        return get_std_size_type()
    def get_result_type(self, obj, index):
        return self._val_type
    def __call__(self, obj, index):
        deque_size = int(self.size(obj))
        if int(index) >= deque_size:
            raise IndexError('Deque index "%d" should not be >= %d.' %
                             (int(index), deque_size))
        else:
            return self.index(obj, index)

class DequeMethodsMatcher(gdb.xmethod.XMethodMatcher):
    """Matches method calls on std::deque<...> and returns the worker."""
    def __init__(self):
        gdb.xmethod.XMethodMatcher.__init__(self,
                                            matcher_name_prefix + 'deque')
        self._method_dict = {
            'empty': LibStdCxxXMethod('empty', DequeEmptyWorker),
            'size': LibStdCxxXMethod('size', DequeSizeWorker),
            'front': LibStdCxxXMethod('front', DequeFrontWorker),
            'back': LibStdCxxXMethod('back', DequeBackWorker),
            'operator[]': LibStdCxxXMethod('operator[]', DequeSubscriptWorker),
            'at': LibStdCxxXMethod('at', DequeAtWorker)
        }
        self.methods = [self._method_dict[m] for m in self._method_dict]
    def match(self, class_type, method_name):
        if not re.match('^std::deque<.*>$', class_type.tag):
            return None
        method = self._method_dict.get(method_name)
        if method is None or not method.enabled:
            return None
        return method.worker_class(class_type.template_argument(0))
# Xmethods for std::forward_list
class ForwardListWorkerBase(gdb.xmethod.XMethodWorker):
    """Base class for std::forward_list xmethod workers.

    Bug fix: this class previously derived from gdb.xmethod.XMethodMatcher,
    which is the wrong base class for a *worker*; it now derives from
    XMethodWorker (matching the upstream libstdc++ correction).
    """
    def __init__(self, val_type, node_type):
        self._val_type = val_type
        self._node_type = node_type
    def get_arg_types(self):
        # Both forward_list workers (empty, front) take no arguments.
        return None
class ForwardListEmptyWorker(ForwardListWorkerBase):
    """Implements std::forward_list::empty(): no node after the head."""
    def get_result_type(self, obj):
        return get_bool_type()
    def __call__(self, obj):
        return obj['_M_impl']['_M_head']['_M_next'] == 0

class ForwardListFrontWorker(ForwardListWorkerBase):
    """Implements std::forward_list::front()."""
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        node = obj['_M_impl']['_M_head']['_M_next'].cast(self._node_type)
        # The value lives in the node's aligned storage buffer.
        val_address = node['_M_storage']['_M_storage'].address
        return val_address.cast(self._val_type.pointer()).dereference()

class ForwardListMethodsMatcher(gdb.xmethod.XMethodMatcher):
    """Matches method calls on std::forward_list<...>."""
    def __init__(self):
        matcher_name = matcher_name_prefix + 'forward_list'
        gdb.xmethod.XMethodMatcher.__init__(self, matcher_name)
        self._method_dict = {
            'empty': LibStdCxxXMethod('empty', ForwardListEmptyWorker),
            'front': LibStdCxxXMethod('front', ForwardListFrontWorker)
        }
        self.methods = [self._method_dict[m] for m in self._method_dict]
    def match(self, class_type, method_name):
        if not re.match('^std::forward_list<.*>$', class_type.tag):
            return None
        method = self._method_dict.get(method_name)
        if method is None or not method.enabled:
            return None
        val_type = class_type.template_argument(0)
        node_type = gdb.lookup_type(str(class_type) + '::_Node').pointer()
        return method.worker_class(val_type, node_type)
# Xmethods for std::list

class ListWorkerBase(gdb.xmethod.XMethodWorker):
    """Shared helpers for std::list xmethod workers."""
    def __init__(self, val_type, node_type):
        self._val_type = val_type
        self._node_type = node_type
    def get_arg_types(self):
        return None
    def get_value_from_node(self, node):
        """Extract the stored value from a list node, handling both the
        C++03 and C++11 node layouts."""
        node = node.dereference()
        if node.type.fields()[1].name == '_M_data':
            # C++03 implementation, node contains the value as a member
            return node['_M_data']
        # C++11 implementation, node stores value in __aligned_membuf
        addr = node['_M_storage'].address
        return addr.cast(self._val_type.pointer()).dereference()
class ListEmptyWorker(ListWorkerBase):
    """Implements std::list::empty(): the list is empty exactly when the
    sentinel (header) node links back to itself."""
    def get_result_type(self, obj):
        return get_bool_type()
    def __call__(self, obj):
        sentinel = obj['_M_impl']['_M_node']
        # Compare the sentinel's next pointer against its own address;
        # the comparison already yields the boolean we need.
        return sentinel['_M_next'] == sentinel.address
class ListSizeWorker(ListWorkerBase):
    """Implements std::list::size() by walking the nodes (O(n))."""
    def get_result_type(self, obj):
        return get_std_size_type()
    def __call__(self, obj):
        begin_node = obj['_M_impl']['_M_node']['_M_next']
        end_node = obj['_M_impl']['_M_node'].address
        size = 0
        while begin_node != end_node:
            begin_node = begin_node['_M_next']
            size += 1
        return size

class ListFrontWorker(ListWorkerBase):
    """Implements std::list::front()."""
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        node = obj['_M_impl']['_M_node']['_M_next'].cast(self._node_type)
        return self.get_value_from_node(node)

class ListBackWorker(ListWorkerBase):
    """Implements std::list::back()."""
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        prev_node = obj['_M_impl']['_M_node']['_M_prev'].cast(self._node_type)
        return self.get_value_from_node(prev_node)

class ListMethodsMatcher(gdb.xmethod.XMethodMatcher):
    """Matches method calls on std::list<...> (incl. __cxx11 ABI tag)."""
    def __init__(self):
        gdb.xmethod.XMethodMatcher.__init__(self,
                                            matcher_name_prefix + 'list')
        self._method_dict = {
            'empty': LibStdCxxXMethod('empty', ListEmptyWorker),
            'size': LibStdCxxXMethod('size', ListSizeWorker),
            'front': LibStdCxxXMethod('front', ListFrontWorker),
            'back': LibStdCxxXMethod('back', ListBackWorker)
        }
        self.methods = [self._method_dict[m] for m in self._method_dict]
    def match(self, class_type, method_name):
        if not re.match('^std::(__cxx11::)?list<.*>$', class_type.tag):
            return None
        method = self._method_dict.get(method_name)
        if method is None or not method.enabled:
            return None
        val_type = class_type.template_argument(0)
        node_type = gdb.lookup_type(str(class_type) + '::_Node').pointer()
        return method.worker_class(val_type, node_type)
# Xmethods for std::vector

class VectorWorkerBase(gdb.xmethod.XMethodWorker):
    """Shared helpers for std::vector<T> workers.  vector<bool> packs its
    elements as bits, so it is special-cased throughout."""
    def __init__(self, val_type):
        self._val_type = val_type
    def size(self, obj):
        if self._val_type.code == gdb.TYPE_CODE_BOOL:
            # vector<bool>: count whole words between start and finish,
            # then add the bit offset within the last word.
            start = obj['_M_impl']['_M_start']['_M_p']
            finish = obj['_M_impl']['_M_finish']['_M_p']
            finish_offset = obj['_M_impl']['_M_finish']['_M_offset']
            bit_size = start.dereference().type.sizeof * 8
            return (finish - start) * bit_size + finish_offset
        else:
            return obj['_M_impl']['_M_finish'] - obj['_M_impl']['_M_start']
    def get(self, obj, index):
        if self._val_type.code == gdb.TYPE_CODE_BOOL:
            start = obj['_M_impl']['_M_start']['_M_p']
            bit_size = start.dereference().type.sizeof * 8
            valp = start + index // bit_size
            offset = index % bit_size
            return (valp.dereference() & (1 << offset)) > 0
        else:
            return obj['_M_impl']['_M_start'][index]

class VectorEmptyWorker(VectorWorkerBase):
    """Implements std::vector::empty()."""
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return get_bool_type()
    def __call__(self, obj):
        return int(self.size(obj)) == 0

class VectorSizeWorker(VectorWorkerBase):
    """Implements std::vector::size()."""
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return get_std_size_type()
    def __call__(self, obj):
        return self.size(obj)

class VectorFrontWorker(VectorWorkerBase):
    """Implements std::vector::front()."""
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        return self.get(obj, 0)

class VectorBackWorker(VectorWorkerBase):
    """Implements std::vector::back()."""
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return self._val_type
    def __call__(self, obj):
        return self.get(obj, int(self.size(obj)) - 1)

class VectorAtWorker(VectorWorkerBase):
    """Implements std::vector::at() with bounds checking."""
    def get_arg_types(self):
        return get_std_size_type()
    def get_result_type(self, obj, index):
        return self._val_type
    def __call__(self, obj, index):
        size = int(self.size(obj))
        if int(index) >= size:
            raise IndexError('Vector index "%d" should not be >= %d.' %
                             ((int(index), size)))
        return self.get(obj, int(index))

class VectorSubscriptWorker(VectorWorkerBase):
    """Implements std::vector::operator[] (unchecked)."""
    def get_arg_types(self):
        return get_std_size_type()
    def get_result_type(self, obj, subscript):
        return self._val_type
    def __call__(self, obj, subscript):
        return self.get(obj, int(subscript))

class VectorMethodsMatcher(gdb.xmethod.XMethodMatcher):
    """Matches method calls on std::vector<...>."""
    def __init__(self):
        gdb.xmethod.XMethodMatcher.__init__(self,
                                            matcher_name_prefix + 'vector')
        self._method_dict = {
            'size': LibStdCxxXMethod('size', VectorSizeWorker),
            'empty': LibStdCxxXMethod('empty', VectorEmptyWorker),
            'front': LibStdCxxXMethod('front', VectorFrontWorker),
            'back': LibStdCxxXMethod('back', VectorBackWorker),
            'at': LibStdCxxXMethod('at', VectorAtWorker),
            'operator[]': LibStdCxxXMethod('operator[]',
                                           VectorSubscriptWorker),
        }
        self.methods = [self._method_dict[m] for m in self._method_dict]
    def match(self, class_type, method_name):
        if not re.match('^std::vector<.*>$', class_type.tag):
            return None
        method = self._method_dict.get(method_name)
        if method is None or not method.enabled:
            return None
        return method.worker_class(class_type.template_argument(0))
# Xmethods for associative containers

class AssociativeContainerWorkerBase(gdb.xmethod.XMethodWorker):
    """Shared size/empty logic for (unordered_)set/map/multiset/multimap."""
    def __init__(self, unordered):
        self._unordered = unordered
    def node_count(self, obj):
        # Hashtable-based containers track the count in _M_h; tree-based
        # ones in the rb-tree implementation header.
        if self._unordered:
            return obj['_M_h']['_M_element_count']
        else:
            return obj['_M_t']['_M_impl']['_M_node_count']
    def get_arg_types(self):
        return None

class AssociativeContainerEmptyWorker(AssociativeContainerWorkerBase):
    """Implements empty() for associative containers."""
    def get_result_type(self, obj):
        return get_bool_type()
    def __call__(self, obj):
        return int(self.node_count(obj)) == 0

class AssociativeContainerSizeWorker(AssociativeContainerWorkerBase):
    """Implements size() for associative containers."""
    def get_result_type(self, obj):
        return get_std_size_type()
    def __call__(self, obj):
        return self.node_count(obj)

class AssociativeContainerMethodsMatcher(gdb.xmethod.XMethodMatcher):
    """Matches size()/empty() on one named associative container type."""
    def __init__(self, name):
        gdb.xmethod.XMethodMatcher.__init__(self,
                                            matcher_name_prefix + name)
        self._name = name
        self._method_dict = {
            'size': LibStdCxxXMethod('size', AssociativeContainerSizeWorker),
            'empty': LibStdCxxXMethod('empty',
                                      AssociativeContainerEmptyWorker),
        }
        self.methods = [self._method_dict[m] for m in self._method_dict]
    def match(self, class_type, method_name):
        if not re.match('^std::%s<.*>$' % self._name, class_type.tag):
            return None
        method = self._method_dict.get(method_name)
        if method is None or not method.enabled:
            return None
        unordered = 'unordered' in self._name
        return method.worker_class(unordered)
# Xmethods for std::unique_ptr

class UniquePtrGetWorker(gdb.xmethod.XMethodWorker):
    """Implements unique_ptr::get() and operator-> (raw pointer access)."""
    def __init__(self, elem_type):
        self._elem_type = elem_type
    def get_arg_types(self):
        return None
    def get_result_type(self, obj):
        return self._elem_type.pointer()
    def __call__(self, obj):
        # The managed pointer is the head of the (pointer, deleter) tuple.
        return obj['_M_t']['_M_head_impl']

class UniquePtrDerefWorker(UniquePtrGetWorker):
    """Implements unique_ptr::operator* (dereference of get())."""
    def __init__(self, elem_type):
        UniquePtrGetWorker.__init__(self, elem_type)
    def get_result_type(self, obj):
        return self._elem_type
    def __call__(self, obj):
        return UniquePtrGetWorker.__call__(self, obj).dereference()

class UniquePtrMethodsMatcher(gdb.xmethod.XMethodMatcher):
    """Matches method calls on std::unique_ptr<...>."""
    def __init__(self):
        gdb.xmethod.XMethodMatcher.__init__(self,
                                            matcher_name_prefix + 'unique_ptr')
        self._method_dict = {
            'get': LibStdCxxXMethod('get', UniquePtrGetWorker),
            'operator->': LibStdCxxXMethod('operator->', UniquePtrGetWorker),
            'operator*': LibStdCxxXMethod('operator*', UniquePtrDerefWorker),
        }
        self.methods = [self._method_dict[m] for m in self._method_dict]
    def match(self, class_type, method_name):
        if not re.match('^std::unique_ptr<.*>$', class_type.tag):
            return None
        method = self._method_dict.get(method_name)
        if method is None or not method.enabled:
            return None
        return method.worker_class(class_type.template_argument(0))
def register_libstdcxx_xmethods(locus):
    """Register every libstdc++ xmethod matcher with *locus* (an objfile,
    progspace, or None for global registration)."""
    gdb.xmethod.register_xmethod_matcher(locus, ArrayMethodsMatcher())
    gdb.xmethod.register_xmethod_matcher(locus, ForwardListMethodsMatcher())
    gdb.xmethod.register_xmethod_matcher(locus, DequeMethodsMatcher())
    gdb.xmethod.register_xmethod_matcher(locus, ListMethodsMatcher())
    gdb.xmethod.register_xmethod_matcher(locus, VectorMethodsMatcher())
    # One AssociativeContainerMethodsMatcher per concrete container name.
    gdb.xmethod.register_xmethod_matcher(
        locus, AssociativeContainerMethodsMatcher('set'))
    gdb.xmethod.register_xmethod_matcher(
        locus, AssociativeContainerMethodsMatcher('map'))
    gdb.xmethod.register_xmethod_matcher(
        locus, AssociativeContainerMethodsMatcher('multiset'))
    gdb.xmethod.register_xmethod_matcher(
        locus, AssociativeContainerMethodsMatcher('multimap'))
    gdb.xmethod.register_xmethod_matcher(
        locus, AssociativeContainerMethodsMatcher('unordered_set'))
    gdb.xmethod.register_xmethod_matcher(
        locus, AssociativeContainerMethodsMatcher('unordered_map'))
    gdb.xmethod.register_xmethod_matcher(
        locus, AssociativeContainerMethodsMatcher('unordered_multiset'))
    gdb.xmethod.register_xmethod_matcher(
        locus, AssociativeContainerMethodsMatcher('unordered_multimap'))
    gdb.xmethod.register_xmethod_matcher(locus, UniquePtrMethodsMatcher())
| mit |
gaurav38/QosRouting | pox/info/switch_info.py | 46 | 2469 | # Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dumps info about switches when they first connect
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str
log = core.getLogger()

# Formatted switch descriptions we've logged
# (We remember them so that we only print them once)
_switches = set()

# .. unless always is True in which case we always print them
_always = False
def _format_entry (desc):
  """Format a SwitchDescReceived event into a boxed multi-line banner.

  Returns one string joined with newlines; each line is padded/framed
  with '=' and '-' rules sized to the longest content line.
  """
  def fmt (v):
    # Render empty/None description fields legibly.
    if not v: return "<Empty>"
    return str(v)
  dpid = dpid_to_str(desc.connection.dpid)
  ofp = desc.ofp.body
  s = []
  # Port names sorted by port number, joined into one line.
  ports = [(p.port_no,p.name) for p in desc.connection.ports.values()]
  ports.sort()
  ports = " ".join(p[1] for p in ports)
  #if len(ports) > len(dpid)+12:
  #  ports = "%s ports" % (len(desc.connection.ports),)
  s.append("New Switch: " + dpid)
  s.append("Hardware: " + fmt(ofp.hw_desc))
  s.append("Software: " + fmt(ofp.sw_desc))
  s.append("SerialNum: " + fmt(ofp.serial_num))
  s.append("Desc: " + fmt(ofp.dp_desc))
  s.append("Ports: " + fmt(ports))
  # Let's get fancy: frame the banner with rules as wide as the widest line.
  width = max(len(line) for line in s)
  s.insert(0, "=" * width)
  s.insert(2, "-" * width)
  s.append( "=" * width)
  return "\n".join(s)
def _handle_ConnectionUp (event):
  """On switch connect, request its OpenFlow description stats."""
  msg = of.ofp_stats_request(body=of.ofp_desc_stats_request())
  msg.type = 0 # For betta bug, can be removed
  event.connection.send(msg)
def _handle_SwitchDescReceived (event):
  """Log a formatted switch description, once per unique description
  (unless the 'always' launch option is set)."""
  s = _format_entry(event)
  if not _always and s in _switches:
    # We've already logged it.
    return
  _switches.add(s)
  ss = s.split("\n")
  # Log line-by-line under a per-DPID child logger.
  logger = core.getLogger("info." + dpid_to_str(event.connection.dpid))
  for s in ss:
    logger.info(s)
def launch (always = False):
  """POX component entry point.  If *always* is truthy, re-log switch
  descriptions even when they were seen before."""
  global _always
  _always = always
  core.openflow.addListenerByName("ConnectionUp",
      _handle_ConnectionUp)
  core.openflow.addListenerByName("SwitchDescReceived",
      _handle_SwitchDescReceived)
| apache-2.0 |
AngelkPetkov/titanium_mobile | support/android/android.py | 33 | 15397 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Appcelerator Titanium Mobile
# Copyright (c) 2011-2013 by Appcelerator, Inc. All Rights Reserved.
# Licensed under the terms of the Apache Public License
# Please see the LICENSE included with this distribution for details.
#
# Android Application Script
#
import os, sys, shutil, platform, zipfile
import string, subprocess, re
from xml.etree.ElementTree import ElementTree
from StringIO import StringIO
from os.path import join, splitext, split, exists
from shutil import copyfile
from androidsdk import AndroidSDK
from compiler import Compiler
import bindings
# Locate this script's directory and its sibling 'module'/'common' dirs,
# then make them importable before pulling in the support libraries.
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
module_dir = os.path.join(os.path.dirname(template_dir), 'module')
common_dir = os.path.join(os.path.dirname(template_dir), 'common')
sys.path.extend([os.path.dirname(template_dir), module_dir, common_dir])
from mako.template import Template
from tiapp import TiAppXML, touch_tiapp_xml
from manifest import Manifest
from module import ModuleDetector
import simplejson
# Files/directories skipped when copying project resources.
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];
ignoreDirs = ['.git','.svn','_svn', 'CVS'];
def run(args):
	"""Execute *args* as a subprocess and return its combined
	stdout+stderr output as a byte string (blocks until exit)."""
	proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
	output, _ = proc.communicate()
	return output
def pipe(args1,args2):
	"""Run ``args1 | args2`` and return the second command's stdout as bytes.

	Fix: close our copy of p1's stdout after wiring it into p2 so that p1
	receives SIGPIPE if p2 exits early (per the subprocess docs' shell
	pipeline recipe), and wait on p1 to avoid leaving a zombie process.
	"""
	p1 = subprocess.Popen(args1, stdout=subprocess.PIPE)
	p2 = subprocess.Popen(args2, stdin=p1.stdout, stdout=subprocess.PIPE)
	p1.stdout.close()
	output = p2.communicate()[0]
	p1.wait()
	return output
def copy_resources(source, target):
	"""Recursively copy *source* into *target* (created if missing),
	skipping VCS directories and OS junk files; follows symlinks
	(4th os.walk argument is followlinks=True)."""
	if not os.path.exists(os.path.expanduser(target)):
		os.mkdir(os.path.expanduser(target))
	for root, dirs, files in os.walk(source, True, None, True):
		for name in ignoreDirs:
			if name in dirs:
				dirs.remove(name) # don't visit ignored directories
		for file in files:
			if file in ignoreFiles:
				continue
			from_ = join(root, file)
			# Rebuild the destination path by swapping the source prefix.
			to_ = os.path.expanduser(from_.replace(source, target, 1))
			to_directory = os.path.expanduser(split(to_)[0])
			if not exists(to_directory):
				os.makedirs(to_directory)
			print "[TRACE] copying: %s to: %s" % (from_,to_)
			copyfile(from_, to_)
class Android(object):
	"""Helper for generating/building a Titanium Android application."""
	def __init__(self, name, myid, sdk, deploy_type, java):
		"""Capture app identity and build settings.

		name: application display name
		myid: application/package id (a 'com.' prefix is added if it has no dot)
		sdk: AndroidSDK wrapper
		deploy_type: e.g. development/production (stored into the template config)
		java: path to the java executable to use
		"""
		self.name = name
		# android requires at least one dot in packageid
		if len(re.findall(r'\.',myid))==0:
			myid = 'com.%s' % myid
		self.id = myid
		self.sdk = sdk
		# Used in templating
		self.config = {
			'appid': self.id,
			'appname' : self.name,
			'appversion' : '1',
			'apiversion' : '7', #Android 2.1
			'deploy_type': deploy_type,
			'compile_js': False
		}
		self.config['classname'] = Android.strip_classname(self.name)
		self.deploy_type = deploy_type
		self.java = java
@classmethod
def strip_classname(cls, name):
classname = ''.join([str.capitalize() for str in re.split('[^A-Za-z0-9_]', name)])
if re.search("^[0-9]", classname) != None:
classname = "_" + classname
return classname
	def newdir(self, *segments):
		"""Join *segments* into a path, create it (with parents) if it does
		not exist, and return the path."""
		path = os.path.join(*segments)
		if not os.path.exists(path):
			os.makedirs(path)
		return path
	def copyfile(self, file, src, dest):
		"""Copy *file* from directory *src* to directory *dest*."""
		shutil.copy(os.path.join(src, file), os.path.join(dest, file))
	def load_template(self, template):
		"""Load a Mako template from *template*, emitting UTF-8 and
		replacing undecodable characters."""
		return Template(filename=template, output_encoding='utf-8', encoding_errors='replace')
	def render_android_manifest(self):
		"""Render templates/AndroidManifest.xml with self.config and return
		the resulting text."""
		template_dir = os.path.dirname(sys._getframe(0).f_code.co_filename)
		tmpl = self.load_template(os.path.join(template_dir, 'templates', 'AndroidManifest.xml'))
		return tmpl.render(config = self.config)
def render(self, template_dir, template_file, dest, dest_file, **kwargs):
tmpl = self.load_template(os.path.join(template_dir, 'templates', template_file))
f = None
try:
print "[TRACE] Generating %s" % os.path.join(dest, dest_file)
f = open(os.path.join(dest, dest_file), "w")
f.write(tmpl.render(config = self.config, **kwargs))
finally:
if f!=None: f.close
def build_app_info(self, project_dir):
tiapp = ElementTree()
assets_tiappxml = os.path.join(project_dir, 'build', 'android', 'bin', 'assets', 'tiapp.xml')
self.app_info = {'fullscreen':'false','navbar-hidden':'false','deploy-type':self.deploy_type}
self.app_properties = {}
if not os.path.exists(assets_tiappxml):
shutil.copy(os.path.join(project_dir, 'tiapp.xml'), assets_tiappxml)
tiapp.parse(open(assets_tiappxml, 'r'))
for key in ['id', 'name', 'version', 'publisher', 'url', 'copyright',
'description', 'icon', 'analytics', 'guid', 'navbar-hidden', 'fullscreen']:
el = tiapp.find(key)
if el != None:
self.app_info[key] = el.text
for property_el in tiapp.findall("property"):
name = property_el.get("name")
type = property_el.get("type")
value = property_el.text
if name == None: continue
if type == None: type = "string"
if value == None: value = ""
self.app_properties[name] = {"type": type, "value": value}
def generate_activities(self, app_package_dir):
if not 'activities' in self.tiapp.android: return
for key in self.tiapp.android['activities'].keys():
activity = self.tiapp.android['activities'][key]
print '[DEBUG] generating activity class: ' + activity['classname']
self.render(template_dir, 'JSActivity.java', app_package_dir, activity['classname']+'.java', activity=activity)
def generate_services(self, app_package_dir):
if not 'services' in self.tiapp.android: return
for key in self.tiapp.android['services'].keys():
service = self.tiapp.android['services'][key]
service_type = service['service_type']
print '[DEBUG] generating service type "%s", class "%s"' %(service_type, service['classname'])
if service_type == 'interval':
self.render(template_dir, 'JSIntervalService.java', app_package_dir, service['classname']+'.java', service=service)
else:
self.render(template_dir, 'JSService.java', app_package_dir, service['classname']+'.java', service=service)
def build_modules_info(self, resources_dir, app_bin_dir, include_all_ti_modules=False):
self.app_modules = []
(modules, external_child_modules) = bindings.get_all_module_bindings()
compiler = Compiler(self.tiapp, resources_dir, self.java, app_bin_dir,
None, os.path.dirname(app_bin_dir),
include_all_modules=include_all_ti_modules)
compiler.compile(compile_bytecode=False, info_message=None)
for module in compiler.modules:
module_bindings = []
# TODO: we should also detect module properties
for method in compiler.module_methods:
if method.lower().startswith(module+'.') and '.' not in method:
module_bindings.append(method[len(module)+1:])
module_onAppCreate = None
module_class = None
module_apiName = None
for m in modules.keys():
if modules[m]['fullAPIName'].lower() == module:
module_class = m
module_apiName = modules[m]['fullAPIName']
if 'onAppCreate' in modules[m]:
module_onAppCreate = modules[m]['onAppCreate']
break
if module_apiName == None: continue # module wasn't found
ext_modules = []
if module_class in external_child_modules:
for child_module in external_child_modules[module_class]:
if child_module['fullAPIName'].lower() in compiler.modules:
ext_modules.append(child_module)
self.app_modules.append({
'api_name': module_apiName,
'class_name': module_class,
'bindings': module_bindings,
'external_child_modules': ext_modules,
'on_app_create': module_onAppCreate
})
# discover app modules
detector = ModuleDetector(self.project_dir)
missing, detected_modules = detector.find_app_modules(self.tiapp, 'android', self.deploy_type)
for missing_module in missing: print '[WARN] Couldn\'t find app module: %s' % missing_module['id']
self.custom_modules = []
for module in detected_modules:
if module.jar == None: continue
module_jar = zipfile.ZipFile(module.jar)
module_bindings = bindings.get_module_bindings(module_jar)
if module_bindings is None: continue
for module_class in module_bindings['modules'].keys():
module_apiName = module_bindings['modules'][module_class]['apiName']
module_proxy = module_bindings['proxies'][module_class]
module_id = module_proxy['proxyAttrs']['id']
module_proxy_class_name = module_proxy['proxyClassName']
module_onAppCreate = None
if 'onAppCreate' in module_proxy:
module_onAppCreate = module_proxy['onAppCreate']
print '[DEBUG] module_id = %s' % module_id
if module_id == module.manifest.moduleid:
# make sure that the module was not built before 1.8.0.1
try:
module_api_version = int(module.manifest.apiversion)
if module_api_version < 2:
print "[ERROR] The 'apiversion' for '%s' in the module manifest is less than version 2. The module was likely built against a Titanium SDK pre 1.8.0.1. Please use a version of the module that has 'apiversion' 2 or greater" % module_id
touch_tiapp_xml(os.path.join(self.project_dir, 'tiapp.xml'))
sys.exit(1)
except(TypeError, ValueError):
print "[ERROR] The 'apiversion' for '%s' in the module manifest is not a valid value. Please use a version of the module that has an 'apiversion' value of 2 or greater set in it's manifest file" % module_id
touch_tiapp_xml(os.path.join(self.project_dir, 'tiapp.xml'))
sys.exit(1)
is_native_js_module = (hasattr(module.manifest, 'commonjs') and module.manifest.commonjs)
print '[DEBUG] appending module: %s' % module_class
self.custom_modules.append({
'module_id': module_id,
'module_apiName': module_apiName,
'proxy_name': module_proxy_class_name,
'class_name': module_class,
'manifest': module.manifest,
'on_app_create': module_onAppCreate,
'is_native_js_module': is_native_js_module
})
if is_native_js_module:
# Need to look at the app modules used in this external js module
metadata_file = os.path.join(module.path, "metadata.json")
metadata = None
try:
f = open(metadata_file, "r")
metadata = f.read()
finally:
f.close()
if metadata:
metadata = simplejson.loads(metadata)
if metadata.has_key("exports"):
exported_module_ids = metadata["exports"]
already_included_module_ids = [m["api_name"].lower() for m in self.app_modules]
need_to_add = [m for m in exported_module_ids if m not in already_included_module_ids]
if need_to_add:
for to_add in need_to_add:
module_onAppCreate = None
module_class = None
module_apiName = None
for m in modules.keys():
if modules[m]['fullAPIName'].lower() == to_add:
module_class = m
module_apiName = modules[m]['fullAPIName']
if 'onAppCreate' in modules[m]:
module_onAppCreate = modules[m]['onAppCreate']
break
if module_apiName == None: continue # module wasn't found
ext_modules = []
if module_class in external_child_modules:
for child_module in external_child_modules[module_class]:
if child_module['fullAPIName'].lower() in compiler.modules:
ext_modules.append(child_module)
self.app_modules.append({
'api_name': module_apiName,
'class_name': module_class,
'bindings': [],
'external_child_modules': ext_modules,
'on_app_create': module_onAppCreate
})
def create(self, dir, build_time=False, project_dir=None, include_all_ti_modules=False):
template_dir = os.path.dirname(sys._getframe(0).f_code.co_filename)
# Build up output directory tree
if project_dir is None:
project_dir = self.newdir(dir, self.name)
self.project_dir = project_dir
# Paths to Titanium assets that need to be linked into eclipse structure
self.config['ti_tiapp_xml'] = os.path.join(project_dir, 'tiapp.xml')
self.tiapp = TiAppXML(self.config['ti_tiapp_xml'])
resource_dir = os.path.join(project_dir, 'Resources')
self.config['ti_resources_dir'] = resource_dir
json_contents = open(os.path.join(template_dir,'dependency.json')).read()
depends_map = simplejson.loads(json_contents)
app_build_dir = self.newdir(project_dir, 'build')
app_dir = self.newdir(app_build_dir, 'android')
#if os.path.exists(os.path.join(app_dir,'bin')):
# shutil.rmtree(os.path.join(app_dir,'bin'))
if os.path.exists(os.path.join(app_dir,'src')):
shutil.rmtree(os.path.join(app_dir,'src'))
if os.path.exists(os.path.join(app_dir,'res')):
shutil.rmtree(os.path.join(app_dir,'res'))
app_bin_dir = self.newdir(app_dir, 'bin')
app_lib_dir = self.newdir(app_dir, 'lib')
app_src_dir = self.newdir(app_dir, 'src')
app_res_dir = self.newdir(app_dir, 'res')
app_gen_dir = self.newdir(app_dir, 'gen')
app_bin_classes_dir = self.newdir(app_bin_dir, 'classes')
app_res_drawable_dir = self.newdir(app_res_dir, 'drawable')
app_assets_dir = self.newdir(app_dir, 'assets')
app_package_dir = self.newdir(app_gen_dir, *self.id.split('.'))
app_bin_assets_dir = self.newdir(app_bin_dir, 'assets')
app_bin_assets_resources_dir = self.newdir(app_bin_assets_dir, 'Resources')
self.build_app_info(project_dir)
self.build_modules_info(app_bin_assets_resources_dir, app_bin_dir, include_all_ti_modules=include_all_ti_modules)
# Create android source
self.render(template_dir, 'AppInfo.java', app_package_dir, self.config['classname'] + 'AppInfo.java',
app_properties = self.app_properties, app_info = self.app_info)
self.render(template_dir, 'AndroidManifest.xml', app_dir, 'AndroidManifest.xml')
self.render(template_dir, 'App.java', app_package_dir, self.config['classname'] + 'Application.java',
app_modules = self.app_modules, custom_modules = self.custom_modules)
self.render(template_dir, 'Activity.java', app_package_dir, self.config['classname'] + 'Activity.java')
self.generate_activities(app_package_dir)
self.generate_services(app_package_dir)
self.render(template_dir, 'classpath', app_dir, '.classpath')
self.render(template_dir, 'project', app_dir, '.project')
self.render(template_dir, 'default.properties', app_dir, 'default.properties')
print "[TRACE] Generating app.json"
f = None
try:
f = open(os.path.join(app_bin_assets_dir, "app.json"), "w")
f.write(simplejson.dumps({"app_modules":self.app_modules}))
finally:
if f is not None:
f.close()
# Don't override a pre-existing .gitignore in case users have their own preferences
# for what should be in it. (LH #2446)
if not os.path.exists(os.path.join(app_dir, '.gitignore')):
self.render(template_dir, 'gitignore', app_dir, '.gitignore')
else:
print "[TRACE] Skipping copying gitignore -> .gitignore because already exists"
android_project_resources = os.path.join(project_dir,'Resources','android')
if build_time==False and os.path.exists(android_project_resources):
shutil.rmtree(android_project_resources)
if not os.path.exists(android_project_resources):
copy_resources(os.path.join(template_dir,'resources'),android_project_resources)
if __name__ == '__main__':
    # this is for testing only for the time being
    # Expects exactly four arguments: <name> <id> <directory> <sdk>.
    if len(sys.argv) != 5 or sys.argv[1]=='--help':
        print "Usage: %s <name> <id> <directory> <sdk>" % os.path.basename(sys.argv[0])
        sys.exit(1)
    # Build a throwaway Android project skeleton from the CLI arguments
    # (deploy_type None, java executable assumed to be on PATH).
    sdk = AndroidSDK(sys.argv[4])
    android = Android(sys.argv[1], sys.argv[2], sdk, None, 'java')
    android.create(sys.argv[3])
| apache-2.0 |
r0zar/ember-rails-stocks | stocks/node_modules/ember-cli/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 1558 | 4945 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
  """ Converts the given structured content to an XML string.

  (The original first docstring line wrongly said this writes to disk;
  WriteXmlIfChanged is the function that touches the filesystem.)

  Visual Studio files have a lot of pre-defined structures.  This function makes
  it easy to represent these structures as Python data structures, instead of
  having to create a lot of function calls.

  Each XML element of the content is represented as a list composed of:
  1. The name of the element, a string,
  2. The attributes of the element, a dictionary (optional), and
  3+. The content of the element, if any.  Strings are simple text nodes and
      lists are child elements.

  Example 1:
      <test/>
  becomes
      ['test']

  Example 2:
      <myelement a='value1' b='value2'>
         <childtype>This is</childtype>
         <childtype>it!</childtype>
      </myelement>

  becomes
      ['myelement', {'a':'value1', 'b':'value2'},
         ['childtype', 'This is'],
         ['childtype', 'it!'],
      ]

  Args:
    content: The structured content to be converted.
    encoding: The encoding to report on the first XML line.
    pretty: True if we want pretty printing with indents and new lines.

  Returns:
    The XML content as a string.
  """
  # We create a huge list of all the elements of the file.
  xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
  if pretty:
    xml_parts.append('\n')
  _ConstructContentList(xml_parts, content, pretty)

  # Convert it to a string
  return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
  """ Writes the XML content to disk, touching the file only if it has changed.

  Args:
    content: The structured content to be written.
    path: Location of the file.
    encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
    win32: True to force Windows (CRLF) line endings in the output.
  """
  xml_string = XmlToString(content, encoding, pretty)
  if win32 and os.linesep != '\r\n':
    xml_string = xml_string.replace('\n', '\r\n')

  # Encode to bytes; fall back to latin-1 for content that is not already
  # decodable (Python 2 legacy path).
  try:
    xml_string = xml_string.encode(encoding)
  except Exception:
    xml_string = unicode(xml_string, 'latin-1').encode(encoding)

  # Get the old content
  try:
    f = open(path, 'r')
    existing = f.read()
    f.close()
  except EnvironmentError:
    # BUGFIX: was a bare 'except:', which also swallowed KeyboardInterrupt
    # and programming errors.  Only a missing/unreadable file should be
    # treated as "no previous content".
    existing = None

  # It has changed, write it
  if existing != xml_string:
    f = open(path, 'w')
    f.write(xml_string)
    f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
| mit |
shinate/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py | 120 | 8022 | #!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.layout_tests.models import test_expectations
from webkitpy.common.net import resultsjsonparser
TestExpectations = test_expectations.TestExpectations
TestExpectationParser = test_expectations.TestExpectationParser
class BuildBotPrinter(object):
    # This output is parsed by buildbots and must only be changed in coordination with buildbot scripts (see webkit.org's
    # Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg: RunWebKitTests._parseNewRunWebKitTestsOutput
    # and chromium.org's buildbot/master.chromium/scripts/master/log_parser/webkit_test_command.py).

    def __init__(self, stream, debug_logging):
        # stream: file-like object the report is written to (one line per _print).
        # debug_logging: when True, print_results also emits the run summary.
        self.stream = stream
        self.debug_logging = debug_logging

    def print_results(self, run_details):
        """Print the full buildbot report for a completed test run."""
        if self.debug_logging:
            self.print_run_results(run_details.initial_results)
        self.print_unexpected_results(run_details.summarized_results, run_details.enabled_pixel_tests_in_retry)

    def _print(self, msg):
        """Write a single line to the output stream."""
        self.stream.write(msg + '\n')

    def print_run_results(self, run_results):
        """Print pass/fail totals and per-timeline (NOW / WONTFIX) breakdowns."""
        failed = run_results.total_failures
        total = run_results.total
        passed = total - failed - run_results.remaining
        percent_passed = 0.0
        if total > 0:
            percent_passed = float(passed) * 100 / total
        self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
        self._print("")
        self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")
        self._print("")
        # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
        self._print_run_results_entry(run_results, test_expectations.WONTFIX,
            "Tests that will only be fixed if they crash (WONTFIX)")
        self._print("")

    def _print_run_results_entry(self, run_results, timeline, heading):
        """Print, for one timeline, the count of non-passing tests per expectation type."""
        total = len(run_results.tests_by_timeline[timeline])
        not_passing = (total -
           len(run_results.tests_by_expectation[test_expectations.PASS] &
               run_results.tests_by_timeline[timeline]))
        self._print("=> %s (%d):" % (heading, not_passing))
        for result in TestExpectations.EXPECTATION_ORDER:
            if result in (test_expectations.PASS, test_expectations.SKIP):
                continue
            results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline])
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
            if not_passing and len(results):
                pct = len(results) * 100.0 / not_passing
                self._print(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct))

    def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
        """Print tests grouped into unexpected passes, flaky tests, and regressions."""
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            # Classify one test result into the passes/flaky/regressions buckets.
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")

            def is_expected(result):
                return (result in expected) or (result in ('AUDIO', 'TEXT', 'IMAGE+TEXT') and 'FAIL' in expected)

            if all(is_expected(actual_result) for actual_result in actual):
                # Don't print anything for tests that ran as expected.
                return

            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
                else:
                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
            elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
                add_to_dict_of_lists(regressions, actual[0], test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(summarized_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print(" %s" % test)
                self._print("")
            self._print("")
        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()
                for test in tests:
                    result = resultsjsonparser.result_for_test(summarized_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    # FIXME: clean this up once the old syntax is gone
                    new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
                    self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
                self._print("")
            self._print("")
        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()
                for test in tests:
                    self._print(" %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
                self._print("")
        if len(summarized_results['tests']) and self.debug_logging:
            self._print("%s" % ("-" * 78))
| bsd-3-clause |
kl-cruz/mbed-os | tools/arm_pack_manager/pack_manager.py | 40 | 8243 | import argparse
from os.path import basename
from tools.arm_pack_manager import Cache
from os.path import basename, join, dirname, exists
from os import makedirs
from itertools import takewhile
from fuzzywuzzy import process
from tools.arm_pack_manager import Cache
# Module-level CLI parser; every @subcommand definition below registers a
# subparser on it (see get_argparse() / main()).
parser = argparse.ArgumentParser(description='A Handy little utility for keeping your cache of pack files up to date.')
subparsers = parser.add_subparsers(title="Commands")
def subcommand(name, *args, **kwargs):
    """Decorator factory: register the decorated function as a CLI subcommand.

    Each positional arg is a dict of argparse add_argument() keywords plus a
    'name' key holding the flag name (a string, or a list of aliases).  The
    decorated function is later invoked with a fresh Cache object followed by
    only the options it declared.
    """
    def subcommand(command):
        subparser = subparsers.add_parser(name, **kwargs)
        for arg in args:
            arg = dict(arg)
            opt = arg['name']
            del arg['name']
            # 'name' may be a single flag string or a list of flag aliases.
            if isinstance(opt, basestring):
                subparser.add_argument(opt, **arg)
            else:
                subparser.add_argument(*opt, **arg)
        # Flags shared by every subcommand.
        subparser.add_argument("-v", "--verbose", action="store_true", dest="verbose", help="Verbose diagnostic output")
        subparser.add_argument("-vv", "--very_verbose", action="store_true", dest="very_verbose", help="Very verbose diagnostic output")
        subparser.add_argument("--no-timeouts", action="store_true", help="Remove all timeouts and try to download unconditionally")
        subparser.add_argument("--and", action="store_true", dest="intersection", help="combine search terms as if with an and")
        subparser.add_argument("--or", action="store_false", dest="intersection", help="combine search terms as if with an or")
        subparser.add_argument("--union", action="store_false", dest="intersection", help="combine search terms as if with a set union")
        subparser.add_argument("--intersection", action="store_true", dest="intersection", help="combine search terms as if with a set intersection")
        def thunk(parsed_args):
            # Build the Cache from the common flags, then forward only the
            # subcommand-specific (non-None) arguments to the command.
            cache = Cache(not parsed_args.verbose, parsed_args.no_timeouts)
            argv = [arg['dest'] if 'dest' in arg else arg['name'] for arg in args]
            argv = [(arg if isinstance(arg, basestring) else arg[-1]).strip('-')
                    for arg in argv]
            argv = {arg: vars(parsed_args)[arg] for arg in argv
                    if vars(parsed_args)[arg] is not None}
            return command(cache, **argv)
        subparser.set_defaults(command=thunk)
        return command
    return subcommand
def user_selection (message, options) :
    """Interactively ask the user to pick one entry (or "all") from options.

    Returns a list: either a single selected option or all of them.
    """
    # BUGFIX: 'stdout' was an undefined name (never imported) and raised
    # NameError on the first prompt; use sys.stdout via a local import so
    # the file-level import block is untouched.
    import sys
    print(message)
    # enumerate() replaces the original zip(options, range(len(options))).
    for index, choice in enumerate(options) :
        print("({}) {}".format(index, choice))
    pick = None
    while pick is None :
        sys.stdout.write("please select an integer from 0 to {} or \"all\"".format(len(options)-1))
        # 'response' instead of the original 'input', which shadowed a builtin.
        response = raw_input()
        try :
            if response == "all" :
                pick = options
            else :
                pick = [options[int(response)]]
        except ValueError :
            print("I did not understand your input")
    return pick
def fuzzy_find(matches, urls) :
    """Fuzzy-match every term in 'matches' against 'urls' and return the
    best candidates (all candidates tied for the top accumulated score).
    """
    choices = {}
    for match in matches :
        # Accumulate fuzzywuzzy scores per candidate across all search terms.
        for key, value in process.extract(match, urls, limit=None) :
            choices.setdefault(key, 0)
            choices[key] += value
    # Sort candidates by total score, highest first.
    choices = sorted([(v, k) for k, v in choices.iteritems()], reverse=True)
    if not choices : return []
    elif len(choices) == 1 : return [choices[0][1]]
    elif choices[0][0] > choices[1][0] : choices = choices[:1]
    # Keep every candidate tied with the top score.
    else : choices = list(takewhile(lambda t: t[0] == choices[0][0], choices))
    return [v for k,v in choices]
@subcommand('cache',
            dict(name='matches', nargs="*",
                 help="a bunch of things to search for in part names"),
            dict(name=['-e','--everything'], action="store_true",
                 help="download everything possible"),
            dict(name=['-d','--descriptors'], action="store_true",
                 help="download all descriptors"),
            dict(name=["-b","--batch"], action="store_true",
                 help="don't ask for user input and assume download all"),
            help="Cache a group of PACK or PDSC files")
def command_cache (cache, matches, everything=False, descriptors=False, batch=False, verbose= False, intersection=True) :
    """Cache PACK/PDSC files: everything, all descriptors, or fuzzy matches."""
    if everything :
        cache.cache_everything()
        return True
    if descriptors :
        cache.cache_descriptors()
        return True
    if not matches :
        print("No action specified nothing to do")
    else :
        urls = cache.get_urls()
        # Fuzzy-match the search terms against the pack file basenames.
        if intersection :
            choices = fuzzy_find(matches, map(basename, urls))
        else :
            choices = sum([fuzzy_find([m], map(basename, urls)) for m in matches], [])
        # With multiple candidates, ask the user unless in batch mode.
        if not batch and len(choices) > 1 :
            choices = user_selection("Please select a file to cache", choices)
        to_download = []
        for choice in choices :
            for url in urls :
                if choice in url :
                    to_download.append(url)
        cache.cache_pack_list(to_download)
    return True
@subcommand('find-part',
            dict(name='matches', nargs="+", help="words to match to processors"),
            dict(name=['-l',"--long"], action="store_true",
                 help="print out part details with part"),
            dict(name=['-p', '--parts-only'], action="store_false", dest="print_aliases"),
            dict(name=['-a', '--aliases-only'], action="store_false", dest="print_parts"),
            help="Find a Part and it's description within the cache")
def command_find_part (cache, matches, long=False, intersection=True,
                       print_aliases=True, print_parts=True) :
    """Fuzzy-search the cached index for matching parts/aliases and print them."""
    if long :
        # pprint only needed for the detailed (--long) output.
        import pprint
        pp = pprint.PrettyPrinter()
    parts = cache.index
    if intersection :
        choices = fuzzy_find(matches, parts.keys())
        aliases = fuzzy_find(matches, cache.aliases.keys())
    else :
        choices = sum([fuzzy_find([m], parts.keys()) for m in matches], [])
        aliases = sum([fuzzy_find([m], cache.aliases.keys()) for m in matches], [])
    if print_parts:
        for part in choices :
            print part
            if long :
                pp.pprint(cache.index[part])
    if print_aliases:
        for alias in aliases :
            print alias
            if long :
                # Aliases resolve to a part name, then to its index entry.
                pp.pprint(cache.index[cache.aliases[alias]])
@subcommand('dump-parts',
            dict(name='out', help='directory to dump to'),
            dict(name='parts', nargs='+', help='parts to dump'),
            help='Create a directory with an index.json describing the part and all of their associated flashing algorithms.'
)
def command_dump_parts (cache, out, parts, intersection=False) :
    """Dump the flashing algorithms plus an index.json for the selected parts.

    Parts without an associated flashing algorithm are skipped with a warning.
    """
    # BUGFIX: 'dump' below was an undefined name; use the stdlib json module
    # (local import keeps the file-level import block untouched).
    import json
    index = {}
    if intersection :
        for part in fuzzy_find(parts, cache.index):
            index.update(cache.index[part])
    else :
        for part in parts :
            index.update(dict(cache.find_device(part)))
    # .items() replaces the Python-2-only .iteritems().
    for n, p in index.items() :
        try :
            if not exists(join(out, dirname(p['algorithm']['file']))) :
                makedirs(join(out, dirname(p['algorithm']['file'])))
            with open(join(out, p['algorithm']['file']), "wb+") as fd :
                fd.write(cache.get_flash_algorthim_binary(n).read())
        except KeyError:
            print("[Warning] {} does not have an associated flashing algorithm".format(n))
    # index.json is text, so open it in text mode for json.dump.
    with open(join(out, "index.json"), "w+") as fd :
        json.dump(index, fd)
@subcommand('cache-part',
            dict(name='matches', nargs="+", help="words to match to devices"),
            help='Cache PACK files associated with the parts matching the provided words')
def command_cache_part (cache, matches, intersection=True) :
    """Download and cache the PACK files for every part matching 'matches'."""
    index = cache.index
    if intersection :
        choices = fuzzy_find(matches, index.keys())
        aliases = fuzzy_find(matches, cache.aliases.keys())
    else :
        choices = sum([fuzzy_find([m], index.keys()) for m in matches], [])
        aliases = sum([fuzzy_find([m], cache.aliases.keys()) for m in matches], [])
    urls = set([index[c]['pdsc_file'] for c in choices])
    # BUGFIX: the original did 'urls += set(...)' (TypeError: sets do not
    # support +=), referenced the misspelled attribute 'cache.aliasse', and
    # omitted the ['pdsc_file'] lookup used on the non-aliased entries above.
    urls |= set([index[cache.aliases[a]]['pdsc_file'] for a in aliases])
    cache.cache_pack_list(list(urls))
def get_argparse() :
    """Return the module-level argument parser (with all subcommands attached)."""
    return parser
def main() :
    """CLI entry point: parse arguments and dispatch to the chosen subcommand."""
    args = parser.parse_args()
    # 'command' is the thunk installed by @subcommand via set_defaults().
    args.command(args)
| apache-2.0 |
credativ/pulp | playpen/deploy/deploy-environment.py | 10 | 2701 | #!/usr/bin/env python
import argparse
import sys
import time
import traceback
from utils import os1_utils, setup_utils, config_utils
# Setup the CLI
description = 'Deploy a Pulp environment; this can be used in conjunction with the run-integrations-tests.py script'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--config', help='path to the configuration file to use to deploy the environment', nargs='+',
                    required=True)
parser.add_argument('--deployed-config', help='path to save the deployed instance configuration to; defaults to the'
                    ' given config file with a json file extension.')
parser.add_argument('--test-branch', help='test suite branch to checkout on the tester instance')
parser.add_argument('--repo', help='path the the repository; will override repositories set in the configuration')
parser.add_argument('--no-teardown', action='store_true', help='do not clean up instances if an error occurs')
args = parser.parse_args()

# Validate the configuration file(s) and authenticate against OS1 up front,
# before any instances are created.
print 'Parsing and validating the configuration file(s)...'
config = config_utils.parse_and_validate_config_files(args.config, args.repo, args.test_branch)
os1_auth = config.get(config_utils.CONFIG_OS1_CREDENTIALS, {})
print 'Done. \n\nAuthenticating with OS1...'
os1 = os1_utils.OS1Manager(**os1_auth)
print 'Done.\n'

try:
    # This metadata is attached to all instances to allow cleanup to find
    # stale instances made by this utility
    instance_metadata = {
        'pulp_instance': 'True',
        'build_time': str(time.time()),
    }
    print 'Deploying instances...'
    os1.build_instances(config, instance_metadata)
    print 'Applying role-specific configurations...'
    setup_utils.configure_instances(config)

    # Save the configuration for later cleanup
    if args.deployed_config is None:
        args.deployed_config = args.config[0] + '.json'
    config_utils.save_config(config, args.deployed_config)

    # Print out machine information and configuration
    print '\nThe following instances have been built:'
    for instance in config_utils.config_generator(config):
        print """
        Instance name: %(instance_name)s
        Role: %(role)s
        SSH: %(host_string)s
        """ % instance
    print 'The configuration file has been written to ' + args.deployed_config
except (Exception, KeyboardInterrupt), e:
    # Print exception message and quit
    exception_type, exception_value, exception_tb = sys.exc_info()
    print 'Error: %s - %s' % (exception_type, exception_value)
    traceback.print_tb(exception_tb)
    # Tear down whatever was built unless the user asked to keep it for debugging.
    if not args.no_teardown:
        os1.teardown_instances(config)
    sys.exit(1)
Emergya/icm-openedx-educamadrid-platform-basic | lms/djangoapps/ccx/tests/test_models.py | 27 | 8934 | """
tests for the models
"""
from datetime import datetime, timedelta
from django.utils.timezone import UTC
from mock import patch
from nose.plugins.attrib import attr
from student.roles import CourseCcxCoachRole
from student.tests.factories import (
AdminFactory,
)
from util.tests.test_date_utils import fake_ugettext
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import (
CourseFactory,
check_mongo_calls
)
from .factories import (
CcxFactory,
)
from ..overrides import override_field_for_ccx
@attr('shard_1')
class TestCCX(ModuleStoreTestCase):
"""Unit tests for the CustomCourseForEdX model
"""
def setUp(self):
"""common setup for all tests"""
super(TestCCX, self).setUp()
self.course = course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
def set_ccx_override(self, field, value):
"""Create a field override for the test CCX on <field> with <value>"""
override_field_for_ccx(self.ccx, self.course, field, value)
def test_ccx_course_is_correct_course(self):
"""verify that the course property of a ccx returns the right course"""
expected = self.course
actual = self.ccx.course
self.assertEqual(expected, actual)
def test_ccx_course_caching(self):
"""verify that caching the propery works to limit queries"""
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.course # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.course # pylint: disable=pointless-statement
def test_ccx_start_is_correct(self):
"""verify that the start datetime for a ccx is correctly retrieved
Note that after setting the start field override microseconds are
truncated, so we can't do a direct comparison between before and after.
For this reason we test the difference between and make sure it is less
than one second.
"""
expected = datetime.now(UTC())
self.set_ccx_override('start', expected)
actual = self.ccx.start # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_start_caching(self):
"""verify that caching the start property works to limit queries"""
now = datetime.now(UTC())
self.set_ccx_override('start', now)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.start # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.start # pylint: disable=pointless-statement, no-member
def test_ccx_due_without_override(self):
"""verify that due returns None when the field has not been set"""
actual = self.ccx.due # pylint: disable=no-member
self.assertIsNone(actual)
def test_ccx_due_is_correct(self):
"""verify that the due datetime for a ccx is correctly retrieved"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
actual = self.ccx.due # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_due_caching(self):
"""verify that caching the due property works to limit queries"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.due # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.due # pylint: disable=pointless-statement, no-member
def test_ccx_has_started(self):
"""verify that a ccx marked as starting yesterday has started"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now - delta
self.set_ccx_override('start', then)
self.assertTrue(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_not_started(self):
"""verify that a ccx marked as starting tomorrow has not started"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now + delta
self.set_ccx_override('start', then)
self.assertFalse(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_ended(self):
"""verify that a ccx that has a due date in the past has ended"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now - delta
self.set_ccx_override('due', then)
self.assertTrue(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_has_not_ended(self):
"""verify that a ccx that has a due date in the future has not eneded
"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now + delta
self.set_ccx_override('due', then)
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_without_due_date_has_not_ended(self):
"""verify that a ccx without a due date has not ended"""
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
# ensure that the expected localized format will be found by the i18n
# service
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%b %d, %Y",
}))
def test_start_datetime_short_date(self):
"""verify that the start date for a ccx formats properly by default"""
start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015"
self.set_ccx_override('start', start)
actual = self.ccx.start_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_start_datetime_date_time_format(self):
"""verify that the DATE_TIME format also works as expected"""
start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015 at 12:00 UTC"
self.set_ccx_override('start', start)
actual = self.ccx.start_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%b %d, %Y",
}))
def test_end_datetime_short_date(self):
"""verify that the end date for a ccx formats properly by default"""
end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015"
self.set_ccx_override('due', end)
actual = self.ccx.end_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_date_time_format(self):
"""verify that the DATE_TIME format also works as expected"""
end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015 at 12:00 UTC"
self.set_ccx_override('due', end)
actual = self.ccx.end_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_no_due_date(self):
"""verify that without a due date, the end date is an empty string"""
expected = ''
actual = self.ccx.end_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
actual = self.ccx.end_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
def test_ccx_max_student_enrollment_correct(self):
"""
Verify the override value for max_student_enrollments_allowed
"""
expected = 200
self.set_ccx_override('max_student_enrollments_allowed', expected)
actual = self.ccx.max_student_enrollments_allowed # pylint: disable=no-member
self.assertEqual(expected, actual)
| agpl-3.0 |
rafasis1986/EngineeringMidLevel | tests/conftest.py | 1 | 1619 | # -*- coding: utf-8 -*-
"""Defines fixtures available to all tests."""
import pytest
from webtest import TestApp
from flaskiwsapp.app import create_app
from flaskiwsapp.database import db as _db
from .factories import UserFactory
from flaskiwsapp.settings.testConfig import TestConfig
from flaskiwsapp.snippets.helpers import register_token_auth
from tests.factories import AdminFactory
@pytest.yield_fixture(scope='function')
def app():
    """An application for the tests."""
    _app = create_app(TestConfig)
    # Push a request context so code under test can use request-bound helpers
    # (url_for, request, ...) without an actual HTTP request in flight.
    ctx = _app.test_request_context()
    ctx.push()
    yield _app
    # Teardown: pop the context once the test is done.
    ctx.pop()
@pytest.yield_fixture(scope='function')
def client(app):
    """A flask test client."""
    # Depends on the ``app`` fixture; the context manager ensures the client's
    # request context is cleaned up after each test.
    with app.test_client() as client:
        yield client
@pytest.fixture(scope='function')
def testapp(app):
    """A Webtest app."""
    # WebTest wrapper for functional (full WSGI round-trip) testing.
    return TestApp(app)
@pytest.yield_fixture(scope='function')
def db(app):
    """A database for the tests."""
    _db.app = app
    # Create all tables inside an application context before the test runs.
    with app.app_context():
        _db.create_all()
    yield _db
    # Explicitly close DB connection
    _db.session.close()
    # Drop everything so each test starts from an empty schema.
    _db.drop_all()
@pytest.yield_fixture(scope='function')
def jwt(app):
    """A JWT for the tests."""
    # Registers token-based auth handlers on the app and yields the JWT object.
    _jwt = register_token_auth(app)
    yield _jwt
@pytest.fixture
def user(db):
    """A user for the tests."""
    # Factory persists via the db fixture's session; commit makes it queryable.
    user = UserFactory(email="testuser@example.com", password='myprecious')
    db.session.commit()
    return user
@pytest.fixture
def admin(db):
    """A admin for the tests."""
    # Same as ``user`` but with the admin flag set.
    admin = AdminFactory(email="admin@example.com", password="admin", admin=True)
    db.session.commit()
    return admin
| mit |
ZefQ/Flexget | tests/test_migrate.py | 22 | 1076 | from __future__ import unicode_literals, division, absolute_import
import os
from tests import FlexGetBase
class TestMigrate(FlexGetBase):
    """Regression test for upgrading a legacy (r1042) FlexGet sqlite database."""
    # Task definition consumed by FlexGetBase: one mocked entry, always accepted.
    __yaml__ = """
        tasks:
          test:
            mock:
              - {title: 'foobar'}
            accept_all: yes
    """
    def setup(self):
        import logging
        logging.critical('TestMigrate.setup()')
        db_filename = os.path.join(self.base_path, 'upgrade_test.sqlite')
        # in case running on windows, needs double \\
        filename = db_filename.replace('\\', '\\\\')
        # Point the run at the copied legacy database instead of a fresh one.
        self.database_uri = 'sqlite:///%s' % filename
        super(TestMigrate, self).setup()
    # This fails on windows when it tries to delete upgrade_test.sqlite
    # WindowsError: [Error 32] The process cannot access the file because it is being used by another process: 'upgrade_test.sqlite'
    #@with_filecopy('db-r1042.sqlite', 'upgrade_test.sqlite')
    def test_upgrade(self):
        # TODO: for some reason this will fail
        # NOTE(review): the early return below deliberately disables this test;
        # the statements after it are unreachable.
        return
        self.execute_task('test')
        assert self.task.accepted
| mit |
forevernull/incubator-airflow | airflow/contrib/auth/backends/password_auth.py | 39 | 4386 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from sys import version_info
import flask_login
from flask_login import login_required, current_user, logout_user
from flask import flash
from wtforms import (
Form, PasswordField, StringField)
from wtforms.validators import InputRequired
from flask import url_for, redirect
from flask_bcrypt import generate_password_hash, check_password_hash
from sqlalchemy import (
Column, String, DateTime)
from sqlalchemy.ext.hybrid import hybrid_property
from airflow import settings
from airflow import models
from airflow import configuration
import logging
login_manager = flask_login.LoginManager()
login_manager.login_view = 'airflow.login' # Calls login() below
login_manager.login_message = None
LOG = logging.getLogger(__name__)
PY3 = version_info[0] == 3
class AuthenticationError(Exception):
    """Raised when username/password verification fails during login."""
    pass
class PasswordUser(models.User):
    """Airflow ``models.User`` subclass adding bcrypt password authentication."""
    # Bcrypt digest, stored in the 'password' column.
    _password = Column('password', String(255))
    def __init__(self, user):
        # Wraps a plain models.User row fetched by load_user().
        self.user = user
    @hybrid_property
    def password(self):
        return self._password
    @password.setter
    def _set_password(self, plaintext):
        # Hash with bcrypt at cost factor 12.
        self._password = generate_password_hash(plaintext, 12)
        if PY3:
            # bcrypt returns bytes on Python 3; store text for the String column.
            self._password = str(self._password, 'utf-8')
        # NOTE(review): the setter function is named _set_password rather than
        # password; with hybrid_property this binds the setter to the name it
        # is assigned to -- confirm callers assign through the name actually
        # wired up before relying on ``user.password = ...``.
    def authenticate(self, plaintext):
        """Return True when *plaintext* matches the stored bcrypt hash."""
        return check_password_hash(self._password, plaintext)
    def is_active(self):
        '''Required by flask_login'''
        return True
    def is_authenticated(self):
        '''Required by flask_login'''
        return True
    def is_anonymous(self):
        '''Required by flask_login'''
        return False
    def get_id(self):
        '''Returns the current user id as required by flask_login'''
        return str(self.id)
    def data_profiling(self):
        '''Provides access to data profiling tools'''
        return True
    def is_superuser(self):
        '''Access all the things'''
        return True
@login_manager.user_loader
def load_user(userid):
    """Flask-Login user loader: fetch the user with the given primary-key id.

    Returns ``None`` when the id is missing/invalid or no matching user
    exists, as the flask_login user_loader contract requires.
    """
    LOG.debug("Loading user %s", userid)
    if not userid or userid == 'None':
        return None
    session = settings.Session()
    user = session.query(models.User).filter(models.User.id == int(userid)).first()
    session.expunge_all()
    session.commit()
    session.close()
    if not user:
        # BUG FIX: previously a PasswordUser was constructed around None for an
        # unknown id, which fails later (e.g. in get_id()); flask_login expects
        # None here so the session is treated as anonymous instead of crashing.
        return None
    return PasswordUser(user)
def login(self, request):
    """Handle the Airflow login view: GET renders the form, POST authenticates.

    Bound as a method of the Airflow login view (hence ``self``); redirects to
    the admin index on success, re-renders the login template otherwise.
    """
    if current_user.is_authenticated():
        flash("You are already logged in")
        return redirect(url_for('admin.index'))
    username = None
    password = None
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate():
        username = request.form.get("username")
        password = request.form.get("password")
    # GET requests, failed form validation, or blank fields: show the form.
    if not username or not password:
        return self.render('airflow/login.html',
                           title="Airflow - Login",
                           form=form)
    try:
        session = settings.Session()
        user = session.query(PasswordUser).filter(
            PasswordUser.username == username).first()
        if not user:
            session.close()
            raise AuthenticationError()
        if not user.authenticate(password):
            session.close()
            raise AuthenticationError()
        LOG.info("User %s successfully authenticated", username)
        flask_login.login_user(user)
        session.commit()
        session.close()
        # Honor a ?next=... redirect target after successful login.
        return redirect(request.args.get("next") or url_for("admin.index"))
    except AuthenticationError:
        # Deliberately vague: do not reveal whether the username exists.
        flash("Incorrect login details")
        return self.render('airflow/login.html',
                           title="Airflow - Login",
                           form=form)
class LoginForm(Form):
    """WTForms login form: both fields are required (InputRequired)."""
    username = StringField('Username', [InputRequired()])
    password = PasswordField('Password', [InputRequired()])
| apache-2.0 |
luofei98/qgis | tests/src/python/test_qgsspatialiteprovider.py | 7 | 4586 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSpatialiteProvider
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Vincent Mora'
__date__ = '09/07/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import tempfile
import qgis
import sys
from qgis.core import *
from utilities import (getQgisTestApp,
TestCase,
unittest
)
try:
from pyspatialite import dbapi2 as sqlite3
except ImportError:
print "You should install pyspatialite to run the tests"
sys.exit(0)
# Convenience instances in case you may need them
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
def die(error_message):
    """Abort the current test by raising a generic Exception carrying *error_message*."""
    raise Exception(error_message)
class TestQgsSpatialiteProvider(TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# create test db
cls.dbname = os.path.join( tempfile.gettempdir(), "test.sqlite" )
if os.path.exists( cls.dbname ):
os.remove( cls.dbname )
con = sqlite3.connect(cls.dbname, isolation_level=None)
cur = con.cursor()
cur.execute( "BEGIN" )
sql = "SELECT InitSpatialMetadata()"
cur.execute(sql)
# simple table with primary key
sql = "CREATE TABLE test_pg (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
cur.execute(sql)
sql = "SELECT AddGeometryColumn('test_pg', 'geometry', 4326, 'POLYGON', 'XY')"
cur.execute(sql)
sql = "INSERT INTO test_pg (id, name, geometry) "
sql += "VALUES (1, 'toto', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
cur.execute(sql)
# table with multiple column primary key
sql = "CREATE TABLE test_pg_mk (id INTEGER NOT NULL, name TEXT NOT NULL, PRIMARY KEY(id,name))"
cur.execute(sql)
sql = "SELECT AddGeometryColumn('test_pg_mk', 'geometry', 4326, 'POLYGON', 'XY')"
cur.execute(sql)
sql = "INSERT INTO test_pg_mk (id, name, geometry) "
sql += "VALUES (1, 'toto', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
cur.execute(sql)
cur.execute( "COMMIT" )
con.close()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
# for the time being, keep the file to check with qgis
#if os.path.exists(cls.dbname) :
# os.remove(cls.dbname)
pass
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
def test_SplitFeature(self):
"""Create spatialite database"""
layer = QgsVectorLayer("dbname=%s table=test_pg (geometry)" % self.dbname, "test_pg", "spatialite")
assert(layer.isValid())
assert(layer.hasGeometryType())
layer.startEditing()
layer.splitFeatures([QgsPoint(0.5, -0.5), QgsPoint(0.5, 1.5)], 0)==0 or die("error in split")
layer.splitFeatures([QgsPoint(-0.5, 0.5), QgsPoint(1.5, 0.5)], 0)==0 or die("error in split")
if not layer.commitChanges():
die("this commit should work")
layer.featureCount() == 4 or die("we should have 4 features after 2 split")
def xtest_SplitFeatureWithFailedCommit(self):
"""Create spatialite database"""
layer = QgsVectorLayer("dbname=%s table=test_pg_mk (geometry)" % self.dbname, "test_pg_mk", "spatialite")
assert(layer.isValid())
assert(layer.hasGeometryType())
layer.startEditing()
layer.splitFeatures([QgsPoint(0.5, -0.5), QgsPoint(0.5, 1.5)], 0)==0 or die("error in split")
layer.splitFeatures([QgsPoint(-0.5, 0.5), QgsPoint(1.5, 0.5)], 0)==0 or die("error in split")
if layer.commitChanges():
die("this commit should fail")
layer.rollBack()
feat = QgsFeature()
it=layer.getFeatures()
it.nextFeature(feat)
ref = [[(0,0), (1,0), (1,1), (0,1), (0,0)]]
res = feat.geometry().asPolygon()
for ring1, ring2 in zip(ref, res):
for p1, p2 in zip(ring1, ring2):
for c1, c2 in zip(p1, p2):
c1 == c2 or die("polygon has been altered by failed edition")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
s40523125/2016fallcp_hw | course/ggame/pygamedeps.py | 228 | 12266 | def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
# PYTHON 3 and PYGAME DEPENDENCIES
if module_exists('pygame'):
import pygame
class _body(object):
def __init__(self):
self.events = {}
def appendChild(self, obj):
self.child = obj
def bind(self, evt, action):
self.events[evt] = action
print("Binding {} to {}".format(evt, action))
class _document(object):
def __init__(self):
self.body = _body()
class _window(object):
def __init__(self):
self.document = _document()
self.animatex = 0
def open(self, s1, s2):
return self
def requestAnimationFrame(self, target):
if self.animatex < 10:
self.animatex += 1
target('dummy')
print("Animation frame")
class _Container(object):
def __init__(self):
pass
def destroy(self):
pass
class _Renderer(object):
def __init__(self, x, y, argsdict):
self.x = x
self.y = y
self.argsdict = argsdict
self.view = 'view'
print("Rendering created with {}x{} area".format(x, y))
def render(self, stage):
pass
class _GFX(object):
def __init__(self):
self.Container = _Container
self.autoDetectRenderer = _Renderer
window = _window()
GFX = _GFX()
#document = object()
def JSConstructor(cls):
return cls
def JSObject(obj):
return obj
class _GFX_Rectangle(pygame.Rect):
pass
GFX_Rectangle = _GFX_Rectangle
class _Texture(object):
def __init__(self, img='', crossdomain=False):
self.name = img
if not img == '':
self.img = pygame.image.load(img) # pygame surface
self.basewidth = self.img.get_width()
self.baseheight = self.img.get_height()
self.width = self.basewidth
self.height = self.baseheight
print("Texture from image {}, {}x{} pixels".format(img, self.basewidth, self.baseheight))
self.baserect = _GFX_Rectangle(0, 0, self.basewidth, self.baseheight)
self.framerect = self.baserect
@classmethod
def fromTexture(cls, texture, frame):
inst = cls()
inst.img = pygame.Surface((frame.width, frame.height))
inst.img.blit(texture.img, (0,0), frame)
inst.name = texture.name
inst.basewidth = texture.basewidth
inst.baseheight = texture.baseheight
inst.baserect = texture.baserect
inst.framerect = frame
inst.width = frame.width
inst.height = frame.height
print("Texture from base texture {}, {}x{} subframe {}x{}".format(inst.name, inst.basewidth, inst.baseheight, inst.framerect.width, inst.framerect.height))
return inst
def destroy(self):
try:
self.img.close()
print("Destroying an image")
except:
print("Destroying a non-image")
GFX_Texture = _Texture.fromTexture
GFX_Texture_fromImage = _Texture
class vector(object):
    """Minimal 2-D vector with index access: v[0] <-> v.x, v[1] <-> v.y."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __getitem__(self, key):
        if key == 0:
            return self.x
        elif key == 1:
            return self.y
        else:
            raise KeyError
    def __setitem__(self, key, value):
        # BUG FIX: this method was named ``__setitem`` (missing the trailing
        # double underscore), so it was never registered as the item-assignment
        # hook and ``v[i] = value`` raised TypeError instead of updating x/y.
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        else:
            raise KeyError
class GFX_Sprite(object):
def __init__(self, texture):
self.basetexture = texture
self.texture = self.basetexture
self.visible = True
self.pos = vector(0,0)
self.anch = vector(0,0)
self.scal = vector(1.0, 1.0)
self.width = texture.width
self.height = texture.height
self.rotation = 0.0
@property
def position(self):
return self.pos
@position.setter
def position(self, value):
self.pos.x = value[0]
self.pos.y = value[1]
@property
def anchor(self):
return self.anch
@anchor.setter
def anchor(self, value):
self.anch.x = value[0]
self.anch.y = value[1]
@property
def scale(self):
return self.scal
@scale.setter
def scale(self, value):
self.scal.x = value[0]
self.scal.y = value[1]
def destroy(self):
pass
class _GFX_Graphics(object):
def __init__(self):
self.clear()
def clear(self):
self.cleared = True
self.visible = True
self.lwidth = None
self.color = None
self.alpha = None
self.fillcolor = None
self.fillalpha = None
self.x = None
self.y = None
self.rwidth = None
self.rheight = None
self.radius = None
self.ehw = None
self.ehh = None
self.xto = None
self.yto = None
self.jpath = None
self.width = None
self.height = None
self.position = vector(0,0)
def destroy(self):
self.clear()
def clone(self):
clone = type(self)()
clone.cleared = self.cleared
clone.visible = self.visible
clone.lwidth = self.lwidth
clone.color = self.color
clone.alpha = self.alpha
clone.fillalpha = self.fillalpha
clone.fillcolor = self.fillcolor
clone.x = self.x
clone.y = self.y
clone.rwidth = self.rwidth
clone.rheight = self.rheight
clone.radius = self.radius
clone.ehw = self.ehw
clone.ehh = self.ehh
clone.xto = self.xto
clone.yto = self.yto
clone.jpath = self.jpath
clone.width = self.width
clone.height = self.height
clone.position = self.position
return clone
def lineStyle(self, width, color, alpha):
self.width = width
self.color = color
self.alpha = alpha
def beginFill(self, color, alpha):
self.fillcolor = color
self.fillalpha = alpha
def drawRect(self, x, y, w, h):
self.x = x
self.y = y
self.position = vector(x,y)
self.rwidth = w
self.rheight = h
self.width = w
self.height = h
self.cleared = False
print("Rectangle {}x{} at {},{}".format(w,h,x,y))
return self
def drawCircle(self, x, y, radius):
self.x = x
self.y = y
self.position = vector(x,y)
self.radius = radius
self.cleared = False
self.width = radius*2
self.height = radius*2
print("Circle, radius {} at {},{}".format(radius,x,y))
return self
def drawEllipse(self, x, y, hw, hh):
self.x = x
self.y = y
self.position = vector(x,y)
self.ehw = hw
self.ehh = hh
self.width = hw*2
self.height = hh*2
self.cleared = False
print("Ellipse, {}x{} at {},{}".format(hw,hh,x,y))
return self
def drawPolygon(self, jpath):
self.jpath = jpath
self.cleared = False
self.position = vector(jpath[0],jpath[1])
x = []
y = []
for i in range(0,len(jpath)-1,2):
x.append(jpath[i])
y.append(jpath[i+1])
self.width = max(x)-min(x)
self.height = max(y)-min(y)
print("Polygon")
return self
def moveTo(self, x, y):
self.x = x
self.y = y
self.position = vector(x,y)
return self
def lineTo(self, x, y):
self.xto = x
self.yto = y
self.width = abs(x)
self.height = abs(y)
self.cleared = False
print("Line from {},{} to {},{}".format(self.x, self.y, x, y))
return self
class _GFX_Text(object):
def __init__(self, text, styledict):
self.text = text
self.styledict = styledict
self.alpha = None
self.visible = None
self.width = 99
self.height = 99
self.position = vector(0,0)
print("Text: {} in {}".format(text, styledict['font']))
def clone(self):
clone = type(self)(self.text, self.styledict)
return clone
def destroy(self):
self.text = ''
GFX_Text = _GFX_Text
_globalGraphics = _GFX_Graphics()
GFX_Graphics = _globalGraphics
def GFX_DetectRenderer():
pass
class _SND_all(object):
    """Stub for the browser-side 'all sounds' handle; stop() just logs."""
    def __init__(self):
        pass
    def stop(self):
        print("Stopping all sounds")
class _SND(object):
    """Sound-subsystem stub; ``all`` mirrors the JS SND.all attribute."""
    def __init__(self):
        # Note: stores the class itself (callers do SND.all().stop()).
        self.all = _SND_all
# Module-level singleton used in place of the browser sound API.
SND = _SND()
class _SND_Sound(object):
    """Stub sound object: records its URL and logs load/play calls."""
    def __init__(self, url):
        self.url = url
        print("Creating sound object {}".format(url))
    def load(self):
        pass
    def play(self):
        print("Playing sound object {}".format(self.url))
# Public alias matching the name used by the browser-backed implementation.
SND_Sound = _SND_Sound
class HwEvent(object):
evtmap = {2: 'keydown', 3: 'keyup', 4: 'mousemove', 5: 'mousedown', 6: 'mouseup'}
keymap = {304:16,
303:16,
306:17,
308:18,
301:20,
276:37,
273:38,
275:39,
274:40,
97:65,
98:66,
99:67,
100:68,
101:69,
102:70,
103:71,
104:72,
105:73,
106:74,
107:75,
108:76,
109:77,
110:78,
111:79,
112:80,
113:81,
114:82,
115:83,
116:84,
117:85,
118:86,
119:87,
120:88,
121:89,
122:90,
282:112,
283:113,
284:114,
285:115,
286:116,
287:117,
288:118,
289:119,
290:120,
291:121,
292:122,
293:123,
59:186,
61:187,
44:188,
46:190,
45:189,
47:191,
96:192,
92:220,
91:219,
93:221,
39:222}
def __init__(self, pevent):
self.type = HwEvent.evtmap.get(pevent.type, None)
if self.type in ['keydown', 'keyup']:
self.keyCode = HwEvent.keymap.get(pevent.key, pevent.key)
elif self.type in ['mousemove', 'mousedown', 'mouseup']:
self.wheelDelta = 0
if self.type != 'mousemove' and pevent.button == 5:
if self.type == 'mousedown':
self.wheelDelta = 1
else:
self.wheelDelta = -1
self.clientX = pevent.pos[0]
self.clientY = pevent.pos[1]
class GFX_Window(object):
def __init__(self, width, height, onclose):
pygame.init()
self._w = pygame.display.set_mode((width, height))
self.clock = pygame.time.Clock()
self.sprites = []
self.animatestarted = False
self.bindings = {}
self.onclose = onclose
self.stop = False
#self._w = window.open("", "")
#self._stage = JSConstructor(GFX.Container)()
#self._renderer = GFX.autoDetectRenderer(width, height, {'transparent':True})
#self._w.document.body.appendChild(self._renderer.view)
#self._w.onunload = onclose
def bind(self, evtspec, callback):
self.bindings[evtspec] = callback
def add(self, obj):
self.sprites.append(obj)
#self._stage.addChild(obj)
def remove(self, obj):
self.sprites.remove(obj)
#self._stage.removeChild(obj)
def animate(self, stepcallback):
# do stuff required to display
self._w.fill(pygame.Color('white'))
for s in self.sprites:
self._w.blit(s.texture.img, (s.pos.x, s.pos.y))
pygame.display.flip()
events = pygame.event.get()
for event in events:
hwevent = HwEvent(event)
if hwevent.type != None:
self.bindings[hwevent.type](hwevent)
if event.type == 12:
print("Close!")
self.onclose()
self.destroy()
self.stop = True
if not self.animatestarted:
self.animatestarted = True
while not self.stop:
self.clock.tick_busy_loop(30)
stepcallback(0)
#self._renderer.render(self._stage)
#self._w.requestAnimationFrame(stepcallback)
def destroy(self):
pass
#SND.all().stop()
#self._stage.destroy()
| agpl-3.0 |
ewandor/home-assistant | homeassistant/components/logentries.py | 25 | 1901 | """
Support for sending data to Logentries webhook endpoint.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/logentries/
"""
import json
import logging
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (CONF_TOKEN, EVENT_STATE_CHANGED)
from homeassistant.helpers import state as state_helper
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'logentries'
DEFAULT_HOST = 'https://webhook.logentries.com/noformat/logs/'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_TOKEN): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the Logentries component."""
    conf = config[DOMAIN]
    token = conf.get(CONF_TOKEN)
    # Webhook endpoint is the fixed host with the account token appended.
    le_wh = '{}{}'.format(DEFAULT_HOST, token)
    def logentries_event_listener(event):
        """Listen for new messages on the bus and sends them to Logentries."""
        state = event.data.get('new_state')
        if state is None:
            return
        # Prefer a numeric representation; fall back to the raw state string.
        try:
            _state = state_helper.state_as_number(state)
        except ValueError:
            _state = state.state
        json_body = [
            {
                'domain': state.domain,
                'entity_id': state.object_id,
                'attributes': dict(state.attributes),
                'time': str(event.time_fired),
                'value': _state,
            }
        ]
        try:
            payload = {
                "host": le_wh,
                "event": json_body
            }
            # Fire-and-forget POST; failures are logged but never propagate
            # into the event bus.
            requests.post(le_wh, data=json.dumps(payload), timeout=10)
        except requests.exceptions.RequestException as error:
            _LOGGER.exception("Error sending to Logentries: %s", error)
    # Forward every state change for the lifetime of the instance.
    hass.bus.listen(EVENT_STATE_CHANGED, logentries_event_listener)
    return True
| apache-2.0 |
coolblaze03/WSNNS3Port | src/nix-vector-routing/bindings/callbacks_list.py | 331 | 1249 | callback_classes = [
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| gpl-2.0 |
DNFcode/edx-platform | common/djangoapps/edxmako/middleware.py | 26 | 1116 | # Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distribuetd under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from django.template import RequestContext
from util.request import safe_get_host
REQUEST_CONTEXT = threading.local()
class MakoMiddleware(object):
    """Django middleware that stashes the request's template context in
    thread-local storage (REQUEST_CONTEXT) so Mako templates rendered outside
    the normal Django pipeline can still see request data."""
    def process_request(self, request):
        """Build the context for this request and store it thread-locally."""
        context = RequestContext(request)
        context['is_secure'] = request.is_secure()
        context['site'] = safe_get_host(request)
        REQUEST_CONTEXT.context = context
    def process_response(self, request, response):
        """Clear the stored context so it cannot leak into the next request
        handled by this thread."""
        REQUEST_CONTEXT.context = None
        return response
ichbinder23/mailsend | ConfPaser.py | 1 | 4109 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'jakob'
import ConfigParser
from StringIO import StringIO
import os
from zope.interface import implements
from IMailOption import IMailOption
class ConfPaser(object):
implements(IMailOption)
__pathToConfig = None
__config = None
__subject = ""
__receiver = ""
__transmitter = ""
__message = ""
__loginname = ""
__password = ""
__smtpserver = ""
__smtpport = ""
__attachments = ""
def __init__(self, pathToConfig):
try:
if os.path.exists(pathToConfig):
self.__pathToConfig = pathToConfig
else:
raise
except:
print "Config file not found.", pathToConfig
exit(-1)
#raise
self.__config = ConfigParser.SafeConfigParser()
def __ConfigSectionMap(self, section):
dictOptions = {}
options = self.__config.options(section)
for option in options:
try:
dictOptions[option] = self.__config.get(section, option).strip('"').strip("'")
if dictOptions[option] == "":
exit(-1)
#raise
except:
print("exception on %s ! maybe no value?" % option)
exit(-1)
#raise
return dictOptions
def paser(self):
try:
data = StringIO('\n'.join(line.strip() for line in open(self.__pathToConfig)))
self.__config.readfp(data)
except Exception as e:
print "Configuration file could not be loaded ", e
sections = self.__config.sections()
try:
sections = [x.lower() for x in sections]
if ("global" not in sections):
raise ImportError("global section error!")
except ImportError as e:
print e, " Maybe no global section in Configuration file?"
exit(-1)
#raise
for section in self.__config.sections():
try:
dictOptions = self.__ConfigSectionMap(section)
if section.lower() in "global":
self.__subject = dictOptions["subject"] if "subject" in dictOptions else None
self.__receiver = dictOptions["receiver"] if "receiver" in dictOptions else None
self.__transmitter = dictOptions["transmitter"] if "transmitter" in dictOptions else None
self.__message = dictOptions["message"] if "message" in dictOptions else None
self.__loginname = dictOptions["loginname"] if "loginname" in dictOptions else None
self.__password = dictOptions["password"] if "password" in dictOptions else None
self.__smtpserver = dictOptions["smtpserver"] if "smtpserver" in dictOptions else None
self.__smtpport = dictOptions["smtpport"] if "smtpport" in dictOptions else None
self.__attachments = dictOptions["attachments"] if "attachments" in dictOptions else None
except:
print "Error Configuration file please take a look."
exit(-1)
#raise
def get_subject(self):
return self.__subject
def get_receiver(self):
if self.__receiver == None:
return None
else:
return self.__receiver.replace(' ', '').split(",")
def get_transmitter(self):
return self.__transmitter
def get_message(self):
return self.__message
def get_loginname(self):
return self.__loginname
def get_password(self):
return self.__password
def get_SMTP_Server_URL(self):
#print self.__smtpserver
return self.__smtpserver
def get_SMTP_Server_Prot(self):
return self.__smtpport
def get_attachments(self):
if self.__attachments == None:
return None
else:
return self.__attachments.replace(' ', '').split(",")
| gpl-2.0 |
stefan2904/traube | traube/config.py | 1 | 1759 | #!/usr/bin/env python
__author__ = "stefan"
import configparser
import os
def initParser(config_path):
    """Create a ConfigParser pre-loaded from *config_path*.

    A nonexistent path is silently ignored by ``read`` and yields an
    empty parser.
    """
    cfg = configparser.ConfigParser()
    cfg.read(config_path)
    return cfg
def parseGlobal(parser, base):
    """Validate the GLOBAL section and return ``(index, remote, key)``.

    A relative ``index`` (not starting with ``/``) has a leading ``.``
    dropped, is normalized to start with ``/`` and anchored at *base*;
    an absolute ``index`` is returned untouched.

    Raises:
        KeyError: when the GLOBAL section or one of its required
            options (remote/index/key) is missing.
    """
    if 'GLOBAL' not in parser:
        raise KeyError('Invalid config ... No GLOBAL section.')
    section = parser['GLOBAL']
    missing = [opt for opt in ('remote', 'index', 'key') if opt not in section]
    if missing:
        raise KeyError('Invalid config... No remote/index/key.')
    index = section['index']
    if not index.startswith('/'):
        # Relative path: drop a leading '.', trim whitespace, ensure a
        # single leading slash, then anchor at the config's base directory.
        if index.startswith('.'):
            index = index[1:]
        index = index.strip()
        if not index.startswith('/'):
            index = '/' + index
        index = base + index
    return index, section['remote'], section['key']
def parseKeys(parser):
    """Return the KEYS section mapping of *parser*.

    Raises:
        KeyError: when the config has no KEYS section.
    """
    if 'KEYS' in parser:
        return parser['KEYS']
    raise KeyError('Invalid config ... No KEYS section.')
def parseSources(parser):
    """Build ``{name: {'dir': ..., 'keys': [...]}}`` from every section
    other than GLOBAL and KEYS.

    Raises:
        ValueError: when GLOBAL or KEYS is absent (``list.remove``).
        KeyError: when a source section lacks ``dir`` or ``keys``.
    """
    names = parser.sections()
    # A well-formed config always contains both; .remove raises otherwise.
    names.remove('GLOBAL')
    names.remove('KEYS')
    sources = {}
    for sect in names:
        entry = parser[sect]
        if 'dir' not in entry or 'keys' not in entry:
            raise KeyError('Invalid config... No dir/keys for %s' % entry['name'])
        directory = entry['dir']
        if directory.startswith('.'):
            # Drop the leading '.' of relative directories like './data'.
            directory = directory[1:]
        sources[entry['name'].strip().replace(' ', '_')] = {
            'dir': directory.strip(),
            'keys': [k.strip() for k in entry['keys'].split(',')],
        }
    return sources
def loadConfig(config_path):
    """Parse *config_path* and return the full configuration tuple
    ``(base, idx, remote, key, keys, sources)``.
    """
    # The directory of the config file anchors all relative paths.
    base = os.path.split(config_path)[0]
    parser = initParser(config_path)
    idx, remote, key = parseGlobal(parser, base)
    keys = parseKeys(parser)
    sources = parseSources(parser)
    return base, idx, remote, key, keys, sources
| gpl-3.0 |
reubano/csv2ofx | csv2ofx/ofx.py | 1 | 18883 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
# pylint: disable=no-self-use
"""
csv2ofx.ofx
~~~~~~~~~~~
Provides methods for generating OFX content
Examples:
literal blocks::
python example_google.py
Attributes:
ENCODING (str): Default file encoding.
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from datetime import datetime as dt
from builtins import *
from meza.fntools import chunk, xmlize
from meza.process import group
from . import Content, utils
class OFX(Content):
    """Builds OFX (Open Financial Exchange) documents from mapped
    transaction data.
    """

    def __init__(self, mapping=None, **kwargs):
        """OFX constructor.

        Args:
            mapping (dict): bank mapper (see csv2ofx.mappings)
            kwargs (dict): Keyword arguments

        Kwargs:
            def_type (str): Default account type.
            start (date): Date from which to begin including transactions.
            end (date): Date from which to exclude transactions.

        Examples:
            >>> from csv2ofx.mappings.mint import mapping
            >>> OFX(mapping)  # doctest: +ELLIPSIS
            <csv2ofx.ofx.OFX object at 0x...>
        """
        # TODO: Add timezone info  # pylint: disable=fixme
        super(OFX, self).__init__(mapping, **kwargs)
        self.def_type = kwargs.get('def_type')
        self.prev_group = None
        # Transfers are wrapped in INTRATRNRS; plain statements in STMTTRNRS.
        self.resp_type = 'INTRATRNRS' if self.split_account else 'STMTTRNRS'
        # Keywords matched against account names to infer each OFX type.
        self.account_types = {
            'CHECKING': ('checking', 'income', 'receivable', 'payable'),
            'SAVINGS': ('savings',),
            'MONEYMRKT': ('market', 'cash', 'expenses'),
            'CREDITLINE': ('visa', 'master', 'express', 'discover'),
        }
def header(self, **kwargs):
    """Return the OFX document preamble and response-set header.

    Kwargs:
        date (datetime): The datetime (default: `datetime.now()`).
        language (str): The ISO formatted language (default: ENG).

    Returns:
        (str): the OFX content
    """
    kwargs.setdefault('language', 'ENG')
    # yyyymmddhhmmss
    time_stamp = kwargs.get('date', dt.now()).strftime('%Y%m%d%H%M%S')
    lines = [
        'DATA:OFXSGML',
        'ENCODING:UTF-8',
        '<OFX>',
        '\t<SIGNONMSGSRSV1>',
        '\t\t<SONRS>',
        '\t\t\t<STATUS>',
        '\t\t\t\t<CODE>0</CODE>',
        '\t\t\t\t<SEVERITY>INFO</SEVERITY>',
        '\t\t\t</STATUS>',
        '\t\t\t<DTSERVER>%s</DTSERVER>' % time_stamp,
        '\t\t\t<LANGUAGE>%(language)s</LANGUAGE>' % kwargs,
        '\t\t</SONRS>',
        '\t</SIGNONMSGSRSV1>',
        '\t<BANKMSGSRSV1>',
        '\t\t<%s>' % self.resp_type,
        '\t\t\t<TRNUID></TRNUID>',
        '\t\t\t<STATUS>',
        '\t\t\t\t<CODE>0</CODE>',
        '\t\t\t\t<SEVERITY>INFO</SEVERITY>',
        '\t\t\t</STATUS>',
    ]
    return '\n'.join(lines) + '\n'
def transaction_data(self, trxn):
    """Return the parsed transaction *trxn* augmented with OFX account
    types and a combined memo.

    Args:
        trxn (dict): the transaction

    Returns:
        (dict): the OFX transaction data, including 'account_type',
            'split_account_type' (None when there is no split account)
            and 'memo' (memo and class merged when both are present).
    """
    data = super(OFX, self).transaction_data(trxn)
    type_args = (self.account_types, self.def_type)
    split_account = data['split_account']
    if split_account:
        split_type = utils.get_account_type(split_account, *type_args)
    else:
        split_type = None
    memo, klass = data.get('memo'), data.get('class')
    if memo and klass:
        combined = '%s %s' % (memo, klass)
    else:
        combined = memo or klass
    data.update({
        'account_type': utils.get_account_type(data['account'], *type_args),
        'split_account_type': split_type,
        'memo': combined,
    })
    return data
def account_start(self, **kwargs):
    """Return the OFX statement opening content for one account.

    Args:
        kwargs (dict): Output from `transaction_data`.

    Kwargs:
        currency (str): The ISO formatted currency (required).
        bank_id (str): A unique bank identifier (required).
        account_id (str): A unique account identifier (required).
        account_type (str): One of ['CHECKING', 'SAVINGS', 'MONEYMRKT',
            'CREDITLINE'] (required).

    Returns:
        (str): the OFX content
    """
    kwargs['start_date'] = self.start.strftime('%Y%m%d')
    kwargs['end_date'] = self.end.strftime('%Y%m%d')
    lines = [
        '\t\t\t<STMTRS>',
        '\t\t\t\t<CURDEF>%(currency)s</CURDEF>' % kwargs,
        '\t\t\t\t<BANKACCTFROM>',
        '\t\t\t\t\t<BANKID>%(bank_id)s</BANKID>' % kwargs,
        '\t\t\t\t\t<ACCTID>%(account_id)s</ACCTID>' % kwargs,
        '\t\t\t\t\t<ACCTTYPE>%(account_type)s</ACCTTYPE>' % kwargs,
        '\t\t\t\t</BANKACCTFROM>',
        '\t\t\t\t<BANKTRANLIST>',
        '\t\t\t\t\t<DTSTART>%(start_date)s</DTSTART>' % kwargs,
        '\t\t\t\t\t<DTEND>%(end_date)s</DTEND>' % kwargs,
    ]
    return '\n'.join(lines) + '\n'
def transaction(self, **kwargs):
    """Return one OFX <STMTTRN> transaction element.

    Args:
        kwargs (dict): Output from `transaction_data`.

    Kwargs:
        date (datetime): the transaction date (required)
        type (str): the transaction type (required)
        amount (number): the transaction amount (required)
        id (str): the transaction id (required)
        check_num (str): the check num (optional)
        payee (str): the payee (required)
        memo (str): the transaction memo (optional)

    Returns:
        (str): the OFX content
    """
    posted = kwargs['date'].strftime('%Y%m%d%H%M%S')  # yyyymmddhhmmss
    lines = [
        '\t\t\t\t\t<STMTTRN>',
        '\t\t\t\t\t\t<TRNTYPE>%(type)s</TRNTYPE>' % kwargs,
        '\t\t\t\t\t\t<DTPOSTED>%s</DTPOSTED>' % posted,
        '\t\t\t\t\t\t<TRNAMT>%(amount)0.2f</TRNAMT>' % kwargs,
        '\t\t\t\t\t\t<FITID>%(id)s</FITID>' % kwargs,
    ]
    # CHECKNUM and NAME are emitted for any non-None value; MEMO only
    # when truthy (an empty memo is suppressed).
    if kwargs.get('check_num') is not None:
        lines.append('\t\t\t\t\t\t<CHECKNUM>%(check_num)s</CHECKNUM>' % kwargs)
    if kwargs.get('payee') is not None:
        lines.append('\t\t\t\t\t\t<NAME>%(payee)s</NAME>' % kwargs)
    if kwargs.get('memo'):
        lines.append('\t\t\t\t\t\t<MEMO>%(memo)s</MEMO>' % kwargs)
    lines.append('\t\t\t\t\t</STMTTRN>')
    return '\n'.join(lines) + '\n'
def account_end(self, **kwargs):
    """Close the OFX transaction list, optionally with a ledger balance.

    Kwargs:
        date (datetime): the transaction date (required)
        balance (number): the account balance (optional)

    Returns:
        (str): the OFX content
    """
    as_of = kwargs['date'].strftime('%Y%m%d%H%M%S')  # yyyymmddhhmmss
    lines = ['\t\t\t\t</BANKTRANLIST>']
    if kwargs.get('balance') is not None:
        lines.extend([
            '\t\t\t\t<LEDGERBAL>',
            '\t\t\t\t\t<BALAMT>%(balance)0.2f</BALAMT>' % kwargs,
            '\t\t\t\t\t<DTASOF>%s</DTASOF>' % as_of,
            '\t\t\t\t</LEDGERBAL>'])
    lines.append('\t\t\t</STMTRS>')
    return '\n'.join(lines) + '\n'
def transfer(self, **kwargs):
    """Open an OFX intra-bank transfer (<INTRARS>) block.

    Args:
        kwargs (dict): Output from `transaction_data`.

    Kwargs:
        currency (str): The ISO formatted currency (required).
        id (str): the transfer id (required)
        amount (number): the transaction amount (required)
        bank_id (str): A unique bank identifier (required).
        account_id (str): A unique account identifier (required).
        account_type (str): One of ['CHECKING', 'SAVINGS', 'MONEYMRKT',
            'CREDITLINE'] (required).

    Returns:
        (str): the start of an OFX transfer
    """
    lines = [
        '\t\t\t<INTRARS>',
        '\t\t\t\t<CURDEF>%(currency)s</CURDEF>' % kwargs,
        '\t\t\t\t<SRVRTID>%(id)s</SRVRTID>' % kwargs,
        '\t\t\t\t<XFERINFO>',
        '\t\t\t\t\t<TRNAMT>%(amount)0.2f</TRNAMT>' % kwargs,
        '\t\t\t\t\t<BANKACCTFROM>',
        '\t\t\t\t\t\t<BANKID>%(bank_id)s</BANKID>' % kwargs,
        '\t\t\t\t\t\t<ACCTID>%(account_id)s</ACCTID>' % kwargs,
        '\t\t\t\t\t\t<ACCTTYPE>%(account_type)s</ACCTTYPE>' % kwargs,
        '\t\t\t\t\t</BANKACCTFROM>',
    ]
    return '\n'.join(lines) + '\n'
def split_content(self, **kwargs):
    """Return the <BANKACCTTO> destination block for a transfer/split.

    When a `split_account` is given its id/type are used as the
    destination; otherwise the transaction's own account id/type are.

    Args:
        kwargs (dict): Output from `transaction_data`.

    Kwargs:
        split_account (str): Account to use as the transfer recipient.
        bank_id (str): A unique bank identifier (required).
        split_account_id (str): Destination account id (required with
            `split_account`).
        split_account_type (str): Destination account type, one of
            ['CHECKING', 'SAVINGS', 'MONEYMRKT', 'CREDITLINE'] (required
            with `split_account`).
        account_id (str): Fallback account id (required otherwise).
        account_type (str): Fallback account type (required otherwise).

    Returns:
        (str): the OFX split content
    """
    use_split = bool(kwargs.get('split_account'))
    acct_id = kwargs['split_account_id'] if use_split else kwargs['account_id']
    acct_type = (
        kwargs['split_account_type'] if use_split else kwargs['account_type'])
    lines = [
        '\t\t\t\t\t<BANKACCTTO>',
        '\t\t\t\t\t\t<BANKID>%(bank_id)s</BANKID>' % kwargs,
        '\t\t\t\t\t\t<ACCTID>%s</ACCTID>' % acct_id,
        '\t\t\t\t\t\t<ACCTTYPE>%s</ACCTTYPE>' % acct_type,
        '\t\t\t\t\t</BANKACCTTO>',
    ]
    return '\n'.join(lines) + '\n'
# pylint: disable=unused-argument
def transfer_end(self, date=None, **kwargs):
    """Close an OFX transfer block, stamping the posted date.

    Args:
        date (datetime): the transfer date (required)

    Returns:
        (str): the end of an OFX transfer
    """
    posted = date.strftime('%Y%m%d%H%M%S')  # yyyymmddhhmmss
    lines = [
        '\t\t\t\t</XFERINFO>',
        '\t\t\t\t<DTPOSTED>%s</DTPOSTED>' % posted,
        '\t\t\t</INTRARS>',
    ]
    return '\n'.join(lines) + '\n'
def footer(self, **kwargs):
    """Return the closing OFX content.

    Kwargs:
        date (datetime): The datetime (default: `datetime.now()`).

    Returns:
        (str): the OFX content
    """
    kwargs.setdefault('date', dt.now())
    # Split exports end with a dangling transfer; plain statements with an
    # open account; an explicit split_account already closed everything.
    if self.is_split:
        closing = self.transfer_end(**kwargs)
    elif not self.split_account:
        closing = self.account_end(**kwargs)
    else:
        closing = ''
    return closing + "\t\t</%s>\n\t</BANKMSGSRSV1>\n</OFX>\n" % self.resp_type
def gen_body(self, data):  # noqa: C901
    """Generate the OFX body.

    Yields one OFX fragment (str) per datum, inserting the matching
    account/transfer open and close fragments whenever the group key
    changes.  ``self.prev_group`` carries the last-seen group between
    iterations.
    """
    for datum in data:
        grp = datum['group']
        if self.is_split and datum['len'] > 2:
            # OFX doesn't support more than 2 splits
            raise TypeError('Group %s has too many splits.\n' % grp)
        trxn_data = self.transaction_data(datum['trxn'])
        # split_like: any transfer-style output; full_split: split data
        # combined with an explicit recipient account.
        split_like = self.is_split or self.split_account
        full_split = self.is_split and self.split_account
        new_group = self.prev_group and self.prev_group != grp
        # Close the previous group's open fragment before starting anew.
        if new_group and full_split:
            yield self.transfer_end(**trxn_data)
        elif new_group and not split_like:
            yield self.account_end(**trxn_data)
        if self.split_account:
            # Explicit recipient: each datum becomes a complete transfer.
            yield self.transfer(**trxn_data)
            yield self.split_content(**trxn_data)
            yield self.transfer_end(**trxn_data)
        elif self.is_split and datum['is_main']:
            # Main row of a split group opens the transfer...
            yield self.transfer(**trxn_data)
        elif self.is_split:
            # ...and the remaining rows supply the destination account.
            yield self.split_content(**trxn_data)
        elif datum['is_main']:
            # Main row of a plain group opens the account statement.
            yield self.account_start(**trxn_data)
            yield self.transaction(**trxn_data)
        else:
            yield self.transaction(**trxn_data)
        self.prev_group = grp
def gen_groups(self, records, chunksize=None):
    """Chunk *records*, XML-escape every value, and yield grouped rows."""
    for batch in chunk(records, chunksize):
        cleansed = [
            {key: next(xmlize([value])) for key, value in row.items()}
            for row in batch]
        # Split exports group by transaction id; otherwise by account.
        keyfunc = self.id if self.is_split else self.account
        for grouped in group(cleansed, keyfunc):
            yield grouped
| mit |
cainmatt/django | django/contrib/gis/gdal/datasource.py | 357 | 4777 | """
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
# Returns the value the field; OFTIntegers return ints,
# OFTReal returns floats, all else returns string.
val = field.value
"""
from ctypes import byref
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
    "Wraps an OGR Data Source object."

    def __init__(self, ds_input, ds_driver=False, write=False, encoding='utf-8'):
        # The write flag.
        if write:
            self._write = 1
        else:
            self._write = 0
        # See also http://trac.osgeo.org/gdal/wiki/rfc23_ogr_unicode
        self.encoding = encoding
        Driver.ensure_registered()
        if isinstance(ds_input, six.string_types):
            # The data source driver is a void pointer.
            ds_driver = Driver.ptr_type()
            try:
                # OGROpen will auto-detect the data source type.
                ds = capi.open_ds(force_bytes(ds_input), self._write, byref(ds_driver))
            except GDALException:
                # Making the error message more clear rather than something
                # like "Invalid pointer returned from OGROpen".
                raise GDALException('Could not open the datasource at "%s"' % ds_input)
        elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
            # A pre-opened pointer was handed in directly (internal use).
            ds = ds_input
        else:
            raise GDALException('Invalid data source input type: %s' % type(ds_input))
        if ds:
            self.ptr = ds
            self.driver = Driver(ds_driver)
        else:
            # Raise an exception if the returned pointer is NULL
            raise GDALException('Invalid data source file "%s"' % ds_input)

    def __del__(self):
        "Destroys this DataStructure object."
        # Guard `capi` too: it may already be torn down at interpreter exit.
        if self._ptr and capi:
            capi.destroy_ds(self._ptr)

    def __iter__(self):
        "Allows for iteration over the layers in a data source."
        for i in range(self.layer_count):
            yield self[i]

    def __getitem__(self, index):
        "Allows use of the index [] operator to get a layer at the index."
        if isinstance(index, six.string_types):
            # Lookup by layer name; a NULL pointer means the name is unknown.
            l = capi.get_layer_by_name(self.ptr, force_bytes(index))
            if not l:
                raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
        elif isinstance(index, int):
            # Lookup by position, bounds-checked against the layer count.
            if index < 0 or index >= self.layer_count:
                raise OGRIndexError('index out of range')
            l = capi.get_layer(self._ptr, index)
        else:
            raise TypeError('Invalid index type: %s' % type(index))
        return Layer(l, self)

    def __len__(self):
        "Returns the number of layers within the data source."
        return self.layer_count

    def __str__(self):
        "Returns OGR GetName and Driver for the Data Source."
        return '%s (%s)' % (self.name, str(self.driver))

    @property
    def layer_count(self):
        "Returns the number of layers in the data source."
        return capi.get_layer_count(self._ptr)

    @property
    def name(self):
        "Returns the name of the data source."
        name = capi.get_ds_name(self._ptr)
        return force_text(name, self.encoding, strings_only=True)
| bsd-3-clause |
t0mk/ansible | lib/ansible/modules/network/avi/avi_pool.py | 8 | 16763 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_pool
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Pool Avi RESTful Object
description:
- This module is used to configure Pool object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
a_pool:
description:
- Name of container cloud application that constitutes a pool in a a-b pool configuration, if different from vs app.
ab_pool:
description:
- A/b pool configuration.
ab_priority:
description:
- Priority of this pool in a a-b pool pair.
- Internally used.
apic_epg_name:
description:
- Synchronize cisco apic epg members with pool servers.
application_persistence_profile_ref:
description:
- Persistence will ensure the same user sticks to the same server for a desired duration of time.
- It is a reference to an object of type applicationpersistenceprofile.
autoscale_launch_config_ref:
description:
- Reference to the launch configuration profile.
- It is a reference to an object of type autoscalelaunchconfig.
autoscale_networks:
description:
- Network ids for the launch configuration.
autoscale_policy_ref:
description:
- Reference to server autoscale policy.
- It is a reference to an object of type serverautoscalepolicy.
capacity_estimation:
description:
- Inline estimation of capacity of servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
capacity_estimation_ttfb_thresh:
description:
- The maximum time-to-first-byte of a server.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
cloud_config_cksum:
description:
- Checksum of cloud configuration for pool.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
connection_ramp_duration:
description:
- Duration for which new connections will be gradually ramped up to a server recently brought online.
- Useful for lb algorithms that are least connection based.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
created_by:
description:
- Creator name.
default_server_port:
description:
- Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute.
- The ssl checkbox enables avi to server encryption.
- Default value when not specified in API or module is interpreted by Avi Controller as 80.
description:
description:
- A description of the pool.
domain_name:
description:
- Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates.
- It is performed only when common name check host_check_enabled is enabled.
east_west:
description:
- Inherited config from virtualservice.
enabled:
description:
- Enable or disable the pool.
- Disabling will terminate all open connections and pause health monitors.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
fail_action:
description:
- Enable an action - close connection, http redirect, local http response, or backup pool - when a pool failure happens.
- By default, a connection will be closed, in case the pool experiences a failure.
fewest_tasks_feedback_delay:
description:
- Periodicity of feedback for fewest tasks server selection algorithm.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
graceful_disable_timeout:
description:
- Used to gracefully disable a server.
- Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
health_monitor_refs:
description:
- Verify server health by applying one or more health monitors.
- Active monitors generate synthetic traffic from each service engine and mark a server up or down based on the response.
- The passive monitor listens only to client to server communication.
- It raises or lowers the ratio of traffic destined to a server based on successful responses.
- It is a reference to an object of type healthmonitor.
host_check_enabled:
description:
- Enable common name check for server certificate.
- If enabled and no explicit domain name is specified, avi will use the incoming host header to do the match.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
inline_health_monitor:
description:
- The passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses.
- This may alter the expected behavior of the lb method, such as round robin.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ipaddrgroup_ref:
description:
- Use list of servers from ip address group.
- It is a reference to an object of type ipaddrgroup.
lb_algorithm:
description:
- The load balancing algorithm will pick a server within the pool's list of available servers.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_LEAST_CONNECTIONS.
lb_algorithm_consistent_hash_hdr:
description:
- Http header name to be used for the hash key.
lb_algorithm_hash:
description:
- Criteria used as a key for determining the hash between the client and server.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS.
max_concurrent_connections_per_server:
description:
- The maximum number of concurrent connections allowed to each server within the pool.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
max_conn_rate_per_server:
description:
- Rate limit connections to each server.
name:
description:
- The name of the pool.
required: true
networks:
description:
- (internal-use) networks designated as containing servers for this pool.
- The servers may be further narrowed down by a filter.
- This field is used internally by avi, not editable by the user.
pki_profile_ref:
description:
- Avi will validate the ssl certificate present by a server against the selected pki profile.
- It is a reference to an object of type pkiprofile.
placement_networks:
description:
- Manually select the networks and subnets used to provide reachability to the pool's servers.
- Specify the subnet using the following syntax 10-1-1-0/24.
- Use static routes in vrf configuration when pool servers are not directly connected butroutable from the service engine.
prst_hdr_name:
description:
- Header name for custom header persistence.
request_queue_depth:
description:
- Minimum number of requests to be queued when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as 128.
request_queue_enabled:
description:
- Enable request queue when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_server_name:
description:
- Rewrite incoming host header to server name of the server to which the request is proxied.
- Enabling this feature rewrites host header for requests to all servers in the pool.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_sni:
description:
- If sni server name is specified, rewrite incoming host header to the sni server name.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_auto_scale:
description:
- Server autoscale.
- Not used anymore.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_count:
description:
- Number of server_count.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
server_name:
description:
- Fully qualified dns hostname which will be used in the tls sni extension in server connections if sni is enabled.
- If no value is specified, avi will use the incoming host header instead.
server_reselect:
description:
- Server reselect configuration for http requests.
servers:
description:
- The pool directs load balanced traffic to this list of destination servers.
- The servers can be configured by ip address, name, network or via ip address group.
sni_enabled:
description:
- Enable tls sni for server connections.
- If disabled, avi will not send the sni extension as part of the handshake.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_key_and_certificate_ref:
description:
- Service engines will present a client ssl certificate to the server.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- When enabled, avi re-encrypts traffic to the backend servers.
- The specific ssl profile defines which ciphers and ssl versions will be supported.
- It is a reference to an object of type sslprofile.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
use_service_port:
description:
- Do not translate the client's destination port when sending the connection to the server.
- The pool or servers specified service port will still be used for health monitoring.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
uuid:
description:
- Uuid of the pool.
vrf_ref:
description:
- Virtual routing context that the pool is bound to.
- This is used to provide the isolation of the set of networks the pool is attached to.
- The pool inherits the virtual routing conext of the virtual service, and this field is used only internally, and is set by pb-transform.
- It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a Pool with two servers and HTTP monitor
avi_pool:
controller: 10.10.1.20
username: avi_user
password: avi_password
name: testpool1
description: testpool1
state: present
health_monitor_refs:
- '/api/healthmonitor?name=System-HTTP'
servers:
- ip:
addr: 10.10.2.20
type: V4
- ip:
addr: 10.10.2.21
type: V4
'''
RETURN = '''
obj:
description: Pool (api/pool) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible module entry point: create/update/delete an Avi Pool object."""
    # Full argument spec for the Avi Pool; fields map 1:1 onto the Avi API's
    # pool schema and are optional unless marked required.
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        a_pool=dict(type='str',),
        ab_pool=dict(type='dict',),
        ab_priority=dict(type='int',),
        apic_epg_name=dict(type='str',),
        application_persistence_profile_ref=dict(type='str',),
        autoscale_launch_config_ref=dict(type='str',),
        autoscale_networks=dict(type='list',),
        autoscale_policy_ref=dict(type='str',),
        capacity_estimation=dict(type='bool',),
        capacity_estimation_ttfb_thresh=dict(type='int',),
        cloud_config_cksum=dict(type='str',),
        cloud_ref=dict(type='str',),
        connection_ramp_duration=dict(type='int',),
        created_by=dict(type='str',),
        default_server_port=dict(type='int',),
        description=dict(type='str',),
        domain_name=dict(type='list',),
        east_west=dict(type='bool',),
        enabled=dict(type='bool',),
        fail_action=dict(type='dict',),
        fewest_tasks_feedback_delay=dict(type='int',),
        graceful_disable_timeout=dict(type='int',),
        health_monitor_refs=dict(type='list',),
        host_check_enabled=dict(type='bool',),
        inline_health_monitor=dict(type='bool',),
        ipaddrgroup_ref=dict(type='str',),
        lb_algorithm=dict(type='str',),
        lb_algorithm_consistent_hash_hdr=dict(type='str',),
        lb_algorithm_hash=dict(type='str',),
        max_concurrent_connections_per_server=dict(type='int',),
        max_conn_rate_per_server=dict(type='dict',),
        name=dict(type='str', required=True),
        networks=dict(type='list',),
        pki_profile_ref=dict(type='str',),
        placement_networks=dict(type='list',),
        prst_hdr_name=dict(type='str',),
        request_queue_depth=dict(type='int',),
        request_queue_enabled=dict(type='bool',),
        rewrite_host_header_to_server_name=dict(type='bool',),
        rewrite_host_header_to_sni=dict(type='bool',),
        server_auto_scale=dict(type='bool',),
        server_count=dict(type='int',),
        server_name=dict(type='str',),
        server_reselect=dict(type='dict',),
        servers=dict(type='list',),
        sni_enabled=dict(type='bool',),
        ssl_key_and_certificate_ref=dict(type='str',),
        ssl_profile_ref=dict(type='str',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        use_service_port=dict(type='bool',),
        uuid=dict(type='str',),
        vrf_ref=dict(type='str',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate all CRUD handling to the shared Avi ansible helper.
    return avi_ansible_api(module, 'pool',
                           set([]))
# Module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
bklang/GO2 | login.py | 1 | 16349 | """
Handlers for user-related pages: login, logout, signup, verify
"""
from google.appengine.api import users
from webapp2_extras.auth import *
from requestmodel import *
from webapp2_extras.appengine.auth.models import UserToken
from webapp2_extras.appengine.auth.models import User
from google.appengine.ext import ndb
from webapp2_extras.i18n import gettext as _
import logging
import member
import goemail
import datetime
import lang
import assoc
import gigoexceptions
# When False, verification URLs are shown in the page instead of emailed
# (used by SignupPage and ForgotPasswordHandler for development).
ENABLE_EMAIL = True
class LoginPage(BaseHandler):
    """Serve the login form and authenticate users against the auth store."""

    def get(self):
        # Optional URL to return to after a successful login.
        the_url = self.request.get('originalurl',None)
        self._serve_page(the_url=the_url)

    def post(self):
        """Attempt login; unverified accounts are bounced back to the form."""
        email = self.request.get('email').lower()
        password = self.request.get('password')
        remember = self.request.get('remember',False)
        if remember:
            # Normalize any truthy form value to a real boolean.
            remember = True
        try:
            u = self.auth.get_user_by_password(email, password, remember=remember,
                                               save_session=True)
            the_user = self.user_model.get_by_id(u['user_id'])
            if the_user.verified is False:
                # Unverified accounts may not stay logged in: drop the
                # session that get_user_by_password just created.
                self.session.clear()
                self.auth.unset_session()
                self._serve_page(unverified=True)
            else:
                the_url = self.request.get('originalurl',None)
                if the_url:
                    self.redirect(str(the_url))
                else:
                    self.redirect(self.uri_for('home'))
        except (InvalidAuthIdError, InvalidPasswordError) as e:
            self._serve_page(failed=True)

    def _serve_page(self, the_url=None, failed=False, unverified=False):
        """Render the login template with status flags for the UI."""
        username = self.request.get('username')
        locale=self.request.get('locale','en')
        params = {
            'username': username,
            'failed': failed,
            'unverified': unverified,
            'originalurl': the_url,
            'locale': locale,
            'languages': lang.LOCALES
        }
        self.render_template('login.html', params=params)
class LogoutHandler(BaseHandler):
    """Log the current user out and return to the home page."""

    def get(self):
        # if you actually log out, we'll clear the session to reset band lists and stuff
        self.session.clear()
        self.auth.unset_session()
        self.redirect(self.uri_for('home'))
##########
#
# SignupHandler
#
##########
class SignupPage(BaseHandler):
    """ class for handling signup requests """

    def get(self):
        self._serve_page()

    def post(self):
        """Create a member, then email a 'v'-type verification link."""
        email = self.request.get('email').lower()
        name = self.request.get('name')
        password = self.request.get('password')
        try:
            (success, result) = member.create_new_member(email=email, name=name, password=password)
        except member.MemberError as e:
            return self._serve_page(e.value)
        if not success:
            return self._serve_page(_('User could not be created (email address may be in use)'))
        user = result
        user_id = user.get_id()
        locale = self.request.get('locale','en')
        user.preferences.locale=locale
        user.put()
        # The signup token is embedded in the link handled by VerificationHandler.
        token = self.user_model.create_signup_token(user_id)
        verification_url = self.uri_for('verification', type='v', user_id=user_id,
                                        signup_token=token, _full=True)
        if not ENABLE_EMAIL:
            # Dev mode: surface the link on the page instead of emailing it.
            msg=verification_url
        else:
            goemail.send_registration_email(the_email=email, the_url=verification_url)
            msg=''
        params = {
            'msg': msg,
            'locale': locale
        }
        self.render_template('confirm_signup.html', params=params)

    def _serve_page(self, error=None):
        """Render the signup form, optionally showing an error message."""
        locale=self.request.get('locale',None)
        params = {
            'failed': error is not None,
            'locale' : locale,
            'error': error
        }
        self.render_template('signup.html', params=params)
##########
#
# VerificationHandler
#
##########
class VerificationHandler(BaseHandler):
    """ handles user verification

    Type 'v' confirms a new account's email; type 'p' serves the
    password-reset form. Bad/expired tokens redirect to the link-error page.
    """

    def get(self, *args, **kwargs):
        user = None
        user_id = kwargs['user_id']
        signup_token = kwargs['signup_token']
        verification_type = kwargs['type']
        # it should be something more concise like
        # self.auth.get_user_by_token(user_id, signup_token
        # unfortunately the auth interface does not (yet) allow to manipulate
        # signup tokens concisely
        user, ts = self.user_model.get_by_auth_token(int(user_id),
                                                     signup_token,
                                                     'signup')
        if not user:
            logging.error( \
                'Could not find any user with id "%s" signup token "%s"',
                user_id, signup_token)
            locale=self.request.get('locale',None)
            if locale:
                self.redirect('{0}?locale={1}'.format(self.uri_for('linkerror'),locale))
            else:
                self.redirect(self.uri_for('linkerror'))
            return
        # store user data in the session
        self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
        if verification_type == 'v':
            # remove signup token, we don't want users to come back with an old link
            self.user_model.delete_signup_token(user.get_id(), signup_token)
            if not user.verified:
                user.verified = True
                user.put()
            self.auth.unset_session()
            # self.display_message('User email address has been verified. Proceed <a href="/login">here</a>')
            self.redirect(self.uri_for('login'))
        elif verification_type == 'p':
            # supply user to the page
            params = {
                'user': user,
                'token': signup_token
            }
            self.render_template('resetpassword.html', params=params)
        else:
            raise gigoexceptions.GigoException('verification type not supported')
##########
#
# EmailVerificationHandler
#
##########
class EmailVerificationHandler(BaseHandler):
    """Handles confirmation links for a pending email-address change.

    The link carries an 'email' token; type 'e' commits the pending
    address via Member.set_email_to_pending().
    """

    def get(self, *args, **kwargs):
        user = None
        user_id = kwargs['user_id']
        signup_token = kwargs['signup_token']
        verification_type = kwargs['type']
        # it should be something more concise like
        # self.auth.get_user_by_token(user_id, signup_token
        # unfortunately the auth interface does not (yet) allow to manipulate
        # signup tokens concisely
        user, ts = self.user_model.get_by_auth_token(int(user_id),
                                                     signup_token,
                                                     'email')
        if not user:
            raise gigoexceptions.GigoException('Email verification handler could not find any user with id "{0}" signup token "{1}"'.format(user_id, signup_token))
        # store user data in the session
        self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
        if verification_type == 'e':
            new_email = user.set_email_to_pending()
            # remove signup token, we don't want users to come back with an old link
            self.user_model.delete_email_token(user.get_id(), signup_token)
            template_args = {
                # Idiom fix: compare against None with "is not", not "!=".
                'success' : new_email is not None,
                'the_email' : new_email
            }
            self.render_template('confirm_email_change.html', params=template_args)
        else:
            raise gigoexceptions.GigoException('Email verification handler: verification type not supported')
##########
#
# InviteVerificationHandler
#
##########
class InviteVerificationHandler(BaseHandler):
    """ handles user invite verification

    GET (type 'i') shows the invite-welcome page; POST completes the
    invite by setting the password and confirming pending associations.
    """

    def get(self, *args, **kwargs):
        user = None
        user_id = kwargs['user_id']
        signup_token = kwargs['signup_token']
        verification_type = kwargs['type']
        # it should be something more concise like
        # self.auth.get_user_by_token(user_id, signup_token
        # unfortunately the auth interface does not (yet) allow to manipulate
        # signup tokens concisely
        user, ts = self.user_model.get_by_auth_token(int(user_id),
                                                     signup_token,
                                                     'invite')
        if not user:
            logging.error( \
                'Could not find any user with id "%s" invite token "%s"',
                user_id, signup_token)
            locale=self.request.get('locale',None)
            if locale:
                self.redirect('{0}?locale={1}'.format(self.uri_for('linkerror'),locale))
            else:
                self.redirect(self.uri_for('linkerror'))
            return
        if verification_type == 'i':
            # ok, this is a user who has one or more invites pending. They have a user but
            # not a password. We need to do this:
            # - direct them to a welcome page where they can enter a password
            template_args = {
                'mk': user.key.urlsafe(),
                'st': signup_token,
                'locale': user.preferences.locale
            }
            self.render_template('invite_welcome.html', params=template_args)
        else:
            raise gigoexceptions.GigoException('invite verification handler: verification type not supported')

    def post(self):
        # The posted form carries the member key ('mk'), the invite token
        # ('st') and the password chosen on the welcome page.
        mk = self.request.get('mk', None)
        st = self.request.get('st', None)
        password = self.request.get('password')
        if mk is None or st is None:
            raise gigoexceptions.GigoException('invite verification handler: no mk or st')
        the_member = ndb.Key(urlsafe = mk).get()
        # store user data in the session
        self.auth.set_session(self.auth.store.user_to_dict(the_member), remember=True)
        # - invalidate the invite link so they can't use it again
        self.user_model.delete_invite_token(the_member.get_id(), st)
        # - turn their 'invite' assocs into real assocs
        assoc.confirm_invites_for_member_key(the_member.key)
        the_member.set_password(password)
        the_member.verified = True
        name = self.request.get('member_name', '')
        nickname = self.request.get('member_nickname', '')
        if name != '':
            the_member.name=name
        if nickname != '':
            the_member.nickname=nickname
        the_member.put()
        self.redirect(self.uri_for('home'))
##########
#
# SetPasswordHandler
#
##########
class SetPasswordHandler(BaseHandler):
    """Sets a new password from the reset-password form (requires login)."""

    @user_required
    def post(self):
        password = self.request.get('password')
        # 't' is the signup token that was embedded in the reset link.
        old_token = self.request.get('t')
        if not password or password != self.request.get('confirm_password'):
            self.display_message('passwords do not match')
            return
        try:
            user = self.user
            user.set_password(password)
            user.put()
        except member.MemberError as e:
            self.display_message(e.value)
            return
        # remove signup token, we don't want users to come back with an old link
        self.user_model.delete_signup_token(user.get_id(), old_token)
        self.auth.unset_session()
        self.redirect(self.uri_for('home'))
##########
#
# AuthenticatedHandler
#
##########
class AuthenticatedHandler(BaseHandler):
    """Renders a page that is only reachable by a logged-in user."""

    @user_required
    def get(self):
        self.render_template('authenticated.html', params=None)
##########
#
# LinkErrorHandler
#
##########
class LinkErrorHandler(BaseHandler):
    """Show the generic "bad or expired link" page."""

    def get(self):
        requested_locale = self.request.get('locale', None)
        # Pass the locale through only when the request supplied one.
        params = {'locale': requested_locale} if requested_locale else None
        self.render_template('link_error.html', params=params)
##########
#
# ForgotPasswordHandler
#
##########
class ForgotPasswordHandler(BaseHandler):
    """Emails a password-reset link (a 'p'-type verification URL)."""

    def get(self):
        self._serve_page()

    def post(self):
        username = self.request.get('username')
        user = self.user_model.get_by_auth_id(username)
        if not user:
            logging.info('Could not find any user entry for username %s', username)
            self._serve_page(not_found=True)
            return
        user_id = user.get_id()
        token = member.Member.create_signup_token(user_id)
        verification_url = self.uri_for('verification', type='p', user_id=user_id,
                                        signup_token=token, _full=True)
        if ENABLE_EMAIL:
            goemail.send_forgot_email(the_email=user.email_address, the_url=verification_url)
            msg=""
        else:
            # Dev mode: show the link on the page instead of emailing it.
            msg = verification_url
        locale=self.request.get('locale',None)
        params = {
            'msg' : msg,
            'locale' : locale
        }
        self.render_template('confirm_forgot.html', params=params)

    def _serve_page(self, not_found=False):
        """Render the forgot-password form."""
        locale=self.request.get('locale',None)
        username = self.request.get('username')
        params = {
            'username': username,
            'not_found': not_found,
            'locale' : locale
        }
        self.render_template('forgot.html', params=params)
##########
#
# CheckEmail - verifies an email address is available
#
##########
class CheckEmail(BaseHandler):
    """AJAX endpoint: writes 'true' if the posted email address is usable."""

    def post(self):
        if self.user.is_superuser:
            # Superusers may claim any address.
            logging.info("superuser overriding email check")
            self.response.write('true')
            return
        candidate = self.request.get('member_email')
        # The user's current address is always acceptable; any other address
        # must not already belong to an existing account.
        available = 'true'
        if candidate != self.user.email_address and self.user_model.get_by_auth_id(candidate):
            available = 'false'
        self.response.write(available)
##########
#
# request_new_email
#
##########
def request_new_email(the_request, the_new_address):
    """If a member has asked for a new email address, send a confirmation
    email, and if it comes back, make the change.
    """
    # in theory, we've already verified that the email is unique - we'll trust that
    # set up a token
    user_id = the_request.user.get_id()
    token = the_request.user_model.create_email_token(user_id)
    # The link is handled by EmailVerificationHandler (type 'e').
    verification_url = the_request.uri_for('emailverification', type='e', user_id=user_id,
                                           signup_token=token, _full=True)
    goemail.send_the_pending_email(the_email_address=the_new_address, the_confirm_link=verification_url)
def get_all_signup_tokens():
    """Fetch every UserToken whose subject is 'signup'."""
    return UserToken.query(UserToken.subject == 'signup').fetch()
def get_old_signup_tokens():
    """Fetch keys of 'signup' tokens created more than two days ago."""
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=2)
    stale = UserToken.query(UserToken.subject == 'signup',
                            UserToken.created <= cutoff)
    return stale.fetch(keys_only=True)
def get_old_auth_tokens():
    """Fetch keys of 'auth' tokens created more than three weeks ago."""
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(weeks=3)
    stale = UserToken.query(UserToken.subject == 'auth',
                            UserToken.created <= cutoff)
    return stale.fetch(keys_only=True)
##########
#
# auto delete old signup tokens - we don't want them hanging around forever
#
##########
class AutoDeleteSignupTokenHandler(BaseHandler):
    """ automatically delete old tokens """

    def get(self):
        # Cron-style cleanup: purge stale signup tokens (>2 days) and stale
        # auth tokens (>3 weeks).
        the_token_keys = get_old_signup_tokens()
        logging.info("deleting {0} unused signup tokens".format(len(the_token_keys)))
        if len(the_token_keys):
            ndb.delete_multi(the_token_keys)
        the_token_keys = get_old_auth_tokens()
        logging.info("deleting {0} old auth tokens".format(len(the_token_keys)))
        if len(the_token_keys):
            ndb.delete_multi(the_token_keys)
        # member.update_all_uniques()
        # member.clean_up_verified()
        # assoc.update_all_assocs()
class WhatisPageHandler(BaseHandler):
    """ handle the whatis page """

    def get(self):
        params = {}
        self.render_template('whatis.html', params=params)
| gpl-3.0 |
endolith/scikit-image | doc/examples/xx_applications/plot_geometric.py | 28 | 3253 | """
===============================
Using geometric transformations
===============================
In this example, we will see how to use geometric transformations in the context
of image processing.
"""
from __future__ import print_function
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import transform as tf
# Shared subplot margin settings used by every figure below.
margins = dict(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
"""
Basics
======
Several different geometric transformation types are supported: similarity,
affine, projective and polynomial.
Geometric transformations can either be created using the explicit parameters
(e.g. scale, shear, rotation and translation) or the transformation matrix:
First we create a transformation using explicit parameters:
"""
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 2,
                               translation=(0, 1))
print(tform.params)
"""
Alternatively you can define a transformation by the transformation matrix
itself:
"""
matrix = tform.params.copy()
matrix[1, 2] = 2
tform2 = tf.SimilarityTransform(matrix)
"""
These transformation objects can then be used to apply forward and inverse
coordinate transformations between the source and destination coordinate
systems:
"""
coord = [1, 0]
print(tform2(coord))
print(tform2.inverse(tform(coord)))
"""
Image warping
=============
Geometric transformations can also be used to warp images:
"""
text = data.text()
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 4,
                               translation=(text.shape[0] / 2, -100))
# Warp forward, then warp back with the inverse to recover the original.
rotated = tf.warp(text, tform)
back_rotated = tf.warp(rotated, tform.inverse)
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.axis('off')
ax2.imshow(rotated)
ax2.axis('off')
ax3.imshow(back_rotated)
ax3.axis('off')
"""
.. image:: PLOT2RST.current_figure
Parameter estimation
====================
In addition to the basic functionality mentioned above you can also estimate the
parameters of a geometric transformation using the least-squares method.
This can amongst other things be used for image registration or rectification,
where you have a set of control points or homologous/corresponding points in two
images.
Let's assume we want to recognize letters on a photograph which was not taken
from the front but at a certain angle. In the simplest case of a plane paper
surface the letters are projectively distorted. Simple matching algorithms would
not be able to match such symbols. One solution to this problem would be to warp
the image so that the distortion is removed and then apply a matching algorithm:
"""
text = data.text()
# Corner correspondences: a 300x50 rectangle (src) maps onto the hand-picked
# corners of the distorted text region (dst).
src = np.array((
    (0, 0),
    (0, 50),
    (300, 50),
    (300, 0)
))
dst = np.array((
    (155, 15),
    (65, 40),
    (260, 130),
    (360, 95)
))
tform3 = tf.ProjectiveTransform()
tform3.estimate(src, dst)
warped = tf.warp(text, tform3, output_shape=(50, 300))
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.plot(dst[:, 0], dst[:, 1], '.r')
ax1.axis('off')
ax2.imshow(warped)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
ESS-LLP/erpnext-healthcare | erpnext/selling/doctype/product_bundle/product_bundle.py | 58 | 1444 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class ProductBundle(Document):
    """Frappe DocType controller for Product Bundle documents."""

    def autoname(self):
        # Bundle documents are named after their parent item code.
        self.name = self.new_item_code

    def validate(self):
        self.validate_main_item()
        self.validate_child_items()
        from erpnext.utilities.transaction_base import validate_uom_is_integer
        validate_uom_is_integer(self, "uom", "qty")

    def validate_main_item(self):
        """Validates, main Item is not a stock item"""
        if frappe.db.get_value("Item", self.new_item_code, "is_stock_item"):
            frappe.throw(_("Parent Item {0} must not be a Stock Item").format(self.new_item_code))

    def validate_child_items(self):
        # Nested bundles are rejected: a child may not itself be a bundle.
        for item in self.items:
            if frappe.db.exists("Product Bundle", item.item_code):
                frappe.throw(_("Child Item should not be a Product Bundle. Please remove item `{0}` and save").format(item.item_code))
def get_new_item_code(doctype, txt, searchfield, start, page_len, filters):
    """Autocomplete query: non-stock Items that are not already bundles."""
    from erpnext.controllers.queries import get_match_cond
    # NOTE(review): ``searchfield`` is interpolated directly into the SQL
    # string; presumably frappe sanitizes it upstream, but verify — otherwise
    # this is an injection risk. ``txt``/``start``/``page_len`` are passed as
    # bound parameters and are safe.
    return frappe.db.sql("""select name, item_name, description from tabItem
        where is_stock_item=0 and name not in (select name from `tabProduct Bundle`)
        and %s like %s %s limit %s, %s""" % (searchfield, "%s",
        get_match_cond(doctype),"%s", "%s"),
        ("%%%s%%" % txt, start, page_len))
| gpl-3.0 |
carolFrohlich/nipype | nipype/pipeline/plugins/sge.py | 1 | 18552 | # -*- coding: utf-8 -*-
"""Parallel workflow execution via SGE
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import object
import os
import pwd
import re
import subprocess
import time
import xml.dom.minidom
import random
from ...interfaces.base import CommandLine
from .base import (SGELikeBatchManagerBase, logger, iflogger, logging)
# Random 3-digit prefix so log lines from concurrent runs can be told apart.
DEBUGGING_PREFIX = str(int(random.uniform(100, 999)))


def sge_debug_print(message):
    """ Needed for debugging on big jobs. Once this is fully vetted, it can be removed.
    """
    logger.debug(DEBUGGING_PREFIX + " " + "=!" * 3 + " " + message)
    # print DEBUGGING_PREFIX + " " + "=!" * 3 + " " + message
class QJobInfo(object):
    """Information about a single job created by OGE/SGE or similar

    Each job is responsible for knowing it's own refresh state
    :author Hans J. Johnson
    """

    def __init__(self, job_num, job_queue_state, job_time, job_queue_name, job_slots, qsub_command_line):
        # self._jobName = None # Ascii text name of job not unique
        # The primary unique identifier for this job, must be an integer!
        self._job_num = int(job_num)
        # self._jobOwn = None # Who owns this job
        # Queue state, e.g. "initializing", "pending", "running", "zombie".
        self._job_queue_state = str(job_queue_state)
        self._job_time = job_time  # The job start time
        # When this job was created (for comparing against initalization)
        self._job_info_creation_time = time.time()
        self._job_queue_name = job_queue_name  # Where the job is running
        # How many slots are being used; may arrive as an int (startup) or a
        # string parsed from qstat XML (update_info).
        self._job_slots = job_slots
        self._qsub_command_line = qsub_command_line

    def __repr__(self):
        # BUG FIX: this previously referenced self._queue_state, which never
        # exists (AttributeError on any repr); the attribute is
        # _job_queue_state.  The slots field also used the "d" (int-only)
        # format, which failed once update_info stored the XML string value,
        # so it is formatted generically here.
        return '{:<8d}{:12}{:<3}{:20}{:8}{}'.format(
            self._job_num, self._job_queue_state, self._job_slots,
            time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(self._job_time)),
            self._job_queue_name, self._qsub_command_line)

    def is_initializing(self):
        return self._job_queue_state == "initializing"

    def is_zombie(self):
        return self._job_queue_state == "zombie" or self._job_queue_state == "finished"

    def is_running(self):
        return self._job_queue_state == "running"

    def is_pending(self):
        return self._job_queue_state == "pending"

    def is_job_state_pending(self):
        """ Return True, unless job is in the "zombie" status
        """
        time_diff = (time.time() - self._job_info_creation_time)
        if self.is_zombie():
            sge_debug_print(
                "DONE! QJobInfo.IsPending found in 'zombie' list, returning False so claiming done!\n{0}".format(self))
            is_pending_status = False  # Job explicitly found as being completed!
        elif self.is_initializing() and (time_diff > 600):
            # if initializing for more than 5 minute, failure due to
            # initialization and completion before registration
            # NOTE(review): the message says "returning False" but this branch
            # returns True (still pending) -- confirm intended behavior.
            sge_debug_print(
                "FAILURE! QJobInfo.IsPending found long running at {1} seconds"
                "'initializing' returning False for to break loop!\n{0}".format(self, time_diff))
            is_pending_status = True  # Job initialization took too long, so report!
        else:  # self.is_running() || self.is_pending():
            is_pending_status = True  # Job cache last listed as running
        return is_pending_status  # The job is in one of the hold states

    def update_info(self, job_queue_state, job_time, job_queue_name, job_slots):
        """Refresh the mutable fields from a fresh qstat parse."""
        self._job_queue_state = job_queue_state
        self._job_time = job_time
        self._job_queue_name = job_queue_name
        self._job_slots = job_slots

    def set_state(self, new_state):
        self._job_queue_state = new_state
class QstatSubstitute(object):
    """A wrapper for Qstat to avoid overloading the
    SGE/OGS server with rapid continuous qstat requests"""

    def __init__(self, qstat_instant_executable='qstat', qstat_cached_executable='qstat'):
        """
        :param qstat_instant_executable: qstat command used for forced lookups
        :param qstat_cached_executable: qstat command used for cached lookups
        """
        self._qstat_instant_executable = qstat_instant_executable
        self._qstat_cached_executable = qstat_cached_executable
        self._out_of_scope_jobs = list()  # Initialize first
        # {'taskid': QJobInfo(), .... } The dictionaryObject
        self._task_dictionary = dict()
        self._remove_old_jobs()

    def _remove_old_jobs(self):
        """ This is only called during initialization of the function for the purpose
        of identifying jobs that are not part of this run of nipype. They
        are jobs that existed prior to starting a new jobs, so they are irrelevant.
        """
        self._run_qstat("QstatInitialization", True)
        # If qstat does not exist on this system, then quietly
        # fail during init

    def add_startup_job(self, taskid, qsub_command_line):
        """
        :param taskid: The job id
        :param qsub_command_line: When initializing, re-use the job_queue_name
        :return: NONE
        """
        taskid = int(taskid)  # Ensure that it is an integer
        self._task_dictionary[taskid] = QJobInfo(taskid, "initializing", time.time(),
                                                 "noQueue", 1, qsub_command_line)

    @staticmethod
    def _qacct_verified_complete(taskid):
        """ request definitive job completion information for the current job
        from the qacct report
        """
        sge_debug_print("WARNING: "
                        "CONTACTING qacct for finished jobs, "
                        "{0}: {1}".format(time.time(), "Verifying Completion"))
        this_command = 'qacct'
        qacct_retries = 10
        is_complete = False
        while qacct_retries > 0:
            qacct_retries -= 1
            try:
                proc = subprocess.Popen(
                    [this_command, '-o', pwd.getpwuid(os.getuid())[0], '-j', str(taskid)],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                qacct_result, _ = proc.communicate()
                # NOTE(review): str.find() returns -1 when the id is absent,
                # and -1 is truthy, so this marks the job complete unless the
                # id appears at offset 0 -- likely meant ``!= -1``; confirm.
                if qacct_result.find(str(taskid)):
                    is_complete = True
                    sge_debug_print(
                        "NOTE: qacct for jobs\n{0}".format(qacct_result))
                    break
            except:
                sge_debug_print("NOTE: qacct call failed")
                time.sleep(5)
                pass
        return is_complete

    def _parse_qstat_job_list(self, xml_job_list):
        # Refresh the task dictionary from qstat's XML <job_list> elements.
        current_jobs_parsed = list()
        for current_job_element in xml_job_list:
            # jobname = current_job_element.getElementsByTagName('JB_name')[0].childNodes[0].data
            # jobown =
            # current_job_element.getElementsByTagName('JB_owner')[0].childNodes[0].data
            try:
                job_queue_name = current_job_element.getElementsByTagName(
                    'queue_name')[0].childNodes[0].data
            except:
                job_queue_name = "unknown"
            try:
                job_slots = current_job_element.getElementsByTagName(
                    'slots')[0].childNodes[0].data
            except:
                job_slots = "unknown"
            job_queue_state = current_job_element.getAttribute('state')
            job_num = int(current_job_element.getElementsByTagName(
                'JB_job_number')[0].childNodes[0].data)
            try:
                job_time_text = current_job_element.getElementsByTagName(
                    'JAT_start_time')[0].childNodes[0].data
                job_time = float(time.mktime(time.strptime(
                    job_time_text, "%Y-%m-%dT%H:%M:%S")))
            except:
                job_time = float(0.0)
            # Make job entry
            task_id = int(job_num)
            if task_id in self._task_dictionary:
                self._task_dictionary[task_id].update_info(
                    job_queue_state, job_time, job_queue_name, job_slots)
                sge_debug_print("Updating job: {0}".format(
                    self._task_dictionary[task_id]))
                current_jobs_parsed.append(task_id)
                # Changed from job_num as "in" is used to check which does not cast
            else:
                # Any Job that was not explicitly added with qsub command is
                # out of scope
                self._out_of_scope_jobs.append(task_id)
        # To ensure that every job is in the dictionary has a state reported
        # by the SGE environment, it is necessary to explicitly check jobs
        # that are not reported by the qstat command to determine if they
        # were started and finished, and pushed out of the window of review
        # before their state being recorded. The qacct command is slower, but
        # much more robust for ensuring that a job has completed.
        for dictionary_job in list(self._task_dictionary.keys()):
            if dictionary_job not in current_jobs_parsed:
                is_completed = self._qacct_verified_complete(dictionary_job)
                if is_completed:
                    self._task_dictionary[dictionary_job].set_state("zombie")
                else:
                    sge_debug_print("ERROR: Job not in current parselist, "
                                    "and not in done list {0}: {1}".format(dictionary_job,
                                                                           self._task_dictionary[dictionary_job]))
                    pass
            if self._task_dictionary[dictionary_job].is_initializing():
                is_completed = self._qacct_verified_complete(dictionary_job)
                if is_completed:
                    self._task_dictionary[dictionary_job].set_state("zombie")
                else:
                    sge_debug_print("ERROR: Job not in still in intializing mode, "
                                    "and not in done list {0}: {1}".format(dictionary_job,
                                                                           self._task_dictionary[dictionary_job]))
                    pass

    def _run_qstat(self, reason_for_qstat, force_instant=True):
        """ request all job information for the current user in xmlformat.
        See documentation from java documentation:
        http://arc.liv.ac.uk/SGE/javadocs/jgdi/com/sun/grid/jgdi/monitoring/filter/JobStateFilter.html
        -s r gives running jobs
        -s z gives recently completed jobs (**recently** is very ambiguous)
        -s s suspended jobs
        """
        sge_debug_print("WARNING: CONTACTING qmaster for jobs, "
                        "{0}: {1}".format(time.time(), reason_for_qstat))
        if force_instant:
            this_command = self._qstat_instant_executable
        else:
            this_command = self._qstat_cached_executable
        qstat_retries = 10
        while qstat_retries > 0:
            qstat_retries -= 1
            try:
                proc = subprocess.Popen(
                    [this_command, '-u', pwd.getpwuid(os.getuid())[0], '-xml', '-s', 'psrz'],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                qstat_xml_result, _ = proc.communicate()
                dom = xml.dom.minidom.parseString(qstat_xml_result)
                jobs = dom.getElementsByTagName('job_info')
                run = jobs[0]
                runjobs = run.getElementsByTagName('job_list')
                self._parse_qstat_job_list(runjobs)
                break
            except Exception as inst:
                exception_message = "QstatParsingError:\n\t{0}\n\t{1}\n".format(
                    type(inst),  # the exception instance
                    inst  # __str__ allows args to printed directly
                )
                sge_debug_print(exception_message)
                time.sleep(5)
                pass

    def print_dictionary(self):
        """For debugging"""
        for vv in list(self._task_dictionary.values()):
            sge_debug_print(str(vv))

    def is_job_pending(self, task_id):
        # Return True while the job is still queued/running; once a job is
        # found finished it is removed from the task dictionary.
        task_id = int(task_id)  # Ensure that it is an integer
        # Check if the task is in the dictionary first (before running qstat)
        if task_id in self._task_dictionary:
            # Trust the cache, only False if state='zombie'
            job_is_pending = self._task_dictionary[task_id].is_job_state_pending()
            # Double check pending jobs in case of change (since we don't check at the beginning)
            if job_is_pending:
                self._run_qstat("checking job pending status {0}".format(task_id), False)
                job_is_pending = self._task_dictionary[task_id].is_job_state_pending()
        else:
            self._run_qstat("checking job pending status {0}".format(task_id), True)
            if task_id in self._task_dictionary:
                # Trust the cache, only False if state='zombie'
                job_is_pending = self._task_dictionary[task_id].is_job_state_pending()
            else:
                sge_debug_print("ERROR: Job {0} not in task list, "
                                "even after forced qstat!".format(task_id))
                job_is_pending = False
        if not job_is_pending:
            sge_debug_print("DONE! Returning for {0} claiming done!".format(task_id))
            if task_id in self._task_dictionary:
                sge_debug_print("NOTE: Adding {0} to OutOfScopeJobs list!".format(task_id))
                self._out_of_scope_jobs.append(int(task_id))
                self._task_dictionary.pop(task_id)
            else:
                sge_debug_print("ERROR: Job {0} not in task list, "
                                "but attempted to be removed!".format(task_id))
        return job_is_pending
def qsub_sanitize_job_name(testjobname):
    """ Ensure that qsub job names must begin with a letter.
    Numbers and punctuation are not allowed.

    >>> qsub_sanitize_job_name('01') # doctest: +IGNORE_UNICODE
    'J01'
    >>> qsub_sanitize_job_name('a01') # doctest: +IGNORE_UNICODE
    'a01'
    """
    # Robustness fix: an empty name previously raised IndexError on
    # testjobname[0]; treat it like any other non-alphabetic start.
    if testjobname and testjobname[0].isalpha():
        return testjobname
    return 'J' + testjobname
class SGEPlugin(SGELikeBatchManagerBase):
    """Execute using SGE (OGE not tested).

    The ``plugin_args`` input to ``run`` can be used to control the SGE
    execution.  Currently supported options are:

    - template : template to use for batch job submission
    - qsub_args : arguments to be prepended to the job execution script in
      the qsub call
    - retry_timeout / max_tries : qsub retry behavior
    - qstatProgramPath / qstatCachedProgramPath : qstat executables used by
      the pending-job cache
    """
    def __init__(self, **kwargs):
        # Minimal submission-script header: -V exports the environment,
        # -S selects the shell.  Kept at column 0 because it is written
        # verbatim into the generated qsub script.
        template = """
#$ -V
#$ -S /bin/sh
"""
        # Defaults; each may be overridden via plugin_args below.
        self._retry_timeout = 2
        self._max_tries = 2
        instant_qstat = 'qstat'
        cached_qstat = 'qstat'
        if 'plugin_args' in kwargs and kwargs['plugin_args']:
            if 'retry_timeout' in kwargs['plugin_args']:
                self._retry_timeout = kwargs['plugin_args']['retry_timeout']
            if 'max_tries' in kwargs['plugin_args']:
                self._max_tries = kwargs['plugin_args']['max_tries']
            if 'qstatProgramPath' in kwargs['plugin_args']:
                instant_qstat = kwargs['plugin_args']['qstatProgramPath']
            if 'qstatCachedProgramPath' in kwargs['plugin_args']:
                cached_qstat = kwargs['plugin_args']['qstatCachedProgramPath']
        # Caching wrapper around qstat so polling does not fork a process
        # for every pending-state query.
        self._refQstatSubstitute = QstatSubstitute(instant_qstat, cached_qstat)
        super(SGEPlugin, self).__init__(template, **kwargs)
    def _is_pending(self, taskid):
        # Delegate to the qstat cache rather than shelling out on each poll.
        return self._refQstatSubstitute.is_job_pending(int(taskid))
    def _submit_batchtask(self, scriptfile, node):
        """Submit ``scriptfile`` for ``node`` via qsub; return the SGE task id.

        Retries the qsub call up to ``self._max_tries`` times, then parses the
        job id out of qsub's stdout and registers it with the qstat cache.
        """
        cmd = CommandLine('qsub', environ=dict(os.environ),
                          terminal_output='allatonce')
        path = os.path.dirname(scriptfile)
        qsubargs = ''
        if self._qsub_args:
            qsubargs = self._qsub_args
        # Node-level qsub_args either replace or extend the plugin-level ones.
        if 'qsub_args' in node.plugin_args:
            if 'overwrite' in node.plugin_args and \
               node.plugin_args['overwrite']:
                qsubargs = node.plugin_args['qsub_args']
            else:
                qsubargs += (" " + node.plugin_args['qsub_args'])
        # Default stdout/stderr locations next to the script, unless the
        # caller already chose them.
        if '-o' not in qsubargs:
            qsubargs = '%s -o %s' % (qsubargs, path)
        if '-e' not in qsubargs:
            qsubargs = '%s -e %s' % (qsubargs, path)
        if node._hierarchy:
            jobname = '.'.join((dict(os.environ)['LOGNAME'],
                                node._hierarchy,
                                node._id))
        else:
            jobname = '.'.join((dict(os.environ)['LOGNAME'],
                                node._id))
        # Reverse the dotted components so the most specific part leads,
        # then sanitize (qsub job names must start with a letter).
        jobnameitems = jobname.split('.')
        jobnameitems.reverse()
        jobname = '.'.join(jobnameitems)
        jobname = qsub_sanitize_job_name(jobname)
        cmd.inputs.args = '%s -N %s %s' % (qsubargs,
                                           jobname,
                                           scriptfile)
        # Silence interface logging while retrying; restored below.
        oldlevel = iflogger.level
        iflogger.setLevel(logging.getLevelName('CRITICAL'))
        tries = 0
        result = list()
        while True:
            try:
                result = cmd.run()
            except Exception as e:
                if tries < self._max_tries:
                    tries += 1
                    time.sleep(
                        self._retry_timeout)  # sleep 2 seconds and try again.
                else:
                    iflogger.setLevel(oldlevel)
                    raise RuntimeError('\n'.join((('Could not submit sge task'
                                                   ' for node %s') % node._id,
                                                  str(e))))
            else:
                break
        iflogger.setLevel(oldlevel)
        # retrieve sge taskid from the last non-empty stdout line, e.g.
        # "Your job 12345 (...) has been submitted"
        lines = [line for line in result.runtime.stdout.split('\n') if line]
        taskid = int(re.match("Your job ([0-9]*) .* has been submitted",
                              lines[-1]).groups()[0])
        self._pending[taskid] = node.output_dir()
        self._refQstatSubstitute.add_startup_job(taskid, cmd.cmdline)
        logger.debug('submitted sge task: %d for node %s with %s' %
                     (taskid, node._id, cmd.cmdline))
        return taskid
| bsd-3-clause |
gminds/rapidnewsng | django/core/cache/backends/db.py | 102 | 7648 | "Database cache backend."
import base64
import time
from datetime import datetime
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.db import connections, router, transaction, DatabaseError
from django.utils import timezone, six
from django.utils.encoding import force_bytes
class Options(object):
    """Stand-in that quacks like a Django model ``_meta`` class.

    Database cache operations bypass the ORM, but the database router still
    expects a model-ish object with ``_meta`` attributes; this supplies them.
    """
    def __init__(self, table):
        """Record *table* plus the static metadata the router inspects."""
        # Only the table name varies per cache; everything else is fixed.
        self.db_table = table
        fixed_meta = dict(
            app_label='django_cache',
            module_name='cacheentry',
            verbose_name='cache entry',
            verbose_name_plural='cache entries',
            object_name='CacheEntry',
            abstract=False,
            managed=True,
            proxy=False,
        )
        for attr_name, attr_value in fixed_meta.items():
            setattr(self, attr_name, attr_value)
class BaseDatabaseCache(BaseCache):
    """Shared base for database-backed caches.

    Stores the cache table name and fabricates a minimal model-like class so
    ``django.db.router`` can make read/write decisions for this table.
    """
    def __init__(self, table, params):
        BaseCache.__init__(self, params)
        self._table = table
        # Dynamically built stand-in for a model class; its ``_meta`` quacks
        # like a real model's thanks to Options.
        self.cache_model_class = type(
            str('CacheEntry'), (object,), {'_meta': Options(table)})
class DatabaseCache(BaseDatabaseCache):
    """Cache backend storing entries as base64-encoded pickles in a DB table.

    Reads use the router's read connection; writes (including lazy deletion
    of expired rows and culling) use the write connection.
    """
    # This class uses cursors provided by the database connection. This means
    # it reads expiration values as aware or naive datetimes depending on the
    # value of USE_TZ. They must be compared to aware or naive representations
    # of "now" respectively.
    # But it bypasses the ORM for write operations. As a consequence, aware
    # datetimes aren't made naive for databases that don't support time zones.
    # We work around this problem by always using naive datetimes when writing
    # expiration values, in UTC when USE_TZ = True and in local time otherwise.
    def get(self, key, default=None, version=None):
        """Return the cached value for ``key``, or ``default`` if absent/expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()
        cursor.execute("SELECT cache_key, value, expires FROM %s "
                       "WHERE cache_key = %%s" % table, [key])
        row = cursor.fetchone()
        if row is None:
            return default
        now = timezone.now()
        if row[2] < now:
            # Expired entry: delete it lazily (via the write DB) and treat
            # the lookup as a miss.
            db = router.db_for_write(self.cache_model_class)
            cursor = connections[db].cursor()
            cursor.execute("DELETE FROM %s "
                           "WHERE cache_key = %%s" % table, [key])
            transaction.commit_unless_managed(using=db)
            return default
        # Values are stored as base64-encoded pickles (see _base_set).
        value = connections[db].ops.process_clob(row[1])
        return pickle.loads(base64.b64decode(force_bytes(value)))
    def set(self, key, value, timeout=None, version=None):
        """Unconditionally store ``value`` under ``key``."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        self._base_set('set', key, value, timeout)
    def add(self, key, value, timeout=None, version=None):
        """Store ``value`` only if ``key`` is absent or expired; return success."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return self._base_set('add', key, value, timeout)
    def _base_set(self, mode, key, value, timeout=None):
        # Shared implementation of set() and add(); ``mode`` selects whether
        # an unexpired existing row may be overwritten.
        if timeout is None:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()
        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = timezone.now()
        now = now.replace(microsecond=0)
        # Expiration is always written as a *naive* datetime (see class note).
        if settings.USE_TZ:
            exp = datetime.utcfromtimestamp(time.time() + timeout)
        else:
            exp = datetime.fromtimestamp(time.time() + timeout)
        exp = exp.replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        b64encoded = base64.b64encode(pickled)
        # The DB column is expecting a string, so make sure the value is a
        # string, not bytes. Refs #19274.
        if six.PY3:
            b64encoded = b64encoded.decode('latin1')
        cursor.execute("SELECT cache_key, expires FROM %s "
                       "WHERE cache_key = %%s" % table, [key])
        try:
            result = cursor.fetchone()
            # 'set' always overwrites; 'add' overwrites only expired rows.
            if result and (mode == 'set' or
                    (mode == 'add' and result[1] < now)):
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                               "WHERE cache_key = %%s" % table,
                               [b64encoded, connections[db].ops.value_to_db_datetime(exp), key])
            else:
                cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                               "VALUES (%%s, %%s, %%s)" % table,
                               [key, b64encoded, connections[db].ops.value_to_db_datetime(exp)])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            transaction.rollback_unless_managed(using=db)
            return False
        else:
            transaction.commit_unless_managed(using=db)
            return True
    def delete(self, key, version=None):
        """Remove ``key`` from the cache; silently does nothing if absent."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()
        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
        transaction.commit_unless_managed(using=db)
    def has_key(self, key, version=None):
        """Return True if ``key`` exists and has not yet expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()
        # Compare against naive "now" matching how expirations were written.
        if settings.USE_TZ:
            now = datetime.utcnow()
        else:
            now = datetime.now()
        now = now.replace(microsecond=0)
        cursor.execute("SELECT cache_key FROM %s "
                       "WHERE cache_key = %%s and expires > %%s" % table,
                       [key, connections[db].ops.value_to_db_datetime(now)])
        return cursor.fetchone() is not None
    def _cull(self, db, cursor, now):
        # Shrink the table when it exceeds _max_entries: first drop expired
        # rows, then delete roughly 1/_cull_frequency of what remains.
        if self._cull_frequency == 0:
            self.clear()
        else:
            # When USE_TZ is True, 'now' will be an aware datetime in UTC.
            now = now.replace(tzinfo=None)
            table = connections[db].ops.quote_name(self._table)
            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
                           [connections[db].ops.value_to_db_datetime(now)])
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            if num > self._max_entries:
                cull_num = num // self._cull_frequency
                # cache_key_culling_sql() selects the key at offset cull_num;
                # everything ordered before it is deleted.
                cursor.execute(
                    connections[db].ops.cache_key_culling_sql() % table,
                    [cull_num])
                cursor.execute("DELETE FROM %s "
                               "WHERE cache_key < %%s" % table,
                               [cursor.fetchone()[0]])
    def clear(self):
        """Delete every entry in the cache table."""
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()
        cursor.execute('DELETE FROM %s' % table)
# For backwards compatibility
class CacheClass(DatabaseCache):
    # Deprecated alias kept so old-style CACHE_BACKEND settings that
    # reference CacheClass keep working.
    pass
| bsd-3-clause |
xindaya/bosen | app/sparsecoding/script/merge_data.py | 15 | 4103 | #!/usr/bin/python
# Python 2 script (uses print statements): merges row-partitioned S matrices
# (S*.client_id files) produced by Petuum Sparse Coding into one file.
# Rows are interleaved round-robin: global row j belongs to client j % num_clients.
import os, re, glob, sys
if len(sys.argv) < 5:
    # Too few arguments: print usage and exit non-zero.
    print 'Merge partitioned matrix data Si.txt generated by the Petuum Sparse Coding application'
    print ''
    print 'Usage: python %s <data-dirname> <data-format> <dictionary-size> <n> [<output-dirname>]' % sys.argv[0]
    print ''
    print 'Output: 1 merged file in directory specified in <output-dirname>.'
    print ''
    print 'Params:'
    print ('<data-dirname>: The directory containing S matrices generated by Petuum Sparse Coding application, '
           'in which the name of S matrices must be of the form S*.client_id')
    print '<data-format>: Format of the matrix data. Can be "binary" or "text".'
    print '<dictionary-size>: Number of columns in S matrix. It shall be the dictionary size in Sparse Coding.'
    print '<n>: Number of rows in unpartitioned matrix. It shall be the size of unpartitioned data.'
    print '<output-dirname>: Optional. The directory to put the output files. Default value is the working directory.'
    print ''
    sys.exit(1)
# Positional arguments: m = columns per row, n = total rows across clients.
data_dirname = sys.argv[1]
data_format = sys.argv[2]
m = int(sys.argv[3])
n = int(sys.argv[4])
if len(sys.argv) < 6:
    output_dirname = os.getcwd()
else:
    output_dirname = os.path.realpath(sys.argv[5])
if not os.path.exists(data_dirname):
    print 'Directory', data_dirname, 'does not exist!'
    sys.exit(1)
if not os.path.exists(output_dirname):
    print 'Directory', output_dirname, 'does not exist!'
    sys.exit(1)
# Index every file named like S<base>.<client_id> by its integer client id.
fname_list = glob.glob(os.path.join(data_dirname, '*'))
flist = {}
for file in fname_list:
    fname_base = os.path.basename(file)
    m_re = re.match(r'(?P<base_name>S.*?)\.(?P<client_id>[0-9]+)', fname_base)
    if m_re:
        flist[int(m_re.group('client_id'))] = {}
        flist[int(m_re.group('client_id'))]['file_name'] = file
        base_name = m_re.group('base_name')
if len(flist) == 0:
    print ('No file matching S*.client_id found, make sure the names of files '
           'to be merged is in the form of S*.client_id!')
    sys.exit(1)
# Merged output keeps the shared base name, written to the output directory.
file_basename = os.path.join(data_dirname, base_name)
output_file = os.path.join(output_dirname, base_name)
if data_format == 'binary':
    with open(output_file, 'wb') as f:
        clients = flist.keys()
        client_id_max = max(clients)
        num_clients = client_id_max + 1
        for client_id in range(0, num_clients):
            if client_id not in flist:
                # NOTE(review): the next two lines were probably meant to be a
                # single print statement; as written the second line is a
                # no-op tuple expression and its text is never printed
                # (missing a line-continuation backslash).
                print 'A complete list of files Si.txt shall exist!',
                (file_basename + '.' + str(client_id)), 'does not exist!'
                sys.exit(1)
            flist[client_id]['file'] = open(flist[client_id]['file_name'], 'rb')
        # Binary values are fixed-width 4-byte records; copy m per row.
        for j in range(0, n):
            client_id = j % num_clients
            for i in range(0, m):
                f.write(flist[client_id]['file'].read(4))
elif data_format == 'text':
    with open(output_file, 'w') as f:
        clients = flist.keys()
        client_id_max = max(clients)
        num_clients = client_id_max + 1
        for client_id in range(0, num_clients):
            if client_id not in flist:
                # NOTE(review): same broken print continuation as in the
                # binary branch above; the second line never prints.
                print 'A complete list of files Si.txt shall exist!',
                (file_basename + '.' + str(client_id)), 'does not exist!'
                sys.exit(1)
            # Per-client read state: current tokenized line and a cursor.
            flist[client_id]['file'] = open(flist[client_id]['file_name'], 'r')
            flist[client_id]['line_buffer'] = flist[client_id]['file'].readline().split()
            flist[client_id]['line_pointer'] = 0
        for j in range(0, n):
            client_id = j % num_clients
            for i in range(0, m):
                # Refill the token buffer from the next line when exhausted.
                if not flist[client_id]['line_pointer'] < len(flist[client_id]['line_buffer']):
                    flist[client_id]['line_buffer'] = flist[client_id]['file'].readline().split()
                    flist[client_id]['line_pointer'] = 0
                f.write(str(flist[client_id]['line_buffer'][flist[client_id]['line_pointer']]) + '\t')
                flist[client_id]['line_pointer'] = flist[client_id]['line_pointer'] + 1
            f.write('\n')
else:
    print 'Unrecognized data format:', data_format
| bsd-3-clause |
Mitali-Sodhi/CodeLingo | Dataset/python/test_services.py | 7 | 3420 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.v1_1 import services
from novaclient.tests.v1_1 import fakes
from novaclient.tests import utils
# Module-level fake HTTP client shared by all tests below; it records the
# calls made so tests can assert on method/URL/body.
cs = fakes.FakeClient()
class ServicesTest(utils.TestCase):
    """Tests for the os-services API bindings, driven by the fake client."""

    def _assert_services(self, svs, binary, host):
        # Helper replacing the former side-effect list comprehensions:
        # every returned object must be a Service with the expected
        # binary name and host.
        for svc in svs:
            self.assertIsInstance(svc, services.Service)
            self.assertEqual(svc.binary, binary)
            self.assertEqual(svc.host, host)

    def test_list_services(self):
        svs = cs.services.list()
        cs.assert_called('GET', '/os-services')
        self._assert_services(svs, 'nova-compute', 'host1')

    def test_list_services_with_hostname(self):
        svs = cs.services.list(host='host2')
        cs.assert_called('GET', '/os-services?host=host2')
        self._assert_services(svs, 'nova-compute', 'host2')

    def test_list_services_with_binary(self):
        svs = cs.services.list(binary='nova-cert')
        cs.assert_called('GET', '/os-services?binary=nova-cert')
        self._assert_services(svs, 'nova-cert', 'host1')

    def test_list_services_with_host_binary(self):
        svs = cs.services.list(host='host2', binary='nova-cert')
        cs.assert_called('GET', '/os-services?host=host2&binary=nova-cert')
        self._assert_services(svs, 'nova-cert', 'host2')

    def test_services_enable(self):
        service = cs.services.enable('host1', 'nova-cert')
        values = {"host": "host1", 'binary': 'nova-cert'}
        cs.assert_called('PUT', '/os-services/enable', values)
        self.assertIsInstance(service, services.Service)
        self.assertEqual(service.status, 'enabled')

    def test_services_disable(self):
        service = cs.services.disable('host1', 'nova-cert')
        values = {"host": "host1", 'binary': 'nova-cert'}
        cs.assert_called('PUT', '/os-services/disable', values)
        self.assertIsInstance(service, services.Service)
        self.assertEqual(service.status, 'disabled')

    def test_services_disable_log_reason(self):
        service = cs.services.disable_log_reason('compute1', 'nova-compute',
                                                 'disable bad host')
        values = {'host': 'compute1', 'binary': 'nova-compute',
                  'disabled_reason': 'disable bad host'}
        cs.assert_called('PUT', '/os-services/disable-log-reason', values)
        self.assertIsInstance(service, services.Service)
        self.assertEqual(service.status, 'disabled')
| mit |
zanderle/django | django/core/handlers/base.py | 234 | 13346 | from __future__ import unicode_literals
import logging
import sys
import types
import warnings
from django import http
from django.conf import settings
from django.core import signals, urlresolvers
from django.core.exceptions import (
MiddlewareNotUsed, PermissionDenied, SuspiciousOperation,
)
from django.db import connections, transaction
from django.http.multipartparser import MultiPartParserError
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.views import debug
# Logger used throughout request handling for 4xx warnings and 500 errors.
logger = logging.getLogger('django.request')
class BaseHandler(object):
    """Common machinery for concrete request handlers (e.g. the WSGI
    handler): middleware loading and the request -> response pipeline."""
    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.conditional_content_removal,
    ]
    def __init__(self):
        # All middleware lists start as None; load_middleware() fills them
        # in, and _request_middleware doubles as the "initialized" flag.
        self._request_middleware = None
        self._view_middleware = None
        self._template_response_middleware = None
        self._response_middleware = None
        self._exception_middleware = None
    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__ in subclasses).
        """
        self._view_middleware = []
        self._template_response_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        request_middleware = []
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            mw_class = import_string(middleware_path)
            try:
                mw_instance = mw_class()
            except MiddlewareNotUsed as exc:
                # Middleware may opt out at startup; in DEBUG, log why.
                if settings.DEBUG:
                    if six.text_type(exc):
                        logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
                    else:
                        logger.debug('MiddlewareNotUsed: %r', middleware_path)
                continue
            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            # Response-phase hooks run in *reverse* settings order, hence
            # insert(0, ...) instead of append.
            if hasattr(mw_instance, 'process_template_response'):
                self._template_response_middleware.insert(0, mw_instance.process_template_response)
            if hasattr(mw_instance, 'process_response'):
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware
    def make_view_atomic(self, view):
        """Wrap ``view`` in transaction.atomic() for every connection with
        ATOMIC_REQUESTS enabled, unless the view opted out for that alias."""
        non_atomic_requests = getattr(view, '_non_atomic_requests', set())
        for db in connections.all():
            if (db.settings_dict['ATOMIC_REQUESTS']
                    and db.alias not in non_atomic_requests):
                view = transaction.atomic(using=db.alias)(view)
        return view
    def get_exception_response(self, request, resolver, status_code, exception):
        """Resolve and invoke the configured error handler for ``status_code``."""
        try:
            callback, param_dict = resolver.resolve_error_handler(status_code)
            # Unfortunately, inspect.getargspec result is not trustable enough
            # depending on the callback wrapping in decorators (frequent for handlers).
            # Falling back on try/except:
            try:
                response = callback(request, **dict(param_dict, exception=exception))
            except TypeError:
                warnings.warn(
                    "Error handlers should accept an exception parameter. Update "
                    "your code as this parameter will be required in Django 2.0",
                    RemovedInDjango20Warning, stacklevel=2
                )
                response = callback(request, **param_dict)
        except:
            # Deliberately broad: if the error handler itself fails we must
            # still produce a response via the uncaught-exception path.
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        return response
    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        # Setup default url resolver for this thread, this code is outside
        # the try/except so we don't get a spurious "unbound local
        # variable" exception in the event an exception is raised before
        # resolver is set
        urlconf = settings.ROOT_URLCONF
        urlresolvers.set_urlconf(urlconf)
        resolver = urlresolvers.get_resolver(urlconf)
        # Use a flag to check if the response was rendered to prevent
        # multiple renderings or to force rendering if necessary.
        response_is_rendered = False
        try:
            response = None
            # Apply request middleware
            for middleware_method in self._request_middleware:
                response = middleware_method(request)
                if response:
                    break
            if response is None:
                if hasattr(request, 'urlconf'):
                    # Reset url resolver with a custom urlconf.
                    urlconf = request.urlconf
                    urlresolvers.set_urlconf(urlconf)
                    resolver = urlresolvers.get_resolver(urlconf)
                resolver_match = resolver.resolve(request.path_info)
                callback, callback_args, callback_kwargs = resolver_match
                request.resolver_match = resolver_match
                # Apply view middleware
                for middleware_method in self._view_middleware:
                    response = middleware_method(request, callback, callback_args, callback_kwargs)
                    if response:
                        break
            if response is None:
                wrapped_callback = self.make_view_atomic(callback)
                try:
                    response = wrapped_callback(request, *callback_args, **callback_kwargs)
                except Exception as e:
                    response = self.process_exception_by_middleware(e, request)
            # Complain if the view returned None (a common error).
            if response is None:
                if isinstance(callback, types.FunctionType):    # FBV
                    view_name = callback.__name__
                else:                                           # CBV
                    view_name = callback.__class__.__name__ + '.__call__'
                raise ValueError("The view %s.%s didn't return an HttpResponse object. It returned None instead."
                                 % (callback.__module__, view_name))
            # If the response supports deferred rendering, apply template
            # response middleware and then render the response
            if hasattr(response, 'render') and callable(response.render):
                for middleware_method in self._template_response_middleware:
                    response = middleware_method(request, response)
                    # Complain if the template response middleware returned None (a common error).
                    if response is None:
                        raise ValueError(
                            "%s.process_template_response didn't return an "
                            "HttpResponse object. It returned None instead."
                            % (middleware_method.__self__.__class__.__name__))
                try:
                    response = response.render()
                except Exception as e:
                    response = self.process_exception_by_middleware(e, request)
                response_is_rendered = True
        except http.Http404 as exc:
            logger.warning('Not Found: %s', request.path,
                           extra={
                               'status_code': 404,
                               'request': request
                           })
            if settings.DEBUG:
                response = debug.technical_404_response(request, exc)
            else:
                response = self.get_exception_response(request, resolver, 404, exc)
        except PermissionDenied as exc:
            logger.warning(
                'Forbidden (Permission denied): %s', request.path,
                extra={
                    'status_code': 403,
                    'request': request
                })
            response = self.get_exception_response(request, resolver, 403, exc)
        except MultiPartParserError as exc:
            logger.warning(
                'Bad request (Unable to parse request body): %s', request.path,
                extra={
                    'status_code': 400,
                    'request': request
                })
            response = self.get_exception_response(request, resolver, 400, exc)
        except SuspiciousOperation as exc:
            # The request logger receives events for any problematic request
            # The security logger receives events for all SuspiciousOperations
            security_logger = logging.getLogger('django.security.%s' %
                                                exc.__class__.__name__)
            security_logger.error(
                force_text(exc),
                extra={
                    'status_code': 400,
                    'request': request
                })
            if settings.DEBUG:
                return debug.technical_500_response(request, *sys.exc_info(), status_code=400)
            response = self.get_exception_response(request, resolver, 400, exc)
        except SystemExit:
            # Allow sys.exit() to actually exit. See tickets #1023 and #4701
            raise
        except:  # Handle everything else.
            # Get the exception info now, in case another exception is thrown later.
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        try:
            # Apply response middleware, regardless of the response
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
                # Complain if the response middleware returned None (a common error).
                if response is None:
                    raise ValueError(
                        "%s.process_response didn't return an "
                        "HttpResponse object. It returned None instead."
                        % (middleware_method.__self__.__class__.__name__))
            response = self.apply_response_fixes(request, response)
        except:  # Any exception should be gathered and handled
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        response._closable_objects.append(request)
        # If the exception handler returns a TemplateResponse that has not
        # been rendered, force it to be rendered.
        if not response_is_rendered and callable(getattr(response, 'render', None)):
            response = response.render()
        return response
    def process_exception_by_middleware(self, exception, request):
        """
        Pass the exception to the exception middleware. If no middleware
        return a response for this exception, raise it.
        """
        for middleware_method in self._exception_middleware:
            response = middleware_method(request, exception)
            if response:
                return response
        # Bare raise re-raises the exception currently being handled; this
        # method is only ever called from inside an ``except`` block.
        raise
    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.
        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            raise
        logger.error('Internal Server Error: %s', request.path,
            exc_info=exc_info,
            extra={
                'status_code': 500,
                'request': request
            }
        )
        if settings.DEBUG:
            return debug.technical_500_response(request, *exc_info)
        # If Http500 handler is not installed, re-raise last exception
        if resolver.urlconf_module is None:
            six.reraise(*exc_info)
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve_error_handler(500)
        return callback(request, **param_dict)
    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
| bsd-3-clause |
vribeiro1/ia896_2s2016 | mnist/logistic_regression.py | 1 | 1232 | import numpy as np
import theano
import theano.tensor as T
class LogisticRegression(object):
    """Multi-class logistic regression (softmax) output layer for Theano.

    Holds shared parameters ``W`` (n_in x n_out) and ``b`` (n_out) plus
    symbolic expressions for class probabilities and hard predictions.
    """
    def __init__(self, input_, n_in, n_out):
        # Parameters start at zero; borrow=True lets Theano wrap the numpy
        # buffers directly instead of copying them.
        self.W = theano.shared(
            value=np.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )
        self.b = theano.shared(
            value=np.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )
        # Symbolic class-probability matrix and per-row argmax prediction.
        self.p_y_given_x = T.nnet.softmax(T.dot(input_, self.W) + self.b)
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        self.params = [self.W, self.b]
        self.input = input_
    def negative_log_likelihood(self, y):
        # Mean negative log-probability assigned to the correct labels ``y``
        # (fancy indexing picks p(y_i | x_i) for each row i).
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
    def errors(self, y):
        """Symbolic mean misclassification rate of ``y_pred`` against ``y``."""
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # Only integer label vectors are supported.
        if y.dtype.startswith('int'):
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
| gpl-3.0 |
dulems/hue | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_datastructures.py | 46 | 11075 | """
Tests for stuff in django.utils.datastructures.
"""
import copy
import pickle
import warnings
from django.test import SimpleTestCase
from django.utils.datastructures import (DictWrapper, ImmutableList,
MultiValueDict, MultiValueDictKeyError, MergeDict, SortedDict)
from django.utils import six
class SortedDictTests(SimpleTestCase):
    """Tests for SortedDict, the insertion-ordered dict (pre-OrderedDict)."""
    def setUp(self):
        # Two fixtures whose insertion order is deliberately not key order.
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'
        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'
    def test_basic_methods(self):
        # Iteration follows insertion order, not key ordering.
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9])
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'one', 'nine'])
        self.assertEqual(list(six.iteritems(self.d1)), [(7, 'seven'), (1, 'one'), (9, 'nine')])
    def test_overwrite_ordering(self):
        """ Overwriting an item keeps its place. """
        self.d1[1] = 'ONE'
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'ONE', 'nine'])
    def test_append_items(self):
        """ New items go to the end. """
        self.d1[0] = 'nil'
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9, 0])
    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0])
        self.d2[7] = 'lucky number 7'
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0, 7])
    # Defined conditionally: list-returning keys() only exists on Python 2.
    if six.PY2:
        def test_change_keys(self):
            """
            Changing the keys won't do anything, it's only a copy of the
            keys dict.
            This test doesn't make sense under Python 3 because keys is
            an iterator.
            """
            k = self.d2.keys()
            k.remove(9)
            self.assertEqual(self.d2.keys(), [1, 9, 0, 7])
    def test_init_keys(self):
        """
        Initialising a SortedDict with two keys will just take the first one.
        A real dict will actually take the second value so we will too, but
        we'll keep the ordering from the first key found.
        """
        tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
        d = SortedDict(tuples)
        self.assertEqual(list(six.iterkeys(d)), [2, 1])
        real_dict = dict(tuples)
        self.assertEqual(sorted(six.itervalues(real_dict)), ['one', 'second-two'])
        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(list(six.itervalues(d)), ['second-two', 'one'])
    def test_overwrite(self):
        self.d1[1] = 'not one'
        self.assertEqual(self.d1[1], 'not one')
        # Copies preserve the key ordering.
        self.assertEqual(list(six.iterkeys(self.d1)), list(six.iterkeys(self.d1.copy())))
    def test_append(self):
        self.d1[13] = 'thirteen'
        self.assertEqual(
            repr(self.d1),
            "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}"
        )
    def test_pop(self):
        self.assertEqual(self.d1.pop(1, 'missing'), 'one')
        self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)
    def test_dict_equality(self):
        d = SortedDict((i, i) for i in range(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})
    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")
    def test_pickle(self):
        # Protocol 2 round-trip must preserve contents.
        self.assertEqual(
            pickle.loads(pickle.dumps(self.d1, 2)),
            {7: 'seven', 1: 'one', 9: 'nine'}
        )
    def test_copy(self):
        orig = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        copied = copy.copy(orig)
        self.assertEqual(list(six.iterkeys(orig)), [1, 0, 2])
        self.assertEqual(list(six.iterkeys(copied)), [1, 0, 2])
    def test_clear(self):
        self.d1.clear()
        self.assertEqual(self.d1, {})
        self.assertEqual(self.d1.keyOrder, [])
    def test_reversed(self):
        self.assertEqual(list(self.d1), [7, 1, 9])
        self.assertEqual(list(self.d2), [1, 9, 0, 7])
        self.assertEqual(list(reversed(self.d1)), [9, 1, 7])
        self.assertEqual(list(reversed(self.d2)), [7, 0, 9, 1])
    def test_insert(self):
        # insert() is deprecated and must warn.
        d = SortedDict()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            d.insert(0, "hello", "world")
        assert w[0].category is DeprecationWarning
    def test_value_for_index(self):
        # value_for_index() is deprecated and must warn.
        d = SortedDict({"a": 3})
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.assertEqual(d.value_for_index(0), 3)
        assert w[0].category is DeprecationWarning
class MergeDictTests(SimpleTestCase):
    """Tests for MergeDict, a read-only view over several dicts where the
    first dict containing a key wins."""
    def test_simple_mergedict(self):
        d1 = {'chris':'cool', 'camri':'cute', 'cotton':'adorable',
              'tulip':'snuggable', 'twoofme':'firstone'}
        d2 = {'chris2':'cool2', 'camri2':'cute2', 'cotton2':'adorable2',
              'tulip2':'snuggable2'}
        d3 = {'chris3':'cool3', 'camri3':'cute3', 'cotton3':'adorable3',
              'tulip3':'snuggable3'}
        d4 = {'twoofme': 'secondone'}
        md = MergeDict(d1, d2, d3)
        self.assertEqual(md['chris'], 'cool')
        self.assertEqual(md['camri'], 'cute')
        # d1 comes first, so its value wins over any later dict's.
        self.assertEqual(md['twoofme'], 'firstone')
        md2 = md.copy()
        self.assertEqual(md2['chris'], 'cool')
    def test_mergedict_merges_multivaluedict(self):
        """ MergeDict can merge MultiValueDicts """
        multi1 = MultiValueDict({'key1': ['value1'],
                                 'key2': ['value2', 'value3']})
        multi2 = MultiValueDict({'key2': ['value4'],
                                 'key4': ['value5', 'value6']})
        mm = MergeDict(multi1, multi2)
        # Although 'key2' appears in both dictionaries,
        # only the first value is used.
        self.assertEqual(mm.getlist('key2'), ['value2', 'value3'])
        self.assertEqual(mm.getlist('key4'), ['value5', 'value6'])
        self.assertEqual(mm.getlist('undefined'), [])
        self.assertEqual(sorted(six.iterkeys(mm)), ['key1', 'key2', 'key4'])
        self.assertEqual(len(list(six.itervalues(mm))), 3)
        self.assertTrue('value1' in six.itervalues(mm))
        # iteritems() yields the *last* value of each list (MultiValueDict
        # semantics), while getlist() exposes the full lists.
        self.assertEqual(sorted(six.iteritems(mm), key=lambda k: k[0]),
                         [('key1', 'value1'), ('key2', 'value3'),
                          ('key4', 'value6')])
        self.assertEqual([(k,mm.getlist(k)) for k in sorted(mm)],
                         [('key1', ['value1']),
                          ('key2', ['value2', 'value3']),
                          ('key4', ['value5', 'value6'])])
    def test_bool_casting(self):
        # Truthiness reflects whether *any* underlying dict has content.
        empty = MergeDict({}, {}, {})
        not_empty = MergeDict({}, {}, {"key": "value"})
        self.assertFalse(empty)
        self.assertTrue(not_empty)
    def test_key_error(self):
        """
        Test that the message of KeyError contains the missing key name.
        """
        d1 = MergeDict({'key1': 42})
        with six.assertRaisesRegex(self, KeyError, 'key2'):
            d1['key2']
class MultiValueDictTests(SimpleTestCase):
    """Tests for MultiValueDict, a dict that keeps every value for a key."""
    def test_multivaluedict(self):
        d = MultiValueDict({'name': ['Adrian', 'Simon'],
                            'position': ['Developer']})
        # Scalar lookup returns the *last* value in the list for the key.
        self.assertEqual(d['name'], 'Simon')
        self.assertEqual(d.get('name'), 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
        self.assertEqual(sorted(list(six.iteritems(d))),
                         [('name', 'Simon'), ('position', 'Developer')])
        self.assertEqual(sorted(list(six.iterlists(d))),
                         [('name', ['Adrian', 'Simon']),
                          ('position', ['Developer'])])
        six.assertRaisesRegex(self, MultiValueDictKeyError, 'lastname',
            d.__getitem__, 'lastname')
        self.assertEqual(d.get('lastname'), None)
        self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
        self.assertEqual(d.getlist('lastname'), [])
        self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
                         ['Adrian', 'Simon'])
        d.setlist('lastname', ['Holovaty', 'Willison'])
        self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
        self.assertEqual(sorted(list(six.itervalues(d))),
                         ['Developer', 'Simon', 'Willison'])
    def test_appendlist(self):
        d = MultiValueDict()
        d.appendlist('name', 'Adrian')
        d.appendlist('name', 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
    def test_copy(self):
        # Both copy.copy() and .copy() are shallow: the copies share the
        # inner value lists with the original (asserted below via append).
        for copy_func in [copy.copy, lambda d: d.copy()]:
            d1 = MultiValueDict({
                "developers": ["Carl", "Fred"]
            })
            self.assertEqual(d1["developers"], "Fred")
            d2 = copy_func(d1)
            d2.update({"developers": "Groucho"})
            self.assertEqual(d2["developers"], "Groucho")
            self.assertEqual(d1["developers"], "Fred")
            d1 = MultiValueDict({
                "key": [[]]
            })
            self.assertEqual(d1["key"], [])
            d2 = copy_func(d1)
            d2["key"].append("Penguin")
            self.assertEqual(d1["key"], ["Penguin"])
            self.assertEqual(d2["key"], ["Penguin"])
    def test_dict_translation(self):
        mvd = MultiValueDict({
            'devs': ['Bob', 'Joe'],
            'pm': ['Rory'],
        })
        d = mvd.dict()
        self.assertEqual(sorted(six.iterkeys(d)), sorted(six.iterkeys(mvd)))
        for key in six.iterkeys(mvd):
            self.assertEqual(d[key], mvd[key])
        self.assertEqual({}, MultiValueDict().dict())
class ImmutableListTests(SimpleTestCase):
    """Tests for ImmutableList, a tuple-like sequence that rejects mutation."""
    def test_sort(self):
        d = ImmutableList(range(10))
        # AttributeError: ImmutableList object is immutable.
        self.assertRaisesMessage(AttributeError,
            'ImmutableList object is immutable.', d.sort)
        self.assertEqual(repr(d), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')
    def test_custom_warning(self):
        # The 'warning' kwarg customises the mutation error message.
        d = ImmutableList(range(10), warning="Object is immutable!")
        self.assertEqual(d[1], 1)
        # AttributeError: Object is immutable!
        self.assertRaisesMessage(AttributeError,
            'Object is immutable!', d.__setitem__, 1, 'test')
class DictWrapperTests(SimpleTestCase):
    """Tests for DictWrapper, which rewrites prefixed keys via a function."""
    def test_dictwrapper(self):
        """Prefixed keys are routed through the wrapper function."""
        # Named function instead of a lambda assigned to a name (PEP 8, E731);
        # behaviour is identical.
        def modify(value):
            return "*%s" % value
        d = DictWrapper({'a': 'a'}, modify, 'xx_')
        # Plain keys resolve normally; 'xx_'-prefixed keys go through modify().
        self.assertEqual("Normal: %(a)s. Modified: %(xx_a)s" % d,
                         'Normal: a. Modified: *a')
| apache-2.0 |
alu042/edx-platform | openedx/core/djangoapps/ccxcon/tasks.py | 40 | 1694 | """
This file contains celery tasks for ccxcon
"""
from celery.task import task # pylint: disable=no-name-in-module, import-error
from celery.utils.log import get_task_logger # pylint: disable=no-name-in-module, import-error
from requests.exceptions import (
ConnectionError,
HTTPError,
RequestException,
TooManyRedirects
)
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.ccxcon import api
log = get_task_logger(__name__)
@task()
def update_ccxcon(course_id, cur_retry=0):
    """
    Pass through function to update course information on CCXCon.
    Takes care of retries in case of some specific exceptions.
    Args:
        course_id (str): string representing a course key
        cur_retry (int): integer representing the current task retry
    """
    course_key = CourseKey.from_string(course_id)
    try:
        api.course_info_to_ccxcon(course_key)
        log.info('Course update to CCXCon returned no errors. Course key: %s', course_id)
    except (ConnectionError, HTTPError, RequestException, TooManyRedirects, api.CCXConnServerError) as exp:
        # Transient network/server failures are retried below; any other
        # exception type propagates to celery unchanged.
        log.error('Course update to CCXCon failed for course_id %s with error: %s', course_id, exp)
        # in case the maximum amount of retries has not been reached,
        # insert another task delayed exponentially up to 5 retries
        if cur_retry < 5:
            update_ccxcon.apply_async(
                kwargs={'course_id': course_id, 'cur_retry': cur_retry + 1},
                countdown=10 ** cur_retry  # number of seconds the task should be delayed
            )
            log.info('Requeued celery task for course key %s ; retry # %s', course_id, cur_retry + 1)
| agpl-3.0 |
puneetgkaur/backup_sugar_shell_for_cordova | src/jarabe/model/sound.py | 11 | 2818 | # Copyright (C) 2006-2008 Red Hat, Inc.
# Copyright (C) 2014 Emil Dudev
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gio
from gi.repository import GObject
from gi.repository import SugarExt
from sugar3 import dispatch
# Channel selectors passed to SugarExt.VolumeAlsa.new() below.
_PLAYBACK = 0
_CAPTURE = 1
# Debounce interval (GObject.timeout_add units, i.e. milliseconds) before
# persisting the volume to GSettings.
_SAVE_TIMEOUT = 500
class PlaybackSound(object):
    """Playback (speaker) channel model: volume, mute and persistence."""
    # Class-level, so the ALSA control is shared by all instances.
    _volume = SugarExt.VolumeAlsa.new(_PLAYBACK)
    muted_changed = dispatch.Signal()
    volume_changed = dispatch.Signal()
    VOLUME_STEP = 10
    def __init__(self):
        # Id of the pending debounced save() timeout; -1 when none scheduled.
        self._save_timeout_id = -1
    def get_muted(self):
        return self._volume.get_mute()
    def get_volume(self):
        return self._volume.get_volume()
    def set_volume(self, new_volume):
        self._volume.set_volume(new_volume)
        self.volume_changed.send(None)
        # Debounce: cancel any pending save so rapid changes save only once.
        if self._save_timeout_id != -1:
            GObject.source_remove(self._save_timeout_id)
        self._save_timeout_id = GObject.timeout_add(_SAVE_TIMEOUT, self.save)
    def set_muted(self, new_state):
        self._volume.set_mute(new_state)
        self.muted_changed.send(None)
        # Same debounce as set_volume(); note save() persists volume only.
        if self._save_timeout_id != -1:
            GObject.source_remove(self._save_timeout_id)
        self._save_timeout_id = GObject.timeout_add(_SAVE_TIMEOUT, self.save)
    def save(self):
        """Persist the current volume to GSettings; False stops the timer."""
        self._save_timeout_id = -1
        settings = Gio.Settings('org.sugarlabs.sound')
        settings.set_int('volume', self.get_volume())
        return False
    def restore(self):
        """Apply the volume previously saved in GSettings."""
        settings = Gio.Settings('org.sugarlabs.sound')
        self.set_volume(settings.get_int('volume'))
class CaptureSound(object):
    """Capture (microphone) channel model: volume and mute, not persisted."""
    # Class-level, so the ALSA control is shared by all instances.
    _volume = SugarExt.VolumeAlsa.new(_CAPTURE)
    muted_changed = dispatch.Signal()
    volume_changed = dispatch.Signal()
    def get_muted(self):
        return self._volume.get_mute()
    def get_volume(self):
        return self._volume.get_volume()
    def set_volume(self, new_volume):
        # Unlike PlaybackSound, no save/restore here: capture volume is
        # not written to GSettings.
        self._volume.set_volume(new_volume)
        self.volume_changed.send(None)
    def set_muted(self, new_state):
        self._volume.set_mute(new_state)
        self.muted_changed.send(None)
# Module-level singletons used by the rest of the shell.
sound = PlaybackSound()
capture_sound = CaptureSound()
| gpl-2.0 |
maryklayne/Funcao | sympy/physics/optics/tests/test_gaussopt.py | 41 | 3760 | from sympy import atan2, factor, Float, I, Matrix, N, oo, pi, sqrt, symbols
from sympy.physics.optics import (BeamParameter, CurvedMirror,
CurvedRefraction, FlatMirror, FlatRefraction, FreeSpace, GeometricRay,
RayTransferMatrix, ThinLens, conjugate_gauss_beams,
gaussian_conj, geometric_conj_ab, geometric_conj_af, geometric_conj_bf,
rayleigh2waist, waist2rayleigh)
def streq(a, b):
    """Return True when *a* and *b* have identical string representations."""
    left = str(a)
    right = str(b)
    return left == right
def test_gauss_opt():
    """
    Exercise the ray-transfer (ABCD) matrices, geometric-ray helpers and
    Gaussian-beam utilities of sympy.physics.optics.
    """
    # An ABCD matrix behaves like (and compares equal to) a 2x2 Matrix.
    mat = RayTransferMatrix(1, 2, 3, 4)
    assert mat == Matrix([[1, 2], [3, 4]])
    assert mat == RayTransferMatrix( Matrix([[1, 2], [3, 4]]) )
    assert [mat.A, mat.B, mat.C, mat.D] == [1, 2, 3, 4]
    d, f, h, n1, n2, R = symbols('d f h n1 n2 R')
    lens = ThinLens(f)
    assert lens == Matrix([[ 1, 0], [-1/f, 1]])
    assert lens.C == -1/f
    # Standard optical elements as ABCD matrices.
    assert FreeSpace(d) == Matrix([[ 1, d], [0, 1]])
    assert FlatRefraction(n1, n2) == Matrix([[1, 0], [0, n1/n2]])
    assert CurvedRefraction(
        R, n1, n2) == Matrix([[1, 0], [(n1 - n2)/(R*n2), n1/n2]])
    assert FlatMirror() == Matrix([[1, 0], [0, 1]])
    assert CurvedMirror(R) == Matrix([[ 1, 0], [-2/R, 1]])
    assert ThinLens(f) == Matrix([[ 1, 0], [-1/f, 1]])
    # Composition of elements is plain matrix multiplication.
    mul = CurvedMirror(R)*FreeSpace(d)
    mul_mat = Matrix([[ 1, 0], [-2/R, 1]])*Matrix([[ 1, d], [0, 1]])
    assert mul.A == mul_mat[0, 0]
    assert mul.B == mul_mat[0, 1]
    assert mul.C == mul_mat[1, 0]
    assert mul.D == mul_mat[1, 1]
    # Geometric rays are 2x1 (height, angle) vectors propagated by matrices.
    angle = symbols('angle')
    assert GeometricRay(h, angle) == Matrix([[ h], [angle]])
    assert FreeSpace(
        d)*GeometricRay(h, angle) == Matrix([[angle*d + h], [angle]])
    assert GeometricRay( Matrix( ((h,), (angle,)) ) ) == Matrix([[h], [angle]])
    assert (FreeSpace(d)*GeometricRay(h, angle)).height == angle*d + h
    assert (FreeSpace(d)*GeometricRay(h, angle)).angle == angle
    # Numeric Gaussian beam parameter; streq compares string forms since the
    # floating point results are not exactly representable.
    p = BeamParameter(530e-9, 1, w=1e-3)
    assert streq(p.q, 1 + 1.88679245283019*I*pi)
    assert streq(N(p.q), 1.0 + 5.92753330865999*I)
    assert streq(N(p.w_0), Float(0.00100000000000000))
    assert streq(N(p.z_r), Float(5.92753330865999))
    fs = FreeSpace(10)
    p1 = fs*p
    assert streq(N(p.w), Float(0.00101413072159615))
    assert streq(N(p1.w), Float(0.00210803120913829))
    # Conversions between beam waist and Rayleigh range.
    w, wavelen = symbols('w wavelen')
    assert waist2rayleigh(w, wavelen) == pi*w**2/wavelen
    z_r, wavelen = symbols('z_r wavelen')
    assert rayleigh2waist(z_r, wavelen) == sqrt(wavelen*z_r)/sqrt(pi)
    # Geometric conjugation relations (thin-lens equation variants).
    a, b, f = symbols('a b f')
    assert geometric_conj_ab(a, b) == a*b/(a + b)
    assert geometric_conj_af(a, f) == a*f/(a - f)
    assert geometric_conj_bf(b, f) == b*f/(b - f)
    assert geometric_conj_ab(oo, b) == b
    assert geometric_conj_ab(a, oo) == a
    s_in, z_r_in, f = symbols('s_in z_r_in f')
    assert gaussian_conj(
        s_in, z_r_in, f)[0] == 1/(-1/(s_in + z_r_in**2/(-f + s_in)) + 1/f)
    assert gaussian_conj(
        s_in, z_r_in, f)[1] == z_r_in/(1 - s_in**2/f**2 + z_r_in**2/f**2)
    assert gaussian_conj(
        s_in, z_r_in, f)[2] == 1/sqrt(1 - s_in**2/f**2 + z_r_in**2/f**2)
    l, w_i, w_o, f = symbols('l w_i w_o f')
    assert conjugate_gauss_beams(l, w_i, w_o, f=f)[0] == f*(
        -sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)) + 1)
    assert factor(conjugate_gauss_beams(l, w_i, w_o, f=f)[1]) == f*w_o**2*(
        w_i**2/w_o**2 - sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)))/w_i**2
    assert conjugate_gauss_beams(l, w_i, w_o, f=f)[2] == f
    # Symbolic beam parameter. Fix: the symbol list used to read 'z l r',
    # silently binding the Python name ``w`` to a symbol printed as 'r'.
    # All assertions below compare via the Python variable, so the rename
    # to 'w' keeps every result identical while making output readable.
    z, l, w = symbols('z l w', positive=True)
    p = BeamParameter(l, z, w=w)
    assert p.radius == z*(l**2*z**2/(pi**2*w**4) + 1)
    assert p.w == w*sqrt(l**2*z**2/(pi**2*w**4) + 1)
    assert p.w_0 == w
    assert p.divergence == l/(pi*w)
    assert p.gouy == atan2(z, pi*w**2/l)
    assert p.waist_approximation_limit == 2*l/pi
| bsd-3-clause |
Jgarcia-IAS/SAT | openerp/addons/base/module/wizard/base_module_upgrade.py | 294 | 5164 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_module_upgrade(osv.osv_memory):
    """ Module Upgrade

    Transient wizard that applies every module scheduled for install,
    upgrade or removal, then reloads the registry.
    """
    _name = "base.module.upgrade"
    _description = "Module Upgrade"
    _columns = {
        # Read-only text summary of the pending modules, see default_get().
        'module_info': fields.text('Modules to Update',readonly=True),
    }
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        # Swap the form for an "Upgrade Completed" dialog when no module is
        # left in a pending state; other view types pass through unchanged.
        res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
        if view_type != 'form':
            return res
        context = {} if context is None else context
        record_id = context and context.get('active_id', False) or False
        active_model = context.get('active_model')
        if (not record_id) or (not active_model):
            return res
        ids = self.get_module_list(cr, uid, context=context)
        if not ids:
            res['arch'] = '''<form string="Upgrade Completed" version="7.0">
                                <separator string="Upgrade Completed" colspan="4"/>
                                <footer>
                                    <button name="config" string="Start Configuration" type="object" class="oe_highlight"/> or
                                    <button special="cancel" string="Close" class="oe_link"/>
                                </footer>
                             </form>'''
        return res
    def get_module_list(self, cr, uid, context=None):
        # Ids of modules awaiting an upgrade, removal or installation.
        mod_obj = self.pool.get('ir.module.module')
        ids = mod_obj.search(cr, uid, [
            ('state', 'in', ['to upgrade', 'to remove', 'to install'])])
        return ids
    def default_get(self, cr, uid, fields, context=None):
        # Build the "name : state" summary shown in the module_info field.
        mod_obj = self.pool.get('ir.module.module')
        ids = self.get_module_list(cr, uid, context=context)
        res = mod_obj.read(cr, uid, ids, ['name','state'], context)
        return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}
    def upgrade_module_cancel(self, cr, uid, ids, context=None):
        # Revert pending states: scheduled upgrades/removals go back to
        # 'installed', scheduled installs go back to 'uninstalled'.
        mod_obj = self.pool.get('ir.module.module')
        to_installed_ids = mod_obj.search(cr, uid, [
            ('state', 'in', ['to upgrade', 'to remove'])])
        if to_installed_ids:
            mod_obj.write(cr, uid, to_installed_ids, {'state': 'installed'}, context=context)
        to_uninstalled_ids = mod_obj.search(cr, uid, [
            ('state', '=', 'to install')])
        if to_uninstalled_ids:
            mod_obj.write(cr, uid, to_uninstalled_ids, {'state': 'uninstalled'}, context=context)
        return {'type': 'ir.actions.act_window_close'}
    def upgrade_module(self, cr, uid, ids, context=None):
        """Apply pending operations and rebuild the registry.

        Raises an error listing any dependency that is uninstalled or
        unknown before anything is downloaded or applied.
        """
        ir_module = self.pool.get('ir.module.module')
        # install/upgrade: double-check preconditions
        ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
        if ids:
            cr.execute("""SELECT d.name FROM ir_module_module m
                          JOIN ir_module_module_dependency d ON (m.id = d.module_id)
                          LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
                          WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
                       (tuple(ids), ('uninstalled',)))
            unmet_packages = [x[0] for x in cr.fetchall()]
            if unmet_packages:
                raise osv.except_osv(_('Unmet Dependency!'),
                                     _('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))
            ir_module.download(cr, uid, ids, context=context)
        cr.commit() # save before re-creating cursor below
        # Rebuilding the registry performs the actual module operations.
        openerp.api.Environment.reset()
        openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
        return {'type': 'ir.actions.act_window_close'}
    def config(self, cr, uid, ids, context=None):
        # Chain into the next pending configuration wizard, if any.
        return self.pool.get('res.config').next(cr, uid, [], context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ds-hwang/chromium-crosswalk | tools/perf/profile_creators/small_profile_extender.py | 31 | 1424 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import page_sets
from profile_creators import fast_navigation_profile_extender
class SmallProfileExtender(
    fast_navigation_profile_extender.FastNavigationProfileExtender):
  """Creates a small profile by performing 25 navigations."""
  def __init__(self, finder_options):
    # Use exactly 5 tabs to generate the profile. This is because consumers of
    # this profile will perform a session restore, and expect 5 restored tabs.
    maximum_batch_size = 5
    super(SmallProfileExtender, self).__init__(
        finder_options, maximum_batch_size)
    # Get the list of urls from the typical 25 page set.
    self._page_set = page_sets.Typical25PageSet()
    urls = []
    for story in self._page_set.stories:
      urls.append(story.url)
    self._navigation_urls = urls
  def GetUrlIterator(self):
    """Superclass override: iterator over the 25 urls to visit."""
    return iter(self._navigation_urls)
  def ShouldExitAfterBatchNavigation(self):
    """Superclass override: keep navigating until the url list is done."""
    return False
  def WebPageReplayArchivePath(self):
    """Superclass override: WPR archive of the first story in the set."""
    return self._page_set.WprFilePathForStory(
        self._page_set.stories[0])
  def FetchWebPageReplayArchives(self):
    """Superclass override: make sure the WPR archives are present."""
    self._page_set.wpr_archive_info.DownloadArchivesIfNeeded()
| bsd-3-clause |
scipy/scipy | scipy/sparse/linalg/isolve/setup.py | 18 | 1678 | from os.path import join
def configuration(parent_package='',top_path=None):
    """numpy.distutils configuration for scipy.sparse.linalg.isolve.

    Builds the ``_iterative`` f2py extension from the Fortran templated
    sources and registers the tests data directory.
    """
    from scipy._build_utils.system_info import get_info
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils import (get_g77_abi_wrappers, uses_blas64,
                                    blas_ilp64_pre_build_hook, get_f2py_int64_options)
    config = Configuration('isolve',parent_package,top_path)
    # Select 64-bit (ILP64) LAPACK when the build is configured for it.
    if uses_blas64():
        lapack_opt = get_info('lapack_ilp64_opt')
        f2py_options = get_f2py_int64_options()
        pre_build_hook = blas_ilp64_pre_build_hook(lapack_opt)
    else:
        lapack_opt = get_info('lapack_opt')
        f2py_options = None
        pre_build_hook = None
    # iterative methods
    methods = ['BiCGREVCOM.f.src',
               'BiCGSTABREVCOM.f.src',
               'CGREVCOM.f.src',
               'CGSREVCOM.f.src',
               # 'ChebyREVCOM.f.src',
               'GMRESREVCOM.f.src',
               # 'JacobiREVCOM.f.src',
               'QMRREVCOM.f.src',
               # 'SORREVCOM.f.src'
               ]
    Util = ['getbreak.f.src']
    sources = Util + methods + ['_iterative.pyf.src']
    sources = [join('iterative', x) for x in sources]
    # g77 ABI wrappers are needed for some LAPACK/BLAS configurations.
    sources += get_g77_abi_wrappers(lapack_opt)
    ext = config.add_extension('_iterative',
                               sources=sources,
                               f2py_options=f2py_options,
                               extra_info=lapack_opt)
    ext._pre_build_hook = pre_build_hook
    config.add_data_dir('tests')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
GinnyN/Team-Fortress-RPG-Generators | build/lib/django/conf/__init__.py | 77 | 7764 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import os
import re
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.utils.functional import LazyObject, empty
from django.utils import importlib
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
    """
    A lazy proxy for either global Django settings or a custom settings object.
    The user can manually configure settings prior to using them. Otherwise,
    Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
    """
    def _setup(self):
        """
        Load the settings module pointed to by the environment variable. This
        is used the first time we need any settings at all, if the user has not
        previously configured the settings manually.
        """
        try:
            settings_module = os.environ[ENVIRONMENT_VARIABLE]
            if not settings_module: # If it's set but is an empty string.
                raise KeyError
        except KeyError:
            # NOTE: This is arguably an EnvironmentError, but that causes
            # problems with Python's interactive help.
            raise ImportError("Settings cannot be imported, because environment variable %s is undefined." % ENVIRONMENT_VARIABLE)
        self._wrapped = Settings(settings_module)
    def configure(self, default_settings=global_settings, **options):
        """
        Called to manually configure the settings. The 'default_settings'
        parameter sets where to retrieve any unspecified values from (its
        argument must support attribute access (__getattr__)).
        """
        if self._wrapped is not empty:
            raise RuntimeError('Settings already configured.')
        holder = UserSettingsHolder(default_settings)
        for name, value in options.items():
            setattr(holder, name, value)
        # From here on LazyObject resolves attribute access through _wrapped.
        self._wrapped = holder
    @property
    def configured(self):
        """
        Returns True if the settings have already been configured.
        """
        return self._wrapped is not empty
class BaseSettings(object):
    """
    Common logic for settings whether set by a module or by the user.
    """
    def __setattr__(self, name, value):
        # Validate or deprecate a few well-known settings on assignment.
        if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
            warnings.warn("If set, %s must end with a slash" % name,
                          DeprecationWarning)
        elif name == "ADMIN_MEDIA_PREFIX":
            warnings.warn("The ADMIN_MEDIA_PREFIX setting has been removed; "
                          "use STATIC_URL instead.", DeprecationWarning)
        elif name == "ALLOWED_INCLUDE_ROOTS" and isinstance(value, basestring):
            raise ValueError("The ALLOWED_INCLUDE_ROOTS setting must be set "
                "to a tuple, not a string.")
        object.__setattr__(self, name, value)
class Settings(BaseSettings):
    """Settings loaded from the module named by DJANGO_SETTINGS_MODULE."""
    def __init__(self, settings_module):
        # update this dict from global settings (but only for ALL_CAPS settings)
        for setting in dir(global_settings):
            if setting == setting.upper():
                setattr(self, setting, getattr(global_settings, setting))
        # store the settings module in case someone later cares
        self.SETTINGS_MODULE = settings_module
        try:
            mod = importlib.import_module(self.SETTINGS_MODULE)
        except ImportError, e:
            raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (self.SETTINGS_MODULE, e))
        # Settings that should be converted into tuples if they're mistakenly entered
        # as strings.
        tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
        for setting in dir(mod):
            if setting == setting.upper():
                setting_value = getattr(mod, setting)
                if setting in tuple_settings and \
                        isinstance(setting_value, basestring):
                    setting_value = (setting_value,) # In case the user forgot the comma.
                setattr(self, setting, setting_value)
        if not self.SECRET_KEY:
            warnings.warn("The SECRET_KEY setting must not be empty.", DeprecationWarning)
        if hasattr(time, 'tzset') and self.TIME_ZONE:
            # When we can, attempt to validate the timezone. If we can't find
            # this file, no check happens and it's harmless.
            zoneinfo_root = '/usr/share/zoneinfo'
            if (os.path.exists(zoneinfo_root) and not
                    os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
                raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            os.environ['TZ'] = self.TIME_ZONE
            time.tzset()
        # Settings are configured, so we can set up the logger if required
        if self.LOGGING_CONFIG:
            # First find the logging configuration function ...
            logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
            logging_config_module = importlib.import_module(logging_config_path)
            logging_config_func = getattr(logging_config_module, logging_config_func_name)
            # Backwards-compatibility shim for #16288 fix
            compat_patch_logging_config(self.LOGGING)
            # ... then invoke it with the logging settings
            logging_config_func(self.LOGGING)
class UserSettingsHolder(BaseSettings):
    """
    Holder for user configured settings.
    """
    # SETTINGS_MODULE doesn't make much sense in the manually configured
    # (standalone) case.
    SETTINGS_MODULE = None
    def __init__(self, default_settings):
        """
        Requests for configuration variables not in this class are satisfied
        from the module specified in default_settings (if possible).
        """
        self.default_settings = default_settings
    def __getattr__(self, name):
        # Only called when the attribute is not set directly on self:
        # fall back to the wrapped default settings.
        return getattr(self.default_settings, name)
    def __dir__(self):
        return self.__dict__.keys() + dir(self.default_settings)
    # For Python < 2.6:
    __members__ = property(lambda self: self.__dir__())
# Global settings proxy; the real settings are resolved lazily on first use.
settings = LazySettings()
def compat_patch_logging_config(logging_config):
    """
    Backwards-compatibility shim for #16288 fix. Takes initial value of
    ``LOGGING`` setting and patches it in-place (issuing deprecation warning)
    if "mail_admins" logging handler is configured but has no filters.
    """
    # When the handler is absent this sentinel (which has "filters") makes
    # the check below a no-op, mirroring the handler-not-configured case.
    mail_admins = logging_config.get("handlers", {}).get(
        "mail_admins", {"filters": []})
    if "filters" in mail_admins:
        return
    warnings.warn(
        "You have no filters defined on the 'mail_admins' logging "
        "handler: adding implicit debug-false-only filter. "
        "See http://docs.djangoproject.com/en/dev/releases/1.4/"
        "#request-exceptions-are-now-always-logged",
        PendingDeprecationWarning)
    # Pick a top-level filter name that does not collide with user filters.
    existing_filters = logging_config.setdefault("filters", {})
    filter_name = "require_debug_false"
    while filter_name in existing_filters:
        filter_name += "_"
    existing_filters[filter_name] = {
        "()": "django.utils.log.RequireDebugFalse",
    }
    logging_config["handlers"]["mail_admins"]["filters"] = [filter_name]
| bsd-3-clause |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/south/tests/logic.py | 127 | 33513 | from south.tests import unittest
import datetime
import sys
try:
set # builtin, python >=2.6
except NameError:
from sets import Set as set # in stdlib, python >=2.3
from south import exceptions
from south.migration import migrate_app
from south.migration.base import all_migrations, Migrations
from south.creator.changes import ManualChanges
from south.migration.utils import depends, flatten, get_app_label
from south.models import MigrationHistory
from south.tests import Monkeypatcher
from south.db import db
class TestBrokenMigration(Monkeypatcher):
    """Dependency calculation must fail loudly for a broken migration set."""
    installed_apps = ["fakeapp", "otherfakeapp", "brokenapp"]
    def test_broken_dependencies(self):
        # brokenapp declares a dependency on an application that has no
        # migrations, which must be rejected when dependencies are computed.
        self.assertRaises(
            exceptions.DependsOnUnmigratedApplication,
            Migrations.calculate_dependencies,
            force=True,
        )
        #depends_on_unknown = self.brokenapp['0002_depends_on_unknown']
        #self.assertRaises(exceptions.DependsOnUnknownMigration,
        #                  depends_on_unknown.dependencies)
        #depends_on_higher = self.brokenapp['0003_depends_on_higher']
        #self.assertRaises(exceptions.DependsOnHigherMigration,
        #                  depends_on_higher.dependencies)
class TestMigration(Monkeypatcher):
    """Behaviour of Migration objects across two interdependent test apps."""
    installed_apps = ["fakeapp", "otherfakeapp"]
    def setUp(self):
        super(TestMigration, self).setUp()
        self.fakeapp = Migrations('fakeapp')
        self.otherfakeapp = Migrations('otherfakeapp')
        Migrations.calculate_dependencies(force=True)
    def test_str(self):
        migrations = [str(m) for m in self.fakeapp]
        self.assertEqual(['fakeapp:0001_spam',
                          'fakeapp:0002_eggs',
                          'fakeapp:0003_alter_spam'],
                         migrations)
    def test_repr(self):
        migrations = [repr(m) for m in self.fakeapp]
        self.assertEqual(['<Migration: fakeapp:0001_spam>',
                          '<Migration: fakeapp:0002_eggs>',
                          '<Migration: fakeapp:0003_alter_spam>'],
                         migrations)
    def test_app_label(self):
        self.assertEqual(['fakeapp', 'fakeapp', 'fakeapp'],
                         [m.app_label() for m in self.fakeapp])
    def test_name(self):
        self.assertEqual(['0001_spam', '0002_eggs', '0003_alter_spam'],
                         [m.name() for m in self.fakeapp])
    def test_full_name(self):
        self.assertEqual(['fakeapp.migrations.0001_spam',
                          'fakeapp.migrations.0002_eggs',
                          'fakeapp.migrations.0003_alter_spam'],
                         [m.full_name() for m in self.fakeapp])
    def test_migration(self):
        # Can't use vanilla import, modules beginning with numbers aren't in grammar
        M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
        M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
        M3 = __import__("fakeapp.migrations.0003_alter_spam", {}, {}, ['Migration']).Migration
        self.assertEqual([M1, M2, M3],
                         [m.migration().Migration for m in self.fakeapp])
        self.assertRaises(exceptions.UnknownMigration,
                          self.fakeapp['9999_unknown'].migration)
    def test_previous(self):
        # previous() is within-app only; the first migration has none.
        self.assertEqual([None,
                          self.fakeapp['0001_spam'],
                          self.fakeapp['0002_eggs']],
                         [m.previous() for m in self.fakeapp])
    def test_dependencies(self):
        "Test that the dependency detection works."
        self.assertEqual([
                set([]),
                set([self.fakeapp['0001_spam']]),
                set([self.fakeapp['0002_eggs']])
            ],
            [m.dependencies for m in self.fakeapp],
        )
        self.assertEqual([
                set([self.fakeapp['0001_spam']]),
                set([self.otherfakeapp['0001_first']]),
                set([
                    self.otherfakeapp['0002_second'],
                    self.fakeapp['0003_alter_spam'],
                ])
            ],
            [m.dependencies for m in self.otherfakeapp],
        )
    def test_forwards_plan(self):
        # forwards_plan() returns every migration (cross-app included) that
        # must run, in order, for the target migration to be applied.
        self.assertEqual([
                [self.fakeapp['0001_spam']],
                [
                    self.fakeapp['0001_spam'],
                    self.fakeapp['0002_eggs']
                ],
                [
                    self.fakeapp['0001_spam'],
                    self.fakeapp['0002_eggs'],
                    self.fakeapp['0003_alter_spam'],
                ]
            ],
            [m.forwards_plan() for m in self.fakeapp],
        )
        self.assertEqual([
                [
                    self.fakeapp['0001_spam'],
                    self.otherfakeapp['0001_first']
                ],
                [
                    self.fakeapp['0001_spam'],
                    self.otherfakeapp['0001_first'],
                    self.otherfakeapp['0002_second']
                ],
                [
                    self.fakeapp['0001_spam'],
                    self.otherfakeapp['0001_first'],
                    self.otherfakeapp['0002_second'],
                    self.fakeapp['0002_eggs'],
                    self.fakeapp['0003_alter_spam'],
                    self.otherfakeapp['0003_third'],
                ]
            ],
            [m.forwards_plan() for m in self.otherfakeapp],
        )
    def test_is_before(self):
        F1 = self.fakeapp['0001_spam']
        F2 = self.fakeapp['0002_eggs']
        F3 = self.fakeapp['0003_alter_spam']
        O1 = self.otherfakeapp['0001_first']
        O2 = self.otherfakeapp['0002_second']
        O3 = self.otherfakeapp['0003_third']
        self.assertTrue(F1.is_before(F2))
        self.assertTrue(F1.is_before(F3))
        self.assertTrue(F2.is_before(F3))
        self.assertEqual(O3.is_before(O1), False)
        self.assertEqual(O3.is_before(O2), False)
        self.assertEqual(O2.is_before(O2), False)
        self.assertEqual(O2.is_before(O1), False)
        # Migrations in different apps have no total ordering: None.
        self.assertEqual(F2.is_before(O1), None)
        self.assertEqual(F2.is_before(O2), None)
        self.assertEqual(F2.is_before(O3), None)
class TestMigrationDependencies(Monkeypatcher):
installed_apps = ['deps_a', 'deps_b', 'deps_c']
def setUp(self):
super(TestMigrationDependencies, self).setUp()
self.deps_a = Migrations('deps_a')
self.deps_b = Migrations('deps_b')
self.deps_c = Migrations('deps_c')
Migrations.calculate_dependencies(force=True)
def test_dependencies(self):
self.assertEqual(
[
set([]),
set([self.deps_a['0001_a']]),
set([self.deps_a['0002_a']]),
set([
self.deps_a['0003_a'],
self.deps_b['0003_b'],
]),
set([self.deps_a['0004_a']]),
],
[m.dependencies for m in self.deps_a],
)
self.assertEqual(
[
set([]),
set([
self.deps_b['0001_b'],
self.deps_a['0002_a']
]),
set([
self.deps_b['0002_b'],
self.deps_a['0003_a']
]),
set([self.deps_b['0003_b']]),
set([self.deps_b['0004_b']]),
],
[m.dependencies for m in self.deps_b],
)
self.assertEqual(
[
set([]),
set([self.deps_c['0001_c']]),
set([self.deps_c['0002_c']]),
set([self.deps_c['0003_c']]),
set([
self.deps_c['0004_c'],
self.deps_a['0002_a']
]),
],
[m.dependencies for m in self.deps_c],
)
def test_dependents(self):
self.assertEqual([set([self.deps_a['0002_a']]),
set([self.deps_c['0005_c'],
self.deps_b['0002_b'],
self.deps_a['0003_a']]),
set([self.deps_b['0003_b'],
self.deps_a['0004_a']]),
set([self.deps_a['0005_a']]),
set([])],
[m.dependents for m in self.deps_a])
self.assertEqual([set([self.deps_b['0002_b']]),
set([self.deps_b['0003_b']]),
set([self.deps_b['0004_b'],
self.deps_a['0004_a']]),
set([self.deps_b['0005_b']]),
set([])],
[m.dependents for m in self.deps_b])
self.assertEqual([set([self.deps_c['0002_c']]),
set([self.deps_c['0003_c']]),
set([self.deps_c['0004_c']]),
set([self.deps_c['0005_c']]),
set([])],
[m.dependents for m in self.deps_c])
def test_forwards_plan(self):
self.assertEqual([[self.deps_a['0001_a']],
[self.deps_a['0001_a'],
self.deps_a['0002_a']],
[self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_a['0003_a']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_a['0004_a']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_a['0004_a'],
self.deps_a['0005_a']]],
[m.forwards_plan() for m in self.deps_a])
self.assertEqual([[self.deps_b['0001_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_b['0004_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_b['0004_b'],
self.deps_b['0005_b']]],
[m.forwards_plan() for m in self.deps_b])
self.assertEqual([[self.deps_c['0001_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c'],
self.deps_c['0004_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c'],
self.deps_c['0004_c'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_c['0005_c']]],
[m.forwards_plan() for m in self.deps_c])
    def test_backwards_plan(self):
        """
        backwards_plan() must return every migration that has to be unapplied
        before (and including) this one, in reverse-dependency order: all
        dependents first, the migration itself last.
        """
        # App a: unapplying early a-migrations drags in dependents from b and c.
        self.assertEqual([
            [
                self.deps_c['0005_c'],
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_b['0002_b'],
                self.deps_a['0003_a'],
                self.deps_a['0002_a'],
                self.deps_a['0001_a'],
            ],
            [
                self.deps_c['0005_c'],
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_b['0002_b'],
                self.deps_a['0003_a'],
                self.deps_a['0002_a'],
            ],
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_a['0003_a'],
            ],
            [
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
            ],
            [
                self.deps_a['0005_a'],
            ]
        ], [m.backwards_plan() for m in self.deps_a])
        # App b: dependents of b's chain include a's 0004/0005.
        self.assertEqual([
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_b['0002_b'],
                self.deps_b['0001_b'],
            ],
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_b['0002_b'],
            ],
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
            ],
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
            ],
            [
                self.deps_b['0005_b'],
            ],
        ], [m.backwards_plan() for m in self.deps_b])
        # App c: nothing outside c depends on it, so plans are purely linear.
        self.assertEqual([
            [
                self.deps_c['0005_c'],
                self.deps_c['0004_c'],
                self.deps_c['0003_c'],
                self.deps_c['0002_c'],
                self.deps_c['0001_c'],
            ],
            [
                self.deps_c['0005_c'],
                self.deps_c['0004_c'],
                self.deps_c['0003_c'],
                self.deps_c['0002_c'],
            ],
            [
                self.deps_c['0005_c'],
                self.deps_c['0004_c'],
                self.deps_c['0003_c'],
            ],
            [
                self.deps_c['0005_c'],
                self.deps_c['0004_c'],
            ],
            [self.deps_c['0005_c']]
        ], [m.backwards_plan() for m in self.deps_c])
class TestCircularDependencies(Monkeypatcher):
    """Migration graphs with cycles must be rejected when building plans."""

    installed_apps = ["circular_a", "circular_b"]

    def test_plans(self):
        """Planning in either direction raises CircularDependency for both apps."""
        Migrations.calculate_dependencies(force=True)
        for app_label in ('circular_a', 'circular_b'):
            last_migration = Migrations(app_label)[-1]
            for plan_method in (last_migration.forwards_plan,
                                last_migration.backwards_plan):
                self.assertRaises(
                    exceptions.CircularDependency,
                    plan_method,
                )
class TestMigrations(Monkeypatcher):
    """
    Tests lookup and equality of the Migrations container for an app,
    plus resolution of individual migration modules by (partial) name.
    """
    installed_apps = ["fakeapp", "otherfakeapp"]

    def test_all(self):
        """all_migrations() yields a Migrations object per installed app, in order."""
        # A non-empty fromlist makes __import__ return the leaf module
        # rather than the top-level package.
        M1 = Migrations(__import__("fakeapp", {}, {}, ['']))
        M2 = Migrations(__import__("otherfakeapp", {}, {}, ['']))
        self.assertEqual(
            [M1, M2],
            list(all_migrations()),
        )

    def test(self):
        """Migrations compares equal whether built from a module, a label, or a fake app."""
        M1 = Migrations(__import__("fakeapp", {}, {}, ['']))
        self.assertEqual(M1, Migrations("fakeapp"))
        self.assertEqual(M1, Migrations(self.create_fake_app("fakeapp")))

    def test_application(self):
        """The .application attribute is the imported app module itself."""
        fakeapp = Migrations("fakeapp")
        application = __import__("fakeapp", {}, {}, [''])
        self.assertEqual(application, fakeapp.application)

    def test_migration(self):
        """Indexing by full migration name resolves to the right Migration class."""
        # Can't use vanilla import, modules beginning with numbers aren't in grammar
        M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
        M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
        migration = Migrations('fakeapp')
        self.assertEqual(M1, migration['0001_spam'].migration().Migration)
        self.assertEqual(M2, migration['0002_eggs'].migration().Migration)
        # A name that exists as an entry but has no module raises on .migration().
        self.assertRaises(exceptions.UnknownMigration,
                          migration['0001_jam'].migration)

    def test_guess_migration(self):
        """guess_migration() matches unique prefixes; ambiguity and misses raise."""
        # Can't use vanilla import, modules beginning with numbers aren't in grammar
        M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
        migration = Migrations('fakeapp')
        # Every shorter unique prefix of "0001_spam" resolves to it.
        self.assertEqual(M1, migration.guess_migration("0001_spam").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001_spa").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001_sp").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001_s").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001_").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001").migration().Migration)
        # Wrong separator: not a prefix of any migration.
        self.assertRaises(exceptions.UnknownMigration,
                          migration.guess_migration, "0001-spam")
        # "000" and "" are prefixes of several migrations.
        self.assertRaises(exceptions.MultiplePrefixMatches,
                          migration.guess_migration, "000")
        self.assertRaises(exceptions.MultiplePrefixMatches,
                          migration.guess_migration, "")
        # Longer than the real name, or simply absent.
        self.assertRaises(exceptions.UnknownMigration,
                          migration.guess_migration, "0001_spams")
        self.assertRaises(exceptions.UnknownMigration,
                          migration.guess_migration, "0001_jam")

    def test_app_label(self):
        """app_label() round-trips the label the Migrations was built from."""
        names = ['fakeapp', 'otherfakeapp']
        self.assertEqual(names,
                         [Migrations(n).app_label() for n in names])

    def test_full_name(self):
        """full_name() is the dotted path of the app's migrations package."""
        names = ['fakeapp', 'otherfakeapp']
        self.assertEqual([n + '.migrations' for n in names],
                         [Migrations(n).full_name() for n in names])
class TestMigrationLogic(Monkeypatcher):
    """
    Tests if the various logic functions in migration actually work.
    """
    installed_apps = ["fakeapp", "otherfakeapp"]

    def setUp(self):
        super(TestMigrationLogic, self).setUp()
        # Each test starts from a clean migration-history table.
        MigrationHistory.objects.all().delete()

    def assertListEqual(self, list1, list2, msg=None):
        """
        Order-insensitive comparison: both sequences are coerced to sets.

        NOTE(review): on Python 2.7+ this shadows unittest.TestCase's own
        assertListEqual (which assertEqual dispatches to for lists), making
        list comparisons in this class order-insensitive by design.
        """
        list1 = set(list1)
        list2 = set(list2)
        # assertTrue instead of the deprecated assert_ alias.
        return self.assertTrue(list1 == list2, "%s is not equal to %s" % (list1, list2))

    def test_find_ghost_migrations(self):
        pass

    def test_apply_migrations(self):
        """Applying forwards records all migrations; rolling back clears history."""
        migrations = Migrations("fakeapp")
        # We should start with no migrations
        self.assertEqual(list(MigrationHistory.objects.all()), [])
        # Apply them normally
        migrate_app(migrations, target_name=None, fake=False,
                    load_initial_data=True)
        # We should finish with all migrations
        self.assertListEqual(
            (("fakeapp", "0001_spam"),
             ("fakeapp", "0002_eggs"),
             ("fakeapp", "0003_alter_spam"),),
            MigrationHistory.objects.values_list("app_name", "migration"),
        )
        # Now roll them backwards
        migrate_app(migrations, target_name="zero", fake=False)
        # Finish with none
        self.assertEqual(list(MigrationHistory.objects.all()), [])

    def test_migration_merge_forwards(self):
        """An out-of-order history entry is rejected unless merge=True is passed."""
        migrations = Migrations("fakeapp")
        # We should start with no migrations
        self.assertEqual(list(MigrationHistory.objects.all()), [])
        # Insert one in the wrong order
        MigrationHistory.objects.create(app_name="fakeapp",
                                        migration="0002_eggs",
                                        applied=datetime.datetime.now())
        # Did it go in?
        self.assertListEqual(
            (("fakeapp", "0002_eggs"),),
            MigrationHistory.objects.values_list("app_name", "migration"),
        )
        # Apply them normally
        self.assertRaises(exceptions.InconsistentMigrationHistory,
                          migrate_app,
                          migrations, target_name=None, fake=False)
        self.assertRaises(exceptions.InconsistentMigrationHistory,
                          migrate_app,
                          migrations, target_name='zero', fake=False)
        # The exception must also carry the precise (applied, missing) pair.
        try:
            migrate_app(migrations, target_name=None, fake=False)
        except exceptions.InconsistentMigrationHistory as e:
            self.assertEqual(
                [
                    (
                        migrations['0002_eggs'],
                        migrations['0001_spam'],
                    )
                ],
                e.problems,
            )
        try:
            migrate_app(migrations, target_name="zero", fake=False)
        except exceptions.InconsistentMigrationHistory as e:
            self.assertEqual(
                [
                    (
                        migrations['0002_eggs'],
                        migrations['0001_spam'],
                    )
                ],
                e.problems,
            )
        # Nothing should have changed (no merge mode!)
        self.assertListEqual(
            (("fakeapp", "0002_eggs"),),
            MigrationHistory.objects.values_list("app_name", "migration"),
        )
        # Apply with merge
        migrate_app(migrations, target_name=None, merge=True, fake=False)
        # We should finish with all migrations
        self.assertListEqual(
            (("fakeapp", "0001_spam"),
             ("fakeapp", "0002_eggs"),
             ("fakeapp", "0003_alter_spam"),),
            MigrationHistory.objects.values_list("app_name", "migration"),
        )
        # Now roll them backwards
        migrate_app(migrations, target_name="0002", fake=False)
        migrate_app(migrations, target_name="0001", fake=True)
        migrate_app(migrations, target_name="zero", fake=False)
        # Finish with none
        self.assertEqual(list(MigrationHistory.objects.all()), [])

    def test_alter_column_null(self):
        """Migration 0003 makes the weight column nullable; rolling back restores NOT NULL."""

        def null_ok(eat_exception=True):
            """Return True iff a NULL weight can currently be inserted."""
            from django.db import connection, transaction
            # the DBAPI introspection module fails on postgres NULLs.
            cursor = connection.cursor()
            # SQLite has weird now()
            if db.backend_name == "sqlite3":
                now_func = "DATETIME('NOW')"
            # So does SQLServer... should we be using a backend attribute?
            elif db.backend_name == "pyodbc":
                now_func = "GETDATE()"
            elif db.backend_name == "oracle":
                now_func = "SYSDATE"
            else:
                now_func = "NOW()"
            try:
                if db.backend_name == "pyodbc":
                    cursor.execute("SET IDENTITY_INSERT southtest_spam ON;")
                cursor.execute("INSERT INTO southtest_spam (id, weight, expires, name) VALUES (100, NULL, %s, 'whatever');" % now_func)
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit during the insert attempt.
            except Exception:
                if eat_exception:
                    transaction.rollback()
                    return False
                else:
                    raise
            else:
                cursor.execute("DELETE FROM southtest_spam")
                transaction.commit()
                return True

        MigrationHistory.objects.all().delete()
        migrations = Migrations("fakeapp")
        # by default name is NOT NULL
        migrate_app(migrations, target_name="0002", fake=False)
        self.assertFalse(null_ok())
        self.assertListEqual(
            (("fakeapp", "0001_spam"),
             ("fakeapp", "0002_eggs"),),
            MigrationHistory.objects.values_list("app_name", "migration"),
        )
        # after 0003, it should be NULL
        migrate_app(migrations, target_name="0003", fake=False)
        self.assertTrue(null_ok(False))
        self.assertListEqual(
            (("fakeapp", "0001_spam"),
             ("fakeapp", "0002_eggs"),
             ("fakeapp", "0003_alter_spam"),),
            MigrationHistory.objects.values_list("app_name", "migration"),
        )
        # make sure it is NOT NULL again
        migrate_app(migrations, target_name="0002", fake=False)
        self.assertFalse(null_ok(), 'weight not null after migration')
        self.assertListEqual(
            (("fakeapp", "0001_spam"),
             ("fakeapp", "0002_eggs"),),
            MigrationHistory.objects.values_list("app_name", "migration"),
        )
        # finish with no migrations, otherwise other tests fail...
        migrate_app(migrations, target_name="zero", fake=False)
        self.assertEqual(list(MigrationHistory.objects.all()), [])

    def test_dependencies(self):
        """forwards_plan() honours cross-app dependencies between the fixtures."""
        fakeapp = Migrations("fakeapp")
        otherfakeapp = Migrations("otherfakeapp")
        # Test a simple path
        self.assertEqual([fakeapp['0001_spam'],
                          fakeapp['0002_eggs'],
                          fakeapp['0003_alter_spam']],
                         fakeapp['0003_alter_spam'].forwards_plan())
        # And a complex one.
        self.assertEqual(
            [
                fakeapp['0001_spam'],
                otherfakeapp['0001_first'],
                otherfakeapp['0002_second'],
                fakeapp['0002_eggs'],
                fakeapp['0003_alter_spam'],
                otherfakeapp['0003_third']
            ],
            otherfakeapp['0003_third'].forwards_plan(),
        )
class TestMigrationUtils(Monkeypatcher):
    """get_app_label() must use the segment just before '.models'."""

    installed_apps = ["fakeapp", "otherfakeapp"]

    def test_get_app_label(self):
        # (expected label, dotted module path) pairs, shallow and nested.
        cases = (
            ("southtest", "southtest.models"),
            ("baz", "foo.bar.baz.models"),
        )
        for expected_label, module_path in cases:
            fake_app = self.create_fake_app(module_path)
            self.assertEqual(expected_label, get_app_label(fake_app))
class TestUtils(unittest.TestCase):
    """Unit tests for the standalone helpers flatten() and depends()."""

    def test_flatten(self):
        """flatten() expands arbitrarily nested iterators/lists into one stream."""
        self.assertEqual([], list(flatten(iter([]))))
        self.assertEqual([], list(flatten(iter([iter([]), ]))))
        self.assertEqual([1], list(flatten(iter([1]))))
        self.assertEqual([1, 2], list(flatten(iter([1, 2]))))
        self.assertEqual([1, 2], list(flatten(iter([iter([1]), 2]))))
        self.assertEqual([1, 2], list(flatten(iter([iter([1, 2])]))))
        self.assertEqual([1, 2, 3], list(flatten(iter([iter([1, 2]), 3]))))
        self.assertEqual([1, 2, 3],
                         list(flatten(iter([iter([1]), iter([2]), 3]))))
        self.assertEqual([1, 2, 3],
                         list(flatten([[1], [2], 3])))

    def test_depends(self):
        """
        depends(target, lookup) returns the dependency closure in application
        order: every dependency exactly once, the target last.  The expected
        lists below pin the exact (deterministic) ordering.
        """
        graph = {'A1': []}
        self.assertEqual(['A1'],
                         depends('A1', lambda n: graph[n]))
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2']}
        self.assertEqual(['A1', 'A2', 'A3'],
                         depends('A3', lambda n: graph[n]))
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A1']}
        self.assertEqual(['A1', 'A2', 'A3'],
                         depends('A3', lambda n: graph[n]))
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A1', 'B1'],
                 'B1': []}
        self.assertEqual(
            ['B1', 'A1', 'A2', 'A3'],
            depends('A3', lambda n: graph[n]),
        )
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A1', 'B2'],
                 'B1': [],
                 'B2': ['B1']}
        self.assertEqual(
            ['B1', 'B2', 'A1', 'A2', 'A3'],
            depends('A3', lambda n: graph[n]),
        )
        graph = {'A1': [],
                 'A2': ['A1', 'B1'],
                 'A3': ['A2'],
                 'B1': ['A1']}
        self.assertEqual(['A1', 'B1', 'A2', 'A3'],
                         depends('A3', lambda n: graph[n]))
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A1', 'B2'],
                 'B1': [],
                 'B2': ['B1', 'C1'],
                 'C1': ['B1']}
        self.assertEqual(
            ['B1', 'C1', 'B2', 'A1', 'A2', 'A3'],
            depends('A3', lambda n: graph[n]),
        )
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'B2', 'A1', 'C1'],
                 'B1': ['A1'],
                 'B2': ['B1', 'C2', 'A1'],
                 'C1': ['B1'],
                 'C2': ['C1', 'A1'],
                 'C3': ['C2']}
        self.assertEqual(
            ['A1', 'B1', 'C1', 'C2', 'B2', 'A2', 'A3'],
            depends('A3', lambda n: graph[n]),
        )

    def assertCircularDependency(self, trace, target, graph):
        "Custom assertion that checks a circular dependency is detected correctly."
        # First check that the right exception type is raised at all...
        self.assertRaises(
            exceptions.CircularDependency,
            depends,
            target,
            lambda n: graph[n],
        )
        # ...then that the reported cycle trace matches exactly.
        try:
            depends(target, lambda n: graph[n])
        except exceptions.CircularDependency as e:
            self.assertEqual(trace, e.trace)

    def test_depends_cycle(self):
        """Cycles anywhere in the graph are detected with the minimal trace."""
        # Self-loop on the target itself.
        graph = {'A1': ['A1']}
        self.assertCircularDependency(
            ['A1', 'A1'],
            'A1',
            graph,
        )
        # Self-loop on an intermediate node.
        graph = {'A1': [],
                 'A2': ['A1', 'A2'],
                 'A3': ['A2']}
        self.assertCircularDependency(
            ['A2', 'A2'],
            'A3',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A3'],
                 'A4': ['A3']}
        self.assertCircularDependency(
            ['A3', 'A3'],
            'A4',
            graph,
        )
        # Two-node cycle crossing apps.
        graph = {'A1': ['B1'],
                 'B1': ['A1']}
        self.assertCircularDependency(
            ['A1', 'B1', 'A1'],
            'A1',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1', 'B2'],
                 'A3': ['A2'],
                 'B1': [],
                 'B2': ['B1', 'A2'],
                 'B3': ['B2']}
        self.assertCircularDependency(
            ['A2', 'B2', 'A2'],
            'A3',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1', 'B3'],
                 'A3': ['A2'],
                 'B1': [],
                 'B2': ['B1', 'A2'],
                 'B3': ['B2']}
        self.assertCircularDependency(
            ['A2', 'B3', 'B2', 'A2'],
            'A3',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'B2'],
                 'A4': ['A3'],
                 'B1': ['A3'],
                 'B2': ['B1']}
        self.assertCircularDependency(
            ['A3', 'B2', 'B1', 'A3'],
            'A4',
            graph,
        )
class TestManualChanges(Monkeypatcher):
    """Name suggestion for hand-written (manual) change sets."""

    installed_apps = ["fakeapp", "otherfakeapp"]

    def test_suggest_name(self):
        """suggest_name() reflects whether a field or an index was added."""
        migrations = Migrations('fakeapp')
        # An added field yields an add_field_<app>_<field> name.
        # (assertEqual instead of the deprecated assertEquals alias,
        # consistent with the rest of this file.)
        change = ManualChanges(migrations,
                               [],
                               ['fakeapp.slug'],
                               [])
        self.assertEqual(change.suggest_name(),
                         'add_field_fakeapp_slug')
        # An added index yields an add_index_<app>_<field> name.
        change = ManualChanges(migrations,
                               [],
                               [],
                               ['fakeapp.slug'])
        self.assertEqual(change.suggest_name(),
                         'add_index_fakeapp_slug')
| apache-2.0 |
antonve/s4-project-mooc | common/lib/xmodule/xmodule/xml_module.py | 35 | 21777 | import json
import copy
import logging
import os
import sys
from lxml import etree
from xblock.fields import Dict, Scope, ScopeIds
from xblock.runtime import KvsFieldData
from xmodule.x_module import XModuleDescriptor, DEPRECATION_VSCOMPAT_EVENT
from xmodule.modulestore.inheritance import own_metadata, InheritanceKeyValueStore
from xmodule.modulestore import EdxJSONEncoder
import dogstats_wrapper as dog_stats_api
from lxml.etree import ( # pylint: disable=no-name-in-module
Element, ElementTree, XMLParser,
)
# Module-level logger, named after this module.
log = logging.getLogger(__name__)

# assume all XML files are persisted as utf-8.
# Shared parser instance: DTD loading/validation disabled, comments and
# purely-whitespace text nodes stripped on parse.
EDX_XML_PARSER = XMLParser(dtd_validation=False, load_dtd=False,
                           remove_comments=True, remove_blank_text=True,
                           encoding='utf-8')
def name_to_pathname(name):
    """
    Convert a location name for use in a path: replace ':' with '/'.
    This allows users of the xml format to organize content into directories.
    """
    return "/".join(name.split(":"))
def is_pointer_tag(xml_obj):
    """
    Check if xml_obj is a pointer tag: <blah url_name="something" />.
    No children, one attribute named url_name, no text.
    Special case for course roots: the pointer is
    <course url_name="something" org="myorg" course="course">
    xml_obj: an etree Element
    Returns a bool.
    """
    # Course roots carry org/course alongside url_name; everything else
    # has exactly the single url_name attribute.
    if xml_obj.tag == "course":
        expected_attr = {'url_name', 'course', 'org'}
    else:
        expected_attr = {'url_name'}
    if len(xml_obj) != 0:
        return False
    if set(xml_obj.attrib.keys()) != expected_attr:
        return False
    text = xml_obj.text
    return not (text is not None and len(text.strip()) > 0)
def serialize_field(value):
    """
    Return a string version of the value (where value is the JSON-formatted,
    internally stored value).
    If the value is a string, then we simply return what was passed in.
    Otherwise, we return json.dumps on the input value.
    """
    # Strings pass through untouched; everything else is JSON-encoded
    # with the project's encoder.
    if not isinstance(value, basestring):
        return json.dumps(value, cls=EdxJSONEncoder)
    return value
def deserialize_field(field, value):
    """
    Deserialize the string version to the value stored internally.

    Note that this is not the same as the value returned by from_json, as model
    types typically store their value internally as JSON. By default, this method
    will return the result of calling json.loads on the supplied value, unless
    json.loads throws a TypeError, or the type of the value returned by json.loads
    is not supported for this class (from_json throws an Error). In either of
    those cases, this method returns the input value.
    """
    try:
        parsed = json.loads(value)
    except (ValueError, TypeError):
        # Support older serialized version, which was just a plain string
        # rather than the result of json.dumps.
        return value
    if parsed is None:
        return parsed
    try:
        # Probe whether the field type accepts the decoded value.
        field.from_json(parsed)
    except (ValueError, TypeError):
        # Older serialized form: e.g. '3.4' stored for a String field decodes
        # to the float 3.4, which from_json rejects -- keep the original '3.4'.
        return value
    return parsed
class XmlParserMixin(object):
    """
    Class containing XML parsing functionality shared between XBlock and XModuleDescriptor.

    NOTE: this module is Python 2 code (py2 ``raise`` syntax, ``iteritems``).
    """
    # Extension to append to filename paths
    filename_extension = 'xml'

    # Catch-all for attributes the parser does not recognise, preserved so
    # import -> export round-trips do not lose data.
    xml_attributes = Dict(help="Map of unhandled xml attributes, used only for storage between import and export",
                          default={}, scope=Scope.settings)

    # VS[compat]. Backwards compatibility code that can go away after
    # importing 2012 courses.
    # A set of metadata key conversions that we want to make
    metadata_translations = {
        'slug': 'url_name',
        'name': 'display_name',
    }

    @classmethod
    def _translate(cls, key):
        """
        VS[compat] -- map a legacy metadata key to its modern name (identity
        for keys with no translation).
        """
        return cls.metadata_translations.get(key, key)

    # The attributes will be removed from the definition xml passed
    # to definition_from_xml, and from the xml returned by definition_to_xml
    # Note -- url_name isn't in this list because it's handled specially on
    # import and export.
    metadata_to_strip = ('data_dir',
                         'tabs', 'grading_policy',
                         'discussion_blackouts',
                         # VS[compat] -- remove the below attrs once everything is in the CMS
                         'course', 'org', 'url_name', 'filename',
                         # Used for storing xml attributes between import and export, for roundtrips
                         'xml_attributes')

    # These are exported to the policy file rather than as XML attributes.
    metadata_to_export_to_policy = ('discussion_topics', 'checklists')

    @staticmethod
    def _get_metadata_from_xml(xml_object, remove=True):
        """
        Extract the metadata from the XML.

        Reads the text of a child <meta> element; by default the element is
        removed from the tree.  Returns '' when there is no <meta> child.
        """
        meta = xml_object.find('meta')
        if meta is None:
            return ''
        dmdata = meta.text
        if remove:
            xml_object.remove(meta)
        return dmdata

    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """
        Return the definition to be passed to the newly created descriptor
        during from_xml

        xml_object: An etree Element
        """
        raise NotImplementedError("%s does not implement definition_from_xml" % cls.__name__)

    @classmethod
    def clean_metadata_from_xml(cls, xml_object):
        """
        Remove any attribute named for a field with scope Scope.settings from the supplied
        xml_object
        """
        for field_name, field in cls.fields.items():
            if field.scope == Scope.settings and xml_object.get(field_name) is not None:
                del xml_object.attrib[field_name]

    @classmethod
    def file_to_xml(cls, file_object):
        """
        Used when this module wants to parse a file object to xml
        that will be converted to the definition.

        Returns an lxml Element
        """
        # Uses the module-level parser (comments/blank text stripped, utf-8).
        return etree.parse(file_object, parser=EDX_XML_PARSER).getroot()  # pylint: disable=no-member

    @classmethod
    def load_file(cls, filepath, fs, def_id):  # pylint: disable=invalid-name
        """
        Open the specified file in fs, and call cls.file_to_xml on it,
        returning the lxml object.

        Add details and reraise on error.
        """
        try:
            with fs.open(filepath) as xml_file:
                return cls.file_to_xml(xml_file)
        except Exception as err:
            # Add info about where we are, but keep the traceback
            # (py2-only three-argument raise preserves the original traceback).
            msg = 'Unable to load file contents at path %s for item %s: %s ' % (
                filepath, def_id, err)
            raise Exception, msg, sys.exc_info()[2]

    @classmethod
    def load_definition(cls, xml_object, system, def_id, id_generator):
        """
        Load a descriptor definition from the specified xml_object.
        Subclasses should not need to override this except in special
        cases (e.g. html module)

        Args:
            xml_object: an lxml.etree._Element containing the definition to load
            system: the modulestore system (aka, runtime) which accesses data and provides access to services
            def_id: the definition id for the block--used to compute the usage id and asides ids
            id_generator: used to generate the usage_id
        """
        # VS[compat] -- the filename attr should go away once everything is
        # converted.  (note: make sure html files still work once this goes away)
        filename = xml_object.get('filename')
        if filename is None:
            # Inline definition: work on a copy so the caller's tree is untouched.
            definition_xml = copy.deepcopy(xml_object)
            filepath = ''
        else:
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=["location:xmlparser_util_mixin_load_definition_filename"]
            )
            filepath = cls._format_filepath(xml_object.tag, filename)

            # VS[compat]
            # TODO (cpennington): If the file doesn't exist at the right path,
            # give the class a chance to fix it up. The file will be written out
            # again in the correct format.  This should go away once the CMS is
            # online and has imported all current (fall 2012) courses from xml
            if not system.resources_fs.exists(filepath) and hasattr(cls, 'backcompat_paths'):
                dog_stats_api.increment(
                    DEPRECATION_VSCOMPAT_EVENT,
                    tags=["location:xmlparser_util_mixin_load_definition_backcompat"]
                )
                candidates = cls.backcompat_paths(filepath)
                for candidate in candidates:
                    if system.resources_fs.exists(candidate):
                        filepath = candidate
                        break

            definition_xml = cls.load_file(filepath, system.resources_fs, def_id)
            usage_id = id_generator.create_usage(def_id)
            system.parse_asides(definition_xml, def_id, usage_id, id_generator)

            # Add the attributes from the pointer node
            definition_xml.attrib.update(xml_object.attrib)

        definition_metadata = cls._get_metadata_from_xml(definition_xml)
        cls.clean_metadata_from_xml(definition_xml)
        definition, children = cls.definition_from_xml(definition_xml, system)
        if definition_metadata:
            definition['definition_metadata'] = definition_metadata
        definition['filename'] = [filepath, filename]

        return definition, children

    @classmethod
    def load_metadata(cls, xml_object):
        """
        Read the metadata attributes from this xml_object.

        Returns a dictionary {key: value}.
        """
        metadata = {'xml_attributes': {}}
        for attr, val in xml_object.attrib.iteritems():
            # VS[compat]. Remove after all key translations done
            attr = cls._translate(attr)

            if attr in cls.metadata_to_strip:
                if attr in ('course', 'org', 'url_name', 'filename'):
                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:xmlparser_util_mixin_load_metadata",
                            "metadata:{}".format(attr),
                        )
                    )
                # don't load these
                continue

            if attr not in cls.fields:
                # Unknown attribute: stash it for round-tripping on export.
                metadata['xml_attributes'][attr] = val
            else:
                metadata[attr] = deserialize_field(cls.fields[attr], val)
        return metadata

    @classmethod
    def apply_policy(cls, metadata, policy):
        """
        Add the keys in policy to metadata, after processing them
        through the attrmap.  Updates the metadata dict in place.
        """
        for attr, value in policy.iteritems():
            attr = cls._translate(attr)
            if attr not in cls.fields:
                # Store unknown attributes coming from policy.json
                # in such a way that they will export to xml unchanged
                metadata['xml_attributes'][attr] = value
            else:
                metadata[attr] = value

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):  # pylint: disable=unused-argument
        """
        Use `node` to construct a new block.

        Arguments:
            node (etree.Element): The xml node to parse into an xblock.
            runtime (:class:`.Runtime`): The runtime to use while parsing.
            keys (:class:`.ScopeIds`): The keys identifying where this block
                will store its data.
            id_generator (:class:`.IdGenerator`): An object that will allow the
                runtime to generate correct definition and usage ids for
                children of this block.

        Returns (XBlock): The newly parsed XBlock
        """
        # VS[compat] -- just have the url_name lookup, once translation is done
        url_name = node.get('url_name', node.get('slug'))
        def_id = id_generator.create_definition(node.tag, url_name)
        usage_id = id_generator.create_usage(def_id)

        # VS[compat] -- detect new-style each-in-a-file mode
        if is_pointer_tag(node):
            # new style:
            # read the actual definition file--named using url_name.replace(':','/')
            filepath = cls._format_filepath(node.tag, name_to_pathname(url_name))
            definition_xml = cls.load_file(filepath, runtime.resources_fs, def_id)
            runtime.parse_asides(definition_xml, def_id, usage_id, id_generator)
        else:
            filepath = None
            definition_xml = node
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=["location:xmlparser_util_mixin_parse_xml"]
            )

        # Note: removes metadata.
        definition, children = cls.load_definition(definition_xml, runtime, def_id, id_generator)

        # VS[compat] -- make Ike's github preview links work in both old and
        # new file layouts
        if is_pointer_tag(node):
            # new style -- contents actually at filepath
            definition['filename'] = [filepath, filepath]

        metadata = cls.load_metadata(definition_xml)

        # move definition metadata into dict
        dmdata = definition.get('definition_metadata', '')
        if dmdata:
            metadata['definition_metadata_raw'] = dmdata
            try:
                metadata.update(json.loads(dmdata))
            except Exception as err:
                # Best-effort: keep the raw text and record the failure
                # instead of aborting the whole parse.
                log.debug('Error in loading metadata %r', dmdata, exc_info=True)
                metadata['definition_metadata_err'] = str(err)

        # Set/override any metadata specified by policy
        cls.apply_policy(metadata, runtime.get_policy(usage_id))

        field_data = {}
        field_data.update(metadata)
        field_data.update(definition)
        field_data['children'] = children

        field_data['xml_attributes']['filename'] = definition.get('filename', ['', None])  # for git link
        kvs = InheritanceKeyValueStore(initial_values=field_data)
        field_data = KvsFieldData(kvs)

        return runtime.construct_xblock_from_class(
            cls,
            # We're loading a descriptor, so student_id is meaningless
            ScopeIds(None, node.tag, def_id, usage_id),
            field_data,
        )

    @classmethod
    def _format_filepath(cls, category, name):
        # e.g. ('html', 'intro') -> u'html/intro.xml'
        return u'{category}/{name}.{ext}'.format(category=category,
                                                 name=name,
                                                 ext=cls.filename_extension)

    def export_to_file(self):
        """If this returns True, write the definition of this descriptor to a separate
        file.

        NOTE: Do not override this without a good reason.  It is here
        specifically for customtag...
        """
        return True

    def add_xml_to_node(self, node):
        """
        For exporting, set data on `node` from ourselves.
        """
        # Get the definition
        xml_object = self.definition_to_xml(self.runtime.export_fs)
        self.clean_metadata_from_xml(xml_object)

        # Set the tag on both nodes so we get the file path right.
        xml_object.tag = self.category
        node.tag = self.category

        # Add the non-inherited metadata
        for attr in sorted(own_metadata(self)):
            # don't want e.g. data_dir
            if attr not in self.metadata_to_strip and attr not in self.metadata_to_export_to_policy:
                val = serialize_field(self._field_data.get(self, attr))
                try:
                    xml_object.set(attr, val)
                except Exception:
                    # Deliberately broad: a single unserializable attribute
                    # must not abort the whole export -- but make it loud.
                    logging.exception(
                        u'Failed to serialize metadata attribute %s with value %s in module %s. This could mean data loss!!!',
                        attr, val, self.url_name
                    )

        for key, value in self.xml_attributes.items():
            if key not in self.metadata_to_strip:
                xml_object.set(key, serialize_field(value))

        if self.export_to_file():
            # Write the definition to a file
            url_path = name_to_pathname(self.url_name)
            filepath = self._format_filepath(self.category, url_path)
            self.runtime.export_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True)
            with self.runtime.export_fs.open(filepath, 'w') as fileobj:
                ElementTree(xml_object).write(fileobj, pretty_print=True, encoding='utf-8')
        else:
            # Write all attributes from xml_object onto node
            node.clear()
            node.tag = xml_object.tag
            node.text = xml_object.text
            node.tail = xml_object.tail
            node.attrib.update(xml_object.attrib)
            node.extend(xml_object)

        node.set('url_name', self.url_name)

        # Special case for course pointers:
        if self.category == 'course':
            # add org and course attributes on the pointer tag
            node.set('org', self.location.org)
            node.set('course', self.location.course)

    def definition_to_xml(self, resource_fs):
        """
        Return a new etree Element object created from this modules definition.
        """
        raise NotImplementedError(
            "%s does not implement definition_to_xml" % self.__class__.__name__)

    @property
    def non_editable_metadata_fields(self):
        """
        Return a list of all metadata fields that cannot be edited.
        """
        non_editable_fields = super(XmlParserMixin, self).non_editable_metadata_fields
        non_editable_fields.append(XmlParserMixin.xml_attributes)
        return non_editable_fields
class XmlDescriptor(XmlParserMixin, XModuleDescriptor):  # pylint: disable=abstract-method
    """
    Mixin class for standardized parsing of XModule xml.

    The parse_xml/add_xml_to_node overrides below exist only to shim legacy
    subclasses that still define from_xml/export_to_xml; they compare bound
    class attributes to detect whether a subclass overrode the legacy API.
    """
    @classmethod
    def from_xml(cls, xml_data, system, id_generator):
        """
        Creates an instance of this descriptor from the supplied xml_data.
        This may be overridden by subclasses.

        Args:
            xml_data (str): A string of xml that will be translated into data and children
                for this module
            system (:class:`.XMLParsingSystem):
            id_generator (:class:`xblock.runtime.IdGenerator`): Used to generate the
                usage_ids and definition_ids when loading this xml
        """
        # Shim from from_xml to the parse_xml defined in XmlParserMixin.
        # This only exists to satisfy subclasses that both:
        #    a) define from_xml themselves
        #    b) call super(..).from_xml(..)
        return super(XmlDescriptor, cls).parse_xml(
            etree.fromstring(xml_data),  # pylint: disable=no-member
            system,
            None,  # This is ignored by XmlParserMixin
            id_generator,
        )

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Interpret the parsed XML in `node`, creating an XModuleDescriptor.
        """
        # Identity check: did a subclass replace from_xml with its own?
        if cls.from_xml != XmlDescriptor.from_xml:
            # Skip the parse_xml from XmlParserMixin to get the shim parse_xml
            # from XModuleDescriptor, which actually calls `from_xml`.
            return super(XmlParserMixin, cls).parse_xml(node, runtime, keys, id_generator)  # pylint: disable=bad-super-call
        else:
            return super(XmlDescriptor, cls).parse_xml(node, runtime, keys, id_generator)

    def export_to_xml(self, resource_fs):  # pylint: disable=unused-argument
        """
        Returns an xml string representing this module, and all modules
        underneath it.  May also write required resources out to resource_fs.

        Assumes that modules have single parentage (that no module appears twice
        in the same course), and that it is thus safe to nest modules as xml
        children as appropriate.

        The returned XML should be able to be parsed back into an identical
        XModuleDescriptor using the from_xml method with the same system, org,
        and course
        """
        # Shim from export_to_xml to the add_xml_to_node defined in XmlParserMixin.
        # This only exists to satisfy subclasses that both:
        #    a) define export_to_xml themselves
        #    b) call super(..).export_to_xml(..)
        node = Element(self.category)
        super(XmlDescriptor, self).add_xml_to_node(node)
        return etree.tostring(node)  # pylint: disable=no-member

    def add_xml_to_node(self, node):
        """
        Export this :class:`XModuleDescriptor` as XML, by setting attributes on the provided
        `node`.
        """
        # Same identity-check trick as parse_xml, for the export direction.
        if self.export_to_xml != XmlDescriptor.export_to_xml:
            # Skip the add_xml_to_node from XmlParserMixin to get the shim add_xml_to_node
            # from XModuleDescriptor, which actually calls `export_to_xml`.
            super(XmlParserMixin, self).add_xml_to_node(node)  # pylint: disable=bad-super-call
        else:
            super(XmlDescriptor, self).add_xml_to_node(node)
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.