repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
koehlermichael/olympia | apps/amo/tests/test_readonly.py | 15 | 2098 | from django.conf import settings
from django.db import models
from django.utils import importlib
import MySQLdb as mysql
from nose.tools import assert_raises, eq_
from pyquery import PyQuery as pq
import amo.tests
from addons.models import Addon
def pubdir(ob):
for name in dir(ob):
if not name.startswith('_'):
yield name
def quickcopy(val):
if isinstance(val, dict):
val = val.copy()
elif isinstance(val, list):
val = list(val)
return val
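# The test case below snapshots every public Django setting in setUp, switches
# the site into read-only mode, and restores the snapshot in tearDown.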
class ReadOnlyModeTest(amo.tests.TestCase):
extra = ('amo.middleware.ReadOnlyMiddleware',)
def setUp(self):
super(ReadOnlyModeTest, self).setUp()
models.signals.pre_save.connect(self.db_error)
models.signals.pre_delete.connect(self.db_error)
self.old_settings = dict((k, quickcopy(getattr(settings, k)))
for k in pubdir(settings))
settings.SLAVE_DATABASES = ['default']
settings_module = importlib.import_module(settings.SETTINGS_MODULE)
settings_module.read_only_mode(settings._wrapped.__dict__)
self.client.handler.load_middleware()
def tearDown(self):
for k in pubdir(settings):
if k not in self.old_settings:
delattr(settings, k)
for k, v in self.old_settings.items():
try:
setattr(settings, k, v)
except AttributeError:
# __weakref__
pass
models.signals.pre_save.disconnect(self.db_error)
models.signals.pre_delete.disconnect(self.db_error)
super(ReadOnlyModeTest, self).tearDown()
def db_error(self, *args, **kwargs):
raise mysql.OperationalError("You can't do this in read-only mode.")
def test_db_error(self):
assert_raises(mysql.OperationalError, Addon.objects.create, id=12)
def test_bail_on_post(self):
r = self.client.post('/en-US/firefox/')
eq_(r.status_code, 503)
title = pq(r.content)('title').text()
assert title.startswith('Maintenance in progress'), title
| bsd-3-clause |
eclipse-ease-addons/engines | jython/org.jython/Lib/encodings/koi8_u.py | 593 | 14018 | """ Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-u',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
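# A hypothetical usage sketch (assuming the codec has been registered on the
# encodings search path): u'\u0430'.encode('koi8-u') == '\xc1' and
# '\xc1'.decode('koi8-u') == u'\u0430'.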
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN)
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| epl-1.0 |
vliulan/pydelicious | tools/dlcs_feeds.py | 23 | 3051 | #!/usr/bin/env python
try:
from hashlib import md5
except ImportError:
from md5 import md5
from itertools import chain
import locale
import optparse
import os
from pprint import pprint, pformat
import re
import sys
import pydelicious
#ENCODING = locale.getpreferredencoding()
__usage__ = """%prog [options] [command] [args...] """
__options__ = [
# (('-e', '--encoding'),{'default':ENCODING,
# 'help':"Use custom character encoding [locale: %default]"}),
(('-C', '--clear-screen'),{'action':'store_true'}),
(('-L', '--list-feeds'),{'action':'store_true','help':'List available feeds for given parameters. '}),
(('-f', '--format'),{'default':'rss'}),
(('-k', '--key'),{}),
(('-l', '--url'),{}),
(('-H', '--urlmd5'),{}),
(('-t', '--tag'),{'dest':'tags','action':'append'}),
(('-u', '--username'),{}),
# (('-v', '--verboseness'),{'default':0,
# 'help':"TODO: Increase or set DEBUG (defaults to 0 or the DLCS_DEBUG env. var.)"})
]
def parse_argv(options, argv, usage="%prog [args] [options]"):
parser = optparse.OptionParser(usage)
for opt in options:
parser.add_option(*opt[0], **opt[1])
optsv, args = parser.parse_args(argv)
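# Derive the urlmd5 parameter from a plain URL when only the latter is given;
# the delicious v2 feeds address individual bookmarks by the MD5 of their URL.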
if optsv.url and not optsv.urlmd5:
optsv.urlmd5 = md5(optsv.url).hexdigest()
if optsv.tags:
optsv.tag = ' '.join(optsv.tags)
else:
optsv.tag = None
return parser, optsv, args
find_patterns = re.compile('%\(([^)]*)\)s').findall
def feeds_for_params(**params):
kandidates = {}
"Feed paths that need more parameters. "
matches = []
"Feed paths that match the current parameters. "
params = set([p for p in params if type(params[p]) != type(None)])
for name, path in pydelicious.delicious_v2_feeds.items():
ptrns = set(find_patterns(path))
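# Set comparison: a proper superset (>) means the feed path references
# parameters beyond those supplied, so it is only a candidate; equality means
# an exact match.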
if ptrns > params:
kandidates[name] = ptrns.difference(params)
elif ptrns == params:
matches.append(name)
return matches, kandidates
def main(argv):
optparser, opts, args = parse_argv(__options__, argv, __usage__)
kwds = {}
for k in ('format','username','tag','urlmd5','key'):
v = getattr(opts, k)
if type(v) != type(None):
kwds[k] = v
if opts.clear_screen:
if os.name == 'posix':
os.system('clear')
else:
os.system('cls')
matches, candidates = feeds_for_params(**kwds)
if opts.list_feeds:
print >>sys.stderr,"Feeds for current parameters (%s)" % kwds
if matches:
print "Exact matches:"
for m in matches:
print '\t'+m+':', pydelicious.delicious_v2_feeds[m] % kwds
if candidates:
print "Candidates:"
for m in candidates:
print '\t'+pydelicious.delicious_v2_feeds[m]
sys.exit()
path = args and args.pop(0)
if not path:
if len(matches) == 1:
path = matches[0]
print >>sys.stderr, "Setting path to %s" % path
elif matches:
print >>sys.stderr, "Multiple paths for given parameters, see -L"
sys.exit()
assert path in pydelicious.delicious_v2_feeds
return pydelicious.dlcs_feed(path, **kwds)
def _main():
try:
print main(sys.argv[1:])
except KeyboardInterrupt:
print >>sys.stderr, "User interrupt"
# Entry point
if __name__ == '__main__':
_main()
# vim:noet:
| bsd-2-clause |
postlund/home-assistant | homeassistant/components/netgear_lte/binary_sensor.py | 7 | 1376 | """Support for Netgear LTE binary sensors."""
import logging
from homeassistant.components.binary_sensor import DOMAIN, BinarySensorDevice
from homeassistant.exceptions import PlatformNotReady
from . import CONF_MONITORED_CONDITIONS, DATA_KEY, LTEEntity
from .sensor_types import BINARY_SENSOR_CLASSES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info):
"""Set up Netgear LTE binary sensor devices."""
if discovery_info is None:
return
modem_data = hass.data[DATA_KEY].get_modem_data(discovery_info)
if not modem_data or not modem_data.data:
raise PlatformNotReady
binary_sensor_conf = discovery_info[DOMAIN]
monitored_conditions = binary_sensor_conf[CONF_MONITORED_CONDITIONS]
binary_sensors = []
for sensor_type in monitored_conditions:
binary_sensors.append(LTEBinarySensor(modem_data, sensor_type))
async_add_entities(binary_sensors)
class LTEBinarySensor(LTEEntity, BinarySensorDevice):
"""Netgear LTE binary sensor entity."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return getattr(self.modem_data.data, self.sensor_type)
@property
def device_class(self):
"""Return the class of binary sensor."""
return BINARY_SENSOR_CLASSES[self.sensor_type]
| apache-2.0 |
ArcherSys/ArcherSys | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py | 1 | 9950 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '&lt;test&gt;\'&quot;&#xD;&amp;&#xA;foo'
converted_apos = converted.replace("'", '&apos;')
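# The character references &#xD; and &#xA; preserve the raw CR and LF from
# `original` in the escaped output.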
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| mit |
jphilipsen05/zulip | zerver/migrations/0053_emailchangestatus.py | 19 | 1109 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-23 05:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('zerver', '0052_auto_fix_realmalias_realm_nullable'),
]
operations = [
migrations.CreateModel(
name='EmailChangeStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_email', models.EmailField(max_length=254)),
('old_email', models.EmailField(max_length=254)),
('updated_at', models.DateTimeField(auto_now=True)),
('status', models.IntegerField(default=0)),
('realm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='zerver.Realm')),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| apache-2.0 |
ecosoft-odoo/odoo | addons/portal/wizard/share_wizard.py | 224 | 10392 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
UID_ROOT = SUPERUSER_ID
SHARED_DOCS_MENU = "Documents"
SHARED_DOCS_CHILD_MENU = "Shared Documents"
class share_wizard_portal(osv.TransientModel):
"""Inherited share wizard to automatically create appropriate
menus in the selected portal upon sharing with a portal group."""
_inherit = "share.wizard"
def _user_type_selection(self, cr, uid, context=None):
selection = super(share_wizard_portal, self)._user_type_selection(cr, uid, context=context)
selection.extend([('existing',_('Users you already shared with')),
('groups',_('Existing Groups (e.g Portal Groups)'))])
return selection
_columns = {
'user_ids': fields.many2many('res.users', 'share_wizard_res_user_rel', 'share_id', 'user_id', 'Existing users', domain=[('share', '=', True)]),
'group_ids': fields.many2many('res.groups', 'share_wizard_res_group_rel', 'share_id', 'group_id', 'Existing groups', domain=[('share', '=', False)]),
}
def _check_preconditions(self, cr, uid, wizard_data, context=None):
if wizard_data.user_type == 'existing':
self._assert(wizard_data.user_ids,
_('Please select at least one user to share with'),
context=context)
elif wizard_data.user_type == 'groups':
self._assert(wizard_data.group_ids,
_('Please select at least one group to share with'),
context=context)
return super(share_wizard_portal, self)._check_preconditions(cr, uid, wizard_data, context=context)
def _create_or_get_submenu_named(self, cr, uid, parent_menu_id, menu_name, context=None):
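# Look up a child of parent_menu_id named menu_name, creating it at the bottom
# of the menu sequence if missing. With no parent given, fall back to the root
# menu of the group passed in context (used for portal groups).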
if context is None:
context = {}
Menus = self.pool.get('ir.ui.menu')
if not parent_menu_id and context.get('group_id'):
cxt = dict(context)
cxt['ir.ui.menu.full_list'] = True
parent_menu_ids = Menus.search(cr, SUPERUSER_ID,
[('groups_id', 'in', [context.get('group_id')]), ('parent_id', '=', False)], limit=1, context=cxt)
parent_menu_id = parent_menu_ids and parent_menu_ids[0] or False
if not parent_menu_id:
return False
parent_menu = Menus.browse(cr, uid, parent_menu_id) # No context
menu_id = None
max_seq = 10
for child_menu in parent_menu.child_id:
max_seq = max(max_seq, child_menu.sequence)
if child_menu.name == menu_name:
menu_id = child_menu.id
break
if not menu_id:
# not found, create it
menu_id = Menus.create(cr, UID_ROOT,
{'name': menu_name,
'parent_id': parent_menu.id,
'sequence': max_seq + 10, # at the bottom
})
return menu_id
def _sharing_root_menu_id(self, cr, uid, portal, context=None):
"""Create or retrieve root ID of sharing menu in portal menu
:param portal: browse_record of shared group, constructed with a context WITHOUT language
"""
if context is None:
context = {}
ctx = dict(context, group_id=portal.id)
parent_menu_id = self._create_or_get_submenu_named(cr, uid, False, SHARED_DOCS_MENU, context=ctx)
if parent_menu_id:
child_menu_id = self._create_or_get_submenu_named(cr, uid, parent_menu_id, SHARED_DOCS_CHILD_MENU, context=context)
return child_menu_id
def _create_shared_data_menu(self, cr, uid, wizard_data, portal, context=None):
"""Create sharing menus in portal menu according to share wizard options.
:param wizard_data: browse_record of share.wizard
:param portal: browse_record of shared group, constructed with a context WITHOUT language
"""
root_menu_id = self._sharing_root_menu_id(cr, uid, portal, context=context)
if not root_menu_id:
# no specific parent menu, cannot create the sharing menu at all.
return
# Create the shared action and menu
action_def = self._shared_action_def(cr, uid, wizard_data, context=None)
action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, action_def)
menu_data = {'name': action_def['name'],
'sequence': 10,
'action': 'ir.actions.act_window,'+str(action_id),
'parent_id': root_menu_id,
'icon': 'STOCK_JUSTIFY_FILL'}
menu_id = self.pool.get('ir.ui.menu').create(cr, UID_ROOT, menu_data)
return menu_id
def _create_share_users_group(self, cr, uid, wizard_data, context=None):
# Override of super() to handle the possibly selected "existing users"
# and "existing groups".
# In both cases, we call super() to create the share group, but when
# sharing with existing groups, we will later delete it, and copy its
# access rights and rules to the selected groups.
super_result = super(share_wizard_portal,self)._create_share_users_group(cr, uid, wizard_data, context=context)
# For sharing with existing groups, we don't create a share group, instead we'll
# alter the rules of the groups so they can see the shared data
if wizard_data.group_ids:
# get the list of portals and the related groups to install their menus.
res_groups = self.pool.get('res.groups')
all_portal_group_ids = res_groups.search(cr, UID_ROOT, [('is_portal', '=', True)])
# populate result lines with the users of each group and
# setup the menu for portal groups
for group in wizard_data.group_ids:
if group.id in all_portal_group_ids:
self._create_shared_data_menu(cr, uid, wizard_data, group, context=context)
for user in group.users:
new_line = {'user_id': user.id,
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
selected_group_ids = [x.id for x in wizard_data.group_ids]
res_groups.write(cr, SUPERUSER_ID, selected_group_ids, {'implied_ids': [(4, super_result[0])]})
elif wizard_data.user_ids:
# must take care of existing users, by adding them to the new group, which is super_result[0],
# and adding the shortcut
selected_user_ids = [x.id for x in wizard_data.user_ids]
self.pool.get('res.users').write(cr, UID_ROOT, selected_user_ids, {'groups_id': [(4, super_result[0])]})
self._setup_action_and_shortcut(cr, uid, wizard_data, selected_user_ids, make_home=False, context=context)
# populate the result lines for existing users too
for user in wizard_data.user_ids:
new_line = { 'user_id': user.id,
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
return super_result
def copy_share_group_access_and_delete(self, cr, wizard_data, share_group_id, context=None):
# In the case of sharing with existing groups, the strategy is to copy
# access rights and rules from the share group, so that we can delete the
# temporary share group afterwards while the selected groups keep access.
if not wizard_data.group_ids: return
Groups = self.pool.get('res.groups')
Rules = self.pool.get('ir.rule')
Rights = self.pool.get('ir.model.access')
share_group = Groups.browse(cr, UID_ROOT, share_group_id)
share_rule_ids = [r.id for r in share_group.rule_groups]
for target_group in wizard_data.group_ids:
# Link the rules to the group. This is appropriate because as of
# v6.1, the algorithm for combining them will OR the rules, hence
# extending the visible data.
Rules.write(cr, UID_ROOT, share_rule_ids, {'groups': [(4,target_group.id)]})
_logger.debug("Linked sharing rules from temporary sharing group to group %s", target_group)
# Copy the access rights. This is appropriate too because
# groups have the UNION of all permissions granted by their
# access right lines.
for access_line in share_group.model_access:
Rights.copy(cr, UID_ROOT, access_line.id, default={'group_id': target_group.id})
_logger.debug("Copied access rights from temporary sharing group to group %s", target_group)
# finally, delete it after removing its users
Groups.write(cr, UID_ROOT, [share_group_id], {'users': [(6,0,[])]})
Groups.unlink(cr, UID_ROOT, [share_group_id])
_logger.debug("Deleted temporary sharing group %s", share_group_id)
def _finish_result_lines(self, cr, uid, wizard_data, share_group_id, context=None):
super(share_wizard_portal,self)._finish_result_lines(cr, uid, wizard_data, share_group_id, context=context)
self.copy_share_group_access_and_delete(cr, wizard_data, share_group_id, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
redhat-openstack/django | tests/reserved_names/tests.py | 59 | 1693 | from __future__ import absolute_import
import datetime
from django.test import TestCase
from .models import Thing
class ReservedNameTests(TestCase):
def generate(self):
day1 = datetime.date(2005, 1, 1)
t = Thing.objects.create(when='a', join='b', like='c', drop='d',
alter='e', having='f', where=day1, has_hyphen='h')
day2 = datetime.date(2006, 2, 2)
u = Thing.objects.create(when='h', join='i', like='j', drop='k',
alter='l', having='m', where=day2)
def test_simple(self):
day1 = datetime.date(2005, 1, 1)
t = Thing.objects.create(when='a', join='b', like='c', drop='d',
alter='e', having='f', where=day1, has_hyphen='h')
self.assertEqual(t.when, 'a')
day2 = datetime.date(2006, 2, 2)
u = Thing.objects.create(when='h', join='i', like='j', drop='k',
alter='l', having='m', where=day2)
self.assertEqual(u.when, 'h')
def test_order_by(self):
self.generate()
things = [t.when for t in Thing.objects.order_by('when')]
self.assertEqual(things, ['a', 'h'])
def test_fields(self):
self.generate()
v = Thing.objects.get(pk='a')
self.assertEqual(v.join, 'b')
self.assertEqual(v.where, datetime.date(year=2005, month=1, day=1))
def test_dates(self):
self.generate()
resp = Thing.objects.dates('where', 'year')
self.assertEqual(list(resp), [
datetime.date(2005, 1, 1),
datetime.date(2006, 1, 1),
])
def test_month_filter(self):
self.generate()
self.assertEqual(Thing.objects.filter(where__month=1)[0].when, 'a')
| bsd-3-clause |
Dhivyap/ansible | lib/ansible/modules/windows/win_reboot.py | 28 | 4324 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_reboot
short_description: Reboot a windows machine
description:
- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands.
- For non-Windows targets, use the M(reboot) module instead.
version_added: '2.1'
options:
pre_reboot_delay:
description:
- Seconds to wait before reboot. Passed as a parameter to the reboot command.
type: int
default: 2
aliases: [ pre_reboot_delay_sec ]
post_reboot_delay:
description:
- Seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully.
- This is useful if you want wait for something to settle despite your connection already working.
type: int
default: 0
version_added: '2.4'
aliases: [ post_reboot_delay_sec ]
shutdown_timeout:
description:
- Maximum seconds to wait for shutdown to occur.
- Increase this timeout for very slow hardware, large update applications, etc.
- This option has been removed since Ansible 2.5 as the win_reboot behavior has changed.
type: int
default: 600
aliases: [ shutdown_timeout_sec ]
reboot_timeout:
description:
- Maximum seconds to wait for machine to re-appear on the network and respond to a test command.
- This timeout is evaluated separately for both reboot verification and test command success so maximum clock time is actually twice this value.
type: int
default: 600
aliases: [ reboot_timeout_sec ]
connect_timeout:
description:
- Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again.
type: int
default: 5
aliases: [ connect_timeout_sec ]
test_command:
description:
- Command to expect success for to determine the machine is ready for management.
type: str
default: whoami
msg:
description:
- Message to display to users.
type: str
default: Reboot initiated by Ansible
notes:
- If a shutdown was already scheduled on the system, C(win_reboot) will abort the scheduled shutdown and enforce its own shutdown.
- Beware that when C(win_reboot) returns, the Windows system may not have settled yet and some base services could be in limbo.
This can result in unexpected behavior. Check the examples for ways to mitigate this.
- The connection user must have the C(SeRemoteShutdownPrivilege) privilege enabled, see
U(https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/force-shutdown-from-a-remote-system)
for more information.
seealso:
- module: reboot
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
- name: Reboot the machine with all defaults
win_reboot:
- name: Reboot a slow machine that might have lots of updates to apply
win_reboot:
reboot_timeout: 3600
# Install a Windows feature and reboot if necessary
- name: Install IIS Web-Server
win_feature:
name: Web-Server
register: iis_install
- name: Reboot when Web-Server feature requires it
win_reboot:
when: iis_install.reboot_required
# One way to ensure the system is reliable, is to set WinRM to a delayed startup
- name: Ensure WinRM starts when the system has settled and is ready to work reliably
win_service:
name: WinRM
start_mode: delayed
# Additionally, you can add a delay before running the next task
- name: Reboot a machine that takes time to settle after being booted
win_reboot:
post_reboot_delay: 120
# Or you can make win_reboot validate exactly what you need to work before running the next task
- name: Validate that the netlogon service has started, before running the next task
win_reboot:
test_command: 'exit (Get-Service -Name Netlogon).Status -ne "Running"'
'''
RETURN = r'''
rebooted:
description: True if the machine was rebooted.
returned: always
type: bool
sample: true
elapsed:
description: The number of seconds that elapsed waiting for the system to be rebooted.
returned: always
type: float
sample: 23.2
'''
| gpl-3.0 |
Stratio/stratio-cassandra | pylib/cqlshlib/test/basecase.py | 45 | 2555 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
from itertools import izip
from os.path import dirname, join, normpath, islink
cqlshlog = logging.getLogger('test_cqlsh')
try:
# a backport of python2.7 unittest features, so we can test against older
# pythons as necessary. python2.7 users who don't care about testing older
# versions need not install.
import unittest2 as unittest
except ImportError:
import unittest
rundir = dirname(__file__)
path_to_cqlsh = normpath(join(rundir, '..', '..', '..', 'bin', 'cqlsh'))
# symlink a ".py" file to cqlsh main script, so we can load it as a module
modulepath = join(rundir, 'cqlsh.py')
try:
if islink(modulepath):
os.unlink(modulepath)
except OSError:
pass
os.symlink(path_to_cqlsh, modulepath)
sys.path.append(rundir)
import cqlsh
cql = cqlsh.cassandra.cluster.Cluster
TEST_HOST = os.environ.get('CQL_TEST_HOST', '127.0.0.1')
TEST_PORT = int(os.environ.get('CQL_TEST_PORT', 9042))
class BaseTestCase(unittest.TestCase):
def assertNicelyFormattedTableHeader(self, line, msg=None):
return self.assertRegexpMatches(line, r'^ +\w+( +\| \w+)*\s*$', msg=msg)
def assertNicelyFormattedTableRule(self, line, msg=None):
return self.assertRegexpMatches(line, r'^-+(\+-+)*\s*$', msg=msg)
def assertNicelyFormattedTableData(self, line, msg=None):
return self.assertRegexpMatches(line, r'^ .* \| ', msg=msg)
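# Strip the common leading indentation from a multiline string, similar in
# spirit to textwrap.dedent, after dropping a leading blank line.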
def dedent(s):
lines = [ln.rstrip() for ln in s.splitlines()]
if lines[0] == '':
lines = lines[1:]
spaces = [len(line) - len(line.lstrip()) for line in lines if line]
minspace = min(spaces if len(spaces) > 0 else (0,))
return '\n'.join(line[minspace:] for line in lines)
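# Group an iterable into non-overlapping tuples of num items: zipping num
# references to the same iterator yields successive chunks.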
def at_a_time(i, num):
return izip(*([iter(i)] * num))
| apache-2.0 |
msmbuilder/mdentropy | mdentropy/cli/main.py | 1 | 1372 | from __future__ import print_function, absolute_import, division
import sys
import argparse
from .. import __version__
from . import dmutinf
from . import dtent
def main():
help = ('MDEntropy is a python library that allows users to perform '
'information-theoretic analyses on molecular dynamics (MD) '
'trajectories.')
p = argparse.ArgumentParser(description=help)
p.add_argument(
'-V', '--version',
action='version',
version='mdentropy %s' % __version__,
)
sub_parsers = p.add_subparsers(
metavar='command',
dest='cmd',
)
dmutinf.configure_parser(sub_parsers)
dtent.configure_parser(sub_parsers)
if len(sys.argv) == 1:
sys.argv.append('-h')
args = p.parse_args()
args_func(args, p)
def args_func(args, p):
try:
args.func(args, p)
except RuntimeError as e:
sys.exit("Error: %s" % e)
except Exception as e:
if e.__class__.__name__ not in ('ScannerError', 'ParserError'):
message = """\
An unexpected error has occurred with mdentropy (version %s), please
consider sending the following traceback to the mdentropy GitHub issue tracker at:
https://github.com/msmbuilder/mdentropy/issues
"""
print(message % __version__, file=sys.stderr)
raise # as if we did not catch it
| mit |
Naoya-Horiguchi/linux | tools/perf/scripts/python/export-to-postgresql.py | 368 | 39760 | # export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
from __future__ import print_function
import os
import sys
import struct
import datetime
# To use this script you will need to have installed package python-pyside which
# provides LGPL-licensed Python bindings for Qt. You will also need the package
# libqt4-sql-psql for Qt postgresql support.
#
# The script assumes postgresql is running on the local machine and that the
# user has postgresql permissions to create databases. Examples of installing
# postgresql and adding such a user are:
#
# fedora:
#
# $ sudo yum install postgresql postgresql-server qt-postgresql
# $ sudo su - postgres -c initdb
# $ sudo service postgresql start
# $ sudo su - postgres
# $ createuser -s <your user id here> # Older versions may not support -s, in which case answer the prompt below:
# Shall the new role be a superuser? (y/n) y
# $ sudo yum install python-pyside
#
# Alternately, to use Python3 and/or pyside 2, one of the following:
# $ sudo yum install python3-pyside
# $ pip install --user PySide2
# $ pip3 install --user PySide2
#
# ubuntu:
#
# $ sudo apt-get install postgresql
# $ sudo su - postgres
# $ createuser -s <your user id here>
# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql
#
# Alternately, to use Python3 and/or pyside 2, one of the following:
#
# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql
# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql
# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql
#
# An example of using this script with Intel PT:
#
# $ perf record -e intel_pt//u ls
# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
# 2015-05-29 12:49:23.464364 Creating database...
# 2015-05-29 12:49:26.281717 Writing to intermediate files...
# 2015-05-29 12:49:27.190383 Copying to database...
# 2015-05-29 12:49:28.140451 Removing intermediate files...
# 2015-05-29 12:49:28.147451 Adding primary keys
# 2015-05-29 12:49:28.655683 Adding foreign keys
# 2015-05-29 12:49:29.365350 Done
#
# To browse the database, psql can be used e.g.
#
# $ psql pt_example
# pt_example=# select * from samples_view where id < 100;
# pt_example=# \d+
# pt_example=# \d+ samples_view
# pt_example=# \q
#
# An example of using the database is provided by the script
# exported-sql-viewer.py. Refer to that script for details.
#
# Tables:
#
# The tables largely correspond to perf tools' data structures. They are largely self-explanatory.
#
# samples
#
# 'samples' is the main table. It represents what instruction was executing at a point in time
# when something (a selected event) happened. The memory address is the instruction pointer or 'ip'.
#
# calls
#
# 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
# 'calls' is only created when the 'calls' option to this script is specified.
#
# call_paths
#
# 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'.
# 'calls_paths' is only created when the 'calls' option to this script is specified.
#
# branch_types
#
# 'branch_types' provides descriptions for each type of branch.
#
# comm_threads
#
# 'comm_threads' shows how 'comms' relates to 'threads'.
#
# comms
#
# 'comms' contains a record for each 'comm' - the name given to the executable that is running.
#
# dsos
#
# 'dsos' contains a record for each executable file or library.
#
# machines
#
# 'machines' can be used to distinguish virtual machines if virtualization is supported.
#
# selected_events
#
# 'selected_events' contains a record for each kind of event that has been sampled.
#
# symbols
#
# 'symbols' contains a record for each symbol. Only symbols that have samples are present.
#
# threads
#
# 'threads' contains a record for each thread.
#
# Views:
#
# Most of the tables have views for more friendly display. The views are:
#
# calls_view
# call_paths_view
# comm_threads_view
# dsos_view
# machines_view
# samples_view
# symbols_view
# threads_view
#
# More examples of browsing the database with psql:
# Note that some of the examples are not the most optimal SQL query.
# Note that call information is only available if the script's 'calls' option has been used.
#
# Top 10 function calls (not aggregated by symbol):
#
# SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
#
# Top 10 function calls (aggregated by symbol):
#
# SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
# SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
# FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
#
# Note that the branch count gives a rough estimation of cpu usage, so functions
# that took a long time but have a relatively low branch count must have spent time
# waiting.
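#
# For example, a rough sketch to surface likely "waiting" functions (the
# thresholds here are illustrative, not prescribed by perf):
#
# SELECT * FROM calls_view WHERE elapsed_time > 1000000 AND branch_count < 10000
# ORDER BY elapsed_time DESC LIMIT 10;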
#
# Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
#
# SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
#
# Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
#
# SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
#
# Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
#
# SELECT * FROM calls_view WHERE parent_call_path_id = 254;
#
# Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670)
#
# SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
#
# Show transactions:
#
# SELECT * FROM samples_view WHERE event = 'transactions';
#
# Note transaction start has 'in_tx' true whereas, transaction end has 'in_tx' false.
# Transaction aborts have branch_type_name 'transaction abort'
#
# Show transaction aborts:
#
# SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
#
# To print a call stack requires walking the call_paths table. For example this python script:
# #!/usr/bin/python2
#
# import sys
# from PySide.QtSql import *
#
# if __name__ == '__main__':
# if (len(sys.argv) < 3):
# print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
# raise Exception("Too few arguments")
# dbname = sys.argv[1]
# call_path_id = sys.argv[2]
# db = QSqlDatabase.addDatabase('QPSQL')
# db.setDatabaseName(dbname)
# if not db.open():
# raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
# query = QSqlQuery(db)
# print " id ip symbol_id symbol dso_id dso_short_name"
# while call_path_id != 0 and call_path_id != 1:
# ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
# if not ret:
# raise Exception("Query failed: " + query.lastError().text())
# if not query.next():
# raise Exception("Query failed")
# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
# call_path_id = query.value(6)
pyside_version_1 = True
if not "pyside-version-1" in sys.argv:
try:
from PySide2.QtSql import *
pyside_version_1 = False
except:
pass
if pyside_version_1:
from PySide.QtSql import *
if sys.version_info < (3, 0):
def toserverstr(str):
return str
def toclientstr(str):
return str
else:
# Assume UTF-8 server_encoding and client_encoding
def toserverstr(str):
return bytes(str, "UTF_8")
def toclientstr(str):
return bytes(str, "UTF_8")
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQconnectdb.argtypes = [ c_char_p ]
PQfinish = libpq.PQfinish
PQfinish.argtypes = [ c_void_p ]
PQstatus = libpq.PQstatus
PQstatus.restype = c_int
PQstatus.argtypes = [ c_void_p ]
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQexec.argtypes = [ c_void_p, c_char_p ]
PQresultStatus = libpq.PQresultStatus
PQresultStatus.restype = c_int
PQresultStatus.argtypes = [ c_void_p ]
PQputCopyData = libpq.PQputCopyData
PQputCopyData.restype = c_int
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.restype = c_int
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
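# A rough sketch of how these bindings drive the COPY protocol later in this
# script (the names 'conn' and 'buf' are illustrative, not defined here):
#   res = PQexec(conn, toclientstr('COPY samples FROM STDIN'))
#   if PQresultStatus(res) != 4:  # 4 == PGRES_COPY_IN
#       raise Exception("COPY failed")
#   PQputCopyData(conn, buf, len(buf))
#   PQputCopyEnd(conn, None)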
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
def printerr(*args, **kw_args):
print(*args, file=sys.stderr, **kw_args)
def printdate(*args, **kw_args):
print(datetime.datetime.today(), *args, sep=' ', **kw_args)
def usage():
printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]");
printerr("where: columns 'all' or 'branches'");
printerr(" calls 'calls' => create calls and call_paths table");
printerr(" callchains 'callchains' => create call_paths table");
printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1");
raise Exception("Too few or bad arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
for i in range(3,len(sys.argv)):
if (sys.argv[i] == "calls"):
perf_db_export_calls = True
elif (sys.argv[i] == "callchains"):
perf_db_export_callchains = True
elif (sys.argv[i] == "pyside-version-1"):
pass
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
printdate("Creating database...")
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16),'
'c_thread_id bigint,'
'c_time bigint,'
'exec_flag boolean)')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer,'
'parent_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
do_query(query, 'CREATE TABLE ptwrite ('
'id bigint NOT NULL,'
'payload bigint,'
'exact_ip boolean)')
do_query(query, 'CREATE TABLE cbr ('
'id bigint NOT NULL,'
'cbr integer,'
'mhz integer,'
'percent integer)')
do_query(query, 'CREATE TABLE mwait ('
'id bigint NOT NULL,'
'hints integer,'
'extensions integer)')
do_query(query, 'CREATE TABLE pwre ('
'id bigint NOT NULL,'
'cstate integer,'
'subcstate integer,'
'hw boolean)')
do_query(query, 'CREATE TABLE exstop ('
'id bigint NOT NULL,'
'exact_ip boolean)')
do_query(query, 'CREATE TABLE pwrx ('
'id bigint NOT NULL,'
'deepest_cstate integer,'
'last_cstate integer,'
'wake_reason integer)')
do_query(query, 'CREATE TABLE context_switches ('
'id bigint NOT NULL,'
'machine_id bigint,'
'time bigint,'
'cpu integer,'
'thread_out_id bigint,'
'comm_out_id bigint,'
'thread_in_id bigint,'
'comm_in_id bigint,'
'flags integer)')
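# The *_view views below denormalize the raw id columns into human-readable
# names and flags for interactive querying.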
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
'id,'
'pid,'
'root_dir,'
'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
' FROM machines')
do_query(query, 'CREATE VIEW dsos_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'short_name,'
'long_name,'
'build_id'
' FROM dsos')
do_query(query, 'CREATE VIEW symbols_view AS '
'SELECT '
'id,'
'name,'
'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
'dso_id,'
'sym_start,'
'sym_end,'
'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
' FROM symbols')
do_query(query, 'CREATE VIEW threads_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'process_id,'
'pid,'
'tid'
' FROM threads')
do_query(query, 'CREATE VIEW comm_threads_view AS '
'SELECT '
'comm_id,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
' FROM comm_threads')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE VIEW call_paths_view AS '
'SELECT '
'c.id,'
'to_hex(c.ip) AS ip,'
'c.symbol_id,'
'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
'(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
'c.parent_id,'
'to_hex(p.ip) AS parent_ip,'
'p.symbol_id AS parent_symbol_id,'
'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
'(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
if perf_db_export_calls:
do_query(query, 'CREATE VIEW calls_view AS '
'SELECT '
'calls.id,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'call_path_id,'
'to_hex(ip) AS ip,'
'symbol_id,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'call_time,'
'return_time,'
'return_time - call_time AS elapsed_time,'
'branch_count,'
'insn_count,'
'cyc_count,'
'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,'
'call_id,'
'return_id,'
'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
'parent_call_path_id,'
'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx,'
'insn_count,'
'cyc_count,'
'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC'
' FROM samples')
do_query(query, 'CREATE VIEW ptwrite_view AS '
'SELECT '
'ptwrite.id,'
'time,'
'cpu,'
'to_hex(payload) AS payload_hex,'
'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip'
' FROM ptwrite'
' INNER JOIN samples ON samples.id = ptwrite.id')
do_query(query, 'CREATE VIEW cbr_view AS '
'SELECT '
'cbr.id,'
'time,'
'cpu,'
'cbr,'
'mhz,'
'percent'
' FROM cbr'
' INNER JOIN samples ON samples.id = cbr.id')
do_query(query, 'CREATE VIEW mwait_view AS '
'SELECT '
'mwait.id,'
'time,'
'cpu,'
'to_hex(hints) AS hints_hex,'
'to_hex(extensions) AS extensions_hex'
' FROM mwait'
' INNER JOIN samples ON samples.id = mwait.id')
do_query(query, 'CREATE VIEW pwre_view AS '
'SELECT '
'pwre.id,'
'time,'
'cpu,'
'cstate,'
'subcstate,'
'CASE WHEN hw=FALSE THEN \'False\' ELSE \'True\' END AS hw'
' FROM pwre'
' INNER JOIN samples ON samples.id = pwre.id')
do_query(query, 'CREATE VIEW exstop_view AS '
'SELECT '
'exstop.id,'
'time,'
'cpu,'
'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip'
' FROM exstop'
' INNER JOIN samples ON samples.id = exstop.id')
do_query(query, 'CREATE VIEW pwrx_view AS '
'SELECT '
'pwrx.id,'
'time,'
'cpu,'
'deepest_cstate,'
'last_cstate,'
'CASE WHEN wake_reason=1 THEN \'Interrupt\''
' WHEN wake_reason=2 THEN \'Timer Deadline\''
' WHEN wake_reason=4 THEN \'Monitored Address\''
' WHEN wake_reason=8 THEN \'HW\''
' ELSE CAST ( wake_reason AS VARCHAR(2) )'
'END AS wake_reason'
' FROM pwrx'
' INNER JOIN samples ON samples.id = pwrx.id')
do_query(query, 'CREATE VIEW power_events_view AS '
'SELECT '
'samples.id,'
'samples.time,'
'samples.cpu,'
'selected_events.name AS event,'
'FORMAT(\'%6s\', cbr.cbr) AS cbr,'
'FORMAT(\'%6s\', cbr.mhz) AS MHz,'
'FORMAT(\'%5s\', cbr.percent) AS percent,'
'to_hex(mwait.hints) AS hints_hex,'
'to_hex(mwait.extensions) AS extensions_hex,'
'FORMAT(\'%3s\', pwre.cstate) AS cstate,'
'FORMAT(\'%3s\', pwre.subcstate) AS subcstate,'
'CASE WHEN pwre.hw=FALSE THEN \'False\' WHEN pwre.hw=TRUE THEN \'True\' ELSE NULL END AS hw,'
'CASE WHEN exstop.exact_ip=FALSE THEN \'False\' WHEN exstop.exact_ip=TRUE THEN \'True\' ELSE NULL END AS exact_ip,'
'FORMAT(\'%3s\', pwrx.deepest_cstate) AS deepest_cstate,'
'FORMAT(\'%3s\', pwrx.last_cstate) AS last_cstate,'
'CASE WHEN pwrx.wake_reason=1 THEN \'Interrupt\''
' WHEN pwrx.wake_reason=2 THEN \'Timer Deadline\''
' WHEN pwrx.wake_reason=4 THEN \'Monitored Address\''
' WHEN pwrx.wake_reason=8 THEN \'HW\''
' ELSE FORMAT(\'%2s\', pwrx.wake_reason)'
'END AS wake_reason'
' FROM cbr'
' FULL JOIN mwait ON mwait.id = cbr.id'
' FULL JOIN pwre ON pwre.id = cbr.id'
' FULL JOIN exstop ON exstop.id = cbr.id'
' FULL JOIN pwrx ON pwrx.id = cbr.id'
' INNER JOIN samples ON samples.id = coalesce(cbr.id, mwait.id, pwre.id, exstop.id, pwrx.id)'
' INNER JOIN selected_events ON selected_events.id = samples.evsel_id'
' ORDER BY samples.id')
do_query(query, 'CREATE VIEW context_switches_view AS '
'SELECT '
'context_switches.id,'
'context_switches.machine_id,'
'context_switches.time,'
'context_switches.cpu,'
'th_out.pid AS pid_out,'
'th_out.tid AS tid_out,'
'comm_out.comm AS comm_out,'
'th_in.pid AS pid_in,'
'th_in.tid AS tid_in,'
'comm_in.comm AS comm_in,'
'CASE WHEN context_switches.flags = 0 THEN \'in\''
' WHEN context_switches.flags = 1 THEN \'out\''
' WHEN context_switches.flags = 3 THEN \'out preempt\''
' ELSE CAST ( context_switches.flags AS VARCHAR(11) )'
'END AS flags'
' FROM context_switches'
' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id'
' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id'
' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id'
' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id')
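# PostgreSQL binary COPY format: an 11-byte signature ("PGCOPY\n\377\r\n\0"),
# a 32-bit flags field and a 32-bit header-extension length (both zero here),
# followed by one record per tuple and a 16-bit trailer word of -1 (\377\377).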
file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = b"\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "wb+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb(toclientstr("dbname = " + dbname))
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, toclientstr(sql))
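# 4 is libpq's PGRES_COPY_IN result status, i.e. the server is ready to
# receive the COPY data stream.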
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls or perf_db_export_callchains:
call_path_file = open_output_file("call_path_table.bin")
if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
ptwrite_file = open_output_file("ptwrite_table.bin")
cbr_file = open_output_file("cbr_table.bin")
mwait_file = open_output_file("mwait_table.bin")
pwre_file = open_output_file("pwre_table.bin")
exstop_file = open_output_file("exstop_table.bin")
pwrx_file = open_output_file("pwrx_table.bin")
context_switches_file = open_output_file("context_switches_table.bin")
def trace_begin():
printdate("Writing to intermediate files...")
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown", 0, 0, 0)
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def is_table_empty(table_name):
do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1')
if query.next():
return False
return True
def drop(table_name):
do_query(query, 'DROP VIEW ' + table_name + '_view')
do_query(query, 'DROP TABLE ' + table_name)
def trace_end():
printdate("Copying to database...")
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls or perf_db_export_callchains:
copy_output_file(call_path_file, "call_paths")
if perf_db_export_calls:
copy_output_file(call_file, "calls")
copy_output_file(ptwrite_file, "ptwrite")
copy_output_file(cbr_file, "cbr")
copy_output_file(mwait_file, "mwait")
copy_output_file(pwre_file, "pwre")
copy_output_file(exstop_file, "exstop")
copy_output_file(pwrx_file, "pwrx")
copy_output_file(context_switches_file, "context_switches")
printdate("Removing intermediate files...")
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls or perf_db_export_callchains:
remove_output_file(call_path_file)
if perf_db_export_calls:
remove_output_file(call_file)
remove_output_file(ptwrite_file)
remove_output_file(cbr_file)
remove_output_file(mwait_file)
remove_output_file(pwre_file)
remove_output_file(exstop_file)
remove_output_file(pwrx_file)
remove_output_file(context_switches_file)
os.rmdir(output_dir_name)
printdate("Adding primary keys")
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE ptwrite ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE cbr ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE mwait ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE pwre ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE exstop ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE pwrx ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE context_switches ADD PRIMARY KEY (id)')
printdate("Adding foreign keys")
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comms '
'ADD CONSTRAINT threadfk FOREIGN KEY (c_thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
do_query(query, 'ALTER TABLE comms ADD has_calls boolean')
do_query(query, 'UPDATE comms SET has_calls = TRUE WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)')
do_query(query, 'ALTER TABLE ptwrite '
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
do_query(query, 'ALTER TABLE cbr '
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
do_query(query, 'ALTER TABLE mwait '
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
do_query(query, 'ALTER TABLE pwre '
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
do_query(query, 'ALTER TABLE exstop '
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
do_query(query, 'ALTER TABLE pwrx '
'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
do_query(query, 'ALTER TABLE context_switches '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT toutfk FOREIGN KEY (thread_out_id) REFERENCES threads (id),'
'ADD CONSTRAINT tinfk FOREIGN KEY (thread_in_id) REFERENCES threads (id),'
'ADD CONSTRAINT coutfk FOREIGN KEY (comm_out_id) REFERENCES comms (id),'
'ADD CONSTRAINT cinfk FOREIGN KEY (comm_in_id) REFERENCES comms (id)')
printdate("Dropping unused tables")
if is_table_empty("ptwrite"):
drop("ptwrite")
if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"):
do_query(query, 'DROP VIEW power_events_view')
drop("mwait")
drop("pwre")
drop("exstop")
drop("pwrx")
if is_table_empty("cbr"):
drop("cbr")
if is_table_empty("context_switches"):
drop("context_switches")
if (unhandled_count):
printdate("Warning: ", unhandled_count, " unhandled events")
printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
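# Each *_table() function below appends one tuple in PostgreSQL binary COPY
# format: a 16-bit column count, then for every column a 32-bit byte length
# followed by the value itself, all in network byte order (the "!" packs).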
def evsel_table(evsel_id, evsel_name, *x):
evsel_name = toserverstr(evsel_name)
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
root_dir = toserverstr(root_dir)
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, thread_id, time, exec_flag, *x):
comm_str = toserverstr(comm_str)
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s" + "iqiqiB"
value = struct.pack(fmt, 5, 8, comm_id, n, comm_str, 8, thread_id, 8, time, 1, exec_flag)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
short_name = toserverstr(short_name)
long_name = toserverstr(long_name)
build_id = toserverstr(build_id)
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
symbol_name = toserverstr(symbol_name)
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
name = toserverstr(name)
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, insn_cnt, cyc_cnt, *x):
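# In branches mode the pack omits period, weight, transaction and data_src,
# matching the narrower samples table created above.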
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiqiqiq", 20, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiqiqiq", 24, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, insn_cnt, cyc_cnt, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq"
value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt)
call_file.write(value)
def ptwrite(id, raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
value = struct.pack("!hiqiqiB", 3, 8, id, 8, payload, 1, exact_ip)
ptwrite_file.write(value)
def cbr(id, raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
MHz = (data[4] + 500) / 1000
percent = ((cbr * 1000 / data[2]) + 5) / 10
value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent))
cbr_file.write(value)
def mwait(id, raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
value = struct.pack("!hiqiiii", 3, 8, id, 4, hints, 4, extensions)
mwait_file.write(value)
def pwre(id, raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
value = struct.pack("!hiqiiiiiB", 4, 8, id, 4, cstate, 4, subcstate, 1, hw)
pwre_file.write(value)
def exstop(id, raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
value = struct.pack("!hiqiB", 2, 8, id, 1, exact_ip)
exstop_file.write(value)
def pwrx(id, raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
value = struct.pack("!hiqiiiiii", 4, 8, id, 4, deepest_cstate, 4, last_cstate, 4, wake_reason)
pwrx_file.write(value)
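# config identifies the synthesized Intel PT event type; the values appear to
# follow perf's PERF_SYNTH_INTEL_{PTWRITE,MWAIT,PWRE,EXSTOP,PWRX,CBR} enum
# order.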
def synth_data(id, config, raw_buf, *x):
if config == 0:
ptwrite(id, raw_buf)
elif config == 1:
mwait(id, raw_buf)
elif config == 2:
pwre(id, raw_buf)
elif config == 3:
exstop(id, raw_buf)
elif config == 4:
pwrx(id, raw_buf)
elif config == 5:
cbr(id, raw_buf)
def context_switch_table(id, machine_id, time, cpu, thread_out_id, comm_out_id, thread_in_id, comm_in_id, flags, *x):
fmt = "!hiqiqiqiiiqiqiqiqii"
value = struct.pack(fmt, 9, 8, id, 8, machine_id, 8, time, 4, cpu, 8, thread_out_id, 8, comm_out_id, 8, thread_in_id, 8, comm_in_id, 4, flags)
context_switches_file.write(value)
| gpl-2.0 |
hrubi/fabric | tests/test_decorators.py | 35 | 6800 | from __future__ import with_statement
import random
import sys
from nose.tools import eq_, ok_, assert_true, assert_false, assert_equal
import fudge
from fudge import Fake, with_fakes, patched_context
from fabric import decorators, tasks
from fabric.state import env
import fabric # for patching fabric.state.xxx
from fabric.tasks import _parallel_tasks, requires_parallel, execute
from fabric.context_managers import lcd, settings, hide
from utils import mock_streams
#
# Support
#
def fake_function(*args, **kwargs):
"""
Returns a ``fudge.Fake`` exhibiting function-like attributes.
Passes in all args/kwargs to the ``fudge.Fake`` constructor. However, if
``callable`` or ``expect_call`` kwargs are not given, ``callable`` will be
set to True by default.
"""
# Must define __name__ to be compatible with function wrapping mechanisms
# like @wraps().
if 'callable' not in kwargs and 'expect_call' not in kwargs:
kwargs['callable'] = True
return Fake(*args, **kwargs).has_attr(__name__='fake')
#
# @task
#
def test_task_returns_an_instance_of_wrappedfunctask_object():
def foo():
pass
task = decorators.task(foo)
ok_(isinstance(task, tasks.WrappedCallableTask))
def test_task_will_invoke_provided_class():
def foo(): pass
fake = Fake()
fake.expects("__init__").with_args(foo)
fudge.clear_calls()
fudge.clear_expectations()
foo = decorators.task(foo, task_class=fake)
fudge.verify()
def test_task_passes_args_to_the_task_class():
random_vars = ("some text", random.randint(100, 200))
def foo(): pass
fake = Fake()
fake.expects("__init__").with_args(foo, *random_vars)
fudge.clear_calls()
fudge.clear_expectations()
foo = decorators.task(foo, task_class=fake, *random_vars)
fudge.verify()
def test_passes_kwargs_to_the_task_class():
random_vars = {
"msg": "some text",
"number": random.randint(100, 200),
}
def foo(): pass
fake = Fake()
fake.expects("__init__").with_args(foo, **random_vars)
fudge.clear_calls()
fudge.clear_expectations()
foo = decorators.task(foo, task_class=fake, **random_vars)
fudge.verify()
def test_integration_tests_for_invoked_decorator_with_no_args():
r = random.randint(100, 200)
@decorators.task()
def foo():
return r
eq_(r, foo())
def test_integration_tests_for_decorator():
r = random.randint(100, 200)
@decorators.task(task_class=tasks.WrappedCallableTask)
def foo():
return r
eq_(r, foo())
def test_original_non_invoked_style_task():
r = random.randint(100, 200)
@decorators.task
def foo():
return r
eq_(r, foo())
#
# @runs_once
#
@with_fakes
def test_runs_once_runs_only_once():
"""
@runs_once prevents decorated func from running >1 time
"""
func = fake_function(expect_call=True).times_called(1)
task = decorators.runs_once(func)
for i in range(2):
task()
def test_runs_once_returns_same_value_each_run():
"""
@runs_once memoizes return value of decorated func
"""
return_value = "foo"
task = decorators.runs_once(fake_function().returns(return_value))
for i in range(2):
eq_(task(), return_value)
@decorators.runs_once
def single_run():
pass
def test_runs_once():
assert_false(hasattr(single_run, 'return_value'))
single_run()
assert_true(hasattr(single_run, 'return_value'))
assert_equal(None, single_run())
#
# @serial / @parallel
#
@decorators.serial
def serial():
pass
@decorators.serial
@decorators.parallel
def serial2():
pass
@decorators.parallel
@decorators.serial
def serial3():
pass
@decorators.parallel
def parallel():
pass
@decorators.parallel(pool_size=20)
def parallel2():
pass
fake_tasks = {
'serial': serial,
'serial2': serial2,
'serial3': serial3,
'parallel': parallel,
'parallel2': parallel2,
}
def parallel_task_helper(actual_tasks, expected):
commands_to_run = map(lambda x: [x], actual_tasks)
with patched_context(fabric.state, 'commands', fake_tasks):
eq_(_parallel_tasks(commands_to_run), expected)
def test_parallel_tasks():
for desc, task_names, expected in (
("One @serial-decorated task == no parallelism",
['serial'], False),
("One @parallel-decorated task == parallelism",
['parallel'], True),
("One @parallel-decorated and one @serial-decorated task == paralellism",
['parallel', 'serial'], True),
("Tasks decorated with both @serial and @parallel count as @parallel",
['serial2', 'serial3'], True)
):
parallel_task_helper.description = desc
yield parallel_task_helper, task_names, expected
del parallel_task_helper.description
def test_parallel_wins_vs_serial():
"""
@parallel takes precedence over @serial when both are used on one task
"""
ok_(requires_parallel(serial2))
ok_(requires_parallel(serial3))
@mock_streams('stdout')
def test_global_parallel_honors_runs_once():
"""
fab -P (or env.parallel) should honor @runs_once
"""
@decorators.runs_once
def mytask():
print("yolo") # 'Carpe diem' for stupid people!
with settings(hide('everything'), parallel=True):
execute(mytask, hosts=['localhost', '127.0.0.1'])
result = sys.stdout.getvalue()
eq_(result, "yolo\n")
assert result != "yolo\nyolo\n"
#
# @roles
#
@decorators.roles('test')
def use_roles():
pass
def test_roles():
assert_true(hasattr(use_roles, 'roles'))
assert_equal(use_roles.roles, ['test'])
#
# @hosts
#
@decorators.hosts('test')
def use_hosts():
pass
def test_hosts():
assert_true(hasattr(use_hosts, 'hosts'))
assert_equal(use_hosts.hosts, ['test'])
#
# @with_settings
#
def test_with_settings_passes_env_vars_into_decorated_function():
env.value = True
random_return = random.randint(1000, 2000)
def some_task():
return env.value
decorated_task = decorators.with_settings(value=random_return)(some_task)
ok_(some_task(), msg="sanity check")
eq_(random_return, decorated_task())
def test_with_settings_with_other_context_managers():
"""
with_settings() should take other context managers, and use them with other
overridden key/value pairs.
"""
env.testval1 = "outer 1"
prev_lcwd = env.lcwd
def some_task():
eq_(env.testval1, "inner 1")
ok_(env.lcwd.endswith("here")) # Should be the side effect of adding lcd("here") to settings
decorated_task = decorators.with_settings(
lcd("here"),
testval1="inner 1"
)(some_task)
decorated_task()
eq_(env.testval1, "outer 1")
eq_(env.lcwd, prev_lcwd)
| bsd-2-clause |
luyishisi/tensorflow | 4.Object_Detection/object_detection/builders/box_predictor_builder_test.py | 21 | 15071 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import mock
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
min_depth: 2
max_depth: 16
num_layers_before_predictor: 2
use_dropout: false
dropout_keep_probability: 0.4
kernel_size: 3
box_code_size: 3
apply_sigmoid_to_scores: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
self.assertEqual(box_predictor._min_depth, 2)
self.assertEqual(box_predictor._max_depth, 16)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertFalse(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.4)
self.assertTrue(box_predictor._apply_sigmoid_to_scores)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._min_depth, 0)
self.assertEqual(box_predictor._max_depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertTrue(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
self.assertFalse(box_predictor._apply_sigmoid_to_scores)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_builder_calls_fc_argscope_fn(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
op: FC
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
mock_argscope_fn.assert_called_with(hyperparams_proto, False)
self.assertEqual(box_predictor._fc_hyperparams, 'arg_scope')
def test_non_default_mask_rcnn_box_predictor(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
op: FC
"""
box_predictor_text_proto = """
mask_rcnn_box_predictor {
use_dropout: true
dropout_keep_probability: 0.8
box_code_size: 3
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
return (fc_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_fc_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertTrue(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 3)
def test_build_default_mask_rcnn_box_predictor(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor = box_predictor_builder.build(
argscope_fn=mock.Mock(return_value='arg_scope'),
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertFalse(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertFalse(box_predictor._predict_instance_masks)
self.assertFalse(box_predictor._predict_keypoints)
def test_build_box_predictor_with_mask_branch(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
self.assertFalse(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertTrue(box_predictor._predict_instance_masks)
self.assertEqual(box_predictor._mask_prediction_conv_depth, 512)
self.assertFalse(box_predictor._predict_keypoints)
class RfcnBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_fc_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_non_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
box_predictor_text_proto = """
rfcn_box_predictor {
num_spatial_bins_height: 4
num_spatial_bins_width: 4
depth: 4
box_code_size: 3
crop_height: 16
crop_width: 16
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 3)
self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
self.assertEqual(box_predictor._crop_size, [16, 16])
def test_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
self.assertEqual(box_predictor._crop_size, [12, 12])
if __name__ == '__main__':
tf.test.main()
| bsd-2-clause |
GladeRom/android_external_chromium_org | third_party/protobuf/python/google/protobuf/internal/wire_format.py | 561 | 8431 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Constants and static functions to support protocol buffer wire format."""
__author__ = 'robinson@google.com (Will Robinson)'
import struct
from google.protobuf import descriptor
from google.protobuf import message
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise message.EncodeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type
def UnpackTag(tag):
"""The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
"""
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
def ZigZagEncode(value):
"""ZigZag Transform: Encodes signed integers so that they can be
effectively used with varint encoding. See wire_format.h for
more details.
"""
if value >= 0:
return value << 1
return (value << 1) ^ (~0)
def ZigZagDecode(value):
"""Inverse of ZigZagEncode()."""
if not value & 0x1:
return value >> 1
return (value >> 1) ^ (~0)
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
# Have to convert to uint before calling UInt64ByteSize().
return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
return BytesByteSize(field_number, string.encode('utf-8'))
def BytesByteSize(field_number, b):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(len(b))
+ len(b))
def GroupByteSize(field_number, message):
return (2 * TagByteSize(field_number) # START and END group.
+ message.ByteSize())
def MessageByteSize(field_number, message):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(message.ByteSize())
+ message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
# First compute the sizes of the tags.
# There are 2 tags for the beginning and ending of the repeated group (both
# field number 1), one tag with field number 2 (type_id) and one with field
# number 3 (message).
total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
# Add the number of bytes for type_id.
total_size += _VarUInt64ByteSizeNoTag(field_number)
message_size = msg.ByteSize()
# The number of bytes for encoding the length of the message.
total_size += _VarUInt64ByteSizeNoTag(message_size)
# The size of the message.
total_size += message_size
return total_size
def TagByteSize(field_number):
"""Returns the bytes required to serialize a tag with this field number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
NON_PACKABLE_TYPES = (
descriptor.FieldDescriptor.TYPE_STRING,
descriptor.FieldDescriptor.TYPE_GROUP,
descriptor.FieldDescriptor.TYPE_MESSAGE,
descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
"""Return true iff packable = true is valid for fields of this type.
Args:
field_type: a FieldDescriptor::Type value.
Returns:
True iff fields of this type are packable.
"""
return field_type not in NON_PACKABLE_TYPES
| bsd-3-clause |
donkawechico/arguman.org | web/profiles/migrations/0002_notification.py | 7 | 1286 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('notification_type', models.IntegerField(choices=[(0, b'added-premise-for-contention'), (1, b'added-premise-for-premise'), (2, b'reported-as-fallacy'), (3, b'followed')])),
('is_read', models.BooleanField(default=False)),
('target_object_id', models.IntegerField(null=True, blank=True)),
('recipient', models.ForeignKey(related_name=b'notifications', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(related_name=b'sent_notifications', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['is_read', '-date_created'],
},
bases=(models.Model,),
),
]
| mit |
fedora-infra/bodhi | bodhi/server/services/errors.py | 2 | 3793 | # Copyright © 2011-2017 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Define utilities for handling errors in the service modules."""
import logging
import re
import mako.exceptions
import mako.lookup
import pyramid.httpexceptions
import pyramid.response
from bodhi.server.config import config
from bodhi.server.util import get_absolute_path
try: # pragma: no cover
from cornice.util import json_error
json_handler = json_error
jsonp_handler = json_error
except ImportError: # pragma: no cover
from cornice.renderer import CorniceRenderer
renderer = CorniceRenderer()
json_handler = renderer.render_errors
jsonp_handler = renderer.render_errors
log = logging.getLogger('bodhi')
def camel2space(camel):
"""
Convert CamelCaseText to Space Separated Text.
Args:
camel (str): Camel cased text you wish to convert to space separated text.
Returns:
str: A space separated version of the given camel cased text.
"""
regexp = r'([A-Z][a-z0-9]+|[a-z0-9]+|[A-Z0-9]+)'
return ' '.join(re.findall(regexp, camel))
def status2summary(status):
"""
Convert numerical HTTP status codes to human readable error strings.
For example, this converts 404 to "Not Found".
Args:
status (int): The status you wish to have a human readable string for.
Returns:
str: A human readable error message.
"""
cls = pyramid.httpexceptions.status_map[status]
camel = cls.__name__[4:]
return camel2space(camel)
class html_handler(pyramid.httpexceptions.HTTPError):
"""An HTML formatting handler for all our errors."""
def __init__(self, request):
"""
Initialize the HTML error handler to render an error message for human readers.
This method sets the Response body to rendered HTML version of the given errors, and the
status code to the code specified by errors.
Args:
request (pyramid.request.Request): The current Request.
"""
location = config.get('mako.directories')
directory = get_absolute_path(location)
lookup = mako.lookup.TemplateLookup(
directories=[directory],
output_encoding='utf-8',
input_encoding='utf-8',
)
template = lookup.get_template('errors.html')
errors = request.errors
try:
body = template.render(
errors=errors,
status=errors.status,
request=request,
summary=status2summary(errors.status),
)
except Exception:
log.error(mako.exceptions.text_error_template().render())
raise
# This thing inherits from both Exception *and* Response, so.. take the
# Response path in the diamond inheritance chain and ignore the
# exception side.
        # That turns this thing into a "real boy" like Pinocchio.
pyramid.response.Response.__init__(self, body)
self.status = errors.status
self.content_type = 'text/html'
| gpl-2.0 |
etos/django | tests/template_tests/filter_tests/test_slice.py | 428 | 1317 | from django.template.defaultfilters import slice_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class SliceTests(SimpleTestCase):
@setup({'slice01': '{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}'})
def test_slice01(self):
output = self.engine.render_to_string('slice01', {'a': 'a&b', 'b': mark_safe('a&b')})
self.assertEqual(output, '&b &b')
@setup({'slice02': '{% autoescape off %}{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}{% endautoescape %}'})
def test_slice02(self):
output = self.engine.render_to_string('slice02', {'a': 'a&b', 'b': mark_safe('a&b')})
self.assertEqual(output, '&b &b')
class FunctionTests(SimpleTestCase):
def test_zero_length(self):
self.assertEqual(slice_filter('abcdefg', '0'), '')
def test_index(self):
self.assertEqual(slice_filter('abcdefg', '1'), 'a')
def test_negative_index(self):
self.assertEqual(slice_filter('abcdefg', '-1'), 'abcdef')
def test_range(self):
self.assertEqual(slice_filter('abcdefg', '1:2'), 'b')
def test_range_multiple(self):
self.assertEqual(slice_filter('abcdefg', '1:3'), 'bc')
def test_range_step(self):
self.assertEqual(slice_filter('abcdefg', '0::2'), 'aceg')
| bsd-3-clause |
ryanbackman/zulip | zerver/webhooks/freshdesk/view.py | 17 | 5886 | """Webhooks for external integrations."""
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.models import get_client, UserProfile
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.lib.notifications import convert_html_to_markdown
from zerver.decorator import REQ, has_request_variables, authenticated_rest_api_view
import logging
import ujson
from typing import Any, Dict, List, Optional, Tuple, Union, Text
class TicketDict(dict):
"""
A helper class to turn a dictionary with ticket information into
an object where each of the keys is an attribute for easy access.
"""
def __getattr__(self, field):
# type: (str) -> Any
if "_" in field:
return self.get(field)
else:
return self.get("ticket_" + field)
def property_name(property, index):
# type: (str, int) -> str
"""The Freshdesk API is currently pretty broken: statuses are customizable
but the API will only tell you the number associated with the status, not
the name. While we engage the Freshdesk developers about exposing this
information through the API, since only FlightCar uses this integration,
hardcode their statuses.
"""
statuses = ["", "", "Open", "Pending", "Resolved", "Closed",
"Waiting on Customer", "Job Application", "Monthly"]
priorities = ["", "Low", "Medium", "High", "Urgent"]
if property == "status":
return statuses[index] if index < len(statuses) else str(index)
elif property == "priority":
return priorities[index] if index < len(priorities) else str(index)
else:
raise ValueError("Unknown property")
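# Illustrative behaviour (comment only; not part of the upstream module):
#   property_name("status", 4) -> "Resolved"
#   property_name("priority", 1) -> "Low"
#   property_name("status", 42) -> "42" (out-of-range indexes fall back to str)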
def parse_freshdesk_event(event_string):
# type: (str) -> List[str]
"""These are always of the form "{ticket_action:created}" or
"{status:{from:4,to:6}}". Note the lack of string quoting: this isn't
valid JSON so we have to parse it ourselves.
"""
data = event_string.replace("{", "").replace("}", "").replace(",", ":").split(":")
if len(data) == 2:
# This is a simple ticket action event, like
# {ticket_action:created}.
return data
else:
# This is a property change event, like {status:{from:4,to:6}}. Pull out
# the property, from, and to states.
property, _, from_state, _, to_state = data
return [property, property_name(property, int(from_state)),
property_name(property, int(to_state))]
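# Illustrative parses (comment only; not part of the upstream module):
#   parse_freshdesk_event("{ticket_action:created}")
#       -> ["ticket_action", "created"]
#   parse_freshdesk_event("{status:{from:4,to:6}}")
#       -> ["status", "Resolved", "Waiting on Customer"]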
def format_freshdesk_note_message(ticket, event_info):
# type: (TicketDict, List[str]) -> str
"""There are public (visible to customers) and private note types."""
note_type = event_info[1]
content = "%s <%s> added a %s note to [ticket #%s](%s)." % (
ticket.requester_name, ticket.requester_email, note_type,
ticket.id, ticket.url)
return content
def format_freshdesk_property_change_message(ticket, event_info):
# type: (TicketDict, List[str]) -> str
"""Freshdesk will only tell us the first event to match our webhook
configuration, so if we change multiple properties, we only get the before
and after data for the first one.
"""
content = "%s <%s> updated [ticket #%s](%s):\n\n" % (
ticket.requester_name, ticket.requester_email, ticket.id, ticket.url)
# Why not `"%s %s %s" % event_info`? Because the linter doesn't like it.
content += "%s: **%s** => **%s**" % (
event_info[0].capitalize(), event_info[1], event_info[2])
return content
def format_freshdesk_ticket_creation_message(ticket):
# type: (TicketDict) -> str
"""They send us the description as HTML."""
cleaned_description = convert_html_to_markdown(ticket.description)
content = "%s <%s> created [ticket #%s](%s):\n\n" % (
ticket.requester_name, ticket.requester_email, ticket.id, ticket.url)
content += """~~~ quote
%s
~~~\n
""" % (cleaned_description,)
content += "Type: **%s**\nPriority: **%s**\nStatus: **%s**" % (
ticket.type, ticket.priority, ticket.status)
return content
@authenticated_rest_api_view(is_webhook=True)
@has_request_variables
def api_freshdesk_webhook(request, user_profile, payload=REQ(argument_type='body'),
stream=REQ(default='freshdesk')):
# type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse
ticket_data = payload["freshdesk_webhook"]
required_keys = [
"triggered_event", "ticket_id", "ticket_url", "ticket_type",
"ticket_subject", "ticket_description", "ticket_status",
"ticket_priority", "requester_name", "requester_email",
]
for key in required_keys:
if ticket_data.get(key) is None:
logging.warning("Freshdesk webhook error. Payload was:")
logging.warning(request.body)
return json_error(_("Missing key %s in JSON") % (key,))
ticket = TicketDict(ticket_data)
subject = "#%s: %s" % (ticket.id, ticket.subject)
try:
event_info = parse_freshdesk_event(ticket.triggered_event)
except ValueError:
return json_error(_("Malformed event %s") % (ticket.triggered_event,))
if event_info[1] == "created":
content = format_freshdesk_ticket_creation_message(ticket)
elif event_info[0] == "note_type":
content = format_freshdesk_note_message(ticket, event_info)
elif event_info[0] in ("status", "priority"):
content = format_freshdesk_property_change_message(ticket, event_info)
else:
        # Not an event we know how to handle; do nothing.
return json_success()
check_send_message(user_profile, get_client("ZulipFreshdeskWebhook"), "stream",
[stream], subject, content)
return json_success()
| apache-2.0 |
Cinntax/home-assistant | tests/auth/test_init.py | 4 | 31542 | """Tests for the Home Assistant auth module."""
from datetime import timedelta
from unittest.mock import Mock, patch
import jwt
import pytest
import voluptuous as vol
from homeassistant import auth, data_entry_flow
from homeassistant.auth import models as auth_models, auth_store, const as auth_const
from homeassistant.auth.const import MFA_SESSION_EXPIRATION
from homeassistant.core import callback
from homeassistant.util import dt as dt_util
from tests.common import MockUser, ensure_auth_manager_loaded, flush_store, CLIENT_ID
@pytest.fixture
def mock_hass(loop):
"""Hass mock with minimum amount of data set to make it work with auth."""
hass = Mock()
hass.config.skip_pip = True
return hass
async def test_auth_manager_from_config_validates_config(mock_hass):
"""Test get auth providers."""
with pytest.raises(vol.Invalid):
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Invalid config because no users",
"type": "insecure_example",
"id": "invalid_config",
},
],
[],
)
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[],
)
providers = [
{"name": provider.name, "id": provider.id, "type": provider.type}
for provider in manager.auth_providers
]
assert providers == [
{"name": "Test Name", "type": "insecure_example", "id": None},
{"name": "Test Name 2", "type": "insecure_example", "id": "another"},
]
async def test_auth_manager_from_config_auth_modules(mock_hass):
"""Test get auth modules."""
with pytest.raises(vol.Invalid):
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[
{"name": "Module 1", "type": "insecure_example", "data": []},
{
"name": "Invalid config because no data",
"type": "insecure_example",
"id": "another",
},
],
)
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[
{"name": "Module 1", "type": "insecure_example", "data": []},
{
"name": "Module 2",
"type": "insecure_example",
"id": "another",
"data": [],
},
],
)
providers = [
{"name": provider.name, "type": provider.type, "id": provider.id}
for provider in manager.auth_providers
]
assert providers == [
{"name": "Test Name", "type": "insecure_example", "id": None},
{"name": "Test Name 2", "type": "insecure_example", "id": "another"},
]
modules = [
{"name": module.name, "type": module.type, "id": module.id}
for module in manager.auth_mfa_modules
]
assert modules == [
{"name": "Module 1", "type": "insecure_example", "id": "insecure_example"},
{"name": "Module 2", "type": "insecure_example", "id": "another"},
]
async def test_create_new_user(hass):
"""Test creating new user."""
events = []
@callback
def user_added(event):
events.append(event)
hass.bus.async_listen("user_added", user_added)
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.is_owner is False
assert user.name == "Test Name"
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_login_as_existing_user(mock_hass):
"""Test login as existing user."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add a fake user that we're not going to log in with
user = MockUser(
id="mock-user2", is_owner=False, is_active=False, name="Not user"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id2",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "other-user"},
is_new=False,
)
)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_linking_user_to_two_auth_providers(hass, hass_storage):
"""Test linking user to two auth providers."""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
},
{
"type": "insecure_example",
"id": "another-provider",
"users": [{"username": "another-user", "password": "another-password"}],
},
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
assert user is not None
step = await manager.login_flow.async_init(
("insecure_example", "another-provider"), context={"credential_only": True}
)
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "another-user", "password": "another-password"}
)
new_credential = step["result"]
await manager.async_link_user(user, new_credential)
assert len(user.credentials) == 2
async def test_saving_loading(hass, hass_storage):
"""Test storing and saving data.
Creates one of each type that we store to test we restore correctly.
"""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
await manager.async_activate_user(user)
    # the first refresh token will be used to create an access token
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
manager.async_create_access_token(refresh_token, "192.168.0.1")
# the second refresh token will not be used
await manager.async_create_refresh_token(user, "dummy-client")
await flush_store(manager._store._store)
store2 = auth_store.AuthStore(hass)
users = await store2.async_get_users()
assert len(users) == 1
assert users[0].permissions == user.permissions
assert users[0] == user
assert len(users[0].refresh_tokens) == 2
for r_token in users[0].refresh_tokens.values():
if r_token.client_id == CLIENT_ID:
# verify the first refresh token
assert r_token.last_used_at is not None
assert r_token.last_used_ip == "192.168.0.1"
elif r_token.client_id == "dummy-client":
# verify the second refresh token
assert r_token.last_used_at is None
assert r_token.last_used_ip is None
else:
assert False, "Unknown client_id: %s" % r_token.client_id
async def test_cannot_retrieve_expired_access_token(hass):
"""Test that we cannot retrieve expired access tokens."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert refresh_token.user.id is user.id
assert refresh_token.client_id == CLIENT_ID
access_token = manager.async_create_access_token(refresh_token)
assert await manager.async_validate_access_token(access_token) is refresh_token
with patch(
"homeassistant.util.dt.utcnow",
return_value=dt_util.utcnow()
- auth_const.ACCESS_TOKEN_EXPIRATION
- timedelta(seconds=11),
):
access_token = manager.async_create_access_token(refresh_token)
assert await manager.async_validate_access_token(access_token) is None
async def test_generating_system_user(hass):
"""Test that we can add a system user."""
events = []
@callback
def user_added(event):
events.append(event)
hass.bus.async_listen("user_added", user_added)
manager = await auth.auth_manager_from_config(hass, [], [])
user = await manager.async_create_system_user("Hass.io")
token = await manager.async_create_refresh_token(user)
assert user.system_generated
assert token is not None
assert token.client_id is None
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_refresh_token_requires_client_for_user(hass):
"""Test create refresh token for a user with client_id."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
assert user.system_generated is False
with pytest.raises(ValueError):
await manager.async_create_refresh_token(user)
token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.token_type == auth_models.TOKEN_TYPE_NORMAL
# default access token expiration
assert token.access_token_expiration == auth_const.ACCESS_TOKEN_EXPIRATION
async def test_refresh_token_not_requires_client_for_system_user(hass):
"""Test create refresh token for a system user w/o client_id."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = await manager.async_create_system_user("Hass.io")
assert user.system_generated is True
with pytest.raises(ValueError):
await manager.async_create_refresh_token(user, CLIENT_ID)
token = await manager.async_create_refresh_token(user)
assert token is not None
assert token.client_id is None
assert token.token_type == auth_models.TOKEN_TYPE_SYSTEM
async def test_refresh_token_with_specific_access_token_expiration(hass):
"""Test create a refresh token with specific access token expiration."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
token = await manager.async_create_refresh_token(
user, CLIENT_ID, access_token_expiration=timedelta(days=100)
)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.access_token_expiration == timedelta(days=100)
async def test_refresh_token_type(hass):
"""Test create a refresh token with token type."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user, CLIENT_ID, token_type=auth_models.TOKEN_TYPE_SYSTEM
)
token = await manager.async_create_refresh_token(
user, CLIENT_ID, token_type=auth_models.TOKEN_TYPE_NORMAL
)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.token_type == auth_models.TOKEN_TYPE_NORMAL
async def test_refresh_token_type_long_lived_access_token(hass):
"""Test create a refresh token has long-lived access token type."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user, token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
)
token = await manager.async_create_refresh_token(
user,
client_name="GPS LOGGER",
client_icon="mdi:home",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
)
assert token is not None
assert token.client_id is None
assert token.client_name == "GPS LOGGER"
assert token.client_icon == "mdi:home"
assert token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
async def test_cannot_deactive_owner(mock_hass):
"""Test that we cannot deactive the owner."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
owner = MockUser(is_owner=True).add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_deactivate_user(owner)
async def test_remove_refresh_token(mock_hass):
"""Test that we can remove a refresh token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
access_token = manager.async_create_access_token(refresh_token)
await manager.async_remove_refresh_token(refresh_token)
assert await manager.async_get_refresh_token(refresh_token.id) is None
assert await manager.async_validate_access_token(access_token) is None
async def test_create_access_token(mock_hass):
"""Test normal refresh_token's jwt_key keep same after used."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_NORMAL
jwt_key = refresh_token.jwt_key
access_token = manager.async_create_access_token(refresh_token)
assert access_token is not None
assert refresh_token.jwt_key == jwt_key
    jwt_payload = jwt.decode(access_token, jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(minutes=30).total_seconds()
)
async def test_create_long_lived_access_token(mock_hass):
"""Test refresh_token's jwt_key changed for long-lived access token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=300),
)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token = manager.async_create_access_token(refresh_token)
    jwt_payload = jwt.decode(access_token, refresh_token.jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(days=300).total_seconds()
)
async def test_one_long_lived_access_token_per_refresh_token(mock_hass):
"""Test one refresh_token can only have one long-lived access token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token = manager.async_create_access_token(refresh_token)
jwt_key = refresh_token.jwt_key
rt = await manager.async_validate_access_token(access_token)
assert rt.id == refresh_token.id
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
await manager.async_remove_refresh_token(refresh_token)
assert refresh_token.id not in user.refresh_tokens
rt = await manager.async_validate_access_token(access_token)
    assert rt is None, "Previously issued access token has been revoked"
refresh_token_2 = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
assert refresh_token_2.id != refresh_token.id
assert refresh_token_2.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token_2 = manager.async_create_access_token(refresh_token_2)
jwt_key_2 = refresh_token_2.jwt_key
assert access_token != access_token_2
assert jwt_key != jwt_key_2
rt = await manager.async_validate_access_token(access_token_2)
    jwt_payload = jwt.decode(access_token_2, rt.jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token_2.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(days=3000).total_seconds()
)
async def test_login_with_auth_module(mock_hass):
"""Test login as existing user with auth module."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
}
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
# After auth_provider validated, request auth module input form
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "invalid-pin"}
)
# Invalid code error
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
assert step["errors"] == {"base": "invalid_code"}
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin"}
)
# Finally passed, get user
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_login_with_multi_auth_module(mock_hass):
"""Test login as existing user with multiple auth modules."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
},
{
"type": "insecure_example",
"id": "module2",
"data": [{"user_id": "mock-user", "pin": "test-pin2"}],
},
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
# After auth_provider validated, request select auth module
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "select_mfa_module"
step = await manager.login_flow.async_configure(
step["flow_id"], {"multi_factor_auth_module": "module2"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin2"}
)
# Finally passed, get user
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_auth_module_expired_session(mock_hass):
"""Test login as existing user."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
}
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
with patch(
"homeassistant.util.dt.utcnow",
return_value=dt_util.utcnow() + MFA_SESSION_EXPIRATION,
):
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin"}
)
    # login flow aborts due to session timeout
assert step["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert step["reason"] == "login_expired"
async def test_enable_mfa_for_user(hass, hass_storage):
"""Test enable mfa module for user."""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[{"type": "insecure_example", "data": []}],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
assert user is not None
    # a new user doesn't have mfa enabled
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 0
module = manager.get_auth_mfa_module("insecure_example")
    # the mfa module doesn't have any data yet
assert bool(module._data) is False
# test enable mfa for user
await manager.async_enable_user_mfa(user, "insecure_example", {"pin": "test-pin"})
assert len(module._data) == 1
assert module._data[0] == {"user_id": user.id, "pin": "test-pin"}
# test get enabled mfa
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 1
assert "insecure_example" in modules
    # re-enabling mfa for the user will override the existing entry
await manager.async_enable_user_mfa(
user, "insecure_example", {"pin": "test-pin-new"}
)
assert len(module._data) == 1
assert module._data[0] == {"user_id": user.id, "pin": "test-pin-new"}
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 1
assert "insecure_example" in modules
# system user cannot enable mfa
system_user = await manager.async_create_system_user("system-user")
with pytest.raises(ValueError):
await manager.async_enable_user_mfa(
system_user, "insecure_example", {"pin": "test-pin"}
)
assert len(module._data) == 1
modules = await manager.async_get_enabled_mfa(system_user)
assert len(modules) == 0
# disable mfa for user
await manager.async_disable_user_mfa(user, "insecure_example")
assert bool(module._data) is False
# test get enabled mfa
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 0
    # disabling mfa for a user who doesn't have it enabled fails silently
await manager.async_disable_user_mfa(user, "insecure_example")
async def test_async_remove_user(hass):
"""Test removing a user."""
events = []
@callback
def user_removed(event):
events.append(event)
hass.bus.async_listen("user_removed", user_removed)
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
assert len(user.credentials) == 1
await hass.auth.async_remove_user(user)
assert len(await manager.async_get_users()) == 0
assert len(user.credentials) == 0
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_new_users_admin(mock_hass):
"""Test newly created users are admin."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
ensure_auth_manager_loaded(manager)
user = await manager.async_create_user("Hello")
assert user.is_admin
user_cred = await manager.async_get_or_create_user(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=True,
)
)
assert user_cred.is_admin
| apache-2.0 |
ask/celery | celery/tests/utilities/test_local.py | 2 | 7546 | from __future__ import absolute_import
from __future__ import with_statement
import sys
from nose import SkipTest
from celery.local import Proxy, PromiseProxy, maybe_evaluate, try_import
from celery.tests.utils import Case
class test_try_import(Case):
def test_imports(self):
self.assertTrue(try_import(__name__))
def test_when_default(self):
default = object()
self.assertIs(try_import("foobar.awqewqe.asdwqewq", default), default)
class test_Proxy(Case):
def test_name(self):
def real():
"""real function"""
return "REAL"
x = Proxy(lambda: real, name="xyz")
self.assertEqual(x.__name__, "xyz")
y = Proxy(lambda: real)
self.assertEqual(y.__name__, "real")
self.assertEqual(x.__doc__, "real function")
self.assertEqual(x.__class__, type(real))
self.assertEqual(x.__dict__, real.__dict__)
self.assertEqual(repr(x), repr(real))
def test_nonzero(self):
class X(object):
def __nonzero__(self):
return False
x = Proxy(lambda: X())
self.assertFalse(x)
def test_slots(self):
class X(object):
__slots__ = ()
x = Proxy(X)
with self.assertRaises(AttributeError):
x.__dict__
def test_unicode(self):
class X(object):
def __unicode__(self):
return u"UNICODE"
def __repr__(self):
return "REPR"
x = Proxy(lambda: X())
self.assertEqual(unicode(x), u"UNICODE")
del(X.__unicode__)
self.assertEqual(unicode(x), "REPR")
def test_dir(self):
if sys.version_info < (2, 6):
raise SkipTest("Not relevant for Py2.5")
class X(object):
def __dir__(self):
return ["a", "b", "c"]
x = Proxy(lambda: X())
self.assertListEqual(dir(x), ["a", "b", "c"])
class Y(object):
def __dir__(self):
raise RuntimeError()
y = Proxy(lambda: Y())
self.assertListEqual(dir(y), [])
def test_getsetdel_attr(self):
if sys.version_info < (2, 6):
raise SkipTest("Not relevant for Py2.5")
class X(object):
a = 1
b = 2
c = 3
def __dir__(self):
return ["a", "b", "c"]
v = X()
x = Proxy(lambda: v)
self.assertListEqual(x.__members__, ["a", "b", "c"])
self.assertEqual(x.a, 1)
self.assertEqual(x.b, 2)
self.assertEqual(x.c, 3)
setattr(x, "a", 10)
self.assertEqual(x.a, 10)
del(x.a)
self.assertEqual(x.a, 1)
def test_dictproxy(self):
v = {}
x = Proxy(lambda: v)
x["foo"] = 42
self.assertEqual(x["foo"], 42)
self.assertEqual(len(x), 1)
self.assertIn("foo", x)
del(x["foo"])
with self.assertRaises(KeyError):
x["foo"]
self.assertTrue(iter(x))
def test_listproxy(self):
v = []
x = Proxy(lambda: v)
x.append(1)
x.extend([2, 3, 4])
self.assertEqual(x[0], 1)
self.assertEqual(x[:-1], [1, 2, 3])
del(x[-1])
self.assertEqual(x[:-1], [1, 2])
x[0] = 10
self.assertEqual(x[0], 10)
self.assertIn(10, x)
self.assertEqual(len(x), 3)
self.assertTrue(iter(x))
def test_int(self):
self.assertEqual(Proxy(lambda: 10) + 1, Proxy(lambda: 11))
self.assertEqual(Proxy(lambda: 10) - 1, Proxy(lambda: 9))
self.assertEqual(Proxy(lambda: 10) * 2, Proxy(lambda: 20))
self.assertEqual(Proxy(lambda: 10) ** 2, Proxy(lambda: 100))
self.assertEqual(Proxy(lambda: 20) / 2, Proxy(lambda: 10))
self.assertEqual(Proxy(lambda: 20) // 2, Proxy(lambda: 10))
self.assertEqual(Proxy(lambda: 11) % 2, Proxy(lambda: 1))
self.assertEqual(Proxy(lambda: 10) << 2, Proxy(lambda: 40))
self.assertEqual(Proxy(lambda: 10) >> 2, Proxy(lambda: 2))
self.assertEqual(Proxy(lambda: 10) ^ 7, Proxy(lambda: 13))
self.assertEqual(Proxy(lambda: 10) | 40, Proxy(lambda: 42))
self.assertEqual(~Proxy(lambda: 10), Proxy(lambda: -11))
self.assertEqual(-Proxy(lambda: 10), Proxy(lambda: -10))
self.assertEqual(+Proxy(lambda: -10), Proxy(lambda: -10))
self.assertTrue(Proxy(lambda: 10) < Proxy(lambda: 20))
self.assertTrue(Proxy(lambda: 20) > Proxy(lambda: 10))
self.assertTrue(Proxy(lambda: 10) >= Proxy(lambda: 10))
self.assertTrue(Proxy(lambda: 10) <= Proxy(lambda: 10))
self.assertTrue(Proxy(lambda: 10) == Proxy(lambda: 10))
self.assertTrue(Proxy(lambda: 20) != Proxy(lambda: 10))
x = Proxy(lambda: 10)
x -= 1
self.assertEqual(x, 9)
x = Proxy(lambda: 9)
x += 1
self.assertEqual(x, 10)
x = Proxy(lambda: 10)
x *= 2
self.assertEqual(x, 20)
x = Proxy(lambda: 20)
x /= 2
self.assertEqual(x, 10)
x = Proxy(lambda: 10)
x %= 2
self.assertEqual(x, 0)
x = Proxy(lambda: 10)
x <<= 3
self.assertEqual(x, 80)
x = Proxy(lambda: 80)
x >>= 4
self.assertEqual(x, 5)
x = Proxy(lambda: 5)
x ^= 1
self.assertEqual(x, 4)
x = Proxy(lambda: 4)
x **= 4
self.assertEqual(x, 256)
x = Proxy(lambda: 256)
x //= 2
self.assertEqual(x, 128)
x = Proxy(lambda: 128)
x |= 2
self.assertEqual(x, 130)
x = Proxy(lambda: 130)
x &= 10
self.assertEqual(x, 2)
x = Proxy(lambda: 10)
self.assertEqual(type(x.__float__()), float)
self.assertEqual(type(x.__int__()), int)
self.assertEqual(type(x.__long__()), long)
self.assertTrue(hex(x))
self.assertTrue(oct(x))
def test_hash(self):
class X(object):
def __hash__(self):
return 1234
self.assertEqual(hash(Proxy(lambda: X())), 1234)
def test_call(self):
class X(object):
def __call__(self):
return 1234
self.assertEqual(Proxy(lambda: X())(), 1234)
def test_context(self):
class X(object):
entered = exited = False
def __enter__(self):
self.entered = True
return 1234
def __exit__(self, *exc_info):
self.exited = True
v = X()
x = Proxy(lambda: v)
with x as val:
self.assertEqual(val, 1234)
self.assertTrue(x.entered)
self.assertTrue(x.exited)
def test_reduce(self):
class X(object):
def __reduce__(self):
return 123
x = Proxy(lambda: X())
self.assertEqual(x.__reduce__(), 123)
class test_PromiseProxy(Case):
def test_only_evaluated_once(self):
class X(object):
attr = 123
evals = 0
def __init__(self):
self.__class__.evals += 1
p = PromiseProxy(X)
self.assertEqual(p.attr, 123)
self.assertEqual(p.attr, 123)
self.assertEqual(X.evals, 1)
def test_maybe_evaluate(self):
x = PromiseProxy(lambda: 30)
self.assertEqual(maybe_evaluate(x), 30)
self.assertEqual(maybe_evaluate(x), 30)
self.assertEqual(maybe_evaluate(30), 30)
| bsd-3-clause |
wkhtmltopdf/qtwebkit | Source/WebCore/inspector/generate_protocol_externs.py | 117 | 9058 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
type_traits = {
"any": "*",
"string": "string",
"integer": "number",
"number": "number",
"boolean": "boolean",
"array": "Array.<*>",
"object": "Object",
}
ref_types = {}
def full_qualified_type_id(domain_name, type_id):
if type_id.find(".") == -1:
return "%s.%s" % (domain_name, type_id)
return type_id
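# Illustrative behaviour (comment only; not part of the upstream script):
#   full_qualified_type_id("DOM", "NodeId") -> "DOM.NodeId"
#   full_qualified_type_id("DOM", "Page.FrameId") -> "Page.FrameId" (already qualified)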
def fix_camel_case(name):
refined = re.sub(r'-(\w)', lambda pat: pat.group(1).upper(), name)
refined = to_title_case(refined)
return re.sub(r'(?i)HTML|XML|WML|API', lambda pat: pat.group(0).upper(), refined)
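# Illustrative behaviour (comment only; not part of the upstream script):
#   fix_camel_case("foo-bar") -> "FooBar"
#   fix_camel_case("html-content") -> "HTMLContent" (HTML/XML/WML/API are upper-cased)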
def to_title_case(name):
return name[:1].upper() + name[1:]
def generate_enum(name, json):
enum_members = []
for member in json["enum"]:
enum_members.append(" %s: \"%s\"" % (fix_camel_case(member), member))
return "\n/** @enum {string} */\n%s = {\n%s\n};\n" % (name, (",\n".join(enum_members)))
def param_type(domain_name, param):
if "type" in param:
if param["type"] == "array":
items = param["items"]
return "Array.<%s>" % param_type(domain_name, items)
else:
return type_traits[param["type"]]
if "$ref" in param:
type_id = full_qualified_type_id(domain_name, param["$ref"])
if type_id in ref_types:
return ref_types[type_id]
else:
print "Type not found: " + type_id
return "!! Type not found: " + type_id
def generate_protocol_externs(output_path, input_path):
input_file = open(input_path, "r")
json_string = input_file.read()
json_string = json_string.replace(": true", ": True")
json_string = json_string.replace(": false", ": False")
json_api = eval(json_string)["domains"]
output_file = open(output_path, "w")
output_file.write(
"""
var Protocol = {};
/** @typedef {string}*/
Protocol.Error;
""")
for domain in json_api:
domain_name = domain["domain"]
if "types" in domain:
for type in domain["types"]:
type_id = full_qualified_type_id(domain_name, type["id"])
ref_types[type_id] = "%sAgent.%s" % (domain_name, type["id"])
for domain in json_api:
domain_name = domain["domain"]
output_file.write("\n\n\nvar %sAgent = {};\n" % domain_name)
if "types" in domain:
for type in domain["types"]:
if type["type"] == "object":
typedef_args = []
if "properties" in type:
for property in type["properties"]:
suffix = ""
if ("optional" in property):
suffix = "|undefined"
if "enum" in property:
enum_name = "%sAgent.%s%s" % (domain_name, type["id"], to_title_case(property["name"]))
output_file.write(generate_enum(enum_name, property))
typedef_args.append("%s:(%s%s)" % (property["name"], enum_name, suffix))
else:
typedef_args.append("%s:(%s%s)" % (property["name"], param_type(domain_name, property), suffix))
if (typedef_args):
output_file.write("\n/** @typedef {{%s}|null} */\n%sAgent.%s;\n" % (", ".join(typedef_args), domain_name, type["id"]))
else:
output_file.write("\n/** @typedef {Object} */\n%sAgent.%s;\n" % (domain_name, type["id"]))
elif type["type"] == "string" and "enum" in type:
output_file.write(generate_enum("%sAgent.%s" % (domain_name, type["id"]), type))
elif type["type"] == "array":
suffix = ""
if ("optional" in property):
suffix = "|undefined"
output_file.write("\n/** @typedef {Array.<%s>%s} */\n%sAgent.%s;\n" % (param_type(domain_name, type["items"]), suffix, domain_name, type["id"]))
else:
output_file.write("\n/** @typedef {%s} */\n%sAgent.%s;\n" % (type_traits[type["type"]], domain_name, type["id"]))
if "commands" in domain:
for command in domain["commands"]:
output_file.write("\n/**\n")
params = []
if ("parameters" in command):
for in_param in command["parameters"]:
if ("optional" in in_param):
params.append("opt_%s" % in_param["name"])
output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, in_param), in_param["name"]))
else:
params.append(in_param["name"])
output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, in_param), in_param["name"]))
returns = ["?Protocol.Error"]
if ("returns" in command):
for out_param in command["returns"]:
if ("optional" in out_param):
returns.append("%s=" % param_type(domain_name, out_param))
else:
returns.append("%s" % param_type(domain_name, out_param))
output_file.write(" * @param {function(%s):void=} opt_callback\n" % ", ".join(returns))
output_file.write(" */\n")
params.append("opt_callback")
output_file.write("%sAgent.%s = function(%s) {}\n" % (domain_name, command["name"], ", ".join(params)))
output_file.write("/** @param {function(%s):void=} opt_callback */\n" % ", ".join(returns))
output_file.write("%sAgent.%s.invoke = function(obj, opt_callback) {}\n" % (domain_name, command["name"]))
output_file.write("/** @interface */\n")
output_file.write("%sAgent.Dispatcher = function() {};\n" % domain_name)
if "events" in domain:
for event in domain["events"]:
params = []
if ("parameters" in event):
output_file.write("/**\n")
for param in event["parameters"]:
if ("optional" in param):
params.append("opt_%s" % param["name"])
output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, param), param["name"]))
else:
params.append(param["name"])
output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, param), param["name"]))
output_file.write(" */\n")
output_file.write("%sAgent.Dispatcher.prototype.%s = function(%s) {};\n" % (domain_name, event["name"], ", ".join(params)))
output_file.write("/**\n * @param {%sAgent.Dispatcher} dispatcher\n */\n" % domain_name)
output_file.write("InspectorBackend.register%sDispatcher = function(dispatcher) {}\n" % domain_name)
output_file.close()
if __name__ == "__main__":
import sys
import os.path
program_name = os.path.basename(__file__)
if len(sys.argv) < 4 or sys.argv[1] != "-o":
sys.stderr.write("Usage: %s -o OUTPUT_FILE INPUT_FILE\n" % program_name)
exit(1)
output_path = sys.argv[2]
input_path = sys.argv[3]
generate_protocol_externs(output_path, input_path)
| gpl-2.0 |
sloanyang/aquantic | Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py | 124 | 3269 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
from webkitpy.common.system.executive import ScriptError
class CleanWorkingDirectoryTest(unittest.TestCase):
def test_run_working_directory_changes_no_force(self):
tool = MockTool()
tool._scm = Mock()
step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=False))
tool._scm.has_working_directory_changes = lambda: True
self.assertRaises(ScriptError, step.run, {})
self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 0)
def test_run_working_directory_changes_force(self):
tool = MockTool()
tool._scm = Mock()
step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=True))
tool._scm.has_working_directory_changes = lambda: True
step.run({})
self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 1)
def test_run_no_local_changes(self):
tool = MockTool()
tool._scm = Mock()
step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=False))
tool._scm.has_working_directory_changes = lambda: False
tool._scm.has_local_commits = lambda: False
step.run({})
self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 1)
def test_no_clean(self):
tool = MockTool()
tool._scm = Mock()
step = CleanWorkingDirectory(tool, MockOptions(clean=False))
step.run({})
self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 0)
| gpl-2.0 |
ResearchComputing/oide-slurm-assist | sandstone_slurm/config_utils.py | 2 | 1487 | import copy
import json
from jsonschema import Draft4Validator
from sandstone import settings
class ConfigLoader:
@staticmethod
def getFormConfig():
base_schema = settings.BASE_CONFIG
Draft4Validator.check_schema(base_schema)
form_config = settings.FORM_CONFIG
full_config = {}
feat = form_config.get('features',[])
gres = form_config.get('gres',[])
# profiles must be defined
profiles = form_config.get('profiles')
profiles.update({
'custom': {
'initial': [],
'schema': {},
}
})
full_config['features'] = feat
full_config['gres'] = gres
full_config['profiles'] = {}
for k,v in profiles.iteritems():
full_config['profiles'][k] = {}
temp = copy.deepcopy(base_schema)
if 'initial' in v:
full_config['profiles'][k]['initial'] = v['initial']
if 'schema' in v:
try:
temp['required'] = v['schema']['required']
except KeyError:
pass
try:
for pk,pv in v['schema']['properties'].iteritems():
temp['properties'][pk].update(pv)
except KeyError:
pass
Draft4Validator.check_schema(temp)
full_config['profiles'][k]['schema'] = temp
return full_config
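# Illustrative sketch of the FORM_CONFIG shape this loader consumes
# (hypothetical values inferred from the key accesses above; not part of
# the original module):
# FORM_CONFIG = {
#     'features': [...],
#     'gres': [...],
#     'profiles': {
#         'default': {
#             'initial': ['#SBATCH --nodes=1'],
#             'schema': {
#                 'required': ['nodes'],
#                 'properties': {'nodes': {'default': 1}},
#             },
#         },
#     },
# }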
| mit |
michal-stuglik/django-mips | mips/example/examples2.py | 2 | 5789 | __author__ = 'AniaF'
import os
import django
# set the DJANGO_SETTINGS_MODULE environment variable to "mips.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mips.settings")
# setup
django.setup()
# import proper classes to play with
# from mips.models import Mip, Subspecies, SampleSubspecies, Samples, Paralog, Instance
from mips.models import Mip, SampleSubspecies, Samples, Paralog, Instance
# example data
reference_name="c132510_g1_i1"
reference_list = ["c132510_g1_i1","c150407_g1_i1","c150408_g1_i1","c107721_g1_i1","c129997_g1_i1", "c129907_g1_i1","c130723_g1_i1"]
mip_list = ["177","178","179","180"]
sample_list = ["1737","1738","1739","1740","1741","1742","1743"]
# How many mips are in a particular reference transcript?
#print Mip.objects.filter(reference_id=reference_name).count()
# For a reference select and print all mips and their sequences
def select_mips_for_reference(reference):
for mip in Mip.objects.filter(reference_id=reference):
print mip, mip.mip_sequence
#select_mips_for_reference(reference_name)
# For a list of references, check if they have a mip, how many, and how many mapping mips there are
def check_mips_for_reference_list(lista):
for ref in lista:
if Mip.objects.filter(reference_id=ref).exists():
mapping_mips = [mip.mip_func_mapping for mip in Mip.objects.filter(reference_id=ref)].count(True)
print ref, Mip.objects.filter(reference_id=ref).count(), mapping_mips
else:
print ref, 'no mip'
#check_mips_for_reference_list(reference_list)
# For a list of references, print mips they have (mip_id), and their function
def mips_for_reference_list(lista):
for ref in lista:
for mip in Mip.objects.filter(reference_id=ref):
print ref, mip.mip_id, ['immuno' if mip.mip_func_immuno else '-'][0], ['mapping' if mip.mip_func_mapping else '-'][0], \
['random' if mip.mip_func_random else '-'][0], ['utr' if mip.mip_func_utr else '-'][0]
#mips_for_reference_list(reference_list)
# For a list of references check if there are mips, for which individuals they were sequenced and their performance
def samples_for_mips(lista):
for ref in lista:
for mip in Mip.objects.filter(reference_id=ref):
for sample in Samples.objects.filter(mip_fk=mip):
print ref, mip, sample.sample_fk, sample.mip_performance
#samples_for_mips(reference_list)
# For a list of references, print all mips and count in how many samples they were sequenced
def count_samples_for_mips(lista):
for ref in lista:
for mip in Mip.objects.filter(reference_id=ref):
samp = Samples.objects.filter(mip_fk=mip).count()
print ref, mip, samp
#count_samples_for_mips(reference_list)
# For a list of mips check in which subspecies they are paralogs
def mips_with_paralogs(lista):
for mip in lista:
for subsp in Paralog.objects.filter(mip_fk=mip):
print mip, subsp.subspecies_fk, subsp.mip_subspecies_paralog
#mips_with_paralogs(mip_list)
# Print all mips that are paralogs and in which subspecies
def all_mips_paralogs():
for para in Paralog.objects.filter(mip_subspecies_paralog="paralog"):
print para.mip_fk, para.subspecies_fk, para.mip_subspecies_paralog
#all_mips_paralogs()
# Find all mips for montandoni
def all_mips_for_species():
for montandoni_samples in SampleSubspecies.objects.filter(subspecies_fk="montandoni"):
for mip in Samples.objects.filter(sample_fk=montandoni_samples.sample_id):
print mip.mip_fk, mip.sample_fk
#all_mips_for_species()
# For a list of samples find all mips and their reference
def mips_for_samples(lista):
for indiv in lista:
for samps in Samples.objects.filter(sample_fk=indiv):
print samps.sample_fk, samps.mip_fk, samps.mip_fk.reference_id
#mips_for_samples(sample_list)
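# Illustrative extra example (not from the original file; assumes the same
# models as above): for a list of samples, count how many mips each one has
def count_mips_for_samples(lista):
    for indiv in lista:
        print indiv, Samples.objects.filter(sample_fk=indiv).count()
#count_mips_for_samples(sample_list)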
# For all mips in montandoni, print only mapping mips
def func_mips_for_montandoni():
for montandoni_samples in SampleSubspecies.objects.filter(subspecies_fk="montandoni"):
for Sample_mip in Samples.objects.filter(sample_fk=montandoni_samples.sample_id):
if Sample_mip.mip_fk.mip_func_mapping is True:
print Sample_mip.mip_fk.mip_id
#func_mips_for_montandoni()
# Select one random mapping mip for a list of references
import random
def random_mip_for_reference(lista):
for ref in lista:
if Mip.objects.filter(reference_id=ref, mip_func_mapping=True).exists():
ref_mips = list(Mip.objects.filter(reference_id=ref, mip_func_mapping=True))
one_mip = random.sample(ref_mips, 1)[0]
print one_mip.reference_id, one_mip.mip_id
#random_mip_for_reference(reference_list)
# Read in one column (txt file) and convert it into a list
def read_column(txt_file):
fh = open(txt_file,'r')
linie = fh.readlines()
K = [ele.split()[0] for ele in linie]
print K
#read_column("example_list.txt")
# For a list of mips print producer and production date
def instance_for_mip(lista):
for mip in lista:
for mip_inst in Instance.objects.filter(mip_fk=mip):
print mip_inst.mip_fk, mip_inst.mip_production_data, mip_inst.mip_producer
#instance_for_mip(mip_list)
# For a list of mips print producer and production date and write it to a file
wh = open('output.tab','w')
def instance_for_mip_and_write(lista):
for mip in lista:
for mip_inst in Instance.objects.filter(mip_fk=mip):
output = str(mip_inst.mip_fk) +'\t'+ str(mip_inst.mip_production_data) +'\t'+ str(mip_inst.mip_producer)+'\n'
wh.write(output)
instance_for_mip_and_write(mip_list)
wh.flush()
wh.close()
| mit |
thorkd1t/lurkbot | query.py | 1 | 1330 | import sqlite3
import time
import joinlist
# the tables look like this:
# db name 'imaqtpie.db' (each channel in joinlist has its own .db)
# Table name chat (each db has one table named 'chat')
#___________________________________________________________________
# usr | mesg | id | flags | channel | date/time |
#===================================================================
# bob | hi | 1 | @badges= | imaqtpie |2017-05-01 12:00:00 |
#-------------------------------------------------------------------
# jim | Kappa | 2 | @badges= | imaqtpie |2017-05-01 12:00:01 |
#-------------------------------------------------------------------
target = "imaqtpie" #channel name
conn = sqlite3.connect(target + '.db')
c = conn.cursor()
#====================================================================
#example queries:
#------------------
#c.execute('select usr from chat')
#c.execute('select * from chat')
#c.execute('select * from chat where usr == ""')
#c.execute('select mesg from chat where usr == "moobot" order by id desc limit 5')
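#added illustrative example (assumes the chat schema shown above):
#c.execute('select usr, count(*) from chat group by usr order by count(*) desc limit 10')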
c.execute('select * from chat order by id desc limit 50')
bla = c.fetchall()
if bla is not None:
for i in bla:
print i[0] + ": " + i[1]
else:
print "nothing to print"
conn.close()
time.sleep(100)
| mit |
punalpatel/st2 | contrib/runners/action_chain_runner/tests/unit/test_actionchain.py | 4 | 40041 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import action_chain_runner as acr
from st2actions.container.service import RunnerContainerService
from st2common.constants.action import LIVEACTION_STATUS_RUNNING
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_STATUS_CANCELED
from st2common.constants.action import LIVEACTION_STATUS_TIMED_OUT
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2common.exceptions import actionrunner as runnerexceptions
from st2common.models.api.notification import NotificationsHelper
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.keyvalue import KeyValuePairDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.keyvalue import KeyValuePair
from st2common.persistence.runner import RunnerType
from st2common.services import action as action_service
from st2common.util import action_db as action_db_util
from st2common.exceptions.action import ParameterRenderingFailedException
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
class DummyActionExecution(object):
def __init__(self, status=LIVEACTION_STATUS_SUCCEEDED, result=''):
self.id = None
self.status = status
self.result = result
FIXTURES_PACK = 'generic'
TEST_MODELS = {
'actions': ['a1.yaml', 'a2.yaml', 'action_4_action_context_param.yaml'],
'runners': ['testrunner1.yaml']
}
MODELS = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
ACTION_1 = MODELS['actions']['a1.yaml']
ACTION_2 = MODELS['actions']['a2.yaml']
ACTION_3 = MODELS['actions']['action_4_action_context_param.yaml']
RUNNER = MODELS['runners']['testrunner1.yaml']
CHAIN_1_PATH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain1.yaml')
CHAIN_2_PATH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain2.yaml')
CHAIN_ACTION_CALL_NO_PARAMS_PATH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_action_call_no_params.yaml')
CHAIN_NO_DEFAULT = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'no_default_chain.yaml')
CHAIN_NO_DEFAULT_2 = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'no_default_chain_2.yaml')
CHAIN_BAD_DEFAULT = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'bad_default_chain.yaml')
CHAIN_BROKEN_ON_SUCCESS_PATH_STATIC_TASK_NAME = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_broken_on_success_path_static_task_name.yaml')
CHAIN_BROKEN_ON_FAILURE_PATH_STATIC_TASK_NAME = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_broken_on_failure_path_static_task_name.yaml')
CHAIN_FIRST_TASK_RENDER_FAIL_PATH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_first_task_parameter_render_fail.yaml')
CHAIN_SECOND_TASK_RENDER_FAIL_PATH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_second_task_parameter_render_fail.yaml')
CHAIN_LIST_TEMP_PATH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_list_template.yaml')
CHAIN_DICT_TEMP_PATH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_dict_template.yaml')
CHAIN_DEP_INPUT = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_dependent_input.yaml')
CHAIN_DEP_RESULTS_INPUT = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_dep_result_input.yaml')
MALFORMED_CHAIN_PATH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'malformedchain.yaml')
CHAIN_TYPED_PARAMS = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_typed_params.yaml')
CHAIN_SYSTEM_PARAMS = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_typed_system_params.yaml')
CHAIN_WITH_ACTIONPARAM_VARS = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_with_actionparam_vars.yaml')
CHAIN_WITH_SYSTEM_VARS = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_with_system_vars.yaml')
CHAIN_WITH_PUBLISH = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_with_publish.yaml')
CHAIN_WITH_PUBLISH_PARAM_RENDERING_FAILURE = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_publish_params_rendering_failure.yaml')
CHAIN_WITH_INVALID_ACTION = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_with_invalid_action.yaml')
CHAIN_ACTION_PARAMS_AND_PARAMETERS_ATTRIBUTE = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_action_params_and_parameters.yaml')
CHAIN_ACTION_PARAMS_ATTRIBUTE = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_action_params_attribute.yaml')
CHAIN_ACTION_PARAMETERS_ATTRIBUTE = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_action_parameters_attribute.yaml')
CHAIN_ACTION_INVALID_PARAMETER_TYPE = FixturesLoader().get_fixture_file_path_abs(
FIXTURES_PACK, 'actionchains', 'chain_invalid_parameter_type_passed_to_action.yaml')
CHAIN_NOTIFY_API = {'notify': {'on-complete': {'message': 'foo happened.'}}}
CHAIN_NOTIFY_DB = NotificationsHelper.to_model(CHAIN_NOTIFY_API)
@mock.patch.object(action_db_util, 'get_runnertype_by_name',
mock.MagicMock(return_value=RUNNER))
class TestActionChainRunner(DbTestCase):
def test_runner_creation(self):
runner = acr.get_runner()
self.assertTrue(runner)
self.assertTrue(runner.runner_id)
def test_malformed_chain(self):
try:
chain_runner = acr.get_runner()
chain_runner.entry_point = MALFORMED_CHAIN_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
self.assertTrue(False, 'Expected pre_run to fail.')
except runnerexceptions.ActionRunnerPreRunError:
self.assertTrue(True)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_success_path(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_1_PATH
chain_runner.action = ACTION_1
action_ref = ResourceReference.to_string_reference(name=ACTION_1.name,
pack=ACTION_1.pack)
chain_runner.liveaction = LiveActionDB(action=action_ref)
chain_runner.liveaction.notify = CHAIN_NOTIFY_DB
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# based on the chain the callcount is known to be 3. Not great but works.
self.assertEqual(request.call_count, 3)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_chain_second_task_times_out(self, request):
# Second task in the chain times out so the action chain status should be timeout
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_2_PATH
chain_runner.action = ACTION_1
original_run_action = chain_runner._run_action
def mock_run_action(*args, **kwargs):
original_live_action = args[0]
liveaction = original_run_action(*args, **kwargs)
if original_live_action.action == 'wolfpack.a2':
# Mock a timeout for second task
liveaction.status = LIVEACTION_STATUS_TIMED_OUT
return liveaction
chain_runner._run_action = mock_run_action
action_ref = ResourceReference.to_string_reference(name=ACTION_1.name,
pack=ACTION_1.pack)
chain_runner.liveaction = LiveActionDB(action=action_ref)
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
status, _, _ = chain_runner.run({})
self.assertEqual(status, LIVEACTION_STATUS_TIMED_OUT)
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# based on the chain the callcount is known to be 3. Not great but works.
self.assertEqual(request.call_count, 3)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_task_is_canceled_while_running(self, request):
# Second task in the action is CANCELED, make sure runner doesn't get stuck in an infinite
# loop
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_2_PATH
chain_runner.action = ACTION_1
original_run_action = chain_runner._run_action
def mock_run_action(*args, **kwargs):
original_live_action = args[0]
if original_live_action.action == 'wolfpack.a2':
status = LIVEACTION_STATUS_CANCELED
else:
status = LIVEACTION_STATUS_SUCCEEDED
request.return_value = (DummyActionExecution(status=status), None)
liveaction = original_run_action(*args, **kwargs)
return liveaction
chain_runner._run_action = mock_run_action
action_ref = ResourceReference.to_string_reference(name=ACTION_1.name,
pack=ACTION_1.pack)
chain_runner.liveaction = LiveActionDB(action=action_ref)
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
status, _, _ = chain_runner.run({})
self.assertEqual(status, LIVEACTION_STATUS_CANCELED)
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# Chain count should be 2 since the last task doesn't get called since the second one was
# canceled
self.assertEqual(request.call_count, 2)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_success_task_action_call_with_no_params(self, request):
# Make sure that the runner doesn't explode if task definition contains
# no "params" section
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_ACTION_CALL_NO_PARAMS_PATH
chain_runner.action = ACTION_1
action_ref = ResourceReference.to_string_reference(name=ACTION_1.name,
pack=ACTION_1.pack)
chain_runner.liveaction = LiveActionDB(action=action_ref)
chain_runner.liveaction.notify = CHAIN_NOTIFY_DB
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# based on the chain the callcount is known to be 3. Not great but works.
self.assertEqual(request.call_count, 3)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_no_default(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_NO_DEFAULT
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# In case of this chain default_node is the first_node.
default_node = chain_runner.chain_holder.actionchain.default
first_node = chain_runner.chain_holder.actionchain.chain[0]
self.assertEqual(default_node, first_node.name)
# based on the chain the callcount is known to be 3. Not great but works.
self.assertEqual(request.call_count, 3)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_no_default_multiple_options(self, request):
# subtle difference is that when there are multiple possible default nodes
        # the order per chain definition may not be preserved. This is really a
        # poorly formatted chain, but we still make a best attempt to make it work.
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_NO_DEFAULT_2
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# In case of this chain default_node is the first_node.
default_node = chain_runner.chain_holder.actionchain.default
first_node = chain_runner.chain_holder.actionchain.chain[0]
self.assertEqual(default_node, first_node.name)
# based on the chain the callcount is known to be 2.
self.assertEqual(request.call_count, 2)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_bad_default(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_BAD_DEFAULT
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
expected_msg = 'Unable to find node with name "bad_default" referenced in "default".'
self.assertRaisesRegexp(runnerexceptions.ActionRunnerPreRunError,
expected_msg, chain_runner.pre_run)
@mock.patch('eventlet.sleep', mock.MagicMock())
@mock.patch.object(action_db_util, 'get_liveaction_by_id', mock.MagicMock(
return_value=DummyActionExecution()))
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request',
return_value=(DummyActionExecution(status=LIVEACTION_STATUS_RUNNING), None))
def test_chain_runner_success_path_with_wait(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_1_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# based on the chain the callcount is known to be 3. Not great but works.
self.assertEqual(request.call_count, 3)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request',
return_value=(DummyActionExecution(status=LIVEACTION_STATUS_FAILED), None))
def test_chain_runner_failure_path(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_1_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
status, _, _ = chain_runner.run({})
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# based on the chain the callcount is known to be 2. Not great but works.
self.assertEqual(request.call_count, 2)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request',
return_value=(DummyActionExecution(), None))
def test_chain_runner_broken_on_success_path_static_task_name(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_BROKEN_ON_SUCCESS_PATH_STATIC_TASK_NAME
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
expected_msg = ('Unable to find node with name "c5" referenced in "on-success" '
'in task "c2"')
self.assertRaisesRegexp(runnerexceptions.ActionRunnerPreRunError,
expected_msg, chain_runner.pre_run)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request',
return_value=(DummyActionExecution(), None))
def test_chain_runner_broken_on_failure_path_static_task_name(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_BROKEN_ON_FAILURE_PATH_STATIC_TASK_NAME
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
expected_msg = ('Unable to find node with name "c6" referenced in "on-failure" '
'in task "c2"')
self.assertRaisesRegexp(runnerexceptions.ActionRunnerPreRunError,
expected_msg, chain_runner.pre_run)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', side_effect=RuntimeError('Test Failure.'))
def test_chain_runner_action_exception(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_1_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
status, results, _ = chain_runner.run({})
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
# based on the chain the callcount is known to be 2. Not great but works.
self.assertEqual(request.call_count, 2)
error_count = 0
for task_result in results['tasks']:
if task_result['result'].get('error', None):
error_count += 1
self.assertEqual(error_count, 2)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_str_param_temp(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_FIRST_TASK_RENDER_FAIL_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({'s1': 1, 's2': 2, 's3': 3, 's4': 4})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
mock_args, _ = request.call_args
self.assertEqual(mock_args[0].parameters, {"p1": "1"})
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_list_param_temp(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_LIST_TEMP_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({'s1': 1, 's2': 2, 's3': 3, 's4': 4})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
mock_args, _ = request.call_args
self.assertEqual(mock_args[0].parameters, {"p1": "[2, 3, 4]"})
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_dict_param_temp(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_DICT_TEMP_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({'s1': 1, 's2': 2, 's3': 3, 's4': 4})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
expected_value = {"p1": {"p1.3": "[3, 4]", "p1.2": "2", "p1.1": "1"}}
mock_args, _ = request.call_args
self.assertEqual(mock_args[0].parameters, expected_value)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request',
return_value=(DummyActionExecution(result={'o1': '1'}), None))
def test_chain_runner_dependent_param_temp(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_DEP_INPUT
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({'s1': 1, 's2': 2, 's3': 3, 's4': 4})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
expected_values = [{u'p1': u'1'},
{u'p1': u'1'},
{u'p2': u'1', u'p3': u'1', u'p1': u'1'}]
# Each of the call_args must be one of
for call_args in request.call_args_list:
self.assertTrue(call_args[0][0].parameters in expected_values)
expected_values.remove(call_args[0][0].parameters)
self.assertEqual(len(expected_values), 0, 'Not all expected values received.')
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request',
return_value=(DummyActionExecution(result={'o1': '1'}), None))
def test_chain_runner_dependent_results_param(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_DEP_RESULTS_INPUT
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({'s1': 1})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
expected_values = [{u'p1': u'1'},
{u'p1': u'1'},
{u'out': u"{'c2': {'o1': '1'}, 'c1': {'o1': '1'}}"}]
# Each of the call_args must be one of
self.assertEqual(request.call_count, 3)
for call_args in request.call_args_list:
self.assertTrue(call_args[0][0].parameters in expected_values)
expected_values.remove(call_args[0][0].parameters)
self.assertEqual(len(expected_values), 0, 'Not all expected values received.')
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(RunnerType, 'get_by_name',
mock.MagicMock(return_value=RUNNER))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_missing_param_temp(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_FIRST_TASK_RENDER_FAIL_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({})
self.assertEqual(request.call_count, 0, 'No call expected.')
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_failure_during_param_rendering_single_task(self, request):
# Parameter rendering should result in a top level error which aborts
# the whole chain
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_FIRST_TASK_RENDER_FAIL_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
status, result, _ = chain_runner.run({})
# No tasks ran because rendering of parameters for the first task failed
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
self.assertEqual(result['tasks'], [])
self.assertTrue('error' in result)
self.assertTrue('traceback' in result)
self.assertTrue('Failed to run task "c1". Parameter rendering failed' in result['error'])
self.assertTrue('Traceback' in result['traceback'])
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_failure_during_param_rendering_multiple_tasks(self, request):
# Parameter rendering should result in a top level error which aborts
# the whole chain
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_SECOND_TASK_RENDER_FAIL_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
status, result, _ = chain_runner.run({})
# Verify that only first task has ran
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
self.assertEqual(len(result['tasks']), 1)
self.assertEqual(result['tasks'][0]['name'], 'c1')
expected_error = ('Failed rendering value for action parameter "p1" in '
'task "c2" (template string={{s1}}):')
self.assertTrue('error' in result)
self.assertTrue('traceback' in result)
self.assertTrue('Failed to run task "c2". Parameter rendering failed' in result['error'])
self.assertTrue(expected_error in result['error'])
self.assertTrue('Traceback' in result['traceback'])
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_typed_params(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_TYPED_PARAMS
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({'s1': 1, 's2': 'two', 's3': 3.14})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
expected_value = {'booltype': True,
'inttype': 1,
'numbertype': 3.14,
'strtype': 'two',
'arrtype': ['1', 'two'],
'objtype': {'s2': 'two',
'k1': '1'}}
mock_args, _ = request.call_args
self.assertEqual(mock_args[0].parameters, expected_value)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_typed_system_params(self, request):
kvps = []
try:
kvps.append(KeyValuePair.add_or_update(KeyValuePairDB(name='a', value='1')))
kvps.append(KeyValuePair.add_or_update(KeyValuePairDB(name='a.b.c', value='two')))
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_SYSTEM_PARAMS
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
expected_value = {'inttype': 1,
'strtype': 'two'}
mock_args, _ = request.call_args
self.assertEqual(mock_args[0].parameters, expected_value)
finally:
for kvp in kvps:
KeyValuePair.delete(kvp)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_vars_system_params(self, request):
kvps = []
try:
kvps.append(KeyValuePair.add_or_update(KeyValuePairDB(name='a', value='two')))
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_WITH_SYSTEM_VARS
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
expected_value = {'inttype': 1,
'strtype': 'two',
'strtype_legacy': 'two',
'booltype': True}
mock_args, _ = request.call_args
self.assertEqual(mock_args[0].parameters, expected_value)
finally:
for kvp in kvps:
KeyValuePair.delete(kvp)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_vars_action_params(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_WITH_ACTIONPARAM_VARS
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
chain_runner.run({'input_a': 'two'})
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
expected_value = {'inttype': 1,
'strtype': 'two',
'booltype': True}
mock_args, _ = request.call_args
self.assertEqual(mock_args[0].parameters, expected_value)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request',
return_value=(DummyActionExecution(result={'raw_out': 'published'}), None))
def test_chain_runner_publish(self, request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_WITH_PUBLISH
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.runner_parameters = {'display_published': True}
chain_runner.pre_run()
action_parameters = {'action_param_1': 'test value 1'}
_, result, _ = chain_runner.run(action_parameters=action_parameters)
# We also assert that the action parameters are available in the
# "publish" scope
self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
expected_value = {'inttype': 1,
'strtype': 'published',
'booltype': True,
'published_action_param': action_parameters['action_param_1']}
mock_args, _ = request.call_args
self.assertEqual(mock_args[0].parameters, expected_value)
# Assert that the variables are correctly published
self.assertEqual(result['published'],
{'published_action_param': u'test value 1', 'o1': u'published'})
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_publish_param_rendering_failure(self, request):
# Parameter rendering should result in a top level error which aborts
# the whole chain
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_WITH_PUBLISH_PARAM_RENDERING_FAILURE
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
try:
chain_runner.run({})
except ParameterRenderingFailedException as e:
# TODO: Should we treat this as task error? Right now it bubbles all
# the way up and it's not really consistent with action param
# rendering failure
expected_error = ('Failed rendering value for publish parameter "p1" in '
'task "c2" (template string={{ not_defined }}):')
self.assertTrue(expected_error in str(e))
pass
else:
self.fail('Exception was not thrown')
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_task_passes_invalid_parameter_type_to_action(self, mock_request):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_ACTION_INVALID_PARAMETER_TYPE
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
action_parameters = {}
expected_msg = ('Failed to cast value "stringnotanarray" \(type: str\) for parameter '
'"arrtype" of type "array"')
self.assertRaisesRegexp(ValueError, expected_msg, chain_runner.run,
action_parameters=action_parameters)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=None))
@mock.patch.object(action_service, 'request',
return_value=(DummyActionExecution(result={'raw_out': 'published'}), None))
def test_action_chain_runner_referenced_action_doesnt_exist(self, mock_request):
# Action referenced by a task doesn't exist, should result in a top level error
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_WITH_INVALID_ACTION
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
action_parameters = {}
status, output, _ = chain_runner.run(action_parameters=action_parameters)
expected_error = ('Failed to run task "c1". Action with reference "wolfpack.a2" '
'doesn\'t exist.')
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
self.assertTrue(expected_error in output['error'])
self.assertTrue('Traceback' in output['traceback'], output['traceback'])
def test_exception_is_thrown_if_both_params_and_parameters_attributes_are_provided(self):
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_ACTION_PARAMS_AND_PARAMETERS_ATTRIBUTE
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
expected_msg = ('Either "params" or "parameters" attribute needs to be provided, but '
'not both')
self.assertRaisesRegexp(runnerexceptions.ActionRunnerPreRunError, expected_msg,
chain_runner.pre_run)
@mock.patch.object(action_db_util, 'get_action_by_ref',
mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_params_and_parameters_attributes_both_work(self, _):
# "params" attribute used
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_ACTION_PARAMS_ATTRIBUTE
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
original_build_liveaction_object = chain_runner._build_liveaction_object
def mock_build_liveaction_object(action_node, resolved_params, parent_context):
# Verify parameters are correctly passed to the action
self.assertEqual(resolved_params, {'pparams': 'v1'})
original_build_liveaction_object(action_node=action_node,
resolved_params=resolved_params,
parent_context=parent_context)
chain_runner._build_liveaction_object = mock_build_liveaction_object
action_parameters = {}
status, output, _ = chain_runner.run(action_parameters=action_parameters)
self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)
# "parameters" attribute used
chain_runner = acr.get_runner()
chain_runner.entry_point = CHAIN_ACTION_PARAMETERS_ATTRIBUTE
chain_runner.action = ACTION_2
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
def mock_build_liveaction_object(action_node, resolved_params, parent_context):
# Verify parameters are correctly passed to the action
self.assertEqual(resolved_params, {'pparameters': 'v1'})
original_build_liveaction_object(action_node=action_node,
resolved_params=resolved_params,
parent_context=parent_context)
chain_runner._build_liveaction_object = mock_build_liveaction_object
action_parameters = {}
status, output, _ = chain_runner.run(action_parameters=action_parameters)
self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)
@classmethod
def tearDownClass(cls):
FixturesLoader().delete_models_from_db(MODELS)
| apache-2.0 |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/helpers.py | 1 | 16313 | # encoding: utf-8
'''This is a collection of helper functions for use in tests.
We want to avoid sharing test helper functions between test modules as
much as possible, and we definitely don't want to share test fixtures between
test modules, or to introduce a complex hierarchy of test class subclasses,
etc.
We want to reduce the amount of "travel" that a reader needs to undertake to
understand a test method -- reducing the number of other files they need to go
and read to understand what the test code does. And we want to avoid tightly
coupling test modules to each other by having them share code.
But some test helper functions just increase the readability of tests so much
and make writing tests so much easier, that it's worth having them despite the
potential drawbacks.
This module is reserved for these very useful functions.
'''
import webtest
import nose.tools
from nose.tools import assert_in, assert_not_in
import mock
from ckan.common import config
import ckan.lib.search as search
import ckan.config.middleware
import ckan.model as model
import ckan.logic as logic
def reset_db():
'''Reset CKAN's database.
If a test class uses the database, then it should call this function in its
``setup()`` method to make sure that it has a clean database to start with
(nothing left over from other test classes or from previous test runs).
If a test class doesn't use the database (and most test classes shouldn't
need to) then it doesn't need to call this function.
:returns: ``None``
'''
# Close any database connections that have been left open.
# This prevents CKAN from hanging waiting for some unclosed connection.
model.Session.close_all()
model.repo.rebuild_db()
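# A minimal usage sketch (the test class below is hypothetical): call
# reset_db() from setup() so every test method starts with a clean database.
#
#   class TestSomething(object):
#       def setup(self):
#           reset_db()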
def call_action(action_name, context=None, **kwargs):
'''Call the named ``ckan.logic.action`` function and return the result.
    This is just a nicer way for user code to call action functions than
either calling the action function directly or via
:py:func:`ckan.logic.get_action`.
For example::
user_dict = call_action('user_create', name='seanh',
email='seanh@seanh.com', password='pass')
Any keyword arguments given will be wrapped in a dict and passed to the
action function as its ``data_dict`` argument.
Note: this skips authorization! It passes 'ignore_auth': True to action
functions in their ``context`` dicts, so the corresponding authorization
functions will not be run.
    This is because ckan.tests.logic.action tests only the actions; the
authorization functions are tested separately in
ckan.tests.logic.auth.
See the :doc:`testing guidelines </contributing/testing>` for more info.
This function should eventually be moved to
:py:func:`ckan.logic.call_action` and the current
:py:func:`ckan.logic.get_action` function should be
deprecated. The tests may still need their own wrapper function for
:py:func:`ckan.logic.call_action`, e.g. to insert ``'ignore_auth': True``
into the ``context`` dict.
:param action_name: the name of the action function to call, e.g.
``'user_update'``
:type action_name: string
:param context: the context dict to pass to the action function
(optional, if no context is given a default one will be supplied)
:type context: dict
:returns: the dict or other value that the action function returns
'''
if context is None:
context = {}
context.setdefault('user', '127.0.0.1')
context.setdefault('ignore_auth', True)
return logic.get_action(action_name)(context=context, data_dict=kwargs)
def call_auth(auth_name, context, **kwargs):
'''Call the named ``ckan.logic.auth`` function and return the result.
This is just a convenience function for tests in
:py:mod:`ckan.tests.logic.auth` to use.
Usage::
result = helpers.call_auth('user_update', context=context,
id='some_user_id',
name='updated_user_name')
:param auth_name: the name of the auth function to call, e.g.
``'user_update'``
:type auth_name: string
:param context: the context dict to pass to the auth function, must
contain ``'user'`` and ``'model'`` keys,
e.g. ``{'user': 'fred', 'model': my_mock_model_object}``
:type context: dict
:returns: the dict that the auth function returns, e.g.
``{'success': True}`` or ``{'success': False, msg: '...'}``
or just ``{'success': False}``
:rtype: dict
'''
assert 'user' in context, ('Test methods must put a user name in the '
'context dict')
assert 'model' in context, ('Test methods must put a model in the '
'context dict')
return logic.check_access(auth_name, context, data_dict=kwargs)
class CKANTestApp(webtest.TestApp):
'''A wrapper around webtest.TestApp
It adds some convenience methods for CKAN
'''
_flask_app = None
@property
def flask_app(self):
if not self._flask_app:
self._flask_app = self.app.apps['flask_app']._wsgi_app
return self._flask_app
def _get_test_app():
'''Return a webtest.TestApp for CKAN, with legacy templates disabled.
For functional tests that need to request CKAN pages or post to the API.
Unit tests shouldn't need this.
'''
config['ckan.legacy_templates'] = False
app = ckan.config.middleware.make_app(config['global_conf'], **config)
app = CKANTestApp(app)
return app
class FunctionalTestBase(object):
'''A base class for functional test classes to inherit from.
Allows configuration changes by overriding _apply_config_changes and
resetting the CKAN config after your test class has run. It creates a
webtest.TestApp at self.app for your class to use to make HTTP requests
to the CKAN web UI or API.
If you're overriding methods that this class provides, like setup_class()
and teardown_class(), make sure to use super() to call this class's methods
at the top of yours!
'''
@classmethod
def _get_test_app(cls): # leading _ because nose is terrible
# FIXME: remove this method and switch to using helpers.get_test_app
# in each test once the old functional tests are fixed or removed
if not hasattr(cls, '_test_app'):
cls._test_app = _get_test_app()
return cls._test_app
@classmethod
def setup_class(cls):
# Make a copy of the Pylons config, so we can restore it in teardown.
cls._original_config = dict(config)
cls._apply_config_changes(config)
cls._get_test_app()
@classmethod
def _apply_config_changes(cls, cfg):
pass
def setup(self):
'''Reset the database and clear the search indexes.'''
reset_db()
if hasattr(self, '_test_app'):
self._test_app.reset()
search.clear_all()
@classmethod
def teardown_class(cls):
# Restore the Pylons config to its original values, in case any tests
# changed any config settings.
config.clear()
config.update(cls._original_config)
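# A minimal subclass sketch (hypothetical): override _apply_config_changes()
# to run a whole test class against modified settings.
#
#   class TestWithCustomTitle(FunctionalTestBase):
#       @classmethod
#       def _apply_config_changes(cls, cfg):
#           cfg['ckan.site_title'] = 'My Test CKAN'
#
#       def test_title(self):
#           app = self._get_test_app()
#           app.get('/')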
def submit_and_follow(app, form, extra_environ=None, name=None,
value=None, **args):
'''
Call webtest_submit with name/value passed expecting a redirect
and return the response from following that redirect.
'''
response = webtest_submit(form, name, value=value, status=302,
extra_environ=extra_environ, **args)
return app.get(url=response.headers['Location'],
extra_environ=extra_environ)
## FIXME: remove webtest_* functions below when we upgrade webtest
def webtest_submit(form, name=None, index=None, value=None, **args):
'''
backported version of webtest.Form.submit that actually works
for submitting with different submit buttons.
We're stuck on an old version of webtest because we're stuck
on an old version of webob because we're stuck on an old version
of Pylons. This prolongs our suffering, but on the bright side
it lets us have functional tests that work.
'''
fields = webtest_submit_fields(form, name, index=index, submit_value=value)
if form.method.upper() != "GET":
args.setdefault("content_type", form.enctype)
return form.response.goto(form.action, method=form.method,
params=fields, **args)
def webtest_submit_fields(form, name=None, index=None, submit_value=None):
'''
backported version of webtest.Form.submit_fields that actually works
for submitting with different submit buttons.
'''
from webtest.app import File
submit = []
# Use another name here so we can keep function param the same for BWC.
submit_name = name
if index is not None and submit_value is not None:
raise ValueError("Can't specify both submit_value and index.")
# If no particular button was selected, use the first one
if index is None and submit_value is None:
index = 0
    # This counts all fields with the submit name, not just submit fields.
current_index = 0
for name, field in form.field_order:
if name is None: # pragma: no cover
continue
if submit_name is not None and name == submit_name:
if index is not None and current_index == index:
submit.append((name, field.value_if_submitted()))
if submit_value is not None and \
field.value_if_submitted() == submit_value:
submit.append((name, field.value_if_submitted()))
current_index += 1
else:
value = field.value
if value is None:
continue
if isinstance(field, File):
submit.append((name, field))
continue
if isinstance(value, list):
for item in value:
submit.append((name, item))
else:
submit.append((name, value))
return submit
def webtest_maybe_follow(response, **kw):
"""
Follow all redirects. If this response is not a redirect, do nothing.
Returns another response object.
(backported from WebTest 2.0.1)
"""
remaining_redirects = 100 # infinite loops protection
while 300 <= response.status_int < 400 and remaining_redirects:
response = response.follow(**kw)
remaining_redirects -= 1
assert remaining_redirects > 0, "redirects chain looks infinite"
return response
def change_config(key, value):
'''Decorator to temporarily change CKAN's config to a new value
This allows you to easily create tests that need specific config values to
be set, making sure it'll be reverted to what it was originally, after your
test is run.
Usage::
@helpers.change_config('ckan.site_title', 'My Test CKAN')
def test_ckan_site_title(self):
assert config['ckan.site_title'] == 'My Test CKAN'
:param key: the config key to be changed, e.g. ``'ckan.site_title'``
:type key: string
:param value: the new config key's value, e.g. ``'My Test CKAN'``
:type value: string
'''
def decorator(func):
def wrapper(*args, **kwargs):
_original_config = config.copy()
config[key] = value
try:
return_value = func(*args, **kwargs)
finally:
config.clear()
config.update(_original_config)
return return_value
return nose.tools.make_decorator(func)(wrapper)
return decorator
def mock_auth(auth_function_path):
'''
Decorator to easily mock a CKAN auth method in the context of a test
function
It adds a mock object for the provided auth_function_path as a parameter to
the test function.
Essentially it makes sure that `ckan.authz.clear_auth_functions_cache` is
called before and after to make sure that the auth functions pick up
the newly changed values.
Usage::
@helpers.mock_auth('ckan.logic.auth.create.package_create')
def test_mock_package_create(self, mock_package_create):
from ckan import logic
mock_package_create.return_value = {'success': True}
# package_create is mocked
eq_(logic.check_access('package_create', {}), True)
assert mock_package_create.called
:param action_name: the full path to the auth function to be mocked,
e.g. ``ckan.logic.auth.create.package_create``
:type action_name: string
'''
from ckan.authz import clear_auth_functions_cache
def decorator(func):
def wrapper(*args, **kwargs):
try:
with mock.patch(auth_function_path) as mocked_auth:
clear_auth_functions_cache()
new_args = args + tuple([mocked_auth])
return_value = func(*new_args, **kwargs)
finally:
clear_auth_functions_cache()
return return_value
return nose.tools.make_decorator(func)(wrapper)
return decorator
def mock_action(action_name):
'''
Decorator to easily mock a CKAN action in the context of a test function
It adds a mock object for the provided action as a parameter to the test
function. The mock is discarded at the end of the function, even if there
is an exception raised.
Note that this mocks the action both when it's called directly via
``ckan.logic.get_action`` and via ``ckan.plugins.toolkit.get_action``.
Usage::
@mock_action('user_list')
def test_mock_user_list(self, mock_user_list):
mock_user_list.return_value = 'hi'
# user_list is mocked
eq_(helpers.call_action('user_list', {}), 'hi')
assert mock_user_list.called
:param action_name: the name of the action to be mocked,
e.g. ``package_create``
:type action_name: string
'''
def decorator(func):
def wrapper(*args, **kwargs):
mock_action = mock.MagicMock()
from ckan.logic import get_action as original_get_action
def side_effect(called_action_name):
if called_action_name == action_name:
return mock_action
else:
return original_get_action(called_action_name)
try:
with mock.patch('ckan.logic.get_action') as mock_get_action, \
mock.patch('ckan.plugins.toolkit.get_action') \
as mock_get_action_toolkit:
mock_get_action.side_effect = side_effect
mock_get_action_toolkit.side_effect = side_effect
new_args = args + tuple([mock_action])
return_value = func(*new_args, **kwargs)
finally:
# Make sure to stop the mock, even with an exception
mock_action.stop()
return return_value
return nose.tools.make_decorator(func)(wrapper)
return decorator
def set_extra_environ(key, value):
    '''Decorator to temporarily change a single request environment value
    Create a new test app and use the side effect of making a request
to set an extra_environ value. Reset the value to '' after the test.
Usage::
        @helpers.set_extra_environ('SCRIPT_NAME', '/myscript')
def test_ckan_thing_affected_by_script_name(self):
# ...
:param key: the extra_environ key to be changed, e.g. ``'SCRIPT_NAME'``
:type key: string
:param value: the new extra_environ key's value, e.g. ``'/myscript'``
:type value: string
'''
def decorator(func):
def wrapper(*args, **kwargs):
app = _get_test_app()
app.get('/', extra_environ={key: value})
try:
return_value = func(*args, **kwargs)
finally:
app.get('/', extra_environ={key: ''})
return return_value
return nose.tools.make_decorator(func)(wrapper)
return decorator
| gpl-3.0 |
mancoast/CPythonPyc_test | fail/332_test_pep352.py | 93 | 7146 | import unittest
import builtins
import warnings
from test.support import run_unittest
import os
from platform import system as platform_system
class ExceptionClassTests(unittest.TestCase):
"""Tests for anything relating to exception objects themselves (e.g.,
inheritance hierarchy)"""
def test_builtins_new_style(self):
self.assertTrue(issubclass(Exception, object))
def verify_instance_interface(self, ins):
for attr in ("args", "__str__", "__repr__"):
self.assertTrue(hasattr(ins, attr),
"%s missing %s attribute" %
(ins.__class__.__name__, attr))
def test_inheritance(self):
# Make sure the inheritance hierarchy matches the documentation
exc_set = set()
for object_ in builtins.__dict__.values():
try:
if issubclass(object_, BaseException):
exc_set.add(object_.__name__)
except TypeError:
pass
inheritance_tree = open(os.path.join(os.path.split(__file__)[0],
'exception_hierarchy.txt'))
try:
superclass_name = inheritance_tree.readline().rstrip()
try:
last_exc = getattr(builtins, superclass_name)
except AttributeError:
self.fail("base class %s not a built-in" % superclass_name)
self.assertIn(superclass_name, exc_set,
'%s not found' % superclass_name)
exc_set.discard(superclass_name)
superclasses = [] # Loop will insert base exception
last_depth = 0
for exc_line in inheritance_tree:
exc_line = exc_line.rstrip()
depth = exc_line.rindex('-')
exc_name = exc_line[depth+2:] # Slice past space
if '(' in exc_name:
paren_index = exc_name.index('(')
platform_name = exc_name[paren_index+1:-1]
exc_name = exc_name[:paren_index-1] # Slice off space
if platform_system() != platform_name:
exc_set.discard(exc_name)
continue
if '[' in exc_name:
left_bracket = exc_name.index('[')
exc_name = exc_name[:left_bracket-1] # cover space
try:
exc = getattr(builtins, exc_name)
except AttributeError:
self.fail("%s not a built-in exception" % exc_name)
if last_depth < depth:
superclasses.append((last_depth, last_exc))
elif last_depth > depth:
while superclasses[-1][0] >= depth:
superclasses.pop()
self.assertTrue(issubclass(exc, superclasses[-1][1]),
"%s is not a subclass of %s" % (exc.__name__,
superclasses[-1][1].__name__))
try: # Some exceptions require arguments; just skip them
self.verify_instance_interface(exc())
except TypeError:
pass
self.assertIn(exc_name, exc_set)
exc_set.discard(exc_name)
last_exc = exc
last_depth = depth
finally:
inheritance_tree.close()
self.assertEqual(len(exc_set), 0, "%s not accounted for" % exc_set)
interface_tests = ("length", "args", "str", "repr")
def interface_test_driver(self, results):
for test_name, (given, expected) in zip(self.interface_tests, results):
self.assertEqual(given, expected, "%s: %s != %s" % (test_name,
given, expected))
def test_interface_single_arg(self):
# Make sure interface works properly when given a single argument
arg = "spam"
exc = Exception(arg)
results = ([len(exc.args), 1], [exc.args[0], arg],
[str(exc), str(arg)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)])
self.interface_test_driver(results)
def test_interface_multi_arg(self):
# Make sure interface correct when multiple arguments given
arg_count = 3
args = tuple(range(arg_count))
exc = Exception(*args)
results = ([len(exc.args), arg_count], [exc.args, args],
[str(exc), str(args)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)])
self.interface_test_driver(results)
def test_interface_no_arg(self):
# Make sure that with no args that interface is correct
exc = Exception()
results = ([len(exc.args), 0], [exc.args, tuple()],
[str(exc), ''],
[repr(exc), exc.__class__.__name__ + '()'])
self.interface_test_driver(results)
class UsageTests(unittest.TestCase):
"""Test usage of exceptions"""
def raise_fails(self, object_):
"""Make sure that raising 'object_' triggers a TypeError."""
try:
raise object_
except TypeError:
return # What is expected.
self.fail("TypeError expected for raising %s" % type(object_))
def catch_fails(self, object_):
"""Catching 'object_' should raise a TypeError."""
try:
try:
raise Exception
except object_:
pass
except TypeError:
pass
except Exception:
self.fail("TypeError expected when catching %s" % type(object_))
try:
try:
raise Exception
except (object_,):
pass
except TypeError:
return
except Exception:
self.fail("TypeError expected when catching %s as specified in a "
"tuple" % type(object_))
def test_raise_new_style_non_exception(self):
# You cannot raise a new-style class that does not inherit from
# BaseException; the ability was not possible until BaseException's
# introduction so no need to support new-style objects that do not
# inherit from it.
class NewStyleClass(object):
pass
self.raise_fails(NewStyleClass)
self.raise_fails(NewStyleClass())
def test_raise_string(self):
# Raising a string raises TypeError.
self.raise_fails("spam")
def test_catch_non_BaseException(self):
        # Trying to catch an object that does not inherit from BaseException
# is not allowed.
class NonBaseException(object):
pass
self.catch_fails(NonBaseException)
self.catch_fails(NonBaseException())
def test_catch_BaseException_instance(self):
# Catching an instance of a BaseException subclass won't work.
self.catch_fails(BaseException())
def test_catch_string(self):
# Catching a string is bad.
self.catch_fails("spam")
def test_main():
run_unittest(ExceptionClassTests, UsageTests)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
40223236/40223236-50 | static/Brython3.1.1-20150328-091302/Lib/xml/etree/ElementInclude.py | 784 | 5146 | #
# ElementTree
# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
from . import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding (UTF-8 by default for "text").
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
if parse == "xml":
file = open(href, 'rb')
data = ElementTree.parse(file).getroot()
else:
if not encoding:
encoding = 'UTF-8'
file = open(href, 'r', encoding=encoding)
data = file.read()
file.close()
return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy.copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text + (e.tail or "")
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
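# Editorial sketch (not part of the original module): a typical driver for
# the loader/include API above. "doc.xml" and "expanded.xml" are
# hypothetical file names, assumed only for illustration.
#
#     tree = ElementTree.parse("doc.xml")   # document containing xi:include
#     include(tree.getroot())               # expand directives in place
#     tree.write("expanded.xml")
#
# include() mutates the tree: each xi:include element is replaced by the
# loaded resource, so the same root can be serialized afterwards.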
| gpl-3.0 |
teeple/pns_server | work/install/Python-2.7.4/Tools/bgen/bgen/bgenObjectDefinition.py | 44 | 17822 | from bgenOutput import *
from bgenGeneratorGroup import GeneratorGroup
class ObjectDefinition(GeneratorGroup):
"Spit out code that together defines a new Python object type"
basechain = "NULL"
tp_flags = "Py_TPFLAGS_DEFAULT"
basetype = None
argref = "" # set to "*" if arg to <type>_New should be pointer
argconst = "" # set to "const " if arg to <type>_New should be const
def __init__(self, name, prefix, itselftype):
"""ObjectDefinition constructor. May be extended, but do not override.
- name: the object's official name, e.g. 'SndChannel'.
- prefix: the prefix used for the object's functions and data, e.g. 'SndCh'.
- itselftype: the C type actually contained in the object, e.g. 'SndChannelPtr'.
XXX For official Python data types, rules for the 'Py' prefix are a problem.
"""
GeneratorGroup.__init__(self, prefix or name)
self.name = name
self.itselftype = itselftype
self.objecttype = name + 'Object'
self.typename = name + '_Type'
self.static = "static " # set to "" to make <type>_New and <type>_Convert public
self.modulename = None
if hasattr(self, "assertions"):
self.assertions()
def add(self, g, dupcheck=0):
g.setselftype(self.objecttype, self.itselftype)
GeneratorGroup.add(self, g, dupcheck)
def reference(self):
# In case we are referenced from a module
pass
def setmodulename(self, name):
self.modulename = name
def generate(self):
# XXX This should use long strings and %(varname)s substitution!
OutHeader2("Object type " + self.name)
self.outputCheck()
Output("typedef struct %s {", self.objecttype)
IndentLevel()
Output("PyObject_HEAD")
self.outputStructMembers()
DedentLevel()
Output("} %s;", self.objecttype)
self.outputNew()
self.outputConvert()
self.outputDealloc()
GeneratorGroup.generate(self)
Output()
self.outputMethodChain()
self.outputGetattr()
self.outputSetattr()
self.outputCompare()
self.outputRepr()
self.outputHash()
self.outputPEP253Hooks()
self.outputTypeObject()
OutHeader2("End object type " + self.name)
def outputCheck(self):
sf = self.static and "static "
Output("%sPyTypeObject %s;", sf, self.typename)
Output()
Output("#define %s_Check(x) ((x)->ob_type == &%s || PyObject_TypeCheck((x), &%s))",
self.prefix, self.typename, self.typename)
Output()
def outputMethodChain(self):
Output("%sPyMethodChain %s_chain = { %s_methods, %s };",
self.static, self.prefix, self.prefix, self.basechain)
def outputStructMembers(self):
Output("%s ob_itself;", self.itselftype)
def outputNew(self):
Output()
Output("%sPyObject *%s_New(%s%s %sitself)", self.static, self.prefix,
self.argconst, self.itselftype, self.argref)
OutLbrace()
Output("%s *it;", self.objecttype)
self.outputCheckNewArg()
Output("it = PyObject_NEW(%s, &%s);", self.objecttype, self.typename)
Output("if (it == NULL) return NULL;")
if self.basetype:
Output("/* XXXX Should we tp_init or tp_new our basetype? */")
self.outputInitStructMembers()
Output("return (PyObject *)it;")
OutRbrace()
def outputInitStructMembers(self):
Output("it->ob_itself = %sitself;", self.argref)
def outputCheckNewArg(self):
"Override this method to apply additional checks/conversions"
def outputConvert(self):
Output()
Output("%sint %s_Convert(PyObject *v, %s *p_itself)", self.static, self.prefix,
self.itselftype)
OutLbrace()
self.outputCheckConvertArg()
Output("if (!%s_Check(v))", self.prefix)
OutLbrace()
Output('PyErr_SetString(PyExc_TypeError, "%s required");', self.name)
Output("return 0;")
OutRbrace()
Output("*p_itself = ((%s *)v)->ob_itself;", self.objecttype)
Output("return 1;")
OutRbrace()
def outputCheckConvertArg(self):
"Override this method to apply additional conversions"
def outputDealloc(self):
Output()
Output("static void %s_dealloc(%s *self)", self.prefix, self.objecttype)
OutLbrace()
self.outputCleanupStructMembers()
if self.basetype:
Output("%s.tp_dealloc((PyObject *)self);", self.basetype)
elif hasattr(self, 'output_tp_free'):
# This is a new-style object with tp_free slot
Output("self->ob_type->tp_free((PyObject *)self);")
else:
Output("PyObject_Free((PyObject *)self);")
OutRbrace()
def outputCleanupStructMembers(self):
self.outputFreeIt("self->ob_itself")
def outputFreeIt(self, name):
Output("/* Cleanup of %s goes here */", name)
def outputGetattr(self):
Output()
Output("static PyObject *%s_getattr(%s *self, char *name)", self.prefix, self.objecttype)
OutLbrace()
self.outputGetattrBody()
OutRbrace()
def outputGetattrBody(self):
self.outputGetattrHook()
Output("return Py_FindMethodInChain(&%s_chain, (PyObject *)self, name);",
self.prefix)
def outputGetattrHook(self):
pass
def outputSetattr(self):
Output()
Output("#define %s_setattr NULL", self.prefix)
def outputCompare(self):
Output()
Output("#define %s_compare NULL", self.prefix)
def outputRepr(self):
Output()
Output("#define %s_repr NULL", self.prefix)
def outputHash(self):
Output()
Output("#define %s_hash NULL", self.prefix)
def outputTypeObject(self):
sf = self.static and "static "
Output()
Output("%sPyTypeObject %s = {", sf, self.typename)
IndentLevel()
Output("PyObject_HEAD_INIT(NULL)")
Output("0, /*ob_size*/")
if self.modulename:
Output("\"%s.%s\", /*tp_name*/", self.modulename, self.name)
else:
Output("\"%s\", /*tp_name*/", self.name)
Output("sizeof(%s), /*tp_basicsize*/", self.objecttype)
Output("0, /*tp_itemsize*/")
Output("/* methods */")
Output("(destructor) %s_dealloc, /*tp_dealloc*/", self.prefix)
Output("0, /*tp_print*/")
Output("(getattrfunc) %s_getattr, /*tp_getattr*/", self.prefix)
Output("(setattrfunc) %s_setattr, /*tp_setattr*/", self.prefix)
Output("(cmpfunc) %s_compare, /*tp_compare*/", self.prefix)
Output("(reprfunc) %s_repr, /*tp_repr*/", self.prefix)
Output("(PyNumberMethods *)0, /* tp_as_number */")
Output("(PySequenceMethods *)0, /* tp_as_sequence */")
Output("(PyMappingMethods *)0, /* tp_as_mapping */")
Output("(hashfunc) %s_hash, /*tp_hash*/", self.prefix)
DedentLevel()
Output("};")
def outputTypeObjectInitializer(self):
Output("""%s.ob_type = &PyType_Type;""", self.typename)
if self.basetype:
Output("%s.tp_base = &%s;", self.typename, self.basetype)
Output("if (PyType_Ready(&%s) < 0) return;", self.typename)
Output("""Py_INCREF(&%s);""", self.typename)
Output("PyModule_AddObject(m, \"%s\", (PyObject *)&%s);", self.name, self.typename);
self.outputTypeObjectInitializerCompat()
def outputTypeObjectInitializerCompat(self):
Output("/* Backward-compatible name */")
Output("""Py_INCREF(&%s);""", self.typename);
Output("PyModule_AddObject(m, \"%sType\", (PyObject *)&%s);", self.name, self.typename);
def outputPEP253Hooks(self):
pass
class PEP252Mixin:
getsetlist = []
def assertions(self):
# Check that various things aren't overridden. If they are it could
# signify a bgen-client that has been partially converted to PEP252.
assert self.outputGetattr.im_func == PEP252Mixin.outputGetattr.im_func
assert self.outputSetattr.im_func == PEP252Mixin.outputSetattr.im_func
assert self.outputGetattrBody == None
assert self.outputGetattrHook == None
assert self.basechain == "NULL"
def outputGetattr(self):
pass
outputGetattrBody = None
outputGetattrHook = None
def outputSetattr(self):
pass
def outputMethodChain(self):
# This is a good place to output the getters and setters
self.outputGetSetList()
def outputHook(self, name):
methodname = "outputHook_" + name
if hasattr(self, methodname):
func = getattr(self, methodname)
func()
else:
Output("0, /*%s*/", name)
def outputTypeObject(self):
sf = self.static and "static "
Output()
Output("%sPyTypeObject %s = {", sf, self.typename)
IndentLevel()
Output("PyObject_HEAD_INIT(NULL)")
Output("0, /*ob_size*/")
if self.modulename:
Output("\"%s.%s\", /*tp_name*/", self.modulename, self.name)
else:
Output("\"%s\", /*tp_name*/", self.name)
Output("sizeof(%s), /*tp_basicsize*/", self.objecttype)
Output("0, /*tp_itemsize*/")
Output("/* methods */")
Output("(destructor) %s_dealloc, /*tp_dealloc*/", self.prefix)
Output("0, /*tp_print*/")
Output("(getattrfunc)0, /*tp_getattr*/")
Output("(setattrfunc)0, /*tp_setattr*/")
Output("(cmpfunc) %s_compare, /*tp_compare*/", self.prefix)
Output("(reprfunc) %s_repr, /*tp_repr*/", self.prefix)
Output("(PyNumberMethods *)0, /* tp_as_number */")
Output("(PySequenceMethods *)0, /* tp_as_sequence */")
Output("(PyMappingMethods *)0, /* tp_as_mapping */")
Output("(hashfunc) %s_hash, /*tp_hash*/", self.prefix)
self.outputHook("tp_call")
Output("0, /*tp_str*/")
Output("PyObject_GenericGetAttr, /*tp_getattro*/")
Output("PyObject_GenericSetAttr, /*tp_setattro */")
self.outputHook("tp_as_buffer")
Output("%s, /* tp_flags */", self.tp_flags)
self.outputHook("tp_doc")
self.outputHook("tp_traverse")
self.outputHook("tp_clear")
self.outputHook("tp_richcompare")
self.outputHook("tp_weaklistoffset")
self.outputHook("tp_iter")
self.outputHook("tp_iternext")
Output("%s_methods, /* tp_methods */", self.prefix)
self.outputHook("tp_members")
Output("%s_getsetlist, /*tp_getset*/", self.prefix)
self.outputHook("tp_base")
self.outputHook("tp_dict")
self.outputHook("tp_descr_get")
self.outputHook("tp_descr_set")
self.outputHook("tp_dictoffset")
self.outputHook("tp_init")
self.outputHook("tp_alloc")
self.outputHook("tp_new")
self.outputHook("tp_free")
DedentLevel()
Output("};")
def outputGetSetList(self):
if self.getsetlist:
for name, get, set, doc in self.getsetlist:
if get:
self.outputGetter(name, get)
else:
Output("#define %s_get_%s NULL", self.prefix, name)
Output()
if set:
self.outputSetter(name, set)
else:
Output("#define %s_set_%s NULL", self.prefix, name)
Output()
Output("static PyGetSetDef %s_getsetlist[] = {", self.prefix)
IndentLevel()
for name, get, set, doc in self.getsetlist:
if doc:
doc = '"' + doc + '"'
else:
doc = "NULL"
Output("{\"%s\", (getter)%s_get_%s, (setter)%s_set_%s, %s},",
name, self.prefix, name, self.prefix, name, doc)
Output("{NULL, NULL, NULL, NULL},")
DedentLevel()
Output("};")
else:
Output("#define %s_getsetlist NULL", self.prefix)
Output()
def outputGetter(self, name, code):
Output("static PyObject *%s_get_%s(%s *self, void *closure)",
self.prefix, name, self.objecttype)
OutLbrace()
Output(code)
OutRbrace()
Output()
def outputSetter(self, name, code):
Output("static int %s_set_%s(%s *self, PyObject *v, void *closure)",
self.prefix, name, self.objecttype)
OutLbrace()
Output(code)
Output("return 0;")
OutRbrace()
Output()
class PEP253Mixin(PEP252Mixin):
tp_flags = "Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE"
def outputHook_tp_init(self):
Output("%s_tp_init, /* tp_init */", self.prefix)
def outputHook_tp_alloc(self):
Output("%s_tp_alloc, /* tp_alloc */", self.prefix)
def outputHook_tp_new(self):
Output("%s_tp_new, /* tp_new */", self.prefix)
def outputHook_tp_free(self):
Output("%s_tp_free, /* tp_free */", self.prefix)
def output_tp_initBody_basecall(self):
"""If a type shares its init call with its base type set output_tp_initBody
to output_tp_initBody_basecall"""
if self.basetype:
Output("if (%s.tp_init)", self.basetype)
OutLbrace()
Output("if ( (*%s.tp_init)(_self, _args, _kwds) < 0) return -1;", self.basetype)
OutRbrace()
output_tp_initBody = None
def output_tp_init(self):
if self.output_tp_initBody:
Output("static int %s_tp_init(PyObject *_self, PyObject *_args, PyObject *_kwds)", self.prefix)
OutLbrace()
self.output_tp_initBody()
OutRbrace()
else:
Output("#define %s_tp_init 0", self.prefix)
Output()
output_tp_allocBody = None
def output_tp_alloc(self):
if self.output_tp_allocBody:
Output("static PyObject *%s_tp_alloc(PyTypeObject *type, int nitems)",
self.prefix)
OutLbrace()
self.output_tp_allocBody()
OutRbrace()
else:
Output("#define %s_tp_alloc PyType_GenericAlloc", self.prefix)
Output()
def output_tp_newBody(self):
Output("PyObject *_self;");
Output("%s itself;", self.itselftype);
Output("char *kw[] = {\"itself\", 0};")
Output()
Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"O&\", kw, %s_Convert, &itself)) return NULL;",
self.prefix);
if self.basetype:
Output("if (%s.tp_new)", self.basetype)
OutLbrace()
Output("if ( (*%s.tp_new)(type, _args, _kwds) == NULL) return NULL;", self.basetype)
Dedent()
Output("} else {")
Indent()
Output("if ((_self = type->tp_alloc(type, 0)) == NULL) return NULL;")
OutRbrace()
else:
Output("if ((_self = type->tp_alloc(type, 0)) == NULL) return NULL;")
Output("((%s *)_self)->ob_itself = itself;", self.objecttype)
Output("return _self;")
def output_tp_new(self):
if self.output_tp_newBody:
Output("static PyObject *%s_tp_new(PyTypeObject *type, PyObject *_args, PyObject *_kwds)", self.prefix)
OutLbrace()
self.output_tp_newBody()
OutRbrace()
else:
Output("#define %s_tp_new PyType_GenericNew", self.prefix)
Output()
output_tp_freeBody = None
def output_tp_free(self):
if self.output_tp_freeBody:
Output("static void %s_tp_free(PyObject *self)", self.prefix)
OutLbrace()
self.output_tp_freeBody()
OutRbrace()
else:
Output("#define %s_tp_free PyObject_Del", self.prefix)
Output()
def outputPEP253Hooks(self):
self.output_tp_init()
self.output_tp_alloc()
self.output_tp_new()
self.output_tp_free()
class GlobalObjectDefinition(ObjectDefinition):
"""Like ObjectDefinition but exports some parts.
XXX Should also somehow generate a .h file for them.
"""
def __init__(self, name, prefix = None, itselftype = None):
ObjectDefinition.__init__(self, name, prefix or name, itselftype or name)
self.static = ""
class ObjectIdentityMixin:
"""A mixin class for objects that makes the identity of ob_itself
govern comparisons and dictionary lookups. Useful if the C object can
be returned by library calls and it is difficult (or impossible) to find
the corresponding Python objects. With this you can create Python object
wrappers on the fly"""
def outputCompare(self):
Output()
Output("static int %s_compare(%s *self, %s *other)", self.prefix, self.objecttype,
self.objecttype)
OutLbrace()
Output("unsigned long v, w;")
Output()
Output("if (!%s_Check((PyObject *)other))", self.prefix)
OutLbrace()
Output("v=(unsigned long)self;")
Output("w=(unsigned long)other;")
OutRbrace()
Output("else")
OutLbrace()
Output("v=(unsigned long)self->ob_itself;")
Output("w=(unsigned long)other->ob_itself;")
OutRbrace()
Output("if( v < w ) return -1;")
Output("if( v > w ) return 1;")
Output("return 0;")
OutRbrace()
def outputHash(self):
Output()
Output("static long %s_hash(%s *self)", self.prefix, self.objecttype)
OutLbrace()
Output("return (long)self->ob_itself;")
OutRbrace()
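# Editorial sketch (not from the original file): following the
# ObjectDefinition docstring above, a bgen client script would typically do
# something like
#
#     obj = GlobalObjectDefinition('SndChannel', 'SndCh', 'SndChannelPtr')
#     obj.setmodulename('_Snd')
#     obj.generate()   # emits the C struct, <prefix>_New/_Convert, type object
#
# The surrounding module and output-file setup vary per bgen client; treat
# this as illustrative only.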
| gpl-2.0 |
jeasoft/odoo | marcos_addons/marcos_ipf_module/__openerp__.py | 2 | 1035 | # -*- coding: utf-8 -*-
{
'name': "marcos_ipf_module",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Your Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Tools',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'web', 'account', "mail", "website", "auth_signup"],
# always loaded
'data': [
'security/marcos_ipf_module.xml',
'security/ir.model.access.csv',
'templates.xml',
'models_view.xml',
'wizard/book_generator_view.xml'
],
# only loaded in demonstration mode
'demo': [
# 'demo.xml',
],
} | agpl-3.0 |
jlspyaozhongkai/Uter | third_party_build/Python-2.7.9/lib/python2.7/lib2to3/fixes/fix_isinstance.py | 326 | 1609 | # Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that cleans up a tuple argument to isinstance after the tokens
in it were fixed. This is mainly used to remove double occurrences of
tokens as a leftover of the long -> int / unicode -> str conversion.
eg. isinstance(x, (int, long)) -> isinstance(x, (int, int))
-> isinstance(x, int)
"""
from .. import fixer_base
from ..fixer_util import token
class FixIsinstance(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power<
'isinstance'
trailer< '(' arglist< any ',' atom< '('
args=testlist_gexp< any+ >
')' > > ')' >
>
"""
run_order = 6
def transform(self, node, results):
names_inserted = set()
testlist = results["args"]
args = testlist.children
new_args = []
iterator = enumerate(args)
for idx, arg in iterator:
if arg.type == token.NAME and arg.value in names_inserted:
if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
iterator.next()
continue
else:
new_args.append(arg)
if arg.type == token.NAME:
names_inserted.add(arg.value)
if new_args and new_args[-1].type == token.COMMA:
del new_args[-1]
if len(new_args) == 1:
atom = testlist.parent
new_args[0].prefix = atom.prefix
atom.replace(new_args[0])
else:
args[:] = new_args
node.changed()
| gpl-3.0 |
jank3/django | tests/admin_widgets/models.py | 227 | 4760 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class MyFileField(models.FileField):
pass
@python_2_unicode_compatible
class Member(models.Model):
name = models.CharField(max_length=100)
birthdate = models.DateTimeField(blank=True, null=True)
gender = models.CharField(max_length=1, blank=True, choices=[('M', 'Male'), ('F', 'Female')])
email = models.EmailField(blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Band(models.Model):
name = models.CharField(max_length=100)
style = models.CharField(max_length=20)
members = models.ManyToManyField(Member)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Album(models.Model):
band = models.ForeignKey(Band, models.CASCADE)
name = models.CharField(max_length=100)
cover_art = models.FileField(upload_to='albums')
backside_art = MyFileField(upload_to='albums_back', null=True)
def __str__(self):
return self.name
class HiddenInventoryManager(models.Manager):
def get_queryset(self):
return super(HiddenInventoryManager, self).get_queryset().filter(hidden=False)
@python_2_unicode_compatible
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
hidden = models.BooleanField(default=False)
# see #9258
default_manager = models.Manager()
objects = HiddenInventoryManager()
def __str__(self):
return self.name
class Event(models.Model):
main_band = models.ForeignKey(
Band,
models.CASCADE,
limit_choices_to=models.Q(pk__gt=0),
related_name='events_main_band_at',
)
supporting_bands = models.ManyToManyField(Band, blank=True, related_name='events_supporting_band_at')
start_date = models.DateField(blank=True, null=True)
start_time = models.TimeField(blank=True, null=True)
description = models.TextField(blank=True)
link = models.URLField(blank=True)
min_age = models.IntegerField(blank=True, null=True)
@python_2_unicode_compatible
class Car(models.Model):
owner = models.ForeignKey(User, models.CASCADE)
make = models.CharField(max_length=30)
model = models.CharField(max_length=30)
def __str__(self):
return "%s %s" % (self.make, self.model)
class CarTire(models.Model):
"""
    A single car tire. This is to test that a user can only select their own cars.
"""
car = models.ForeignKey(Car, models.CASCADE)
class Honeycomb(models.Model):
location = models.CharField(max_length=20)
class Bee(models.Model):
"""
A model with a FK to a model that won't be registered with the admin
(Honeycomb) so the corresponding raw ID widget won't have a magnifying
glass link to select related honeycomb instances.
"""
honeycomb = models.ForeignKey(Honeycomb, models.CASCADE)
class Individual(models.Model):
"""
A model with a FK to itself. It won't be registered with the admin, so the
corresponding raw ID widget won't have a magnifying glass link to select
related instances (rendering will be called programmatically in this case).
"""
name = models.CharField(max_length=20)
parent = models.ForeignKey('self', models.SET_NULL, null=True)
soulmate = models.ForeignKey('self', models.CASCADE, null=True, related_name='soulmates')
class Company(models.Model):
name = models.CharField(max_length=20)
class Advisor(models.Model):
"""
A model with a m2m to a model that won't be registered with the admin
(Company) so the corresponding raw ID widget won't have a magnifying
glass link to select related company instances.
"""
name = models.CharField(max_length=20)
companies = models.ManyToManyField(Company)
@python_2_unicode_compatible
class Student(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
@python_2_unicode_compatible
class School(models.Model):
name = models.CharField(max_length=255)
students = models.ManyToManyField(Student, related_name='current_schools')
alumni = models.ManyToManyField(Student, related_name='previous_schools')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Profile(models.Model):
user = models.ForeignKey('auth.User', models.CASCADE, to_field='username')
def __str__(self):
return self.user.username
| bsd-3-clause |
umeboshi2/vignewton | vignewton/resources.py | 1 | 1855 | from pyramid.security import Allow, Everyone, Authenticated
from fanstatic import Library, Resource
from js.lightbox import lightbox
from haberdashery.resources import jqueryui, fc_css, deform_css
#from trumpet.resources import jqueryui
from trumpet.resources import StaticResources as TrumpetResources
library = Library('vignewton_lib', 'static')
css = Library('vignewton_css', 'static/css')
js = Library('vignewton_js', 'static/js')
favicon = Resource(library, 'favicon.ico')
main_screen = Resource(css, 'mainscreen.css', depends=[deform_css])
admin_screen = Resource(css, 'adminscreen.css', depends=[deform_css])
post_to_url = Resource(js, 'post2url.js', depends=[jqueryui])
main_calendar_view = Resource(js, 'main-calendar-view.js', depends=[fc_css])
main_betgames_view = Resource(js, 'main-betgames-view.js', depends=[jqueryui])
main_betgames_confirm_bet = Resource(js, 'main-betgames-confirm-bet.js',
depends=[jqueryui])
from vignewton.security import authn_policy
class StaticResources(TrumpetResources):
main_screen = main_screen
admin_screen = admin_screen
# override trumpet favicon
favicon = favicon
main_calendar_view = main_calendar_view
main_betgames_view = main_betgames_view
main_betgames_confirm_bet = main_betgames_confirm_bet
post_to_url = post_to_url
lightbox = lightbox
# the acl entries are allow/deny, group, permission
class RootGroupFactory(object):
__name__ = ""
__acl__ = [
(Allow, Everyone, 'public'),
(Allow, Authenticated, 'user'),
(Allow, 'manager', 'manage'),
(Allow, 'editor', ('wiki_add', 'wiki_edit')),
(Allow, 'admin', ('admin', 'manage')),
]
authn_policy = authn_policy
def __init__(self, request):
        # nothing per-request to initialize; the ACL and policy are class attributes
pass
| unlicense |
mikeboers/PyAV | tests/test_codec_context.py | 1 | 10799 | from fractions import Fraction
from unittest import SkipTest
import os
from av import AudioResampler, Codec, Packet
from av.codec.codec import UnknownCodecError
import av
from .common import TestCase, fate_suite
def iter_frames(container, stream):
for packet in container.demux(stream):
for frame in packet.decode():
yield frame
def iter_raw_frames(path, packet_sizes, ctx):
with open(path, 'rb') as f:
for i, size in enumerate(packet_sizes):
packet = Packet(size)
read_size = f.readinto(packet)
assert size
assert read_size == size
if not read_size:
break
for frame in ctx.decode(packet):
yield frame
while True:
try:
frames = ctx.decode(None)
except EOFError:
break
for frame in frames:
yield frame
if not frames:
break
class TestCodecContext(TestCase):
def test_skip_frame_default(self):
ctx = Codec('png', 'w').create()
self.assertEqual(ctx.skip_frame.name, 'DEFAULT')
def test_parse(self):
# This one parses into a single packet.
self._assert_parse('mpeg4', fate_suite('h264/interlaced_crop.mp4'))
# This one parses into many small packets.
self._assert_parse('mpeg2video', fate_suite('mpeg2/mpeg2_field_encoding.ts'))
def _assert_parse(self, codec_name, path):
fh = av.open(path)
packets = []
for packet in fh.demux(video=0):
packets.append(packet)
full_source = b''.join(p.to_bytes() for p in packets)
for size in 1024, 8192, 65535:
ctx = Codec(codec_name).create()
packets = []
for i in range(0, len(full_source), size):
block = full_source[i:i + size]
packets.extend(ctx.parse(block))
packets.extend(ctx.parse())
parsed_source = b''.join(p.to_bytes() for p in packets)
self.assertEqual(len(parsed_source), len(full_source))
self.assertEqual(full_source, parsed_source)
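# Editorial note: the loop above captures the CodecContext.parse() contract
# this test exercises -- byte chunks of any size may be fed incrementally,
# the parser reassembles complete packets, and the final bare ctx.parse()
# call flushes whatever remains buffered. The chunk sizes 1024/8192/65535
# are arbitrary, chosen to cover both sub-packet and multi-packet feeds.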
class TestEncoding(TestCase):
def test_encoding_png(self):
self.image_sequence_encode('png')
def test_encoding_mjpeg(self):
self.image_sequence_encode('mjpeg')
def test_encoding_tiff(self):
self.image_sequence_encode('tiff')
def image_sequence_encode(self, codec_name):
try:
codec = Codec(codec_name, 'w')
except UnknownCodecError:
raise SkipTest()
container = av.open(fate_suite('h264/interlaced_crop.mp4'))
video_stream = container.streams.video[0]
width = 640
height = 480
ctx = codec.create()
pix_fmt = ctx.codec.video_formats[0].name
ctx.width = width
ctx.height = height
ctx.time_base = video_stream.codec_context.time_base
ctx.pix_fmt = pix_fmt
ctx.open()
frame_count = 1
path_list = []
for frame in iter_frames(container, video_stream):
new_frame = frame.reformat(width, height, pix_fmt)
new_packets = ctx.encode(new_frame)
self.assertEqual(len(new_packets), 1)
new_packet = new_packets[0]
path = self.sandboxed('%s/encoder.%04d.%s' % (
codec_name,
frame_count,
codec_name if codec_name != 'mjpeg' else 'jpg',
))
path_list.append(path)
with open(path, 'wb') as f:
f.write(new_packet)
frame_count += 1
if frame_count > 5:
break
ctx = av.Codec(codec_name, 'r').create()
for path in path_list:
with open(path, 'rb') as f:
size = os.fstat(f.fileno()).st_size
packet = Packet(size)
size = f.readinto(packet)
frame = ctx.decode(packet)[0]
self.assertEqual(frame.width, width)
self.assertEqual(frame.height, height)
self.assertEqual(frame.format.name, pix_fmt)
def test_encoding_h264(self):
self.video_encoding('libx264', {'crf': '19'})
def test_encoding_mpeg4(self):
self.video_encoding('mpeg4')
def test_encoding_mpeg1video(self):
self.video_encoding('mpeg1video')
def test_encoding_dvvideo(self):
options = {'pix_fmt': 'yuv411p',
'width': 720,
'height': 480}
self.video_encoding('dvvideo', options)
def test_encoding_dnxhd(self):
options = {'b': '90M', # bitrate
'pix_fmt': 'yuv422p',
'width': 1920,
'height': 1080,
'time_base': '1001/30000',
'max_frames': 5}
self.video_encoding('dnxhd', options)
def video_encoding(self, codec_name, options={}):
try:
codec = Codec(codec_name, 'w')
except UnknownCodecError:
raise SkipTest()
container = av.open(fate_suite('h264/interlaced_crop.mp4'))
video_stream = container.streams.video[0]
pix_fmt = options.pop('pix_fmt', 'yuv420p')
width = options.pop('width', 640)
height = options.pop('height', 480)
max_frames = options.pop('max_frames', 50)
time_base = options.pop('time_base', video_stream.codec_context.time_base)
ctx = codec.create()
ctx.width = width
ctx.height = height
ctx.time_base = time_base
ctx.framerate = 1 / ctx.time_base
ctx.pix_fmt = pix_fmt
ctx.options = options # TODO
ctx.open()
path = self.sandboxed('encoder.%s' % codec_name)
packet_sizes = []
frame_count = 0
with open(path, 'wb') as f:
for frame in iter_frames(container, video_stream):
"""
bad_frame = frame.reformat(width, 100, pix_fmt)
with self.assertRaises(ValueError):
ctx.encode(bad_frame)
bad_frame = frame.reformat(100, height, pix_fmt)
with self.assertRaises(ValueError):
ctx.encode(bad_frame)
bad_frame = frame.reformat(width, height, "rgb24")
with self.assertRaises(ValueError):
ctx.encode(bad_frame)
"""
if frame:
frame_count += 1
new_frame = frame.reformat(width, height, pix_fmt) if frame else None
for packet in ctx.encode(new_frame):
packet_sizes.append(packet.size)
f.write(packet)
if frame_count >= max_frames:
break
for packet in ctx.encode(None):
packet_sizes.append(packet.size)
f.write(packet)
dec_codec_name = codec_name
if codec_name == 'libx264':
dec_codec_name = 'h264'
ctx = av.Codec(dec_codec_name, 'r').create()
ctx.open()
decoded_frame_count = 0
for frame in iter_raw_frames(path, packet_sizes, ctx):
decoded_frame_count += 1
self.assertEqual(frame.width, width)
self.assertEqual(frame.height, height)
self.assertEqual(frame.format.name, pix_fmt)
self.assertEqual(frame_count, decoded_frame_count)
def test_encoding_pcm_s24le(self):
self.audio_encoding('pcm_s24le')
def test_encoding_aac(self):
self.audio_encoding('aac')
def test_encoding_mp2(self):
self.audio_encoding('mp2')
def audio_encoding(self, codec_name):
try:
codec = Codec(codec_name, 'w')
except UnknownCodecError:
raise SkipTest()
ctx = codec.create()
if ctx.codec.experimental:
raise SkipTest()
sample_fmt = ctx.codec.audio_formats[-1].name
sample_rate = 48000
channel_layout = "stereo"
channels = 2
ctx.time_base = Fraction(1) / sample_rate
ctx.sample_rate = sample_rate
ctx.format = sample_fmt
ctx.layout = channel_layout
ctx.channels = channels
ctx.open()
resampler = AudioResampler(sample_fmt, channel_layout, sample_rate)
container = av.open(fate_suite('audio-reference/chorusnoise_2ch_44kHz_s16.wav'))
audio_stream = container.streams.audio[0]
path = self.sandboxed('encoder.%s' % codec_name)
samples = 0
packet_sizes = []
with open(path, 'wb') as f:
for frame in iter_frames(container, audio_stream):
# We need to let the encoder retime.
frame.pts = None
"""
bad_resampler = AudioResampler(sample_fmt, "mono", sample_rate)
bad_frame = bad_resampler.resample(frame)
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
bad_resampler = AudioResampler(sample_fmt, channel_layout, 3000)
bad_frame = bad_resampler.resample(frame)
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
bad_resampler = AudioResampler('u8', channel_layout, 3000)
bad_frame = bad_resampler.resample(frame)
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
"""
resampled_frame = resampler.resample(frame)
samples += resampled_frame.samples
for packet in ctx.encode(resampled_frame):
                    # bytearray because Python can
                    # freak out if the first byte is NULL
f.write(bytearray(packet))
packet_sizes.append(packet.size)
for packet in ctx.encode(None):
packet_sizes.append(packet.size)
f.write(bytearray(packet))
ctx = Codec(codec_name, 'r').create()
ctx.time_base = Fraction(1) / sample_rate
ctx.sample_rate = sample_rate
ctx.format = sample_fmt
ctx.layout = channel_layout
ctx.channels = channels
ctx.open()
result_samples = 0
# should have more asserts but not sure what to check
# libav and ffmpeg give different results
        # so we can't really use checksums
for frame in iter_raw_frames(path, packet_sizes, ctx):
result_samples += frame.samples
self.assertEqual(frame.rate, sample_rate)
self.assertEqual(len(frame.layout.channels), channels)
| bsd-3-clause |
mguijarr/mxcube3 | mxcube3/remote_access.py | 2 | 2948 | import logging
from flask import request, session
from mxcube3 import socketio
from collections import deque
from mxcube3 import app as mxcube
MASTER = None
MASTER_ROOM = None
PENDING_EVENTS = deque()
DISCONNECT_HANDLED = True
OBSERVERS = {}
def set_master(master_sid):
global MASTER
MASTER = master_sid
def is_master(sid):
return MASTER == sid
def flush():
global MASTER
global PENDING_EVENTS
MASTER = None
PENDING_EVENTS = deque()
def _event_callback():
event_id, event, json_dict, kw = PENDING_EVENTS.popleft()
emit_pending_events()
def emit_pending_events():
try:
event_id, event, json_dict, kwargs = PENDING_EVENTS[0]
except IndexError:
pass
else:
return _emit(event, json_dict, **kwargs)
def _emit(event, json_dict, **kwargs):
kw = dict(kwargs)
kw['callback'] = _event_callback
kw['room'] = MASTER_ROOM
socketio.emit(event, json_dict, **kw)
def safe_emit(event, json_dict, **kwargs):
PENDING_EVENTS.append((id(json_dict), event, json_dict, kwargs))
if len(PENDING_EVENTS) == 1:
emit_pending_events()
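# Editorial note on the ordering scheme above: safe_emit() queues the event
# and emits immediately only when the queue was empty; each delivery ack
# (_event_callback) pops the delivered event and emits the next one. Events
# therefore reach the master room strictly in order and are retained until
# acknowledged.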
@socketio.on('connect', namespace='/hwr')
def connect():
global MASTER_ROOM, DISCONNECT_HANDLED
if is_master(session.sid):
MASTER_ROOM = request.sid
emit_pending_events()
if not mxcube.queue.queue_hwobj.is_executing() and not DISCONNECT_HANDLED:
DISCONNECT_HANDLED = True
socketio.emit("resumeQueueDialog", namespace='/hwr')
msg = 'Client reconnected, Queue was previously stopped, asking '
msg += 'client for action'
logging.getLogger('HWR').info(msg)
@socketio.on('disconnect', namespace='/hwr')
def disconnect():
global DISCONNECT_HANDLED, MASTER_ROOM
if is_master(session.sid) and MASTER_ROOM == request.sid and \
mxcube.queue.queue_hwobj.is_executing():
DISCONNECT_HANDLED = False
mxcube.queue.queue_hwobj.stop()
logging.getLogger('HWR').info('Client disconnected, stopping queue')
@socketio.on('setRaMaster', namespace='/hwr')
def set_master_id(data):
global MASTER_ROOM, OBSERVERS
if data['master']:
MASTER_ROOM = request.sid
emit_pending_events()
else:
OBSERVERS[remote_addr()] = {"host": remote_addr(),
"name": data["name"],
"requestsControl": False,
"message": '',
"sid": session.sid}
socketio.emit("observersChanged", OBSERVERS.values(), namespace='/hwr')
return session.sid
def observer_name():
global OBSERVERS
observer_name = ''
try:
observer_name = OBSERVERS[remote_addr()]['name']
except KeyError:
pass
return observer_name
def remote_addr():
return str(request.headers.get('x-forwarded-for', request.remote_addr))
| gpl-2.0 |
Elettronik/SickRage | lib/pgi/signals.py | 19 | 3336 | # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
from ctypes import byref
from .util import cached_property, escape_parameter, decode_return
from .clib.glib import guint
from .clib.gobject import signal_list_ids, signal_query, GSignalQuery
from .codegen import generate_dummy_callable
from .gtype import PGType
class GSignal(object):
def __init__(self, info, signal_id):
self._id = signal_id
self._info = info
@property
def _func(self):
try:
sig_info = self._info.find_signal(self._query.signal_name)
except AttributeError:
# older libgirepository
sig_info = None
if sig_info:
func_name = sig_info.name.replace("-", "_")
return generate_dummy_callable(
sig_info, func_name, signal_owner_type=self.instance_type)
else:
# FIXME: either too old libgirepository or signal
# that is not in the typelib.
f = lambda: None
f.__doc__ = "%s()" % self.name.replace("-", "_")
return f
@property
def __doc__(self):
return self._func.__doc__
def __call__(self, *args, **kwargs):
assert self._func
return self._func(*args, **kwargs)
@cached_property
def _query(self):
query = GSignalQuery()
signal_query(self._id, byref(query))
return query
@property
def id(self):
return self._query.signal_id
@property
@decode_return()
def name(self):
return self._query.signal_name
@property
def instance_type(self):
return PGType(self._query.itype)
@property
def return_type(self):
return PGType(self._query.return_type)
@property
def flags(self):
return self._query.signal_flags.value
@property
def param_types(self):
count = int(self._query.n_params)
return [PGType(t) for t in self._query.param_types[:count]]
class _GSignalQuery(object):
def __init__(self, info, pgtype):
# FIXME: DBusGLib.Proxy ?
if pgtype == PGType.from_name("void"):
return
gtype = pgtype._type
is_interface = pgtype.is_interface()
def get_sig_ids(gtype):
length = guint()
array = signal_list_ids(gtype, byref(length))
return array[:length.value]
if is_interface:
iface = gtype.default_interface_ref()
sig_ids = get_sig_ids(gtype)
gtype.default_interface_unref(iface)
else:
klass = gtype.class_ref()
sig_ids = get_sig_ids(gtype)
gtype.class_unref(klass)
for id_ in sig_ids:
sig = GSignal(info, id_)
setattr(self, escape_parameter(sig.name), sig)
_GSignalQuery.__name__ = "GSignalQuery"
class SignalsDescriptor(object):
def __init__(self, info):
self.info = info
def __get__(self, instance, owner):
return _GSignalQuery(self.info, owner.__gtype__)
def SignalsAttribute(info):
return SignalsDescriptor(info)
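# Editorial sketch (class and signal names are illustrative, assuming a
# GObject wrapper such as Gtk.Button generated by pgi): the descriptor above
# is attached as a class attribute, after which introspection reads like
#
#     Gtk.Button.signals.clicked.name         # -> "clicked"
#     Gtk.Button.signals.clicked.param_types  # -> [] (PGTypes of signal args)
#
# Note that each access of .signals rebuilds the query object from the
# owner's __gtype__.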
| gpl-3.0 |
savanu/servo | components/script/dom/bindings/codegen/pythonpath.py | 131 | 1404 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Run a python script, adding extra directories to the python path.
"""
def main(args):
def usage():
print >>sys.stderr, "pythonpath.py -I directory script.py [args...]"
sys.exit(150)
paths = []
while True:
try:
arg = args[0]
except IndexError:
usage()
if arg == '-I':
args.pop(0)
try:
path = args.pop(0)
except IndexError:
usage()
paths.append(os.path.abspath(path))
continue
if arg.startswith('-I'):
paths.append(os.path.abspath(args.pop(0)[2:]))
continue
if arg.startswith('-D'):
os.chdir(args.pop(0)[2:])
continue
break
script = args[0]
sys.path[0:0] = [os.path.abspath(os.path.dirname(script))] + paths
sys.argv = args
sys.argc = len(args)
frozenglobals['__name__'] = '__main__'
frozenglobals['__file__'] = script
execfile(script, frozenglobals)
# Freeze scope here ... why this makes things work I have no idea ...
frozenglobals = globals()
import sys
import os
if __name__ == '__main__':
main(sys.argv[1:])
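# Example invocation (editorial; all paths and file names are placeholders):
#
#     python pythonpath.py -I /build/bindings -D /tmp/work codegen.py spec.webidl
#
# -I prepends a directory to sys.path (repeatable), -D changes the working
# directory, and the remaining arguments become sys.argv for the script.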
| mpl-2.0 |
cjmathy/ann_bmi203 | ann/rap1.py | 1 | 6499 | import numpy as np
import re
import random
def prepare_data():
"""This method prepares input positive and negative datasets as bitvectors for the Rap1 binding problem. Output: three lists of bitvectors, one containing positive samples, negative samples that are similar to positive samples, and negative examples that are randomly chosen from the fasta sequences. All bitvectors are 17 bp (34 bits) long"""
# read in all positive data, convert to bitvectors
pos_str = read_positives()
pos_vec = str_to_vec(pos_str)
# read in all negative data. then, remove false negatives from the negative fa sequences and their reverse complements. Call this new set of sequences and their reverse complements "neg_str".
neg_str = read_negatives()
neg_str = remove_falseneg(neg_str, pos_str)
rc_neg_str = reverse_complement(neg_str)
rc_neg_str = remove_falseneg(rc_neg_str, pos_str)
neg_str = reverse_complement(rc_neg_str)
neg_str = neg_str + rc_neg_str
    # cache interesting cases as "neg_sim". interesting cases are those that look similar to the positive sequences (in that they contain cytosines at positions 5, 6, and 10) but are considered negative. also cache randomly chosen sequences, so that the neural net can be trained on sequences that are not similar to positive examples.
neg_sim, neg_rand = cache_cases(neg_str)
neg_sim_vec = str_to_vec(neg_sim)
neg_rand_vec = str_to_vec(neg_rand)
return pos_vec, neg_sim_vec, neg_rand_vec
def read_positives():
"reads in positive samples as strings"
seqs = []
file = '/Users/cjmathy/Documents/courses/bmi203/Final-Project/ann_bmi203/rap1-lieb-positives.txt'
with open(file, 'rb') as f:
for seq in f:
seqs.append(seq.strip())
return seqs
def read_negatives():
"reads in negative samples as strings"
seqs = []
file = '/Users/cjmathy/Documents/courses/bmi203/Final-Project/ann_bmi203/yeast-upstream-1k-negative.fa'
with open(file, 'rb') as f:
sequence = ''
for line in f:
if line[0] is not '>':
sequence += line.strip()
else:
if sequence:
seqs.append(sequence)
sequence = ""
return seqs
def str_to_vec(sequences):
"""converts nucleotide strings into vectors using a 2-bit encoding scheme."""
vecs = []
nuc2bit = {"A": (0, 0),
"C": (0, 1),
"T": (1, 0),
"G": (1, 1)}
for seq in sequences:
vec = []
for nuc in seq:
vec.append(nuc2bit[nuc][0])
vec.append(nuc2bit[nuc][1])
vecs.append(vec)
return vecs
def remove_falseneg(negatives, positives):
"""this method removes any negative fasta sequences that contain one of the positive sample sequences (essentially making them false negatives."""
seqs = []
for n in negatives:
if not any(p in n for p in positives):
seqs.append(n)
return seqs
def reverse_complement(sequences):
"""returns a list of reverse complemented sequences"""
rc = []
complement = {'A': 'T',
'C': 'G',
'G': 'C',
'T': 'A'}
for seq in sequences:
seq = list(seq)
seq = reversed([complement.get(nuc) for nuc in seq])
seq = ''.join(seq)
rc.append(seq)
return rc
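# Worked example (editorial): reverse_complement(["ACCT"]) == ["AGGT"],
# i.e. the per-base complement TGGA read back-to-front, matching the usual
# biological definition of a reverse complement.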
def cache_cases(sequences):
"""this method separates the negative data into two sets: those that contain the Rap1 binding signature sequence, and a set that is randomly chosen from the negative data."""
# 1) cache negative cases that are similar to positives
sim_cache = []
for seq in sequences:
matches = re.findall(r'....CC...C.......', seq)
for match in matches:
sim_cache.append(match)
sim_cache = list(set(sim_cache))
# 2) cache randomly chosen 17 bp negatives. 5 from each fa sequence (including reverse complements). there are about 30000 neg_sim samples, so this will create about 30000 neg_rand samples from the 3000 sequences and their 3000 reverse complements.
bp = 17
rand_cache = []
for seq in sequences:
for _ in xrange(5):
i = random.randint(0, len(seq)-bp)
substr = seq[i:i+bp]
rand_cache.append(substr)
return sim_cache, rand_cache
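# Editorial note: r'....CC...C.......' pins a 17 bp window with cytosines at
# (1-indexed) positions 5, 6 and 10 -- the Rap1-like signature mentioned in
# prepare_data() -- while leaving all other 14 positions free to vary.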
def build_training_set(pos, neg_sim, neg_rand):
"""Builds a training set using 50% positive data, and 50% negative data. Negative data consists equally of similar-to-positve and random negative sequences"""
# we have 137 positive examples, 30000 special negative examples, and 30000 random negative examples, all 34 bits long. take 69 special negative examples and 68 random negative examples. add them to the positive examples to make our training set.
neg = []
for _ in xrange(69):
i = np.random.randint(0, len(neg_sim))
neg.append(neg_sim[i])
for _ in xrange(68):
i = np.random.randint(0, len(neg_rand))
neg.append(neg_rand[i])
Xp = np.array(pos)
Xn = np.array(neg)
X = np.concatenate((Xp, Xn), axis=0) # nd array, 274 x 34
yp = np.ones((Xp.shape[0],))
yn = np.zeros((Xn.shape[0],))
y = np.concatenate((yp, yn), axis=0) # nd array, 34 x 1
return X, y
def build_training_set_100(pos, neg_sim, neg_rand):
"""same as above, but allowing for some positive and negative samples to be held out as a test set"""
neg = []
for _ in xrange(50):
i = np.random.randint(0, len(neg_sim))
neg.append(neg_sim[i])
for _ in xrange(50):
i = np.random.randint(0, len(neg_rand))
neg.append(neg_rand[i])
Xp = np.array(pos)
Xn = np.array(neg)
X = np.concatenate((Xp, Xn), axis=0)
yp = np.ones((Xp.shape[0],))
yn = np.zeros((Xn.shape[0],))
y = np.concatenate((yp, yn), axis=0)
return X, y
def build_test_set(pos, neg_sim, neg_rand):
"""same as above, but allowing for some positive and negative samples to be held out as a test set"""
neg = []
for _ in xrange(19):
i = np.random.randint(0, len(neg_sim))
neg.append(neg_sim[i])
for _ in xrange(18):
i = np.random.randint(0, len(neg_rand))
neg.append(neg_rand[i])
Xp = np.array(pos)
Xn = np.array(neg)
X = np.concatenate((Xp, Xn), axis=0)
yp = np.ones((Xp.shape[0],))
yn = np.zeros((Xn.shape[0],))
y = np.concatenate((yp, yn), axis=0)
return X, y
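# Minimal end-to-end sketch (editorial; the network training code lives in
# the rest of this package and is assumed, not shown):
#
#     pos, neg_sim, neg_rand = prepare_data()
#     X, y = build_training_set(pos, neg_sim, neg_rand)  # X: 274 x 34
#     # X/y are now ready for the neural-net fit routine of this package.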
| apache-2.0 |
m87/pyEM | stepwise.py | 1 | 3019 | from thirdparty import log_mvnpdf, log_mvnpdf_diag
import numpy as np
from online import *
from scipy.misc import logsumexp
from gaussEM import GaussEM
class Stepwise(OnlineEM):
def __init__(self, param):
super().__init__(param)
self.param = float(param['alpha'])
self.skip = int(param['skip'])
        self.mbsize = int(param['mb'])
def prepare(self, dataset):
super().prepare(dataset)
class StepwiseGauss(Stepwise, GaussEM):
def __init__(self, param):
super().__init__(param)
self.cov = param['cov']
self.C = float(param['smoothing'])
self.mvnpdf = {'full': log_mvnpdf, 'diag': log_mvnpdf_diag}
def e(self, X):
lg = self.mvnpdf[self.cov](np.array([X]), self.means, self.COV[self.cov])
#s = np.inner((X - self.means),(X-self.means))
#print(s)
#print(self.means[0])
logResps = lg[0] + np.log(self.weights)
self.histAcc += logsumexp(logResps)
self.hist.append(-self.histAcc/self.N)
#self.hist.append(logsumexp(logResps))
maxLg = np.max(logResps)
logResps -= maxLg
self.resps = np.exp(logResps)
np.clip(self.resps, 10*EPS, np.inf, out=self.resps)
self.resps /= np.sum(self.resps)
self.N += 1
lam = np.power(self.N+2, -float(self.param))
for c in np.arange(self.n):
self.accResps[c]= (1-lam) * self.accResps[c] + lam * self.resps[c]
self.accMeans[c]= (1-lam)* self.accMeans[c] + lam * X * self.resps[c]
tmp = self.accMeans[c] / self.accResps[c]
diff = X - tmp
self.accCovars[c] = (1-lam) * self.accCovars[c] + lam * np.outer(self.resps[c] * diff, diff)
self.accResps /= np.sum(self.accResps)
def m(self, X):
if self.N < self.skip: return
if self.N % self.mbsize != 0:
return
for c in np.arange(self.n):
self.weights[c] = self.accResps[c] / (self.N+ 10*EPS ) + EPS
self.means[c] = (self.accMeans[c] + 10* EPS)/ (self.accResps[c] + 10 * EPS )
self.covars[c] = (self.accCovars[c] + 10* EPS * np.identity(self.dim))/ (self.accResps[c] + 10 * EPS ) * self.I[self.cov]
self.diagCovars[c] = np.diag(self.covars[c])
self.weights /= sum(self.weights)
def prepare(self,dataset):
super().prepare(dataset)
self.accResps = np.zeros((self.n,))
self.accMeans = np.zeros((self.n,self.dim))
self.accCovars = np.zeros((self.n,self.dim,self.dim))
self.weights = np.ones((self.n,))
self.weights /= self.n
self.means = np.zeros((self.n,self.dim))
for it,x in enumerate(dataset.I):
self.means[it] = x
self.covars = np.array([np.identity(self.dim) for x in range(self.n)])
self.diagCovars = np.ones((self.n,self.dim))
self.COV = {'full' : self.covars, 'diag' : self.diagCovars}
        self.I = {'full': 1.0, 'diag': np.identity(self.dim)}
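# Editorial note on the step size used in e(): lam = (N + 2) ** -alpha is the
# stepwise-EM schedule of Liang & Klein (2009, "Online EM for Unsupervised
# Models"), which requires 0.5 < alpha <= 1 for convergence. Worked example:
# with alpha = 0.7 and N = 8, lam = 10 ** -0.7 ~= 0.20, so an early sample
# pulls the running sufficient statistics about 20% toward its own values;
# the pull decays as N grows.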
| mit |
MattsFleaMarket/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_types.py | 164 | 1797 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for removing uses of the types module.
These work for only the known names in the types module. The forms above
can include types. or not. ie, It is assumed the module is imported either as:
import types
from types import ... # either * or specific types
The import statements are not modified.
There should be another fixer that handles at least the following constants:
type([]) -> list
type(()) -> tuple
type('') -> str
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name
_TYPE_MAPPING = {
'BooleanType' : 'bool',
'BufferType' : 'memoryview',
'ClassType' : 'type',
'ComplexType' : 'complex',
'DictType': 'dict',
'DictionaryType' : 'dict',
'EllipsisType' : 'type(Ellipsis)',
#'FileType' : 'io.IOBase',
'FloatType': 'float',
'IntType': 'int',
'ListType': 'list',
'LongType': 'int',
'ObjectType' : 'object',
'NoneType': 'type(None)',
'NotImplementedType' : 'type(NotImplemented)',
'SliceType' : 'slice',
'StringType': 'bytes', # XXX ?
'StringTypes' : 'str', # XXX ?
'TupleType': 'tuple',
'TypeType' : 'type',
'UnicodeType': 'str',
'XRangeType' : 'range',
}
_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
class FixTypes(fixer_base.BaseFix):
BM_compatible = True
PATTERN = '|'.join(_pats)
def transform(self, node, results):
new_value = _TYPE_MAPPING.get(results["name"].value)
if new_value:
return Name(new_value, prefix=node.prefix)
return None
| apache-2.0 |
tawsifkhan/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
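def _demo_pipeline_predict():
    """Hedged usage sketch (added for illustration, not part of the
    original module). Shows the predict path implemented above: every
    step but the last is applied via ``transform`` and the final
    estimator's ``predict`` is called on the result. The estimator
    choices and toy data are assumptions made only for this example."""
    import numpy as np
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = (X[:, 0] > 0).astype(int)
    pipe = Pipeline([('scale', StandardScaler()),
                     ('clf', LogisticRegression())])
    pipe.fit(X, y)
    # equivalent to transforming X through 'scale', then predicting with 'clf'
    return pipe.predict(X)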
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
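# Hedged illustration (added): _name_estimators lowercases each type
# name and disambiguates duplicates with a "-<n>" suffix, counted from
# the end of the list, e.g. (estimator classes are assumptions):
#     _name_estimators([StandardScaler(), StandardScaler(), PCA()])
#     -> [('standardscaler-1', ...), ('standardscaler-2', ...), ('pca', ...)]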
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
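def _demo_weighted_transform():
    """Hedged sketch (added for illustration, not part of the original
    module): a transformer weight is a plain multiplicative factor on
    the transformed output, as implemented in _transform_one above.
    The transformer choice, name and weight are assumptions."""
    import numpy as np
    from sklearn.preprocessing import StandardScaler
    X = np.arange(8, dtype=float).reshape(4, 2)
    scaler = StandardScaler().fit(X)
    weighted = _transform_one(scaler, 'scale', X, {'scale': 2.0})
    # identical to scaling the unweighted output by the weight
    assert np.allclose(weighted, 2.0 * scaler.transform(X))
    return weighted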
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
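def _demo_feature_union():
    """Hedged usage sketch (added for illustration, not part of the
    original module): fit_transform above fits each transformer on the
    same input and hstacks their outputs, so the result has
    sum-of-components columns. PCA/TruncatedSVD and the toy data are
    assumptions made only for this example."""
    import numpy as np
    from sklearn.decomposition import PCA, TruncatedSVD
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    union = FeatureUnion([('pca', PCA(n_components=2)),
                          ('svd', TruncatedSVD(n_components=2))])
    Xt = union.fit_transform(X)
    assert Xt.shape == (10, 4)  # 2 + 2 concatenated components
    return Xt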
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
razvanphp/arangodb | 3rdParty/V8-3.31.74.1/build/gyp/test/variables/filelist/gyptest-filelist-golden.py | 228 | 1584 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<|(list.txt ...)' syntax commands.
"""
import os
import sys
import TestGyp
test = TestGyp.TestGyp(format='gypd')
expect = test.read('filelist.gyp.stdout')
if sys.platform == 'win32':
expect = expect.replace('/', r'\\').replace('\r\n', '\n')
test.run_gyp('src/filelist.gyp',
'--debug', 'variables',
stdout=expect, ignore_line_numbers=True)
# Verify the filelist.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('src/filelist.gypd').replace(
'\r', '').replace('\\\\', '/')
expect = test.read('filelist.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
print "Unexpected contents of `src/filelist.gypd'"
test.diff(expect, contents, 'src/filelist.gypd ')
test.fail_test()
contents = test.read('src/names.txt')
expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
if not test.match(contents, expect):
print "Unexpected contents of `src/names.txt'"
test.diff(expect, contents, 'src/names.txt ')
test.fail_test()
test.pass_test()
| apache-2.0 |
ramadhane/odoo | addons/portal_project/project.py | 285 | 1809 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-TODAY OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class portal_project(osv.Model):
""" Update of mail_mail class, to add the signin URL to notifications. """
_inherit = 'project.project'
def _get_visibility_selection(self, cr, uid, context=None):
""" Override to add portal option. """
selection = super(portal_project, self)._get_visibility_selection(cr, uid, context=context)
idx = [item[0] for item in selection].index('public')
selection.insert((idx + 1), ('portal', _('Customer related project: visible through portal')))
return selection
# return [('public', 'All Users'),
# ('portal', 'Portal Users and Employees'),
# ('employees', 'Employees Only'),
# ('followers', 'Followers Only')]
| agpl-3.0 |
bguillot/OpenUpgrade | addons/hr_payroll/report/report_payslip.py | 377 | 1982 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.report import report_sxw
class payslip_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(payslip_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_payslip_lines': self.get_payslip_lines,
})
def get_payslip_lines(self, obj):
payslip_line = self.pool.get('hr.payslip.line')
res = []
ids = []
for id in range(len(obj)):
if obj[id].appears_on_payslip is True:
ids.append(obj[id].id)
if ids:
res = payslip_line.browse(self.cr, self.uid, ids)
return res
class wrapped_report_payslip(osv.AbstractModel):
_name = 'report.hr_payroll.report_payslip'
_inherit = 'report.abstract_report'
_template = 'hr_payroll.report_payslip'
_wrapped_report_class = payslip_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
orestkreminskyi/taf | unittests/test_staticcross.py | 2 | 2599 | # Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``test_staticcross.py``
`Unittests for static cross functions`
"""
import pytest
from testlib.dev_staticcross_ons import StaticCrossONS
# setup config
SETUP = {"env": [{"id": 0, "ports": [[1, 5, 11], [1, 5, 12], [1, 5, 13], [1, 5, 14], [1, 1, 16]]},
{"id": 412},
{"id": "31", "related_id": [0, 412]},
{"id": 201}],
"cross": {"31": [[0, 1, 412, 1], [0, 2, 412, 2], [0, 3, 412, 3], [0, 4, 412, 4], [0, 5, 412, 5]]}}
# config of environment
ENV = {"name": "Zero Cross", "entry_type": "cross", "instance_type": "static_ons", "id": "31"}
# fake class for options
class FakeOpts(object):
# fake json file
setup = "setup.json"
# fake json file
env = ""
get_only = False
build_path = ''
class FakeDev1(object):
def __init__(self):
self.ports = ["eth0", "eth2"]
def connect_port(self, port):
return True
def disconnect_port(self, port):
return True
class FakeDev2(object):
def __init__(self):
self.ports = ["eth3", "eth4"]
def connect_port(self, port):
return True
def disconnect_port(self, port):
return True
@pytest.fixture()
def cross(request, monkeypatch):
"""
"""
# first method for monkeypatching
def _setup(self, x):
return SETUP
# second method for monkeypatching
def _conf(self, x):
return ENV
# define environment with fake class
cross = StaticCrossONS(ENV, FakeOpts())
cross.connections = SETUP['cross']['31']
cross.related_conf = {0: {}, 412: {}}
cross.related_obj = {0: FakeDev1(), 412: FakeDev2()}
return cross
def test_staticcross_get_device(cross):
"""
"""
result = cross._get_device(412)
assert isinstance(result, object)
def test_staticcross_connect(cross):
"""
"""
cross.xconnect(conn=SETUP["cross"]["31"][0])
def test_staticcross_disconnect(cross):
"""
"""
cross.xdisconnect(conn=SETUP["cross"]["31"][0])
| apache-2.0 |
vane/pywinauto | pywinauto/timings.py | 15 | 12142 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"""Timing settings for all of pywinauto
This module has one object that should be used for all timing adjustments
timings.Timings
There are a couple of predefined settings
timings.Timings.Fast()
timings.Timings.Defaults()
timings.Timings.Slow()
The Following are the individual timing settings that can be adjusted:
* window_find_timeout (default 3)
* window_find_retry (default .09)
* app_start_timeout (default 10)
* app_start_retry (default .90)
* exists_timeout (default .5)
* exists_retry (default .3)
* after_click_wait (default .09)
* after_clickinput_wait (default .01)
* after_menu_wait (default .05)
* after_sendkeys_key_wait (default .01)
* after_button_click_wait (default 0)
* before_closeclick_wait (default .1)
* closeclick_retry (default .05)
* closeclick_dialog_close_wait (default .05)
* after_closeclick_wait (default .2)
* after_windowclose_timeout (default 2)
* after_windowclose_retry (default .5)
* after_setfocus_wait (default .06)
* after_setcursorpos_wait (default .01)
* sendmessagetimeout_timeout (default .001)
* after_tabselect_wait (default .01)
* after_listviewselect_wait (default .01)
* after_listviewcheck_wait default(.001)
* after_treeviewselect_wait default(.001)
* after_toobarpressbutton_wait default(.01)
* after_updownchange_wait default(.001)
* after_movewindow_wait default(0)
* after_buttoncheck_wait default(0)
* after_comboselect_wait default(0)
* after_listboxselect_wait default(0)
* after_listboxfocuschange_wait default(0)
* after_editsetedittext_wait default(0)
* after_editselect_wait default(0)
"""
import time
import operator
__revision__ = "$Revision: 453 $"
#=========================================================================
class TimeConfig(object):
"Central storage and minipulation of timing values"
__default_timing = {
'window_find_timeout' : 3,
'window_find_retry' : .09,
'app_start_timeout' : 10,
'app_start_retry' : .90,
'exists_timeout' : .5,
'exists_retry' : .3,
'after_click_wait' : .09,
'after_clickinput_wait' : .01,
'after_menu_wait' : .05,
'after_sendkeys_key_wait' : .01,
'after_button_click_wait' : 0,
'before_closeclick_wait' : .1,
'closeclick_retry' : .05,
'closeclick_dialog_close_wait' : .05,
'after_closeclick_wait' : .2,
'after_windowclose_timeout': 2,
'after_windowclose_retry': .5,
'after_setfocus_wait' : .06,
'after_setcursorpos_wait' : .01,
'sendmessagetimeout_timeout' : .001,
'after_tabselect_wait': .01,
'after_listviewselect_wait': .01,
'after_listviewcheck_wait': .001,
'after_treeviewselect_wait': .001,
'after_toobarpressbutton_wait': .01,
'after_updownchange_wait': .001,
'after_movewindow_wait': 0,
'after_buttoncheck_wait': 0,
'after_comboboxselect_wait': 0,
'after_listboxselect_wait': 0,
'after_listboxfocuschange_wait': 0,
'after_editsetedittext_wait': 0,
'after_editselect_wait': 0,
}
_timings = __default_timing.copy()
_cur_speed = 1
def __getattr__(self, attr):
"Get the value for a particular timing"
if attr in TimeConfig.__default_timing:
return TimeConfig._timings[attr]
else:
raise KeyError(
"Unknown timing setting: %s" % attr)
def __setattr__(self, attr, value):
"Set a particular timing"
if attr in TimeConfig.__default_timing:
TimeConfig._timings[attr] = value
else:
raise KeyError(
"Unknown timing setting: %s" % attr)
def Fast(self):
"""Set fast timing values
Currently this changes the timing in the following ways:
timeouts = 1 second
waits = 0 seconds
retries = .001 seconds (minimum!)
(if existing times are faster then keep existing times)
"""
for setting in TimeConfig.__default_timing:
# set timeouts to the min of the current speed or 1 second
if "_timeout" in setting:
TimeConfig._timings[setting] = \
min(1, TimeConfig._timings[setting])
if "_wait" in setting:
TimeConfig._timings[setting] = TimeConfig._timings[setting] / 2
elif setting.endswith("_retry"):
TimeConfig._timings[setting] = 0.001
#self._timings['app_start_timeout'] = .5
def Slow(self):
"""Set slow timing values
Currently this changes the timing in the following ways:
timeouts = default timeouts * 10
waits = default waits * 3
retries = default retries * 3
(if existing times are slower then keep existing times)
"""
for setting in TimeConfig.__default_timing:
if "_timeout" in setting:
TimeConfig._timings[setting] = max(
TimeConfig.__default_timing[setting] * 10,
TimeConfig._timings[setting])
if "_wait" in setting:
TimeConfig._timings[setting] = max(
TimeConfig.__default_timing[setting] * 3,
TimeConfig._timings[setting])
elif setting.endswith("_retry"):
TimeConfig._timings[setting] = max(
TimeConfig.__default_timing[setting] * 3,
TimeConfig._timings[setting])
if TimeConfig._timings[setting] < .2:
TimeConfig._timings[setting]= .2
def Defaults(self):
"Set all timings to the default time"
TimeConfig._timings = TimeConfig.__default_timing.copy()
Timings = TimeConfig()
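# Hedged usage sketch (added for illustration, not part of the module):
# Timings is a module-wide singleton, so adjustments apply globally.
#     from pywinauto import timings
#     timings.Timings.window_find_timeout = 10  # wait longer for windows
#     timings.Timings.Slow()                    # or scale everything up
#     timings.Timings.Defaults()                # restore the defaults
# Setting an unknown attribute name raises KeyError, per __setattr__ above.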
#=========================================================================
class TimeoutError(RuntimeError):
pass
#=========================================================================
def WaitUntil(
timeout,
retry_interval,
func,
value = True,
op = operator.eq,
*args):
"""Wait until ``op(function(*args), value)`` is True or until timeout
expires
* **timeout** how long the function will try the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **value** the value to be compared against (defaults to True)
* **op** the comparison function (defaults to equality)\
* **args** optional arguments to be passed to func when called
Returns the return value of the function
If the operation times out then the return value of the the function
is in the 'function_value' attribute of the raised exception.
e.g. ::
try:
# wait a maximum of 10.5 seconds for the
# the objects ItemCount() method to return 10
# in increments of .5 of a second
WaitUntil(10.5, .5, self.ItemCount, 10)
except TimeoutError, e:
print "timed out"
"""
start = time.time()
func_val = func(*args)
# while the function hasn't returned what we are waiting for
while not op(func_val, value):
# find out how much of the time is left
time_left = timeout - ( time.time() - start)
# if we have to wait some more
if time_left > 0:
# wait either the retry_interval or else the amount of
# time until the timeout expires (whichever is less)
time.sleep(min(retry_interval, time_left))
func_val = func(*args)
else:
err = TimeoutError("timed out")
err.function_value = func_val
raise err
return func_val
#def WaitUntilNot(timeout, retry_interval, func, value = True)
# return WaitUntil(timeout, retry_interval, func, value = True)
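def _demo_wait_until():
    """Hedged usage sketch (added for illustration, not part of the
    original module): polls a plain function until it returns the
    expected value. The counter closure below is a stand-in for e.g.
    a control's ItemCount method."""
    state = {'count': 0}
    def poll():
        state['count'] += 1
        return state['count']
    # wait at most 2 seconds, retrying every 0.01s, until poll() == 3
    return WaitUntil(2, 0.01, poll, 3)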
def WaitUntilPasses(
timeout,
retry_interval,
func,
exceptions = (Exception),
*args):
"""Wait until ``func(*args)`` does not raise one of the exceptions in
exceptions
* **timeout** how long the function will try the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **exceptions** list of exceptions to test against (default: Exception)
* **args** optional arguments to be passed to func when called
Returns the return value of the function
If the operation times out then the original exception raised is in
the 'original_exception' attribute of the raised exception.
e.g. ::
try:
# wait a maximum of 10.5 seconds for the
# window to be found in increments of .5 of a second.
        # Print a message and re-raise the original exception if never found.
WaitUntilPasses(10.5, .5, self.Exists, (WindowNotFoundError))
except TimeoutError, e:
print "timed out"
        raise e
"""
start = time.time()
waited = 0
# keep trying until the timeout is passed
while True:
try:
# Call the function with any arguments
func_val = func(*args)
            # if this did not raise an exception - then we are finished
break
# An exception was raised - so wait and try again
except exceptions, e:
# find out how much of the time is left
time_left = timeout - ( time.time() - start)
# if we have to wait some more
if time_left > 0:
# wait either the retry_interval or else the amount of
# time until the timeout expires (whichever is less)
time.sleep(min(retry_interval, time_left))
else:
# Raise a TimeoutError - and put the original exception
# inside it
err = TimeoutError()
err.original_exception = e
raise err
# return the function value
return func_val
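def _demo_wait_until_passes():
    """Hedged usage sketch (added for illustration, not part of the
    original module): retries a callable until it stops raising. The
    flaky function is a stand-in for e.g. a window lookup that raises
    until the window exists."""
    state = {'tries': 0}
    def flaky():
        state['tries'] += 1
        if state['tries'] < 3:
            raise ValueError("not ready yet")
        return "ready"
    # retry for up to 2 seconds, every 0.01s, swallowing ValueError
    return WaitUntilPasses(2, 0.01, flaky, (ValueError,))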
#
#
#
#def Defaults():
# _current_timing = __default_timing.copy()
#
#
#def Slow():
# for setting in __default_timing:
# if "_timeout" in setting:
# _current_timing[setting] = _default_timing[setting] * 10
#
# if "_wait" in setting:
# _current_timing[setting] = _default_timing[setting] * 3
#
# elif setting.endswith("_retry"):
# _current_timing[setting] = _default_timing[setting] * 3
#
#
#
#def SetTiming(**kwargs):
# ""
#
# for setting, time in kwargs.items():
# if setting in __default_timing:
# _current_timing[setting] = time
# else:
# raise KeyError(
# "Unknown timing setting: %s" % setting)
#
#def Get(setting):
# if setting in __default_timing:
# return _current_timing[setting]
# else:
# raise KeyError(
# "Unknown timing setting: %s" % setting)
| lgpl-2.1 |
szhem/spark | python/docs/conf.py | 98 | 10607 | # -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'epytext',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PySpark'
copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../docs/img/spark-logo-hd.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyspark.tex', u'pyspark Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspark', u'pyspark Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspark', u'pyspark Documentation',
u'Author', 'pyspark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pyspark'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'pyspark'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Skip sample endpoint link (not expected to resolve)
linkcheck_ignore = [r'https://kinesis.us-east-1.amazonaws.com']
| apache-2.0 |
mm1ke/portage | pym/portage/package/ebuild/_config/LicenseManager.py | 7 | 7408 | # Copyright 2010-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = (
'LicenseManager',
)
from portage import os
from portage.dep import ExtendedAtomDict, use_reduce
from portage.exception import InvalidDependString
from portage.localization import _
from portage.util import grabdict, grabdict_package, writemsg
from portage.versions import cpv_getkey, _pkg_str
from portage.package.ebuild._config.helper import ordered_by_atom_specificity
class LicenseManager(object):
def __init__(self, license_group_locations, abs_user_config, user_config=True):
self._accept_license_str = None
self._accept_license = None
self._license_groups = {}
self._plicensedict = ExtendedAtomDict(dict)
self._undef_lic_groups = set()
if user_config:
license_group_locations = list(license_group_locations) + [abs_user_config]
self._read_license_groups(license_group_locations)
if user_config:
self._read_user_config(abs_user_config)
def _read_user_config(self, abs_user_config):
licdict = grabdict_package(os.path.join(
abs_user_config, "package.license"), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
for k, v in licdict.items():
self._plicensedict.setdefault(k.cp, {})[k] = \
self.expandLicenseTokens(v)
def _read_license_groups(self, locations):
for loc in locations:
for k, v in grabdict(
os.path.join(loc, "license_groups")).items():
self._license_groups.setdefault(k, []).extend(v)
for k, v in self._license_groups.items():
self._license_groups[k] = frozenset(v)
def extract_global_changes(self, old=""):
ret = old
atom_license_map = self._plicensedict.get("*/*")
if atom_license_map is not None:
v = atom_license_map.pop("*/*", None)
if v is not None:
ret = " ".join(v)
if old:
ret = old + " " + ret
if not atom_license_map:
#No tokens left in atom_license_map, remove it.
del self._plicensedict["*/*"]
return ret
def expandLicenseTokens(self, tokens):
""" Take a token from ACCEPT_LICENSE or package.license and expand it
if it's a group token (indicated by @) or just return it if it's not a
group. If a group is negated then negate all group elements."""
expanded_tokens = []
for x in tokens:
expanded_tokens.extend(self._expandLicenseToken(x, None))
return expanded_tokens
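	# Hedged illustration (added): given a hypothetical license_groups
	# entry "FSF-APPROVED GPL-2 MIT",
	#     expandLicenseTokens(["@FSF-APPROVED", "-MIT"])
	# yields ["GPL-2", "MIT", "-MIT"], while a negated group token
	# "-@FSF-APPROVED" negates every member: ["-GPL-2", "-MIT"].
	# The group and license names are assumptions for this example.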
def _expandLicenseToken(self, token, traversed_groups):
negate = False
rValue = []
if token.startswith("-"):
negate = True
license_name = token[1:]
else:
license_name = token
if not license_name.startswith("@"):
rValue.append(token)
return rValue
group_name = license_name[1:]
if traversed_groups is None:
traversed_groups = set()
license_group = self._license_groups.get(group_name)
if group_name in traversed_groups:
writemsg(_("Circular license group reference"
" detected in '%s'\n") % group_name, noiselevel=-1)
rValue.append("@"+group_name)
elif license_group:
traversed_groups.add(group_name)
for l in license_group:
if l.startswith("-"):
writemsg(_("Skipping invalid element %s"
" in license group '%s'\n") % (l, group_name),
noiselevel=-1)
else:
rValue.extend(self._expandLicenseToken(l, traversed_groups))
else:
if self._license_groups and \
group_name not in self._undef_lic_groups:
self._undef_lic_groups.add(group_name)
writemsg(_("Undefined license group '%s'\n") % group_name,
noiselevel=-1)
rValue.append("@"+group_name)
if negate:
rValue = ["-" + token for token in rValue]
return rValue
def _getPkgAcceptLicense(self, cpv, slot, repo):
"""
Get an ACCEPT_LICENSE list, accounting for package.license.
"""
accept_license = self._accept_license
cp = cpv_getkey(cpv)
cpdict = self._plicensedict.get(cp)
if cpdict:
if not hasattr(cpv, "slot"):
cpv = _pkg_str(cpv, slot=slot, repo=repo)
plicence_list = ordered_by_atom_specificity(cpdict, cpv)
if plicence_list:
accept_license = list(self._accept_license)
for x in plicence_list:
accept_license.extend(x)
return accept_license
def get_prunned_accept_license(self, cpv, use, lic, slot, repo):
"""
Generate a pruned version of ACCEPT_LICENSE, by intersection with
LICENSE. This is required since otherwise ACCEPT_LICENSE might be
too big (bigger than ARG_MAX), causing execve() calls to fail with
E2BIG errors as in bug #262647.
"""
try:
licenses = set(use_reduce(lic, uselist=use, flat=True))
except InvalidDependString:
licenses = set()
licenses.discard('||')
accept_license = self._getPkgAcceptLicense(cpv, slot, repo)
if accept_license:
acceptable_licenses = set()
for x in accept_license:
if x == '*':
acceptable_licenses.update(licenses)
elif x == '-*':
acceptable_licenses.clear()
elif x[:1] == '-':
acceptable_licenses.discard(x[1:])
elif x in licenses:
acceptable_licenses.add(x)
licenses = acceptable_licenses
return ' '.join(sorted(licenses))
def getMissingLicenses(self, cpv, use, lic, slot, repo):
"""
Take a LICENSE string and return a list of any licenses that the user
may need to accept for the given package. The returned list will not
contain any licenses that have already been accepted. This method
can throw an InvalidDependString exception.
@param cpv: The package name (for package.license support)
@type cpv: String
@param use: "USE" from the cpv's metadata
@type use: String
@param lic: "LICENSE" from the cpv's metadata
@type lic: String
@param slot: "SLOT" from the cpv's metadata
@type slot: String
@rtype: List
@return: A list of licenses that have not been accepted.
"""
licenses = set(use_reduce(lic, matchall=1, flat=True))
licenses.discard('||')
acceptable_licenses = set()
for x in self._getPkgAcceptLicense(cpv, slot, repo):
if x == '*':
acceptable_licenses.update(licenses)
elif x == '-*':
acceptable_licenses.clear()
elif x[:1] == '-':
acceptable_licenses.discard(x[1:])
else:
acceptable_licenses.add(x)
license_str = lic
if "?" in license_str:
use = use.split()
else:
use = []
license_struct = use_reduce(license_str, uselist=use, opconvert=True)
return self._getMaskedLicenses(license_struct, acceptable_licenses)
def _getMaskedLicenses(self, license_struct, acceptable_licenses):
if not license_struct:
return []
if license_struct[0] == "||":
ret = []
for element in license_struct[1:]:
if isinstance(element, list):
if element:
tmp = self._getMaskedLicenses(element, acceptable_licenses)
if not tmp:
return []
ret.extend(tmp)
else:
if element in acceptable_licenses:
return []
ret.append(element)
# Return all masked licenses, since we don't know which combination
# (if any) the user will decide to unmask.
return ret
ret = []
for element in license_struct:
if isinstance(element, list):
if element:
ret.extend(self._getMaskedLicenses(element,
acceptable_licenses))
else:
if element not in acceptable_licenses:
ret.append(element)
return ret
def set_accept_license_str(self, accept_license_str):
if accept_license_str != self._accept_license_str:
self._accept_license_str = accept_license_str
self._accept_license = tuple(self.expandLicenseTokens(accept_license_str.split()))
| gpl-2.0 |
katyushacccp/ISN_projet_final | alpha 1.4/modules/recherche.py | 3 | 2873 | # Fonction très utile. Tu lui envoie le numéro d'une face et elle te renvoie une liste sous la forme :
# [faceAuDessus[numéroFaceDessus,coin adjacent à la face principal en partant de la gauche,arrete adjacente,coin adjacent droit],
# numéroFaceDroite[...],numéroFaceGauche[...],nméroFaceOppose]
def posRel(face):
if face==0:
return [[4,0,3,6],[1,0,3,6],[5,6,3,0],[3,2,5,8],2]
elif face==1:
return [[4,6,7,8],[2,0,3,6],[5,0,1,2],[0,2,5,8],3]
elif face==2:
return [[4,8,5,2],[3,0,3,6],[5,2,5,8],[1,2,5,8],0]
elif face==3:
return [[4,2,1,0],[0,0,3,6],[5,8,7,6],[2,2,5,8],1]
elif face==4:
return [[3,2,1,0],[2,2,1,0],[1,0,1,2],[0,0,1,2],5]
elif face==5:
return [[1,6,7,8],[2,6,7,8],[3,8,7,6],[0,8,7,6],4]
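# Hedged illustration (added): e.g. posRel(0) returns
#     [[4, 0, 3, 6], [1, 0, 3, 6], [5, 6, 3, 0], [3, 2, 5, 8], 2]
# read as: face 4 above, 1 right, 5 below, 3 left, and 2 opposite; each
# inner list gives the neighbouring face number followed by its sticker
# indices (corner, edge, corner) along the shared boundary, stickers
# being numbered 0..8 on each 3x3 face.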
def boussole(sensChiffre):
if sensChiffre<0:
while sensChiffre<0:
sensChiffre+=4
elif sensChiffre>3:
while sensChiffre>3:
sensChiffre-=4
return sensChiffre
def reconnaissanceDirection(sensChiffre):
sensChiffre=boussole(sensChiffre)
if sensChiffre==0:
return "haut"
elif sensChiffre==1:
return "droite"
elif sensChiffre==2:
return "bas"
elif sensChiffre==3:
return "gauche"
def deReconnaissanceDirection(sens):
if sens=="haut":
return 0
elif sens=="droite":
return 1
elif sens=="bas":
return 2
elif sens=="gauche":
return 3
def arreteOppose(indice):
if indice==1:
return 7
elif indice==5:
return 3
elif indice==7:
return 1
elif indice==3:
return 5
def findCoin(cube,can,coul,coul2,coul3):
for face in range(6):
if coul is cube[face][0]:
possible=[ cube[posRel(face)[0][0]][posRel(face)[0][1]], cube[posRel(face)[3][0]][posRel(face)[3][1]] ]
if coul2 in possible and coul3 in possible:
return [face,0]
if coul is cube[face][2]:
possible=[cube[posRel(face)[0][0]][posRel(face)[0][3]], cube[posRel(face)[1][0]][posRel(face)[1][1]]]
if coul2 in possible and coul3 in possible:
return [face,2]
if coul is cube[face][8]:
possible=[cube[posRel(face)[2][0]][posRel(face)[2][3]], cube[posRel(face)[1][0]][posRel(face)[1][3]]]
if coul2 in possible and coul3 in possible:
return [face,8]
if coul is cube[face][6]:
possible=[cube[posRel(face)[2][0]][posRel(face)[2][1]], cube[posRel(face)[3][0]][posRel(face)[3][3]]]
if coul2 in possible and coul3 in possible:
return [face,6]
def findArrete(cube,can,coul,coul2):
for face in range(6):
if coul is cube[face][1] and coul2 is cube[posRel(face)[0][0]] [posRel(face)[0][2]]:
return [face,1]
if coul is cube[face][5] and coul2 is cube[posRel(face)[1][0]][posRel(face)[1][2]]:
return [face,5]
if coul is cube[face][7] and coul2 is cube[posRel(face)[2][0]][posRel(face)[2][2]]:
return [face,7]
if coul is cube[face][3] and coul2 is cube[posRel(face)[3][0]][posRel(face)[3][2]]:
return [face,3] | cc0-1.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/plat-irix5/panel.py | 132 | 7862 | # Module 'panel'
#
# Support for the Panel library.
# Uses built-in module 'pnl'.
# Applications should use 'panel.function' instead of 'pnl.function';
# most 'pnl' functions are transparently exported by 'panel',
# but dopanel() is overridden and you have to use this version
# if you want to use callbacks.
from warnings import warnpy3k
warnpy3k("the panel module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import pnl
debug = 0
# Test if an object is a list.
#
def is_list(x):
return type(x) == type([])
# Reverse a list.
#
def reverse(list):
res = []
for item in list:
res.insert(0, item)
return res
# Get an attribute of a list, which may itself be another list.
# Don't use 'prop' for name.
#
def getattrlist(list, name):
for item in list:
if item and is_list(item) and item[0] == name:
return item[1:]
return []
# Get a property of a list, which may itself be another list.
#
def getproplist(list, name):
for item in list:
if item and is_list(item) and item[0] == 'prop':
if len(item) > 1 and item[1] == name:
return item[2:]
return []
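# Hedged illustration (added): for a parsed actuator S-expression such
# as ['button', ['name', '"ok"'], ['prop', 'end-of-group', '#t']],
# getattrlist(descr, 'name') returns ['"ok"'] and
# getproplist(descr, 'end-of-group') returns ['#t']; non-list items
# like the leading 'button' tag are skipped. The descriptor contents
# are assumptions made only for this example.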
# Test if an actuator description contains the property 'end-of-group'
#
def is_endgroup(list):
x = getproplist(list, 'end-of-group')
return (x and x[0] == '#t')
# Neatly display an actuator definition given as S-expression
# the prefix string is printed before each line.
#
def show_actuator(prefix, a):
for item in a:
if not is_list(item):
print prefix, item
elif item and item[0] == 'al':
print prefix, 'Subactuator list:'
for a in item[1:]:
show_actuator(prefix + ' ', a)
elif len(item) == 2:
print prefix, item[0], '=>', item[1]
elif len(item) == 3 and item[0] == 'prop':
print prefix, 'Prop', item[1], '=>',
print item[2]
else:
print prefix, '?', item
# Neatly display a panel.
#
def show_panel(prefix, p):
for item in p:
if not is_list(item):
print prefix, item
elif item and item[0] == 'al':
print prefix, 'Actuator list:'
for a in item[1:]:
show_actuator(prefix + ' ', a)
elif len(item) == 2:
print prefix, item[0], '=>', item[1]
elif len(item) == 3 and item[0] == 'prop':
print prefix, 'Prop', item[1], '=>',
print item[2]
else:
print prefix, '?', item
# Exception raised by build_actuator or build_panel.
#
panel_error = 'panel error'
# Dummy callback used to initialize the callbacks.
#
def dummy_callback(arg):
pass
# Assign attributes to members of the target.
# Attribute names in exclist are ignored.
# The member name is the attribute name prefixed with the prefix.
#
def assign_members(target, attrlist, exclist, prefix):
for item in attrlist:
if is_list(item) and len(item) == 2 and item[0] not in exclist:
name, value = item[0], item[1]
ok = 1
if value[0] in '-0123456789':
value = eval(value)
elif value[0] == '"':
value = value[1:-1]
elif value == 'move-then-resize':
# Strange default set by Panel Editor...
ok = 0
else:
print 'unknown value', value, 'for', name
ok = 0
if ok:
lhs = 'target.' + prefix + name
stmt = lhs + '=' + repr(value)
if debug: print 'exec', stmt
try:
exec stmt + '\n'
except KeyboardInterrupt: # Don't catch this!
raise KeyboardInterrupt
except:
print 'assign failed:', stmt
# Build a real actuator from an actuator description.
# Return a pair (actuator, name).
#
def build_actuator(descr):
namelist = getattrlist(descr, 'name')
if namelist:
# Assume it is a string
actuatorname = namelist[0][1:-1]
else:
actuatorname = ''
type = descr[0]
if type[:4] == 'pnl_': type = type[4:]
act = pnl.mkact(type)
act.downfunc = act.activefunc = act.upfunc = dummy_callback
#
assign_members(act, descr[1:], ['al', 'data', 'name'], '')
#
# Treat actuator-specific data
#
datalist = getattrlist(descr, 'data')
prefix = ''
if type[-4:] == 'puck':
prefix = 'puck_'
elif type == 'mouse':
prefix = 'mouse_'
assign_members(act, datalist, [], prefix)
#
return act, actuatorname
# Build all sub-actuators and add them to the super-actuator.
# The super-actuator must already have been added to the panel.
# Sub-actuators with defined names are added as members to the panel
# so they can be referenced as p.name.
#
# Note: I have no idea how panel.endgroup() works when applied
# to a sub-actuator.
#
def build_subactuators(panel, super_act, al):
#
# This is nearly the same loop as below in build_panel(),
# except a call is made to addsubact() instead of addact().
#
for a in al:
act, name = build_actuator(a)
act.addsubact(super_act)
if name:
stmt = 'panel.' + name + ' = act'
if debug: print 'exec', stmt
exec stmt + '\n'
if is_endgroup(a):
panel.endgroup()
sub_al = getattrlist(a, 'al')
if sub_al:
build_subactuators(panel, act, sub_al)
#
# Fix the actuator to which whe just added subactuators.
# This can't hurt (I hope) and is needed for the scroll actuator.
#
super_act.fixact()
# Build a real panel from a panel definition.
# Return a panel object p, where for each named actuator a, p.name is a
# reference to a.
#
def build_panel(descr):
#
# Sanity check
#
if (not descr) or descr[0] != 'panel':
raise panel_error, 'panel description must start with "panel"'
#
if debug: show_panel('', descr)
#
# Create an empty panel
#
panel = pnl.mkpanel()
#
# Assign panel attributes
#
assign_members(panel, descr[1:], ['al'], '')
#
# Look for actuator list
#
al = getattrlist(descr, 'al')
#
# The order in which actuators are created is important
# because of the endgroup() operator.
# Unfortunately the Panel Editor outputs the actuator list
# in reverse order, so we reverse it here.
#
al = reverse(al)
#
for a in al:
act, name = build_actuator(a)
act.addact(panel)
if name:
stmt = 'panel.' + name + ' = act'
exec stmt + '\n'
if is_endgroup(a):
panel.endgroup()
sub_al = getattrlist(a, 'al')
if sub_al:
build_subactuators(panel, act, sub_al)
#
return panel
# Wrapper around pnl.dopanel() which calls call-back functions.
#
def my_dopanel():
# Extract only the first 4 elements to allow for future expansion
a, down, active, up = pnl.dopanel()[:4]
if down:
down.downfunc(down)
if active:
active.activefunc(active)
if up:
up.upfunc(up)
return a
# Create one or more panels from a description file (S-expressions)
# generated by the Panel Editor.
#
def defpanellist(file):
import panelparser
descrlist = panelparser.parse_file(open(file, 'r'))
panellist = []
for descr in descrlist:
panellist.append(build_panel(descr))
return panellist
# Import everything from built-in method pnl, so the user can always
# use panel.foo() instead of pnl.foo().
# This gives *no* performance penalty once this module is imported.
#
from pnl import * # for export
dopanel = my_dopanel # override pnl.dopanel
| mit |
Scemoon/lpts | site-packages/pychart/afm/AvantGarde_DemiOblique.py | 12 | 1506 | # AFM font AvantGarde-DemiOblique (path: /usr/share/fonts/afms/adobe/pagdo8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.
import dir
dir.afm["AvantGarde-DemiOblique"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 280, 280, 360, 560, 560, 860, 680, 280, 380, 380, 440, 600, 280, 420, 280, 460, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 280, 280, 600, 600, 600, 560, 740, 740, 580, 780, 700, 520, 480, 840, 680, 280, 480, 620, 440, 900, 740, 840, 560, 840, 580, 520, 420, 640, 700, 900, 680, 620, 500, 320, 640, 320, 600, 500, 280, 660, 660, 640, 660, 640, 280, 660, 600, 240, 260, 580, 240, 940, 600, 640, 660, 660, 320, 440, 300, 600, 560, 800, 560, 580, 460, 340, 600, 340, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 280, 560, 560, 160, 560, 560, 560, 560, 220, 480, 460, 240, 240, 520, 520, 500, 500, 560, 560, 280, 500, 600, 600, 280, 480, 480, 460, 1000, 1280, 500, 560, 500, 420, 420, 540, 480, 420, 480, 280, 500, 500, 360, 340, 500, 700, 340, 540, 1000, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 900, 500, 360, 500, 500, 500, 500, 480, 840, 1060, 360, 500, 500, 500, 500, 500, 1080, 500, 500, 500, 240, 500, 500, 320, 660, 1080, 600, )
| gpl-2.0 |
joariasl/odoo | addons/survey_crm/__openerp__.py | 312 | 1593 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Survey CRM',
'version': '2.0',
'category': 'Marketing',
'complexity': 'easy',
'website': 'https://www.odoo.com/page/survey',
'description': """
Survey - CRM (bridge module)
=================================================================================
This module adds a Survey mass mailing button inside the more option of lead/customers views
""",
'author': 'OpenERP SA',
'depends': ['crm', 'survey'],
'data': [
'crm_view.xml',
],
'installable': True,
'auto_install': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MichaeLeroy/Escher.jl | assets/bower_components/katex/metrics/extract_tfms.py | 20 | 2698 | #!/usr/bin/env python
import collections
import json
import parse_tfm
import subprocess
import sys
def find_font_path(font_name):
try:
font_path = subprocess.check_output(['kpsewhich', font_name])
except OSError:
raise RuntimeError("Couldn't find kpsewhich program, make sure you" +
" have TeX installed")
except subprocess.CalledProcessError:
raise RuntimeError("Couldn't find font metrics: '%s'" % font_name)
return font_path.strip()
def main():
mapping = json.load(sys.stdin)
fonts = [
'cmbsy10.tfm',
'cmbx10.tfm',
'cmex10.tfm',
'cmmi10.tfm',
'cmmib10.tfm',
'cmr10.tfm',
'cmsy10.tfm',
'cmti10.tfm',
'msam10.tfm',
'msbm10.tfm'
]
# Extracted by running `\font\a=<font>` and then `\showthe\skewchar\a` in
# TeX, where `<font>` is the name of the font listed here. The skewchar
# will be printed out in the output. If it outputs `-1`, that means there
# is no skewchar, so we use `None` here.
font_skewchar = {
'cmbsy10': None,
'cmbx10': None,
'cmex10': None,
'cmmi10': 127,
'cmmib10': None,
'cmr10': None,
'cmsy10': 48,
'cmti10': None,
'msam10': None,
'msbm10': None
}
font_name_to_tfm = {}
for font_name in fonts:
font_basename = font_name.split('.')[0]
font_path = find_font_path(font_name)
font_name_to_tfm[font_basename] = parse_tfm.read_tfm_file(font_path)
families = collections.defaultdict(dict)
for family, chars in mapping.iteritems():
for char, char_data in chars.iteritems():
char_num = int(char)
font = char_data['font']
tex_char_num = int(char_data['char'])
yshift = float(char_data['yshift'])
tfm_char = font_name_to_tfm[font].get_char_metrics(tex_char_num)
height = round(tfm_char.height + yshift / 1000.0, 5)
depth = round(tfm_char.depth - yshift / 1000.0, 5)
italic = round(tfm_char.italic_correction, 5)
skewkern = 0.0
if (font_skewchar[font] and
font_skewchar[font] in tfm_char.kern_table):
skewkern = round(
tfm_char.kern_table[font_skewchar[font]], 5)
families[family][char_num] = {
'height': height,
'depth': depth,
'italic': italic,
'skew': skewkern,
}
sys.stdout.write(
json.dumps(families, separators=(',', ':'), sort_keys=True))
if __name__ == '__main__':
main()
| mit |
agry/NGECore2 | scripts/mobiles/endor/hanadak_ancient.py | 2 | 1711 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('hanadak_ancient')
mobileTemplate.setLevel(61)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1.2)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(40)
mobileTemplate.setHideType("Bristley Hide")
mobileTemplate.setHideAmount(40)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(40)
mobileTemplate.setSocialGroup("hanadak")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_hanadak.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_dampen_pain_4')
attacks.add('bm_shaken_2')
attacks.add('bm_stomp_4')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('hanadak_ancient', mobileTemplate)
return | lgpl-3.0 |
cherrygirl/micronaet7 | menuitem_account/__init__.py | 18 | 1114 | # -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lpsinger/astropy | astropy/io/ascii/tests/test_qdp.py | 5 | 9090 | import numpy as np
import pytest
from astropy.io import ascii
from astropy.io.ascii.qdp import _read_table_qdp, _write_table_qdp
from astropy.io.ascii.qdp import _get_lines_from_file
from astropy.table import Table, Column, MaskedColumn
from astropy.utils.exceptions import AstropyUserWarning
def test_get_tables_from_qdp_file(tmpdir):
example_qdp = """
! Swift/XRT hardness ratio of trigger: XXXX, name: BUBU X-2
! Columns are as labelled
READ TERR 1
READ SERR 2
! WT -- hard data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.212439 0.212439
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 0.000000
NO NO NO NO NO
! WT -- soft data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 0.726155 0.583890
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 2.410935 1.393592
NO NO NO NO NO
! WT -- hardness ratio
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 -nan
"""
path = str(tmpdir.join('test.qdp'))
with open(path, "w") as fp:
print(example_qdp, file=fp)
table0 = _read_table_qdp(fp.name, names=["MJD", "Rate"], table_id=0)
assert table0.meta["initial_comments"][0].startswith("Swift")
assert table0.meta["comments"][0].startswith("WT -- hard data")
table2 = _read_table_qdp(fp.name, names=["MJD", "Rate"], table_id=2)
assert table2.meta["initial_comments"][0].startswith("Swift")
assert table2.meta["comments"][0].startswith("WT -- hardness")
assert np.isclose(table2["MJD_nerr"][0], -2.37847222222222e-05)
def test_roundtrip(tmpdir):
example_qdp = """
! Swift/XRT hardness ratio of trigger: XXXX, name: BUBU X-2
! Columns are as labelled
READ TERR 1
READ SERR 2
! WT -- hard data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 NO 0.212439
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 0.000000
NO NO NO NO NO
! WT -- soft data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 0.726155 0.583890
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 2.410935 1.393592
NO NO NO NO NO
! WT -- hardness ratio
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 NO
! Add command, just to raise the warning.
READ TERR 1
! WT -- whatever
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
NO 1.14467592592593e-05 -1.14467592592593e-05 0.000000 NO
"""
path = str(tmpdir.join('test.qdp'))
path2 = str(tmpdir.join('test2.qdp'))
with open(path, "w") as fp:
print(example_qdp, file=fp)
with pytest.warns(AstropyUserWarning) as record:
table = _read_table_qdp(path, names=["MJD", "Rate"],
table_id=0)
assert np.any(["This file contains multiple command blocks"
in r.message.args[0]
for r in record])
_write_table_qdp(table, path2)
new_table = _read_table_qdp(path2, names=["MJD", "Rate"], table_id=0)
for col in new_table.colnames:
is_masked = np.array([np.ma.is_masked(val) for val in new_table[col]])
if np.any(is_masked):
            # All masked values in the original are read back as masked.
assert np.ma.is_masked(table[col][is_masked])
is_nan = np.array([(not np.ma.is_masked(val) and np.isnan(val))
for val in new_table[col]])
# All non-NaN values are the same
assert np.allclose(new_table[col][~is_nan], table[col][~is_nan])
if np.any(is_nan):
# All NaN values are read as such.
assert np.isnan(table[col][is_nan])
assert np.allclose(new_table['MJD_perr'], [2.378472e-05, 1.1446759e-05])
for meta_name in ['initial_comments', 'comments']:
assert meta_name in new_table.meta
def test_read_example(tmpdir):
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b c ce d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b c ce d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
"""
dat = ascii.read(example_qdp, format='qdp', table_id=1,
names=['a', 'b', 'c', 'd'])
t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
names=['a', 'b', 'c', 'd'])
assert np.allclose(t['a'], [54000, 55000])
assert t['c_err'][0] == 5.5
assert np.ma.is_masked(t['b'][0])
assert np.isnan(t['d'][1])
for col1, col2 in zip(t.itercols(), dat.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
def test_roundtrip_example(tmpdir):
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b c ce d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b c ce d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
"""
test_file = str(tmpdir.join('test.qdp'))
t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
names=['a', 'b', 'c', 'd'])
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
t2 = Table.read(test_file, names=['a', 'b', 'c', 'd'], table_id=0)
for col1, col2 in zip(t.itercols(), t2.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
def test_roundtrip_example_comma(tmpdir):
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a,a(pos),a(neg),b,c,ce,d
53000.5,0.25,-0.5,1,1.5,3.5,2
54000.5,1.25,-1.5,2,2.5,4.5,3
NO,NO,NO,NO,NO
! Table 1 comment
!a,a(pos),a(neg),b,c,ce,d
54000.5,2.25,-2.5,NO,3.5,5.5,5
55000.5,3.25,-3.5,4,4.5,6.5,nan
"""
test_file = str(tmpdir.join('test.qdp'))
t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
names=['a', 'b', 'c', 'd'], sep=',')
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
t2 = Table.read(test_file, names=['a', 'b', 'c', 'd'], table_id=0)
# t.values_equal(t2)
for col1, col2 in zip(t.itercols(), t2.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
def test_read_write_simple(tmpdir):
test_file = str(tmpdir.join('test.qdp'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3, 4]))
t1.add_column(MaskedColumn(data=[4., np.nan, 3., 1.], name='b',
mask=[False, False, False, True]))
t1.write(test_file, format='ascii.qdp')
with pytest.warns(UserWarning) as record:
t2 = Table.read(test_file, format='ascii.qdp')
assert np.any(["table_id not specified. Reading the first available table"
in r.message.args[0]
for r in record])
assert np.allclose(t2['col1'], t1['a'])
assert np.all(t2['col1'] == t1['a'])
good = ~np.isnan(t1['b'])
assert np.allclose(t2['col2'][good], t1['b'][good])
def test_read_write_simple_specify_name(tmpdir):
test_file = str(tmpdir.join('test.qdp'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
    # Write with the default (None) err_specs.
t1.write(test_file, format='ascii.qdp')
t2 = Table.read(test_file, table_id=0, format='ascii.qdp', names=['a'])
assert np.all(t2['a'] == t1['a'])
def test_get_lines_from_qdp(tmpdir):
test_file = str(tmpdir.join('test.qdp'))
text_string = "A\nB"
text_output = _get_lines_from_file(text_string)
with open(test_file, "w") as fobj:
print(text_string, file=fobj)
file_output = _get_lines_from_file(test_file)
list_output = _get_lines_from_file(["A", "B"])
for i, line in enumerate(["A", "B"]):
assert file_output[i] == line
assert list_output[i] == line
assert text_output[i] == line
| bsd-3-clause |
lama7/blogtool | doc/source/conf.py | 1 | 7957 | # -*- coding: utf-8 -*-
#
# blogtool documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 21 11:36:34 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'blogtool'
copyright = u'2013, Gerry LaMontagne'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
VERSIONFILE = "../../blogtool/__version__.py"
m = re.search("^__version__ = [\"](.*?)[\"]", open(VERSIONFILE, "r").read())
if m:
version = m.group(1)
else:
raise RuntimeError("Could not extract version string.\n")
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'blogtooldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'blogtool.tex', u'blogtool Documentation',
u'Gerry LaMontagne', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'blogtool', u'blogtool Documentation',
[u'Gerry LaMontagne'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'blogtool', u'blogtool Documentation',
u'Gerry LaMontagne', 'blogtool', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
google/google-ctf | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Demo/metaclasses/Synch.py | 6 | 8194 | """Synchronization metaclass.
This metaclass makes it possible to declare synchronized methods.
"""
import thread
# First we need to define a reentrant lock.
# This is generally useful and should probably be in a standard Python
# library module. For now, we in-line it.
class Lock:
"""Reentrant lock.
This is a mutex-like object which can be acquired by the same
thread more than once. It keeps a reference count of the number
of times it has been acquired by the same thread. Each acquire()
call must be matched by a release() call and only the last
release() call actually releases the lock for acquisition by
another thread.
The implementation uses two locks internally:
__mutex is a short term lock used to protect the instance variables
__wait is the lock for which other threads wait
A thread intending to acquire both locks should acquire __wait
first.
The implementation uses two other instance variables, protected by
locking __mutex:
__tid is the thread ID of the thread that currently has the lock
__count is the number of times the current thread has acquired it
When the lock is released, __tid is None and __count is zero.
"""
def __init__(self):
"""Constructor. Initialize all instance variables."""
self.__mutex = thread.allocate_lock()
self.__wait = thread.allocate_lock()
self.__tid = None
self.__count = 0
def acquire(self, flag=1):
"""Acquire the lock.
If the optional flag argument is false, returns immediately
when it cannot acquire the __wait lock without blocking (it
may still block for a little while in order to acquire the
__mutex lock).
The return value is only relevant when the flag argument is
false; it is 1 if the lock is acquired, 0 if not.
"""
self.__mutex.acquire()
try:
if self.__tid == thread.get_ident():
self.__count = self.__count + 1
return 1
finally:
self.__mutex.release()
locked = self.__wait.acquire(flag)
if not flag and not locked:
return 0
try:
self.__mutex.acquire()
assert self.__tid == None
assert self.__count == 0
self.__tid = thread.get_ident()
self.__count = 1
return 1
finally:
self.__mutex.release()
def release(self):
"""Release the lock.
If this thread doesn't currently have the lock, an assertion
error is raised.
Only allow another thread to acquire the lock when the count
reaches zero after decrementing it.
"""
self.__mutex.acquire()
try:
assert self.__tid == thread.get_ident()
assert self.__count > 0
self.__count = self.__count - 1
if self.__count == 0:
self.__tid = None
self.__wait.release()
finally:
self.__mutex.release()
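# A minimal usage sketch of Lock (illustrative; not part of the original
# demo): the same thread may nest acquire() calls as long as each one is
# matched by a release(); only the final release() lets other threads in.
def _lockUsageSketch():
    lock = Lock()
    lock.acquire()
    lock.acquire()      # re-entrant: count goes to 2, no deadlock
    lock.release()      # count drops to 1, lock still held by this thread
    lock.release()      # count 0 -- another thread may now acquire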
def _testLock():
done = []
def f2(lock, done=done):
lock.acquire()
print "f2 running in thread %d\n" % thread.get_ident(),
lock.release()
done.append(1)
def f1(lock, f2=f2, done=done):
lock.acquire()
print "f1 running in thread %d\n" % thread.get_ident(),
try:
f2(lock)
finally:
lock.release()
done.append(1)
lock = Lock()
lock.acquire()
f1(lock) # Adds 2 to done
lock.release()
lock.acquire()
thread.start_new_thread(f1, (lock,)) # Adds 2
thread.start_new_thread(f1, (lock, f1)) # Adds 3
thread.start_new_thread(f2, (lock,)) # Adds 1
thread.start_new_thread(f2, (lock,)) # Adds 1
lock.release()
import time
while len(done) < 9:
print len(done)
time.sleep(0.001)
print len(done)
# Now, the Locking metaclass is a piece of cake.
# As an example feature, methods whose name begins with exactly one
# underscore are not synchronized.
from Meta import MetaClass, MetaHelper, MetaMethodWrapper
class LockingMethodWrapper(MetaMethodWrapper):
def __call__(self, *args, **kw):
        if self.__name__[:1] == '_' and self.__name__[1:2] != '_':
return apply(self.func, (self.inst,) + args, kw)
self.inst.__lock__.acquire()
try:
return apply(self.func, (self.inst,) + args, kw)
finally:
self.inst.__lock__.release()
class LockingHelper(MetaHelper):
__methodwrapper__ = LockingMethodWrapper
def __helperinit__(self, formalclass):
MetaHelper.__helperinit__(self, formalclass)
self.__lock__ = Lock()
class LockingMetaClass(MetaClass):
__helper__ = LockingHelper
Locking = LockingMetaClass('Locking', (), {})
def _test():
# For kicks, take away the Locking base class and see it die
class Buffer(Locking):
def __init__(self, initialsize):
assert initialsize > 0
self.size = initialsize
self.buffer = [None]*self.size
self.first = self.last = 0
def put(self, item):
# Do we need to grow the buffer?
if (self.last+1) % self.size != self.first:
# Insert the new item
self.buffer[self.last] = item
self.last = (self.last+1) % self.size
return
# Double the buffer size
# First normalize it so that first==0 and last==size-1
print "buffer =", self.buffer
print "first = %d, last = %d, size = %d" % (
self.first, self.last, self.size)
if self.first <= self.last:
temp = self.buffer[self.first:self.last]
else:
temp = self.buffer[self.first:] + self.buffer[:self.last]
print "temp =", temp
self.buffer = temp + [None]*(self.size+1)
self.first = 0
self.last = self.size-1
self.size = self.size*2
print "Buffer size doubled to", self.size
print "new buffer =", self.buffer
print "first = %d, last = %d, size = %d" % (
self.first, self.last, self.size)
self.put(item) # Recursive call to test the locking
def get(self):
# Is the buffer empty?
if self.first == self.last:
raise EOFError # Avoid defining a new exception
item = self.buffer[self.first]
self.first = (self.first+1) % self.size
return item
def producer(buffer, wait, n=1000):
import time
i = 0
while i < n:
print "put", i
buffer.put(i)
i = i+1
print "Producer: done producing", n, "items"
wait.release()
def consumer(buffer, wait, n=1000):
import time
i = 0
tout = 0.001
while i < n:
try:
x = buffer.get()
if x != i:
raise AssertionError, \
"get() returned %s, expected %s" % (x, i)
print "got", i
i = i+1
tout = 0.001
except EOFError:
time.sleep(tout)
tout = tout*2
print "Consumer: done consuming", n, "items"
wait.release()
pwait = thread.allocate_lock()
pwait.acquire()
cwait = thread.allocate_lock()
cwait.acquire()
buffer = Buffer(1)
n = 1000
thread.start_new_thread(consumer, (buffer, cwait, n))
thread.start_new_thread(producer, (buffer, pwait, n))
pwait.acquire()
print "Producer done"
cwait.acquire()
print "All done"
print "buffer size ==", len(buffer.buffer)
if __name__ == '__main__':
_testLock()
_test()
| apache-2.0 |
kamyu104/django | tests/sessions_tests/tests.py | 116 | 30900 | import base64
import os
import shutil
import string
import sys
import tempfile
import unittest
from datetime import timedelta
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import \
SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import \
SessionStore as CookieSession
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import patch_logger
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.six.moves import http_cookies
from .custom_db_backend import SessionStore as CustomDatabaseSession
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
        self.session.accessed = False
        self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
if (hasattr(self.session, '_cache') and 'DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_save_doesnt_clear_data(self):
self.session['a'] = 'b'
self.session.save()
self.assertEqual(self.session['a'], 'b')
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail(
"The session object did not save properly. "
"Middleware may be saving cache items without namespaces."
)
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ''
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = '1234567'
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = '12345678'
self.assertEqual(self.session.session_key, '12345678')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf')
with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
self.assertEqual({}, self.session.decode(bad_encode))
# check that the failed decode is logged
self.assertEqual(len(calls), 1)
self.assertIn('corrupted', calls[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
if self.backend is CookieSession:
raise unittest.SkipTest("Cookie backend doesn't have an external store to create records in.")
session = self.backend('someunknownkey')
session.load()
self.assertFalse(session.exists(session.session_key))
# provided unknown key was cycled, not reused
self.assertNotEqual(session.session_key, 'someunknownkey')
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
session_engine = 'django.contrib.sessions.backends.db'
@property
def model(self):
return self.backend.get_model_class()
def test_session_str(self):
"Session repr should be the session key."
self.session['x'] = 1
self.session.save()
session_key = self.session.session_key
s = self.model.objects.get(session_key=session_key)
self.assertEqual(force_text(s), session_key)
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in normal way
"""
self.session['x'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
# Change it
self.model.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, self.model.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, self.model.objects.count())
with override_settings(SESSION_ENGINE=self.session_engine):
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, self.model.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CustomDatabaseSessionTests(DatabaseSessionTests):
backend = CustomDatabaseSession
session_engine = 'sessions_tests.custom_db_backend'
def test_extra_session_field(self):
# Set the account ID to be picked up by a custom session storage
# and saved to a custom session model database column.
self.session['_auth_user_id'] = 42
self.session.save()
# Make sure that the customized create_model_instance() was called.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, 42)
# Make the session "anonymous".
self.session.pop('_auth_user_id')
self.session.save()
# Make sure that save() on an existing session did the right job.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, None)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
        # Ticket #21000 - the cached_db backend should respect SESSION_CACHE_ALIAS.
self.assertRaises(InvalidCacheBackendError, self.backend)
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a/b/c")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(caches['default'].get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'session',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertEqual(caches['default'].get(self.session.cache_key), None)
self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None)
class SessionMiddlewareTests(TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
def test_session_delete_on_end(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header looks like:
# Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; expires=Thu, 01-Jan-1970 00:00:00 GMT; '
'Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
@override_settings(SESSION_COOKIE_DOMAIN='.example.local')
def test_session_delete_on_end_with_custom_domain(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header with a custom domain looks like:
# Set-Cookie: sessionid=; Domain=.example.local;
# expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; Domain=.example.local; expires=Thu, '
'01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
def test_flush_empty_without_session_cookie_doesnt_set_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# A cookie should not be set.
self.assertEqual(response.cookies, {})
# The session is accessed so "Vary: Cookie" should be set.
self.assertEqual(response['Vary'], 'Cookie')
def test_empty_session_saved(self):
""""
If a session is emptied of data but still has a key, it should still
be updated.
"""
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Set a session key and some data.
middleware.process_request(request)
request.session['foo'] = 'bar'
# Handle the response through the middleware.
response = middleware.process_response(request, response)
self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),))
# A cookie should be set, along with Vary: Cookie.
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Empty the session data.
del request.session['foo']
# Handle the response through the middleware.
response = HttpResponse('Session test')
response = middleware.process_response(request, response)
self.assertEqual(dict(request.session.values()), {})
session = Session.objects.get(session_key=request.session.session_key)
self.assertEqual(session.get_decoded(), {})
# While the session is empty, it hasn't been flushed so a cookie should
# still be set, along with Vary: Cookie.
self.assertGreater(len(request.session.session_key), 8)
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
self.session.serializer = PickleSerializer
self.session.load()
| bsd-3-clause |
smmosquera/serge | sandbox/shadow.py | 2 | 2074 | """Test of making a shadow"""
import pygame
import Numeric  # legacy pre-numpy array package used by old pygame.surfarray builds
import os
import time
import serge.visual
import serge.engine
import serge.blocks.utils
import serge.blocks.visualblocks
serge.visual.Register.setPath(os.path.join('..', 'test', 'images'))
serge.visual.Register.registerItem('green', 'greenship.png')
serge.visual.Register.registerItem('blue', 'blueship.png')
e = serge.engine.Engine()
r = e.getRenderer()
serge.blocks.utils.createLayersForEngine(e, ['back', 'middle', 'front'])
serge.blocks.utils.createWorldsForEngine(e, ['main'])
main = e.setCurrentWorldByName('main')
serge.blocks.utils.addSpriteActorToWorld(main, 't', 't', 'green', 'front', (150, 150))
serge.blocks.utils.addSpriteActorToWorld(main, 't', 't', 'blue', 'front', (275, 225))
serge.blocks.utils.addVisualActorToWorld(main, 'b', 'b',
serge.blocks.visualblocks.Rectangle((800,600), (255,255,255)), 'back', (0, 0))
b = serge.visual.SurfaceDrawing(800, 600)
main.renderTo(r, 0)
s = r.getLayer('front').getSurface()
def doit(s):
    """Build a translucent shadow surface from the sprite surface s."""
    ambience = 0.5
    # Per-pixel alpha of the source sprite, as an array view (locks s).
    image_alpha = pygame.surfarray.pixels_alpha(s)
    if ambience > 0.0:
        # Attenuate the alpha so the shadow lets ambient light through.
        shadow_alpha = (image_alpha *
                        (1.0 - ambience)).astype(Numeric.UInt8)
    else:
        shadow_alpha = image_alpha
    shadow = s.convert_alpha()
    # Blacken the copy: blit a black surface carrying the sprite's alpha.
    shading = pygame.Surface(s.get_size(), pygame.SRCALPHA, 32)
    pygame.surfarray.pixels_alpha(shading)[...] = image_alpha
    shadow.blit(shading, (0, 0))
    # Apply the attenuated alpha to the blackened copy.
    pygame.surfarray.pixels_alpha(shadow)[...] = shadow_alpha
    image_alpha = None  # drop the array view to unlock the surface
    return shadow
s = doit(s)
b.setSurface(s)
sa = serge.blocks.utils.addVisualActorToWorld(main, 's', 's', b, 'middle', (325, 245))
pygame.image.save(sa.visual.getSurface(), 'test.png')
sa.log.debug('%s %s' % (sa.visual.getSurface(), id(sa.visual.getSurface())))
r.getLayer('front').active = True
r.getLayer('back').active = True
r.getLayer('middle').active = True
e.run(60, time.time()+2)
pygame.image.save(sa.visual.getSurface(), 'test3.png')
sa.log.debug('%s %s' % (sa.visual.getSurface(), id(sa.visual.getSurface())))
| lgpl-3.0 |
securestate/king-phisher-plugins | client/uri_spoof_generator.py | 3 | 3621 | import os
import king_phisher.client.gui_utilities as gui_utilities
import king_phisher.client.mailer as mailer
import king_phisher.client.plugins as plugins
def _expand_path(output_file, *joins, pathmod=os.path):
	output_file = pathmod.expandvars(output_file)
	output_file = pathmod.expanduser(output_file)
	output_file = pathmod.join(output_file, *joins)
	return output_file
class Plugin(plugins.ClientPlugin):
authors = ['Jeremy Schoeneman']
classifiers = ['Plugin :: Client :: Tool']
title = 'URI Spoof Generator'
description = """
Exports a redirect page which allows URI spoofing in the address bar of the
target's browser.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = [
plugins.ClientOptionString(
'redir_url',
'URL to redirect to',
display_name='Redirect URL'
),
plugins.ClientOptionString(
'spoofed_uri',
'URI to spoof from',
display_name='Spoofed URI'
),
plugins.ClientOptionString(
'output_html_file',
'HTML file to output to',
display_name='Output HTML File',
default='~/redir.html'
)
]
version = '1.1'
def initialize(self):
self.add_menu_item('Tools > Create Data URI Phish', self.make_page)
return True
def make_page(self, _):
if not self.config['redir_url']:
gui_utilities.show_dialog_error(
'Missing Option',
self.application.get_active_window(),
'Please configure the "Redirect URL" option.'
)
return
if not self.config['spoofed_uri']:
gui_utilities.show_dialog_error(
'Missing Option',
self.application.get_active_window(),
'Please configure the "Spoofed URI" option.'
)
return
if not self.config['output_html_file']:
gui_utilities.show_dialog_error(
'Missing Option',
self.application.get_active_window(),
'Please configure the "Output HTML File" option.'
)
return
		outfile = self.expand_path(self.config['output_html_file'])
		if outfile is None:
			return
		try:
			with open(outfile, 'w') as file_h:
				file_h.write(self.build_html())
			self.logger.info('html file exported successfully')
		except IOError:
			self.logger.error('failed to write the html file', exc_info=True)
			return
def build_html(self):
page = '<html>'
page += ' <script>'
page += ' window.location.href = decodeURIComponent("'+ self.escape_url(self.config['redir_url'], self.config['spoofed_uri']) + '")'
page += ' </script>'
page += '</html>'
return page
def escape_url(self, url, spoofed_url):
full_url = 'data:text/html,' + spoofed_url
full_url += ' ' * 653
full_url += '<iframe width=\"100%\" height=\"100%\" src=\"' + url + '?id={{ client.message_id }}\"></iframe>'
full_url += '<style>body{color:#fff; overflow:hidden;margin:-10px 0px 0px 0px; background-color: #fff;} iframe { border:none; outline:none;}</style>")'
		return '%' + '%'.join(['%02x' % ord(x) for x in full_url])
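	# Illustrative example (hypothetical values): with redir_url set to
	# "http://phish.example/" and spoofed_uri to "https://bank.example/login",
	# build_html() emits a page whose script assigns window.location.href a
	# percent-encoded "data:text/html,https://bank.example/login ..." URI, so
	# the spoofed address is what the target sees in the address bar while the
	# real landing page loads in the full-size iframe.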
def expand_path(self, output_file, *args, **kwargs):
expanded_path = _expand_path(output_file, *args, **kwargs)
try:
expanded_path = mailer.render_message_template(expanded_path, self.application.config)
except jinja2.exceptions.TemplateSyntaxError as error:
self.logger.error("jinja2 syntax error ({0}) in directory: {1}".format(error.message, output_file))
gui_utilities.show_dialog_error('Error', self.application.get_active_window(), 'Error creating the HTML file.')
return None
except ValueError as error:
self.logger.error("value error ({0}) in directory: {1}".format(error, output_file))
gui_utilities.show_dialog_error('Error', self.application.get_active_window(), 'Error creating the HTML file.')
return None
return expanded_path
| bsd-3-clause |
repotvsupertuga/tvsupertuga.repository | script.module.universalscrapers/lib/universalscrapers/modules/js2py/constructors/jsdate.py | 64 | 10612 | from ..base import *
from .time_helpers import *
TZ_OFFSET = (time.altzone//3600)
ABS_OFFSET = abs(TZ_OFFSET)
TZ_NAME = time.tzname[1]
ISO_FORMAT = '%s-%s-%sT%s:%s:%s.%sZ'
@Js
def Date(year, month, date, hours, minutes, seconds, ms):
return now().to_string()
Date.Class = 'Date'
def now():
return PyJsDate(int(time.time()*1000), prototype=DatePrototype)
@Js
def UTC(year, month, date, hours, minutes, seconds, ms): # todo complete this
args = arguments
y = args[0].to_number()
m = args[1].to_number()
l = len(args)
dt = args[2].to_number() if l>2 else Js(1)
h = args[3].to_number() if l>3 else Js(0)
mi = args[4].to_number() if l>4 else Js(0)
sec = args[5].to_number() if l>5 else Js(0)
mili = args[6].to_number() if l>6 else Js(0)
if not y.is_nan() and 0<=y.value<=99:
y = y + Js(1900)
t = TimeClip(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili)))
return PyJsDate(t, prototype=DatePrototype)
@Js
def parse(string):
return PyJsDate(TimeClip(parse_date(string.to_string().value)), prototype=DatePrototype)
Date.define_own_property('now', {'value': Js(now),
'enumerable': False,
'writable': True,
'configurable': True})
Date.define_own_property('parse', {'value': parse,
'enumerable': False,
'writable': True,
'configurable': True})
Date.define_own_property('UTC', {'value': UTC,
'enumerable': False,
'writable': True,
'configurable': True})
class PyJsDate(PyJs):
Class = 'Date'
extensible = True
def __init__(self, value, prototype=None):
self.value = value
self.own = {}
self.prototype = prototype
# todo fix this problematic datetime part
def to_local_dt(self):
return datetime.datetime.utcfromtimestamp(UTCToLocal(self.value)//1000)
def to_utc_dt(self):
return datetime.datetime.utcfromtimestamp(self.value//1000)
def local_strftime(self, pattern):
if self.value is NaN:
return 'Invalid Date'
try:
dt = self.to_local_dt()
except:
raise MakeError('TypeError', 'unsupported date range. Will fix in future versions')
try:
return dt.strftime(pattern)
except:
raise MakeError('TypeError', 'Could not generate date string from this date (limitations of python.datetime)')
def utc_strftime(self, pattern):
if self.value is NaN:
return 'Invalid Date'
try:
dt = self.to_utc_dt()
except:
raise MakeError('TypeError', 'unsupported date range. Will fix in future versions')
try:
return dt.strftime(pattern)
except:
raise MakeError('TypeError', 'Could not generate date string from this date (limitations of python.datetime)')
def parse_date(py_string): # todo support all date string formats
try:
dt = datetime.datetime.strptime(py_string, "%Y-%m-%dT%H:%M:%S.%fZ")
return MakeDate(MakeDay(Js(dt.year), Js(dt.month-1), Js(dt.day)), MakeTime(Js(dt.hour), Js(dt.minute), Js(dt.second), Js(dt.microsecond//1000)))
except:
raise MakeError('TypeError', 'Could not parse date %s - unsupported date format. Currently only supported format is RFC3339 utc. Sorry!' % py_string)
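# A minimal sketch of the only format parse_date() currently accepts (RFC 3339 in
# UTC); the timestamp below is illustrative, not taken from the original sources:
# >>> datetime.datetime.strptime("2017-01-02T03:04:05.678Z", "%Y-%m-%dT%H:%M:%S.%fZ")
# datetime.datetime(2017, 1, 2, 3, 4, 5, 678000)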
def date_constructor(*args):
if len(args)>=2:
return date_constructor2(*args)
elif len(args)==1:
return date_constructor1(args[0])
else:
return date_constructor0()
def date_constructor0():
return now()
def date_constructor1(value):
v = value.to_primitive()
if v._type()=='String':
v = parse_date(v.value)
else:
v = v.to_int()
return PyJsDate(TimeClip(v), prototype=DatePrototype)
def date_constructor2(*args):
y = args[0].to_number()
m = args[1].to_number()
l = len(args)
dt = args[2].to_number() if l>2 else Js(1)
h = args[3].to_number() if l>3 else Js(0)
mi = args[4].to_number() if l>4 else Js(0)
sec = args[5].to_number() if l>5 else Js(0)
mili = args[6].to_number() if l>6 else Js(0)
if not y.is_nan() and 0<=y.value<=99:
y = y + Js(1900)
t = TimeClip(LocalToUTC(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili))))
return PyJsDate(t, prototype=DatePrototype)
Date.create = date_constructor
DatePrototype = PyJsDate(float('nan'), prototype=ObjectPrototype)
def check_date(obj):
if obj.Class!='Date':
raise MakeError('TypeError', 'this is not a Date object')
class DateProto:
def toString():
check_date(this)
if this.value is NaN:
return 'Invalid Date'
offset = (UTCToLocal(this.value) - this.value)//msPerHour
return this.local_strftime('%a %b %d %Y %H:%M:%S GMT') + '%s00 (%s)' % (pad(offset, 2, True), GetTimeZoneName(this.value))
def toDateString():
check_date(this)
return this.local_strftime('%d %B %Y')
def toTimeString():
check_date(this)
return this.local_strftime('%H:%M:%S')
def toLocaleString():
check_date(this)
return this.local_strftime('%d %B %Y %H:%M:%S')
def toLocaleDateString():
check_date(this)
return this.local_strftime('%d %B %Y')
def toLocaleTimeString():
check_date(this)
return this.local_strftime('%H:%M:%S')
def valueOf():
check_date(this)
return this.value
def getTime():
check_date(this)
return this.value
def getFullYear():
check_date(this)
if this.value is NaN:
return NaN
return YearFromTime(UTCToLocal(this.value))
def getUTCFullYear():
check_date(this)
if this.value is NaN:
return NaN
return YearFromTime(this.value)
def getMonth():
check_date(this)
if this.value is NaN:
return NaN
return MonthFromTime(UTCToLocal(this.value))
def getDate():
check_date(this)
if this.value is NaN:
return NaN
return DateFromTime(UTCToLocal(this.value))
def getUTCMonth():
check_date(this)
if this.value is NaN:
return NaN
return MonthFromTime(this.value)
def getUTCDate():
check_date(this)
if this.value is NaN:
return NaN
return DateFromTime(this.value)
def getDay():
check_date(this)
if this.value is NaN:
return NaN
return WeekDay(UTCToLocal(this.value))
def getUTCDay():
check_date(this)
if this.value is NaN:
return NaN
return WeekDay(this.value)
def getHours():
check_date(this)
if this.value is NaN:
return NaN
return HourFromTime(UTCToLocal(this.value))
def getUTCHours():
check_date(this)
if this.value is NaN:
return NaN
return HourFromTime(this.value)
def getMinutes():
check_date(this)
if this.value is NaN:
return NaN
return MinFromTime(UTCToLocal(this.value))
def getUTCMinutes():
check_date(this)
if this.value is NaN:
return NaN
return MinFromTime(this.value)
def getSeconds():
check_date(this)
if this.value is NaN:
return NaN
return SecFromTime(UTCToLocal(this.value))
def getUTCSeconds():
check_date(this)
if this.value is NaN:
return NaN
return SecFromTime(this.value)
def getMilliseconds():
check_date(this)
if this.value is NaN:
return NaN
return msFromTime(UTCToLocal(this.value))
def getUTCMilliseconds():
check_date(this)
if this.value is NaN:
return NaN
return msFromTime(this.value)
def getTimezoneOffset():
check_date(this)
if this.value is NaN:
return NaN
return (this.value - UTCToLocal(this.value))//60000
def setTime(time):
check_date(this)
this.value = TimeClip(time.to_number().to_int())
return this.value
def setMilliseconds(ms):
check_date(this)
t = UTCToLocal(this.value)
tim = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int())
u = TimeClip(LocalToUTC(MakeDate(Day(t), tim)))
this.value = u
return u
def setUTCMilliseconds(ms):
check_date(this)
t = this.value
tim = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int())
u = TimeClip(MakeDate(Day(t), tim))
this.value = u
return u
# todo Complete all setters!
def toUTCString():
check_date(this)
return this.utc_strftime('%d %B %Y %H:%M:%S')
def toISOString():
check_date(this)
t = this.value
year = YearFromTime(t)
month, day, hour, minute, second, milli = pad(MonthFromTime(t)+1), pad(DateFromTime(t)), pad(HourFromTime(t)), pad(MinFromTime(t)), pad(SecFromTime(t)), pad(msFromTime(t))
return ISO_FORMAT % (unicode(year) if 0<=year<=9999 else pad(year, 6, True), month, day, hour, minute, second, milli)
def toJSON(key):
o = this.to_object()
tv = o.to_primitive('Number')
if tv.Class=='Number' and not tv.is_finite():
return this.null
toISO = o.get('toISOString')
if not toISO.is_callable():
raise this.MakeError('TypeError', 'toISOString is not callable')
return toISO.call(o, ())
def pad(num, n=2, sign=False):
'''Return the n-digit, zero-padded string representation of num; when sign is True, prefix '+' or '-'.'''
s = unicode(abs(num))
if len(s)<n:
s = '0'*(n-len(s)) + s
if not sign:
return s
if num>=0:
return '+'+s
else:
return '-'+s
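# Hedged doctest-style examples of pad(); the values are chosen for illustration:
# >>> pad(7)
# '07'
# >>> pad(7, 2, True)
# '+07'
# >>> pad(-3, 2, True)
# '-03'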
fill_prototype(DatePrototype, DateProto, default_attrs)
Date.define_own_property('prototype', {'value': DatePrototype,
'enumerable': False,
'writable': False,
'configurable': False})
DatePrototype.define_own_property('constructor', {'value': Date,
'enumerable': False,
'writable': True,
'configurable': True}) | gpl-2.0 |
shangwuhencc/scikit-learn | doc/tutorial/machine_learning_map/pyparsing.py | 258 | 137838 | # module pyparsing.py
#
# Copyright (c) 2003-2008 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!")::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.4.11"
__versionTime__ = "10 February 2008 17:28"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy,sys
import warnings
import re
import sre_constants
import xml.sax.saxutils
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
if sys.version_info[0] > 2:
__MAX_INT__ = sys.maxsize
__BASE_STRING__ = str
else:
__MAX_INT__ = sys.maxint
__BASE_STRING__ = basestring
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
def _str2dict(strg):
return dict( [(c,0) for c in strg] )
#~ return set( [c for c in strg] )
class _Constants(object):
pass
alphas = string.lowercase + string.uppercase
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = "\\"
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
__slots__ = ( "loc","msg","pstr","parserElement" )
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError, aname
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join( [line_str[:line_column],
markerString, line_str[line_column:]])
return line_str.strip()
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by validate() if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (len(results))
- by list index (results[0], results[1], etc.)
- by attribute (results.<resultsName>)
"""
__slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
def __new__(cls, toklist, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist, name=None, asList=True, modal=True ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
if isinstance(toklist, list):
self.__toklist = toklist[:]
else:
self.__toklist = [toklist]
self.__tokdict = dict()
# this line is related to debugging the asXML bug
#~ asList = False
if name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not toklist in (None,'',[]):
if isinstance(toklist,__BASE_STRING__):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),-1)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),-1)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,int):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = range(*i.indices(mylen))
removed.reverse()
# fixup indices in token dictionary
for name in self.__tokdict.keys():
occurrences = self.__tokdict[name]
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return self.__tokdict.has_key(k)
def __len__( self ): return len( self.__toklist )
def __bool__(self): return len( self.__toklist ) > 0
def __nonzero__( self ): return self.__bool__()
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( reversed(self.__toklist) )
def keys( self ):
"""Returns all named result keys."""
return self.__tokdict.keys()
def pop( self, index=-1 ):
"""Removes and returns item at specified index (default=last).
Will work with either numeric indices or dict-key indices."""
ret = self[index]
del self[index]
return ret
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given defaultValue or None if no
defaultValue is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name in self.__tokdict.keys():
occurrences = self.__tokdict[name]
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return [(k,self[k]) for k in self.__tokdict.keys()]
def values( self ):
"""Returns all named result values."""
return [ v[-1][0] for v in self.__tokdict.values() ]
def __getattr__( self, name ):
if name not in self.__slots__:
if self.__tokdict.has_key( name ):
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
return None
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
del other
return self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
out = "["
sep = ""
for i in self.__toklist:
if isinstance(i, ParseResults):
out += sep + _ustr(i)
else:
out += sep + repr(i)
sep = ", "
out += "]"
return out
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
out = []
for res in self.__toklist:
if isinstance(res,ParseResults):
out.append( res.asList() )
else:
out.append( res )
return out
def asDict( self ):
"""Returns the named parse results as dictionary."""
return dict( self.items() )
def copy( self ):
"""Returns a new copy of a ParseResults object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist ] )
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
worklist = self.__toklist
for i,res in enumerate(worklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = xml.sax.saxutils.escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
self.__tokdict.values()[0][0][1] in (0,-1)):
return self.__tokdict.keys()[0]
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a ParseResults.
Accepts an optional indent argument so that this string can be embedded
in a nested display of other data."""
out = []
out.append( indent+_ustr(self.asList()) )
keys = self.items()
keys.sort()
for k,v in keys:
if out:
out.append('\n')
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v.keys():
#~ out.append('\n')
out.append( v.dump(indent,depth+1) )
#~ out.append('\n')
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
#~ out.append('\n')
return "".join(out)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
self.__tokdict, \
par, \
inAccumNames, \
self.__name = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
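# A hedged usage sketch of the three ParseResults access styles named in the class
# docstring above (the grammar and input are illustrative only):
# >>> userdata = Word(alphas).setResultsName("name") + Word(nums).setResultsName("age")
# >>> result = userdata.parseString("Bob 42")
# >>> result.asList() # as a list
# ['Bob', '42']
# >>> result[0] # by index
# 'Bob'
# >>> result.age # by attribute
# '42'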
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR > 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
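# Hedged examples of the three location helpers above (the input is illustrative);
# loc 5 points at the 'e' on the second line:
# >>> s = "abc\ndef"
# >>> lineno(5, s), col(5, s)
# (2, 2)
# >>> line(5, s)
# 'def'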
def _defaultStartDebugAction( instring, loc, expr ):
print ("Match",_ustr(expr),"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched",_ustr(expr),"->",toks.asList())
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:", _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this ParserElement. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original ParserElement object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set breakFlag to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
_parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def _normalizeParseActionArgs( f ):
"""Internal method used to decorate parse actions that take fewer than 3 arguments,
so that all parse actions can be called as f(s,l,t)."""
STAR_ARGS = 4
try:
restore = None
if isinstance(f,type):
restore = f
f = f.__init__
if f.func_code.co_flags & STAR_ARGS:
return f
numargs = f.func_code.co_argcount
if hasattr(f,"im_self"):
numargs -= 1
if restore:
f = restore
except AttributeError:
try:
# not a function, must be a callable object, get info from the
# im_func binding of its bound __call__ method
if f.__call__.im_func.func_code.co_flags & STAR_ARGS:
return f
numargs = f.__call__.im_func.func_code.co_argcount
if hasattr(f.__call__,"im_self"):
numargs -= 1
except AttributeError:
# not a bound method, get info directly from __call__ method
if f.__call__.func_code.co_flags & STAR_ARGS:
return f
numargs = f.__call__.func_code.co_argcount
if hasattr(f.__call__,"im_self"):
numargs -= 1
#~ print ("adding function %s with %d args" % (f.func_name,numargs))
if numargs == 3:
return f
else:
if numargs == 2:
def tmp(s,l,t):
return f(l,t)
elif numargs == 1:
def tmp(s,l,t):
return f(t)
else: #~ numargs == 0:
def tmp(s,l,t):
return f()
try:
tmp.__name__ = f.__name__
except AttributeError:
# no need for special handling if attribute doesn't exist
pass
try:
tmp.__doc__ = f.__doc__
except AttributeError:
# no need for special handling if attribute doesn't exist
pass
try:
tmp.__dict__.update(f.__dict__)
except AttributeError:
# no need for special handling if attribute doesn't exist
pass
return tmp
_normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks),
fn(loc,toks), fn(toks), or just fn(), where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = map(self._normalizeParseActionArgs, list(fns))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += map(self._normalizeParseActionArgs, list(fns))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail action fn is a callable function that takes the arguments
fn(s,loc,expr,err) where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw ParseFatalException
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = loc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseException, err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = loc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseException, err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
return self._parse( instring, loc, doActions=False )[0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value,Exception):
if isinstance(value,ParseBaseException):
value.loc = loc
raise value
return (value[0],value[1].copy())
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException, pe:
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method ParserElement.enablePackrat(). If
your program uses psyco to "compile as you go", you must call
enablePackrat before calling psyco.full(). If you do not do this,
Python will crash. For best results, call enablePackrat() immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
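# A hedged usage sketch of the call order described in the enablePackrat docstring
# (the module name is the standard one; the grammar is whatever your program defines):
# import pyparsing
# pyparsing.ParserElement.enablePackrat() # enable memoizing before parsing begins
# # ... then define your grammar and call parseString()/scanString() as usual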
def parseString( self, instring ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
Note: parseString implicitly calls expandtabs() on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the loc argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling parseWithTabs on your grammar before calling parseString
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full (s,loc,toks) signature, and
reference the input string using the parse action's s argument
- explicitly expand the tabs in your input string before calling
parseString
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if self.keepTabs:
loc, tokens = self._parse( instring, 0 )
else:
loc, tokens = self._parse( instring.expandtabs(), 0 )
return tokens
def scanString( self, instring, maxMatches=__MAX_INT__ ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
maxMatches argument, to clip scanning after 'n' matches are found.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
matches += 1
yield tokens, preloc, nextLoc
loc = nextLoc
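# A hedged example of scanString() (grammar and input are illustrative); each match
# yields the tokens plus the start and end locations within the scanned string:
# >>> for tokens, start, end in Word(nums).scanString("a1 b22"):
# ... print tokens, start, end
# ['1'] 1 2
# ['22'] 4 6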
def transformString( self, instring ):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,out))
def searchString( self, instring, maxMatches=__MAX_INT__ ):
"""Another extension to scanString, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
maxMatches argument, to clip searching after 'n' matches are found.
"""
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __mul__(self,other):
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
if len(other)==2:
if isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("can only multiply 'ParserElement' and int or (int,int) objects")
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
ret = And([self]*minElements)+ makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
ret = And([self]*minElements)
return ret
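# Hedged examples of the multiplication semantics implemented above (expressions
# are illustrative): expr*3 is equivalent to And([expr]*3), and expr*(m,n) matches
# m required occurrences followed by up to n-m optional ones:
# >>> (Word(nums) * (1, 3)).parseString("1 2 3").asList()
# ['1', '2', '3']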
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns MatchFirst"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns Or"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns Each"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns NotAny"""
return NotAny( self )
def __call__(self, name):
"""Shortcut for setResultsName, with listAllMatches=default::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
return self.setResultsName(name)
def suppress( self ):
"""Suppresses the output of this ParserElement; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
ParserElement's defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
Must be called before parseString when the input grammar contains elements that
match <TAB> characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other )
else:
self.ignoreExprs.append( Suppress( other ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set flag to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
return self.parseString(file_contents)
def getException(self):
return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
self.myException = ret = self.getException();
return ret;
else:
raise AttributeError, "no such attribute " + aname
def __eq__(self,other):
if isinstance(other, __BASE_STRING__):
try:
(self + StringEnd()).parseString(_ustr(other))
return True
except ParseException:
return False
else:
return super(ParserElement,self)==other
def __req__(self,other):
return self == other
class Token(ParserElement):
"""Abstract ParserElement subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
#self.myException = ParseException("",0,"",self)
def setName(self, name):
s = super(Token,self).setName(name)
self.errmsg = "Expected " + self.name
#s.myException.msg = self.errmsg
return s
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
#self.myException.msg = self.errmsg
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
_L = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with Literal::
Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
Accepts two optional constructor arguments in addition to the keyword string:
identChars is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive
matching, default is False.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = _str2dict(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
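# A hedged demonstration of the Literal/Keyword distinction described in the
# Keyword docstring (the input strings are illustrative):
# >>> Literal("if").parseString("ifAndOnlyIf").asList()
# ['if']
# >>> Keyword("if").parseString("if x=1").asList()
# ['if']
# >>> Keyword("if").parseString("ifAndOnlyIf") # raises ParseException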
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for min is 1 (a
minimum value < 1 is not valid); the default values for max and exact
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
super(Word,self).__init__()
self.initCharsOrig = initChars
self.initChars = _str2dict(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = _str2dict(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = _str2dict(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError, "cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted"
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.bodyCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
return loc,result.group()
if not(instring[ loc ] in self.initChars):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
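# Usage sketch, added for illustration (not in the original source):
#
#   from pyparsing import Word, alphas, alphanums
#   identifier = Word(alphas + "_", alphanums + "_")
#   identifier.parseString("foo_42 = 1")            # -> ['foo_42']
#   Word(alphanums, exact=4).parseString("abcdef")  # -> ['abcd']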
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with a string specifying the regular expression, in a form recognized by the built-in Python re module.
"""
def __init__( self, pattern, flags=0):
"""The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if len(pattern) == 0:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error,e:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d.keys():
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
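# Usage sketch, added for illustration (not in the original source). Named
# groups in the pattern become results names on the returned tokens:
#
#   from pyparsing import Regex
#   date = Regex(r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)")
#   result = date.parseString("2004-05-31")
#   result["year"]   # -> '2004'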
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
"""
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=False)
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
"""
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if len(endQuoteChar) == 0:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error,e:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,__BASE_STRING__):
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
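# Usage sketch, added for illustration (not in the original source),
# showing both quote-escaping styles:
#
#   from pyparsing import QuotedString
#   QuotedString("'", escChar="\\").parseString(r"'isn\'t'")   # -> ["isn't"]
#   QuotedString("'", escQuote="''").parseString("'isn''t'")   # -> ["isn't"]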
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with a string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for min is 1 (a
minimum value < 1 is not valid); the default values for max and exact
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError, "cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted"
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
#self.myException.msg = self.errmsg
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
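# Usage sketch, added for illustration (not in the original source):
#
#   from pyparsing import CharsNotIn
#   csvField = CharsNotIn(",\n")
#   csvField.parseString("red,green,blue")   # -> ['red']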
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is " \\t\\n". Also takes optional min, max, and exact arguments,
as defined for the Word class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
#~ self.leaveWhitespace()
self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( " \t" )
self.errmsg = "Expected start of line"
#self.myException.msg = self.errmsg
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
#~ raise ParseException( instring, loc, "Expected start of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( " \t" )
self.errmsg = "Expected end of line"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
#~ raise ParseException( instring, loc, "Expected end of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
#~ raise ParseException( instring, loc, "Expected start of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
#~ raise ParseException( instring, loc, "Expected end of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of wordChars
(default=printables). To emulate the \\b behavior of regular expressions,
use WordStart(alphanums). WordStart will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = _str2dict(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of wordChars
(default=printables). To emulate the \\b behavior of regular expressions,
use WordEnd(alphanums). WordEnd will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = _str2dict(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
#~ raise ParseException( instring, loc, "Expected end of word" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, list ):
self.exprs = exprs
elif isinstance( exprs, __BASE_STRING__ ):
self.exprs = [ Literal( exprs ) ]
else:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
class And(ParseExpression):
"""Requires all given ParseExpressions to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the '+' operator.
"""
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.setWhitespaceChars( exprs[0].whiteChars )
self.skipWhitespace = exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
for e in self.exprs[1:]:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.keys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one ParseExpression is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the '^' operator.
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if self.exprs:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one ParseExpression is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the '|' operator.
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if exprs:
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if self.exprs:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
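# Illustrative sketch (not part of the original module), contrasting
# Or ('^', longest match wins) with MatchFirst ('|', first listed wins):
#
#   from pyparsing import Word, nums
#   (Word(nums) ^ Word(nums + ".")).parseString("3.14")   # -> ['3.14']
#   (Word(nums) | Word(nums + ".")).parseString("3.14")   # -> ['3']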
class Each(ParseExpression):
"""Requires all given ParseExpressions to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the '&' operator.
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.skipWhitespace = True
self.optionals = [ e.expr for e in exprs if isinstance(e,Optional) ]
self.multioptionals = [ e.expr for e in exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
def parseImpl( self, instring, loc, doActions=True ):
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults.keys():
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
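# Illustrative sketch (not part of the original module): '&' requires all
# expressions to match, in any input order:
#
#   from pyparsing import Keyword
#   shape = Keyword("circle") & Keyword("red")
#   shape.parseString("red circle")   # -> ['red', 'circle']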
class ParseElementEnhance(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, __BASE_STRING__ ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. FollowedBy
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. FollowedBy always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. NotAny
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, NotAny does *not* skip over leading whitespace. NotAny
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
#~ raise ParseException(instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
def __bool__(self):
return False
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
"""
def __init__( self, exprs, default=_optionalNotMatched ):
super(Optional,self).__init__( exprs, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
If include is set to true, the matched expression is also consumed. The ignore
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
"""
def __init__( self, other, include=False, ignore=None ):
super( SkipTo, self ).__init__( other )
if ignore is not None:
self.expr = self.expr.copy()
self.expr.ignore(ignore)
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
self.errmsg = "No match found for "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
while loc <= instrlen:
try:
loc = expr._skipIgnorables( instring, loc )
expr._parse( instring, loc, doActions=False, callPreParse=False )
if self.includeMatch:
skipText = instring[startLoc:loc]
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ instring[startLoc:loc] ]
except (ParseException,IndexError):
loc += 1
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
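# Usage sketch, added for illustration (not in the original source). The
# skipped text is returned as a single token; the target is not consumed
# unless include=True:
#
#   from pyparsing import SkipTo, Literal
#   logLine = SkipTo(Literal("ERROR")) + Literal("ERROR")
#   logLine.parseString("10:32:01 disk full ERROR halting")
#   # -> ['10:32:01 disk full ', 'ERROR']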
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the Forward variable using the '<<' operator.
Note: take care when assigning to Forward not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the Forward::
fwdExpr << (a | b | c)
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, __BASE_STRING__ ):
other = Literal(other)
self.expr = other
self.mayReturnEmpty = other.mayReturnEmpty
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return None
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = Forward
return "Forward: "+retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret << self
return ret
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of ParseExpression, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return map( string.upper, tokenlist )
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying 'adjacent=False' in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and len(retToks.keys())>0:
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
Useful for tabular report scraping when the first column can be used as an item key.
"""
def __init__( self, exprs ):
super(Dict,self).__init__( exprs )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = ParserElement._normalizeParseActionArgs(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = ParserElement._normalizeParseActionArgs(f)
def z(*paArgs):
thisFunc = f.func_name
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception, exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace and
comments, but this can be overridden by passing combine=True.
If combine is set to True, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
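# Usage sketch, added for illustration (not in the original source):
#
#   from pyparsing import Word, alphas, delimitedList
#   delimitedList(Word(alphas)).parseString("a, b, c")              # -> ['a', 'b', 'c']
#   delimitedList(Word(alphas), combine=True).parseString("a,b,c")  # -> ['a,b,c']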
def countedArray( expr ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = int(t[0])
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
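# Usage sketch, added for illustration (not in the original source): the
# leading count is consumed, and the counted tokens come back as one group:
#
#   from pyparsing import Word, alphas, countedArray
#   countedArray(Word(alphas)).parseString("2 ab cd ef").asList()   # -> [['ab', 'cd']]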
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match "1:1", but not "1:2". Because this matches a
previous literal, will also match the leading "1:1" in "1:10".
If this is not desired, use matchPreviousExpr.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And( [ Literal(tt) for tt in tflat ] )
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match "1:1", but not "1:2". Because this matches by
expressions, will *not* match the leading "1:1" in "1:10";
the expressions are evaluated first, and then compared, so
"1" is compared with "10".
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep << e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,"\\"+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a MatchFirst for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a MatchFirst object (if caseless=True, or
if creating a Regex raises an exception)
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
if isinstance(strs,(list,tuple)):
symbols = strs[:]
elif isinstance(strs,__BASE_STRING__):
symbols = strs.split()
else:
warnings.warn("Invalid argument to oneOf, expected string or list",
SyntaxWarning, stacklevel=2)
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
else:
return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
except:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
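# Usage sketch, added for illustration (not in the original source): listing
# order does not matter; longer literals are tested before their prefixes:
#
#   from pyparsing import oneOf
#   comparisonOp = oneOf("< > = <= >= !=")
#   comparisonOp.parseString("<= 5")   # -> ['<='], not ['<']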
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the Dict, ZeroOrMore, and Group tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the Dict results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
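# Usage sketch, added for illustration (not in the original source):
#
#   from pyparsing import Word, alphas, nums, Suppress, dictOf
#   attrs = dictOf(Word(alphas) + Suppress(":"), Word(nums))
#   result = attrs.parseString("width: 80 height: 24")
#   result["height"]   # -> '24'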
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
try:
return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
except:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with transformString().
"""
def _replFunc(*args):
return [replStr]
return _replFunc
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
def upcaseTokens(s,l,t):
"""Helper parse action to convert tokens to upper case."""
return [ tt.upper() for tt in map(_ustr,t) ]
def downcaseTokens(s,l,t):
"""Helper parse action to convert tokens to lower case."""
return [ tt.lower() for tt in map(_ustr,t) ]
def keepOriginalText(s,startLoc,t):
"""Helper parse action to preserve original parsed text,
overriding any nested parse actions."""
try:
endloc = getTokensEndLoc()
except ParseException:
raise ParseFatalException, "incorrect usage of keepOriginalText - may only be called as a parse action"
del t[:]
t += ParseResults(s[startLoc:endloc])
return t
def getTokensEndLoc():
"""Method to be called from within a parse action to determine the end
location of the parsed tokens."""
import inspect
fstack = inspect.stack()
try:
# search up the stack (through intervening argument normalizers) for correct calling routine
for f in fstack[2:]:
if f[3] == "_parseNoCache":
endloc = f[0].f_locals["loc"]
return endloc
else:
raise ParseFatalException, "incorrect usage of getTokensEndLoc - may only be called from within a parse action"
finally:
del fstack
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,__BASE_STRING__):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML, given a tag name"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML, given a tag name"""
return _makeTags( tagStr, True )
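# Usage sketch, added for illustration (not in the original source). Tag
# attributes become named results on the opening-tag tokens:
#
#   from pyparsing import makeHTMLTags, SkipTo
#   aOpen, aClose = makeHTMLTags("a")
#   link = aOpen + SkipTo(aClose).setResultsName("text") + aClose
#   result = link.parseString('<a href="x.html">click here</a>')
#   result.text   # -> 'click here'
#   result.href   # -> 'x.html'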
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start tags created
with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
<TD> or <DIV>.
Call withAttribute with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in (class="Customer",align="right"), or
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched without regard to upper/lower case.
To verify that the attribute exists, but without specifying a value, pass
withAttribute.ANY_VALUE as the value.
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
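# Usage sketch, added for illustration (not in the original source):
#
#   td, tdEnd = makeHTMLTags("td")
#   td.setParseAction(withAttribute(align="right"))
#   # td now matches only <td> tags carrying align="right"; pass
#   # withAttribute.ANY_VALUE to require the attribute with any value.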
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
- baseExpr - expression representing the most basic element of the nested expression hierarchy
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal
- numTerms is the number of terms for this operator (must
be 1 or 2)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants opAssoc.RIGHT and opAssoc.LEFT.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
"""
ret = Forward()
lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
thisExpr = Forward()#.setName("expr%d" % i)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = Group( FollowedBy(lastExpr + opExpr) + lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
matchExpr = Group( FollowedBy(lastExpr + opExpr + lastExpr) + lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
raise ValueError, "operator must be unary (1) or binary (2)"
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
matchExpr = Group( FollowedBy(lastExpr + opExpr + thisExpr) + lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
raise ValueError, "operator must be unary (1) or binary (2)"
else:
raise ValueError, "operator must indicate right or left associativity"
if pa:
matchExpr.setParseAction( pa )
thisExpr << ( matchExpr | lastExpr )
lastExpr = thisExpr
ret << lastExpr
return ret
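# Usage sketch, added for illustration (not in the original source):
# precedence levels are listed highest first, and each match is grouped:
#
#   from pyparsing import Word, nums, operatorPrecedence, opAssoc
#   arith = operatorPrecedence(Word(nums), [
#       ("*", 2, opAssoc.LEFT),
#       ("+", 2, opAssoc.LEFT),
#   ])
#   arith.parseString("1+2*3").asList()   # -> [['1', '+', ['2', '*', '3']]]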
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the ignoreExpr argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an Or or MatchFirst.
The default is quotedString, but if no expressions are to be ignored,
then pass None for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,__BASE_STRING__) and isinstance(closer,__BASE_STRING__):
content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret
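# Usage sketch, added for illustration (not in the original source):
#
#   from pyparsing import nestedExpr
#   nestedExpr().parseString("(a (b c) d)").asList()   # -> [['a', ['b', 'c'], 'd']]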
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";")
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),"><& '"))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
def test( teststring ):
print (teststring,"->",)
try:
tokens = simpleSQL.parseString( teststring )
tokenlist = tokens.asList()
print (tokenlist)
print ("tokens = ", tokens)
print ("tokens.columns =", tokens.columns)
print ("tokens.tables =", tokens.tables)
print (tokens.asXML("SQL",True))
except ParseException,err:
print (err.line)
print (" "*(err.column-1) + "^")
print (err)
print()
selectToken = CaselessLiteral( "select" )
fromToken = CaselessLiteral( "from" )
ident = Word( alphas, alphanums + "_$" )
columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
simpleSQL = ( selectToken + \
( '*' | columnNameList ).setResultsName( "columns" ) + \
fromToken + \
tableNameList.setResultsName( "tables" ) )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select AA,BB,CC from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select ^^^ frox Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
| bsd-3-clause |
gunan/tensorflow | tensorflow/python/kernel_tests/rnn_test.py | 1 | 32669 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class Plus1RNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def call(self, input_, state, scope=None):
return (input_ + 1, state + 1)
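# Illustrative usage sketch (assumption, not part of the original tests):
# a minimal cell like Plus1RNNCell plugs straight into dynamic_rnn.
#
#   cell = Plus1RNNCell()
#   inputs = np.ones((3, 4, 5), dtype=np.float32)   # batch x time x depth
#   outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
#   # outputs == inputs + 1 at every step; state ends at 4.0
#   # (the zero initial state is incremented once per time step)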
class ScalarStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def call(self, input_, state, scope=None):
return (input_, state + 1)
class UnbalancedOutputRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return tensor_shape.TensorShape(1), tensor_shape.TensorShape((2))
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def call(self, input_, state, scope=None):
concatenated = array_ops.concat((input_, input_), axis=-1)
return (input_, concatenated), state + 1
class TensorArrayStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell its state as a TensorArray."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return (tensor_shape.TensorShape([]), ())
def zero_state(self, batch_size, dtype):
return (array_ops.zeros([], dtype=dtypes.int32),
tensor_array_ops.TensorArray(
dtype=dtype, size=0, dynamic_size=True))
def call(self, input_, state, scope=None):
new_array = state[1].write(state[0], input_)
return (input_, (state[0] + 1, new_array))
class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_in_graph_and_eager_modes
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
if context.executing_eagerly():
inputs = [constant_op.constant(np.ones((3, 4)))]
else:
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
rnn.dynamic_rnn(
cell,
array_ops.stack(inputs),
dtype=dtypes.float32,
sequence_length=[[4]])
@test_util.run_in_graph_and_eager_modes
def testInvalidDtype(self):
if context.executing_eagerly():
inputs = np.zeros((3, 4, 5), dtype=np.int32)
else:
inputs = array_ops.placeholder(dtypes.int32, shape=(3, 4, 5))
cells = [
rnn_cell_impl.BasicRNNCell,
rnn_cell_impl.GRUCell,
rnn_cell_impl.BasicLSTMCell,
rnn_cell_impl.LSTMCell,
]
for cell_cls in cells:
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, "RNN cell only supports floating"):
cell = cell_cls(2, dtype=dtypes.int32)
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.int32)
@test_util.run_in_graph_and_eager_modes
def testBatchSizeFromInput(self):
cell = Plus1RNNCell()
in_eager_mode = context.executing_eagerly()
# With static batch size
if in_eager_mode:
inputs = np.zeros((3, 4, 5), dtype=np.float32)
initial_state = np.zeros((3, 5), dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, 5))
initial_state = array_ops.placeholder(dtypes.float32, shape=(3, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell, inputs, initial_state=initial_state)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# Without static batch size
# Tensor shapes are fully determined with eager execution enabled,
# so only run this test for graph construction.
if not in_eager_mode:
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(None, outputs.shape.dims[0].value)
self.assertEqual(None, state.shape.dims[0].value)
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell,
inputs,
initial_state=array_ops.placeholder(dtypes.float32, shape=(None, 5)))
self.assertEqual(None, outputs.shape.dims[0].value)
self.assertEqual(None, state.shape.dims[0].value)
@test_util.run_in_graph_and_eager_modes
def testScalarStateIsAccepted(self):
cell = ScalarStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state)
@test_util.run_in_graph_and_eager_modes
def testUnbalancedOutputIsAccepted(self):
cell = UnbalancedOutputRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertIsInstance(outputs, tuple)
self.assertAllEqual([[[1], [2], [3], [4]]], outputs[0])
self.assertAllEqual([[[1, 1], [2, 2], [3, 3], [4, 4]]], outputs[1])
self.assertAllEqual(4, state)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerMemory(self):
with context.eager_mode():
cell = TensorArrayStateRNNCell()
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=[4])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testTensorArrayStateIsAccepted(self):
cell = TensorArrayStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
state = (state[0], state[1].stack())
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={
inputs: [[[1], [2], [3], [4]]]
})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state[0])
self.assertAllEqual([[[1]], [[2]], [[3]], [[4]]], state[1])
@test_util.run_deprecated_v1
def testCellGetInitialState(self):
cell = rnn_cell_impl.BasicRNNCell(5)
with self.assertRaisesRegexp(
ValueError, "batch_size and dtype cannot be None"):
cell.get_initial_state(None, None, None)
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 1))
with self.assertRaisesRegexp(
ValueError, "batch size from input tensor is different from"):
cell.get_initial_state(inputs=inputs, batch_size=50, dtype=None)
with self.assertRaisesRegexp(
ValueError, "batch size from input tensor is different from"):
cell.get_initial_state(
inputs=inputs, batch_size=constant_op.constant(50), dtype=None)
with self.assertRaisesRegexp(
ValueError, "dtype from input tensor is different from"):
cell.get_initial_state(inputs=inputs, batch_size=None, dtype=dtypes.int16)
initial_state = cell.get_initial_state(
inputs=inputs, batch_size=None, dtype=None)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
batch = array_ops.shape(inputs)[0]
dtype = inputs.dtype
initial_state = cell.get_initial_state(None, batch, dtype)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
def _assert_cell_builds(self, cell_class, dtype, batch_size, in_size,
out_size):
cell = cell_class(out_size, dtype=dtype)
in_shape = tensor_shape.TensorShape((batch_size, in_size))
cell.build(in_shape)
state_output = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output)
self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list())
@test_util.run_in_graph_and_eager_modes
def testCellsBuild(self):
f32 = dtypes.float32
f64 = dtypes.float64
self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.GRUCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.GRUCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f64, 5, 7, 3)
@test_util.run_deprecated_v1
def testBasicLSTMCellInterchangeWithLSTMCell(self):
with self.session(graph=ops_lib.Graph()) as sess:
basic_cell = rnn_cell_impl.BasicLSTMCell(1)
basic_cell(array_ops.ones([1, 1]),
state=basic_cell.get_initial_state(inputs=None,
batch_size=1,
dtype=dtypes.float32))
self.evaluate([v.initializer for v in basic_cell.variables])
self.evaluate(basic_cell._bias.assign([10.] * 4))
save = saver.Saver()
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = save.save(sess, prefix)
with self.session(graph=ops_lib.Graph()) as sess:
lstm_cell = rnn_cell_impl.LSTMCell(1, name="basic_lstm_cell")
lstm_cell(array_ops.ones([1, 1]),
state=lstm_cell.get_initial_state(inputs=None,
batch_size=1,
dtype=dtypes.float32))
self.evaluate([v.initializer for v in lstm_cell.variables])
save = saver.Saver()
save.restore(sess, save_path)
self.assertAllEqual([10.] * 4, self.evaluate(lstm_cell._bias))
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
batch_size = 512
num_units = 512
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
def _create_static_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
_static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length)
def _create_dynamic_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
_static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
(max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _timer(sess, ops):
  # Warm up
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
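# Illustrative call pattern (assumption, not part of the original file):
# _timer expects a live session plus a fetchable op, and returns the mean
# wall time per run, e.g.
#
#   with session.Session(config=config, graph=ops_lib.Graph()) as sess:
#       variables_lib.global_variables_initializer().run()
#       mean_step_time = _timer(sess, ops)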
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
variables_lib.global_variables_initializer().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
(batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Halve the sequence length, full static unroll
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
delta_unroll_half, delta_half_seq_len / delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + final_state,
trainable_variables)
return control_flow_ops.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Run with concatenated states (default)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
variables_lib.global_variables_initializer().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
variables_lib.global_variables_initializer().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
delta_tuple_state, delta_concat_state / delta_tuple_state))
return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell,
inputs_t,
sequence_length=sequence_length,
swap_memory=swap_memory,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
variables_lib.global_variables_initializer().run()
no_swap = _timer(sess, ops)
# Memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
variables_lib.global_variables_initializer().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
(batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
swap_memory, nn):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(seqlen)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(nn):
if dynamic:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
else:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
dynamic, elapsed,
elapsed / seqlen))
class BenchmarkRNN(test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
self.report_benchmark(
name="graph_creation_time_static_T%02d" % max_time,
iters=5,
wall_time=s_dt)
self.report_benchmark(
name="graph_creation_time_dynamic_T%02d" % max_time,
iters=5,
wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
"vs. Dynamic Unroll LSTM")
print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
"\t dt(dynamic)/dt(static)")
for batch_size in (256,):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
num_units, use_gpu)
self.report_benchmark(
name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
max_time, num_units)
self.report_benchmark(
name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=no_swap)
self.report_benchmark(
name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
"vs. Half Static Unroll")
print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
"\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
for batch_size in (128,):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
for batch_size in (
16,
128,):
for max_time in (50,):
for num_units in (
16,
128,):
for use_gpu in (False, True):
c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=c_dt)
self.report_benchmark(
name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=t_dt)
def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
"""The memory swapping test for the SOSP submission."""
print("Calculation: Long LSTM Sequence")
print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
batch_size = 512
seqlen = 800
num_units = 512
dynamic = True
swap_memory = True
# Some warming up.
if swap_memory:
rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
dynamic, swap_memory, 2)
# Measure the performance.
for slen in xrange(100, 1100, 100):
rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
swap_memory, 3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
deepsrijit1105/edx-platform | lms/djangoapps/commerce/models.py | 14 | 1394 | """
Commerce-related models.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from config_models.models import ConfigurationModel
class CommerceConfiguration(ConfigurationModel):
""" Commerce configuration """
class Meta(object):
app_label = "commerce"
API_NAME = 'commerce'
CACHE_KEY = 'commerce.api.data'
checkout_on_ecommerce_service = models.BooleanField(
default=False,
help_text=_('Use the checkout page hosted by the E-Commerce service.')
)
single_course_checkout_page = models.CharField(
max_length=255,
default='/basket/single-item/',
help_text=_('Path to single course checkout page hosted by the E-Commerce service.')
)
cache_ttl = models.PositiveIntegerField(
verbose_name=_('Cache Time To Live'),
default=0,
help_text=_(
'Specified in seconds. Enable caching by setting this to a value greater than 0.'
)
)
receipt_page = models.CharField(
max_length=255,
default='/commerce/checkout/receipt/?orderNum=',
help_text=_('Path to order receipt page.')
)
def __unicode__(self):
return "Commerce configuration"
@property
def is_cache_enabled(self):
"""Whether responses from the Ecommerce API will be cached."""
return self.cache_ttl > 0
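# Illustrative usage sketch (assumption, not part of the original file):
# ConfigurationModel exposes a current() accessor for the active row, so a
# caller would typically do (redirect_to_ecommerce() is hypothetical):
#
#   config = CommerceConfiguration.current()
#   if config.checkout_on_ecommerce_service:
#       redirect_to_ecommerce(config.single_course_checkout_page)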
| agpl-3.0 |
redhat-openstack/trove | trove/tests/unittests/secgroups/test_security_group.py | 2 | 6416 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from mock import Mock
from mock import patch
from novaclient import exceptions as nova_exceptions
from trove.common import exception
import trove.common.remote
from trove.extensions.security_group import models as sec_mod
from trove.instance import models as inst_model
from trove.tests.fakes import nova
from trove.tests.unittests import trove_testtools
"""
Unit tests for the exceptions raised by Security Groups.
"""
class Security_Group_Exceptions_Test(trove_testtools.TestCase):
def setUp(self):
super(Security_Group_Exceptions_Test, self).setUp()
self.createNovaClient = trove.common.remote.create_nova_client
self.context = trove_testtools.TroveTestContext(self)
self.FakeClient = nova.fake_create_nova_client(self.context)
fException = Mock(
side_effect=lambda *args, **kwargs: self._raise(
nova_exceptions.ClientException("Test")))
self.FakeClient.security_groups.create = fException
self.FakeClient.security_groups.delete = fException
self.FakeClient.security_group_rules.create = fException
self.FakeClient.security_group_rules.delete = fException
trove.common.remote.create_nova_client = (
lambda c: self._return_mocked_nova_client(c))
def tearDown(self):
super(Security_Group_Exceptions_Test, self).tearDown()
trove.common.remote.create_nova_client = self.createNovaClient
def _return_mocked_nova_client(self, context):
return self.FakeClient
def _raise(self, ex):
raise ex
@patch('trove.network.nova.LOG')
def test_failed_to_create_security_group(self, mock_logging):
self.assertRaises(exception.SecurityGroupCreationError,
sec_mod.RemoteSecurityGroup.create,
"TestName",
"TestDescription",
self.context)
@patch('trove.network.nova.LOG')
def test_failed_to_delete_security_group(self, mock_logging):
self.assertRaises(exception.SecurityGroupDeletionError,
sec_mod.RemoteSecurityGroup.delete,
1, self.context)
@patch('trove.network.nova.LOG')
def test_failed_to_create_security_group_rule(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleCreationError,
sec_mod.RemoteSecurityGroup.add_rule,
1, "tcp", 3306, 3306, "0.0.0.0/0", self.context)
@patch('trove.network.nova.LOG')
def test_failed_to_delete_security_group_rule(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleDeletionError,
sec_mod.RemoteSecurityGroup.delete_rule,
1, self.context)
class fake_RemoteSecGr(object):
def data(self):
self.id = uuid.uuid4()
return {'id': self.id}
def delete(self, context):
pass
class fake_SecGr_Association(object):
def get_security_group(self):
return fake_RemoteSecGr()
def delete(self):
pass
class SecurityGroupDeleteTest(trove_testtools.TestCase):
def setUp(self):
super(SecurityGroupDeleteTest, self).setUp()
self.inst_model_conf_patch = patch.object(inst_model, 'CONF')
self.inst_model_conf_mock = self.inst_model_conf_patch.start()
self.addCleanup(self.inst_model_conf_patch.stop)
self.context = trove_testtools.TroveTestContext(self)
self.original_find_by = (
sec_mod.SecurityGroupInstanceAssociation.find_by)
self.original_delete = sec_mod.SecurityGroupInstanceAssociation.delete
self.fException = Mock(
side_effect=lambda *args, **kwargs: self._raise(
exception.ModelNotFoundError()))
def tearDown(self):
super(SecurityGroupDeleteTest, self).tearDown()
(sec_mod.SecurityGroupInstanceAssociation.
find_by) = self.original_find_by
(sec_mod.SecurityGroupInstanceAssociation.
delete) = self.original_delete
def _raise(self, ex):
raise ex
def test_failed_to_get_assoc_on_delete(self):
sec_mod.SecurityGroupInstanceAssociation.find_by = self.fException
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
uuid.uuid4(), self.context))
def test_get_security_group_from_assoc_with_db_exception(self):
fException = Mock(
side_effect=lambda *args, **kwargs: self._raise(
nova_exceptions.ClientException('TEST')))
i_id = uuid.uuid4()
class new_fake_RemoteSecGrAssoc(object):
def get_security_group(self):
return None
def delete(self):
return fException
sec_mod.SecurityGroupInstanceAssociation.find_by = Mock(
return_value=new_fake_RemoteSecGrAssoc())
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
i_id, self.context))
def test_delete_secgr_assoc_with_db_exception(self):
i_id = uuid.uuid4()
sec_mod.SecurityGroupInstanceAssociation.find_by = Mock(
return_value=fake_SecGr_Association())
sec_mod.SecurityGroupInstanceAssociation.delete = self.fException
self.assertNotEqual(sec_mod.SecurityGroupInstanceAssociation.find_by(
i_id, deleted=False).get_security_group(), None)
self.assertTrue(hasattr(sec_mod.SecurityGroupInstanceAssociation.
find_by(i_id, deleted=False).
get_security_group(), 'delete'))
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
i_id, self.context))
| apache-2.0 |
thaumos/ansible | test/units/modules/network/voss/test_voss_command.py | 38 | 4637 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from units.modules.utils import set_module_args
from ansible.modules.network.voss import voss_command
from .voss_module import TestVossModule, load_fixture
class TestVossCommandModule(TestVossModule):
module = voss_command
def setUp(self):
super(TestVossCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.voss.voss_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestVossCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_voss_command_simple(self):
set_module_args(dict(commands=['show sys-info']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('General Info'))
def test_voss_command_multiple(self):
set_module_args(dict(commands=['show sys-info', 'show sys-info']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('General Info'))
def test_voss_command_wait_for(self):
wait_for = 'result[0] contains "General Info"'
set_module_args(dict(commands=['show sys-info'], wait_for=wait_for))
self.execute_module()
def test_voss_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show sys-info'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_voss_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show sys-info'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_voss_command_match_any(self):
wait_for = ['result[0] contains "General Info"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show sys-info'], wait_for=wait_for, match='any'))
self.execute_module()
def test_voss_command_match_all(self):
wait_for = ['result[0] contains "General Info"',
'result[0] contains "Chassis Info"']
set_module_args(dict(commands=['show sys-info'], wait_for=wait_for, match='all'))
self.execute_module()
def test_voss_command_match_all_failure(self):
wait_for = ['result[0] contains "General Info"',
'result[0] contains "test string"']
commands = ['show sys-info', 'show sys-info']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
def test_voss_command_configure_error(self):
commands = ['configure terminal']
set_module_args({
'commands': commands,
'_ansible_check_mode': True,
})
result = self.execute_module(failed=True)
self.assertEqual(
result['msg'],
'voss_command does not support running config mode commands. Please use voss_config instead'
)
| gpl-3.0 |
archf/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py | 18 | 3802 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_net_facts
short_description: Gather facts about ec2 VPCs in AWS
description:
- Gather facts about ec2 VPCs in AWS
version_added: "2.1"
author: "Rob White (@wimnat)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all VPCs
- ec2_vpc_net_facts:
# Gather facts about a particular VPC using VPC ID
- ec2_vpc_net_facts:
filters:
vpc-id: vpc-00112233
# Gather facts about any VPC with a tag key Name and value Example
- ec2_vpc_net_facts:
filters:
"tag:Name": Example
'''
import traceback
try:
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils._text import to_native
def get_vpc_info(vpc):
try:
classic_link = vpc.classic_link_enabled
except AttributeError:
classic_link = False
vpc_info = { 'id': vpc.id,
'instance_tenancy': vpc.instance_tenancy,
'classic_link_enabled': classic_link,
'dhcp_options_id': vpc.dhcp_options_id,
'state': vpc.state,
'is_default': vpc.is_default,
'cidr_block': vpc.cidr_block,
'tags': vpc.tags
}
return vpc_info
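# Illustrative return value (assumption; the IDs and CIDR are invented):
#
#   {'id': 'vpc-00112233', 'instance_tenancy': 'default',
#    'classic_link_enabled': False, 'dhcp_options_id': 'dopt-00112233',
#    'state': 'available', 'is_default': False,
#    'cidr_block': '10.0.0.0/16', 'tags': {'Name': 'Example'}}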
def list_ec2_vpcs(connection, module):
filters = module.params.get("filters")
vpc_dict_array = []
try:
all_vpcs = connection.get_all_vpcs(filters=filters)
except BotoServerError as e:
module.fail_json(msg=e.message)
for vpc in all_vpcs:
vpc_dict_array.append(get_vpc_info(vpc))
module.exit_json(vpcs=vpc_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, Exception) as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
else:
module.fail_json(msg="region must be specified")
list_ec2_vpcs(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
juneJuly/MFFA | sp_stream.py | 20 | 2074 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
/*
* Android media framework fuzzer
* Copyright (c) 2015, Intel Corporation.
* Author: Alexandru Blanda (ioan-alexandru.blanda@intel.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
"""
from os import listdir
import sys
import subprocess
import re
import time
from utils import *
if sys.argv[1] == '-h':
print 'Usage:\n'
print 'python sp_stream.py <path_seed_files> <seed_number> <device_id>'
print 'path_seed_files - path to directory containing the seed files'
print 'seed_number - number of the seed file to start from'
print 'device_id - device id\n'
sys.exit()
seed_files = listdir(sys.argv[1])
length = len(seed_files)
start = int(sys.argv[2])
device_id = sys.argv[3]
# flush logcat buffer if a new campaign is started
if start == 0:
flush_log(device_id)
for x in range(start, length):
print '***** Sending file: ' + str(x) + ' - ' + seed_files[x]
    # push the file to the device
    cmd = 'adb -s ' + device_id + ' push ' + "'" + sys.argv[1] + '/' \
          + seed_files[x] + "'" + " '/data/Music/" + seed_files[x] + "'"
    run_subproc(cmd)
    # log the file being sent to the device
    cmd = 'adb -s ' + device_id \
          + " shell log -p F -t Stagefright - sp_stream '*** " + str(x) \
          + " - Filename:'" + seed_files[x]
    run_subproc(cmd)
    # try to decode the audio file
    cmd = 'timeout 15 adb -s ' + device_id \
          + " shell stream '/data/Music/" + seed_files[x] + "'"
    run_subproc(cmd)
    # remove the file from the device
    cmd = 'adb -s ' + device_id + ' shell rm /data/Music/*'
    run_subproc(cmd)
| gpl-2.0 |
smartboyathome/Wonderland-Engine | Doorknob/DBWrapper.py | 1 | 11354 | '''
Copyright (c) 2012 Alexander Abbott
This file is part of the Cheshire Cyber Defense Scoring Engine (henceforth
referred to as Cheshire).
Cheshire is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Cheshire is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
more details.
You should have received a copy of the GNU Affero General Public License
along with Cheshire. If not, see <http://www.gnu.org/licenses/>.
'''
import abc
class DBWrapper(object):
__metaclass__ = abc.ABCMeta
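    # Illustrative subclass sketch (assumption, not part of the original
    # file): a concrete backend implements every abstract method below,
    # e.g. a hypothetical Mongo-backed wrapper:
    #
    #   class MongoDBWrapper(DBWrapper):
    #       def __init__(self, connection):
    #           self._connection = connection
    #       def close(self):
    #           self._connection.close()
    #       # ...one override per remaining abstract method...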
@abc.abstractmethod
def close(self):
raise NotImplementedError()
@abc.abstractmethod
def get_all_teams(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def create_team(self, team_name, team_id):
raise NotImplementedError()
@abc.abstractmethod
def modify_team(self, team_id, **data):
raise NotImplementedError()
@abc.abstractmethod
def delete_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_scores_for_all_teams(self):
raise NotImplementedError()
@abc.abstractmethod
def get_score_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def calculate_scores_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_team_configs_for_all_machines(self):
raise NotImplementedError()
@abc.abstractmethod
def get_team_config_for_all_machines(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_team_config_for_machine(self, team_id, machine_id):
raise NotImplementedError()
@abc.abstractmethod
def create_team_config_for_machine(self, team_id, machine_id, **data):
raise NotImplementedError()
@abc.abstractmethod
def modify_team_config_for_machine(self, team_id, machine_id, **data):
raise NotImplementedError()
@abc.abstractmethod
def delete_team_config_for_machine(self, team_id, machine_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_machines(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_machine(self, machine_id):
raise NotImplementedError()
@abc.abstractmethod
def create_machine(self, machine_id, general_ip):
raise NotImplementedError()
@abc.abstractmethod
def modify_machine(self, machine_id, **data):
raise NotImplementedError()
@abc.abstractmethod
def delete_machine(self, machine_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_users(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_user(self, username, password_hash=None):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_user_with_password(self, username):
raise NotImplementedError()
@abc.abstractmethod
def create_user(self, username, password_hash, email, role, **extra_info):
raise NotImplementedError()
@abc.abstractmethod
def modify_user(self, username, **data):
raise NotImplementedError()
@abc.abstractmethod
def delete_user(self, username):
raise NotImplementedError()
@abc.abstractmethod
def get_current_scoring_session(self):
raise NotImplementedError()
@abc.abstractmethod
def start_current_scoring_session(self):
raise NotImplementedError()
@abc.abstractmethod
def stop_current_scoring_session(self):
raise NotImplementedError()
@abc.abstractmethod
def clear_current_scoring_session(self):
raise NotImplementedError()
@abc.abstractmethod
def get_all_archived_scoring_sessions(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_archived_scoring_session(self, archive_id):
raise NotImplementedError()
@abc.abstractmethod
def archive_current_scoring_session(self, archive_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_check_classes(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_check_class(self, class_name):
raise NotImplementedError()
@abc.abstractmethod
def create_check_class(self, class_name, check_type, module_name):
raise NotImplementedError()
@abc.abstractmethod
def modify_check_class(self, class_name, **data):
raise NotImplementedError()
@abc.abstractmethod
def delete_check_class(self, class_name):
raise NotImplementedError()
@abc.abstractmethod
def get_all_check_scripts(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_check_script(self, module_name):
raise NotImplementedError()
@abc.abstractmethod
def create_check_script(self, module_name, path):
raise NotImplementedError()
@abc.abstractmethod
def modify_check_script(self, module_name, **data):
raise NotImplementedError()
@abc.abstractmethod
def delete_check_script(self, module_name):
raise NotImplementedError()
@abc.abstractmethod
def get_all_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_check(self, check_id, check_type):
raise NotImplementedError()
@abc.abstractmethod
def delete_specific_check(self, check_id, check_type):
raise NotImplementedError()
@abc.abstractmethod
def get_all_service_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_service_check(self, check_id):
raise NotImplementedError()
@abc.abstractmethod
def create_service_check(self, check_id, description, machine, check_class):
raise NotImplementedError()
@abc.abstractmethod
def modify_service_check(self, check_id, **data):
raise NotImplementedError()
@abc.abstractmethod
def complete_service_check(self, check_id, team_id, timestamp, score):
raise NotImplementedError()
@abc.abstractmethod
def delete_service_check(self, check_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_inject_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_inject_check(self, check_id):
raise NotImplementedError()
@abc.abstractmethod
def create_inject_check(self, check_id, description, machine, check_class, inject_number, time_to_check):
raise NotImplementedError()
@abc.abstractmethod
def modify_inject_check(self, check_id, **data):
raise NotImplementedError()
@abc.abstractmethod
def complete_inject_check(self, check_id, team_id, timestamp, score):
raise NotImplementedError()
@abc.abstractmethod
def delete_inject_check(self, check_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_manual_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_all_manual_checks_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_manual_check(self, check_id):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_manual_check_for_team(self, check_id, team_id):
raise NotImplementedError()
@abc.abstractmethod
def create_manual_check(self, check_id, description, comments, inject_number, team_id, points_awarded, timestamp):
raise NotImplementedError()
@abc.abstractmethod
def modify_manual_check(self, check_id, team_id, **data):
raise NotImplementedError()
@abc.abstractmethod
def delete_manual_check(self, check_id, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_attacker_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_attacker_check(self, check_id, team_id):
raise NotImplementedError()
@abc.abstractmethod
def create_attacker_check(self, check_id, description, machine, team_id, check_class):
raise NotImplementedError()
@abc.abstractmethod
def modify_attacker_check(self, check_id, team_id, **data):
raise NotImplementedError()
@abc.abstractmethod
def complete_attacker_check(self, check_id, team_id, timestamp, score):
raise NotImplementedError()
@abc.abstractmethod
def delete_attacker_check(self, check_id, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_attacker_checks_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_service_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_inject_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_manual_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_attacker_checks(self):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_checks_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_service_checks_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_inject_checks_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_manual_checks_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_attacker_checks_for_team(self, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_all_completed_checks_for_team_since(self, team_id, timestamp):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_completed_service_check_for_team(self, check_id, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_completed_inject_check_for_team(self, check_id, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_completed_manual_check_for_team(self, check_id, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_completed_attacker_check_for_team(self, check_id, team_id):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_completed_service_check_for_team_at_time(self, check_id, team_id, timestamp):
raise NotImplementedError()
@abc.abstractmethod
def get_specific_completed_attacker_check_for_team_at_time(self, check_id, team_id, timestamp):
raise NotImplementedError() | agpl-3.0 |
yangming85/lettuce | tests/integration/lib/Django-1.3/django/contrib/localflavor/pt/forms.py | 309 | 1561 | """
PT-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^(\d{9}|(00|\+)\d*)$')
class PTZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXX-XXX.'),
}
def __init__(self, *args, **kwargs):
super(PTZipCodeField, self).__init__(r'^(\d{4}-\d{3}|\d{7})$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
cleaned = super(PTZipCodeField, self).clean(value)
if len(cleaned) == 7:
return u'%s-%s' % (cleaned[:4],cleaned[4:])
else:
return cleaned
class PTPhoneNumberField(Field):
"""
Validate local Portuguese phone number (including international ones)
It should have 9 digits (may include spaces) or start by 00 or + (international)
"""
default_error_messages = {
'invalid': _('Phone numbers must have 9 digits, or start by + or 00.'),
}
def clean(self, value):
super(PTPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub('(\.|\s)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s' % value
raise ValidationError(self.error_messages['invalid'])
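# Illustrative usage sketch (assumption, not part of the original file):
# both fields drop into an ordinary Django form.
#
#   from django import forms
#
#   class AddressForm(forms.Form):
#       zip_code = PTZipCodeField()    # "1234567" is normalized to "1234-567"
#       phone = PTPhoneNumberField()   # accepts "912345678" or "+351912345678"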
| gpl-3.0 |
Ju2ender/amonone | amonone/core/defaults.py | 5 | 1044 | import sys
try:
import json
except ImportError:
import simplejson as json
try:
config_file = file('/etc/amonone.conf').read()
config = json.loads(config_file)
except Exception, e:
print "There was an error in your configuration file (/etc/amonone.conf)"
raise e
# Amon Defaults
MONGO = config.get('mongo', "mongodb://127.0.0.1:27017")
_web_app = config.get('web_app', {})
host = _web_app.get('host', 'http://127.0.0.1')
if not host.startswith('http'):
host = "http://{0}".format(host)
WEB_APP = {
'host': host,
'port': _web_app.get('port', 2464)
}
key = config.get('secret_key', None)
if key is not None and len(key) > 0:
SECRET_KEY = key
else:
SECRET_KEY = 'TGJKhSSeZaPZr24W6GlByAaLVe0VKvg8qs+8O7y=' #Don't break the dashboard
# Always
ACL = 'True'
SYSTEM_CHECK_PERIOD = config.get('system_check_period', 60)
TIMEZONE = config.get('timezone','UTC')
PROXY = config.get('proxy', None)  # when set, serve relative base URLs (app running behind a proxy)
LOGFILE = config.get("logfile", '/var/log/amonone/amonone.log') | mit |
hale36/SRTV | lib/unidecode/x066.py | 252 | 4677 | data = (
'Yun ', # 0x00
'Bei ', # 0x01
'Ang ', # 0x02
'Ze ', # 0x03
'Ban ', # 0x04
'Jie ', # 0x05
'Kun ', # 0x06
'Sheng ', # 0x07
'Hu ', # 0x08
'Fang ', # 0x09
'Hao ', # 0x0a
'Gui ', # 0x0b
'Chang ', # 0x0c
'Xuan ', # 0x0d
'Ming ', # 0x0e
'Hun ', # 0x0f
'Fen ', # 0x10
'Qin ', # 0x11
'Hu ', # 0x12
'Yi ', # 0x13
'Xi ', # 0x14
'Xin ', # 0x15
'Yan ', # 0x16
'Ze ', # 0x17
'Fang ', # 0x18
'Tan ', # 0x19
'Shen ', # 0x1a
'Ju ', # 0x1b
'Yang ', # 0x1c
'Zan ', # 0x1d
'Bing ', # 0x1e
'Xing ', # 0x1f
'Ying ', # 0x20
'Xuan ', # 0x21
'Pei ', # 0x22
'Zhen ', # 0x23
'Ling ', # 0x24
'Chun ', # 0x25
'Hao ', # 0x26
'Mei ', # 0x27
'Zuo ', # 0x28
'Mo ', # 0x29
'Bian ', # 0x2a
'Xu ', # 0x2b
'Hun ', # 0x2c
'Zhao ', # 0x2d
'Zong ', # 0x2e
'Shi ', # 0x2f
'Shi ', # 0x30
'Yu ', # 0x31
'Fei ', # 0x32
'Die ', # 0x33
'Mao ', # 0x34
'Ni ', # 0x35
'Chang ', # 0x36
'Wen ', # 0x37
'Dong ', # 0x38
'Ai ', # 0x39
'Bing ', # 0x3a
'Ang ', # 0x3b
'Zhou ', # 0x3c
'Long ', # 0x3d
'Xian ', # 0x3e
'Kuang ', # 0x3f
'Tiao ', # 0x40
'Chao ', # 0x41
'Shi ', # 0x42
'Huang ', # 0x43
'Huang ', # 0x44
'Xuan ', # 0x45
'Kui ', # 0x46
'Xu ', # 0x47
'Jiao ', # 0x48
'Jin ', # 0x49
'Zhi ', # 0x4a
'Jin ', # 0x4b
'Shang ', # 0x4c
'Tong ', # 0x4d
'Hong ', # 0x4e
'Yan ', # 0x4f
'Gai ', # 0x50
'Xiang ', # 0x51
'Shai ', # 0x52
'Xiao ', # 0x53
'Ye ', # 0x54
'Yun ', # 0x55
'Hui ', # 0x56
'Han ', # 0x57
'Han ', # 0x58
'Jun ', # 0x59
'Wan ', # 0x5a
'Xian ', # 0x5b
'Kun ', # 0x5c
'Zhou ', # 0x5d
'Xi ', # 0x5e
'Cheng ', # 0x5f
'Sheng ', # 0x60
'Bu ', # 0x61
'Zhe ', # 0x62
'Zhe ', # 0x63
'Wu ', # 0x64
'Han ', # 0x65
'Hui ', # 0x66
'Hao ', # 0x67
'Chen ', # 0x68
'Wan ', # 0x69
'Tian ', # 0x6a
'Zhuo ', # 0x6b
'Zui ', # 0x6c
'Zhou ', # 0x6d
'Pu ', # 0x6e
'Jing ', # 0x6f
'Xi ', # 0x70
'Shan ', # 0x71
'Yi ', # 0x72
'Xi ', # 0x73
'Qing ', # 0x74
'Qi ', # 0x75
'Jing ', # 0x76
'Gui ', # 0x77
'Zhen ', # 0x78
'Yi ', # 0x79
'Zhi ', # 0x7a
'An ', # 0x7b
'Wan ', # 0x7c
'Lin ', # 0x7d
'Liang ', # 0x7e
'Chang ', # 0x7f
'Wang ', # 0x80
'Xiao ', # 0x81
'Zan ', # 0x82
'Hi ', # 0x83
'Xuan ', # 0x84
'Xuan ', # 0x85
'Yi ', # 0x86
'Xia ', # 0x87
'Yun ', # 0x88
'Hui ', # 0x89
'Fu ', # 0x8a
'Min ', # 0x8b
'Kui ', # 0x8c
'He ', # 0x8d
'Ying ', # 0x8e
'Du ', # 0x8f
'Wei ', # 0x90
'Shu ', # 0x91
'Qing ', # 0x92
'Mao ', # 0x93
'Nan ', # 0x94
'Jian ', # 0x95
'Nuan ', # 0x96
'An ', # 0x97
'Yang ', # 0x98
'Chun ', # 0x99
'Yao ', # 0x9a
'Suo ', # 0x9b
'Jin ', # 0x9c
'Ming ', # 0x9d
'Jiao ', # 0x9e
'Kai ', # 0x9f
'Gao ', # 0xa0
'Weng ', # 0xa1
'Chang ', # 0xa2
'Qi ', # 0xa3
'Hao ', # 0xa4
'Yan ', # 0xa5
'Li ', # 0xa6
'Ai ', # 0xa7
'Ji ', # 0xa8
'Gui ', # 0xa9
'Men ', # 0xaa
'Zan ', # 0xab
'Xie ', # 0xac
'Hao ', # 0xad
'Mu ', # 0xae
'Mo ', # 0xaf
'Cong ', # 0xb0
'Ni ', # 0xb1
'Zhang ', # 0xb2
'Hui ', # 0xb3
'Bao ', # 0xb4
'Han ', # 0xb5
'Xuan ', # 0xb6
'Chuan ', # 0xb7
'Liao ', # 0xb8
'Xian ', # 0xb9
'Dan ', # 0xba
'Jing ', # 0xbb
'Pie ', # 0xbc
'Lin ', # 0xbd
'Tun ', # 0xbe
'Xi ', # 0xbf
'Yi ', # 0xc0
'Ji ', # 0xc1
'Huang ', # 0xc2
'Tai ', # 0xc3
'Ye ', # 0xc4
'Ye ', # 0xc5
'Li ', # 0xc6
'Tan ', # 0xc7
'Tong ', # 0xc8
'Xiao ', # 0xc9
'Fei ', # 0xca
'Qin ', # 0xcb
'Zhao ', # 0xcc
'Hao ', # 0xcd
'Yi ', # 0xce
'Xiang ', # 0xcf
'Xing ', # 0xd0
'Sen ', # 0xd1
'Jiao ', # 0xd2
'Bao ', # 0xd3
'Jing ', # 0xd4
'Yian ', # 0xd5
'Ai ', # 0xd6
'Ye ', # 0xd7
'Ru ', # 0xd8
'Shu ', # 0xd9
'Meng ', # 0xda
'Xun ', # 0xdb
'Yao ', # 0xdc
'Pu ', # 0xdd
'Li ', # 0xde
'Chen ', # 0xdf
'Kuang ', # 0xe0
'Die ', # 0xe1
'[?] ', # 0xe2
'Yan ', # 0xe3
'Huo ', # 0xe4
'Lu ', # 0xe5
'Xi ', # 0xe6
'Rong ', # 0xe7
'Long ', # 0xe8
'Nang ', # 0xe9
'Luo ', # 0xea
'Luan ', # 0xeb
'Shai ', # 0xec
'Tang ', # 0xed
'Yan ', # 0xee
'Chu ', # 0xef
'Yue ', # 0xf0
'Yue ', # 0xf1
'Qu ', # 0xf2
'Yi ', # 0xf3
'Geng ', # 0xf4
'Ye ', # 0xf5
'Hu ', # 0xf6
'He ', # 0xf7
'Shu ', # 0xf8
'Cao ', # 0xf9
'Cao ', # 0xfa
'Noboru ', # 0xfb
'Man ', # 0xfc
'Ceng ', # 0xfd
'Ceng ', # 0xfe
'Ti ', # 0xff
)
| gpl-3.0 |
nateberman/Python_Koans | python3/libs/colorama/ansi.py | 527 | 1039 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
def code_to_chars(code):
return CSI + str(code) + 'm'
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
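

# A minimal usage sketch (not part of the original module), assuming an
# ANSI-capable terminal:
def _ansi_examples():
    # Fore.RED is '\033[31m'; Fore.RESET ('\033[39m') restores the default color.
    print(Fore.RED + 'error' + Fore.RESET)
    # Style.RESET_ALL ('\033[0m') clears color and brightness at once.
    print(Style.BRIGHT + Back.BLUE + 'status' + Style.RESET_ALL)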
| mit |
rostj/pedantica | engine/android-port/app/src/main/jni/SDL2_ttf-2.0.14/external/freetype-2.4.12/src/tools/docmaker/sources.py | 106 | 10770 | # Sources (c) 2002-2004, 2006-2009, 2012
# David Turner <david@freetype.org>
#
#
# this file contains definitions of classes needed to decompose
# C source files into a series of multi-line "blocks". There are
# two kinds of blocks:
#
# - normal blocks, which contain source code or ordinary comments
#
# - documentation blocks, which have restricted formatting, and
# whose text always starts with a documentation markup tag like
# "<Function>", "<Type>", etc.
#
# the routines used to process the content of documentation blocks
# are not contained here, but in "content.py"
#
# the classes and methods found here only deal with text parsing
# and basic documentation block extraction
#
import fileinput, re, sys, os, string
################################################################
##
## BLOCK FORMAT PATTERN
##
## A simple class containing compiled regular expressions used
## to detect potential documentation format block comments within
## C source code
##
## note that the 'column' pattern must contain a group that will
## be used to "unbox" the content of documentation comment blocks
##
class SourceBlockFormat:
def __init__( self, id, start, column, end ):
"""create a block pattern, used to recognize special documentation blocks"""
self.id = id
self.start = re.compile( start, re.VERBOSE )
self.column = re.compile( column, re.VERBOSE )
self.end = re.compile( end, re.VERBOSE )
#
# format 1 documentation comment blocks look like the following:
#
# /************************************/
# /* */
# /* */
# /* */
# /************************************/
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# format 2 documentation comment blocks look like the following:
#
# /************************************ (at least 2 asterisks)
# *
# *
# *
# *
# **/ (1 or more asterisks at the end)
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?!/) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# the list of supported documentation block formats, we could add new ones
# relatively easily
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
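
#
# a minimal sketch (not from the original file) of how the patterns above
# recognize a format 1 block; the input lines are hypothetical
#
def _format1_examples():
    assert re_source_block_format1.start.match( '  /**********/' )
    m = re_source_block_format1.column.match( '  /* Hello    */' )
    assert m and m.group( 1 ).strip() == 'Hello'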
#
# the following regular expressions correspond to markup tags
# within the documentation comment blocks. they're equivalent
# despite their different syntax
#
# notice how each markup tag _must_ begin a new line
#
re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format
#
# the list of supported markup tags, we could add new ones relatively
# easily
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
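
#
# a minimal sketch (not from the original file): both markup syntaxes are
# recognized at the start of a line; the tag names are hypothetical
#
def _markup_tag_examples():
    assert re_markup_tag1.match( '  <Function>' ).group( 1 ) == 'Function'
    assert re_markup_tag2.match( '  @function:' ).group( 1 ) == 'function'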
#
# used to detect a cross-reference, after markup tags have been stripped
#
re_crossref = re.compile( r'@((?:\w|-)*)(.*)' )
#
# used to detect italic and bold styles in paragraph text
#
re_italic = re.compile( r"_(\w(\w|')*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*(\w(\w|')*)\*(.*)" ) # *bold*
#
# used to detect the end of commented source lines
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' )
#
# used to perform cross-reference within source output
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# a list of reserved source keywords
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## A SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlocks".
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal source lines, including comments
##
## the important fields in a text block are the following ones:
##
## self.lines : a list of text lines for the corresponding block
##
## self.content : for documentation comment blocks only, this is the
## block content that has been "unboxed" from its
## decoration. This is None for all other blocks
## (i.e. sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
def __init__( self, processor, filename, lineno, lines ):
self.processor = processor
self.filename = filename
self.lineno = lineno
self.lines = lines[:]
self.format = processor.format
self.content = []
if self.format == None:
return
words = []
# extract comment lines
lines = []
for line0 in self.lines:
m = self.format.column.match( line0 )
if m:
lines.append( m.group( 1 ) )
# now, look for a markup tag
for l in lines:
l = string.strip( l )
if len( l ) > 0:
for tag in re_markup_tags:
if tag.match( l ):
self.content = lines
return
def location( self ):
return "(" + self.filename + ":" + repr( self.lineno ) + ")"
# debugging only - not used in normal operations
def dump( self ):
if self.content:
print "{{{content start---"
for l in self.content:
print l
print "---content end}}}"
return
fmt = ""
if self.format:
fmt = repr( self.format.id ) + " "
for line in self.lines:
print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlock"
## objects.
##
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal source lines, including comments
##
##
class SourceProcessor:
def __init__( self ):
"""initialize a source processor"""
self.blocks = []
self.filename = None
self.format = None
self.lines = []
def reset( self ):
"""reset a block processor, clean all its blocks"""
self.blocks = []
self.format = None
def parse_file( self, filename ):
"""parse a C source file, and add its blocks to the processor's list"""
self.reset()
self.filename = filename
fileinput.close()
self.format = None
self.lineno = 0
self.lines = []
for line in fileinput.input( filename ):
# strip trailing newlines, important on Windows machines!
if line[-1] == '\012':
line = line[0:-1]
if self.format == None:
self.process_normal_line( line )
else:
if self.format.end.match( line ):
# that's a normal block end, add it to 'lines' and
# create a new block
self.lines.append( line )
self.add_block_lines()
elif self.format.column.match( line ):
# that's a normal column line, add it to 'lines'
self.lines.append( line )
else:
# humm.. this is an unexpected block end,
# create a new block, but don't process the line
self.add_block_lines()
# we need to process the line again
self.process_normal_line( line )
# record the last lines
self.add_block_lines()
def process_normal_line( self, line ):
"""process a normal line and check whether it is the start of a new block"""
for f in re_source_block_formats:
if f.start.match( line ):
self.add_block_lines()
self.format = f
self.lineno = fileinput.filelineno()
self.lines.append( line )
def add_block_lines( self ):
"""add the current accumulated lines and create a new block"""
if self.lines != []:
block = SourceBlock( self, self.filename, self.lineno, self.lines )
self.blocks.append( block )
self.format = None
self.lines = []
# debugging only, not used in normal operations
def dump( self ):
"""print all blocks in a processor"""
for b in self.blocks:
b.dump()
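

# a minimal usage sketch (hypothetical caller, not in the original file):
# collect the documentation blocks of a single C source file
def _documentation_blocks( filename ):
    processor = SourceProcessor()
    processor.parse_file( filename )
    return [b for b in processor.blocks if b.content]
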
# eof
| gpl-3.0 |
frankrousseau/weboob | weboob/capabilities/shop.py | 1 | 3939 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Oleg Plakhotniuk
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import BaseObject, StringField, DecimalField, UserError
from .date import DateField
from .collection import CapCollection
__all__ = ['OrderNotFound', 'Order', 'Payment', 'Item', 'CapShop']
class OrderNotFound(UserError):
"""
Raised when an order is not found.
"""
def __init__(self, msg='Order not found'):
UserError.__init__(self, msg)
class Order(BaseObject):
"""
Purchase order.
"""
date = DateField('Date when the order was placed')
shipping = DecimalField('Shipping price')
discount = DecimalField('Discounts')
tax = DecimalField('Tax')
def __repr__(self):
return u"<Order id=%r date=%r>" % (self.id, self.date)
class Payment(BaseObject):
"""
Payment for an order.
"""
date = DateField('The date when payment was applied')
method = StringField('Payment method; e.g. "VISA 1234"')
amount = DecimalField('Payment amount')
def __repr__(self):
return u"<Payment date=%r method=%r amount=%r>" % \
(self.date, self.method, self.amount)
class Item(BaseObject):
"""
Purchased item within an order.
"""
label = StringField('Item label')
url = StringField('URL with item description')
price = DecimalField('Item price')
def __repr__(self):
return u"<Item label=%r price=%r>" % (self.label, self.price)
class CapShop(CapCollection):
"""
    Capability of online shops to view order history.
"""
def iter_resources(self, objs, split_path):
"""
Iter resources.
        The default implementation of this method returns all orders
        at the top level (by calling :func:`iter_orders`).
:param objs: type of objects to get
:type objs: tuple[:class:`BaseObject`]
:param split_path: path to discover
:type split_path: :class:`list`
:rtype: iter[:class:`BaseObject`]
"""
if Order in objs:
self._restrict_level(split_path)
return self.iter_orders()
def get_currency(self):
"""
Get the currency this shop uses.
:rtype: :class:`str`
"""
raise NotImplementedError()
def iter_orders(self):
"""
Iter history of orders.
:rtype: iter[:class:`Order`]
"""
raise NotImplementedError()
def get_order(self, id):
"""
Get an order from its ID.
:param id: ID of the order
:type id: :class:`str`
:rtype: :class:`Order`
:raises: :class:`OrderNotFound`
"""
raise NotImplementedError()
def iter_payments(self, order):
"""
Iter payments of a specific order.
:param order: order to get payments
:type order: :class:`Order`
:rtype: iter[:class:`Payment`]
:raises: :class:`OrderNotFound`
"""
raise NotImplementedError()
def iter_items(self, order):
"""
Iter items of a specific order.
:param order: order to get items
:type order: :class:`Order`
:rtype: iter[:class:`Item`]
:raises: :class:`OrderNotFound`
"""
raise NotImplementedError()
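

# A minimal sketch of a concrete implementation of this capability, serving
# hypothetical in-memory data; a real backend module would scrape or query
# the shop's website instead.
class _InMemoryShop(CapShop):
    _orders = {}  # maps order id -> (Order, [Payment], [Item])

    def get_currency(self):
        return 'EUR'

    def iter_orders(self):
        return (entry[0] for entry in self._orders.itervalues())

    def get_order(self, id):
        try:
            return self._orders[id][0]
        except KeyError:
            raise OrderNotFound()

    def iter_payments(self, order):
        self.get_order(order.id)  # raises OrderNotFound for unknown ids
        return iter(self._orders[order.id][1])

    def iter_items(self, order):
        self.get_order(order.id)  # raises OrderNotFound for unknown ids
        return iter(self._orders[order.id][2])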
| agpl-3.0 |
digitalocean/netbox | netbox/extras/migrations/0013_objectchange.py | 2 | 1959 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-22 18:13
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
('extras', '0012_webhooks'),
]
operations = [
migrations.CreateModel(
name='ObjectChange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True)),
('user_name', models.CharField(editable=False, max_length=150)),
('request_id', models.UUIDField(editable=False)),
('action', models.PositiveSmallIntegerField(choices=[(1, 'Created'), (2, 'Updated'), (3, 'Deleted')])),
('changed_object_id', models.PositiveIntegerField()),
('related_object_id', models.PositiveIntegerField(blank=True, null=True)),
('object_repr', models.CharField(editable=False, max_length=200)),
('object_data', django.contrib.postgres.fields.jsonb.JSONField(editable=False)),
('changed_object_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to='contenttypes.ContentType')),
('related_object_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='contenttypes.ContentType')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='changes', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-time'],
},
),
]
| apache-2.0 |
carlTLR/gyp | pylib/gyp/generator/make.py | 10 | 90525 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
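

# A minimal sketch, in Python, of the round trip that the make helpers
# replace_spaces/unreplace_spaces (defined in SHARED_HEADER below) perform:
def _space_replacement_examples():
  path = 'out/Release/Chromium Framework.framework/foo'
  munged = path.replace(' ', SPACE_REPLACEMENT)
  assert ' ' not in munged
  assert munged.replace(SPACE_REPLACEMENT, ' ') == path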
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?=
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?=
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp -af "$<" "$@")
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
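
# For the '.cc' extension, for example, the writer above emits:
#
#   $(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
#   	@$(call do_cmd,cxx,1)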
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
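

# A minimal sketch of the three helpers above (hypothetical file names):
def _source_classification_examples():
  assert Compilable('foo.cc') and Compilable('foo.S')
  assert not Compilable('foo.h')             # headers are not compiled directly
  assert Target('dir/foo.cpp') == 'dir/foo.o'
  assert Linkable('dir/foo.o') and not Linkable('dir/foo.cpp')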
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
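

# A minimal sketch of the escaping helpers above (hypothetical inputs):
def _escaping_examples():
  assert EscapeShellArgument('a b') == "'a b'"
  assert EscapeMakeVariableExpansion('$HOME') == '$$HOME'
  assert EscapeCppDefine('A#B') == "'A\\#B'"
  assert QuoteIfNecessary('say "hi"') == '"say \\"hi\\""'
  assert StringToMakefileVariable('my target (1)') == 'my_target__1_'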
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently any versions have the same effect, but in future the behavior
# could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
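# Illustrative example (variable name assumed): a 'copies' destination
# like '${BUILT_PRODUCTS_DIR}/res' is therefore expanded right here, at
# gyp time, using the Xcode env, instead of at make time.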
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
# Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
# The plist can contain envvars; substitute them into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
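# Illustrative expansion of the first flag above for the 'target'
# toolset: the makefile holds -Wl,-rpath=\$$ORIGIN/lib.target/; make
# turns $$ into $, the shell's backslash keeps the $ literal, and the
# linker finally embeds -rpath=$ORIGIN/lib.target/ in the binary.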
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not merely after the bundle binary is built.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will usually be older
# than its dependencies. To prevent this rule from executing
# on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
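# e.g. (illustrative path): 'out/My Dir/foo.o' is recorded in all_deps
# as 'out/My?Dir/foo.o'.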
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
#
# For an explanation of the problem and several solutions that don't
# work, see this:
# http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
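# Illustrative sketch (hypothetical names) of the three rules emitted
# below for outputs a.h and a.cc by a rule command named 'foo':
#   a.h a.cc: foo.intermediate
#   .INTERMEDIATE: foo.intermediate
#   foo.intermediate: input.idl FORCE_DO_CMD
#       <actions>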
intermediate = "%s.intermediate" % (command if command else self.target)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
# LOCAL_CFLAGS is applied to both of C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as a literal character.
# So don't escape spaces in |env[k]|.
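# Illustrative emitted line (names assumed):
#   $(builddir)/MyApp.app: export INFOPLIST_PATH := Contents/Info.plist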
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
flock_command = 'flock'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'aix':
header_params.update({
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
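# Illustrative example (assumed setting): ('CC', '/usr/bin/clang') emits
#   ifneq (,$(filter $(origin CC), undefined default))
#     CC = $(abspath /usr/bin/clang)
#   endif
# so a CC from the make command line or make's environment still wins.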
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
# Currently any versions have the same effect, but in future the behavior
# could be different.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
# not load a file by setting NO_LOAD. The make code below loads the
# .mk file only if its filename doesn't start with a token in NO_LOAD.
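# Illustrative example (hypothetical path): with NO_LOAD=third_party,
# an include of third_party/foo.Makefile is skipped because
# $(findstring ^third_party,^third_party/foo.Makefile) is non-empty.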
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
| bsd-3-clause |
norayr/unisubs | apps/auth/tests.py | 4 | 7877 | # -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from datetime import datetime, timedelta
from urlparse import urlparse
from nose.tools import *
import re
from django.core.urlresolvers import reverse
from django.core import mail
from django.test import TestCase
from auth.models import CustomUser as User, UserLanguage
from auth.models import LoginToken
from caching.tests.utils import assert_invalidates_model_cache
from utils.factories import *
from utils import test_utils
class VideosFieldTest(TestCase):
def setUp(self):
self.user = UserFactory()
def check_user_videos(self, videos):
self.assertEqual(set(self.user.videos.all()), set(videos))
def test_with_video_followers(self):
video = VideoFactory(title='video1')
video2 = VideoFactory(title='video2')
self.check_user_videos([])
# add
video.followers.add(self.user)
self.check_user_videos([video])
# add with reverse relation
self.user.followed_videos.add(video2)
self.check_user_videos([video, video2])
# remove
video.followers.remove(self.user)
self.check_user_videos([video2])
# remove with reverse relation
self.user.followed_videos.remove(video2)
self.check_user_videos([])
# clear
video.followers.add(self.user)
self.check_user_videos([video])
video.followers = []
self.check_user_videos([])
# clear with reverse relation
video.followers.add(self.user)
self.check_user_videos([video])
self.user.followed_videos = []
self.check_user_videos([])
class UserCreationTest(TestCase):
def test_notfications(self):
self.assertEqual(len(mail.outbox), 0)
user = User(email='la@example.com', username='someone')
user.set_password("secret")
user.save()
self.assertEqual(len(mail.outbox), 1)
def test_notifications_unicode(self):
self.assertEqual(len(mail.outbox), 0)
user = User(email=u'Leandro Andrés@example.com', username='unicodesomeone')
user.set_password("secret")
user.save()
self.assertEqual(len(mail.outbox), 1)
class UniqueUsernameTest(TestCase):
def test_username_already_unique(self):
# if the username is unique to begin with, we should use that
user = User.objects.create_with_unique_username(username='test')
assert_equal(user.username, 'test')
def test_strategy1(self):
# If the username is not unique, we should try to append "00", "01",
# "02", ... until "99" to the username
UserFactory(username='test')
for i in xrange(5):
UserFactory(username='test0{}'.format(i))
user = User.objects.create_with_unique_username(username='test')
assert_equal(user.username, 'test05')
def test_strategy2(self):
# If strategy1 doesn't produce a unique username, then we should
# append random strings until we find one
UserFactory(username='test')
for i in xrange(100):
UserFactory(username='test{:0>2d}'.format(i))
user = User.objects.create_with_unique_username(username='test')
assert_true(re.match(r'test[a-zA-Z0-9]{6}', user.username),
user.username)
def test_at_symbol(self):
# if there is an "@" symbol in the username, we should insert our
# extra chars before it.
UserFactory(username='test@example.com')
for i in xrange(5):
UserFactory(username='test0{}@example.com'.format(i))
user = User.objects.create_with_unique_username(
username='test@example.com')
assert_equal(user.username, 'test05@example.com')
def test_at_symbol_strategy2(self):
UserFactory(username='test@example.com')
for i in xrange(100):
UserFactory(username='test{:0>2d}@example.com'.format(i))
user = User.objects.create_with_unique_username(
username='test@example.com')
assert_true(re.match(r'test[a-zA-Z0-9]{6}@example.com', user.username),
user.username)
class UserCacheTest(TestCase):
def test_user_language_change_invalidates_cache(self):
user = UserFactory()
with assert_invalidates_model_cache(user):
user_lang = UserLanguage.objects.create(user=user, language='en')
with assert_invalidates_model_cache(user):
user_lang.delete()
class LoginTokenModelTest(TestCase):
def test_creation(self):
user = UserFactory()
lt1 = LoginToken.objects.for_user(user)
lt2 = LoginToken.objects.for_user(user, updates=False)
self.assertEqual(lt1.token, lt2.token)
self.assertEqual(len(lt1.token), 40)
# assert updates does what it says
lt3 = LoginToken.objects.for_user(user, updates=True)
self.assertNotEqual(lt3.token, lt2.token)
self.assertEqual(len(lt3.token), 40)
def test_expire(self):
user = UserFactory()
lt1 = LoginToken.objects.for_user(user)
self.assertFalse(lt1.is_expired)
self.assertFalse(LoginToken.objects.get_expired().filter(pk=lt1.pk).exists())
older_date = datetime.now() - timedelta(minutes=1) - LoginToken.EXPIRES_IN
lt1.created = older_date
lt1.save()
self.assertTrue(lt1.is_expired)
self.assertTrue(LoginToken.objects.get_expired().filter(pk=lt1.pk).exists())
class LoginTokenViewsTest(TestCase):
def test_valid_login(self):
user = UserFactory()
lt1 = LoginToken.objects.for_user(user)
redirect_url = '/en/videos/watch'
url = reverse("auth:token-login", args=(lt1.token,)) + "?next=%s" % redirect_url
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
location = response._headers['location'][1]
redirect_path = urlparse(location).path
self.assertEqual(redirect_path, redirect_url)
def test_invalid_login(self):
user = UserFactory()
lt1 = LoginToken.objects.for_user(user)
redirect_url = '/en/videos/watch'
url = reverse("auth:token-login", args=(lt1.token,)) + "?next=%s" % redirect_url
lt1.delete()
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_staff_is_offlimit(self):
user = UserFactory()
lt1 = LoginToken.objects.for_user(user)
user.is_staff = True
user.save()
redirect_url = '/en/videos/watch'
url = reverse("auth:token-login", args=(lt1.token,)) + "?next=%s" % redirect_url
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_superuser_is_offlimit(self):
user = UserFactory()
lt1 = LoginToken.objects.for_user(user)
user.is_superuser = True
user.save()
redirect_url = '/en/videos/watch'
url = reverse("auth:token-login", args=(lt1.token,)) + "?next=%s" % redirect_url
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
| agpl-3.0 |
OpenConceptLab/oclapi | ocl/concepts/search_indexes.py | 2 | 2578 | from django.contrib.contenttypes.models import ContentType
from haystack import indexes
from concepts.models import ConceptVersion
from oclapi.search_backends import SortOrFilterField, FilterField
from oclapi.search_indexes import OCLSearchIndex
from sources.models import SourceVersion, Source
__author__ = 'misternando'
class ConceptVersionIndex(OCLSearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True, use_template=True)
mnemonic = SortOrFilterField(
model_attr='name', indexed=True, stored=True, default="")
name = SortOrFilterField(
model_attr='display_name', indexed=True, stored=True, default="")
lastUpdate = indexes.DateTimeField(
model_attr='updated_at', indexed=True, stored=True)
num_stars = indexes.IntegerField(
model_attr='versioned_object__num_stars', indexed=True, stored=True)
conceptClass = SortOrFilterField(
model_attr='concept_class', indexed=True, stored=True, faceted=True)
datatype = SortOrFilterField(
model_attr='datatype', null=True, indexed=True, stored=True, faceted=True)
locale = FilterField(
indexed=True, stored=True, faceted=True)
is_latest_version = indexes.BooleanField(
model_attr='is_latest_version', indexed=True, stored=True)
public_can_view = indexes.BooleanField(
model_attr='public_can_view', indexed=True, stored=True)
retired = indexes.BooleanField(
model_attr='retired', indexed=True, stored=True, faceted=True)
source = SortOrFilterField(model_attr='parent_resource', indexed=True, stored=True, faceted=True)
owner = SortOrFilterField(
model_attr='owner_name', indexed=True, stored=True, faceted=True)
ownerType = SortOrFilterField(
model_attr='owner_type', indexed=True, stored=True, faceted=True)
source_version = FilterField()
collection = FilterField()
collection_version = FilterField()
is_active = indexes.BooleanField(model_attr='is_active', indexed=True, stored=True)
def get_model(self):
return ConceptVersion
def prepare_locale(self, obj):
locales = set()
if obj.names:
for name in obj.names:
if name.locale is not None:
locales.add(name.locale)
return list(locales)
def prepare_source_version(self, obj):
return list(obj.source_version_ids)
def prepare_collection_version(self, obj):
return obj.get_collection_version_ids()
def prepare_collection(self, obj):
return obj.get_collection_ids()
| mpl-2.0 |
gigglearrows/anniesbot | alembic/versions/3841cd597e_added_a_table_for_duel_stats.py | 1 | 1033 | """Added a table for duel stats
Revision ID: 3841cd597e
Revises: d5f1b8bd68
Create Date: 2015-12-02 00:12:07.548855
"""
# revision identifiers, used by Alembic.
revision = '3841cd597e'
down_revision = 'd5f1b8bd68'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('tb_user_duel_stats',
sa.Column('user_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('duels_won', sa.Integer(), nullable=False),
sa.Column('duels_total', sa.Integer(), nullable=False),
sa.Column('points_won', sa.Integer(), nullable=False),
sa.Column('points_lost', sa.Integer(), nullable=False),
sa.Column('last_duel', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('user_id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('tb_user_duel_stats')
### end Alembic commands ###
| mit |
ewindisch/nova | nova/tests/virt/libvirt/test_image_utils.py | 15 | 9555 | # Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import test
from nova import utils
from nova.virt import images
from nova.virt.libvirt import utils as libvirt_utils
class ImageUtilsTestCase(test.NoDBTestCase):
def test_disk_type(self):
# Seems like lvm detection
# if its in /dev ??
for p in ['/dev/b', '/dev/blah/blah']:
d_type = libvirt_utils.get_disk_type(p)
self.assertEqual('lvm', d_type)
# Try rbd detection
d_type = libvirt_utils.get_disk_type('rbd:pool/instance')
self.assertEqual('rbd', d_type)
# Try the other types
template_output = """image: %(path)s
file format: %(format)s
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
"""
path = '/myhome/disk.config'
for f in ['raw', 'qcow2']:
output = template_output % ({
'format': f,
'path': path,
})
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((output, ''))
self.mox.ReplayAll()
d_type = libvirt_utils.get_disk_type(path)
self.assertEqual(f, d_type)
self.mox.UnsetStubs()
def test_disk_backing(self):
path = '/myhome/disk.config'
template_output = """image: %(path)s
file format: raw
virtual size: 2K (2048 bytes)
cluster_size: 65536
disk size: 96K
"""
output = template_output % ({
'path': path,
})
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((output, ''))
self.mox.ReplayAll()
d_backing = libvirt_utils.get_disk_backing_file(path)
self.assertIsNone(d_backing)
def test_disk_size(self):
path = '/myhome/disk.config'
template_output = """image: %(path)s
file format: raw
virtual size: %(v_size)s (%(vsize_b)s bytes)
cluster_size: 65536
disk size: 96K
"""
for i in range(0, 128):
bytes = i * 65336
kbytes = bytes / 1024
mbytes = kbytes / 1024
output = template_output % ({
'v_size': "%sM" % (mbytes),
'vsize_b': i,
'path': path,
})
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((output, ''))
self.mox.ReplayAll()
d_size = libvirt_utils.get_disk_size(path)
self.assertEqual(i, d_size)
self.mox.UnsetStubs()
output = template_output % ({
'v_size': "%sK" % (kbytes),
'vsize_b': i,
'path': path,
})
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((output, ''))
self.mox.ReplayAll()
d_size = libvirt_utils.get_disk_size(path)
self.assertEqual(i, d_size)
self.mox.UnsetStubs()
def test_qemu_info_canon(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(65536, image_info.cluster_size)
def test_qemu_info_canon2(self):
path = "disk.config"
example_output = """image: disk.config
file format: QCOW2
virtual size: 67108844
cluster_size: 65536
disk size: 963434
backing file: /var/lib/nova/a328c7998805951a_2
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('qcow2', image_info.file_format)
self.assertEqual(67108844, image_info.virtual_size)
self.assertEqual(963434, image_info.disk_size)
self.assertEqual(65536, image_info.cluster_size)
self.assertEqual('/var/lib/nova/a328c7998805951a_2',
image_info.backing_file)
def test_qemu_backing_file_actual(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(1, len(image_info.snapshots))
self.assertEqual('/b/3a988059e51a_2',
image_info.backing_file)
def test_qemu_info_convert(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
junk stuff: bbb
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
def test_qemu_info_snaps(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(3, len(image_info.snapshots))
| apache-2.0 |
syaiful6/django | django/contrib/postgres/forms/ranges.py | 393 | 3005 | from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import ugettext_lazy as _
__all__ = ['IntegerRangeField', 'FloatRangeField', 'DateTimeRangeField', 'DateRangeField']
class BaseRangeField(forms.MultiValueField):
default_error_messages = {
'invalid': _('Enter two valid values.'),
'bound_ordering': _('The start of the range must not exceed the end of the range.'),
}
def __init__(self, **kwargs):
kwargs.setdefault('widget', RangeWidget(self.base_field.widget))
kwargs.setdefault('fields', [self.base_field(required=False), self.base_field(required=False)])
kwargs.setdefault('required', False)
kwargs.setdefault('require_all_fields', False)
super(BaseRangeField, self).__init__(**kwargs)
def prepare_value(self, value):
lower_base, upper_base = self.fields
if isinstance(value, self.range_type):
return [
lower_base.prepare_value(value.lower),
upper_base.prepare_value(value.upper),
]
if value is None:
return [
lower_base.prepare_value(None),
upper_base.prepare_value(None),
]
return value
def compress(self, values):
if not values:
return None
lower, upper = values
if lower is not None and upper is not None and lower > upper:
raise exceptions.ValidationError(
self.error_messages['bound_ordering'],
code='bound_ordering',
)
try:
range_value = self.range_type(lower, upper)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
)
else:
return range_value
class IntegerRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two whole numbers.')}
base_field = forms.IntegerField
range_type = NumericRange
class FloatRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two numbers.')}
base_field = forms.FloatField
range_type = NumericRange
class DateTimeRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid date/times.')}
base_field = forms.DateTimeField
range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid dates.')}
base_field = forms.DateField
range_type = DateRange
class RangeWidget(MultiWidget):
def __init__(self, base_widget, attrs=None):
widgets = (base_widget, base_widget)
super(RangeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return (value.lower, value.upper)
return (None, None)
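# A minimal usage sketch (illustrative, not part of Django): IntegerRangeField
# composes two IntegerFields and compresses the cleaned pair into a
# NumericRange.
#
# >>> f = IntegerRangeField()
# >>> f.clean(['1', '5']) == NumericRange(1, 5)
# True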
| bsd-3-clause |
marscher/PyEMMA | pyemma/_ext/variational/estimators/tests/benchmark_moments.py | 1 | 5907 | from __future__ import absolute_import
from __future__ import print_function
__author__ = 'noe'
import time
import numpy as np
from .. import moments
def genS(N):
""" Generates sparsities given N (number of cols) """
S = [10, 90, 100, 500, 900, 1000, 2000, 5000, 7500, 9000, 10000, 20000, 50000, 75000, 90000] # non-zero
return [s for s in S if s <= N]
def genX(L, N, n_var=None, const=False):
X = np.random.rand(L, N) # random data
if n_var is not None:
if const:
Xsparse = np.ones((L, N))
else:
Xsparse = np.zeros((L, N))
Xsparse[:, :n_var] = X[:, :n_var]
X = Xsparse
return X
def genY(L, N, n_var=None, const=False):
X = np.random.rand(L, N) # random data
if n_var is not None:
if const:
Xsparse = -np.ones((L, N))
else:
Xsparse = np.zeros((L, N))
Xsparse[:, :n_var] = X[:, :n_var]
X = Xsparse
return X
def reftime_momentsXX(X, remove_mean=False, nrep=3):
# time for reference calculation
t1 = time.time()
for r in range(nrep):
s_ref = X.sum(axis=0) # computation of mean
if remove_mean:
X = X - s_ref/float(X.shape[0])
C_XX_ref = np.dot(X.T, X) # covariance matrix
t2 = time.time()
# return mean time
return (t2-t1)/float(nrep)
def mytime_momentsXX(X, remove_mean=False, nrep=3):
# time for reference calculation
t1 = time.time()
for r in range(nrep):
w, s, C_XX = moments.moments_XX(X, remove_mean=remove_mean)
t2 = time.time()
# return mean time
return (t2-t1)/float(nrep)
def reftime_momentsXXXY(X, Y, remove_mean=False, symmetrize=False, nrep=3):
# time for reference calculation
t1 = time.time()
for r in range(nrep):
sx = X.sum(axis=0) # computation of mean
sy = Y.sum(axis=0) # computation of mean
if symmetrize:
sx = 0.5*(sx + sy)
sy = sx
if remove_mean:
X = X - sx/float(X.shape[0])
Y = Y - sy/float(Y.shape[0])
if symmetrize:
C_XX_ref = np.dot(X.T, X) + np.dot(Y.T, Y)
C_XY = np.dot(X.T, Y)
C_XY_ref = C_XY + C_XY.T
else:
C_XX_ref = np.dot(X.T, X)
C_XY_ref = np.dot(X.T, Y)
t2 = time.time()
# return mean time
return (t2-t1)/float(nrep)
def mytime_momentsXXXY(X, Y, remove_mean=False, symmetrize=False, nrep=3):
# time for reference calculation
t1 = time.time()
for r in range(nrep):
w, sx, sy, C_XX, C_XY = moments.moments_XXXY(X, Y, remove_mean=remove_mean, symmetrize=symmetrize)
t2 = time.time()
# return mean time
return (t2-t1)/float(nrep)
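# Quick correctness sketch (illustrative, not part of the benchmark): for
# mean-free moments the sparse-aware routine should agree with the trivial
# dense computation timed above, up to floating-point error.
#
# X = genX(1000, 50, n_var=10)
# w, s, C = moments.moments_XX(X, remove_mean=True)
# Xc = X - X.mean(axis=0)
# assert np.allclose(C, np.dot(Xc.T, Xc))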
def benchmark_moments(L=10000, N=10000, nrep=5, xy=False, remove_mean=False, symmetrize=False, const=False):
#S = [10, 100, 1000]
S = genS(N)
# time for reference calculation
X = genX(L, N)
if xy:
Y = genY(L, N)
reftime = reftime_momentsXXXY(X, Y, remove_mean=remove_mean, symmetrize=symmetrize, nrep=nrep)
else:
reftime = reftime_momentsXX(X, remove_mean=remove_mean, nrep=nrep)
# my time
times = np.zeros(len(S))
for k, s in enumerate(S):
X = genX(L, N, n_var=s, const=const)
if xy:
Y = genY(L, N, n_var=s, const=const)
times[k] = mytime_momentsXXXY(X, Y, remove_mean=remove_mean, symmetrize=symmetrize, nrep=nrep)
else:
times[k] = mytime_momentsXX(X, remove_mean=remove_mean, nrep=nrep)
# assemble report
rows = ['L, data points', 'N, dimensions', 'S, nonzeros', 'time trivial', 'time moments_XX', 'speed-up']
table = np.zeros((6, len(S)))
table[0, :] = L
table[1, :] = N
table[2, :] = S
table[3, :] = reftime
table[4, :] = times
table[5, :] = reftime / times
# print table
if xy:
fname = 'moments_XXXY'
else:
fname = 'moments_XX'
print(fname + '\tremove_mean = ' + str(remove_mean) + '\tsym = ' + str(symmetrize) + '\tconst = ' + str(const))
print(rows[0] + ('\t%i' * table.shape[1])%tuple(table[0]))
print(rows[1] + ('\t%i' * table.shape[1])%tuple(table[1]))
print(rows[2] + ('\t%i' * table.shape[1])%tuple(table[2]))
print(rows[3] + ('\t%.3f' * table.shape[1])%tuple(table[3]))
print(rows[4] + ('\t%.3f' * table.shape[1])%tuple(table[4]))
print(rows[5] + ('\t%.3f' * table.shape[1])%tuple(table[5]))
print()
def main():
LNs = [(100000, 100, 10), (10000, 1000, 7), (1000, 2000, 5), (250, 5000, 5), (100, 10000, 5)]
for L, N, nrep in LNs:
benchmark_moments(L=L, N=N, nrep=nrep, xy=False, remove_mean=False, symmetrize=False, const=False)
benchmark_moments(L=L, N=N, nrep=nrep, xy=False, remove_mean=False, symmetrize=False, const=True)
benchmark_moments(L=L, N=N, nrep=nrep, xy=False, remove_mean=True, symmetrize=False, const=False)
benchmark_moments(L=L, N=N, nrep=nrep, xy=False, remove_mean=True, symmetrize=False, const=True)
benchmark_moments(L=L, N=N, nrep=nrep, xy=True, remove_mean=False, symmetrize=False, const=False)
benchmark_moments(L=L, N=N, nrep=nrep, xy=True, remove_mean=False, symmetrize=False, const=True)
benchmark_moments(L=L, N=N, nrep=nrep, xy=True, remove_mean=False, symmetrize=True, const=False)
benchmark_moments(L=L, N=N, nrep=nrep, xy=True, remove_mean=False, symmetrize=True, const=True)
benchmark_moments(L=L, N=N, nrep=nrep, xy=True, remove_mean=True, symmetrize=False, const=False)
benchmark_moments(L=L, N=N, nrep=nrep, xy=True, remove_mean=True, symmetrize=False, const=True)
benchmark_moments(L=L, N=N, nrep=nrep, xy=True, remove_mean=True, symmetrize=True, const=False)
benchmark_moments(L=L, N=N, nrep=nrep, xy=True, remove_mean=True, symmetrize=True, const=True)
if __name__ == "__main__":
main() | lgpl-3.0 |
Mazecreator/tensorflow | tensorflow/contrib/learn/python/learn/ops/seq2seq_ops.py | 63 | 5870 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Ops for Sequence to Sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
"""Returns predictions and loss for sequence of predictions.
Args:
decoding: List of Tensors with predictions.
labels: List of Tensors with labels.
sampling_decoding: Optional, List of Tensor with predictions to be used
      in sampling. E.g. they shouldn't have a dependency on outputs.
If not provided, decoding is used.
name: Operation name.
Returns:
Predictions and losses tensors.
"""
with ops.name_scope(name, "sequence_classifier", [decoding, labels]):
predictions, xent_list = [], []
for i, pred in enumerate(decoding):
xent_list.append(nn.softmax_cross_entropy_with_logits(
labels=labels[i], logits=pred,
name="sequence_loss/xent_raw{0}".format(i)))
if sampling_decoding:
predictions.append(nn.softmax(sampling_decoding[i]))
else:
predictions.append(nn.softmax(pred))
xent = math_ops.add_n(xent_list, name="sequence_loss/xent")
loss = math_ops.reduce_sum(xent, name="sequence_loss")
return array_ops.stack(predictions, axis=1), loss
def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
"""Processes inputs for Sequence to Sequence models.
Args:
x: Input Tensor [batch_size, input_length, embed_dim].
y: Output Tensor [batch_size, output_length, embed_dim].
input_length: length of input x.
output_length: length of output y.
sentinel: optional first input to decoder and final output expected.
      If sentinel is not provided, zeros are used. Because y is not
      available at sampling time, the shape of sentinel is inferred from x.
name: Operation name.
Returns:
Encoder input from x, and decoder inputs and outputs from y.
"""
with ops.name_scope(name, "seq2seq_inputs", [x, y]):
in_x = array_ops.unstack(x, axis=1)
y = array_ops.unstack(y, axis=1)
if not sentinel:
# Set to zeros of shape of y[0], using x for batch size.
sentinel_shape = array_ops.stack(
[array_ops.shape(x)[0], y[0].get_shape()[1]])
sentinel = array_ops.zeros(sentinel_shape)
sentinel.set_shape(y[0].get_shape())
in_y = [sentinel] + y
out_y = y + [sentinel]
return in_x, in_y, out_y
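# A minimal shape sketch (illustrative; assumes TensorFlow 1.x placeholders):
#
# x = array_ops.placeholder(dtypes.float32, [None, 5, 8])  # input_length=5
# y = array_ops.placeholder(dtypes.float32, [None, 3, 8])  # output_length=3
# in_x, in_y, out_y = seq2seq_inputs(x, y, 5, 3)
# # len(in_x) == 5, len(in_y) == len(out_y) == 4 (zero sentinel added)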
def rnn_decoder(decoder_inputs, initial_state, cell, scope=None):
"""RNN Decoder that creates training and sampling sub-graphs.
Args:
decoder_inputs: Inputs for decoder, list of tensors.
This is used only in training sub-graph.
initial_state: Initial state for the decoder.
cell: RNN cell to use for decoder.
    scope: Scope to use; if None, a new one is created.
Returns:
List of tensors for outputs and states for training and sampling sub-graphs.
"""
with vs.variable_scope(scope or "dnn_decoder"):
states, sampling_states = [initial_state], [initial_state]
outputs, sampling_outputs = [], []
with ops.name_scope("training", values=[decoder_inputs, initial_state]):
for i, inp in enumerate(decoder_inputs):
if i > 0:
vs.get_variable_scope().reuse_variables()
output, new_state = cell(inp, states[-1])
outputs.append(output)
states.append(new_state)
with ops.name_scope("sampling", values=[initial_state]):
for i, _ in enumerate(decoder_inputs):
if i == 0:
sampling_outputs.append(outputs[i])
sampling_states.append(states[i])
else:
sampling_output, sampling_state = cell(sampling_outputs[-1],
sampling_states[-1])
sampling_outputs.append(sampling_output)
sampling_states.append(sampling_state)
return outputs, states, sampling_outputs, sampling_states
def rnn_seq2seq(encoder_inputs,
decoder_inputs,
encoder_cell,
decoder_cell=None,
dtype=dtypes.float32,
scope=None):
"""RNN Sequence to Sequence model.
Args:
encoder_inputs: List of tensors, inputs for encoder.
decoder_inputs: List of tensors, inputs for decoder.
encoder_cell: RNN cell to use for encoder.
decoder_cell: RNN cell to use for decoder, if None encoder_cell is used.
dtype: Type to initialize encoder state with.
    scope: Scope to use; if None, a new one is created.
Returns:
List of tensors for outputs and states for training and sampling sub-graphs.
"""
with vs.variable_scope(scope or "rnn_seq2seq"):
_, last_enc_state = rnn.static_rnn(
encoder_cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, last_enc_state, decoder_cell or
encoder_cell)
| apache-2.0 |
manasapte/pants | src/python/pants/bin/exiter.py | 13 | 4940 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import datetime
import logging
import os
import sys
import traceback
from pants.util.dirutil import safe_open
logger = logging.getLogger(__name__)
class Exiter(object):
"""A class that provides standard runtime exit and global exception handling behavior.
The expected method call order of this class is as follows:
1) Call Exiter.set_except_hook() to set sys.excepthook to the internal exception hook. This
should happen as early as possible to ensure any/all exceptions are handled by the hook.
2) Call Exiter.apply_options() to set traceback printing behavior via an Options object.
3) Perform other operations as normal.
4) Call Exiter.exit(), Exiter.exit_and_fail() or exiter_inst() when you wish to exit the runtime.
"""
def __init__(self, exiter=sys.exit, formatter=traceback.format_tb, print_backtraces=True):
"""
:param func exiter: A function to be called to conduct the final exit of the runtime. (Optional)
:param func formatter: A function to be called to format any encountered tracebacks. (Optional)
:param bool print_backtraces: Whether or not to print backtraces by default. Can be
overridden by Exiter.apply_options(). (Optional)
"""
# Since we have some exit paths that run via the sys.excepthook,
# symbols we use can become garbage collected before we use them; ie:
# we can find `sys` and `traceback` are `None`. As a result we capture
# all symbols we need here to ensure we function in excepthook context.
# See: http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit
self._exit = exiter
self._format_tb = formatter
self._should_print_backtrace = print_backtraces
self._workdir = None
def __call__(self, *args, **kwargs):
"""Map class calls to self.exit() to support sys.exit() fungibility."""
return self.exit(*args, **kwargs)
def apply_options(self, options):
"""Applies global configuration options to internal behavior.
:param Options options: An instance of an Options object to fetch global options from.
"""
self._should_print_backtrace = options.for_global_scope().print_exception_stacktrace
self._workdir = options.for_global_scope().pants_workdir
def exit(self, result=0, msg=None, out=None):
"""Exits the runtime.
:param result: The exit status. Typically a 0 indicating success or a 1 indicating failure, but
can be a string as well. (Optional)
    :param msg: A string message to print to stderr or another custom file descriptor before exiting.
(Optional)
:param out: The file descriptor to emit `msg` to. (Optional)
"""
if msg:
print(msg, file=out or sys.stderr)
self._exit(result)
def exit_and_fail(self, msg=None):
"""Exits the runtime with an exit code of 1, indicating failure.
:param str msg: A string message to print to stderr before exiting. (Optional)
"""
self.exit(result=1, msg=msg)
def handle_unhandled_exception(self, exc_class=None, exc=None, tb=None, add_newline=False):
"""Default sys.excepthook implementation for unhandled exceptions."""
exc_class = exc_class or sys.exc_type
exc = exc or sys.exc_value
tb = tb or sys.exc_traceback
def format_msg(print_backtrace=True):
msg = 'Exception caught: ({})\n'.format(type(exc))
msg += '{}\n'.format(''.join(self._format_tb(tb))) if print_backtrace else '\n'
msg += 'Exception message: {}\n'.format(exc if str(exc) else 'none')
msg += '\n' if add_newline else ''
return msg
# Always output the unhandled exception details into a log file.
self._log_exception(format_msg())
self.exit_and_fail(format_msg(self._should_print_backtrace))
def _log_exception(self, msg):
if self._workdir:
try:
output_path = os.path.join(self._workdir, 'logs', 'exceptions.log')
with safe_open(output_path, 'a') as exception_log:
exception_log.write('timestamp: {}\n'.format(datetime.datetime.now().isoformat()))
exception_log.write('args: {}\n'.format(sys.argv))
exception_log.write('pid: {}\n'.format(os.getpid()))
exception_log.write(msg)
exception_log.write('\n')
except Exception as e:
# This is all error recovery logic so we catch all exceptions from the logic above because
# we don't want to hide the original error.
logger.error('Problem logging original exception: {}'.format(e))
def set_except_hook(self):
"""Sets the global exception hook."""
sys.excepthook = self.handle_unhandled_exception
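# A minimal usage sketch (illustrative only) of the call order documented in
# the Exiter docstring:
#
# exiter = Exiter()
# exiter.set_except_hook()              # 1) install the global hook early
# exiter.apply_options(options)         # 2) once global options are parsed
# exiter.exit_and_fail('fatal error')   # exits the process with status 1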
| apache-2.0 |
Symmetry-Innovations-Pty-Ltd/Python-2.7-for-QNX6.5.0-x86 | usr/pkg/lib/python2.7/encodings/cp1251.py | 593 | 13617 | """ Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1251',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u20ac' # 0x88 -> EURO SIGN
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
u'\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
u'\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
u'\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
u'\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
u'\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
u'\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
u'\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
u'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
u'\u2116' # 0xB9 -> NUMERO SIGN
u'\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
u'\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
u'\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
u'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
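### Usage sketch (illustrative, not part of the generated module): the table
### maps byte 0xC0 to CYRILLIC CAPITAL LETTER A and back.
#
# >>> Codec().decode('\xc0')[0]
# u'\u0410'
# >>> Codec().encode(u'\u0410')[0]
# '\xc0'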
| mit |
cube2matrix/Krypton | test/networkx/algorithms/graphical.py | 30 | 12990 | # -*- coding: utf-8 -*-
"""Test sequences for graphiness.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from collections import defaultdict
import heapq
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
                        'Dan Schult (dschult@colgate.edu)',
                        'Joel Miller (joel.c.miller.research@gmail.com)',
                        'Ben Edwards',
'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['is_graphical',
'is_multigraphical',
'is_pseudographical',
'is_digraphical',
'is_valid_degree_sequence_erdos_gallai',
'is_valid_degree_sequence_havel_hakimi',
'is_valid_degree_sequence', # deprecated
]
def is_graphical(sequence, method='eg'):
"""Returns True if sequence is a valid degree sequence.
A degree sequence is valid if some graph can realize it.
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
method : "eg" | "hh"
The method used to validate the degree sequence.
"eg" corresponds to the Erdős-Gallai algorithm, and
"hh" to the Havel-Hakimi algorithm.
Returns
-------
valid : bool
True if the sequence is a valid degree sequence and False if not.
Examples
--------
>>> G = nx.path_graph(4)
>>> sequence = G.degree().values()
>>> nx.is_valid_degree_sequence(sequence)
True
References
----------
Erdős-Gallai
[EG1960]_, [choudum1986]_
Havel-Hakimi
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
if method == 'eg':
valid = is_valid_degree_sequence_erdos_gallai(list(sequence))
elif method == 'hh':
valid = is_valid_degree_sequence_havel_hakimi(list(sequence))
else:
msg = "`method` must be 'eg' or 'hh'"
raise nx.NetworkXException(msg)
return valid
is_valid_degree_sequence = is_graphical
def _basic_graphical_tests(deg_sequence):
    # Perform some basic sanity checks on the sequence
if not nx.utils.is_list_of_ints(deg_sequence):
raise nx.NetworkXUnfeasible
p = len(deg_sequence)
num_degs = [0]*p
dmax, dmin, dsum, n = 0, p, 0, 0
for d in deg_sequence:
# Reject if degree is negative or larger than the sequence length
if d<0 or d>=p:
raise nx.NetworkXUnfeasible
# Process only the non-zero integers
elif d>0:
dmax, dmin, dsum, n = max(dmax,d), min(dmin,d), dsum+d, n+1
num_degs[d] += 1
# Reject sequence if it has odd sum or is oversaturated
if dsum%2 or dsum>n*(n-1):
raise nx.NetworkXUnfeasible
return dmax,dmin,dsum,n,num_degs
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation proceeds using the Havel-Hakimi theorem.
    Worst-case run time is O(s), where s is the sum of the sequence.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
The ZZ condition says that for the sequence d if
.. math::
        |d| \geq \frac{(\max(d) + \min(d) + 1)^2}{4\min(d)}
then d is graphical. This was shown in Theorem 6 in [1]_.
References
----------
.. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
try:
dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
return True
modstubs = [0]*(dmax+1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while num_degs[dmax] == 0:
            dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n-1:
return False
# Remove largest stub in list
num_degs[dmax], n = num_degs[dmax]-1, n-1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while num_degs[k] == 0:
k -= 1
num_degs[k], n = num_degs[k]-1, n-1
if k > 1:
modstubs[mslen] = k-1
mslen += 1
# Add back to the list any non-zero stubs that were removed
for i in range(mslen):
stub = modstubs[i]
num_degs[stub], n = num_degs[stub]+1, n+1
return True
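# A minimal usage sketch (illustrative): [3, 3, 2, 2, 1, 1] is realizable by a
# simple graph, while [4, 1, 1] is rejected because a degree may not reach the
# sequence length.
#
# >>> is_valid_degree_sequence_havel_hakimi([3, 3, 2, 2, 1, 1])
# True
# >>> is_valid_degree_sequence_havel_hakimi([4, 1, 1])
# False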
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation is done using the Erdős-Gallai theorem [EG1960]_.
Parameters
----------
deg_sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
This implementation uses an equivalent form of the Erdős-Gallai criterion.
    Worst-case run time is O(n), where n is the length of the sequence.
Specifically, a sequence d is graphical if and only if the
sum of the sequence is even and for all strong indices k in the sequence,
.. math::
\sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_i,k)
= k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )
A strong index k is any index where `d_k \geq k` and the value `n_j` is the
number of occurrences of j in d. The maximal strong index is called the
Durfee index.
This particular rearrangement comes from the proof of Theorem 3 in [2]_.
The ZZ condition says that for the sequence d if
.. math::
        |d| \geq \frac{(\max(d) + \min(d) + 1)^2}{4\min(d)}
then d is graphical. This was shown in Theorem 6 in [2]_.
References
----------
.. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
Discrete Mathematics, 265, pp. 417-420 (2003).
.. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[EG1960]_, [choudum1986]_
"""
try:
dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
return True
# Perform the EG checks using the reformulation of Zverovich and Zverovich
k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
for dk in range(dmax, dmin-1, -1):
if dk < k+1: # Check if already past Durfee index
return True
if num_degs[dk] > 0:
run_size = num_degs[dk] # Process a run of identical-valued degrees
if dk < k+run_size: # Check if end of run is past Durfee index
run_size = dk-k # Adjust back to Durfee index
sum_deg += run_size * dk
for v in range(run_size):
sum_nj += num_degs[k+v]
sum_jnj += (k+v) * num_degs[k+v]
k += run_size
if sum_deg > k*(n-1) - k*sum_nj + sum_jnj:
return False
return True
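# A minimal usage sketch (illustrative): [3, 3, 3, 3] is the degree sequence
# of K4, while [3, 3, 1, 1] fails the Erdős-Gallai inequality at k = 2.
#
# >>> is_valid_degree_sequence_erdos_gallai([3, 3, 3, 3])
# True
# >>> is_valid_degree_sequence_erdos_gallai([3, 3, 1, 1])
# False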
def is_multigraphical(sequence):
"""Returns True if some multigraph can realize the sequence.
Parameters
----------
    sequence : list
A list of integers
Returns
-------
valid : bool
        True if the sequence is a multigraphic degree sequence and False if not.
Notes
-----
The worst-case run time is O(n) where n is the length of the sequence.
References
----------
.. [1] S. L. Hakimi. "On the realizability of a set of integers as
degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
(1962).
"""
deg_sequence = list(sequence)
if not nx.utils.is_list_of_ints(deg_sequence):
return False
dsum, dmax = 0, 0
for d in deg_sequence:
if d<0:
return False
dsum, dmax = dsum+d, max(dmax,d)
if dsum%2 or dsum<2*dmax:
return False
return True
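# A minimal usage sketch (illustrative): [3, 3, 2] is multigraphical (parallel
# edges are allowed), while [5, 1, 1, 1] is not, since 2*max(d) > sum(d).
#
# >>> is_multigraphical([3, 3, 2])
# True
# >>> is_multigraphical([5, 1, 1, 1])
# False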
def is_pseudographical(sequence):
"""Returns True if some pseudograph can realize the sequence.
Every nonnegative integer sequence with an even sum is pseudographical
(see [1]_).
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
Returns
-------
valid : bool
True if the sequence is a pseudographic degree sequence and False if not.
Notes
-----
The worst-case run time is O(n) where n is the length of the sequence.
References
----------
.. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
pp. 778-782 (1976).
"""
s = list(sequence)
if not nx.utils.is_list_of_ints(s):
return False
return sum(s)%2 == 0 and min(s) >= 0
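# A minimal usage sketch (illustrative): any nonnegative sequence with an even
# sum passes, e.g. a single self-loop; an odd sum fails.
#
# >>> is_pseudographical([2])
# True
# >>> is_pseudographical([1])
# False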
def is_digraphical(in_sequence, out_sequence):
r"""Returns True if some directed graph can realize the in- and out-degree
sequences.
Parameters
----------
in_sequence : list or iterable container
A sequence of integer node in-degrees
out_sequence : list or iterable container
A sequence of integer node out-degrees
Returns
-------
valid : bool
        True if the in- and out-degree sequences are digraphical, False if not.
Notes
-----
This algorithm is from Kleitman and Wang [1]_.
    The worst-case runtime is O(s log n), where s and n are the sum and length
    of the sequences, respectively.
References
----------
.. [1] D.J. Kleitman and D.L. Wang
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
in_deg_sequence = list(in_sequence)
out_deg_sequence = list(out_sequence)
if not nx.utils.is_list_of_ints(in_deg_sequence):
return False
if not nx.utils.is_list_of_ints(out_deg_sequence):
return False
# Process the sequences and form two heaps to store degree pairs with
# either zero or non-zero out degrees
sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
maxn = max(nin, nout)
maxin = 0
if maxn==0:
return True
    stubheap, zeroheap = [], []
for n in range(maxn):
in_deg, out_deg = 0, 0
if n<nout:
out_deg = out_deg_sequence[n]
if n<nin:
in_deg = in_deg_sequence[n]
if in_deg<0 or out_deg<0:
return False
sumin, sumout, maxin = sumin+in_deg, sumout+out_deg, max(maxin, in_deg)
if in_deg > 0:
stubheap.append((-1*out_deg, -1*in_deg))
elif out_deg > 0:
zeroheap.append(-1*out_deg)
if sumin != sumout:
return False
heapq.heapify(stubheap)
heapq.heapify(zeroheap)
modstubs = [(0,0)]*(maxin+1)
# Successively reduce degree sequence by removing the maximum out degree
while stubheap:
# Take the first value in the sequence with non-zero in degree
        (freeout, freein) = heapq.heappop(stubheap)
freein *= -1
if freein > len(stubheap)+len(zeroheap):
return False
# Attach out stubs to the nodes with the most in stubs
mslen = 0
for i in range(freein):
if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
stubout = heapq.heappop(zeroheap)
stubin = 0
else:
(stubout, stubin) = heapq.heappop(stubheap)
if stubout == 0:
return False
# Check if target is now totally connected
if stubout+1<0 or stubin<0:
modstubs[mslen] = (stubout+1, stubin)
mslen += 1
# Add back the nodes to the heap that still have available stubs
for i in range(mslen):
stub = modstubs[i]
if stub[1] < 0:
heapq.heappush(stubheap, stub)
else:
heapq.heappush(zeroheap, stub[0])
if freeout<0:
heapq.heappush(zeroheap, freeout)
return True
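# A minimal usage sketch (illustrative): a 2-cycle realizes ([1, 1], [1, 1]),
# but ([2, 0], [0, 2]) would need two parallel arcs and is rejected.
#
# >>> is_digraphical([1, 1], [1, 1])
# True
# >>> is_digraphical([2, 0], [0, 2])
# False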
| gpl-2.0 |
scottdangelo/RemoveVolumeMangerLocks | cinder/scheduler/filters/affinity_filter.py | 25 | 3997 | # Copyright 2014, eBay Inc.
# Copyright 2014, OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from cinder.openstack.common.scheduler import filters
from cinder.volume import api as volume
LOG = logging.getLogger(__name__)
class AffinityFilter(filters.BaseHostFilter):
def __init__(self):
self.volume_api = volume.API()
class DifferentBackendFilter(AffinityFilter):
"""Schedule volume on a different back-end from a set of volumes."""
def host_passes(self, host_state, filter_properties):
context = filter_properties['context']
scheduler_hints = filter_properties.get('scheduler_hints') or {}
affinity_uuids = scheduler_hints.get('different_host', [])
        # Scheduler hint verification: affinity_uuids can be a list of uuids
        # or a single uuid. The checks here make sure every string in the
        # list looks like a uuid; otherwise, this filter fails to pass. Note
        # that the filter does *NOT* ignore strings that don't look like
        # uuids; it is better to fail the request than to serve it wrong.
if isinstance(affinity_uuids, list):
for uuid in affinity_uuids:
if uuidutils.is_uuid_like(uuid):
continue
else:
return False
elif uuidutils.is_uuid_like(affinity_uuids):
affinity_uuids = [affinity_uuids]
else:
            # Neither a list nor a uuid-like string; don't pass it
            # to the DB query, to avoid potential risk.
return False
if affinity_uuids:
return not self.volume_api.get_all(
context, filters={'host': host_state.host,
'id': affinity_uuids,
'deleted': False})
# With no different_host key
return True
class SameBackendFilter(AffinityFilter):
"""Schedule volume on the same back-end as another volume."""
def host_passes(self, host_state, filter_properties):
context = filter_properties['context']
scheduler_hints = filter_properties.get('scheduler_hints') or {}
affinity_uuids = scheduler_hints.get('same_host', [])
        # Scheduler hint verification: affinity_uuids can be a list of uuids
        # or a single uuid. The checks here make sure every string in the
        # list looks like a uuid; otherwise, this filter fails to pass. Note
        # that the filter does *NOT* ignore strings that don't look like
        # uuids; it is better to fail the request than to serve it wrong.
if isinstance(affinity_uuids, list):
for uuid in affinity_uuids:
if uuidutils.is_uuid_like(uuid):
continue
else:
return False
elif uuidutils.is_uuid_like(affinity_uuids):
affinity_uuids = [affinity_uuids]
else:
            # Neither a list nor a uuid-like string; don't pass it
            # to the DB query, to avoid potential risk.
return False
if affinity_uuids:
return self.volume_api.get_all(
context, filters={'host': host_state.host,
'id': affinity_uuids,
'deleted': False})
# With no same_host key
return True
| apache-2.0 |
09zwcbupt/undergrad_thesis | ext/poxdesk/qx/tool/pylib/ecmascript/frontend/Comment_2.py | 1 | 27369 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2012 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Sebastian Werner (wpbasti)
# * Fabian Jakobs (fjakobs)
# * Thomas Herchenroeder (thron7)
#
################################################################################
import sys, string, re
from ecmascript.frontend import tree
from generator import Context as context
from textile import textile
##
# Many Regexp's
S_INLINE_COMMENT = "//.*"
R_INLINE_COMMENT = re.compile("^" + S_INLINE_COMMENT + "$")
R_INLINE_COMMENT_TIGHT = re.compile("^//\S+")
R_INLINE_COMMENT_PURE = re.compile("^//")
S_BLOCK_COMMENT = "/\*(?:[^*]|[\n]|(?:\*+(?:[^*/]|[\n])))*\*+/"
R_BLOCK_COMMENT = re.compile("^" + S_BLOCK_COMMENT + "$")
R_BLOCK_COMMENT_JAVADOC = re.compile("^/\*\*")
R_BLOCK_COMMENT_QTDOC = re.compile("^/\*!")
R_BLOCK_COMMENT_AREA = re.compile("^/\*\n\s*\*\*\*\*\*")
R_BLOCK_COMMENT_DIVIDER = re.compile("^/\*\n\s*----")
R_BLOCK_COMMENT_HEADER = re.compile("^/\* \*\*\*\*")
R_BLOCK_COMMENT_TIGHT_START = re.compile("^/\*\S+")
R_BLOCK_COMMENT_TIGHT_END = re.compile("\S+\*/$")
R_BLOCK_COMMENT_PURE_START = re.compile("^/\*")
R_BLOCK_COMMENT_PURE_END = re.compile("\*/$")
R_ATTRIBUTE = re.compile('[^{]@(\w+)\s*')
R_JAVADOC_STARS = re.compile(r'^\s*\*')
R_NAMED_TYPE = re.compile(r'^\s*([a-zA-Z0-9_\.#-]+)\s*({([^}]+)})?')
R_SIMPLE_TYPE = re.compile(r'^\s*({([^}]+)})?')
VARPREFIXES = {
"a" : "Array",
"b" : "Boolean",
"d" : "Date",
"f" : "Function",
"i" : "Integer",
"h" : "Map",
"m" : "Map",
"n" : "Number",
"o" : "Object",
"r" : "RegExp",
"s" : "String",
"v" : "var",
"w" : "Widget"
}
VARNAMES = {
"a" : "Array",
"arr" : "Array",
"doc" : "Document",
"e" : "Event",
"ev" : "Event",
"evt" : "Event",
"el" : "Element",
"elem" : "Element",
"elm" : "Element",
"ex" : "Exception",
"exc" : "Exception",
"flag" : "Boolean",
"force" : "Boolean",
"f" : "Function",
"func" : "Function",
"h" : "Map",
"hash" : "Map",
"map" : "Map",
"node" : "Node",
"n" : "Number",
"num" : "Number",
"o" : "Object",
"obj" : "Object",
"reg" : "RegExp",
"s" : "String",
"str" : "String",
"win" : "Window"
}
VARDESC = {
"propValue" : "Current value",
"propOldValue" : "Previous value",
"propData" : "Property configuration map"
}
def nameToType(name):
typ = "var"
# Evaluate type from name
if name in VARNAMES:
typ = VARNAMES[name]
elif len(name) > 1:
if name[1].isupper():
if name[0] in VARPREFIXES:
typ = VARPREFIXES[name[0]]
return typ
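# A few illustrative mappings (not part of the original module):
#
# >>> nameToType("elem")
# 'Element'
# >>> nameToType("sLabel")   # "s" prefix + CamelCase -> String
# 'String'
# >>> nameToType("foo")
# 'var'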
def nameToDescription(name):
desc = "TODOC"
if name in VARDESC:
desc = VARDESC[name]
return desc
##
# Parsed comments are represented as lists of "attributes". This is a schematic:
# [{
# 'category' : 'description'|'param'|'throws'|'return'| ... (prob. all '@' tags),
# 'text' : <descriptive string>,
# 'name' : <name e.g. param name>,
# 'defaultValue' : <param default value>,
# 'type' : [{ (array for alternatives, e.g. "{Map|null}")
# 'type': 'Map'|'String'|..., (from e.g. "{String[]}")
# 'dimensions': <int> (0 = scalar, 1 = array, ...)
# }]
# }]
#
def getAttrib(attribList, category):
for attrib in attribList:
if attrib["category"] == category:
return attrib
def getParam(attribList, name):
for attrib in attribList:
if attrib["category"] == "param":
if "name" in attrib and attrib["name"] == name:
return attrib
def attribHas(attrib, key):
if attrib != None and key in attrib and not attrib[key] in ["", None]:
return True
return False
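# A minimal sketch (illustrative) of querying the attribute list described
# in the schematic above:
#
# attribs = [{"category": "description", "text": "Renders the widget"},
#            {"category": "param", "name": "el", "text": "..."}]
# getAttrib(attribs, "description")["text"]    # -> "Renders the widget"
# attribHas(getParam(attribs, "el"), "type")   # -> False (no type parsed)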
##
# Holds a string representing a JS comment
#
class Comment(object):
def __init__(self, s):
self.string = s
def correctInline(self):
if R_INLINE_COMMENT_TIGHT.match(self.string):
return R_INLINE_COMMENT_PURE.sub("// ", self.string)
return self.string
def correctBlock(self):
source = self.string
if not self.getFormat() in ["javadoc", "qtdoc"]:
if R_BLOCK_COMMENT_TIGHT_START.search(self.string):
source = R_BLOCK_COMMENT_PURE_START.sub("/* ", self.string)
if R_BLOCK_COMMENT_TIGHT_END.search(source):
source = R_BLOCK_COMMENT_PURE_END.sub(" */", self.string)
return source
def correct(self):
if self.string[:2] == "//":
return self.correctInline()
else:
return self.correctBlock()
def isMultiLine(self):
return self.string.find("\n") != -1
def getFormat(self):
if R_BLOCK_COMMENT_JAVADOC.search(self.string):
return "javadoc"
elif R_BLOCK_COMMENT_QTDOC.search(self.string):
return "qtdoc"
elif R_BLOCK_COMMENT_AREA.search(self.string):
return "area"
elif R_BLOCK_COMMENT_DIVIDER.search(self.string):
return "divider"
elif R_BLOCK_COMMENT_HEADER.search(self.string):
return "header"
return "block"
def qt2javadoc(self):
attribList = self.parse(False)
res = "/**"
desc = self.getAttrib(attribList, "description")
if "text" in desc:
desc = desc["text"]
else:
desc = ""
if "\n" in desc:
res += "\n"
for line in desc.split("\n"):
res += " * %s\n" % line
res += " "
else:
res += " %s " % desc
res += "*/"
return res
def parse(self, format=True):
# print "Parse: " + intext
# Strip "/**", "/*!" and "*/"
intext = self.string[3:-2]
# Strip leading stars in every line
text = ""
for line in intext.split("\n"):
text += R_JAVADOC_STARS.sub("", line) + "\n"
# Autodent
text = Text(text).autoOutdent()
# Search for attributes
desc = { "category" : "description", "text" : "" }
attribs = [desc]
pos = 0
while True:
# this is necessary to match ^ at the beginning of a line
if pos > 0 and text[pos-1] == "\n": pos -= 1
match = R_ATTRIBUTE.search(text, pos)
if match == None:
prevText = text[pos:].rstrip()
if len(attribs) == 0:
desc["text"] = prevText
else:
attribs[-1]["text"] = prevText
break
prevText = text[pos:match.start(0)].rstrip()
pos = match.end(0)
if len(attribs) == 0:
desc["text"] = prevText
else:
attribs[-1]["text"] = prevText
attribs.append({ "category" : match.group(1), "text" : "" })
# parse details
for attrib in attribs:
self.parseDetail(attrib, format)
return attribs
def parseDetail(self, attrib, format=True):
text = attrib["text"]
if attrib["category"] in ["param", "event", "see", "state", "appearance", "childControl"]:
match = R_NAMED_TYPE.search(text)
else:
match = R_SIMPLE_TYPE.search(text)
if match:
text = text[match.end(0):]
if attrib["category"] in ["param", "event", "see", "state", "appearance", "childControl"]:
attrib["name"] = match.group(1)
#print ">>> NAME: %s" % match.group(1)
remain = match.group(3)
else:
remain = match.group(2)
if remain != None:
defIndex = remain.rfind("?")
if defIndex != -1:
attrib["defaultValue"] = remain[defIndex+1:].strip()
remain = remain[0:defIndex].strip()
#print ">>> DEFAULT: %s" % attrib["defaultValue"]
typValues = []
for typ in remain.split("|"):
typValue = typ.strip()
arrayIndex = typValue.find("[")
if arrayIndex != -1:
arrayValue = (len(typValue) - arrayIndex) / 2
typValue = typValue[0:arrayIndex]
else:
arrayValue = 0
typValues.append({ "type" : typValue, "dimensions" : arrayValue })
if len(typValues) > 0:
attrib["type"] = typValues
#print ">>> TYPE: %s" % attrib["type"]
if format:
attrib["text"] = self.formatText(text)
else:
attrib["text"] = self.cleanupText(text)
if attrib["text"] == "":
del attrib["text"]
def cleanupText(self, text):
#print "============= INTEXT ========================="
#print text
text = text.replace("<p>", "\n")
text = text.replace("<br/>", "\n")
text = text.replace("<br>", "\n")
text = text.replace("</p>", " ")
# on single lines strip the content
if not "\n" in text:
text = text.strip()
else:
newline = False
lines = text.split("\n")
text = u""
for line in lines:
if line == "":
if not newline:
newline = True
else:
if text != "":
text += "\n"
if newline:
text += "\n"
newline = False
text += line
#print "============= OUTTEXT ========================="
#print text
# Process TODOC the same as no text
if text == "TODOC":
return ""
return text
##
# JSDoc can contain macros, which are expanded here.
#
def expandMacros(self, text):
_mmap = {
"qxversion" : (context.jobconf.get("let/QOOXDOO_VERSION", "!!TODO!!") if
hasattr(context,'jobconf') else "[undef]" ) # ecmalint.py doesn't know jobs
}
templ = string.Template(text)
text = templ.safe_substitute(_mmap)
return text
def formatText(self, text):
text = self.cleanupText(text)
#if "\n" in text:
# print
# print "------------- ORIGINAL ----------------"
# print text
text = text.replace("<pre", "\n\n<pre").replace("</pre>", "</pre>\n\n")
text = self.expandMacros(text)
# encode to ascii leads into a translation of umlauts to their XML code.
text = unicode(textile.textile(text.encode("utf-8"), output="ascii"))
#if "\n" in text:
# print "------------- TEXTILED ----------------"
# print text
return text
def splitText(self, attrib=True):
res = ""
first = True
for line in self.string.split("\n"):
if attrib:
if first:
res += " %s\n" % line
else:
res += " * %s\n" % line
else:
res += " * %s\n" % line
first = False
if not res.endswith("\n"):
res += "\n"
return res
@staticmethod
def parseType(vtype):
typeText = ""
firstType = True
for entry in vtype:
if not firstType:
typeText += " | "
typeText += entry["type"]
if "dimensions" in entry and entry["dimensions"] > 0:
typeText += "[]" * entry["dimensions"]
firstType = False
return typeText
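# A minimal sketch (illustrative): parseType renders the type structure
# produced by parseDetail.
#
# >>> Comment.parseType([{"type": "String", "dimensions": 1},
# ...                    {"type": "null", "dimensions": 0}])
# 'String[] | null'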
##
# Helper class for text-level operations
#
class Text(object):
def __init__(self, s):
self.string = s
##
# Remove a fixed number of spaces from the beginning of each line
# in text.
#
# @param indent {Int} number of spaces to remove
#
def outdent(self, indent):
return re.compile("\n\s{%s}" % indent).sub("\n", self.string)
#def indent(self, source, indent):
# return re.compile("\n").sub("\n" + (" " * indent), source)
##
# Insert <indent> at the beginning of each line in text
#
# @param indent {String} string to insert
#
def indent(self, indent):
return re.compile("\n").sub("\n" + indent, self.string)
def autoOutdent(self):
text = self.string
lines = text.split("\n")
if len(lines) <= 1:
return text.strip()
for line in lines:
if len(line) > 0 and line[0] != " ":
return text
result = ""
for line in lines:
if len(line) >= 0:
result += line[1:]
result += "\n"
return result
# -- Helper functions working on tree nodes ------------------------------------
def hasThrows(node):
if node.type == "throw":
return True
if node.hasChildren():
for child in node.children:
if hasThrows(child):
return True
return False
def getReturns(node, found):
if node.type == "function":
pass
elif node.type == "return":
if node.getChildrenLength(True) > 0:
val = "var"
else:
val = "void"
if node.hasChild("expression"):
expr = node.getChild("expression")
if expr.hasChild("variable"):
var = expr.getChild("variable")
if var.getChildrenLength(True) == 1 and var.hasChild("identifier"):
val = nameToType(var.getChild("identifier").get("name"))
else:
val = "var"
elif expr.hasChild("constant"):
val = expr.getChild("constant").get("constantType")
if val == "number":
val = expr.getChild("constant").get("detail")
elif expr.hasChild("array"):
val = "Array"
elif expr.hasChild("map"):
val = "Map"
elif expr.hasChild("function"):
val = "Function"
elif expr.hasChild("call"):
val = "var"
if not val in found:
found.append(val)
elif node.hasChildren():
for child in node.children:
getReturns(child, found)
return found
def findComment(node):
def findCommentBefore(node):
while node:
if node.hasChild("commentsBefore"):
for comment in node.getChild("commentsBefore").children:
if comment.get("detail") in ["javadoc", "qtdoc"]:
comments = parseNode(node)
return comments
if node.hasParent():
node = node.parent
else:
return None
def findCommentAfter(node):
while node:
if node.hasChild("commentsBefore"):
for comment in node.getChild("commentsBefore").children:
if comment.get("detail") in ["javadoc", "qtdoc"]:
comments = parseNode(node)
return comments
if node.hasChildren():
node = node.children[0]
else:
return None
if node.type == "file":
return findCommentAfter(node)
else:
return findCommentBefore(node)
def parseNode(node):
"""Takes the last doc comment from the commentsBefore child, parses it and
returns a Node representing the doc comment"""
# Find the last doc comment
commentsBefore = node.getChild("commentsBefore", False)
if commentsBefore and commentsBefore.hasChildren():
for child in commentsBefore.children:
if child.type == "comment" and child.get("detail") in ["javadoc", "qtdoc"]:
return Comment(child.get("text")).parse()
return []
##
# fill(node) -- look for function definitions in the tree represented by <node>,
# look for their corresponding comment and amend it, or create it in the first
# place
#
def fill(node):
if node.type in ["comment", "commentsBefore", "commentsAfter"]:
return
if node.hasParent():
target = node
if node.type == "function":
name = node.get("name", False)
else:
name = ""
alternative = False
assignType = None
if name != None:
assignType = "function"
# move to hook operation
while target.parent.type in ["first", "second", "third"] and target.parent.parent.type == "operation" and target.parent.parent.get("operator") == "HOOK":
alternative = True
target = target.parent.parent
# move comment to assignment
while target.parent.type == "right" and target.parent.parent.type == "assignment":
target = target.parent.parent
if target.hasChild("left"):
left = target.getChild("left")
if left and left.hasChild("variable"):
var = left.getChild("variable")
last = var.getLastChild(False, True)
if last and last.type == "identifier":
name = last.get("name")
assignType = "object"
for child in var.children:
if child.type == "identifier":
if child.get("name") in ["prototype", "Proto"]:
assignType = "member"
elif child.get("name") in ["class", "base", "Class"]:
assignType = "static"
elif target.parent.type == "definition":
name = target.parent.get("identifier")
assignType = "definition"
# move to definition
if target.parent.type == "assignment" and target.parent.parent.type == "definition" and target.parent.parent.parent.getChildrenLength(True) == 1:
target = target.parent.parent.parent
assignType = "function"
# move comment to keyvalue
if target.parent.type == "value" and target.parent.parent.type == "keyvalue":
target = target.parent.parent
name = target.get("key")
assignType = "map"
if name == "construct":
assignType = "constructor"
if target.parent.type == "map" and target.parent.parent.type == "value" and target.parent.parent.parent.type == "keyvalue":
paname = target.parent.parent.parent.get("key")
if paname == "members":
assignType = "member"
elif paname == "statics":
assignType = "static"
# filter stuff, only add comments to member and static values and to all functions
if assignType in ["member", "static"] and node.type == "function":
if not hasattr(target, "documentationAdded") and target.parent.type != "params":
old = []
commentNode = None
# create commentsBefore
if target.hasChild("commentsBefore"):
commentsBefore = target.getChild("commentsBefore")
if commentsBefore.hasChild("comment"):
for child in commentsBefore.children:
if child.get("detail") in ["javadoc", "qtdoc"]:
old = Comment(child.get("text")).parse(False)
commentNode = child
commentNodeIndex = commentsBefore.children.index(child)
break
else:
commentsBefore = tree.Node("commentsBefore")
target.addChild(commentsBefore)
# create comment node
if commentNode == None:
commentNodeIndex = None
commentNode = tree.Node("comment")
commentNode.set("detail", "javadoc")
#if node.type == "function":
# commentNode.set("text", fromFunction(node, assignType, name, alternative, old))
#else:
# commentNode.set("text", fromNode(node, assignType, name, alternative, old))
commentNode.set("text", fromFunction(node, assignType, name, alternative, old))
commentNode.set("multiline", True)
                commentsBefore.addChild(commentNode, commentNodeIndex)
# in case of alternative methods, use the first one, ignore the others
target.documentationAdded = True
if node.hasChildren():
for child in node.children:
fill(child)
def fromNode(node, assignType, name, alternative, old=[]):
#
# description
##############################################################
oldDesc = getAttrib(old, "description")
if attribHas(oldDesc, "text"):
newText = oldDesc["text"]
else:
newText = "{var} TODOC"
if "\n" in newText:
        s = "/**\n%s\n */" % Comment(newText).splitText(False)
else:
s = "/** %s */" % newText
    # normalize whitespace next to the comment delimiters (collapse any
    # accidental double spaces)
    s = s.replace("/**  ", "/** ").replace("  */", " */")
#
# other @attributes
##############################################################
for attrib in old:
cat = attrib["category"]
if cat != "description":
print " * Found unallowed attribute %s in comment for %s (node)" % (cat, name)
return s
def fromFunction(func, assignType, name, alternative, old=[]):
#
# open comment
##############################################################
s = "/**\n"
#
# description
##############################################################
oldDesc = getAttrib(old, "description")
if attribHas(oldDesc, "text"):
newText = oldDesc["text"]
else:
newText = "TODOC"
s += Comment(newText).splitText(False)
s += " *\n"
#
# add @type
##############################################################
# TODO: Remove the @type annotation as it conflicts with JSdoc
# if assignType != None:
# s += " * @type %s\n" % assignType
# else:
# s += " * @type unknown TODOC\n"
#
# add @abstract
##############################################################
oldAbstract = getAttrib(old, "abstract")
first = func.getChild("body").getChild("block").getFirstChild(False, True)
abstract = first and first.type == "throw"
if abstract:
if attribHas(oldAbstract, "text"):
            newText = oldAbstract["text"]
else:
newText = ""
s += " * @abstract%s" % Comment(newText).splitText()
if not s.endswith("\n"):
s += "\n"
elif oldAbstract:
print " * Removing old @abstract for %s" % name
#
# add @param
##############################################################
params = func.getChild("params")
if params.hasChildren():
for child in params.children:
if child.type == "variable":
newName = child.getChild("identifier").get("name")
newType = newTypeText = nameToType(newName)
newDefault = ""
newText = nameToDescription(newName)
oldParam = getParam(old, newName)
# Get type and text from old content
if oldParam:
if attribHas(oldParam, "type"):
newTypeText = Comment.parseType(oldParam["type"])
if attribHas(oldParam, "defaultValue"):
newDefault = " ? %s" % oldParam["defaultValue"]
if attribHas(oldParam, "text"):
newText = oldParam["text"].strip()
s += " * @param %s {%s%s}%s" % (newName, newTypeText, newDefault, Comment(newText).splitText())
if not s.endswith("\n"):
s += "\n"
#
# add @return
##############################################################
if name != "construct":
oldReturn = getAttrib(old, "return")
newType = "void"
newText = ""
# Get type and text from old content
if oldReturn:
if attribHas(oldReturn, "type"):
newType = Comment.parseType(oldReturn["type"])
if attribHas(oldReturn, "text"):
newText = oldReturn["text"].strip()
# Try to autodetect the type
if newType == "void":
returns = getReturns(func.getChild("body"), [])
if len(returns) > 0:
newType = " | ".join(returns)
            elif name != None and len(name) > 2 and name[:2] == "is" and name[2].isupper():
newType = "Boolean"
# Add documentation hint in non void cases
if newType != "void":
if newText == "":
newText = "TODOC"
s += " * @return {%s}%s" % (newType, Comment(newText).splitText())
if not s.endswith("\n"):
s += "\n"
#
# add @throws
##############################################################
oldThrows = getAttrib(old, "throws")
if hasThrows(func):
if oldThrows and attribHas(oldThrows, "text"):
newText = oldThrows["text"]
elif abstract:
newText = "the abstract function warning."
else:
newText = "TODOC"
s += " * @throws%s" % Comment(newText).splitText()
if not s.endswith("\n"):
s += "\n"
elif oldThrows:
print " * Removing old @throw attribute in comment for %s" % name
#
# other @attributes
##############################################################
for attrib in old:
cat = attrib["category"]
if cat in ["see", "author", "deprecated", "exception", "since", "version", "abstract", "overridden", "lint"]:
s += " * @%s" % cat
if cat == "see":
if attribHas(attrib, "name"):
s += Comment(attrib["name"]).splitText()
elif attribHas(attrib, "text"):
s += Comment(attrib["text"]).splitText()
if not s.endswith("\n"):
s += "\n"
elif not cat in ["description", "type", "abstract", "param", "return", "throws", "link", "internal", "signature"]:
print " * Found unallowed attribute %s in comment for %s (function)" % (cat, name)
#
# close comment
##############################################################
s += " */"
return s
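# For orientation, fromFunction() emits text shaped roughly like the following
# for a member function "isEnabled(value)" with no pre-existing comment (a
# sketch -- exact types and wrapping depend on nameToType/Comment.splitText):
#
#   /**
#    * TODOC
#    *
#    * @param value {var} TODOC
#    * @return {Boolean} TODOC
#    */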
| gpl-3.0 |
beni55/django | tests/forms_tests/tests/test_input_formats.py | 313 | 38501 | from datetime import date, datetime, time
from django import forms
from django.test import SimpleTestCase, override_settings
from django.utils.translation import activate, deactivate
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"], USE_L10N=True)
class LocalizedTimeTests(SimpleTestCase):
def setUp(self):
# nl/formats.py has customized TIME_INPUT_FORMATS:
# ['%H:%M:%S', '%H.%M:%S', '%H.%M', '%H:%M']
activate('nl')
def tearDown(self):
deactivate()
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '13:30:05')
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
# ISO formats are accepted, even if not specified in formats.py
result = f.clean('13:30:05.000155')
self.assertEqual(result, time(13, 30, 5, 155))
def test_localized_timeField(self):
"Localized TimeFields act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
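# For reference, the strptime directives exercised throughout these tests are
# %H (24-hour), %I (12-hour), %M (minutes), %S (seconds) and %p (AM/PM), so a
# quick sanity check outside the test suite looks like:
#
#   >>> from datetime import datetime
#   >>> datetime.strptime('1:30:05 PM', '%I:%M:%S %p').time()
#   datetime.time(13, 30, 5)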
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"])
class CustomTimeInputFormatsTests(SimpleTestCase):
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '01:30:05 PM')
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_localized_timeField(self):
"Localized TimeFields act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '01:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('01:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM")
class SimpleTimeFormatTests(SimpleTestCase):
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField(self):
"Localized TimeFields in a non-localized environment act as unlocalized widgets"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"])
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"], localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
@override_settings(DATE_INPUT_FORMATS=["%d/%m/%Y", "%d-%m-%Y"], USE_L10N=True)
class LocalizedDateTests(SimpleTestCase):
def setUp(self):
activate('de')
def tearDown(self):
deactivate()
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21/12/2010')
# ISO formats are accepted, even if not specified in formats.py
self.assertEqual(f.clean('2010-12-21'), date(2010, 12, 21))
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21.12.10')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField(self):
"Localized DateFields act as unlocalized widgets"
f = forms.DateField(localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.10')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
self.assertRaises(forms.ValidationError, f.clean, '21/12/2010')
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
self.assertRaises(forms.ValidationError, f.clean, '21/12/2010')
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
@override_settings(DATE_INPUT_FORMATS=["%d.%m.%Y", "%d-%m-%Y"])
class CustomDateInputFormatsTests(SimpleTestCase):
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField(self):
"Localized DateFields act as unlocalized widgets"
f = forms.DateField(localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
class SimpleDateFormatTests(SimpleTestCase):
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('12/21/2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
def test_localized_dateField(self):
"Localized DateFields in a non-localized environment act as unlocalized widgets"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('12/21/2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"], USE_L10N=True)
class LocalizedDateTimeTests(SimpleTestCase):
def setUp(self):
activate('de')
def tearDown(self):
deactivate()
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
# ISO formats are accepted, even if not specified in formats.py
self.assertEqual(f.clean('2010-12-21 13:30:05'), datetime(2010, 12, 21, 13, 30, 5))
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010 13:30:05')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21.12.2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_localized_dateTimeField(self):
"Localized DateTimeFields act as unlocalized widgets"
f = forms.DateTimeField(localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30.05 12.21.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30 12-21-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30.05 12.21.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30 12-21-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"])
class CustomDateTimeInputFormatsTests(SimpleTestCase):
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21/12/2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '01:30:05 PM 21/12/2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_localized_dateTimeField(self):
"Localized DateTimeFields act as unlocalized widgets"
f = forms.DateTimeField(localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21/12/2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '01:30:05 PM 21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
class SimpleDateTimeFormatTests(SimpleTestCase):
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('12/21/2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
def test_localized_dateTimeField(self):
"Localized DateTimeFields in a non-localized environment act as unlocalized widgets"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('12/21/2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21.12.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:00")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21.12.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:00")
| bsd-3-clause |
brchiu/tensorflow | tensorflow/contrib/keras/api/keras/applications/mobilenet/__init__.py | 39 | 1111 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.mobilenet import decode_predictions
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.mobilenet import preprocess_input
del absolute_import
del division
del print_function
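# Typical usage of the symbols re-exported above (a sketch; assumes TensorFlow
# can download the ImageNet weights and that `images` is a float array of
# shape (N, 224, 224, 3)):
#
#   model = MobileNet(weights='imagenet')
#   preds = model.predict(preprocess_input(images))
#   print(decode_predictions(preds, top=3))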
| apache-2.0 |
andim27/magiccamp | build/lib/django/contrib/gis/gdal/__init__.py | 397 | 2173 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.datasource import DataSource
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, gdal_release_date, GEOJSON, GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
from django.contrib.gis.gdal.geometries import OGRGeometry
HAS_GDAL = True
except:
HAS_GDAL, GEOJSON = False, False
try:
from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
# No ctypes, but don't raise an exception.
pass
from django.contrib.gis.gdal.error import check_err, OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
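# Typical guard for the optional GDAL dependency (a sketch):
#
#   from django.contrib.gis import gdal
#   if gdal.HAS_GDAL:
#       srs = gdal.SpatialReference('WGS84')
#   else:
#       pass  # fall back to code paths that do not require GDAL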
| bsd-3-clause |
JioCloud/python-glanceclient | glanceclient/openstack/common/apiclient/base.py | 3 | 16202 | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
import six
from six.moves.urllib import parse
from glanceclient.openstack.common.apiclient import exceptions
from glanceclient.openstack.common import strutils
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
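# A quick sketch of the convenience getid() provides (hypothetical resource):
#
#   >>> class Server(object):
#   ...     id = 'abc123'
#   ...
#   >>> getid(Server())   # resource object -> its id
#   'abc123'
#   >>> getid('abc123')   # a plain id passes through unchanged
#   'abc123'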
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param **args: args to be passed to every hook function
:param **kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
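# Usage sketch (hypothetical hook; any callable works):
#
#   >>> calls = []
#   >>> HookableMixin.add_hook('__pre_parse_args__', calls.append)
#   >>> HookableMixin.run_hooks('__pre_parse_args__', '--debug')
#   >>> calls
#   ['--debug']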
class BaseManager(HookableMixin):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, client):
"""Initializes BaseManager with `client`.
:param client: instance of BaseClient descendant for HTTP requests
"""
super(BaseManager, self).__init__()
self.client = client
def _list(self, url, response_key, obj_class=None, json=None):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
"""
if json:
body = self.client.post(url, json=json).json()
else:
body = self.client.get(url).json()
if obj_class is None:
obj_class = self.resource_class
data = body[response_key]
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'
"""
body = self.client.get(url).json()
return self.resource_class(self, body[response_key], loaded=True)
def _head(self, url):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
"""
resp = self.client.head(url)
return resp.status_code == 204
def _post(self, url, json, response_key, return_raw=False):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
"""
body = self.client.post(url, json=json).json()
if return_raw:
return body[response_key]
return self.resource_class(self, body[response_key])
def _put(self, url, json=None, response_key=None):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
"""
resp = self.client.put(url, json=json)
# PUT requests may not return a body
if resp.content:
body = resp.json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, json=None, response_key=None):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
"""
body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = "No %s matching %s." % (self.resource_class.__name__, kwargs)
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
class CrudManager(BaseManager):
"""Base manager class for manipulating entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
def build_url(self, base_url=None, **kwargs):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
`key = 'entity'`, the following URL's could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
:param base_url: if provided, the generated URL will be appended to it
"""
url = base_url if base_url is not None else ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = kwargs.get('%s_id' % self.key)
if entity_id is not None:
url += '/%s' % entity_id
return url
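    # Given a subclass with collection_key = 'entities' and key = 'entity',
    # the URLs produced look like (a sketch matching the docstring above,
    # where `mgr` is a hypothetical manager instance):
    #
    #   >>> mgr.build_url()
    #   '/entities'
    #   >>> mgr.build_url(entity_id='abc123')
    #   '/entities/abc123'
    #   >>> mgr.build_url(base_url='/v3', entity_id='abc123')
    #   '/v3/entities/abc123'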
def _filter_kwargs(self, kwargs):
"""Drop null values and handle ids."""
for key, ref in six.iteritems(kwargs.copy()):
if ref is None:
kwargs.pop(key)
else:
if isinstance(ref, Resource):
kwargs.pop(key)
kwargs['%s_id' % key] = getid(ref)
return kwargs
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._post(
self.build_url(**kwargs),
{self.key: kwargs},
self.key)
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs),
self.key)
def head(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._head(self.build_url(**kwargs))
def list(self, base_url=None, **kwargs):
"""List the collection.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
def put(self, base_url=None, **kwargs):
"""Update an element.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._put(self.build_url(base_url=base_url, **kwargs))
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._patch(
self.build_url(**kwargs),
{self.key: params},
self.key)
def delete(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._delete(
self.build_url(**kwargs))
def find(self, base_url=None, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
rl = self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
num = len(rl)
if num == 0:
msg = "No %s matching %s." % (self.resource_class.__name__, kwargs)
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
class Extension(HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
manager_class = None
def __init__(self, name, module):
super(Extension, self).__init__()
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in self.module.__dict__.items():
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
else:
try:
if issubclass(attr_value, BaseManager):
self.manager_class = attr_value
except TypeError:
pass
def __repr__(self):
return "<Extension '%s'>" % self.name
class Resource(object):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = 'name'
def __init__(self, manager, info, loaded=False):
"""Populate and bind to a manager.
:param manager: BaseManager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def __repr__(self):
reprkeys = sorted(k
for k in self.__dict__.keys()
if k[0] != '_' and k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
@property
def human_id(self):
"""Human-readable ID which can be used for bash completion.
"""
if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID:
return strutils.to_slug(getattr(self, self.NAME_ATTR))
return None
def _add_details(self, info):
for (k, v) in six.iteritems(info):
try:
setattr(self, k, v)
self._info[k] = v
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
#NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def get(self):
"""Support for lazy loading details.
Some clients, such as novaclient have the option to lazy load the
details, details which can be loaded with this function.
"""
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
def __eq__(self, other):
if not isinstance(other, Resource):
return NotImplemented
# two resources of different types are not equal
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
| apache-2.0 |
TPopovich/mongo-connector | mongo_connector/oplog_manager.py | 1 | 31232 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import pymongo
import sys
import time
import threading
import traceback
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.util import retry_until_ok
from pymongo import MongoClient
class OplogThread(threading.Thread):
"""OplogThread gathers the updates for a single oplog.
"""
def __init__(self, primary_conn, main_address, oplog_coll, is_sharded,
doc_manager, oplog_progress_dict, namespace_set, auth_key,
auth_username, repl_set=None, collection_dump=True,
batch_size=DEFAULT_BATCH_SIZE, fields=None,
dest_mapping={},
namespace_set_to_skip=[],
db_to_skip=[]): # additional db to skip, as we always skip "config" and "local"
"""Initialize the oplog thread.
"""
super(OplogThread, self).__init__()
self.batch_size = batch_size
#The connection to the primary for this replicaSet.
self.primary_connection = primary_conn
#Boolean chooses whether to dump the entire collection if no timestamp
# is present in the config file
self.collection_dump = collection_dump
#The mongos for sharded setups
#Otherwise the same as primary_connection.
#The value is set later on.
self.main_connection = None
#The connection to the oplog collection
self.oplog = oplog_coll
#Boolean describing whether the cluster is sharded or not
self.is_sharded = is_sharded
#A document manager for each target system.
#These are the same for all threads.
if type(doc_manager) == list:
self.doc_managers = doc_manager
else:
self.doc_managers = [doc_manager]
#Boolean describing whether or not the thread is running.
self.running = True
#Stores the timestamp of the last oplog entry read.
self.checkpoint = None
#A dictionary that stores OplogThread/timestamp pairs.
#Represents the last checkpoint for a OplogThread.
self.oplog_progress = oplog_progress_dict
#The set of namespaces to process from the mongo cluster.
self.namespace_set = namespace_set
#The set of namespaces to not process from the mongo cluster.
self.namespace_set_to_skip = namespace_set_to_skip
self.db_to_skip = set(db_to_skip) if db_to_skip else set([])
self.db_to_skip.add("config") # always skip "config" and "local"
self.db_to_skip.add("local")
#The dict of source namespaces to destination namespaces
self.dest_mapping = dest_mapping
#If authentication is used, this is an admin password.
self.auth_key = auth_key
#This is the username used for authentication.
self.auth_username = auth_username
# Set of fields to export
self._fields = set(fields) if fields else None
logging.info('OplogThread: Initializing oplog thread')
if is_sharded:
self.main_connection = MongoClient(main_address)
else:
self.main_connection = MongoClient(main_address,
replicaSet=repl_set)
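            # For a replica set, tail this member's own local oplog rather
            # than the oplog_coll passed in (sharded setups keep oplog_coll).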
self.oplog = self.main_connection['local']['oplog.rs']
if auth_key is not None:
#Authenticate for the whole system
self.primary_connection['admin'].authenticate(
auth_username, auth_key)
self.main_connection['admin'].authenticate(
auth_username, auth_key)
if not self.oplog.find_one():
err_msg = 'OplogThread: No oplog for thread:'
logging.warning('%s %s' % (err_msg, self.primary_connection))
@property
def fields(self):
return self._fields
@fields.setter
def fields(self, value):
if value:
self._fields = set(value)
# Always include _id field
self._fields.add('_id')
else:
self._fields = None
def run(self):
"""Start the oplog worker.
"""
logging.debug("OplogThread: Run thread started")
while self.running is True:
logging.debug("OplogThread: Getting cursor")
cursor = self.init_cursor()
logging.debug("OplogThread: Got the cursor, go go go!")
# we've fallen too far behind
if cursor is None and self.checkpoint is not None:
err_msg = "OplogThread: Last entry no longer in oplog"
effect = "cannot recover!"
logging.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
continue
#The only entry is the last one we processed
if cursor is None or util.retry_until_ok(cursor.count) == 1:
logging.debug("OplogThread: Last entry is the one we "
"already processed. Up to date. Sleeping.")
time.sleep(1)
continue
last_ts = None
err = False
            remove_inc = 0
            upsert_inc = 0
            update_inc = 0
try:
logging.debug("OplogThread: about to process new oplog "
"entries")
while cursor.alive and self.running:
logging.debug("OplogThread: Cursor is still"
" alive and thread is still running.")
for n, entry in enumerate(cursor):
logging.debug("OplogThread: Iterating through cursor,"
" document number in this cursor is %d"
% n)
# Break out if this thread should stop
if not self.running:
break
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
continue
# Take fields out of the oplog entry that
# shouldn't be replicated. This may nullify
# the document if there's nothing to do.
if not self.filter_oplog_entry(entry):
continue
#sync the current oplog operation
operation = entry['op']
ns = entry['ns']
# use namespace mapping if one exists
ns = self.dest_mapping.get(entry['ns'], ns)
for docman in self.doc_managers:
try:
logging.debug("OplogThread: Operation for this "
"entry is %s" % str(operation))
# Remove
if operation == 'd':
entry['_id'] = entry['o']['_id']
docman.remove(entry)
remove_inc += 1
# Insert
elif operation == 'i': # Insert
# Retrieve inserted document from
# 'o' field in oplog record
doc = entry.get('o')
# Extract timestamp and namespace
doc['_ts'] = util.bson_ts_to_long(
entry['ts'])
doc['ns'] = ns
docman.upsert(doc)
upsert_inc += 1
# Update
elif operation == 'u':
doc = {"_id": entry['o2']['_id'],
"_ts": util.bson_ts_to_long(
entry['ts']),
"ns": ns}
# 'o' field contains the update spec
docman.update(doc, entry.get('o', {}))
update_inc += 1
except errors.OperationFailed:
logging.exception(
"Unable to process oplog document %r"
% entry)
except errors.ConnectionFailed:
logging.exception(
"Connection failed while processing oplog "
"document %r" % entry)
if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
logging.debug(
"OplogThread: Documents removed: %d, "
"inserted: %d, updated: %d so far" % (
remove_inc, upsert_inc, update_inc))
logging.debug("OplogThread: Doc is processed.")
last_ts = entry['ts']
                        # update timestamp per batch size
                        # (n % -1 == 0 for all n, so with the default
                        # batch_size of -1 this per-batch update never fires)
if n % self.batch_size == 1 and last_ts is not None:
self.checkpoint = last_ts
self.update_checkpoint()
# update timestamp after running through oplog
if last_ts is not None:
logging.debug("OplogThread: updating checkpoint after"
"processing new oplog entries")
self.checkpoint = last_ts
self.update_checkpoint()
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
logging.exception(
"Cursor closed due to an exception. "
"Will attempt to reconnect.")
err = True
if err is True and self.auth_key is not None:
self.primary_connection['admin'].authenticate(
self.auth_username, self.auth_key)
self.main_connection['admin'].authenticate(
self.auth_username, self.auth_key)
err = False
# update timestamp before attempting to reconnect to MongoDB,
# after being join()'ed, or if the cursor closes
if last_ts is not None:
logging.debug("OplogThread: updating checkpoint after an "
"Exception, cursor closing, or join() on this"
"thread.")
self.checkpoint = last_ts
self.update_checkpoint()
logging.debug("OplogThread: Sleeping. Documents removed: %d, "
"upserted: %d, updated: %d"
% (remove_inc, upsert_inc, update_inc))
time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
logging.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
def filter_oplog_entry(self, entry):
"""Remove fields from an oplog entry that should not be replicated."""
if not self._fields:
return entry
def pop_excluded_fields(doc):
for key in set(doc) - self._fields:
doc.pop(key)
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry['op'] == 'i':
pop_excluded_fields(entry['o'])
# 'u' indicates an update. 'o' field is the update spec.
elif entry['op'] == 'u':
pop_excluded_fields(entry['o'].get("$set", {}))
pop_excluded_fields(entry['o'].get("$unset", {}))
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry['o'] and not entry['o']['$set']:
entry['o'].pop("$set")
if "$unset" in entry['o'] and not entry['o']['$unset']:
entry['o'].pop("$unset")
if not entry['o']:
return None
return entry
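    # Example (illustrative): with self._fields == {'_id', 'name'}, an insert
    # entry {'op': 'i', 'o': {'_id': 1, 'name': 'a', 'junk': 2}} is trimmed to
    # {'op': 'i', 'o': {'_id': 1, 'name': 'a'}}; an update whose only spec is
    # {'$set': {'junk': 2}} ends up with an empty 'o' and is dropped (None).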
def get_oplog_cursor(self, timestamp):
"""Move cursor to the proper place in the oplog.
"""
logging.debug("OplogThread: Getting the oplog cursor and moving it "
"to the proper place in the oplog.")
if timestamp is None:
return None
cursor, cursor_len = None, 0
while (True):
try:
logging.debug("OplogThread: Getting the oplog cursor "
"in the while true loop for get_oplog_cursor")
if not self.namespace_set_to_skip:
if not self.namespace_set:
cursor = self.oplog.find(
{'ts': {'$gte': timestamp}},
tailable=True, await_data=True
)
else:
cursor = self.oplog.find(
{'ts': {'$gte': timestamp},
'ns': {'$in': self.namespace_set}},
tailable=True, await_data=True
)
else: ## user wants to skip
if not self.namespace_set:
cursor = self.oplog.find(
{'ts': {'$gte': timestamp},
'ns': {'$nin': self.namespace_set_to_skip}},
tailable=True, await_data=True
)
                    else:  # user specified both namespace_set and namespace_set_to_skip; namespace_set takes precedence
cursor = self.oplog.find(
{'ts': {'$gte': timestamp},
'ns': {'$in': self.namespace_set}},
tailable=True, await_data=True
)
# Applying 8 as the mask to the cursor enables OplogReplay
cursor.add_option(8)
logging.debug("OplogThread: Cursor created, getting a count.")
cursor_len = cursor.count()
logging.debug("OplogThread: Count is %d" % cursor_len)
break
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
pass
if cursor_len == 0:
logging.debug("OplogThread: Initiating rollback from "
"get_oplog_cursor")
#rollback, we are past the last element in the oplog
timestamp = self.rollback()
logging.info('Finished rollback')
return self.get_oplog_cursor(timestamp)
first_oplog_entry = retry_until_ok(lambda: cursor[0])
cursor_ts_long = util.bson_ts_to_long(first_oplog_entry.get("ts"))
given_ts_long = util.bson_ts_to_long(timestamp)
if cursor_ts_long > given_ts_long:
# first entry in oplog is beyond timestamp, we've fallen behind!
return None
        elif cursor_len == 1:  # means we are at the end of the oplog
self.checkpoint = timestamp
#to commit new TS after rollbacks
return cursor
elif cursor_len > 1:
doc = retry_until_ok(next, cursor)
if timestamp == doc['ts']:
return cursor
else: # error condition
logging.error('OplogThread: %s Bad timestamp in config file'
% self.oplog)
return None
def dump_collection(self):
"""Dumps collection into the target system.
This method is called when we're initializing the cursor and have no
        configs, i.e. when we're starting for the first time.
"""
dump_set = self.namespace_set or []
logging.debug("OplogThread: Dumping set of collections %s " % dump_set)
#no namespaces specified
if not self.namespace_set:
db_list = retry_until_ok(self.main_connection.database_names)
for database in db_list:
if database in self.db_to_skip: # skip "config" , "local" and any other db specified by user to skip
continue
coll_list = retry_until_ok(
self.main_connection[database].collection_names)
for coll in coll_list:
if coll.startswith("system"):
continue
namespace = "%s.%s" % (database, coll)
dump_set.append(namespace)
timestamp = util.retry_until_ok(self.get_last_oplog_timestamp)
if timestamp is None:
return None
long_ts = util.bson_ts_to_long(timestamp)
def docs_to_dump():
for namespace in dump_set:
logging.info("OplogThread: dumping collection %s"
% namespace)
database, coll = namespace.split('.', 1)
last_id = None
attempts = 0
sleep_sec = 1
# Loop to handle possible AutoReconnect
while attempts < 60:
target_coll = self.main_connection[database][coll]
if not last_id:
cursor = util.retry_until_ok(
target_coll.find,
fields=self._fields,
sort=[("_id", pymongo.ASCENDING)]
)
else:
cursor = util.retry_until_ok(
target_coll.find,
{"_id": {"$gt": last_id}},
fields=self._fields,
sort=[("_id", pymongo.ASCENDING)]
)
try:
for doc in cursor:
if not self.running:
raise StopIteration
doc["ns"] = self.dest_mapping.get(
namespace, namespace)
doc["_ts"] = long_ts
last_id = doc["_id"]
yield doc
break
except pymongo.errors.AutoReconnect:
attempts += 1
                        if attempts % 10 == 0:
                            sleep_sec <<= 1  # exponential backoff every 10 reconnect attempts
                        time.sleep(sleep_sec)
# Extra threads (if any) that assist with collection dumps
dumping_threads = []
# Did the dump succeed for all target systems?
dump_success = True
# Holds any exceptions we can't recover from
errors = queue.Queue()
try:
for dm in self.doc_managers:
# Bulk upsert if possible
if hasattr(dm, "bulk_upsert"):
logging.debug("OplogThread: Using bulk upsert function for"
"collection dump")
# Slight performance gain breaking dump into separate
# threads, only if > 1 replication target
if len(self.doc_managers) == 1:
dm.bulk_upsert(docs_to_dump())
else:
def do_dump(error_queue):
all_docs = docs_to_dump()
try:
dm.bulk_upsert(all_docs)
except Exception:
# Likely exceptions:
# pymongo.errors.OperationFailure,
# mongo_connector.errors.ConnectionFailed
# mongo_connector.errors.OperationFailed
error_queue.put(sys.exc_info())
t = threading.Thread(target=do_dump, args=(errors,))
dumping_threads.append(t)
t.start()
else:
logging.debug("OplogThread: DocManager %s has not"
"bulk_upsert method. Upserting documents "
"serially for collection dump." % str(dm))
num = 0
for num, doc in enumerate(docs_to_dump()):
if num % 10000 == 0:
logging.debug("Upserted %d docs." % num)
dm.upsert(doc)
logging.debug("Upserted %d docs" % num)
# cleanup
for t in dumping_threads:
t.join()
except Exception:
# See "likely exceptions" comment above
errors.put(sys.exc_info())
# Print caught exceptions
try:
while True:
klass, value, trace = errors.get_nowait()
dump_success = False
traceback.print_exception(klass, value, trace)
except queue.Empty:
pass
if not dump_success:
err_msg = "OplogThread: Failed during dump collection"
effect = "cannot recover!"
logging.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
return None
return timestamp
def get_last_oplog_timestamp(self):
"""Return the timestamp of the latest entry in the oplog.
"""
if not self.namespace_set:
curr = self.oplog.find().sort(
'$natural', pymongo.DESCENDING
).limit(1)
else:
curr = self.oplog.find(
{'ns': {'$in': self.namespace_set}}
).sort('$natural', pymongo.DESCENDING).limit(1)
if curr.count(with_limit_and_skip=True) == 0:
return None
logging.debug("OplogThread: Last oplog entry has timestamp %d."
% curr[0]['ts'].time)
return curr[0]['ts']
def init_cursor(self):
"""Position the cursor appropriately.
The cursor is set to either the beginning of the oplog, or
wherever it was last left off.
"""
logging.debug("OplogThread: Initializing the oplog cursor.")
timestamp = self.read_last_checkpoint()
if timestamp is None and self.collection_dump:
timestamp = self.dump_collection()
if timestamp:
msg = "Dumped collection into target system"
logging.info('OplogThread: %s %s'
% (self.oplog, msg))
elif timestamp is None:
# set timestamp to top of oplog
timestamp = retry_until_ok(self.get_last_oplog_timestamp)
self.checkpoint = timestamp
cursor = self.get_oplog_cursor(timestamp)
if cursor is not None:
self.update_checkpoint()
return cursor
def update_checkpoint(self):
"""Store the current checkpoint in the oplog progress dictionary.
"""
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
oplog_dict[str(self.oplog)] = self.checkpoint
logging.debug("OplogThread: oplog checkpoint updated to %s" %
str(self.checkpoint))
def read_last_checkpoint(self):
"""Read the last checkpoint from the oplog progress dictionary.
"""
oplog_str = str(self.oplog)
ret_val = None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
if oplog_str in oplog_dict.keys():
ret_val = oplog_dict[oplog_str]
logging.debug("OplogThread: reading last checkpoint as %s " %
str(ret_val))
return ret_val
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
logging.debug("OplogThread: Initiating rollback sequence to bring "
"system into a consistent state.")
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(last_docs,
key=lambda x: x["_ts"] if x else float("-inf"))
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{'ts': {'$lte': target_ts}},
sort=[('$natural', pymongo.DESCENDING)]
)
logging.debug("OplogThread: last oplog entry is %s"
% str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry['ts']
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc['_ts']
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc['ns'] in rollback_set:
rollback_set[doc['ns']].append(doc)
else:
rollback_set[doc['ns']] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = namespace
for source_name, dest_name in self.dest_mapping.items():
if dest_name == namespace:
original_namespace = source_name
database, coll = original_namespace.split('.', 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc['_id']) for doc in doc_list]
to_update = util.retry_until_ok(
self.main_connection[database][coll].find,
{'_id': {'$in': bson_obj_id_list}},
fields=self._fields
)
#doc list are docs in target system, to_update are
#docs in mongo
doc_hash = {} # hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc['_id'] in doc_hash:
del doc_hash[doc['_id']]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
#delete the inconsistent documents
logging.debug("OplogThread: Rollback, removing inconsistent "
"docs.")
remov_inc = 0
for doc in doc_hash.values():
try:
dm.remove(doc)
remov_inc += 1
logging.debug("OplogThread: Rollback, removed %s " %
str(doc))
except errors.OperationFailed:
logging.warning(
"Could not delete document during rollback: %s "
"This can happen if this document was already "
"removed by another rollback happening at the "
"same time." % str(doc)
)
logging.debug("OplogThread: Rollback, removed %d docs." %
remov_inc)
#insert the ones from mongo
logging.debug("OplogThread: Rollback, inserting documents "
"from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
doc['_ts'] = util.bson_ts_to_long(rollback_cutoff_ts)
doc['ns'] = self.dest_mapping.get(namespace, namespace)
try:
insert_inc += 1
dm.upsert(doc)
except errors.OperationFailed as e:
fail_insert_inc += 1
logging.error("OplogThread: Rollback, Unable to "
"insert %s with exception %s"
% (doc, str(e)))
logging.debug("OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))
return rollback_cutoff_ts
| apache-2.0 |
CenturylinkTechnology/ansible-modules-extras | monitoring/sensu_check.py | 17 | 13614 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <aim@secoya.dk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: sensu_check
short_description: Manage Sensu checks
version_added: 2.0
description:
- Manage the checks that should be run on a machine by I(Sensu).
- Most options do not have a default and will not be added to the check definition unless specified.
- All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
- they are simply specified for your convenience.
options:
name:
description:
- The name of the check
- This is the key that is used to determine whether a check exists
required: true
state:
description:
- Whether the check should be present or not
choices: [ 'present', 'absent' ]
required: false
default: present
path:
description:
- Path to the json file of the check to be added/removed.
- Will be created if it does not exist (unless I(state=absent)).
- The parent folders need to exist when I(state=present), otherwise an error will be thrown
required: false
default: /etc/sensu/conf.d/checks.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so
- you can get the original file back if you somehow clobbered it incorrectly.
choices: [ 'yes', 'no' ]
required: false
default: no
command:
description:
- Path to the sensu check to run (not required when I(state=absent))
required: true
handlers:
description:
- List of handlers to notify when the check fails
required: false
default: []
subscribers:
description:
- List of subscribers/channels this check should run for
- See sensu_subscribers to subscribe a machine to a channel
required: false
default: []
interval:
description:
- Check interval in seconds
required: false
default: null
timeout:
description:
- Timeout for the check
required: false
default: 10
handle:
description:
- Whether the check should be handled or not
choices: [ 'yes', 'no' ]
required: false
default: yes
subdue_begin:
description:
- When to disable handling of check failures
required: false
default: null
subdue_end:
description:
- When to enable handling of check failures
required: false
default: null
dependencies:
description:
- Other checks this check depends on, if dependencies fail,
- handling of this check will be disabled
required: false
default: []
metric:
description:
- Whether the check is a metric
choices: [ 'yes', 'no' ]
required: false
default: no
standalone:
description:
- Whether the check should be scheduled by the sensu client or server
- This option obviates the need for specifying the I(subscribers) option
choices: [ 'yes', 'no' ]
required: false
default: no
publish:
description:
- Whether the check should be scheduled at all.
- You can still issue it via the sensu api
choices: [ 'yes', 'no' ]
required: false
default: yes
occurrences:
description:
- Number of event occurrences before the handler should take action
required: false
default: 1
refresh:
description:
- Number of seconds handlers should wait before taking second action
required: false
default: null
aggregate:
description:
- Classifies the check as an aggregate check,
- making it available via the aggregate API
choices: [ 'yes', 'no' ]
required: false
default: no
low_flap_threshold:
description:
      - The low threshold for flap detection
required: false
default: null
high_flap_threshold:
description:
      - The high threshold for flap detection
required: false
default: null
custom:
version_added: "2.1"
description:
      - A hash/dictionary of custom parameters for mixing into the configuration.
      - You can't override other module parameters using this
required: false
default: {}
source:
version_added: "2.1"
description:
- The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
required: false
default: null
requirements: [ ]
author: "Anders Ingemann (@andsens)"
'''
EXAMPLES = '''
# Fetch metrics about the CPU load every 60 seconds,
# the sensu server has a handler called 'relay' which forwards stats to graphite
- name: get cpu metrics
sensu_check: name=cpu_load
command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb
metric=yes handlers=relay subscribers=common interval=60
# Check whether nginx is running
- name: check nginx process
sensu_check: name=nginx_running
command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid'
handlers=default subscribers=nginx interval=60
# Stop monitoring the disk capacity.
# Note that the check will still show up in the sensu dashboard,
# to remove it completely you need to issue a DELETE request to the sensu api.
- name: check disk
sensu_check: name=check_disk_capacity state=absent
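# Illustrative example (hypothetical command path and values): pass extra
# Sensu parameters through 'custom' and subdue handling overnight. Dict-style
# arguments are used here because 'custom' takes a hash.
- name: check disk with custom parameters
  sensu_check:
    name: check_disk_capacity
    command: /etc/sensu/plugins/system/check-disk.rb
    interval: 300
    subdue_begin: '01:00'
    subdue_end: '05:00'
    custom:
      remediation: true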
'''
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
def sensu_check(module, path, name, state='present', backup=False):
changed = False
reasons = []
stream = None
try:
try:
stream = open(path, 'r')
config = json.load(stream)
except IOError:
e = get_exception()
            if e.errno == 2:  # ENOENT: file not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
return changed, reasons
config = {}
else:
module.fail_json(msg=str(e))
except ValueError:
msg = '{path} contains invalid JSON'.format(path=path)
module.fail_json(msg=msg)
finally:
if stream:
stream.close()
if 'checks' not in config:
if state == 'absent':
reasons.append('`checks\' section did not exist and state is `absent\'')
return changed, reasons
config['checks'] = {}
changed = True
reasons.append('`checks\' section did not exist')
if state == 'absent':
if name in config['checks']:
del config['checks'][name]
changed = True
reasons.append('check was present and state is `absent\'')
if state == 'present':
if name not in config['checks']:
check = {}
config['checks'][name] = check
changed = True
reasons.append('check was absent and state is `present\'')
else:
check = config['checks'][name]
simple_opts = ['command',
'handlers',
'subscribers',
'interval',
'timeout',
'handle',
'dependencies',
'standalone',
'publish',
'occurrences',
'refresh',
'aggregate',
'low_flap_threshold',
'high_flap_threshold',
'source',
]
for opt in simple_opts:
if module.params[opt] is not None:
if opt not in check or check[opt] != module.params[opt]:
check[opt] = module.params[opt]
changed = True
reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
else:
if opt in check:
del check[opt]
changed = True
reasons.append('`{opt}\' was removed'.format(opt=opt))
if module.params['custom']:
            custom_params = module.params['custom']
            overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
            if overwritten_fields:
                msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
                module.fail_json(msg=msg)
            for k, v in custom_params.items():
if k in config['checks'][name]:
if not config['checks'][name][k] == v:
changed = True
reasons.append('`custom param {opt}\' was changed'.format(opt=k))
else:
changed = True
reasons.append('`custom param {opt}\' was added'.format(opt=k))
check[k] = v
simple_opts += custom_params.keys()
# Remove obsolete custom params
for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type','subdue','subdue_begin','subdue_end']):
changed = True
reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
del check[opt]
if module.params['metric']:
if 'type' not in check or check['type'] != 'metric':
check['type'] = 'metric'
changed = True
reasons.append('`type\' was not defined or not `metric\'')
if not module.params['metric'] and 'type' in check:
del check['type']
changed = True
reasons.append('`type\' was defined')
if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
subdue = {'begin': module.params['subdue_begin'],
'end': module.params['subdue_end'],
}
if 'subdue' not in check or check['subdue'] != subdue:
check['subdue'] = subdue
changed = True
reasons.append('`subdue\' did not exist or was different')
else:
if 'subdue' in check:
del check['subdue']
changed = True
reasons.append('`subdue\' was removed')
if changed and not module.check_mode:
if backup:
module.backup_local(path)
try:
try:
stream = open(path, 'w')
stream.write(json.dumps(config, indent=2) + '\n')
except IOError:
e = get_exception()
module.fail_json(msg=str(e))
finally:
if stream:
stream.close()
return changed, reasons
def main():
arg_spec = {'name': {'type': 'str', 'required': True},
'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
'backup': {'type': 'bool', 'default': 'no'},
'command': {'type': 'str'},
'handlers': {'type': 'list'},
'subscribers': {'type': 'list'},
'interval': {'type': 'int'},
'timeout': {'type': 'int'},
'handle': {'type': 'bool'},
'subdue_begin': {'type': 'str'},
'subdue_end': {'type': 'str'},
'dependencies': {'type': 'list'},
'metric': {'type': 'bool', 'default': 'no'},
'standalone': {'type': 'bool'},
'publish': {'type': 'bool'},
'occurrences': {'type': 'int'},
'refresh': {'type': 'int'},
'aggregate': {'type': 'bool'},
'low_flap_threshold': {'type': 'int'},
'high_flap_threshold': {'type': 'int'},
'custom': {'type': 'dict'},
'source': {'type': 'str'},
}
required_together = [['subdue_begin', 'subdue_end']]
module = AnsibleModule(argument_spec=arg_spec,
required_together=required_together,
supports_check_mode=True)
if module.params['state'] != 'absent' and module.params['command'] is None:
module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
path = module.params['path']
name = module.params['name']
state = module.params['state']
backup = module.params['backup']
changed, reasons = sensu_check(module, path, name, state, backup)
module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
main()
| gpl-3.0 |
simongoffin/website_version | addons/mail/tests/__init__.py | 121 | 1318 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_mail_group, test_mail_message, test_mail_features, test_mail_gateway, test_message_read, test_invite
checks = [
test_mail_group,
test_mail_message,
test_mail_features,
test_mail_gateway,
test_message_read,
test_invite,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rahuldan/sympy | sympy/plotting/pygletplot/plot_mode_base.py | 96 | 11337 | from __future__ import print_function, division
from pyglet.gl import *
from plot_mode import PlotMode
from threading import Thread, Event, RLock
from color_scheme import ColorScheme
from sympy.core import S
from sympy.core.compatibility import is_sequence
from time import sleep
import warnings
class PlotModeBase(PlotMode):
"""
Intended parent class for plotting
modes. Provides base functionality
in conjunction with its parent,
PlotMode.
"""
##
## Class-Level Attributes
##
"""
The following attributes are meant
to be set at the class level, and serve
as parameters to the plot mode registry
(in PlotMode). See plot_modes.py for
concrete examples.
"""
"""
i_vars
'x' for Cartesian2D
'xy' for Cartesian3D
etc.
d_vars
'y' for Cartesian2D
'r' for Polar
etc.
"""
i_vars, d_vars = '', ''
"""
intervals
Default intervals for each i_var, and in the
same order. Specified [min, max, steps].
No variable can be given (it is bound later).
"""
intervals = []
"""
aliases
A list of strings which can be used to
access this mode.
'cartesian' for Cartesian2D and Cartesian3D
'polar' for Polar
'cylindrical', 'polar' for Cylindrical
Note that _init_mode chooses the first alias
in the list as the mode's primary_alias, which
will be displayed to the end user in certain
contexts.
"""
aliases = []
"""
is_default
Whether to set this mode as the default
for arguments passed to PlotMode() containing
the same number of d_vars as this mode and
at most the same number of i_vars.
"""
is_default = False
"""
All of the above attributes are defined in PlotMode.
The following ones are specific to PlotModeBase.
"""
"""
A list of the render styles. Do not modify.
"""
styles = {'wireframe': 1, 'solid': 2, 'both': 3}
"""
style_override
Always use this style if not blank.
"""
style_override = ''
"""
default_wireframe_color
default_solid_color
Can be used when color is None or being calculated.
Used by PlotCurve and PlotSurface, but not anywhere
in PlotModeBase.
"""
default_wireframe_color = (0.85, 0.85, 0.85)
default_solid_color = (0.6, 0.6, 0.9)
default_rot_preset = 'xy'
##
## Instance-Level Attributes
##
## 'Abstract' member functions
def _get_evaluator(self):
if self.use_lambda_eval:
try:
e = self._get_lambda_evaluator()
return e
except Exception:
warnings.warn("\nWarning: creating lambda evaluator failed. "
"Falling back on sympy subs evaluator.")
return self._get_sympy_evaluator()
def _get_sympy_evaluator(self):
raise NotImplementedError()
def _get_lambda_evaluator(self):
raise NotImplementedError()
def _on_calculate_verts(self):
raise NotImplementedError()
def _on_calculate_cverts(self):
raise NotImplementedError()
## Base member functions
def __init__(self, *args, **kwargs):
self.verts = []
self.cverts = []
self.bounds = [[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0]]
self.cbounds = [[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0]]
self._draw_lock = RLock()
self._calculating_verts = Event()
self._calculating_cverts = Event()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = 0.0
self._calculating_cverts_pos = 0.0
self._calculating_cverts_len = 0.0
self._max_render_stack_size = 3
self._draw_wireframe = [-1]
self._draw_solid = [-1]
self._style = None
self._color = None
self.predraw = []
self.postdraw = []
self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
self.style = self.options.pop('style', '')
self.color = self.options.pop('color', 'rainbow')
self.bounds_callback = kwargs.pop('bounds_callback', None)
self._on_calculate()
def synchronized(f):
def w(self, *args, **kwargs):
self._draw_lock.acquire()
try:
r = f(self, *args, **kwargs)
return r
finally:
self._draw_lock.release()
return w
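    # `synchronized` is applied as a method decorator below: each wrapped call
    # holds self._draw_lock for its duration, so draw() and the push_* methods
    # never mutate the render stacks concurrently.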
@synchronized
def push_wireframe(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_wireframe.append(function)
if len(self._draw_wireframe) > self._max_render_stack_size:
del self._draw_wireframe[1] # leave marker element
@synchronized
def push_solid(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_solid.append(function)
if len(self._draw_solid) > self._max_render_stack_size:
del self._draw_solid[1] # leave marker element
def _create_display_list(self, function):
dl = glGenLists(1)
glNewList(dl, GL_COMPILE)
function()
glEndList()
return dl
def _render_stack_top(self, render_stack):
top = render_stack[-1]
if top == -1:
return -1 # nothing to display
elif callable(top):
dl = self._create_display_list(top)
render_stack[-1] = (dl, top)
return dl # display newly added list
elif len(top) == 2:
if GL_TRUE == glIsList(top[0]):
return top[0] # display stored list
dl = self._create_display_list(top[1])
render_stack[-1] = (dl, top[1])
return dl # display regenerated list
def _draw_solid_display_list(self, dl):
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glCallList(dl)
glPopAttrib()
def _draw_wireframe_display_list(self, dl):
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glEnable(GL_POLYGON_OFFSET_LINE)
glPolygonOffset(-0.005, -50.0)
glCallList(dl)
glPopAttrib()
@synchronized
def draw(self):
for f in self.predraw:
if callable(f):
f()
if self.style_override:
style = self.styles[self.style_override]
else:
style = self.styles[self._style]
# Draw solid component if style includes solid
if style & 2:
dl = self._render_stack_top(self._draw_solid)
if dl > 0 and GL_TRUE == glIsList(dl):
self._draw_solid_display_list(dl)
# Draw wireframe component if style includes wireframe
if style & 1:
dl = self._render_stack_top(self._draw_wireframe)
if dl > 0 and GL_TRUE == glIsList(dl):
self._draw_wireframe_display_list(dl)
for f in self.postdraw:
if callable(f):
f()
def _on_change_color(self, color):
Thread(target=self._calculate_cverts).start()
def _on_calculate(self):
Thread(target=self._calculate_all).start()
def _calculate_all(self):
self._calculate_verts()
self._calculate_cverts()
def _calculate_verts(self):
if self._calculating_verts.isSet():
return
self._calculating_verts.set()
try:
self._on_calculate_verts()
finally:
self._calculating_verts.clear()
if callable(self.bounds_callback):
self.bounds_callback()
def _calculate_cverts(self):
if self._calculating_verts.isSet():
return
while self._calculating_cverts.isSet():
sleep(0) # wait for previous calculation
self._calculating_cverts.set()
try:
self._on_calculate_cverts()
finally:
self._calculating_cverts.clear()
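    # The two Event flags above act as re-entrancy guards: an in-progress
    # vertex calculation suppresses both vertex and color recalculation, while
    # a color recalculation merely waits for any in-flight color run to end.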
def _get_calculating_verts(self):
return self._calculating_verts.isSet()
def _get_calculating_verts_pos(self):
return self._calculating_verts_pos
def _get_calculating_verts_len(self):
return self._calculating_verts_len
def _get_calculating_cverts(self):
return self._calculating_cverts.isSet()
def _get_calculating_cverts_pos(self):
return self._calculating_cverts_pos
def _get_calculating_cverts_len(self):
return self._calculating_cverts_len
## Property handlers
def _get_style(self):
return self._style
@synchronized
def _set_style(self, v):
if v is None:
return
if v == '':
step_max = 0
for i in self.intervals:
if i.v_steps is None:
continue
step_max = max([step_max, int(i.v_steps)])
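            # index with a bool: render wireframe+solid for coarse meshes,
            # solid only once any interval exceeds 40 steps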
v = ['both', 'solid'][step_max > 40]
        if v not in self.styles:
            raise ValueError("style must be one of %s" % ", ".join(self.styles))
if v == self._style:
return
self._style = v
def _get_color(self):
return self._color
@synchronized
def _set_color(self, v):
try:
if v is not None:
if is_sequence(v):
v = ColorScheme(*v)
else:
v = ColorScheme(v)
if repr(v) == repr(self._color):
return
self._on_change_color(v)
self._color = v
except Exception as e:
raise RuntimeError(("Color change failed. "
"Reason: %s" % (str(e))))
style = property(_get_style, _set_style)
color = property(_get_color, _set_color)
calculating_verts = property(_get_calculating_verts)
calculating_verts_pos = property(_get_calculating_verts_pos)
calculating_verts_len = property(_get_calculating_verts_len)
calculating_cverts = property(_get_calculating_cverts)
calculating_cverts_pos = property(_get_calculating_cverts_pos)
calculating_cverts_len = property(_get_calculating_cverts_len)
## String representations
def __str__(self):
f = ", ".join(str(d) for d in self.d_vars)
o = "'mode=%s'" % (self.primary_alias)
return ", ".join([f, o])
def __repr__(self):
f = ", ".join(str(d) for d in self.d_vars)
i = ", ".join(str(i) for i in self.intervals)
d = [('mode', self.primary_alias),
('color', str(self.color)),
('style', str(self.style))]
o = "'%s'" % (("; ".join("%s=%s" % (k, v)
for k, v in d if v != 'None')))
return ", ".join([f, i, o])
| bsd-3-clause |